├── .gitignore ├── LICENSE ├── README.md ├── data ├── data_pipeline │ ├── Dockerfile │ ├── build_records.py │ ├── dataloader.py │ └── process.py ├── interim │ └── 01411.parquet ├── processed │ └── fold3-1-140.tfrecords └── raw │ └── 01411.mp4 ├── images ├── api-list.png ├── demo.gif ├── deployment-single-vm.png ├── frontend-1.png ├── frontend-2.png ├── solution-arch.png └── technical-arch.png ├── model ├── Dockerfile ├── Pipfile ├── capy-trainer.tar.gz ├── cli.sh ├── docker-shell.sh ├── entrypoint.sh ├── package-trainer.sh ├── package │ ├── PKG-INFO │ ├── capy-trainer │ │ ├── __init__.py │ │ └── task.py │ ├── setup.cfg │ └── setup.py └── requirements.txt ├── models └── .gitkeep ├── notebooks ├── .gitkeep ├── eda.ipynb └── model_testing.ipynb ├── preprocessing ├── csv_to_parquet.ipynb ├── gcp-upload.ipynb └── video_to_landmark_csv.ipynb ├── references ├── .gitkeep ├── 2020.coling-main.525.pdf ├── Camgoz_Neural_Sign_Language_CVPR_2018_paper.pdf ├── methods.md └── sign_language_transformers.pdf ├── requirements.txt ├── src ├── api-service │ ├── .gitkeep │ ├── Dockerfile │ ├── Pipfile │ ├── Pipfile.lock │ ├── api │ │ ├── local_model │ │ │ └── asl_model.h5 │ │ ├── model.py │ │ ├── service.py │ │ ├── test.mp4 │ │ └── tracker.py │ ├── docker-entrypoint.sh │ ├── docker-shell.bat │ └── docker-shell.sh ├── data-collector │ └── .gitkeep ├── data-processor │ ├── Dockerfile │ ├── Pipfile │ ├── Pipfile.lock │ ├── cli.py │ ├── docker-shell.sh │ ├── entrypoint.sh │ ├── requirements.txt │ └── wlasl_deploy_video │ │ ├── 60578.mp4 │ │ └── 70349.mp4 ├── deployment │ └── .gitkeep ├── frontend │ ├── .env.development │ ├── .env.production │ ├── .gitignore │ ├── .gitkeep │ ├── Dockerfile │ ├── Dockerfile.dev │ ├── docker-shell.bat │ ├── docker-shell.sh │ ├── package-lock.json │ ├── package.json │ ├── public │ │ ├── favicon.ico │ │ ├── index.html │ │ └── manifest.json │ ├── src │ │ ├── app │ │ │ ├── App.css │ │ │ ├── App.js │ │ │ ├── AppRoutes.js │ │ │ ├── Theme.js │ │ │ ├── VideoUpload.js │ │ │ ├── index.js │ │ │ └── styles.css │ │ ├── common │ │ │ ├── Content │ │ │ │ ├── index.js │ │ │ │ └── styles.js │ │ │ ├── Footer │ │ │ │ ├── index.js │ │ │ │ └── styles.js │ │ │ └── Header │ │ │ │ ├── index.js │ │ │ │ └── styles.js │ │ ├── components │ │ │ ├── Currentmodel │ │ │ │ ├── index.js │ │ │ │ └── styles.js │ │ │ ├── Error │ │ │ │ └── 404.js │ │ │ └── Home │ │ │ │ ├── background.png │ │ │ │ ├── index.js │ │ │ │ └── styles.js │ │ ├── index.css │ │ ├── index.js │ │ └── services │ │ │ ├── Common.js │ │ │ └── DataService.js │ └── yarn.lock ├── model-deploy │ └── .gitkeep ├── model-prediction │ ├── Dockerfile │ ├── Pipfile │ ├── Pipfile.lock │ ├── cli.py │ ├── docker-shell.sh │ ├── entrypoint.sh │ ├── islr-fp16-192-8-seed42-foldall-full.h5 │ └── requirements.txt └── workflow │ └── .gitkeep └── test_project.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | .DS_Store 132 | src/.DS_Store -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | Preamble 9 | 10 | The GNU General Public License is a free, copyleft license for 11 | software and other kinds of works. 12 | 13 | The licenses for most software and other practical works are designed 14 | to take away your freedom to share and change the works. By contrast, 15 | the GNU General Public License is intended to guarantee your freedom to 16 | share and change all versions of a program--to make sure it remains free 17 | software for all its users. We, the Free Software Foundation, use the 18 | GNU General Public License for most of our software; it applies also to 19 | any other work released this way by its authors. You can apply it to 20 | your programs, too. 21 | 22 | When we speak of free software, we are referring to freedom, not 23 | price. Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | them if you wish), that you receive source code or can get it if you 26 | want it, that you can change the software or use pieces of it in new 27 | free programs, and that you know you can do these things. 28 | 29 | To protect your rights, we need to prevent others from denying you 30 | these rights or asking you to surrender the rights.
Therefore, you have 31 | certain responsibilities if you distribute copies of the software, or if 32 | you modify it: responsibilities to respect the freedom of others. 33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must pass on to the recipients the same 36 | freedoms that you received. You must make sure that they, too, receive 37 | or can get the source code. And you must show them these terms so they 38 | know their rights. 39 | 40 | Developers that use the GNU GPL protect your rights with two steps: 41 | (1) assert copyright on the software, and (2) offer you this License 42 | giving you legal permission to copy, distribute and/or modify it. 43 | 44 | For the developers' and authors' protection, the GPL clearly explains 45 | that there is no warranty for this free software. For both users' and 46 | authors' sake, the GPL requires that modified versions be marked as 47 | changed, so that their problems will not be attributed erroneously to 48 | authors of previous versions. 49 | 50 | Some devices are designed to deny users access to install or run 51 | modified versions of the software inside them, although the manufacturer 52 | can do so. This is fundamentally incompatible with the aim of 53 | protecting users' freedom to change the software. The systematic 54 | pattern of such abuse occurs in the area of products for individuals to 55 | use, which is precisely where it is most unacceptable. Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products. If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 60 | 61 | Finally, every program is threatened constantly by software patents. 62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". "Licensees" and 82 | "recipients" may be individuals or organizations. 83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. 
Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. 
This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 
221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. You need not require recipients to copy the 279 | Corresponding Source along with the object code. 
If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. 
Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 386 | those licensors and authors. 387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. 
If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. 
If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. The 475 | work thus licensed is called the contributor's "contributor version". 476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. "Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 
512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. 
If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 583 | 584 | Later license versions may give you additional or different 585 | permissions. However, no additional obligations are imposed on any 586 | author or copyright holder as a result of your choosing to follow a 587 | later version. 588 | 589 | 15. Disclaimer of Warranty. 590 | 591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 599 | 600 | 16. Limitation of Liability. 601 | 602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 610 | SUCH DAMAGES. 611 | 612 | 17. Interpretation of Sections 15 and 16. 613 | 614 | If the disclaimer of warranty and limitation of liability provided 615 | above cannot be given local legal effect according to their terms, 616 | reviewing courts shall apply local law that most closely approximates 617 | an absolute waiver of all civil liability in connection with the 618 | Program, unless a warranty or assumption of liability accompanies a 619 | copy of the Program in return for a fee. 620 | 621 | END OF TERMS AND CONDITIONS 622 | 623 | How to Apply These Terms to Your New Programs 624 | 625 | If you develop a new program, and you want it to be of the greatest 626 | possible use to the public, the best way to achieve this is to make it 627 | free software which everyone can redistribute and change under these terms. 628 | 629 | To do so, attach the following notices to the program. It is safest 630 | to attach them to the start of each source file to most effectively 631 | state the exclusion of warranty; and each file should have at least 632 | the "copyright" line and a pointer to where the full notice is found. 
633 | 634 | <one line to give the program's name and a brief idea of what it does.> 635 | Copyright (C) <year>  <name of author> 636 | 637 | This program is free software: you can redistribute it and/or modify 638 | it under the terms of the GNU General Public License as published by 639 | the Free Software Foundation, either version 3 of the License, or 640 | (at your option) any later version. 641 | 642 | This program is distributed in the hope that it will be useful, 643 | but WITHOUT ANY WARRANTY; without even the implied warranty of 644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 645 | GNU General Public License for more details. 646 | 647 | You should have received a copy of the GNU General Public License 648 | along with this program. If not, see <https://www.gnu.org/licenses/>. 649 | 650 | Also add information on how to contact you by electronic and paper mail. 651 | 652 | If the program does terminal interaction, make it output a short 653 | notice like this when it starts in an interactive mode: 654 | 655 | <program> Copyright (C) <year> <name of author> 656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 657 | This is free software, and you are welcome to redistribute it 658 | under certain conditions; type `show c' for details. 659 | 660 | The hypothetical commands `show w' and `show c' should show the appropriate 661 | parts of the General Public License. Of course, your program's commands 662 | might be different; for a GUI interface, you would use an "about box". 663 | 664 | You should also get your employer (if you work as a programmer) or school, 665 | if any, to sign a "copyright disclaimer" for the program, if necessary. 666 | For more information on this, and how to apply and follow the GNU GPL, see 667 | <https://www.gnu.org/licenses/>. 668 | 669 | The GNU General Public License does not permit incorporating your program 670 | into proprietary programs. If your program is a subroutine library, you 671 | may consider it more useful to permit linking proprietary applications with 672 | the library. If this is what you want to do, use the GNU Lesser General 673 | Public License instead of this License. But first, please read 674 | <https://www.gnu.org/licenses/why-not-lgpl.html>. 675 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # AI American Sign Language Translator 2 | 3 | A real-time American Sign Language (ASL) translation system that uses computer vision and machine learning to interpret hand gestures and convert them to text. 4 | 5 | ## 🚀 Features 6 | 7 | - **Real-time ASL Recognition**: Capture and interpret hand gestures in real time 8 | - **High Accuracy**: Trained on extensive ASL datasets for reliable translations 9 | - **User-Friendly Interface**: Intuitive web application for easy interaction 10 | - **API Integration**: RESTful API for seamless integration with other applications 11 | - **Cloud Deployment**: Scalable deployment on Google Cloud Platform 12 | 13 | ## 🖥️ Application Interface 14 | 15 | Our web application provides an intuitive interface for ASL translation: 16 | 17 | ![Demo](images/demo.gif) 18 | 19 | *Watch our application in action - the complete workflow from video upload to ASL translation results.* 20 | 21 | ## 📋 Prerequisites 22 | 23 | - Python 3.8+ 24 | - Docker and Docker Compose 25 | - Google Cloud Platform account (for deployment) 26 | - Node.js 16+ (for frontend development) 27 | 28 | ## 🛠️ Installation 29 | 30 | ### Local Development 31 | 32 | 1. **Clone the repository** 33 | ```bash 34 | git clone <repository-url> 35 | cd AI_ASL_Translator 36 | ``` 37 | 38 | 2. **Install Python dependencies** 39 | ```bash 40 | pip install -r requirements.txt 41 | ``` 42 | 43 | 3. **Set up the frontend** 44 | ```bash 45 | cd src/frontend 46 | npm install 47 | ``` 48 | 49 | ## 🏃‍♂️ Quick Start 50 | 51 | ### Running the API Service 52 | 53 | 1. Navigate to the API service directory: 54 | ```bash 55 | cd src/api-service 56 | ``` 57 | 58 | 2. Start the API server: 59 | ```bash 60 | sh docker-shell.sh 61 | uvicorn_server 62 | ``` 63 | 64 | 3. Access the API documentation at `http://localhost:9000/docs` 65 |
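Once the API container is up, a quick way to verify it is responding is to call the service's `/health` endpoint (listed under API Documentation below). A minimal sketch in Python, assuming the server is on its default port 9000:

```python
import requests

# Ping the health-check endpoint of the locally running API service.
response = requests.get("http://localhost:9000/health", timeout=5)
response.raise_for_status()  # Raise if the service returned an error status
print(response.json())
```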
66 | ### Running the Frontend 67 | 68 | 1. Navigate to the frontend directory: 69 | ```bash 70 | cd src/frontend 71 | ``` 72 | 73 | 2. Start the development server: 74 | ```bash 75 | sh docker-shell.sh 76 | yarn install # First time only 77 | yarn start 78 | ``` 79 | 80 | 3. Access the application at `http://localhost:3000` 81 | 82 | ## 🏗️ Project Structure 83 | 84 | ``` 85 | AI_ASL_Translator/ 86 | ├── data/ # Data storage (not uploaded to repo) 87 | │ ├── interim/ # Intermediate preprocessed data 88 | │ ├── processed/ # Final dataset files for modeling 89 | │ └── raw/ # Original immutable input data 90 | ├── notebooks/ # Jupyter notebooks for EDA and testing 91 | ├── src/ # Source code 92 | │ ├── data-collector/ # Dataset creation scripts 93 | │ ├── data-processor/ # Data processing code 94 | │ ├── model-prediction/ # Model inference and evaluation 95 | │ ├── model-deploy/ # Model deployment 96 | │ ├── workflow/ # Automation scripts 97 | │ ├── api-service/ # Backend API service 98 | │ ├── frontend/ # React frontend application 99 | │ └── deployment/ # GCP deployment configuration 100 | ├── model/ # Model training code and Vertex AI trainer package 101 | ├── references/ # Reference materials and papers 102 | └── requirements.txt # Python dependencies 103 | ``` 104 | 105 | ## 🏛️ System Architecture 106 | 107 | ### Solution Architecture 108 | 109 | Our solution architecture outlines the complete workflow from data collection to deployment: 110 | 111 | ![Solution Architecture](images/solution-arch.png) 112 | 113 | *The solution architecture shows the three main layers: Process (People), Execution (Code), and State (Source, Data, Models).* 114 | 115 | ### Technical Architecture 116 | 117 | The technical architecture details the implementation components and their interactions: 118 | 119 | ![Technical Architecture](images/technical-arch.png) 120 | 121 | *The technical architecture illustrates the frontend, backend, ML pipeline, and state management components.* 122 | 123 | ## 🔧 Configuration 124 | 125 | ### Environment Variables 126 | 127 | Create a `.env` file in the root directory: 128 | 129 | ```env 130 | # API Configuration 131 | API_HOST=0.0.0.0 132 | API_PORT=9000 133 | DEBUG=False 134 | 135 | # Model Configuration 136 | MODEL_PATH=./models/asl_model.h5 137 | CONFIDENCE_THRESHOLD=0.8 138 | 139 | # Database Configuration 140 | DATABASE_URL=postgresql://user:password@localhost/asl_db 141 | 142 | # GCP Configuration (for deployment) 143 | GCP_PROJECT_ID=your-project-id 144 | GCP_ZONE=us-central1-a 145 | ``` 146 |
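These variables are read by the backend at startup. For reference, a minimal sketch of how they might be consumed (the `Settings` helper below is illustrative and not part of the repository; the variable names follow the `.env` example above):

```python
import os

# Illustrative settings loader: reads the .env-style variables shown above
# from the process environment, falling back to local-development defaults.
class Settings:
    def __init__(self) -> None:
        self.api_host = os.environ.get("API_HOST", "0.0.0.0")
        self.api_port = int(os.environ.get("API_PORT", "9000"))
        self.debug = os.environ.get("DEBUG", "False").lower() == "true"
        self.model_path = os.environ.get("MODEL_PATH", "./models/asl_model.h5")
        self.confidence_threshold = float(os.environ.get("CONFIDENCE_THRESHOLD", "0.8"))

settings = Settings()
```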
147 | ## 🚀 Deployment 148 | 149 | ### Google Cloud Platform Deployment 150 | 151 | The application can be deployed to GCP using Ansible playbooks: 152 | 153 | 1. **Build and push Docker images to GCR:** 154 | ```bash 155 | cd src/deployment 156 | ansible-playbook deploy-docker-images.yml -i inventory.yml 157 | ``` 158 | 159 | 2. **Create GCP compute instance:** 160 | ```bash 161 | ansible-playbook deploy-create-instance.yml -i inventory.yml --extra-vars cluster_state=present 162 | ``` 163 | 164 | 3. **Provision the instance:** 165 | ```bash 166 | ansible-playbook deploy-provision-instance.yml -i inventory.yml 167 | ``` 168 | 169 | 4. **Set up Docker containers:** 170 | ```bash 171 | ansible-playbook deploy-setup-containers.yml -i inventory.yml 172 | ``` 173 | 174 | 5. **Configure the web server:** 175 | ```bash 176 | ansible-playbook deploy-setup-webserver.yml -i inventory.yml 177 | ``` 178 | 179 | Access your deployed application at `http://<external-ip>/` 180 | 181 | ![Deployment Architecture](images/deployment-single-vm.png) 182 | 183 | *The application is deployed on a single VM in Google Cloud Platform with containerized services.* 184 | 185 | ## 📊 API Documentation 186 | 187 | The API provides the following endpoints: 188 | 189 | - `POST /predict` - Upload an image for ASL translation 190 | - `GET /health` - Health check endpoint 191 | - `GET /model-info` - Get model information and statistics 192 | - `GET /docs` - Interactive API documentation (Swagger UI) 193 | 194 | ![API Endpoints](images/api-list.png) 195 | 196 | *Available API endpoints for the ASL translation service.* 197 | 198 | ### Example API Usage 199 | 200 | ```python 201 | import requests 202 | 203 | # Upload image for translation 204 | with open('asl_image.jpg', 'rb') as f: 205 | files = {'file': f} 206 | response = requests.post('http://localhost:9000/predict', files=files) 207 | result = response.json() 208 | print(f"Translation: {result['translation']}") 209 | ``` 210 |
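The repository also ships a sample clip at `src/api-service/api/test.mp4`. A variant of the example that uploads it and checks for HTTP errors (a sketch: it assumes `/predict` accepts video uploads in the same way and returns a `translation` field as above):

```python
import requests

# Send the bundled sample clip to the prediction endpoint and fail loudly
# on transport or server errors instead of assuming a well-formed response.
with open("src/api-service/api/test.mp4", "rb") as f:
    response = requests.post(
        "http://localhost:9000/predict",
        files={"file": f},
        timeout=60,
    )
response.raise_for_status()
result = response.json()
print(f"Translation: {result.get('translation', 'unavailable')}")
```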
211 | ## 🤝 Contributing 212 | 213 | 1. Fork the repository 214 | 2. Create a feature branch (`git checkout -b feature/amazing-feature`) 215 | 3. Commit your changes (`git commit -m 'Add amazing feature'`) 216 | 4. Push to the branch (`git push origin feature/amazing-feature`) 217 | 5. Open a Pull Request 218 | 219 | ## 📝 License 220 | 221 | This project is licensed under the GNU General Public License v3.0 - see the [LICENSE](LICENSE) file for details. 222 | 223 | ## 🙏 Acknowledgments 224 | 225 | - ASL dataset contributors 226 | - Open source computer vision libraries 227 | - Google Cloud Platform for hosting infrastructure 228 | 229 | ## 👨‍💻 Author 230 | 231 | **Chuqing Zhao** - *AI American Sign Language Translator* 232 | 233 | This project was developed as part of an AI/ML initiative to create accessible technology for the deaf and hard-of-hearing community. 234 | 235 | ## 📞 Support 236 | 237 | For support and questions, please open an issue in the GitHub repository or contact the development team. 238 | 239 | --- 240 | 241 | **Note**: This application is designed for educational and research purposes. For production use in critical applications, additional testing and validation are recommended. 242 | -------------------------------------------------------------------------------- /data/data_pipeline/Dockerfile: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cqzhao918/AI_ASL_Translator/429a795a6b587d489a55a0451ae270040aa079e4/data/data_pipeline/Dockerfile -------------------------------------------------------------------------------- /data/data_pipeline/build_records.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | -------------------------------------------------------------------------------- /data/data_pipeline/dataloader.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | -------------------------------------------------------------------------------- /data/data_pipeline/process.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | -------------------------------------------------------------------------------- /data/interim/01411.parquet: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cqzhao918/AI_ASL_Translator/429a795a6b587d489a55a0451ae270040aa079e4/data/interim/01411.parquet -------------------------------------------------------------------------------- /data/processed/fold3-1-140.tfrecords: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cqzhao918/AI_ASL_Translator/429a795a6b587d489a55a0451ae270040aa079e4/data/processed/fold3-1-140.tfrecords -------------------------------------------------------------------------------- /data/raw/01411.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cqzhao918/AI_ASL_Translator/429a795a6b587d489a55a0451ae270040aa079e4/data/raw/01411.mp4 -------------------------------------------------------------------------------- /images/api-list.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cqzhao918/AI_ASL_Translator/429a795a6b587d489a55a0451ae270040aa079e4/images/api-list.png -------------------------------------------------------------------------------- /images/demo.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cqzhao918/AI_ASL_Translator/429a795a6b587d489a55a0451ae270040aa079e4/images/demo.gif -------------------------------------------------------------------------------- /images/deployment-single-vm.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cqzhao918/AI_ASL_Translator/429a795a6b587d489a55a0451ae270040aa079e4/images/deployment-single-vm.png -------------------------------------------------------------------------------- /images/frontend-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cqzhao918/AI_ASL_Translator/429a795a6b587d489a55a0451ae270040aa079e4/images/frontend-1.png -------------------------------------------------------------------------------- /images/frontend-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cqzhao918/AI_ASL_Translator/429a795a6b587d489a55a0451ae270040aa079e4/images/frontend-2.png
-------------------------------------------------------------------------------- /images/solution-arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cqzhao918/AI_ASL_Translator/429a795a6b587d489a55a0451ae270040aa079e4/images/solution-arch.png -------------------------------------------------------------------------------- /images/technical-arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cqzhao918/AI_ASL_Translator/429a795a6b587d489a55a0451ae270040aa079e4/images/technical-arch.png -------------------------------------------------------------------------------- /model/Dockerfile: -------------------------------------------------------------------------------- 1 | # Use the official Debian-hosted Python image 2 | FROM python:3.9-slim-buster 3 | 4 | ARG DEBIAN_PACKAGES="build-essential git curl wget unzip gzip" 5 | 6 | # Prevent apt from showing prompts 7 | ENV DEBIAN_FRONTEND=noninteractive 8 | 9 | # Python wants UTF-8 locale 10 | ENV LANG=C.UTF-8 11 | 12 | # Tell pipenv where the shell is. 13 | # This allows us to use "pipenv shell" as a container entry point. 14 | ENV PYENV_SHELL=/bin/bash 15 | 16 | # Tell Python to disable buffering so we don't lose any logs. 17 | ENV PYTHONUNBUFFERED=1 18 | 19 | ENV GOOGLE_APPLICATION_CREDENTIALS=secrets/data-pipeline.json 20 | 21 | # Ensure we have an up to date baseline, install dependencies 22 | RUN set -ex; \ 23 | for i in $(seq 1 8); do mkdir -p "/usr/share/man/man${i}"; done && \ 24 | apt-get update && \ 25 | apt-get upgrade -y && \ 26 | apt-get install -y --no-install-recommends $DEBIAN_PACKAGES && \ 27 | apt-get install -y --no-install-recommends software-properties-common apt-transport-https ca-certificates gnupg2 gnupg-agent curl openssh-client && \ 28 | curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - && \ 29 | echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] https://packages.cloud.google.com/apt cloud-sdk main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list && \ 30 | curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key --keyring /usr/share/keyrings/cloud.google.gpg add - && \ 31 | apt-get update && \ 32 | apt-get install -y --no-install-recommends google-cloud-sdk && \ 33 | apt-get clean && \ 34 | rm -rf /var/lib/apt/lists/* && \ 35 | pip install --no-cache-dir --upgrade pip && \ 36 | pip install pipenv && \ 37 | useradd -ms /bin/bash app -d /home/app -u 1000 -p "$(openssl passwd -1 Passw0rd)" && \ 38 | mkdir -p /app && \ 39 | chown app:app /app 40 | 41 | # Switch to the new user 42 | USER app 43 | WORKDIR /app 44 | 45 | # Add the Pipfile, Pipfile.lock, and Python code into the container 46 | ADD --chown=app:app . /app/ 47 | 48 | # Resolve dependency versions from the Pipfile 49 | RUN pipenv lock
50 |
51 | RUN pipenv sync
52 |
53 | # Make the entrypoint.sh script executable
54 | # RUN chmod +x /bin/bash/entrypoint.sh
55 |
56 | # # Set the entrypoint
57 | # ENTRYPOINT ["/bin/bash"]
58 |
59 | # # Specify the entrypoint script as the CMD
60 | # CMD ["entrypoint.sh"]
61 | # CMD ["-c", "pipenv shell"]
62 | ENTRYPOINT ["/bin/bash","./entrypoint.sh"]
-------------------------------------------------------------------------------- /model/Pipfile: --------------------------------------------------------------------------------
1 | [[source]]
2 | name = "pypi"
3 | url = "https://pypi.org/simple"
4 | verify_ssl = true
5 |
6 | [dev-packages]
7 |
8 | [packages]
9 | google-cloud-aiplatform = "*"
10 |
11 | [requires]
12 | python_version = "3.9"
-------------------------------------------------------------------------------- /model/capy-trainer.tar.gz: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/cqzhao918/AI_ASL_Translator/429a795a6b587d489a55a0451ae270040aa079e4/model/capy-trainer.tar.gz
-------------------------------------------------------------------------------- /model/cli.sh: --------------------------------------------------------------------------------
1 | # List of prebuilt containers for training
2 | # https://cloud.google.com/vertex-ai/docs/training/pre-built-containers
3 |
4 | export GCS_BUCKET_URI="gs://capy-data/model"
5 | export UUID=$(openssl rand -hex 6)
6 | export DISPLAY_NAME="capy_training_job_$UUID"
7 | export MACHINE_TYPE="n1-standard-4"
8 | export REPLICA_COUNT=1
9 | # export EXECUTOR_IMAGE_URI="us-docker.pkg.dev/vertex-ai/training/tf-gpu.2-12.py310:latest"
10 | export PYTHON_PACKAGE_URI=$GCS_BUCKET_URI/capy-trainer.tar.gz
11 | export PYTHON_MODULE="capy-trainer.task"
12 | # export ACCELERATOR_TYPE="NVIDIA_TESLA_T4"
13 | # export ACCELERATOR_COUNT=1
14 | export GCP_REGION="us-central1" # Adjust region based on your approved quotas for GPUs
15 | export WANDB_KEY="6a862c7a22f68c00ceb59a5daf60d10ae341fb94"
16 |
17 | export CMDARGS="--wandb_key=$WANDB_KEY"
18 | #export CMDARGS="--model_name=mobilenetv2,--train_base,--epochs=30,--batch_size=32,--wandb_key=$WANDB_KEY"
19 | #export CMDARGS="--model_name=tfhub_mobilenetv2,--epochs=30,--batch_size=32,--wandb_key=$WANDB_KEY"
20 | #export CMDARGS="--model_name=tfhub_mobilenetv2,--train_base,--epochs=30,--batch_size=32,--wandb_key=$WANDB_KEY"
21 |
22 | # gcloud ai custom-jobs create \
23 | #     --region=$GCP_REGION \
24 | #     --display-name=$DISPLAY_NAME \
25 | #     --python-package-uris=$PYTHON_PACKAGE_URI \
26 | #     --worker-pool-spec=machine-type=$MACHINE_TYPE,replica-count=$REPLICA_COUNT,accelerator-type=$ACCELERATOR_TYPE,accelerator-count=$ACCELERATOR_COUNT,executor-image-uri=$EXECUTOR_IMAGE_URI,python-module=$PYTHON_MODULE \
27 | #     --args=$CMDARGS
28 |
29 |
30 | # Run training with no GPU
31 | export EXECUTOR_IMAGE_URI="us-docker.pkg.dev/vertex-ai/training/tf-cpu.2-12.py310:latest"
32 | # gcloud ai custom-jobs create \
33 | #     --region=$GCP_REGION \
34 | #     --display-name=$DISPLAY_NAME \
35 | #     --python-package-uris=$PYTHON_PACKAGE_URI \
36 | #     --worker-pool-spec=machine-type=$MACHINE_TYPE,replica-count=$REPLICA_COUNT,executor-image-uri=$EXECUTOR_IMAGE_URI,python-module=$PYTHON_MODULE \
37 | #     --args=$CMDARGS
38 |
39 | gcloud ai custom-jobs create \
40 |     --region=$GCP_REGION \
41 |     --display-name=$DISPLAY_NAME \
42 |     --python-package-uris=$PYTHON_PACKAGE_URI \
43 |     --worker-pool-spec=machine-type=$MACHINE_TYPE,replica-count=$REPLICA_COUNT,executor-image-uri=$EXECUTOR_IMAGE_URI,python-module=$PYTHON_MODULE \
44 |     --args=$CMDARGS
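
For reference, the same submission can be made through the Vertex AI Python SDK (google-cloud-aiplatform is already declared in model/Pipfile). This minimal sketch mirrors the exports in cli.sh and docker-shell.sh; the display name and any other specifics not found in those scripts are assumptions, not a tested configuration. Note also that the W&B API key is committed in plain text above; it should instead be supplied at run time, as the sketch does.

# Sketch: submit the packaged trainer with the Python SDK instead of gcloud.
import os

from google.cloud import aiplatform

aiplatform.init(
    project="psychic-bedrock-398320",       # GCP_PROJECT in docker-shell.sh
    location="us-central1",                 # GCP_REGION in cli.sh
    staging_bucket="gs://capy-data/model",  # GCS_BUCKET_URI in cli.sh
)

job = aiplatform.CustomPythonPackageTrainingJob(
    display_name="capy_training_job_sdk",   # hypothetical display name
    python_package_gcs_uri="gs://capy-data/model/capy-trainer.tar.gz",
    python_module_name="capy-trainer.task",
    container_uri="us-docker.pkg.dev/vertex-ai/training/tf-cpu.2-12.py310:latest",
)

# Equivalent of CMDARGS; the key is read from the environment, never hardcoded.
job.run(
    args=[f"--wandb_key={os.environ['WANDB_KEY']}"],
    machine_type="n1-standard-4",
    replica_count=1,
)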
-------------------------------------------------------------------------------- /model/docker-shell.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | export IMAGE_NAME="capy-model"
6 | export BASE_DIR=$(pwd)
7 | export SECRETS_DIR=$(pwd)/secrets/
8 | export GCS_BUCKET_URI="gs://capy-data"
9 | export GCP_PROJECT="psychic-bedrock-398320"
10 | export WANDB_KEY="6a862c7a22f68c00ceb59a5daf60d10ae341fb94"
11 |
12 |
13 | # Build the image based on the Dockerfile
14 | docker build -t $IMAGE_NAME -f Dockerfile .
15 | # On Apple Silicon (M1/M2) Macs, use this line instead:
16 | # docker build -t $IMAGE_NAME --platform=linux/arm64/v8 -f Dockerfile .
17 |
18 | # Run Container
19 | docker run --rm --name $IMAGE_NAME -ti \
20 |     -v "$BASE_DIR":/app \
21 |     -v "$SECRETS_DIR":/secrets \
22 |     -e GOOGLE_APPLICATION_CREDENTIALS="/secrets/data-pipeline.json" \
23 |     -e GCP_PROJECT=$GCP_PROJECT \
24 |     -e GCS_BUCKET_URI=$GCS_BUCKET_URI \
25 |     -e WANDB_KEY=$WANDB_KEY \
26 |     $IMAGE_NAME
-------------------------------------------------------------------------------- /model/entrypoint.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Container is running!!!"
4 | # Activate the Pipenv virtual environment and install dependencies
5 | # pipenv run pip install -r requirements.txt
6 |
7 | # Authenticate gcloud using service account
8 | gcloud auth activate-service-account --key-file=secrets/data-pipeline.json
9 |
10 | # Set GCP Project Details
11 | gcloud config set project $GCP_PROJECT
12 |
13 | # Optionally run the training or packaging scripts
14 | # pipenv run python model.py
15 | # pipenv run bash package-trainer.sh
16 |
17 | pipenv shell
-------------------------------------------------------------------------------- /model/package-trainer.sh: --------------------------------------------------------------------------------
1 | rm -f capy-trainer.tar capy-trainer.tar.gz
2 | tar cvf capy-trainer.tar package
3 | gzip capy-trainer.tar
4 | gsutil cp capy-trainer.tar.gz gs://capy-data/model/capy-trainer.tar.gz
-------------------------------------------------------------------------------- /model/package/PKG-INFO: --------------------------------------------------------------------------------
1 | Metadata-Version: 1.0
2 |
3 | Name: Capy Trainer
4 |
5 | Version: 0.0.1
6 |
7 | License: Public
8 |
9 | Description: Demo
10 |
11 | Platform: Vertex
-------------------------------------------------------------------------------- /model/package/capy-trainer/__init__.py: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/cqzhao918/AI_ASL_Translator/429a795a6b587d489a55a0451ae270040aa079e4/model/package/capy-trainer/__init__.py
-------------------------------------------------------------------------------- /model/package/capy-trainer/task.py: --------------------------------------------------------------------------------
1 | import pandas as pd
2 | import os
3 | import tqdm
4 | import numpy as np
5 | import json
6 | import tensorflow as tf
7 | import gc
8 | from os import mkdir
9 | import argparse
10 |
11 | import math
12 | import pickle
13 |
14 | # import matplotlib.pyplot as plt
15 | from keras import layers, models
16 |
17 | # import tensorflow_addons as tfa
18 | from google.cloud import storage
19 | import io
20 |
21 | from tensorflow.python.lib.io import file_io
22 |
23 | import wandb
24 | from wandb.keras import WandbCallback, WandbMetricsLogger
25 |
26 | parser = argparse.ArgumentParser()
27 | parser.add_argument(
28 |     "--wandb_key", dest="wandb_key", default="16", type=str, help="WandB API Key"
29 | )
30 | args = parser.parse_args()
31 |
32 | wandb.login(key=args.wandb_key)
33 |
34 | client = storage.Client()
35 | bucket = client.bucket("capy-data")
36 |
37 | # blobs = bucket.list_blobs(prefix='data/preprocessed_dfs/preprocessed_dfs/')
38 |
39 | # import wandb
40 | # wandb.login()
41 |
42 | FRAME_LEN = 128 * 2
43 | # Read the character-to-prediction-index mapping
44 | blob_json = bucket.blob(
45 |     "data/preprocessed_dfs/preprocessed_dfs/character_to_prediction_index.json"
46 | )
47 | with blob_json.open("r") as f:
48 |     char_to_num = json.loads(f.read())
49 |
50 | pad_token = "^"
51 | pad_token_idx = 59
52 |
53 | char_to_num[pad_token] = pad_token_idx
54 | num_to_char = {j: i for i, j in char_to_num.items()}
55 | INPUT_SHAPE = [256, 276]
56 |
57 |
58 | def load_npy_data(npy_path, npy_file):
59 |     blob_df = bucket.blob(os.path.join(npy_path, npy_file))
60 |     npy_data = blob_df.download_as_bytes()
61 |     loaded_array = np.load(io.BytesIO(npy_data), allow_pickle=True)
62 |     return loaded_array
63 |
64 |
65 | npyPath = "data/preprocessed_dfs/preprocessed_dfs/mean_std"
66 | RHM = load_npy_data(npyPath, "rh_mean.npy")
67 | LHM = load_npy_data(npyPath, "lh_mean.npy")
68 | RPM = load_npy_data(npyPath, "rp_mean.npy")
69 | LPM = load_npy_data(npyPath, "lp_mean.npy")
70 | FACEM = load_npy_data(npyPath, "face_mean.npy")
71 |
72 | RHS = load_npy_data(npyPath, "rh_std.npy")
73 | LHS = load_npy_data(npyPath, "lh_std.npy")
74 | RPS = load_npy_data(npyPath, "rp_std.npy")
75 | LPS = load_npy_data(npyPath, "lp_std.npy")
76 | FACES = load_npy_data(npyPath, "face_std.npy")
77 |
78 |
79 | @tf.function()
80 | def resize_pad(x):
81 |     if tf.shape(x)[0] < FRAME_LEN:
82 |         x = tf.pad(
83 |             x,
84 |             ([[0, FRAME_LEN - tf.shape(x)[0]], [0, 0], [0, 0]]),
85 |             constant_values=float("NaN"),
86 |         )
87 |     else:
88 |         x = tf.image.resize(x, (FRAME_LEN, tf.shape(x)[1]))
89 |     return x
90 |
91 |
92 | @tf.function()
93 | def pre_process1(face, rhand, lhand, rpose, lpose):
94 |     print(type(face), face.shape)
95 |     face = (resize_pad(face) - FACEM) / FACES
96 |     rhand = (resize_pad(rhand) - RHM) / RHS
97 |     lhand = (resize_pad(lhand) - LHM) / LHS
98 |     rpose = (resize_pad(rpose) - RPM) / RPS
99 |     lpose = (resize_pad(lpose) - LPM) / LPS
100 |
101 |     x = tf.concat([face, rhand, lhand, rpose, lpose], axis=1)
102 |     s = tf.shape(x)
103 |     x = tf.reshape(x, (s[0], s[1] * s[2]))
104 |     x = tf.where(tf.math.is_nan(x), 0.0, x)
105 |     return x
106 |
107 |
108 | # Decode TFRecords
109 | def decode_fn(record_bytes):
110 |     schema = {
111 |         "face": tf.io.VarLenFeature(tf.float32),
112 |         "rhand": tf.io.VarLenFeature(tf.float32),
113 |         "lhand": tf.io.VarLenFeature(tf.float32),
114 |         "rpose": tf.io.VarLenFeature(tf.float32),
115 |         "lpose": tf.io.VarLenFeature(tf.float32),
116 |         "phrase": tf.io.VarLenFeature(tf.int64),
117 |     }
118 |     x = tf.io.parse_single_example(record_bytes, schema)
119 |
120 |     face = tf.reshape(tf.sparse.to_dense(x["face"]), (-1, 40, 3))
121 |     rhand = tf.reshape(tf.sparse.to_dense(x["rhand"]), (-1, 21, 3))
122 |     lhand = tf.reshape(tf.sparse.to_dense(x["lhand"]), (-1, 21, 3))
123 |     rpose = tf.reshape(tf.sparse.to_dense(x["rpose"]), (-1, 5, 3))
124 |     lpose = tf.reshape(tf.sparse.to_dense(x["lpose"]), (-1, 5, 3))
125 |     phrase = 
tf.sparse.to_dense(x["phrase"]) 126 | 127 | return face, rhand, lhand, rpose, lpose, phrase 128 | 129 | 130 | def pre_process_fn(lip, rhand, lhand, rpose, lpose, phrase): 131 | phrase = tf.pad( 132 | phrase, 133 | [[0, MAX_PHRASE_LENGTH - tf.shape(phrase)[0]]], 134 | constant_values=pad_token_idx, 135 | ) 136 | return pre_process1(lip, rhand, lhand, rpose, lpose), phrase 137 | 138 | 139 | MAX_PHRASE_LENGTH = 500 140 | tffiles_dir = [ 141 | file.name 142 | for file in bucket.list_blobs( 143 | prefix="data/preprocessed_dfs/preprocessed_dfs/test_tfrecords" 144 | ) 145 | ] 146 | tffiles = [ 147 | os.path.join("gs://capy-data", tffile) 148 | for tffile in tffiles_dir 149 | if ".tfrecord" in tffile 150 | ] 151 | print("path", tffiles[:2]) 152 | # tffiles = [f"C:/Users/chuqi/ac215/capy_data_test/test_tfds/{file_id}.tfrecord" for file_id in os.listdir('C:/Users/chuqi/ac215/capy_data_test/test_npy')] 153 | val_len = 1 154 | train_batch_size = 32 155 | val_batch_size = 32 156 | # 157 | # tffiles = ['gs://capy-data/data/preprocessed_dfs/preprocessed_dfs/test_tfrecords/preprocessed__fZbAxSSbX4_1-5-rgb_front.npy.tfrecord', 158 | # 'gs://capy-data/data/preprocessed_dfs/preprocessed_dfs/test_tfrecords/preprocessed__fZbAxSSbX4_2-5-rgb_front.npy.tfrecord'] 159 | 160 | train_dataset = ( 161 | tf.data.TFRecordDataset(tffiles[val_len:]) 162 | .prefetch(tf.data.AUTOTUNE) 163 | .shuffle(5000) 164 | .map(decode_fn, num_parallel_calls=tf.data.AUTOTUNE) 165 | .map(pre_process_fn, num_parallel_calls=tf.data.AUTOTUNE) 166 | .batch(train_batch_size) 167 | .prefetch(tf.data.AUTOTUNE) 168 | ) 169 | val_dataset = ( 170 | tf.data.TFRecordDataset(tffiles[:val_len]) 171 | .prefetch(tf.data.AUTOTUNE) 172 | .map(decode_fn, num_parallel_calls=tf.data.AUTOTUNE) 173 | .map(pre_process_fn, num_parallel_calls=tf.data.AUTOTUNE) 174 | .batch(val_batch_size) 175 | .prefetch(tf.data.AUTOTUNE) 176 | ) 177 | 178 | 179 | print("train:", train_dataset) 180 | print("train type:", type(train_dataset)) 181 | 182 | print("val:", val_dataset) 183 | print("val type:", type(val_dataset)) 184 | 185 | val = next(iter(val_dataset)) 186 | print(val[0].shape) 187 | 188 | train = next(iter(train_dataset)) 189 | print(train[0].shape) 190 | 191 | 192 | #%%Build model 193 | class ECA(tf.keras.layers.Layer): 194 | def __init__(self, kernel_size=5, **kwargs): 195 | super().__init__(**kwargs) 196 | self.supports_masking = True 197 | self.kernel_size = kernel_size 198 | self.conv = tf.keras.layers.Conv1D( 199 | 1, kernel_size=kernel_size, strides=1, padding="same", use_bias=False 200 | ) 201 | 202 | def call(self, inputs, mask=None): 203 | nn = tf.keras.layers.GlobalAveragePooling1D()(inputs, mask=mask) 204 | nn = tf.expand_dims(nn, -1) 205 | nn = self.conv(nn) 206 | nn = tf.squeeze(nn, -1) 207 | nn = tf.nn.sigmoid(nn) 208 | nn = nn[:, None, :] 209 | return inputs * nn 210 | 211 | 212 | class CausalDWConv1D(tf.keras.layers.Layer): 213 | def __init__( 214 | self, 215 | kernel_size=17, 216 | dilation_rate=1, 217 | use_bias=False, 218 | depthwise_initializer="glorot_uniform", 219 | name="", 220 | **kwargs, 221 | ): 222 | super().__init__(name=name, **kwargs) 223 | self.causal_pad = tf.keras.layers.ZeroPadding1D( 224 | (dilation_rate * (kernel_size - 1), 0), name=name + "_pad" 225 | ) 226 | self.dw_conv = tf.keras.layers.DepthwiseConv1D( 227 | kernel_size, 228 | strides=1, 229 | dilation_rate=dilation_rate, 230 | padding="valid", 231 | use_bias=use_bias, 232 | depthwise_initializer=depthwise_initializer, 233 | name=name + "_dwconv", 234 | ) 235 | 
self.supports_masking = True 236 | 237 | def call(self, inputs): 238 | x = self.causal_pad(inputs) 239 | x = self.dw_conv(x) 240 | return x 241 | 242 | 243 | def Conv1DBlock( 244 | channel_size, 245 | kernel_size, 246 | dilation_rate=1, 247 | drop_rate=0.0, 248 | expand_ratio=2, 249 | se_ratio=0.25, 250 | activation="swish", 251 | name=None, 252 | ): 253 | """ 254 | efficient conv1d block, @hoyso48 255 | """ 256 | if name is None: 257 | name = str(tf.keras.backend.get_uid("mbblock")) 258 | # Expansion phase 259 | def apply(inputs): 260 | channels_in = tf.keras.backend.int_shape(inputs)[-1] 261 | channels_expand = channels_in * expand_ratio 262 | 263 | skip = inputs 264 | 265 | x = tf.keras.layers.Dense( 266 | channels_expand, 267 | use_bias=True, 268 | activation=activation, 269 | name=name + "_expand_conv", 270 | )(inputs) 271 | 272 | # Depthwise Convolution 273 | x = CausalDWConv1D( 274 | kernel_size, 275 | dilation_rate=dilation_rate, 276 | use_bias=False, 277 | name=name + "_dwconv", 278 | )(x) 279 | 280 | x = tf.keras.layers.BatchNormalization(momentum=0.95, name=name + "_bn")(x) 281 | 282 | x = ECA()(x) 283 | 284 | x = tf.keras.layers.Dense( 285 | channel_size, use_bias=True, name=name + "_project_conv" 286 | )(x) 287 | 288 | if drop_rate > 0: 289 | x = tf.keras.layers.Dropout( 290 | drop_rate, noise_shape=(None, 1, 1), name=name + "_drop" 291 | )(x) 292 | 293 | if channels_in == channel_size: 294 | x = tf.keras.layers.add([x, skip], name=name + "_add") 295 | return x 296 | 297 | return apply 298 | 299 | 300 | class MultiHeadSelfAttention(tf.keras.layers.Layer): 301 | def __init__(self, dim=256, num_heads=4, dropout=0, **kwargs): 302 | super().__init__(**kwargs) 303 | self.dim = dim 304 | self.scale = self.dim**-0.5 305 | self.num_heads = num_heads 306 | self.qkv = tf.keras.layers.Dense(3 * dim, use_bias=False) 307 | self.drop1 = tf.keras.layers.Dropout(dropout) 308 | self.proj = tf.keras.layers.Dense(dim, use_bias=False) 309 | self.supports_masking = True 310 | 311 | def call(self, inputs, mask=None): 312 | qkv = self.qkv(inputs) 313 | qkv = tf.keras.layers.Permute((2, 1, 3))( 314 | tf.keras.layers.Reshape( 315 | (-1, self.num_heads, self.dim * 3 // self.num_heads) 316 | )(qkv) 317 | ) 318 | q, k, v = tf.split(qkv, [self.dim // self.num_heads] * 3, axis=-1) 319 | 320 | attn = tf.matmul(q, k, transpose_b=True) * self.scale 321 | 322 | if mask is not None: 323 | mask = mask[:, None, None, :] 324 | 325 | attn = tf.keras.layers.Softmax(axis=-1)(attn, mask=mask) 326 | attn = self.drop1(attn) 327 | 328 | x = attn @ v 329 | x = tf.keras.layers.Reshape((-1, self.dim))( 330 | tf.keras.layers.Permute((2, 1, 3))(x) 331 | ) 332 | x = self.proj(x) 333 | return x 334 | 335 | 336 | def TransformerBlock( 337 | dim=256, num_heads=6, expand=4, attn_dropout=0.2, drop_rate=0.2, activation="swish" 338 | ): 339 | def apply(inputs): 340 | x = inputs 341 | x = tf.keras.layers.LayerNormalization(epsilon=1e-6)(x) 342 | x = MultiHeadSelfAttention(dim=dim, num_heads=num_heads, dropout=attn_dropout)( 343 | x 344 | ) 345 | x = tf.keras.layers.Dropout(drop_rate, noise_shape=(None, 1, 1))(x) 346 | x = tf.keras.layers.Add()([inputs, x]) 347 | attn_out = x 348 | 349 | x = tf.keras.layers.LayerNormalization(epsilon=1e-6)(x) 350 | x = tf.keras.layers.Dense(dim * expand, use_bias=False, activation=activation)( 351 | x 352 | ) 353 | x = tf.keras.layers.Dense(dim, use_bias=False)(x) 354 | x = tf.keras.layers.Dropout(drop_rate, noise_shape=(None, 1, 1))(x) 355 | x = tf.keras.layers.Add()([attn_out, x]) 356 | return x 357 
| 358 | return apply 359 | 360 | 361 | def positional_encoding(maxlen, num_hid): 362 | depth = num_hid / 2 363 | positions = tf.range(maxlen, dtype=tf.float32)[..., tf.newaxis] 364 | depths = tf.range(depth, dtype=tf.float32)[np.newaxis, :] / depth 365 | angle_rates = tf.math.divide(1, tf.math.pow(tf.cast(10000, tf.float32), depths)) 366 | angle_rads = tf.linalg.matmul(positions, angle_rates) 367 | pos_encoding = tf.concat( 368 | [tf.math.sin(angle_rads), tf.math.cos(angle_rads)], axis=-1 369 | ) 370 | return pos_encoding 371 | 372 | 373 | def positional_encoding2(maxlen, num_hid): 374 | depth = num_hid / 2 375 | positions = tf.range(maxlen, dtype=tf.float32)[..., tf.newaxis] 376 | depths = tf.range(depth, dtype=tf.float32)[np.newaxis, :] / depth 377 | angle_rates = tf.math.divide(1, tf.math.pow(tf.cast(10000, tf.float32), depths)) 378 | angle_rads = tf.linalg.matmul(positions, angle_rates) 379 | pos_encoding = np.zeros((maxlen, num_hid)) 380 | pos_encoding[:, 0::2] = np.sin(angle_rads) 381 | pos_encoding[:, 1::2] = np.cos(angle_rads) 382 | return pos_encoding 383 | 384 | 385 | # %% Build Loss function 386 | def CTCLoss(labels, logits): 387 | label_length = tf.reduce_sum(tf.cast(labels != pad_token_idx, tf.int32), axis=-1) 388 | logit_length = tf.ones(tf.shape(logits)[0], dtype=tf.int32) * tf.shape(logits)[1] 389 | loss = tf.nn.ctc_loss( 390 | labels=labels, 391 | logits=logits, 392 | label_length=label_length, 393 | logit_length=logit_length, 394 | blank_index=pad_token_idx, 395 | logits_time_major=False, 396 | ) 397 | loss = tf.reduce_mean(loss) 398 | return loss 399 | 400 | 401 | def get_model(dim=384): 402 | inp = tf.keras.Input(INPUT_SHAPE) 403 | x = tf.keras.layers.Masking(mask_value=0.0)(inp) 404 | x = tf.keras.layers.Dense(dim, use_bias=False, name="stem_conv")(x) 405 | pe = tf.cast(positional_encoding(INPUT_SHAPE[0], dim), dtype=x.dtype) 406 | x = x + pe 407 | x = tf.keras.layers.BatchNormalization(momentum=0.95, name="stem_bn")(x) 408 | num_blocks = 6 409 | drop_rate = 0.4 410 | for i in range(num_blocks): 411 | x = Conv1DBlock(dim, 11, drop_rate=drop_rate)(x) 412 | x = Conv1DBlock(dim, 5, drop_rate=drop_rate)(x) 413 | x = Conv1DBlock(dim, 3, drop_rate=drop_rate)(x) 414 | x = TransformerBlock(dim, expand=2)(x) 415 | 416 | x = tf.keras.layers.Dense(dim * 2, activation="relu", name="top_conv")(x) 417 | x = tf.keras.layers.Dropout(0.4)(x) 418 | # x = LateDropout(0.7)(x) 419 | x = tf.keras.layers.Dense(len(char_to_num), name="classifier")(x) 420 | 421 | model = tf.keras.Model(inp, x) 422 | 423 | loss = CTCLoss 424 | 425 | # Adam Optimizer 426 | optimizer = tf.keras.optimizers.Adam(learning_rate=0.001) 427 | # optimizer = tf.optimizers.RectifiedAdam(sma_threshold=4) 428 | # optimizer = tfa.optimizers.Lookahead(optimizer, sync_period=5) 429 | 430 | model.compile(loss=loss, optimizer=optimizer) 431 | 432 | return model 433 | 434 | 435 | tf.keras.backend.clear_session() 436 | model = get_model() 437 | # model(batch[0]) 438 | 439 | 440 | def num_to_char_fn(y): 441 | return [num_to_char.get(x, "") for x in y] 442 | 443 | 444 | @tf.function() 445 | def decode_phrase(pred): 446 | x = tf.argmax(pred, axis=1) 447 | diff = tf.not_equal(x[:-1], x[1:]) 448 | adjacent_indices = tf.where(diff)[:, 0] 449 | x = tf.gather(x, adjacent_indices) 450 | mask = x != pad_token_idx 451 | x = tf.boolean_mask(x, mask, axis=0) 452 | return x 453 | 454 | 455 | # A utility function to decode the output of the network 456 | def decode_batch_predictions(pred): 457 | output_text = [] 458 | for result in pred: 459 | 
result = "".join(num_to_char_fn(decode_phrase(result).numpy())) 460 | output_text.append(result) 461 | return output_text 462 | 463 | 464 | # A callback class to output a few transcriptions during training 465 | class CallbackEval(tf.keras.callbacks.Callback): 466 | """Displays a batch of outputs after every epoch.""" 467 | 468 | def __init__(self, dataset): 469 | super().__init__() 470 | self.dataset = dataset 471 | 472 | def on_epoch_end(self, epoch: int, logs=None): 473 | # model.save_weights("model.keras") 474 | predictions = [] 475 | targets = [] 476 | for batch in self.dataset: 477 | X, y = batch 478 | batch_predictions = model(X) 479 | batch_predictions = decode_batch_predictions(batch_predictions) 480 | predictions.extend(batch_predictions) 481 | for label in y: 482 | label = "".join(num_to_char_fn(label.numpy())) 483 | targets.append(label) 484 | print("-" * 100) 485 | # for i in np.random.randint(0, len(predictions), 2): 486 | print(f"Target : {targets}") 487 | print(f"Prediction: {predictions}, len: {len(predictions)}") 488 | print("-" * 100) 489 | 490 | 491 | # for i in range(32): 492 | # print(f"Target : {targets[i]}") 493 | # print(f"Prediction: {predictions[i]}, len: {len(predictions[i])}") 494 | # print("-" * 100) 495 | 496 | # Callback function to check transcription on the val set. 497 | validation_callback = CallbackEval(val_dataset.take(1)) 498 | 499 | 500 | N_EPOCHS = 51 501 | N_WARMUP_EPOCHS = 10 502 | LR_MAX = 1e-3 503 | WD_RATIO = 0.05 504 | WARMUP_METHOD = "exp" 505 | 506 | 507 | def lrfn( 508 | current_step, num_warmup_steps, lr_max, num_cycles=0.50, num_training_steps=N_EPOCHS 509 | ): 510 | 511 | if current_step < num_warmup_steps: 512 | if WARMUP_METHOD == "log": 513 | return lr_max * 0.10 ** (num_warmup_steps - current_step) 514 | else: 515 | return lr_max * 2 ** -(num_warmup_steps - current_step) 516 | else: 517 | progress = float(current_step - num_warmup_steps) / float( 518 | max(1, num_training_steps - num_warmup_steps) 519 | ) 520 | 521 | return ( 522 | max( 523 | 0.0, 524 | 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)), 525 | ) 526 | * lr_max 527 | ) 528 | 529 | 530 | # Learning rate for encoder 531 | LR_SCHEDULE = [ 532 | lrfn(step, num_warmup_steps=N_WARMUP_EPOCHS, lr_max=LR_MAX, num_cycles=0.50) 533 | for step in range(N_EPOCHS) 534 | ] 535 | # Learning Rate Callback 536 | lr_callback = tf.keras.callbacks.LearningRateScheduler( 537 | lambda step: LR_SCHEDULE[step], verbose=0 538 | ) 539 | 540 | # Custom callback to update weight decay with learning rate 541 | class WeightDecayCallback(tf.keras.callbacks.Callback): 542 | def __init__(self, wd_ratio=WD_RATIO): 543 | self.step_counter = 0 544 | self.wd_ratio = wd_ratio 545 | 546 | def on_epoch_begin(self, epoch, logs=None): 547 | model.optimizer.weight_decay = model.optimizer.learning_rate * self.wd_ratio 548 | print( 549 | f"learning rate: {model.optimizer.learning_rate.numpy():.2e}, weight decay: {model.optimizer.weight_decay.numpy():.2e}" 550 | ) 551 | 552 | 553 | checkpoint_callback = tf.keras.callbacks.ModelCheckpoint( 554 | filepath="code/baseline2/out2/model.{epoch:05d}.keras", 555 | save_weights_only=True, 556 | period=50, 557 | ) 558 | 559 | 560 | # load weight 561 | weight_path = ( 562 | "gs://capy-data/data/preprocessed_dfs/preprocessed_dfs/model_weights/model.h5" 563 | ) 564 | print( 565 | "model weight", 566 | [ 567 | file.name 568 | for file in bucket.list_blobs( 569 | prefix="data/preprocessed_dfs/preprocessed_dfs/model_weights" 570 | ) 571 | ], 572 | ) 573 | blob = 
bucket.blob("data/preprocessed_dfs/preprocessed_dfs/model_weights/model.h5") 574 | # model.load_weights(weight_path, by_name=False) 575 | 576 | # Training 577 | history = model.fit( 578 | train_dataset, 579 | validation_data=val_dataset, 580 | # epochs=N_EPOCHS, 581 | epochs=1, 582 | callbacks=[ 583 | checkpoint_callback, 584 | validation_callback, 585 | lr_callback, 586 | WeightDecayCallback(), 587 | ], 588 | ) 589 | model.save("basemodel.keras") 590 | 591 | # Initialize a W&B run 592 | wandb.init( 593 | project="capy-train", 594 | config={ 595 | "learning_rate": LR_MAX, 596 | "epochs": 1, 597 | "batch_size": 32, 598 | "model_name": model.name, 599 | }, 600 | name=model.name, 601 | ) 602 | 603 | # Wandb training 604 | training_results = model.fit( 605 | train_dataset, 606 | validation_data=val_dataset, 607 | epochs=5, 608 | callbacks=[WandbMetricsLogger()], 609 | # callbacks = [WandbMetricsLogger(log_freq=1)], 610 | verbose=1, 611 | ) 612 | 613 | wandb.run.finish() 614 | -------------------------------------------------------------------------------- /model/package/setup.cfg: -------------------------------------------------------------------------------- 1 | [egg_info] 2 | 3 | tag_build = 4 | 5 | tag_date = 0 -------------------------------------------------------------------------------- /model/package/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import find_packages 2 | from setuptools import setup 3 | 4 | 5 | REQUIRED_PACKAGES = [ 6 | "wandb", 7 | "tensorflow==2.13.0", 8 | "pandas", 9 | "numpy", 10 | "tqdm", 11 | "python-json-logger", 12 | ] 13 | 14 | setup( 15 | name="capy-trainer", 16 | version="0.0.1", 17 | install_requires=REQUIRED_PACKAGES, 18 | packages=find_packages(), 19 | description="Capy Trainer Application", 20 | ) 21 | -------------------------------------------------------------------------------- /model/requirements.txt: -------------------------------------------------------------------------------- 1 | pandas 2 | numpy 3 | tqdm 4 | tensorflow==2.13.0 5 | wandb 6 | google-cloud-storage 7 | wandb -------------------------------------------------------------------------------- /models/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cqzhao918/AI_ASL_Translator/429a795a6b587d489a55a0451ae270040aa079e4/models/.gitkeep -------------------------------------------------------------------------------- /notebooks/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cqzhao918/AI_ASL_Translator/429a795a6b587d489a55a0451ae270040aa079e4/notebooks/.gitkeep -------------------------------------------------------------------------------- /notebooks/eda.ipynb: -------------------------------------------------------------------------------- 1 | # Blank file showing how and where you might use a notebook in your project. 
2 |
-------------------------------------------------------------------------------- /preprocessing/gcp-upload.ipynb: --------------------------------------------------------------------------------
1 | {
2 |  "cells": [
3 |   {
4 |    "cell_type": "code",
5 |    "execution_count": 9,
6 |    "id": "d67e69df",
7 |    "metadata": {},
8 |    "outputs": [
9 |     {
10 |      "name": "stderr",
11 |      "output_type": "stream",
12 |      "text": [
13 |       "100%|██████████████████████████████████████████████████████████████████████████████| 2811/2811 [47:28<00:00, 1.01s/it]\n"
14 |      ]
15 |     }
16 |    ],
17 |    "source": [
18 |     "from google.cloud import storage\n",
19 |     "import os\n",
20 |     "import glob\n",
21 |     "import tqdm\n",
22 |     "\n",
23 |     "\n",
24 |     "# GCS bucket information\n",
25 |     "gcs_bucket_name = 'capy-data'\n",
26 |     "gcs_prefix = 'data/WLASL-data/wlasl_parquet_update/'\n",
27 |     "\n",
28 |     "storage_client = storage.Client.from_service_account_json(\"./psychic-bedrock-398320-e41cc1b33701.json\")\n",
29 |     "bucket = storage_client.get_bucket(gcs_bucket_name)\n",
30 |     "\n",
31 |     "# Upload the parquet files to the GCS bucket\n",
32 |     "\n",
33 |     "for input_file in tqdm.tqdm(glob.glob(os.path.join(\"wlasl/wlasl_parquet_update\", '*.parquet'))):\n",
34 |     "#     print(input_file)\n",
35 |     "    gcs_object_name = os.path.join(gcs_prefix, os.path.basename(input_file))\n",
36 |     "    blob = bucket.blob(gcs_object_name)\n",
37 |     "    blob.upload_from_filename(input_file)\n"
38 |    ]
39 |   },
40 |   {
41 |    "cell_type": "code",
42 |    "execution_count": null,
43 |    "id": "c1e2411e",
44 |    "metadata": {},
45 |    "outputs": [],
46 |    "source": []
47 |   }
48 |  ],
49 |  "metadata": {
50 |   "kernelspec": {
51 |    "display_name": "Python 3 (ipykernel)",
52 |    "language": "python",
53 |    "name": "python3"
54 |   },
55 |   "language_info": {
56 |    "codemirror_mode": {
57 |     "name": "ipython",
58 |     "version": 3
59 |    },
60 |    "file_extension": ".py",
61 |    "mimetype": "text/x-python",
62 |    "name": "python",
63 |    "nbconvert_exporter": "python",
64 |    "pygments_lexer": "ipython3",
65 |    "version": "3.10.6"
66 |   }
67 |  },
68 |  "nbformat": 4,
69 |  "nbformat_minor": 5
70 | }
71 |
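
The notebook above uploads 2,811 parquet files sequentially (about 47 minutes in the recorded run). A minimal sketch of a parallel alternative using the transfer manager in google-cloud-storage (available in versions >= 2.7); the worker count is arbitrary and the paths simply mirror the notebook, so treat this as an untested assumption rather than part of the pipeline:

# Hypothetical parallel upload of the same files via the transfer manager.
import glob
import os

from google.cloud.storage import Client, transfer_manager

client = Client.from_service_account_json("./psychic-bedrock-398320-e41cc1b33701.json")
bucket = client.bucket("capy-data")

source_dir = "wlasl/wlasl_parquet_update"
filenames = [os.path.basename(p) for p in glob.glob(os.path.join(source_dir, "*.parquet"))]

# blob_name_prefix mirrors gcs_prefix in the notebook above.
transfer_manager.upload_many_from_filenames(
    bucket,
    filenames,
    source_directory=source_dir,
    blob_name_prefix="data/WLASL-data/wlasl_parquet_update/",
    max_workers=8,
)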
-------------------------------------------------------------------------------- /references/.gitkeep: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/cqzhao918/AI_ASL_Translator/429a795a6b587d489a55a0451ae270040aa079e4/references/.gitkeep
-------------------------------------------------------------------------------- /references/2020.coling-main.525.pdf: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/cqzhao918/AI_ASL_Translator/429a795a6b587d489a55a0451ae270040aa079e4/references/2020.coling-main.525.pdf
-------------------------------------------------------------------------------- /references/Camgoz_Neural_Sign_Language_CVPR_2018_paper.pdf: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/cqzhao918/AI_ASL_Translator/429a795a6b587d489a55a0451ae270040aa079e4/references/Camgoz_Neural_Sign_Language_CVPR_2018_paper.pdf
-------------------------------------------------------------------------------- /references/methods.md: --------------------------------------------------------------------------------
1 | # Sign Language Translation
2 |
3 | ## Method papers
4 |
5 |
6 | ### Better Sign Language Translation with STMC-Transformer
7 |
8 | * Coling'20 Paper
9 | * Model: STMC-Transformer
10 | * Dataset: PHOENIX-Weather 2014T dataset & ASLG-PC12 corpus
11 | * The STMC-Transformer performs video-to-text translation that surpasses translation of the
12 | ground-truth glosses, suggesting that glosses are an inefficient intermediate representation of sign language.
13 |
14 | [GitHub](https://github.com/kayoyin/transformer-slt)
15 |
16 |
17 | ### Sign Language Transformers
18 |
19 | * CVPR'20 paper: Sign Language Transformers: Joint End-to-end Sign Language Recognition and Translation.
20 |
21 | [GitHub](https://github.com/neccam/slt)
22 |
23 |
24 | ### Neural Sign Language Translation
25 |
26 | * Baseline model: Camgoz et al. (2018) formalized the SLT task.
27 | * CVPR'18 Paper: https://openaccess.thecvf.com/content_cvpr_2018/papers/Camgoz_Neural_Sign_Language_CVPR_2018_paper.pdf
28 |
29 |
30 | ### Google - ASL Fingerspelling Recognition 1st place solution
31 |
32 | * Kaggle: https://www.kaggle.com/competitions/asl-fingerspelling
33 |
34 | [GitHub](https://github.com/ChristofHenkel/kaggle-asl-fingerspelling-1st-place-solution)
35 |
36 |
37 | ### MyVoice: Machine Translation for American Sign Language
38 |
39 | * Model based on Camgoz et al.'s work on using Transformers to jointly learn the Sign Language Recognition and Translation tasks.
40 | * Dataset: How2Sign; uses the English transcript as the intermediate gloss representation.
41 | * https://www.ischool.berkeley.edu/projects/2022/myvoice-machine-translation-american-sign-language
42 |
43 | [GitHub](https://github.com/sign2text/myvoice)
44 |
45 |
46 |
47 | ## Dataset
48 |
49 | ### English-ASL Gloss Parallel Corpus 2012: ASLG-PC12
50 |
51 | * https://achrafothman.net/site/english-asl-gloss-parallel-corpus-2012-aslg-pc12/
52 |
53 |
54 | ### MS-ASL American Sign Language Dataset
55 |
56 | * https://microsoft.github.io/data-for-society/dataset?d=MS-ASL-American-Sign-Language-Dataset
57 |
58 |
59 | ### How2Sign: A Large-Scale Multimodal Dataset for Continuous American Sign Language
60 |
61 | * https://how2sign.github.io/
62 |
-------------------------------------------------------------------------------- /references/sign_language_transformers.pdf: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/cqzhao918/AI_ASL_Translator/429a795a6b587d489a55a0451ae270040aa079e4/references/sign_language_transformers.pdf
-------------------------------------------------------------------------------- /requirements.txt: --------------------------------------------------------------------------------
1 | # external requirements
2 | click
3 | Sphinx
4 | coverage
5 | awscli
6 | flake8
7 | python-dotenv>=0.5.1
8 | black
9 | pytest
10 |
-------------------------------------------------------------------------------- /src/api-service/.gitkeep: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/cqzhao918/AI_ASL_Translator/429a795a6b587d489a55a0451ae270040aa079e4/src/api-service/.gitkeep
-------------------------------------------------------------------------------- /src/api-service/Dockerfile: --------------------------------------------------------------------------------
1 | # Use the official Debian-hosted Python image
2 | FROM python:3.9-slim-buster
3 |
4 | ARG DEBIAN_PACKAGES="build-essential git"
5 |
6 | # Prevent apt from showing prompts
7 | ENV DEBIAN_FRONTEND=noninteractive
8 |
9 | # Python wants UTF-8 locale
10 | ENV LANG=C.UTF-8
11 |
12 | # Tell pipenv where the shell is.
This allows us to use "pipenv shell" as a 13 | # container entry point. 14 | ENV PYENV_SHELL=/bin/bash 15 | 16 | # Tell Python to disable buffering so we don't lose any logs. 17 | ENV PYTHONUNBUFFERED=1 18 | 19 | # Ensure we have an up to date baseline, install dependencies and 20 | # create a user so we don't run the app as root 21 | RUN set -ex; \ 22 | for i in $(seq 1 8); do mkdir -p "/usr/share/man/man${i}"; done && \ 23 | apt-get update && \ 24 | apt-get upgrade -y && \ 25 | apt install -y ca-certificates && \ 26 | apt install -y libglib2.0-0 && \ 27 | apt-get install -y libgl1-mesa-glx && \ 28 | apt-get install -y ca-certificates && \ 29 | apt-get install -y --no-install-recommends $DEBIAN_PACKAGES && \ 30 | apt-get clean && \ 31 | rm -rf /var/lib/apt/lists/* && \ 32 | pip install --no-cache-dir --upgrade pip && \ 33 | pip install pipenv && \ 34 | useradd -ms /bin/bash app -d /home/app -u 1000 -p "$(openssl passwd -1 Passw0rd)" && \ 35 | mkdir -p /app && \ 36 | mkdir -p /persistent && \ 37 | chown app:app /persistent && \ 38 | chown app:app /app 39 | 40 | RUN mkdir /local_model && chmod 777 /local_model 41 | 42 | 43 | # Expose port of API service 44 | EXPOSE 9000 45 | 46 | # Switch to the new user 47 | USER app 48 | WORKDIR /app 49 | 50 | # Install python packages 51 | ADD --chown=app:app Pipfile Pipfile.lock /app/ 52 | 53 | RUN pipenv sync 54 | 55 | # Add the rest of the source code. This is done last so we don't invalidate all 56 | # layers when we change a line of code. 57 | ADD --chown=app:app . /app 58 | 59 | # Entry point 60 | ENTRYPOINT ["/bin/bash","./docker-entrypoint.sh"] -------------------------------------------------------------------------------- /src/api-service/Pipfile: -------------------------------------------------------------------------------- 1 | [[source]] 2 | name = "pypi" 3 | url = "https://pypi.org/simple" 4 | verify_ssl = true 5 | 6 | [dev-packages] 7 | 8 | [packages] 9 | uvicorn = "*" 10 | fastapi = "*" 11 | pandas = "*" 12 | google-cloud-storage = "*" 13 | tensorflow-hub = "*" 14 | python-multipart = "*" 15 | google-cloud-aiplatform = "*" 16 | numpy = "*" 17 | tensorflow = "==2.13.0" 18 | opencv-python = "*" 19 | keras = "*" 20 | mediapipe = "*" 21 | importlib_resources = '*' 22 | zipp = '*' 23 | 24 | [requires] 25 | python_version = "3.9" -------------------------------------------------------------------------------- /src/api-service/api/local_model/asl_model.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cqzhao918/AI_ASL_Translator/429a795a6b587d489a55a0451ae270040aa079e4/src/api-service/api/local_model/asl_model.h5 -------------------------------------------------------------------------------- /src/api-service/api/model.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import tensorflow as tf 4 | from tensorflow.python.keras import backend as K 5 | from tensorflow.keras.models import Model 6 | #import tensorflow_hub as hub 7 | #from google.cloud import aiplatform 8 | #import base64 9 | 10 | import mediapipe as mp 11 | import pandas as pd 12 | import json 13 | import cv2 14 | import keras 15 | 16 | ROWS_PER_FRAME = 543 17 | MAX_LEN = 384 18 | CROP_LEN = MAX_LEN 19 | NUM_CLASSES = 250 20 | PAD = -100. 
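# Note on the constants above and the index lists below: MediaPipe Holistic
# emits 543 landmarks per frame (468 face + 33 pose + 21 left-hand +
# 21 right-hand), which is where ROWS_PER_FRAME = 543 comes from.
# POINT_LANDMARKS keeps only the lips, hands, nose, and eyes, and
# CHANNELS = 6 * NUM_NODES reflects two coordinates (x, y) times three
# signals per node (position plus first and second temporal differences)
# as assembled in the Preprocess layer further down.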
21 | NOSE=[ 22 | 1,2,98,327 23 | ] 24 | LNOSE = [98] 25 | RNOSE = [327] 26 | LIP = [ 0, 27 | 61, 185, 40, 39, 37, 267, 269, 270, 409, 28 | 291, 146, 91, 181, 84, 17, 314, 405, 321, 375, 29 | 78, 191, 80, 81, 82, 13, 312, 311, 310, 415, 30 | 95, 88, 178, 87, 14, 317, 402, 318, 324, 308, 31 | ] 32 | LLIP = [84,181,91,146,61,185,40,39,37,87,178,88,95,78,191,80,81,82] 33 | RLIP = [314,405,321,375,291,409,270,269,267,317,402,318,324,308,415,310,311,312] 34 | 35 | POSE = [500, 502, 504, 501, 503, 505, 512, 513] 36 | LPOSE = [513,505,503,501] 37 | RPOSE = [512,504,502,500] 38 | 39 | REYE = [ 40 | 33, 7, 163, 144, 145, 153, 154, 155, 133, 41 | 246, 161, 160, 159, 158, 157, 173, 42 | ] 43 | LEYE = [ 44 | 263, 249, 390, 373, 374, 380, 381, 382, 362, 45 | 466, 388, 387, 386, 385, 384, 398, 46 | ] 47 | 48 | LHAND = np.arange(468, 489).tolist() 49 | RHAND = np.arange(522, 543).tolist() 50 | 51 | POINT_LANDMARKS = LIP + LHAND + RHAND + NOSE + REYE + LEYE #+POSE 52 | 53 | NUM_NODES = len(POINT_LANDMARKS) 54 | CHANNELS = 6*NUM_NODES 55 | 56 | print(NUM_NODES) 57 | print(CHANNELS) 58 | 59 | def tf_nan_mean(x, axis=0, keepdims=False): 60 | return tf.reduce_sum(tf.where(tf.math.is_nan(x), tf.zeros_like(x), x), axis=axis, keepdims=keepdims) / tf.reduce_sum(tf.where(tf.math.is_nan(x), tf.zeros_like(x), tf.ones_like(x)), axis=axis, keepdims=keepdims) 61 | 62 | def tf_nan_std(x, center=None, axis=0, keepdims=False): 63 | if center is None: 64 | center = tf_nan_mean(x, axis=axis, keepdims=True) 65 | d = x - center 66 | return tf.math.sqrt(tf_nan_mean(d * d, axis=axis, keepdims=keepdims)) 67 | 68 | class Preprocess(tf.keras.layers.Layer): 69 | def __init__(self, max_len=MAX_LEN, point_landmarks=POINT_LANDMARKS, **kwargs): 70 | super().__init__(**kwargs) 71 | self.max_len = max_len 72 | self.point_landmarks = point_landmarks 73 | 74 | def call(self, inputs): 75 | if tf.rank(inputs) == 3: 76 | x = inputs[None,...] 
77 | else: 78 | x = inputs 79 | 80 | mean = tf_nan_mean(tf.gather(x, [17], axis=2), axis=[1,2], keepdims=True) 81 | mean = tf.where(tf.math.is_nan(mean), tf.constant(0.5,x.dtype), mean) 82 | x = tf.gather(x, self.point_landmarks, axis=2) #N,T,P,C 83 | std = tf_nan_std(x, center=mean, axis=[1,2], keepdims=True) 84 | 85 | x = (x - mean)/std 86 | 87 | if self.max_len is not None: 88 | x = x[:,:self.max_len] 89 | length = tf.shape(x)[1] 90 | x = x[...,:2] 91 | 92 | dx = tf.cond(tf.shape(x)[1]>1,lambda:tf.pad(x[:,1:] - x[:,:-1], [[0,0],[0,1],[0,0],[0,0]]),lambda:tf.zeros_like(x)) 93 | 94 | dx2 = tf.cond(tf.shape(x)[1]>2,lambda:tf.pad(x[:,2:] - x[:,:-2], [[0,0],[0,2],[0,0],[0,0]]),lambda:tf.zeros_like(x)) 95 | 96 | x = tf.concat([ 97 | tf.reshape(x, (-1,length,2*len(self.point_landmarks))), 98 | tf.reshape(dx, (-1,length,2*len(self.point_landmarks))), 99 | tf.reshape(dx2, (-1,length,2*len(self.point_landmarks))), 100 | ], axis = -1) 101 | 102 | x = tf.where(tf.math.is_nan(x),tf.constant(0.,x.dtype),x) 103 | 104 | return x 105 | 106 | @keras.saving.register_keras_serializable() 107 | class ECA(tf.keras.layers.Layer): 108 | def __init__(self, kernel_size=5, **kwargs): 109 | super().__init__(**kwargs) 110 | self.supports_masking = True 111 | self.kernel_size = kernel_size 112 | self.conv = tf.keras.layers.Conv1D(1, kernel_size=kernel_size, strides=1, padding="same", use_bias=False) 113 | 114 | def call(self, inputs, mask=None): 115 | nn = tf.keras.layers.GlobalAveragePooling1D()(inputs, mask=mask) 116 | nn = tf.expand_dims(nn, -1) 117 | nn = self.conv(nn) 118 | nn = tf.squeeze(nn, -1) 119 | nn = tf.nn.sigmoid(nn) 120 | nn = nn[:,None,:] 121 | return inputs * nn 122 | 123 | @keras.saving.register_keras_serializable() 124 | class LateDropout(tf.keras.layers.Layer): 125 | def __init__(self, rate, noise_shape=None, start_step=0, **kwargs): 126 | super().__init__(**kwargs) 127 | # self.supports_masking = True 128 | self.rate = rate 129 | self.start_step = start_step 130 | self.dropout = tf.keras.layers.Dropout(rate, noise_shape=noise_shape) 131 | 132 | def build(self, input_shape): 133 | super().build(input_shape) 134 | agg = tf.VariableAggregation.ONLY_FIRST_REPLICA 135 | self._train_counter = tf.Variable(0, dtype="int64", aggregation=agg, trainable=False) 136 | 137 | def call(self, inputs, training=False): 138 | x = tf.cond(self._train_counter < self.start_step, lambda:inputs, lambda:self.dropout(inputs, training=training)) 139 | if training: 140 | self._train_counter.assign_add(1) 141 | return x 142 | 143 | @keras.saving.register_keras_serializable() 144 | class CausalDWConv1D(tf.keras.layers.Layer): 145 | def __init__(self, 146 | kernel_size=17, 147 | dilation_rate=1, 148 | use_bias=False, 149 | depthwise_initializer='glorot_uniform', 150 | name='', **kwargs): 151 | super().__init__(name=name,**kwargs) 152 | self.causal_pad = tf.keras.layers.ZeroPadding1D((dilation_rate*(kernel_size-1),0),name=name + '_pad') 153 | self.dw_conv = tf.keras.layers.DepthwiseConv1D( 154 | kernel_size, 155 | strides=1, 156 | dilation_rate=dilation_rate, 157 | padding='valid', 158 | use_bias=use_bias, 159 | depthwise_initializer=depthwise_initializer, 160 | name=name + '_dwconv') 161 | self.supports_masking = True 162 | 163 | def call(self, inputs): 164 | x = self.causal_pad(inputs) 165 | x = self.dw_conv(x) 166 | return x 167 | 168 | def Conv1DBlock(channel_size, 169 | kernel_size, 170 | dilation_rate=1, 171 | drop_rate=0.0, 172 | expand_ratio=2, 173 | se_ratio=0.25, 174 | activation='swish', 175 | name=None): 176 | ''' 
177 | efficient conv1d block, @hoyso48 178 | ''' 179 | if name is None: 180 | name = str(tf.keras.backend.get_uid("mbblock")) 181 | # Expansion phase 182 | def apply(inputs): 183 | channels_in = tf.keras.backend.int_shape(inputs)[-1] 184 | channels_expand = channels_in * expand_ratio 185 | 186 | skip = inputs 187 | 188 | x = tf.keras.layers.Dense( 189 | channels_expand, 190 | use_bias=True, 191 | activation=activation, 192 | name=name + '_expand_conv')(inputs) 193 | 194 | # Depthwise Convolution 195 | x = CausalDWConv1D(kernel_size, 196 | dilation_rate=dilation_rate, 197 | use_bias=False, 198 | name=name + '_dwconv')(x) 199 | 200 | x = tf.keras.layers.BatchNormalization(momentum=0.95, name=name + '_bn')(x) 201 | 202 | x = ECA()(x) 203 | 204 | x = tf.keras.layers.Dense( 205 | channel_size, 206 | use_bias=True, 207 | name=name + '_project_conv')(x) 208 | 209 | if drop_rate > 0: 210 | x = tf.keras.layers.Dropout(drop_rate, noise_shape=(None,1,1), name=name + '_drop')(x) 211 | 212 | if (channels_in == channel_size): 213 | x = tf.keras.layers.add([x, skip], name=name + '_add') 214 | return x 215 | 216 | return apply 217 | 218 | 219 | @keras.saving.register_keras_serializable() 220 | class MultiHeadSelfAttention(tf.keras.layers.Layer): 221 | def __init__(self, dim=256, num_heads=4, dropout=0, **kwargs): 222 | super().__init__(**kwargs) 223 | self.dim = dim 224 | self.scale = self.dim ** -0.5 225 | self.num_heads = num_heads 226 | self.qkv = tf.keras.layers.Dense(3 * dim, use_bias=False) 227 | self.drop1 = tf.keras.layers.Dropout(dropout) 228 | self.proj = tf.keras.layers.Dense(dim, use_bias=False) 229 | self.supports_masking = True 230 | 231 | def call(self, inputs, mask=None): 232 | qkv = self.qkv(inputs) 233 | qkv = tf.keras.layers.Permute((2, 1, 3))(tf.keras.layers.Reshape((-1, self.num_heads, self.dim * 3 // self.num_heads))(qkv)) 234 | q, k, v = tf.split(qkv, [self.dim // self.num_heads] * 3, axis=-1) 235 | 236 | attn = tf.matmul(q, k, transpose_b=True) * self.scale 237 | 238 | if mask is not None: 239 | mask = mask[:, None, None, :] 240 | 241 | attn = tf.keras.layers.Softmax(axis=-1)(attn, mask=mask) 242 | attn = self.drop1(attn) 243 | 244 | x = attn @ v 245 | x = tf.keras.layers.Reshape((-1, self.dim))(tf.keras.layers.Permute((2, 1, 3))(x)) 246 | x = self.proj(x) 247 | return x 248 | 249 | 250 | def TransformerBlock(dim=256, num_heads=4, expand=4, attn_dropout=0.2, drop_rate=0.2, activation='swish'): 251 | def apply(inputs): 252 | x = inputs 253 | x = tf.keras.layers.BatchNormalization(momentum=0.95)(x) 254 | x = MultiHeadSelfAttention(dim=dim,num_heads=num_heads,dropout=attn_dropout)(x) 255 | x = tf.keras.layers.Dropout(drop_rate, noise_shape=(None,1,1))(x) 256 | x = tf.keras.layers.Add()([inputs, x]) 257 | attn_out = x 258 | 259 | x = tf.keras.layers.BatchNormalization(momentum=0.95)(x) 260 | x = tf.keras.layers.Dense(dim*expand, use_bias=False, activation=activation)(x) 261 | x = tf.keras.layers.Dense(dim, use_bias=False)(x) 262 | x = tf.keras.layers.Dropout(drop_rate, noise_shape=(None,1,1))(x) 263 | x = tf.keras.layers.Add()([attn_out, x]) 264 | return x 265 | return apply 266 | 267 | def get_model(max_len=MAX_LEN, dropout_step=0, dim=192): 268 | inp = tf.keras.Input((max_len,CHANNELS)) 269 | #x = tf.keras.layers.Masking(mask_value=PAD,input_shape=(max_len,CHANNELS))(inp) #we don't need masking layer with inference 270 | x = inp 271 | ksize = 17 272 | x = tf.keras.layers.Dense(dim, use_bias=False,name='stem_conv')(x) 273 | x = 
tf.keras.layers.BatchNormalization(momentum=0.95,name='stem_bn')(x) 274 | 275 | x = Conv1DBlock(dim,ksize,drop_rate=0.2)(x) 276 | x = Conv1DBlock(dim,ksize,drop_rate=0.2)(x) 277 | x = Conv1DBlock(dim,ksize,drop_rate=0.2)(x) 278 | x = TransformerBlock(dim,expand=2)(x) 279 | 280 | x = Conv1DBlock(dim,ksize,drop_rate=0.2)(x) 281 | x = Conv1DBlock(dim,ksize,drop_rate=0.2)(x) 282 | x = Conv1DBlock(dim,ksize,drop_rate=0.2)(x) 283 | x = TransformerBlock(dim,expand=2)(x) 284 | 285 | if dim == 384: #for the 4x sized model 286 | x = Conv1DBlock(dim,ksize,drop_rate=0.2)(x) 287 | x = Conv1DBlock(dim,ksize,drop_rate=0.2)(x) 288 | x = Conv1DBlock(dim,ksize,drop_rate=0.2)(x) 289 | x = TransformerBlock(dim,expand=2)(x) 290 | 291 | x = Conv1DBlock(dim,ksize,drop_rate=0.2)(x) 292 | x = Conv1DBlock(dim,ksize,drop_rate=0.2)(x) 293 | x = Conv1DBlock(dim,ksize,drop_rate=0.2)(x) 294 | x = TransformerBlock(dim,expand=2)(x) 295 | 296 | x = tf.keras.layers.Dense(dim*2,activation=None,name='top_conv')(x) 297 | x = tf.keras.layers.GlobalAveragePooling1D()(x) 298 | x = LateDropout(0.8, start_step=dropout_step)(x) 299 | x = tf.keras.layers.Dense(NUM_CLASSES,name='classifier')(x) 300 | return tf.keras.Model(inp, x) 301 | 302 | def load_prediction_model(): 303 | 304 | print("Loading Model...") 305 | 306 | global prediction_model 307 | 308 | # model_path = "/local_model/asl_model2.keras" 309 | model_path = "/local_model/asl_model2.h5" 310 | 311 | 312 | print("model_path:", model_path) 313 | 314 | custom_objects = {'CausalDWConv1D': CausalDWConv1D,'ECA':ECA, 'MultiHeadSelfAttention': MultiHeadSelfAttention, 'LateDropout':LateDropout} 315 | prediction_model = keras.models.load_model(model_path, 316 | custom_objects = custom_objects) 317 | 318 | 319 | # with keras.saving.custom_object_scope(custom_objects): 320 | # prediction_model = keras.models.load_model(model_path) 321 | 322 | print(prediction_model.summary()) 323 | 324 | return prediction_model 325 | 326 | ''' 327 | data_details_path = os.path.join( 328 | local_experiments_path, best_model["experiment"], "data_details.json" 329 | ) 330 | 331 | # Load data details 332 | with open(data_details_path, "r") as json_file: 333 | data_details = json.load(json_file) 334 | 335 | def check_model_change(): 336 | global best_model, best_model_id 337 | best_model_json = os.path.join(local_experiments_path, "best_model.json") 338 | if os.path.exists(best_model_json): 339 | with open(best_model_json) as json_file: 340 | best_model = json.load(json_file) 341 | 342 | if best_model_id != best_model["experiment"]: 343 | load_prediction_model() 344 | best_model_id = best_model["experiment"] 345 | ''' 346 | 347 | def transform_video(video_path): 348 | cap = cv2.VideoCapture(video_path) 349 | mp_holistic = mp.solutions.holistic 350 | holistic = mp_holistic.Holistic(min_detection_confidence=0.5, min_tracking_confidence=0.1) 351 | 352 | video_df = [] 353 | frame_no=0 354 | while cap.isOpened(): 355 | print('\r',frame_no,end='') 356 | success, image = cap.read() 357 | 358 | if not success: break 359 | image = cv2.resize(image, dsize=None, fx=4, fy=4) 360 | height,width,_ = image.shape 361 | 362 | #print(image.shape) 363 | image.flags.writeable = False 364 | image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) 365 | result = holistic.process(image) 366 | 367 | #--- 368 | data = [] 369 | fy = height/width 370 | 371 | # ----------------------------------------------------- 372 | if result.face_landmarks is None: 373 | for i in range(468): # 374 | data.append({ 375 | 'type' : 'face', 376 | 
'landmark_index' : i, 377 | 'x' : np.nan, 378 | 'y' : np.nan, 379 | 'z' : np.nan, 380 | }) 381 | else: 382 | assert(len(result.face_landmarks.landmark)==468) 383 | for i in range(468): # 384 | xyz = result.face_landmarks.landmark[i] 385 | data.append({ 386 | 'type' : 'face', 387 | 'landmark_index' : i, 388 | 'x' : xyz.x, 389 | 'y' : xyz.y *fy, 390 | 'z' : xyz.z, 391 | }) 392 | 393 | # ----------------------------------------------------- 394 | if result.left_hand_landmarks is None: 395 | for i in range(21): # 396 | data.append({ 397 | 'type': 'left_hand', 398 | 'landmark_index': i, 399 | 'x': np.nan, 400 | 'y': np.nan, 401 | 'z': np.nan, 402 | }) 403 | else: 404 | assert (len(result.left_hand_landmarks.landmark) == 21) 405 | for i in range(21): # 406 | xyz = result.left_hand_landmarks.landmark[i] 407 | data.append({ 408 | 'type': 'left_hand', 409 | 'landmark_index': i, 410 | 'x': xyz.x, 411 | 'y': xyz.y *fy, 412 | 'z': xyz.z, 413 | }) 414 | 415 | # ----------------------------------------------------- 416 | #if result.pose_world_landmarks is None: 417 | if result.pose_landmarks is None: 418 | for i in range(33): # 419 | data.append({ 420 | 'type': 'pose', 421 | 'landmark_index': i, 422 | 'x': np.nan, 423 | 'y': np.nan, 424 | 'z': np.nan, 425 | }) 426 | else: 427 | assert (len(result.pose_landmarks.landmark) == 33) 428 | for i in range(33): # 429 | xyz = result.pose_landmarks.landmark[i] 430 | data.append({ 431 | 'type': 'pose', 432 | 'landmark_index': i, 433 | 'x': xyz.x, 434 | 'y': xyz.y *fy, 435 | 'z': xyz.z, 436 | }) 437 | 438 | # ----------------------------------------------------- 439 | if result.right_hand_landmarks is None: 440 | for i in range(21): # 441 | data.append({ 442 | 'type': 'right_hand', 443 | 'landmark_index': i, 444 | 'x': np.nan, 445 | 'y': np.nan, 446 | 'z': np.nan, 447 | }) 448 | else: 449 | assert (len(result.right_hand_landmarks.landmark) == 21) 450 | for i in range(21): # 451 | xyz = result.right_hand_landmarks.landmark[i] 452 | data.append({ 453 | 'type': 'right_hand', 454 | 'landmark_index': i, 455 | 'x': xyz.x, 456 | 'y': xyz.y *fy, 457 | 'z': xyz.z, 458 | }) 459 | zz=0 460 | 461 | frame_df = pd.DataFrame(data) 462 | frame_df.loc[:,'frame'] = frame_no 463 | frame_df.loc[:, 'height'] = height/width 464 | frame_df.loc[:, 'width'] = width/width 465 | video_df.append(frame_df) 466 | 467 | 468 | #========================= 469 | frame_no +=1 470 | 471 | # print(video_df) 472 | video_df = pd.concat(video_df) 473 | 474 | video_df['row_id'] = video_df['frame'].astype('str')+'-'+video_df['type']+'-'+video_df['landmark_index'].astype('str') 475 | try: 476 | video_df.drop(['Unnamed: 0', 'height', 'width'], axis=1, inplace=True) 477 | except: 478 | video_df.drop(['height', 'width'], axis=1, inplace=True) 479 | 480 | video_df.to_numpy() 481 | 482 | data_columns = ['x', 'y', 'z'] 483 | 484 | video_df = video_df[data_columns] 485 | 486 | n_frames = int(len(video_df) / ROWS_PER_FRAME) 487 | data = video_df.values.reshape(n_frames, ROWS_PER_FRAME, len(data_columns)) 488 | 489 | return data 490 | 491 | class TFModel(tf.Module): 492 | """ 493 | TensorFlow Lite model that takes input tensors and applies: 494 | – a preprocessing model 495 | – the ISLR model 496 | """ 497 | def __init__(self, islr_models): 498 | """ 499 | Initializes the TFLiteModel with the specified preprocessing model and ISLR model. 
500 | """ 501 | super(TFModel, self).__init__() 502 | 503 | # Load the feature generation and main models 504 | self.prep_inputs = Preprocess() 505 | self.islr_models = islr_models 506 | 507 | @tf.function(input_signature=[tf.TensorSpec(shape=[None, 543, 3], dtype=tf.float32, name='inputs')]) 508 | def __call__(self, inputs): 509 | """ 510 | Applies the feature generation model and main model to the input tensors. 511 | 512 | Args: 513 | inputs: Input tensor with shape [batch_size, 543, 3]. 514 | 515 | Returns: 516 | A dictionary with a single key 'outputs' and corresponding output tensor. 517 | """ 518 | x = self.prep_inputs(inputs) 519 | # x = self.prep_inputs(tf.cast(inputs, dtype=tf.float32)) 520 | outputs = [model(x) for model in self.islr_models] 521 | outputs = tf.keras.layers.Average()(outputs)[0] 522 | return {'outputs': outputs} 523 | 524 | def make_prediction(video_path): 525 | 526 | # Load & preprocess 527 | data = transform_video(video_path) 528 | 529 | prediction_model = load_prediction_model() 530 | tflite_keras_model = TFModel(islr_models=[prediction_model]) 531 | p2s_map = {"TV": 0, "after": 1, "airplane": 2, "all": 3, "alligator": 4, "animal": 5, "another": 6, "any": 7, "apple": 8, "arm": 9, "aunt": 10, "awake": 11, "backyard": 12, "bad": 13, "balloon": 14, "bath": 15, "because": 16, "bed": 17, "bedroom": 18, "bee": 19, "before": 20, "beside": 21, "better": 22, "bird": 23, "black": 24, "blow": 25, "blue": 26, "boat": 27, "book": 28, "boy": 29, "brother": 30, "brown": 31, "bug": 32, "bye": 33, "callonphone": 34, "can": 35, "car": 36, "carrot": 37, "cat": 38, "cereal": 39, "chair": 40, "cheek": 41, "child": 42, "chin": 43, "chocolate": 44, "clean": 45, "close": 46, "closet": 47, "cloud": 48, "clown": 49, "cow": 50, "cowboy": 51, "cry": 52, "cut": 53, "cute": 54, "dad": 55, "dance": 56, "dirty": 57, "dog": 58, "doll": 59, "donkey": 60, "down": 61, "drawer": 62, "drink": 63, "drop": 64, "dry": 65, "dryer": 66, "duck": 67, "ear": 68, "elephant": 69, "empty": 70, "every": 71, "eye": 72, "face": 73, "fall": 74, "farm": 75, "fast": 76, "feet": 77, "find": 78, "fine": 79, "finger": 80, "finish": 81, "fireman": 82, "first": 83, "fish": 84, "flag": 85, "flower": 86, "food": 87, "for": 88, "frenchfries": 89, "frog": 90, "garbage": 91, "gift": 92, "giraffe": 93, "girl": 94, "give": 95, "glasswindow": 96, "go": 97, "goose": 98, "grandma": 99, "grandpa": 100, "grass": 101, "green": 102, "gum": 103, "hair": 104, "happy": 105, "hat": 106, "hate": 107, "have": 108, "haveto": 109, "head": 110, "hear": 111, "helicopter": 112, "hello": 113, "hen": 114, "hesheit": 115, "hide": 116, "high": 117, "home": 118, "horse": 119, "hot": 120, "hungry": 121, "icecream": 122, "if": 123, "into": 124, "jacket": 125, "jeans": 126, "jump": 127, "kiss": 128, "kitty": 129, "lamp": 130, "later": 131, "like": 132, "lion": 133, "lips": 134, "listen": 135, "look": 136, "loud": 137, "mad": 138, "make": 139, "man": 140, "many": 141, "milk": 142, "minemy": 143, "mitten": 144, "mom": 145, "moon": 146, "morning": 147, "mouse": 148, "mouth": 149, "nap": 150, "napkin": 151, "night": 152, "no": 153, "noisy": 154, "nose": 155, "not": 156, "now": 157, "nuts": 158, "old": 159, "on": 160, "open": 161, "orange": 162, "outside": 163, "owie": 164, "owl": 165, "pajamas": 166, "pen": 167, "pencil": 168, "penny": 169, "person": 170, "pig": 171, "pizza": 172, "please": 173, "police": 174, "pool": 175, "potty": 176, "pretend": 177, "pretty": 178, "puppy": 179, "puzzle": 180, "quiet": 181, "radio": 182, "rain": 183, "read": 184, "red": 
185, "refrigerator": 186, "ride": 187, "room": 188, "sad": 189, "same": 190, "say": 191, "scissors": 192, "see": 193, "shhh": 194, "shirt": 195, "shoe": 196, "shower": 197, "sick": 198, "sleep": 199, "sleepy": 200, "smile": 201, "snack": 202, "snow": 203, "stairs": 204, "stay": 205, "sticky": 206, "store": 207, "story": 208, "stuck": 209, "sun": 210, "table": 211, "talk": 212, "taste": 213, "thankyou": 214, "that": 215, "there": 216, "think": 217, "thirsty": 218, "tiger": 219, "time": 220, "tomorrow": 221, "tongue": 222, "tooth": 223, "toothbrush": 224, "touch": 225, "toy": 226, "tree": 227, "uncle": 228, "underwear": 229, "up": 230, "vacuum": 231, "wait": 232, "wake": 233, "water": 234, "wet": 235, "weus": 236, "where": 237, "white": 238, "who": 239, "why": 240, "will": 241, "wolf": 242, "yellow": 243, "yes": 244, "yesterday": 245, "yourself": 246, "yucky": 247, "zebra": 248, "zipper": 249} 532 | #decoder = lambda x: p2s_map.get(x) 533 | demo_output = tflite_keras_model(data)["outputs"] 534 | v_output = np.argmax(demo_output.numpy(), axis = -1) 535 | for k,v in p2s_map.items(): 536 | if v == v_output: 537 | pred_value = k 538 | #pred_value = decoder(np.argmax(demo_output.numpy(), axis=-1)) 539 | 540 | return {"prediction_label":pred_value, 541 | "poisonous": False} 542 | 543 | -------------------------------------------------------------------------------- /src/api-service/api/service.py: -------------------------------------------------------------------------------- 1 | from fastapi import FastAPI 2 | from starlette.middleware.cors import CORSMiddleware 3 | import asyncio 4 | from api.tracker import TrackerService 5 | import pandas as pd 6 | import os 7 | from fastapi import File 8 | from tempfile import TemporaryDirectory 9 | from api import model 10 | 11 | # Initialize Tracker Service 12 | tracker_service = TrackerService() 13 | 14 | # Setup FastAPI app 15 | app = FastAPI(title="API Server", description="API Server", version="v1") 16 | 17 | # Enable CORSMiddleware 18 | app.add_middleware( 19 | CORSMiddleware, 20 | allow_credentials=False, 21 | allow_origins=["*"], 22 | allow_methods=["*"], 23 | allow_headers=["*"], 24 | ) 25 | 26 | 27 | @app.on_event("startup") 28 | async def startup(): 29 | print("Startup tasks") 30 | # Start the tracker service 31 | asyncio.create_task(tracker_service.track()) 32 | 33 | 34 | # Routes 35 | @app.get("/") 36 | async def get_index(): 37 | return {"message": "Welcome to the API Service"} 38 | 39 | 40 | @app.post("/predict") 41 | async def predict(file: bytes = File(...)): 42 | print("predict file:", len(file), type(file)) 43 | 44 | self_host_model = True 45 | 46 | # # Save the test video 47 | with TemporaryDirectory() as video_dir: 48 | video_path = os.path.join(video_dir, "test.mp4") 49 | with open(video_path, "wb") as output: 50 | output.write(file) 51 | 52 | # Make prediction 53 | prediction_results = {} 54 | if self_host_model: 55 | prediction_results = model.make_prediction(video_path) 56 | # else: 57 | # prediction_results = model.make_prediction_vertexai(video_path) 58 | 59 | print(prediction_results) 60 | return prediction_results 61 | -------------------------------------------------------------------------------- /src/api-service/api/test.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cqzhao918/AI_ASL_Translator/429a795a6b587d489a55a0451ae270040aa079e4/src/api-service/api/test.mp4 -------------------------------------------------------------------------------- 
/src/api-service/api/tracker.py: --------------------------------------------------------------------------------
1 | import os
2 | import traceback
3 | import asyncio
4 | from glob import glob
5 | import json
6 | import pandas as pd
7 | 
8 | import tensorflow as tf
9 | from google.cloud import storage
10 | 
11 | 
12 | # bucket_name = os.environ["GCS_BUCKET_NAME"]
13 | bucket_name = 'capy-data'
14 | local_model_path = "/local_model"
15 | secret_path = '/secrets/capy-key.json'
16 | 
17 | with open(secret_path) as json_file:
18 |     key_info = json.load(json_file)
19 | 
20 | # print(key_info)
21 | 
22 | # Make sure the local model folder exists
23 | if not os.path.exists(local_model_path):
24 |     os.mkdir(local_model_path)
25 | 
26 | 
27 | def download_blob(bucket_name, source_blob_name, destination_file_name):
28 |     """Downloads a blob from the bucket."""
29 |     storage_client = storage.Client.from_service_account_info(key_info)
30 | 
31 |     # credential (local-development alternative):
32 |     # storage_client = storage.Client.from_service_account_json("C:/Users/chuqi/ac215/kaggle-data/aslfr-isolated/psychic-bedrock-398320-e41cc1b33701.json")
33 | 
34 |     bucket = storage_client.bucket(bucket_name)
35 |     blob = bucket.blob(source_blob_name)
36 |     blob.download_to_filename(destination_file_name)
37 | 
38 | 
39 | def download_best_model():
40 |     print("Download best model")
41 |     try:
42 |         download_file = 'asl_model2.h5'
43 |         download_blob(
44 |             bucket_name,
45 |             download_file,
46 |             os.path.join(local_model_path, download_file),
47 |         )
48 | 
49 |     except Exception:
50 |         print("Error in download_best_model")
51 |         traceback.print_exc()
52 | 
53 | 
54 | class TrackerService:
55 |     def __init__(self):
56 |         self.timestamp = 0
57 | 
58 |     async def track(self):
59 |         # while True:  (the polling loop is disabled; the model is fetched once)
60 |         await asyncio.sleep(10)
61 |         print("Download Model...")
62 | 
63 |         download_best_model()
-------------------------------------------------------------------------------- /src/api-service/docker-entrypoint.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | echo "Container is running!!!"
4 | 
5 | # this will run the api/service.py file with the instantiated app FastAPI
6 | uvicorn_server() {
7 |     uvicorn api.service:app --host 0.0.0.0 --port 9000 --log-level debug --reload --reload-dir api/ "$@"
8 | }
9 | 
10 | uvicorn_server_production() {
11 |     pipenv run uvicorn api.service:app --host 0.0.0.0 --port 9000 --lifespan on
12 | }
13 | 
14 | export -f uvicorn_server
15 | export -f uvicorn_server_production
16 | 
17 | echo -en "\033[92m
18 | The following commands are available:
19 |     uvicorn_server
20 |         Run the Uvicorn Server
21 | \033[0m
22 | "
23 | 
24 | if [ "${DEV}" = 1 ]; then
25 |   pipenv shell
26 | else
27 |   uvicorn_server_production
28 | fi
-------------------------------------------------------------------------------- /src/api-service/docker-shell.bat: --------------------------------------------------------------------------------
1 | REM Define some environment variables
2 | SET IMAGE_NAME="capy-app-api-service"
3 | 
4 | REM Build the image based on the Dockerfile
5 | docker build -t %IMAGE_NAME% -f Dockerfile .
6 | 
7 | REM Run the container
8 | cd ..
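REM (Layout assumption, mirroring docker-shell.sh: after `cd ..` the
REM  local_model and secrets mounts below resolve to folders outside the
REM  repo checkout; the API is then exposed on http://localhost:9000.)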
9 | docker run --rm --name %IMAGE_NAME% -ti ^ 10 | --mount type=bind,source="%cd%\api-service",target=/app ^ 11 | --mount type=bind,source="%cd%\..\..\local_model",target=/local_model ^ 12 | --mount type=bind,source="%cd%\..\..\secrets",target=/secrets ^ 13 | -p 9000:9000 -e DEV=1 %IMAGE_NAME% -------------------------------------------------------------------------------- /src/api-service/docker-shell.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # exit immediately if a command exits with a non-zero status 4 | set -e 5 | 6 | # Define some environment variables 7 | export IMAGE_NAME="capy-app-api-service" 8 | export BASE_DIR=$(pwd) 9 | export SECRETS_DIR=$(pwd)/../../../secrets/ 10 | export PERSISTENT_DIR=$(pwd)/../../../local_model/ 11 | export GCS_BUCKET_NAME="capy-app-models" 12 | 13 | # Build the image based on the Dockerfile 14 | docker build -t $IMAGE_NAME -f Dockerfile . 15 | # M1/2 chip macs use this line 16 | # docker build -t $IMAGE_NAME --platform=linux/arm64/v8 -f Dockerfile . 17 | 18 | # Run the container 19 | docker run --rm --name $IMAGE_NAME -ti \ 20 | -v "$BASE_DIR":/app \ 21 | -v "$SECRETS_DIR":/secrets \ 22 | -v "$PERSISTENT_DIR":/local_model \ 23 | -p 9000:9000 \ 24 | -e DEV=1 \ 25 | -e GOOGLE_APPLICATION_CREDENTIALS=/secrets/capy-key.json \ 26 | -e GCS_BUCKET_NAME=$GCS_BUCKET_NAME \ 27 | $IMAGE_NAME -------------------------------------------------------------------------------- /src/data-collector/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cqzhao918/AI_ASL_Translator/429a795a6b587d489a55a0451ae270040aa079e4/src/data-collector/.gitkeep -------------------------------------------------------------------------------- /src/data-processor/Dockerfile: -------------------------------------------------------------------------------- 1 | # Use the official Debian-hosted Python image 2 | FROM python:3.9-slim-buster 3 | 4 | ARG DEBIAN_PACKAGES="build-essential git curl wget unzip gzip" 5 | 6 | # Prevent apt from showing prompts 7 | ENV DEBIAN_FRONTEND=noninteractive 8 | 9 | # Python wants UTF-8 locale 10 | ENV LANG=C.UTF-8 11 | 12 | # Tell pipenv where the shell is. 13 | # This allows us to use "pipenv shell" as a container entry point. 14 | ENV PYENV_SHELL=/bin/bash 15 | 16 | # Tell Python to disable buffering so we don't lose any logs. 
17 | ENV PYTHONUNBUFFERED=1 18 | 19 | #ENV GOOGLE_APPLICATION_CREDENTIALS=secrets/data-pipeline.json 20 | 21 | # Ensure we have an up to date baseline, install dependencies 22 | RUN set -ex; \ 23 | for i in $(seq 1 8); do mkdir -p "/usr/share/man/man${i}"; done && \ 24 | apt-get update && \ 25 | apt-get upgrade -y && \ 26 | apt-get install -y --no-install-recommends $DEBIAN_PACKAGES && \ 27 | apt-get install -y --no-install-recommends software-properties-common apt-transport-https ca-certificates gnupg2 gnupg-agent curl openssh-client && \ 28 | curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - && \ 29 | echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] https://packages.cloud.google.com/apt cloud-sdk main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list && \ 30 | curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key --keyring /usr/share/keyrings/cloud.google.gpg add - && \ 31 | apt-get update && \ 32 | apt-get install -y --no-install-recommends google-cloud-sdk && \ 33 | apt-get clean && \ 34 | rm -rf /var/lib/apt/lists/* && \ 35 | pip install --no-cache-dir --upgrade pip && \ 36 | pip install pipenv && \ 37 | useradd -ms /bin/bash app -d /home/app -u 1000 -p "$(openssl passwd -1 Passw0rd)" && \ 38 | mkdir -p /app && \ 39 | chown app:app /app 40 | 41 | # Switch to the new user 42 | USER app 43 | WORKDIR /app 44 | 45 | # # # Set the working directory to /preprocessing 46 | # RUN pipenv lock 47 | 48 | # # Add the Pipfile, Pipfile.lock, and python code into the container 49 | # ADD . / 50 | 51 | # RUN pipenv sync 52 | 53 | # Install python packages 54 | ADD --chown=app:app Pipfile Pipfile.lock /app/ 55 | 56 | RUN pipenv sync 57 | 58 | # Make the entrypoint.sh script executable 59 | # RUN chmod +x /bin/bash/entrypoint.sh 60 | # Add the rest of the source code. This is done last so we don't invalidate all 61 | # layers when we change a line of code. 62 | ADD --chown=app:app . 
/app 63 | 64 | # Make the entrypoint.sh script executable 65 | # RUN chmod +x /bin/bash/entrypoint.sh 66 | 67 | # # Set the entrypoint 68 | # ENTRYPOINT ["/bin/bash"] 69 | 70 | # # Specify the entrypoint script as the CMD 71 | # CMD ["entrypoint.sh"] 72 | # CMD ["-c", "pipenv shell"] 73 | ENTRYPOINT ["/bin/bash","./entrypoint.sh"] 74 | -------------------------------------------------------------------------------- /src/data-processor/Pipfile: -------------------------------------------------------------------------------- 1 | [[source]] 2 | name = "pypi" 3 | url = "https://pypi.org/simple" 4 | verify_ssl = true 5 | 6 | [dev-packages] 7 | 8 | [packages] 9 | opencv-python = "*" 10 | google-cloud-aiplatform = "*" 11 | pandas = "*" 12 | numpy = "*" 13 | tqdm = "*" 14 | tensorflow = "*" 15 | google-cloud-storage = "*" 16 | pyarrow = "*" 17 | mediapipe = "*" 18 | glob = "*" 19 | 20 | [requires] 21 | python_version = "3.9" -------------------------------------------------------------------------------- /src/data-processor/cli.py: -------------------------------------------------------------------------------- 1 | import mediapipe as mp 2 | import numpy as np 3 | import pandas as pd 4 | import json 5 | import cv2 6 | from google.cloud import storage 7 | import os 8 | import glob 9 | import tqdm 10 | import argparse 11 | 12 | 13 | def transform_video(video_file): 14 | cap = cv2.VideoCapture(video_file) 15 | mp_holistic = mp.solutions.holistic 16 | holistic = mp_holistic.Holistic(min_detection_confidence=0.5, min_tracking_confidence=0.1) 17 | 18 | video_df = [] 19 | frame_no=0 20 | 21 | while cap.isOpened(): 22 | print('\r',frame_no,end='') 23 | success, image = cap.read() 24 | 25 | if not success: break 26 | image = cv2.resize(image, dsize=None, fx=4, fy=4) 27 | height,width,_ = image.shape 28 | 29 | #print(image.shape) 30 | image.flags.writeable = False 31 | image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) 32 | result = holistic.process(image) 33 | 34 | data = [] 35 | fy = height/width 36 | 37 | if result.face_landmarks is None: 38 | for i in range(468): # 39 | data.append({ 40 | 'type' : 'face', 41 | 'landmark_index' : i, 42 | 'x' : np.nan, 43 | 'y' : np.nan, 44 | 'z' : np.nan, 45 | }) 46 | else: 47 | assert(len(result.face_landmarks.landmark)==468) 48 | for i in range(468): # 49 | xyz = result.face_landmarks.landmark[i] 50 | data.append({ 51 | 'type' : 'face', 52 | 'landmark_index' : i, 53 | 'x' : xyz.x, 54 | 'y' : xyz.y *fy, 55 | 'z' : xyz.z, 56 | }) 57 | 58 | if result.left_hand_landmarks is None: 59 | for i in range(21): # 60 | data.append({ 61 | 'type': 'left_hand', 62 | 'landmark_index': i, 63 | 'x': np.nan, 64 | 'y': np.nan, 65 | 'z': np.nan, 66 | }) 67 | else: 68 | assert (len(result.left_hand_landmarks.landmark) == 21) 69 | for i in range(21): # 70 | xyz = result.left_hand_landmarks.landmark[i] 71 | data.append({ 72 | 'type': 'left_hand', 73 | 'landmark_index': i, 74 | 'x': xyz.x, 75 | 'y': xyz.y *fy, 76 | 'z': xyz.z, 77 | }) 78 | 79 | #if result.pose_world_landmarks is None: 80 | if result.pose_landmarks is None: 81 | for i in range(33): # 82 | data.append({ 83 | 'type': 'pose', 84 | 'landmark_index': i, 85 | 'x': np.nan, 86 | 'y': np.nan, 87 | 'z': np.nan, 88 | }) 89 | else: 90 | assert (len(result.pose_landmarks.landmark) == 33) 91 | for i in range(33): # 92 | xyz = result.pose_landmarks.landmark[i] 93 | data.append({ 94 | 'type': 'pose', 95 | 'landmark_index': i, 96 | 'x': xyz.x, 97 | 'y': xyz.y *fy, 98 | 'z': xyz.z, 99 | }) 100 | 101 | if result.right_hand_landmarks is None: 102 | 
for i in range(21): # 103 | data.append({ 104 | 'type': 'right_hand', 105 | 'landmark_index': i, 106 | 'x': np.nan, 107 | 'y': np.nan, 108 | 'z': np.nan, 109 | }) 110 | else: 111 | assert (len(result.right_hand_landmarks.landmark) == 21) 112 | for i in range(21): # 113 | xyz = result.right_hand_landmarks.landmark[i] 114 | data.append({ 115 | 'type': 'right_hand', 116 | 'landmark_index': i, 117 | 'x': xyz.x, 118 | 'y': xyz.y *fy, 119 | 'z': xyz.z, 120 | }) 121 | zz=0 122 | 123 | frame_df = pd.DataFrame(data) 124 | frame_df.loc[:,'frame'] = frame_no 125 | frame_df.loc[:, 'height'] = height/width 126 | frame_df.loc[:, 'width'] = width/width 127 | #print(frame_df) 128 | video_df.append(frame_df) 129 | frame_no +=1 130 | 131 | video_df = pd.concat(video_df) 132 | holistic.close() 133 | return video_df 134 | 135 | def clean_format(video_df): 136 | #print(video_df) 137 | video_df['row_id'] = video_df['frame'].astype('str')+'-'+video_df['type']+'-'+video_df['landmark_index'].astype('str') 138 | video_df.drop(['height', 'width'], axis=1, inplace=True) 139 | return video_df 140 | 141 | 142 | 143 | def main(args): 144 | 145 | print("========Processing Data=========") 146 | 147 | client = storage.Client() 148 | bucket = client.bucket('capy-data') 149 | # videofile_dir = [file.name for file in bucket.list_blobs(prefix="data/WLASL-data/wlasl_videos/") if '.mp4' in file.name] 150 | # videofiles = [os.path.join('gs://capy-data',videofile) for videofile in videofile_dir if '.mp4' in videofile] 151 | # video_filenames = videofiles 152 | 153 | item = args.file 154 | 155 | if not os.path.exists('wlasl_deploy_video'): 156 | os.makedirs('wlasl_deploy_video') 157 | if not os.path.exists('wlasl_deploy_parquet'): 158 | os.makedirs('wlasl_deploy_parquet') 159 | 160 | bucket_train_filename = f'data/WLASL-data/wlasl_videos/{item}.mp4' 161 | blob = bucket.blob(bucket_train_filename) 162 | train_filename = f'./wlasl_deploy_video/{item}.mp4' 163 | blob.download_to_filename(train_filename) 164 | print('transform_video') 165 | train_video_df = transform_video(train_filename) 166 | print('clean_format') 167 | cleaned_train_vdf = clean_format(train_video_df) 168 | print('to_parquet') 169 | cleaned_train_vdf.to_parquet(f'./wlasl_deploy_parquet/{item}.parquet') 170 | 171 | gcs_new_prefix = 'data/WLASL-data/wlasl_parquet_deploy/' 172 | 173 | storage_client = storage.Client.from_service_account_json("../secrets/model-deployment.json") 174 | bucket = storage_client.get_bucket("capy-data") 175 | 176 | print('upload') 177 | for input_file in tqdm.tqdm(glob.glob(os.path.join("./wlasl_deploy_parquet", '*.parquet'))): 178 | print(input_file) 179 | gcs_object_name = os.path.join(gcs_new_prefix, os.path.basename(input_file)) 180 | blob = bucket.blob(gcs_object_name) 181 | blob.upload_from_filename(input_file) 182 | 183 | 184 | # blob_df = bucket.blob("data/WLASL-data/wlasl_train_new.csv") 185 | # train_df = pd.read_csv(blob_df.open(), dtype='str') 186 | # train_videos_id = list(train_df['sequence_id']) 187 | 188 | # blob_df = bucket.blob("data/WLASL-data/wlasl_test_new.csv") 189 | # test_df = pd.read_csv(blob_df.open(), dtype='str') 190 | # test_videos_id = list(test_df['sequence_id']) 191 | 192 | # if not os.path.exists('wlasl_train_video'): 193 | # os.makedirs('wlasl_train_video') 194 | # if not os.path.exists('wlasl_train_parquet'): 195 | # os.makedirs('wlasl_train_parquet') 196 | # for item in train_videos_id: 197 | # print(item) 198 | # bucket_train_filename = f'data/WLASL-data/wlasl_videos/{item}.mp4' 199 | # blob = 
bucket.blob(bucket_train_filename) 200 | # train_filename = f'./wlasl_train_video/{item}.mp4' 201 | # # blob.download_to_filename(train_filename) 202 | # print(1) 203 | # train_video_df = transform_video(train_filename) 204 | # print(2) 205 | # cleaned_train_vdf = clean_format(train_video_df) 206 | # print(3) 207 | # cleaned_train_vdf.to_parquet(f'./wlasl_train_parquet/{item}_olivia.parquet') 208 | # print("Success 1") 209 | # break 210 | 211 | # if not os.path.exists('wlasl_test_video'): 212 | # os.makedirs('wlasl_test_video') 213 | # if not os.path.exists('wlasl_test_parquet'): 214 | # os.makedirs('wlasl_test_parquet') 215 | # for item in test_videos_id: 216 | # print(item) 217 | # bucket_test_filename = f'data/WLASL-data/wlasl_videos/{item}.mp4' 218 | # blob = bucket.blob(bucket_test_filename) 219 | # test_filename = f'./wlasl_test_video/{item}.mp4' 220 | # blob.download_to_filename(test_filename) 221 | # test_video_df = transform_video(test_filename) 222 | # cleaned_test_vdf = clean_format(test_video_df) 223 | # cleaned_test_vdf.to_parquet(f'./wlasl_test_parquet/{item}_olivia.parquet') 224 | # print("Success 2") 225 | # break 226 | 227 | # # gcs_bucket_name = 'capy-data' 228 | # gcs_train_prefix = 'data/WLASL-data/wlasl_parquet_train/' 229 | # gcs_test_prefix = 'data/WLASL-data/wlasl_parquet_test/' 230 | 231 | # storage_client = storage.Client.from_service_account_json("./psychic-bedrock-398320-e41cc1b33701.json") 232 | # bucket = storage_client.get_bucket("capy-data") 233 | 234 | # print("========Connect to bucket=========") 235 | 236 | # for input_file in tqdm.tqdm(glob.glob(os.path.join("./wlasl_train_parquet", '*_olivia.parquet'))): 237 | # print(input_file) 238 | # gcs_object_name = os.path.join(gcs_train_prefix, os.path.basename(input_file)) 239 | # blob = bucket.blob(gcs_object_name) 240 | # blob.upload_from_filename(input_file) 241 | # print("Success 3") 242 | # break 243 | 244 | # for input_file in tqdm.tqdm(glob.glob(os.path.join("./wlasl_test_parquet", '*_olivia.parquet'))): 245 | # print(input_file) 246 | # gcs_object_name = os.path.join(gcs_test_prefix, os.path.basename(input_file)) 247 | # blob = bucket.blob(gcs_object_name) 248 | # blob.upload_from_filename(input_file) 249 | # print("Success 4") 250 | # break 251 | 252 | # def main_(): 253 | # print("========Processing Data=========") 254 | # print("========Connect to bucket=========") 255 | # print("========Done=========") 256 | 257 | 258 | if __name__ == "__main__": 259 | parser = argparse.ArgumentParser(description="Data Collector CLI") 260 | 261 | parser.add_argument( 262 | "-f", 263 | "--file", 264 | default='70349', 265 | help="Video file name", 266 | ) 267 | 268 | args = parser.parse_args() 269 | 270 | main(args) -------------------------------------------------------------------------------- /src/data-processor/docker-shell.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # exit immediately if a command exits with a non-zero status 4 | set -e 5 | 6 | # Define some environment variables 7 | export IMAGE_NAME="capy-data-preprocess" 8 | export BASE_DIR=$(pwd) 9 | export SECRETS_DIR=$(pwd)/../../../secrets/ # make sure it matches your directory of secrets 10 | export GCS_BUCKET_URI="gs://capy-data" 11 | export GCP_PROJECT="psychic-bedrock-398320" 12 | 13 | # Build the image based on the Dockerfile 14 | # docker build -t $IMAGE_NAME -f Dockerfile . 15 | # M1/2 chip macs use this line 16 | docker build -t $IMAGE_NAME --platform=linux/arm64/v8 -f Dockerfile . 
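# (Sketch of a typical session: entrypoint.sh drops into `pipenv shell`,
#  where e.g. `python cli.py --file 70349` downloads that clip from the
#  capy-data bucket, extracts MediaPipe Holistic landmarks, and uploads
#  the resulting parquet back to GCS. 70349 is just the CLI's default id.)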
17 | 18 | # Run Container 19 | docker run --rm --name $IMAGE_NAME -ti \ 20 | -v "$BASE_DIR":/app \ 21 | -v "$SECRETS_DIR":/secrets \ 22 | -e GOOGLE_APPLICATION_CREDENTIALS=/secrets/model-training.json \ 23 | -e GCP_PROJECT=$GCP_PROJECT \ 24 | -e GCS_BUCKET_URI=$GCS_BUCKET_URI \ 25 | $IMAGE_NAME -------------------------------------------------------------------------------- /src/data-processor/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "Preprocess container is running!!!" 4 | # Activate the Pipenv virtual environment and install dependencies 5 | # pipenv run pip install -r requirements.txt 6 | 7 | # pipenv run pip install --force-reinstall opencv-python-headless 8 | 9 | # Authenticate gcloud using service account 10 | # gcloud auth activate-service-account --key-file=secrets/ml-workflow.json 11 | 12 | # Set GCP Project Details 13 | gcloud config set project $GCP_PROJECT 14 | 15 | # Run the preprocess.py script 16 | # pipenv run python model.py 17 | # pipenv run bash package-trainer.sh 18 | 19 | # pipenv run python cli.py 20 | 21 | pipenv shell 22 | -------------------------------------------------------------------------------- /src/data-processor/requirements.txt: -------------------------------------------------------------------------------- 1 | opencv-python 2 | mediapipe 3 | google-cloud-aiplatform 4 | pandas 5 | numpy 6 | tqdm 7 | google-cloud-storage -------------------------------------------------------------------------------- /src/data-processor/wlasl_deploy_video/60578.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cqzhao918/AI_ASL_Translator/429a795a6b587d489a55a0451ae270040aa079e4/src/data-processor/wlasl_deploy_video/60578.mp4 -------------------------------------------------------------------------------- /src/data-processor/wlasl_deploy_video/70349.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cqzhao918/AI_ASL_Translator/429a795a6b587d489a55a0451ae270040aa079e4/src/data-processor/wlasl_deploy_video/70349.mp4 -------------------------------------------------------------------------------- /src/deployment/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cqzhao918/AI_ASL_Translator/429a795a6b587d489a55a0451ae270040aa079e4/src/deployment/.gitkeep -------------------------------------------------------------------------------- /src/frontend/.env.development: -------------------------------------------------------------------------------- 1 | REACT_APP_BASE_API_URL=http://localhost:9000 2 | CHOKIDAR_USEPOLLING=true -------------------------------------------------------------------------------- /src/frontend/.env.production: -------------------------------------------------------------------------------- 1 | REACT_APP_BASE_API_URL=/api -------------------------------------------------------------------------------- /src/frontend/.gitignore: -------------------------------------------------------------------------------- 1 | node_modules/ 2 | 3 | # misc 4 | .DS_Store 5 | env.local 6 | .eslintcache -------------------------------------------------------------------------------- /src/frontend/.gitkeep: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/cqzhao918/AI_ASL_Translator/429a795a6b587d489a55a0451ae270040aa079e4/src/frontend/.gitkeep -------------------------------------------------------------------------------- /src/frontend/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:14.5.0-alpine as build 2 | 3 | WORKDIR /app 4 | ENV PATH /app/node_modules/.bin:$PATH 5 | ENV PUBLIC_URL / 6 | 7 | COPY package.json ./ 8 | COPY yarn.lock ./ 9 | RUN yarn install 10 | 11 | COPY . ./ 12 | RUN yarn run build 13 | 14 | # Nginx wrapper to serve static files 15 | FROM nginx:stable 16 | COPY --from=build /app/build /usr/share/nginx/html 17 | EXPOSE 80 18 | CMD ["nginx", "-g", "daemon off;"] -------------------------------------------------------------------------------- /src/frontend/Dockerfile.dev: -------------------------------------------------------------------------------- 1 | FROM node:14.9.0-buster-slim 2 | 3 | # Ensure we don't run the app as root. 4 | RUN set -ex; \ 5 | apt-get update && \ 6 | apt-get upgrade -y && \ 7 | apt-get install -y --no-install-recommends openssl && \ 8 | useradd -ms /bin/bash app -d /home/app -G sudo -u 2000 -p "$(openssl passwd -1 Passw0rd)" && \ 9 | mkdir -p /app && \ 10 | chown app:app /app 11 | 12 | EXPOSE 3000 13 | 14 | # Switch to the new user 15 | USER app 16 | WORKDIR /app 17 | 18 | ENTRYPOINT ["/bin/bash"] -------------------------------------------------------------------------------- /src/frontend/docker-shell.bat: -------------------------------------------------------------------------------- 1 | SET IMAGE_NAME=capy-app-frontend-react 2 | SET BASE_DIR=%cd% 3 | 4 | docker build -t %IMAGE_NAME% -f Dockerfile.dev . 5 | docker run --rm --name %IMAGE_NAME% -ti --mount type=bind,source="%cd%",target=/app -p 3000:3000 %IMAGE_NAME% -------------------------------------------------------------------------------- /src/frontend/docker-shell.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | export IMAGE_NAME="capy-app-frontend-react" 6 | 7 | docker build -t $IMAGE_NAME -f Dockerfile.dev . 
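# (Sketch, assuming the usual CRA workflow: Dockerfile.dev opens a bash
#  shell, so run `yarn install` once and then `yarn start` to serve the
#  dev build on http://localhost:3000; the app reads the API location
#  from REACT_APP_BASE_API_URL in .env.development.)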
8 | docker run --rm --name $IMAGE_NAME -ti -v "$(pwd)/:/app/" -p 3000:3000 $IMAGE_NAME -------------------------------------------------------------------------------- /src/frontend/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "frontend1", 3 | "version": "0.1.0", 4 | "private": true, 5 | "dependencies": { 6 | "@material-ui/core": "^4.11.3", 7 | "@material-ui/icons": "^4.11.2", 8 | "@material-ui/lab": "^4.0.0-alpha.57", 9 | "@testing-library/jest-dom": "^5.11.4", 10 | "@testing-library/react": "^11.1.0", 11 | "@testing-library/user-event": "^12.1.10", 12 | "axios": "^0.21.1", 13 | "react": "^17.0.1", 14 | "react-dom": "^17.0.1", 15 | "react-number-format": "^4.7.3", 16 | "react-router-dom": "^5.2.0", 17 | "react-scripts": "^4.0.1", 18 | "web-vitals": "^0.2.4" 19 | }, 20 | "scripts": { 21 | "start": "react-scripts start", 22 | "build": "react-scripts build", 23 | "test": "react-scripts test", 24 | "eject": "react-scripts eject" 25 | }, 26 | "eslintConfig": { 27 | "extends": [ 28 | "react-app", 29 | "react-app/jest" 30 | ] 31 | }, 32 | "browserslist": { 33 | "production": [ 34 | ">0.2%", 35 | "not dead", 36 | "not op_mini all" 37 | ], 38 | "development": [ 39 | "last 1 chrome version", 40 | "last 1 firefox version", 41 | "last 1 safari version" 42 | ] 43 | }, 44 | "resolutions": { 45 | "react-scripts/eslint-webpack-plugin": "2.3.0" 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /src/frontend/public/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cqzhao918/AI_ASL_Translator/429a795a6b587d489a55a0451ae270040aa079e4/src/frontend/public/favicon.ico -------------------------------------------------------------------------------- /src/frontend/public/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 13 | 14 | 15 | 24 | ✋American Sign Language Translator 25 | 26 | 27 | 28 | 29 |
30 | 40 | 41 | 42 | -------------------------------------------------------------------------------- /src/frontend/public/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "short_name": "React App", 3 | "name": "Create React App Sample", 4 | "icons": [ 5 | { 6 | "src": "favicon.ico", 7 | "sizes": "64x64 32x32 24x24 16x16", 8 | "type": "image/x-icon" 9 | } 10 | ], 11 | "start_url": ".", 12 | "display": "standalone", 13 | "theme_color": "#000000", 14 | "background_color": "#ffffff" 15 | } 16 | -------------------------------------------------------------------------------- /src/frontend/src/app/App.css: -------------------------------------------------------------------------------- 1 | .App { 2 | text-align: center; 3 | } 4 | 5 | .App-logo { 6 | height: 40vmin; 7 | pointer-events: none; 8 | } 9 | 10 | @media (prefers-reduced-motion: no-preference) { 11 | .App-logo { 12 | animation: App-logo-spin infinite 20s linear; 13 | } 14 | } 15 | 16 | .App-header { 17 | background-color: #10a480; 18 | min-height: 100vh; 19 | display: flex; 20 | flex-direction: column; 21 | align-items: center; 22 | justify-content: center; 23 | font-size: calc(10px + 2vmin); 24 | color: white; 25 | } 26 | 27 | .App-link { 28 | color: #61dafb; 29 | } 30 | 31 | @keyframes App-logo-spin { 32 | from { 33 | transform: rotate(0deg); 34 | } 35 | to { 36 | transform: rotate(360deg); 37 | } 38 | } 39 | 40 | pre { 41 | font-size: inherit; 42 | font-family: inherit; 43 | line-height: 1.66667; 44 | padding: 8px; 45 | background-color: #dedede; 46 | } 47 | 48 | -------------------------------------------------------------------------------- /src/frontend/src/app/App.js: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import { BrowserRouter as Router } from 'react-router-dom'; 3 | import { 4 | ThemeProvider, 5 | CssBaseline 6 | } from '@material-ui/core'; 7 | import './App.css'; 8 | import Theme from "./Theme"; 9 | import AppRoutes from "./AppRoutes"; 10 | import Content from "../common/Content"; 11 | import Header from "../common/Header"; 12 | import Footer from "../common/Footer"; 13 | import DataService from '../services/DataService'; 14 | // import VideoUpload from './VideoUpload'; 15 | 16 | 17 | const App = (props) => { 18 | 19 | console.log("================================== App ======================================"); 20 | 21 | // Init Data Service 22 | DataService.Init(); 23 | 24 | // Build App 25 | let view = ( 26 | 27 | 28 | 29 | 30 |
[JSX stripped during extraction: the view composes CssBaseline, ThemeProvider (with Theme), and Router around the Header, Content (wrapping AppRoutes), and Footer components imported above.]
38 | ) 39 | 40 | // Return View 41 | return view 42 | } 43 | 44 | export default App; 45 | -------------------------------------------------------------------------------- /src/frontend/src/app/AppRoutes.js: -------------------------------------------------------------------------------- 1 | import React from "react"; 2 | import { Route, Switch, Redirect } from 'react-router-dom'; 3 | import Home from "../components/Home"; 4 | import Error404 from '../components/Error/404'; 5 | import Currentmodel from '../components/Currentmodel'; 6 | 7 | const AppRouter = (props) => { 8 | 9 | console.log("================================== AppRouter ======================================"); 10 | 11 | return ( 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | ); 20 | } 21 | 22 | export default AppRouter; -------------------------------------------------------------------------------- /src/frontend/src/app/Theme.js: -------------------------------------------------------------------------------- 1 | import { 2 | createMuiTheme, 3 | } from '@material-ui/core'; 4 | 5 | const Theme = createMuiTheme({ 6 | palette: { 7 | type: 'light', 8 | primary: { 9 | // light: will be calculated from palette.primary.main, 10 | main: '#10a480', 11 | // dark: will be calculated from palette.primary.main, 12 | // contrastText: will be calculated to contrast with palette.primary.main 13 | }, 14 | secondary: { 15 | light: '#0066ff', 16 | main: '#A41034', 17 | // dark: will be calculated from palette.secondary.main, 18 | contrastText: '#ffffff', 19 | }, 20 | // error: will use the default color 21 | info: { 22 | light: '#AF5454', 23 | main: '#AF5454', 24 | // dark: will be calculated from palette.secondary.main, 25 | contrastText: '#ffffff', 26 | }, 27 | }, 28 | typography: { 29 | useNextVariants: true, 30 | h6: { 31 | color: "#15d2a4", 32 | fontSize: "1.1rem", 33 | fontFamily: "Roboto, Helvetica, Arial, sans-serif", 34 | fontWeight: 800 35 | }, 36 | h5: { 37 | color: "#18e8b6", 38 | fontSize: "1.2rem", 39 | fontFamily: "Roboto, Helvetica, Arial, sans-serif", 40 | fontWeight: 800 41 | }, 42 | h4: { 43 | color: "#15d2a4", 44 | fontSize: "1.8rem", 45 | fontFamily: "Roboto, Helvetica, Arial, sans-serif", 46 | fontWeight: 900 47 | }, 48 | }, 49 | overrides: { 50 | MuiOutlinedInput: { 51 | root: { 52 | backgroundColor: "#ffffff", 53 | position: "relative", 54 | borderRadius: "4px", 55 | } 56 | }, 57 | } 58 | }); 59 | 60 | export default Theme; -------------------------------------------------------------------------------- /src/frontend/src/app/VideoUpload.js: -------------------------------------------------------------------------------- 1 | import React from "react"; 2 | import "./styles.css"; 3 | 4 | import Card from "@material-ui/core/Card"; 5 | import Grid from "@material-ui/core/Grid"; 6 | import Input from "@material-ui/core/Input"; 7 | import Typography from "@material-ui/core/Typography"; 8 | 9 | import LocalForage from "localforage/dist/localforage.js"; 10 | const videoStore = LocalForage.createInstance({ name: "VideoStore" }); 11 | 12 | export default function App() { 13 | setVideo(); 14 | 15 | return ( 16 | 17 | 18 | 19 | Video Upload 20 | 21 | 22 | 23 | Save the video to indexDB, load from indexDB, then play. 
24 | 25 | 26 | 27 | { 33 | let file = document.getElementById("input").files[0]; 34 | if (file instanceof File) { 35 | file = new Blob([file], { type: file.type }); 36 | videoStore 37 | .setItem("video", file) 38 | .then(() => { 39 | setVideo(); 40 | }) 41 | .catch(err => console.error("Unable to store video", err)); 42 | } 43 | }} 44 | /> 45 | 46 | 47 |
48 | 49 | 50 | 51 | ); 52 | } 53 | 54 | function setVideo() { 55 | videoStore 56 | .getItem("video") 57 | .then(val => { 58 | if (val) { 59 | let vid = document.createElement("video"); 60 | vid.src = URL.createObjectURL(val); 61 | vid.muted = true; 62 | vid.style = { maxWidth: "400px", maxHeight: "400px" }; 63 | vid.autoPlay = true; 64 | vid.controls = true; 65 | vid.playsInline = true; 66 | 67 | // creating and adding the element appears to be 68 | // the issue... When just setting the source of 69 | // an element it seems to work for a while. But 70 | // if left alone, the video will eventually stop 71 | // playing or allowing time scrubbing/seeking. 72 | let elem = document.getElementById("video"); 73 | while (elem.children.length > 0) { 74 | if (elem.firstChild.src) { 75 | URL.revokeObjectURL(elem.firstChild.src); 76 | } 77 | elem.removeChild(elem.firstChild); 78 | } 79 | elem.appendChild(vid); 80 | } 81 | }) 82 | .catch(err => { 83 | console.error("Unable to retrieve video", err); 84 | }); 85 | } 86 | -------------------------------------------------------------------------------- /src/frontend/src/app/index.js: -------------------------------------------------------------------------------- 1 | import React from "react"; 2 | import ReactDOM from "react-dom"; 3 | 4 | import App from "./App"; 5 | 6 | const rootElement = document.getElementById("root"); 7 | ReactDOM.render(, rootElement); -------------------------------------------------------------------------------- /src/frontend/src/app/styles.css: -------------------------------------------------------------------------------- 1 | .App { 2 | font-family: sans-serif; 3 | text-align: center; 4 | } 5 | -------------------------------------------------------------------------------- /src/frontend/src/common/Content/index.js: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import { withStyles } from '@material-ui/core'; 3 | import styles from './styles'; 4 | 5 | const Content = props => { 6 | const classes = props.classes; 7 | const children = props.children; 8 | 9 | return ( 10 |
<div className={classes.root}>
    {children}
</div>
13 | ); 14 | } 15 | 16 | export default withStyles(styles)(Content); -------------------------------------------------------------------------------- /src/frontend/src/common/Content/styles.js: -------------------------------------------------------------------------------- 1 | const styles = theme => ({ 2 | grow: { 3 | flexGrow: 1, 4 | }, 5 | root: { 6 | minHeight: "100vh" 7 | }, 8 | }); 9 | 10 | export default styles; -------------------------------------------------------------------------------- /src/frontend/src/common/Footer/index.js: -------------------------------------------------------------------------------- 1 | import React, { useEffect, useRef, useState } from 'react'; 2 | import { withStyles } from '@material-ui/core'; 3 | import Typography from '@material-ui/core/Typography'; 4 | 5 | import styles from './styles'; 6 | 7 | const Footer = (props) => { 8 | const { classes } = props; 9 | const { history } = props; 10 | 11 | console.log("================================== Footer ======================================"); 12 | 13 | // Component States 14 | 15 | // Setup Component 16 | useEffect(() => { 17 | 18 | }, []); 19 | 20 | return ( 21 |
<div className={classes.root}>
    <Typography>
        ✋American Sign Language Translator
    </Typography>
</div>
27 | ); 28 | }; 29 | 30 | export default withStyles(styles)(Footer); -------------------------------------------------------------------------------- /src/frontend/src/common/Footer/styles.js: -------------------------------------------------------------------------------- 1 | import { pink, red } from "@material-ui/core/colors"; 2 | 3 | const styles = theme => ({ 4 | root: { 5 | flexGrow: 1, 6 | marginTop: "30px", 7 | }, 8 | grow: { 9 | flexGrow: 1, 10 | }, 11 | main: { 12 | 13 | }, 14 | }); 15 | 16 | export default styles; -------------------------------------------------------------------------------- /src/frontend/src/common/Header/index.js: -------------------------------------------------------------------------------- 1 | import React, { useState } from 'react'; 2 | import { withStyles } from '@material-ui/core'; 3 | import AppBar from '@material-ui/core/AppBar'; 4 | import Toolbar from '@material-ui/core/Toolbar'; 5 | import Drawer from '@material-ui/core/Drawer'; 6 | import Typography from '@material-ui/core/Typography'; 7 | 8 | 9 | import List from '@material-ui/core/List'; 10 | import Divider from '@material-ui/core/Divider'; 11 | import ListItem from '@material-ui/core/ListItem'; 12 | import ListItemIcon from '@material-ui/core/ListItemIcon'; 13 | import ListItemText from '@material-ui/core/ListItemText'; 14 | import Menu from '@material-ui/core/Menu'; 15 | import MenuItem from '@material-ui/core/MenuItem'; 16 | import IconButton from '@material-ui/core/IconButton'; 17 | import AccountCircle from '@material-ui/icons/AccountCircle'; 18 | import MenuIcon from '@material-ui/icons/Menu'; 19 | import Icon from '@material-ui/core/Icon'; 20 | import { Link } from 'react-router-dom'; 21 | 22 | 23 | import styles from './styles'; 24 | 25 | const Header = (props) => { 26 | const { classes } = props; 27 | 28 | console.log("================================== Header ======================================"); 29 | 30 | 31 | // State 32 | const [drawerOpen, setDrawerOpen] = useState(false); 33 | const [settingsMenuOpen, setSettingsMenuOpen] = useState(false); 34 | const [settingsMenuAnchorEl, setSettingsMenuAnchorEl] = useState(null); 35 | 36 | const toggleDrawer = (open) => () => { 37 | setDrawerOpen(open) 38 | }; 39 | const openSettingsMenu = (event) => { 40 | setSettingsMenuAnchorEl(event.currentTarget); 41 | }; 42 | const closeSettingsMenu = (event) => { 43 | setSettingsMenuAnchorEl(null); 44 | }; 45 | 46 | return ( 47 |
[JSX stripped during extraction (lines 48-104): an AppBar/Toolbar containing the menu IconButton, the app title "✋American Sign Language Translator", toolbar Links to Home (icon "home") and the current-model page (icon "model_training"), plus a commented-out Login link; below it a Drawer bound to drawerOpen whose List links to Home (icon "home") and Current Model (icon "star").]
105 | ); 106 | } 107 | 108 | export default withStyles(styles)(Header); 109 | -------------------------------------------------------------------------------- /src/frontend/src/common/Header/styles.js: -------------------------------------------------------------------------------- 1 | import { fade } from '@material-ui/core/styles/colorManipulator'; 2 | 3 | const styles = theme => ({ 4 | root: { 5 | flexGrow: 1, 6 | }, 7 | grow: { 8 | flexGrow: 1, 9 | }, 10 | appLink: { 11 | color: "inherit", 12 | textDecoration: "inherit", 13 | }, 14 | appTitle: { 15 | fontSize: "1.286rem", 16 | lineHeight: "1.33", 17 | fontWeight: "800", 18 | letterSpacing: "3px" 19 | }, 20 | menuButton: { 21 | marginLeft: -12, 22 | marginRight: 5, 23 | }, 24 | verticalDivider: { 25 | border: "1px solid white", 26 | marginLeft: "5px", 27 | marginRight: "5px", 28 | }, 29 | list: { 30 | width: 250, 31 | }, 32 | listItemText: { 33 | fontSize: "15px" 34 | }, 35 | search: { 36 | position: 'relative', 37 | borderRadius: theme.shape.borderRadius, 38 | backgroundColor: fade(theme.palette.common.white, 0.15), 39 | '&:hover': { 40 | backgroundColor: fade(theme.palette.common.white, 0.25), 41 | }, 42 | marginLeft: 0, 43 | width: '100%', 44 | [theme.breakpoints.up('sm')]: { 45 | marginLeft: theme.spacing(1), 46 | width: 'auto', 47 | }, 48 | }, 49 | searchIcon: { 50 | width: theme.spacing(1) * 9, 51 | height: '100%', 52 | position: 'absolute', 53 | pointerEvents: 'none', 54 | display: 'flex', 55 | alignItems: 'center', 56 | justifyContent: 'center', 57 | }, 58 | inputRoot: { 59 | color: 'inherit', 60 | width: '100%', 61 | }, 62 | inputInput: { 63 | paddingTop: theme.spacing(1), 64 | paddingRight: theme.spacing(1), 65 | paddingBottom: theme.spacing(1), 66 | paddingLeft: theme.spacing(1) * 10, 67 | transition: theme.transitions.create('width'), 68 | width: '100%', 69 | [theme.breakpoints.up('sm')]: { 70 | width: '35ch', 71 | '&:focus': { 72 | width: '70ch', 73 | }, 74 | }, 75 | }, 76 | alertContainer: { 77 | position: "fixed", 78 | top: "0", 79 | left: "40%", 80 | zIndex: 1000, 81 | }, 82 | }); 83 | 84 | 85 | export default styles; -------------------------------------------------------------------------------- /src/frontend/src/components/Currentmodel/index.js: -------------------------------------------------------------------------------- 1 | import React, { useEffect, useState } from 'react'; 2 | import { withStyles } from '@material-ui/core'; 3 | import Container from '@material-ui/core/Container'; 4 | import Typography from '@material-ui/core/Typography'; 5 | import Divider from '@material-ui/core/Divider'; 6 | import Paper from '@material-ui/core/Paper'; 7 | import Table from '@material-ui/core/Table'; 8 | import TableBody from '@material-ui/core/TableBody'; 9 | import TableCell from '@material-ui/core/TableCell'; 10 | import TableContainer from '@material-ui/core/TableContainer'; 11 | import TableRow from '@material-ui/core/TableRow'; 12 | import NumberFormat from "react-number-format"; 13 | 14 | import DataService from "../../services/DataService"; 15 | import styles from './styles'; 16 | 17 | const Currentmodel = (props) => { 18 | const { classes } = props; 19 | 20 | console.log("================================== Currentmodel ======================================"); 21 | 22 | // Component States 23 | const [model, setModel] = useState(null); 24 | const loadModel = () => { 25 | DataService.GetCurrentmodel() 26 | .then(function (response) { 27 | console.log(response.data); 28 | setModel(response.data.model_details); 29 | }); 
30 | } 31 | 32 | // Setup Component 33 | useEffect(() => { 34 | loadModel(); 35 | }, []); 36 | 37 | return ( 38 |
[JSX stripped during extraction (lines 39-140): a Container holding a Paper card titled "Transformer-based Model Details:" with a Divider, and, once model loads, a Table whose rows report Name, Trainable Parameters, Training Time (mins), Loss, Accuracy, Model Size (Mb), Learning Rate, Batch Size, Epochs, and Optimizer; numeric cells are rendered with NumberFormat.]
141 | ); 142 | }; 143 | 144 | export default withStyles(styles)(Currentmodel); -------------------------------------------------------------------------------- /src/frontend/src/components/Currentmodel/styles.js: -------------------------------------------------------------------------------- 1 | 2 | const styles = theme => ({ 3 | root: { 4 | flexGrow: 1, 5 | minHeight: "100vh" 6 | }, 7 | grow: { 8 | flexGrow: 1, 9 | }, 10 | main: { 11 | 12 | }, 13 | container: { 14 | backgroundColor: "#ffffff", 15 | paddingTop: "30px", 16 | paddingBottom: "20px", 17 | }, 18 | }); 19 | 20 | export default styles; -------------------------------------------------------------------------------- /src/frontend/src/components/Error/404.js: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | 3 | const Error404 = () => ( 4 |
<div>
    Error404
</div>
7 | ); 8 | 9 | export default Error404; 10 | -------------------------------------------------------------------------------- /src/frontend/src/components/Home/background.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cqzhao918/AI_ASL_Translator/429a795a6b587d489a55a0451ae270040aa079e4/src/frontend/src/components/Home/background.png -------------------------------------------------------------------------------- /src/frontend/src/components/Home/index.js: -------------------------------------------------------------------------------- 1 | import React, { useEffect, useRef, useState } from 'react'; 2 | import { withStyles } from '@material-ui/core'; 3 | import Container from '@material-ui/core/Container'; 4 | import Typography from '@material-ui/core/Typography'; 5 | 6 | import DataService from "../../services/DataService"; 7 | import styles from './styles'; 8 | 9 | const Home = (props) => { 10 | const { classes } = props; 11 | 12 | console.log("================================== Home ======================================"); 13 | 14 | const inputFile = useRef(null); 15 | 16 | // Component States 17 | const [image, setImage] = useState(null); 18 | const [prediction, setPrediction] = useState(null); 19 | 20 | // Setup Component 21 | useEffect(() => { 22 | 23 | }, []); 24 | 25 | // Handlers 26 | const handleImageUploadClick = () => { 27 | inputFile.current.click(); 28 | } 29 | // const handleOnChange = (event) => { 30 | // console.log(event.target.files); 31 | // setImage(URL.createObjectURL(event.target.files[0])); 32 | 33 | // var formData = new FormData(); 34 | // formData.append("file", event.target.files[0]); 35 | // DataService.Predict(formData) 36 | // .then(function (response) { 37 | // console.log(response.data); 38 | // setPrediction(response.data); 39 | // }) 40 | // } 41 | const handleOnChange = (event) => { 42 | const file = event.target.files[0]; 43 | const url = URL.createObjectURL(file); 44 | setImage(url); 45 | 46 | // Link to API 47 | var formData = new FormData(); 48 | formData.append("file", event.target.files[0]); 49 | DataService.Predict(formData) 50 | .then(function (response) { 51 | console.log(response.data); 52 | setPrediction(response.data); 53 | }) 54 | }; 55 | 56 | 57 | return ( 58 |
[JSX stripped during extraction (lines 59-98): a Container that shows prediction.prediction_label in a Typography block once a prediction returns, a commented-out wrapper that invoked handleImageUploadClick(), and a dropzone with a hidden file input (ref inputFile, accept=".mov,.mp4", onChange={handleOnChange}); before an upload the background prompt is shown, and afterwards the selected clip is previewed via the image state.]
99 | ); 100 | }; 101 | 102 | export default withStyles(styles)(Home); -------------------------------------------------------------------------------- /src/frontend/src/components/Home/styles.js: -------------------------------------------------------------------------------- 1 | 2 | const styles = theme => ({ 3 | root: { 4 | flexGrow: 1, 5 | minHeight: "100vh" 6 | }, 7 | grow: { 8 | flexGrow: 1, 9 | }, 10 | main: { 11 | 12 | }, 13 | container: { 14 | backgroundColor: "#ffffff", 15 | paddingTop: "30px", 16 | paddingBottom: "20px", 17 | }, 18 | dropzone: { 19 | flex: 1, 20 | display: "flex", 21 | flexDirection: "column", 22 | alignItems: "center", 23 | margin: "20px", 24 | borderWidth: "2px", 25 | borderRadius: "2px", 26 | borderColor: "#cccccc", 27 | borderStyle: "dashed", 28 | backgroundColor: "#fafafa", 29 | outline: "none", 30 | transition: "border .24s ease-in-out", 31 | cursor: "pointer", 32 | backgroundImage: "url('https://storage.googleapis.com/capy-data/background.png')", 33 | backgroundRepeat: "no-repeat", 34 | backgroundPosition: "center", 35 | minHeight: "400px", 36 | }, 37 | fileInput: { 38 | display: "none", 39 | }, 40 | preview: { 41 | width: "100%", 42 | }, 43 | help: { 44 | color: "#302f2f" 45 | }, 46 | safe: { 47 | color: "#31a354", 48 | }, 49 | poisonous: { 50 | color: "#de2d26", 51 | }, 52 | }); 53 | 54 | export default styles; -------------------------------------------------------------------------------- /src/frontend/src/index.css: -------------------------------------------------------------------------------- 1 | body { 2 | margin: 0; 3 | font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen', 4 | 'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue', 5 | sans-serif; 6 | -webkit-font-smoothing: antialiased; 7 | -moz-osx-font-smoothing: grayscale; 8 | height: 100vh; 9 | min-height: 100vh; 10 | } 11 | 12 | #root { 13 | background-color: #efefef; 14 | } 15 | 16 | code { 17 | font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New', 18 | monospace; 19 | } 20 | -------------------------------------------------------------------------------- /src/frontend/src/index.js: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import ReactDOM from 'react-dom'; 3 | import './index.css'; 4 | import App from './app/App'; 5 | 6 | ReactDOM.render( 7 | 8 | 9 | , 10 | document.getElementById('root') 11 | ); 12 | 13 | -------------------------------------------------------------------------------- /src/frontend/src/services/Common.js: -------------------------------------------------------------------------------- 1 | export const BASE_API_URL = process.env.REACT_APP_BASE_API_URL; 2 | 3 | export function epochToJsDate(ts) { 4 | let dt = new Date(ts) 5 | return dt.toLocaleDateString() + " " + dt.toLocaleTimeString(); 6 | } -------------------------------------------------------------------------------- /src/frontend/src/services/DataService.js: -------------------------------------------------------------------------------- 1 | import { BASE_API_URL } from "./Common"; 2 | 3 | const axios = require('axios'); 4 | 5 | const DataService = { 6 | Init: function () { 7 | // Any application initialization logic comes here 8 | }, 9 | GetExperiments: async function () { 10 | return await axios.get(BASE_API_URL + "/experiments"); 11 | }, 12 | GetCurrentmodel: async function () { 13 | return await axios.get(BASE_API_URL + "/best_model"); 14 | }, 15 | Predict: async function (formData) { 
16 | return await axios.post(BASE_API_URL + "/predict", formData, { 17 | headers: { 18 | 'Content-Type': 'multipart/form-data' 19 | } 20 | }); 21 | }, 22 | } 23 | 24 | export default DataService; -------------------------------------------------------------------------------- /src/model-deploy/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cqzhao918/AI_ASL_Translator/429a795a6b587d489a55a0451ae270040aa079e4/src/model-deploy/.gitkeep -------------------------------------------------------------------------------- /src/model-prediction/Dockerfile: -------------------------------------------------------------------------------- 1 | # Use the official Debian-hosted Python image 2 | FROM python:3.9-slim-buster 3 | 4 | ARG DEBIAN_PACKAGES="build-essential git curl wget unzip gzip" 5 | 6 | # Prevent apt from showing prompts 7 | ENV DEBIAN_FRONTEND=noninteractive 8 | 9 | # Python wants UTF-8 locale 10 | ENV LANG=C.UTF-8 11 | 12 | # Tell pipenv where the shell is. 13 | # This allows us to use "pipenv shell" as a container entry point. 14 | ENV PYENV_SHELL=/bin/bash 15 | 16 | # Tell Python to disable buffering so we don't lose any logs. 17 | ENV PYTHONUNBUFFERED=1 18 | 19 | #ENV GOOGLE_APPLICATION_CREDENTIALS=secrets/data-pipeline.json 20 | 21 | # Ensure we have an up to date baseline, install dependencies 22 | RUN set -ex; \ 23 | for i in $(seq 1 8); do mkdir -p "/usr/share/man/man${i}"; done && \ 24 | apt-get update && \ 25 | apt-get upgrade -y && \ 26 | apt-get install -y --no-install-recommends $DEBIAN_PACKAGES && \ 27 | apt-get install -y --no-install-recommends software-properties-common apt-transport-https ca-certificates gnupg2 gnupg-agent curl openssh-client && \ 28 | curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - && \ 29 | echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] https://packages.cloud.google.com/apt cloud-sdk main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list && \ 30 | curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key --keyring /usr/share/keyrings/cloud.google.gpg add - && \ 31 | apt-get update && \ 32 | apt-get install -y --no-install-recommends google-cloud-sdk && \ 33 | apt-get clean && \ 34 | rm -rf /var/lib/apt/lists/* && \ 35 | pip install --no-cache-dir --upgrade pip && \ 36 | pip install pipenv && \ 37 | useradd -ms /bin/bash app -d /home/app -u 1000 -p "$(openssl passwd -1 Passw0rd)" && \ 38 | mkdir -p /app && \ 39 | chown app:app /app 40 | 41 | # Switch to the new user 42 | USER app 43 | WORKDIR /app 44 | 45 | # # # Set the working directory to /preprocessing 46 | # RUN pipenv lock 47 | 48 | # # Add the Pipfile, Pipfile.lock, and python code into the container 49 | # ADD . / 50 | 51 | # RUN pipenv sync 52 | 53 | # Install python packages 54 | ADD --chown=app:app Pipfile Pipfile.lock /app/ 55 | 56 | RUN pipenv sync 57 | 58 | # Make the entrypoint.sh script executable 59 | # RUN chmod +x /bin/bash/entrypoint.sh 60 | # Add the rest of the source code. This is done last so we don't invalidate all 61 | # layers when we change a line of code. 62 | ADD --chown=app:app . 
/app 63 | 64 | # Make the entrypoint.sh script executable 65 | # RUN chmod +x /bin/bash/entrypoint.sh 66 | 67 | # # Set the entrypoint 68 | # ENTRYPOINT ["/bin/bash"] 69 | 70 | # # Specify the entrypoint script as the CMD 71 | # CMD ["entrypoint.sh"] 72 | # CMD ["-c", "pipenv shell"] 73 | ENTRYPOINT ["/bin/bash","./entrypoint.sh"] 74 | -------------------------------------------------------------------------------- /src/model-prediction/Pipfile: -------------------------------------------------------------------------------- 1 | [[source]] 2 | name = "pypi" 3 | url = "https://pypi.org/simple" 4 | verify_ssl = true 5 | 6 | [dev-packages] 7 | 8 | [packages] 9 | google-cloud-aiplatform = "*" 10 | pandas = "*" 11 | numpy = "*" 12 | tqdm = "*" 13 | tensorflow = "*" 14 | google-cloud-storage = "*" 15 | wandb = "*" 16 | pyarrow = "*" 17 | 18 | [requires] 19 | python_version = "3.9" -------------------------------------------------------------------------------- /src/model-prediction/cli.py: -------------------------------------------------------------------------------- 1 | """ 2 | Module that contains the command line app. 3 | 4 | Typical usage example from command line: 5 | python cli.py --test 6 | """ 7 | 8 | import os 9 | import requests 10 | import zipfile 11 | import tarfile 12 | import argparse 13 | from glob import glob 14 | import numpy as np 15 | import base64 16 | from google.cloud import storage 17 | from google.cloud import aiplatform 18 | import tensorflow as tf 19 | import pyarrow.parquet as pq 20 | 21 | import pandas as pd 22 | import json 23 | from multiprocessing import cpu_count 24 | from tensorflow.python.lib.io import file_io 25 | 26 | 27 | client = storage.Client() 28 | bucket = client.bucket('capy-data') 29 | 30 | # blob_df = bucket.blob("data/WLASL-data/wlasl_test_new.csv") 31 | # train_df = pd.read_csv(blob_df.open()) 32 | 33 | print("\n\n... LOAD SIGN TO PREDICTION INDEX MAP FROM JSON FILE ...\n") 34 | # Read character to prediction index 35 | blob_json = bucket.blob("data/WLASL-data/sign_to_prediction_index_map.json") 36 | with blob_json.open("r") as f: 37 | json_file = json.loads(f.read()) 38 | 39 | s2p_map = {k.lower():v for k,v in json_file.items()} 40 | p2s_map = {v:k for k,v in json_file.items()} 41 | encoder = lambda x: s2p_map.get(x.lower()) 42 | decoder = lambda x: p2s_map.get(x) 43 | # print(s2p_map) 44 | # train_df['label'] = train_df.sign.map(encoder) 45 | 46 | ROWS_PER_FRAME = 543 47 | MAX_LEN = 384 48 | CROP_LEN = MAX_LEN 49 | NUM_CLASSES = 250 50 | PAD = -100. 
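# The index lists below select a subset of MediaPipe Holistic's 543 landmarks.
# A quick sanity check on the sizes (a sketch derived from the lists that
# follow, not extra configuration): len(LIP)=40, len(LHAND)=len(RHAND)=21,
# len(NOSE)=4, len(REYE)=len(LEYE)=16, so NUM_NODES = 40+21+21+4+16+16 = 118
# and CHANNELS = 6*NUM_NODES = 708 (x/y plus first and second temporal
# differences contribute 2*3 values per landmark).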
NOSE = [
    1, 2, 98, 327
]
LNOSE = [98]
RNOSE = [327]
LIP = [
    0,
    61, 185, 40, 39, 37, 267, 269, 270, 409,
    291, 146, 91, 181, 84, 17, 314, 405, 321, 375,
    78, 191, 80, 81, 82, 13, 312, 311, 310, 415,
    95, 88, 178, 87, 14, 317, 402, 318, 324, 308,
]
LLIP = [84, 181, 91, 146, 61, 185, 40, 39, 37, 87, 178, 88, 95, 78, 191, 80, 81, 82]
RLIP = [314, 405, 321, 375, 291, 409, 270, 269, 267, 317, 402, 318, 324, 308, 415, 310, 311, 312]

POSE = [500, 502, 504, 501, 503, 505, 512, 513]
LPOSE = [513, 505, 503, 501]
RPOSE = [512, 504, 502, 500]

REYE = [
    33, 7, 163, 144, 145, 153, 154, 155, 133,
    246, 161, 160, 159, 158, 157, 173,
]
LEYE = [
    263, 249, 390, 373, 374, 380, 381, 382, 362,
    466, 388, 387, 386, 385, 384, 398,
]

LHAND = np.arange(468, 489).tolist()
RHAND = np.arange(522, 543).tolist()

POINT_LANDMARKS = LIP + LHAND + RHAND + NOSE + REYE + LEYE  # + POSE

NUM_NODES = len(POINT_LANDMARKS)
CHANNELS = 6 * NUM_NODES


def tf_nan_mean(x, axis=0, keepdims=False):
    # Mean that ignores NaNs: sum the non-NaN values, divide by their count.
    return tf.reduce_sum(tf.where(tf.math.is_nan(x), tf.zeros_like(x), x), axis=axis, keepdims=keepdims) / tf.reduce_sum(tf.where(tf.math.is_nan(x), tf.zeros_like(x), tf.ones_like(x)), axis=axis, keepdims=keepdims)


def tf_nan_std(x, center=None, axis=0, keepdims=False):
    if center is None:
        center = tf_nan_mean(x, axis=axis, keepdims=True)
    d = x - center
    return tf.math.sqrt(tf_nan_mean(d * d, axis=axis, keepdims=keepdims))
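# Preprocess converts a raw (frames, 543, 3) landmark tensor into the model
# input: it centers and scales the coordinates using NaN-aware statistics of a
# reference face landmark (index 17, on the lips), gathers POINT_LANDMARKS,
# keeps only x/y, and stacks positions with first- and second-order temporal
# differences, which is where CHANNELS = 6 * NUM_NODES comes from
# (3 feature sets x 2 coordinates per node).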
class Preprocess(tf.keras.layers.Layer):
    def __init__(self, max_len=MAX_LEN, point_landmarks=POINT_LANDMARKS, **kwargs):
        super().__init__(**kwargs)
        self.max_len = max_len
        self.point_landmarks = point_landmarks

    def call(self, inputs):
        if tf.rank(inputs) == 3:
            x = inputs[None, ...]
        else:
            x = inputs

        mean = tf_nan_mean(tf.gather(x, [17], axis=2), axis=[1, 2], keepdims=True)
        mean = tf.where(tf.math.is_nan(mean), tf.constant(0.5, x.dtype), mean)
        x = tf.gather(x, self.point_landmarks, axis=2)  # N,T,P,C
        std = tf_nan_std(x, center=mean, axis=[1, 2], keepdims=True)

        x = (x - mean) / std

        if self.max_len is not None:
            x = x[:, :self.max_len]
        length = tf.shape(x)[1]
        x = x[..., :2]

        dx = tf.cond(tf.shape(x)[1] > 1, lambda: tf.pad(x[:, 1:] - x[:, :-1], [[0, 0], [0, 1], [0, 0], [0, 0]]), lambda: tf.zeros_like(x))

        dx2 = tf.cond(tf.shape(x)[1] > 2, lambda: tf.pad(x[:, 2:] - x[:, :-2], [[0, 0], [0, 2], [0, 0], [0, 0]]), lambda: tf.zeros_like(x))

        x = tf.concat([
            tf.reshape(x, (-1, length, 2 * len(self.point_landmarks))),
            tf.reshape(dx, (-1, length, 2 * len(self.point_landmarks))),
            tf.reshape(dx2, (-1, length, 2 * len(self.point_landmarks))),
        ], axis=-1)

        x = tf.where(tf.math.is_nan(x), tf.constant(0., x.dtype), x)

        return x


######## Model ########
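# ECA is a channel-attention gate in the style of ECA-Net: global average
# pooling over time, a small 1D convolution across the channel profile, and
# a sigmoid whose output rescales each channel of the input.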
class ECA(tf.keras.layers.Layer):
    def __init__(self, kernel_size=5, **kwargs):
        super().__init__(**kwargs)
        self.supports_masking = True
        self.kernel_size = kernel_size
        self.conv = tf.keras.layers.Conv1D(1, kernel_size=kernel_size, strides=1, padding="same", use_bias=False)

    def call(self, inputs, mask=None):
        nn = tf.keras.layers.GlobalAveragePooling1D()(inputs, mask=mask)
        nn = tf.expand_dims(nn, -1)
        nn = self.conv(nn)
        nn = tf.squeeze(nn, -1)
        nn = tf.nn.sigmoid(nn)
        nn = nn[:, None, :]
        return inputs * nn


class LateDropout(tf.keras.layers.Layer):
    def __init__(self, rate, noise_shape=None, start_step=0, **kwargs):
        super().__init__(**kwargs)
        self.supports_masking = True
        self.rate = rate
        self.start_step = start_step
        self.dropout = tf.keras.layers.Dropout(rate, noise_shape=noise_shape)

    def build(self, input_shape):
        super().build(input_shape)
        agg = tf.VariableAggregation.ONLY_FIRST_REPLICA
        self._train_counter = tf.Variable(0, dtype="int64", aggregation=agg, trainable=False)

    def call(self, inputs, training=False):
        x = tf.cond(self._train_counter < self.start_step, lambda: inputs, lambda: self.dropout(inputs, training=training))
        if training:
            self._train_counter.assign_add(1)
        return x


class CausalDWConv1D(tf.keras.layers.Layer):
    def __init__(self,
                 kernel_size=17,
                 dilation_rate=1,
                 use_bias=False,
                 depthwise_initializer='glorot_uniform',
                 name='', **kwargs):
        super().__init__(name=name, **kwargs)
        self.causal_pad = tf.keras.layers.ZeroPadding1D((dilation_rate * (kernel_size - 1), 0), name=name + '_pad')
        self.dw_conv = tf.keras.layers.DepthwiseConv1D(
            kernel_size,
            strides=1,
            dilation_rate=dilation_rate,
            padding='valid',
            use_bias=use_bias,
            depthwise_initializer=depthwise_initializer,
            name=name + '_dwconv')
        self.supports_masking = True

    def call(self, inputs):
        x = self.causal_pad(inputs)
        x = self.dw_conv(x)
        return x


def Conv1DBlock(channel_size,
                kernel_size,
                dilation_rate=1,
                drop_rate=0.0,
                expand_ratio=2,
                se_ratio=0.25,
                activation='swish',
                name=None):
    '''
    Efficient Conv1D block, @hoyso48
    '''
    if name is None:
        name = str(tf.keras.backend.get_uid("mbblock"))

    # Expansion phase
    def apply(inputs):
        channels_in = tf.keras.backend.int_shape(inputs)[-1]
        channels_expand = channels_in * expand_ratio

        skip = inputs

        x = tf.keras.layers.Dense(
            channels_expand,
            use_bias=True,
            activation=activation,
            name=name + '_expand_conv')(inputs)

        # Depthwise convolution
        x = CausalDWConv1D(kernel_size,
                           dilation_rate=dilation_rate,
                           use_bias=False,
                           name=name + '_dwconv')(x)

        x = tf.keras.layers.BatchNormalization(momentum=0.95, name=name + '_bn')(x)

        x = ECA()(x)

        x = tf.keras.layers.Dense(
            channel_size,
            use_bias=True,
            name=name + '_project_conv')(x)

        if drop_rate > 0:
            x = tf.keras.layers.Dropout(drop_rate, noise_shape=(None, 1, 1), name=name + '_drop')(x)

        if channels_in == channel_size:
            x = tf.keras.layers.add([x, skip], name=name + '_add')
        return x

    return apply


class MultiHeadSelfAttention(tf.keras.layers.Layer):
    def __init__(self, dim=256, num_heads=4, dropout=0, **kwargs):
        super().__init__(**kwargs)
        self.dim = dim
        self.scale = self.dim ** -0.5
        self.num_heads = num_heads
        self.qkv = tf.keras.layers.Dense(3 * dim, use_bias=False)
        self.drop1 = tf.keras.layers.Dropout(dropout)
        self.proj = tf.keras.layers.Dense(dim, use_bias=False)
        self.supports_masking = True

    def call(self, inputs, mask=None):
        qkv = self.qkv(inputs)
        qkv = tf.keras.layers.Permute((2, 1, 3))(tf.keras.layers.Reshape((-1, self.num_heads, self.dim * 3 // self.num_heads))(qkv))
        q, k, v = tf.split(qkv, [self.dim // self.num_heads] * 3, axis=-1)

        attn = tf.matmul(q, k, transpose_b=True) * self.scale

        if mask is not None:
            mask = mask[:, None, None, :]

        attn = tf.keras.layers.Softmax(axis=-1)(attn, mask=mask)
        attn = self.drop1(attn)

        x = attn @ v
        x = tf.keras.layers.Reshape((-1, self.dim))(tf.keras.layers.Permute((2, 1, 3))(x))
        x = self.proj(x)
        return x


def TransformerBlock(dim=256, num_heads=4, expand=4, attn_dropout=0.2, drop_rate=0.2, activation='swish'):
    def apply(inputs):
        x = inputs
        x = tf.keras.layers.BatchNormalization(momentum=0.95)(x)
        x = MultiHeadSelfAttention(dim=dim, num_heads=num_heads, dropout=attn_dropout)(x)
        x = tf.keras.layers.Dropout(drop_rate, noise_shape=(None, 1, 1))(x)
        x = tf.keras.layers.Add()([inputs, x])
        attn_out = x

        x = tf.keras.layers.BatchNormalization(momentum=0.95)(x)
        x = tf.keras.layers.Dense(dim * expand, use_bias=False, activation=activation)(x)
        x = tf.keras.layers.Dense(dim, use_bias=False)(x)
        x = tf.keras.layers.Dropout(drop_rate, noise_shape=(None, 1, 1))(x)
        x = tf.keras.layers.Add()([attn_out, x])
        return x
    return apply


def get_model(max_len=MAX_LEN, dropout_step=0, dim=192):
    inp = tf.keras.Input((max_len, CHANNELS))
    # x = tf.keras.layers.Masking(mask_value=PAD, input_shape=(max_len, CHANNELS))(inp)  # no masking layer needed for inference
    x = inp
    ksize = 17
    x = tf.keras.layers.Dense(dim, use_bias=False, name='stem_conv')(x)
    x = tf.keras.layers.BatchNormalization(momentum=0.95, name='stem_bn')(x)

    x = Conv1DBlock(dim, ksize, drop_rate=0.2)(x)
    x = Conv1DBlock(dim, ksize, drop_rate=0.2)(x)
    x = Conv1DBlock(dim, ksize, drop_rate=0.2)(x)
    x = TransformerBlock(dim, expand=2)(x)

    x = Conv1DBlock(dim, ksize, drop_rate=0.2)(x)
    x = Conv1DBlock(dim, ksize, drop_rate=0.2)(x)
    x = Conv1DBlock(dim, ksize, drop_rate=0.2)(x)
    x = TransformerBlock(dim, expand=2)(x)

    if dim == 384:  # for the 4x-sized model
        x = Conv1DBlock(dim, ksize, drop_rate=0.2)(x)
        x = Conv1DBlock(dim, ksize, drop_rate=0.2)(x)
        x = Conv1DBlock(dim, ksize, drop_rate=0.2)(x)
        x = TransformerBlock(dim, expand=2)(x)

        x = Conv1DBlock(dim, ksize, drop_rate=0.2)(x)
        x = Conv1DBlock(dim, ksize, drop_rate=0.2)(x)
        x = Conv1DBlock(dim, ksize, drop_rate=0.2)(x)
        x = TransformerBlock(dim, expand=2)(x)

    x = tf.keras.layers.Dense(dim * 2, activation=None, name='top_conv')(x)
    x = tf.keras.layers.GlobalAveragePooling1D()(x)
    x = LateDropout(0.8, start_step=dropout_step)(x)
    x = tf.keras.layers.Dense(NUM_CLASSES, name='classifier')(x)
    return tf.keras.Model(inp, x)
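# Weight files are stored in GCS. Each one is streamed to the local working
# directory first, since load_weights on an .h5 file needs a local path;
# listing several files here turns the predictor into an ensemble.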
models_path = [
    'gs://capy-data/data/model_weights/islr-fp16-192-8-seed42-foldall-full.h5',  # comment out other weights to check a single-model score
    # 'C:/Users/chuqi/ac215/kaggle-data/aslfr-isolated/islr-fp16-192-8-seed42-foldall-last.h5',
    # '/kaggle/input/islr-models/islr-fp16-192-8-seed44-foldall-last.h5',
    # '/kaggle/input/islr-models/islr-fp16-192-8-seed45-foldall-last.h5',
]
models_path_local = []
for model_path in models_path:
    model_file = file_io.FileIO(model_path, mode='rb')
    model_name = model_path.split('/')[-1]
    temp_model_location = f'./{model_name}'
    temp_model_file = open(temp_model_location, 'wb')
    temp_model_file.write(model_file.read())
    temp_model_file.close()
    model_file.close()
    models_path_local.append(model_name)

models = [get_model() for _ in models_path_local]

for model, path in zip(models, models_path_local):
    model.load_weights(path)
# models[0].summary()


def load_target(wlasl_train_df, file_data_name):
    parquet_id = file_data_name.split("/")[-1]
    path = 'wlasl_parquet_deploy/' + parquet_id
    target = wlasl_train_df[wlasl_train_df['path'] == path]['sign'].iloc[0]
    return target


def load_relevant_data_subset(file_data_name):
    # Read the x/y/z landmark columns and reshape to (n_frames, 543, 3)
    data_columns = ['x', 'y', 'z']
    table = pq.read_table(file_data_name)
    data = table.to_pandas()[data_columns]

    n_frames = int(len(data) / ROWS_PER_FRAME)
    data = data.values.reshape(n_frames, ROWS_PER_FRAME, len(data_columns))
    return data.astype(np.float32)
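# Global magnitude pruning: flatten every trainable weight in the model,
# take the `sparsity` quantile of the absolute values as a threshold, and
# zero all weights at or below it. Note that `sparsity` is the fraction of
# weights removed, so a value of 1 would zero the entire network; pass a
# fraction such as 0.1 when pruning is enabled.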
def prune_model(model, sparsity=1):
    layer_with_weights = []
    lw = []
    for layer in model.layers:
        if bool(layer.trainable_weights):
            layer_with_weights.append(layer.get_weights())
            lw.append(layer.name)

    # Flatten all weights to find the global magnitude threshold
    w = layer_with_weights
    w_flat = []
    for item in w:
        for elem in item:
            for x in list(elem.flatten()):
                w_flat.append(x)
    w_flat_abs = [abs(x) for x in w_flat]
    prune_length = int(len(w_flat_abs) * sparsity)
    w_flat_abs_prune = sorted(w_flat_abs)[:prune_length][-1]

    # Zero every weight whose magnitude falls at or below the threshold
    weights_pruned = []
    for item in w:
        wp = []
        for i in range(len(item)):
            pruned = np.where(np.abs(item[i]) <= w_flat_abs_prune, 0, item[i])
            wp.append(pruned)
        weights_pruned.append(wp)

    wd = dict(zip(lw, weights_pruned))
    for layer in model.layers:
        if bool(layer.trainable_weights):
            layer.set_weights(wd[layer.name])

    return model


class TFModel(tf.Module):
    """
    TensorFlow module that takes input tensors and applies:
    – a preprocessing model
    – the ISLR model
    """
    def __init__(self, islr_models):
        """
        Initializes the TFModel with the specified preprocessing model and ISLR models.
        """
        super(TFModel, self).__init__()

        # Load the feature generation and main models
        self.prep_inputs = Preprocess()
        self.islr_models = islr_models

    @tf.function(input_signature=[tf.TensorSpec(shape=[None, 543, 3], dtype=tf.float32, name='inputs')])
    def __call__(self, inputs):
        """
        Applies the feature generation model and main model to the input tensors.

        Args:
            inputs: Input tensor with shape [batch_size, 543, 3].

        Returns:
            A dictionary with a single key 'outputs' and the corresponding output tensor.
        """
        x = self.prep_inputs(inputs)
        outputs = [model(x) for model in self.islr_models]
        outputs = tf.keras.layers.Average()(outputs)[0]
        return {'outputs': outputs}


class TFLiteModel(tf.Module):
    """
    TensorFlow Lite model that takes input tensors and applies:
    – a preprocessing model
    – the ISLR model
    """

    def __init__(self, islr_models):
        """
        Initializes the TFLiteModel with the specified preprocessing model and ISLR models.
        """
        super(TFLiteModel, self).__init__()

        # Load the feature generation and main models
        self.prep_inputs = Preprocess()
        self.islr_models = islr_models

    @tf.function(input_signature=[tf.TensorSpec(shape=[None, 543, 3], dtype=tf.float32, name='inputs')])
    def __call__(self, inputs):
        """
        Applies the feature generation model and main model to the input tensors.

        Args:
            inputs: Input tensor with shape [batch_size, 543, 3].

        Returns:
            A dictionary with a single key 'outputs' and the corresponding output tensor.
        """
        x = self.prep_inputs(tf.cast(inputs, dtype=tf.float32))
        outputs = [model(x) for model in self.islr_models]
        outputs = tf.keras.layers.Average()(outputs)[0]
        return {'outputs': outputs}
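# A minimal sketch (hypothetical; nothing below is called elsewhere in this
# module) of how the flatbuffer produced by the converter in prediction()
# could be executed with the TFLite interpreter. It assumes the converted
# model exposes a single signature with the 'inputs'/'outputs' names
# declared in the tf.function above.
def run_tflite(tflite_model_bytes, parquet_path):
    interpreter = tf.lite.Interpreter(model_content=tflite_model_bytes)
    runner = interpreter.get_signature_runner()  # the model's only signature
    frames = load_relevant_data_subset(parquet_path)  # (n_frames, 543, 3)
    outputs = runner(inputs=frames)['outputs']
    return decoder(int(np.argmax(outputs)))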
475 | """ 476 | x = self.prep_inputs(tf.cast(inputs, dtype=tf.float32)) 477 | outputs = [model(x) for model in self.islr_models] 478 | outputs = tf.keras.layers.Average()(outputs)[0] 479 | return {'outputs': outputs} 480 | 481 | 482 | def prediction(sparisity=1, QUANT=False, PRUNE=False): 483 | TARGET, PRED = [], [] 484 | 485 | # Load target csv 486 | # blob_df = bucket.blob("data/WLASL-data/wlasl_test_new.csv") 487 | # wlasl_test_df = pd.read_csv(blob_df.open()) 488 | # wlasl_test_df = pd.read_csv(r"./wlasl/wlasl_test_new.csv") 489 | 490 | # Load Model 491 | model = models[0] 492 | tflite_keras_model = TFModel(islr_models=models) 493 | if QUANT == True and PRUNE==False: 494 | tflite_keras_model = TFLiteModel(islr_models=models) 495 | elif QUANT == True and PRUNE==True: 496 | pruned_model = prune_model(model, sparisity) 497 | tflite_keras_model = TFLiteModel(islr_models=[pruned_model]) 498 | 499 | # Save Model 500 | keras_model_converter = tf.lite.TFLiteConverter.from_keras_model(tflite_keras_model) 501 | keras_model_converter.optimizations = [tf.lite.Optimize.DEFAULT] 502 | keras_model_converter.target_spec.supported_types = [tf.float16] 503 | tflite_model = keras_model_converter.convert() 504 | 505 | 506 | # Load all files 507 | tffiles_dir = [file.name for file in bucket.list_blobs(prefix='data/WLASL-data/wlasl_parquet_deploy') if '.parquet' in file.name] 508 | tffiles = [os.path.join('gs://capy-data',tffile) for tffile in tffiles_dir if '.parquet' in tffile] 509 | 510 | pq_path = tffiles 511 | # file_data_names = [pq_path + file_name for file_name in os.listdir(pq_path)] 512 | file_data_names = tffiles 513 | for file_data_name in file_data_names: 514 | # Transformer Prediction 515 | # if QUANT == True: 516 | #print(file_data_name) 517 | demo_output = tflite_keras_model(load_relevant_data_subset(file_data_name))["outputs"] 518 | pred_value = decoder(np.argmax(demo_output.numpy(), axis=-1)) 519 | # else: 520 | # demo_output = tflite_keras_model(load_relevant_data_subset(file_data_name)) 521 | # pred_value = decoder(np.argmax(demo_output.numpy(), axis=-1)) 522 | 523 | # Target 524 | # target = load_target(wlasl_test_df, file_data_name) 525 | 526 | # print(f"Target: {target}") 527 | # print(f"Prediction: {pred_value}") 528 | # print("-"*20) 529 | 530 | # TARGET.append(target) 531 | PRED.append(pred_value) 532 | return PRED 533 | 534 | def decoder_accuracy(TARGET,PRED): 535 | correct_predition_count = sum(1 for true, pred in zip(TARGET, PRED) if true == pred) 536 | return correct_predition_count/len(TARGET) 537 | 538 | 539 | def main(args=None): 540 | if args.test: 541 | print("Making Prediction: ") 542 | 543 | PRED = prediction(sparisity=1, QUANT=False, PRUNE=False) 544 | # accuracy = decoder_accuracy(TARGET,PRED) 545 | print(f"Prediction: {PRED}%") 546 | 547 | 548 | if __name__ == "__main__": 549 | # Generate the inputs arguments parser 550 | # if you type into the terminal 'python cli.py --help', it will provide the description 551 | parser = argparse.ArgumentParser(description="Model Inference CLI") 552 | 553 | parser.add_argument( 554 | "-t", 555 | "--test", 556 | action="store_true", 557 | help="Make inference on test set", 558 | ) 559 | 560 | args = parser.parse_args() 561 | 562 | main(args) 563 | -------------------------------------------------------------------------------- /src/model-prediction/docker-shell.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # exit immediately if a command exits with a non-zero status 4 | 
--------------------------------------------------------------------------------
/src/model-prediction/docker-shell.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Exit immediately if a command exits with a non-zero status
set -e

# Define some environment variables
export IMAGE_NAME="capy-model-prediction"
export BASE_DIR=$(pwd)
export SECRETS_DIR=$(pwd)/../../../secrets/ # make sure this matches your secrets directory
export GCS_BUCKET_URI="gs://capy-data"
export GCP_PROJECT="psychic-bedrock-398320"

# Build the image based on the Dockerfile
# docker build -t $IMAGE_NAME -f Dockerfile .
# On Apple-silicon (M1/M2) Macs, build for arm64 instead:
docker build -t $IMAGE_NAME --platform=linux/arm64/v8 -f Dockerfile .

# Run the container
docker run --rm --name $IMAGE_NAME -ti \
  -v "$BASE_DIR":/app \
  -v "$SECRETS_DIR":/secrets \
  -e GOOGLE_APPLICATION_CREDENTIALS=/secrets/model-training.json \
  -e GCP_PROJECT=$GCP_PROJECT \
  -e GCS_BUCKET_URI=$GCS_BUCKET_URI \
  $IMAGE_NAME

--------------------------------------------------------------------------------
/src/model-prediction/entrypoint.sh:
--------------------------------------------------------------------------------
#!/bin/bash

echo "Inference container is running!!!"
# Install dependencies into the pipenv virtual environment
pipenv run pip install -r requirements.txt

args="$@"
echo $args

if [[ -z ${args} ]];
then
    # Authenticate gcloud using the service account
    gcloud auth activate-service-account --key-file $GOOGLE_APPLICATION_CREDENTIALS
    # Set GCP project details
    gcloud config set project $GCP_PROJECT
    # Drop into an interactive shell inside the virtual environment
    pipenv shell
else
    # Run the requested command, e.g. "python cli.py --test"
    pipenv run python $args
fi

--------------------------------------------------------------------------------
/src/model-prediction/islr-fp16-192-8-seed42-foldall-full.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cqzhao918/AI_ASL_Translator/429a795a6b587d489a55a0451ae270040aa079e4/src/model-prediction/islr-fp16-192-8-seed42-foldall-full.h5

--------------------------------------------------------------------------------
/src/model-prediction/requirements.txt:
--------------------------------------------------------------------------------
tensorflow==2.13.0
pyarrow

--------------------------------------------------------------------------------
/src/workflow/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cqzhao918/AI_ASL_Translator/429a795a6b587d489a55a0451ae270040aa079e4/src/workflow/.gitkeep

--------------------------------------------------------------------------------
/test_project.py:
--------------------------------------------------------------------------------
def test_pytest_installed():
    assert "a" == "a"

--------------------------------------------------------------------------------