├── .gitignore
├── LICENSE
├── LICENSE.AGPL
├── README.md
├── docs
│   └── detection_attributes.png
├── openpifpaf_detection_attributes
│   ├── __init__.py
│   ├── datasets
│   │   ├── __init__.py
│   │   ├── annotation.py
│   │   ├── attribute.py
│   │   ├── encoder.py
│   │   ├── generators.py
│   │   ├── headmeta.py
│   │   ├── jaad
│   │   │   ├── __init__.py
│   │   │   ├── annotation.py
│   │   │   ├── attribute.py
│   │   │   ├── datamodule.py
│   │   │   ├── dataset.py
│   │   │   ├── encoder.py
│   │   │   └── transforms.py
│   │   ├── metrics.py
│   │   ├── painter.py
│   │   └── sampler.py
│   └── models
│       ├── __init__.py
│       ├── mtl_grad_fork_norm.py
│       ├── mtlfields
│       │   ├── __init__.py
│       │   ├── basenetwork.py
│       │   ├── decoder.py
│       │   ├── head.py
│       │   └── loss.py
│       └── optics.py
└── requirements.txt
/.gitignore: -------------------------------------------------------------------------------- 1 | # Editors 2 | .vscode/ 3 | .idea/ 4 | 5 | # Vagrant 6 | .vagrant/ 7 | 8 | # Mac/OSX 9 | .DS_Store 10 | 11 | # Windows 12 | Thumbs.db 13 | 14 | # Source for the following rules: https://raw.githubusercontent.com/github/gitignore/master/Python.gitignore 15 | # Byte-compiled / optimized / DLL files 16 | __pycache__/ 17 | *.py[cod] 18 | *$py.class 19 | 20 | # C extensions 21 | *.so 22 | 23 | # Distribution / packaging 24 | .Python 25 | build/ 26 | develop-eggs/ 27 | dist/ 28 | downloads/ 29 | eggs/ 30 | .eggs/ 31 | lib/ 32 | lib64/ 33 | parts/ 34 | sdist/ 35 | var/ 36 | wheels/ 37 | *.egg-info/ 38 | .installed.cfg 39 | *.egg 40 | MANIFEST 41 | 42 | # PyInstaller 43 | # Usually these files are written by a python script from a template 44 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 45 | *.manifest 46 | *.spec 47 | 48 | # Installer logs 49 | pip-log.txt 50 | pip-delete-this-directory.txt 51 | 52 | # Unit test / coverage reports 53 | htmlcov/ 54 | .tox/ 55 | .nox/ 56 | .coverage 57 | .coverage.* 58 | .cache 59 | nosetests.xml 60 | coverage.xml 61 | *.cover 62 | .hypothesis/ 63 | .pytest_cache/ 64 | 65 | # Translations 66 | *.mo 67 | *.pot 68 | 69 | # Django stuff: 70 | *.log 71 | local_settings.py 72 | db.sqlite3 73 | 74 | # Flask stuff: 75 | instance/ 76 | .webassets-cache 77 | 78 | # Scrapy stuff: 79 | .scrapy 80 | 81 | # Sphinx documentation 82 | docs/_build/ 83 | 84 | # PyBuilder 85 | target/ 86 | 87 | # Jupyter Notebook 88 | .ipynb_checkpoints 89 | 90 | # IPython 91 | profile_default/ 92 | ipython_config.py 93 | 94 | # pyenv 95 | .python-version 96 | 97 | # celery beat schedule file 98 | celerybeat-schedule 99 | 100 | # SageMath parsed files 101 | *.sage.py 102 | 103 | # Environments 104 | .env 105 | .venv 106 | env/ 107 | venv/ 108 | ENV/ 109 | env.bak/ 110 | venv.bak/ 111 | 112 | # Spyder project settings 113 | .spyderproject 114 | .spyproject 115 | 116 | # Rope project settings 117 | .ropeproject 118 | 119 | # mkdocs documentation 120 | /site 121 | 122 | # mypy 123 | .mypy_cache/ 124 | .dmypy.json 125 | dmypy.json 126 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2021 by EPFL/VITA. All rights reserved. 2 | 3 | This project and all its files are licensed under 4 | GNU AGPLv3 or later version. 5 | 6 | If this license is not suitable for your business or project 7 | please contact EPFL-TTO (https://tto.epfl.ch/) for a full commercial license. 8 | 9 | This software may not be used to harm any person deliberately.
10 | -------------------------------------------------------------------------------- /LICENSE.AGPL: -------------------------------------------------------------------------------- 1 | GNU AFFERO GENERAL PUBLIC LICENSE 2 | Version 3, 19 November 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | Preamble 9 | 10 | The GNU Affero General Public License is a free, copyleft license for 11 | software and other kinds of works, specifically designed to ensure 12 | cooperation with the community in the case of network server software. 13 | 14 | The licenses for most software and other practical works are designed 15 | to take away your freedom to share and change the works. By contrast, 16 | our General Public Licenses are intended to guarantee your freedom to 17 | share and change all versions of a program--to make sure it remains free 18 | software for all its users. 19 | 20 | When we speak of free software, we are referring to freedom, not 21 | price. Our General Public Licenses are designed to make sure that you 22 | have the freedom to distribute copies of free software (and charge for 23 | them if you wish), that you receive source code or can get it if you 24 | want it, that you can change the software or use pieces of it in new 25 | free programs, and that you know you can do these things. 26 | 27 | Developers that use our General Public Licenses protect your rights 28 | with two steps: (1) assert copyright on the software, and (2) offer 29 | you this License which gives you legal permission to copy, distribute 30 | and/or modify the software. 31 | 32 | A secondary benefit of defending all users' freedom is that 33 | improvements made in alternate versions of the program, if they 34 | receive widespread use, become available for other developers to 35 | incorporate. Many developers of free software are heartened and 36 | encouraged by the resulting cooperation. However, in the case of 37 | software used on network servers, this result may fail to come about. 38 | The GNU General Public License permits making a modified version and 39 | letting the public access it on a server without ever releasing its 40 | source code to the public. 41 | 42 | The GNU Affero General Public License is designed specifically to 43 | ensure that, in such cases, the modified source code becomes available 44 | to the community. It requires the operator of a network server to 45 | provide the source code of the modified version running there to the 46 | users of that server. Therefore, public use of a modified version, on 47 | a publicly accessible server, gives the public access to the source 48 | code of the modified version. 49 | 50 | An older license, called the Affero General Public License and 51 | published by Affero, was designed to accomplish similar goals. This is 52 | a different license, not a version of the Affero GPL, but Affero has 53 | released a new version of the Affero GPL which permits relicensing under 54 | this license. 55 | 56 | The precise terms and conditions for copying, distribution and 57 | modification follow. 58 | 59 | TERMS AND CONDITIONS 60 | 61 | 0. Definitions. 62 | 63 | "This License" refers to version 3 of the GNU Affero General Public License. 64 | 65 | "Copyright" also means copyright-like laws that apply to other kinds of 66 | works, such as semiconductor masks. 
67 | 68 | "The Program" refers to any copyrightable work licensed under this 69 | License. Each licensee is addressed as "you". "Licensees" and 70 | "recipients" may be individuals or organizations. 71 | 72 | To "modify" a work means to copy from or adapt all or part of the work 73 | in a fashion requiring copyright permission, other than the making of an 74 | exact copy. The resulting work is called a "modified version" of the 75 | earlier work or a work "based on" the earlier work. 76 | 77 | A "covered work" means either the unmodified Program or a work based 78 | on the Program. 79 | 80 | To "propagate" a work means to do anything with it that, without 81 | permission, would make you directly or secondarily liable for 82 | infringement under applicable copyright law, except executing it on a 83 | computer or modifying a private copy. Propagation includes copying, 84 | distribution (with or without modification), making available to the 85 | public, and in some countries other activities as well. 86 | 87 | To "convey" a work means any kind of propagation that enables other 88 | parties to make or receive copies. Mere interaction with a user through 89 | a computer network, with no transfer of a copy, is not conveying. 90 | 91 | An interactive user interface displays "Appropriate Legal Notices" 92 | to the extent that it includes a convenient and prominently visible 93 | feature that (1) displays an appropriate copyright notice, and (2) 94 | tells the user that there is no warranty for the work (except to the 95 | extent that warranties are provided), that licensees may convey the 96 | work under this License, and how to view a copy of this License. If 97 | the interface presents a list of user commands or options, such as a 98 | menu, a prominent item in the list meets this criterion. 99 | 100 | 1. Source Code. 101 | 102 | The "source code" for a work means the preferred form of the work 103 | for making modifications to it. "Object code" means any non-source 104 | form of a work. 105 | 106 | A "Standard Interface" means an interface that either is an official 107 | standard defined by a recognized standards body, or, in the case of 108 | interfaces specified for a particular programming language, one that 109 | is widely used among developers working in that language. 110 | 111 | The "System Libraries" of an executable work include anything, other 112 | than the work as a whole, that (a) is included in the normal form of 113 | packaging a Major Component, but which is not part of that Major 114 | Component, and (b) serves only to enable use of the work with that 115 | Major Component, or to implement a Standard Interface for which an 116 | implementation is available to the public in source code form. A 117 | "Major Component", in this context, means a major essential component 118 | (kernel, window system, and so on) of the specific operating system 119 | (if any) on which the executable work runs, or a compiler used to 120 | produce the work, or an object code interpreter used to run it. 121 | 122 | The "Corresponding Source" for a work in object code form means all 123 | the source code needed to generate, install, and (for an executable 124 | work) run the object code and to modify the work, including scripts to 125 | control those activities. However, it does not include the work's 126 | System Libraries, or general-purpose tools or generally available free 127 | programs which are used unmodified in performing those activities but 128 | which are not part of the work. 
For example, Corresponding Source 129 | includes interface definition files associated with source files for 130 | the work, and the source code for shared libraries and dynamically 131 | linked subprograms that the work is specifically designed to require, 132 | such as by intimate data communication or control flow between those 133 | subprograms and other parts of the work. 134 | 135 | The Corresponding Source need not include anything that users 136 | can regenerate automatically from other parts of the Corresponding 137 | Source. 138 | 139 | The Corresponding Source for a work in source code form is that 140 | same work. 141 | 142 | 2. Basic Permissions. 143 | 144 | All rights granted under this License are granted for the term of 145 | copyright on the Program, and are irrevocable provided the stated 146 | conditions are met. This License explicitly affirms your unlimited 147 | permission to run the unmodified Program. The output from running a 148 | covered work is covered by this License only if the output, given its 149 | content, constitutes a covered work. This License acknowledges your 150 | rights of fair use or other equivalent, as provided by copyright law. 151 | 152 | You may make, run and propagate covered works that you do not 153 | convey, without conditions so long as your license otherwise remains 154 | in force. You may convey covered works to others for the sole purpose 155 | of having them make modifications exclusively for you, or provide you 156 | with facilities for running those works, provided that you comply with 157 | the terms of this License in conveying all material for which you do 158 | not control copyright. Those thus making or running the covered works 159 | for you must do so exclusively on your behalf, under your direction 160 | and control, on terms that prohibit them from making any copies of 161 | your copyrighted material outside their relationship with you. 162 | 163 | Conveying under any other circumstances is permitted solely under 164 | the conditions stated below. Sublicensing is not allowed; section 10 165 | makes it unnecessary. 166 | 167 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 168 | 169 | No covered work shall be deemed part of an effective technological 170 | measure under any applicable law fulfilling obligations under article 171 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 172 | similar laws prohibiting or restricting circumvention of such 173 | measures. 174 | 175 | When you convey a covered work, you waive any legal power to forbid 176 | circumvention of technological measures to the extent such circumvention 177 | is effected by exercising rights under this License with respect to 178 | the covered work, and you disclaim any intention to limit operation or 179 | modification of the work as a means of enforcing, against the work's 180 | users, your or third parties' legal rights to forbid circumvention of 181 | technological measures. 182 | 183 | 4. Conveying Verbatim Copies. 184 | 185 | You may convey verbatim copies of the Program's source code as you 186 | receive it, in any medium, provided that you conspicuously and 187 | appropriately publish on each copy an appropriate copyright notice; 188 | keep intact all notices stating that this License and any 189 | non-permissive terms added in accord with section 7 apply to the code; 190 | keep intact all notices of the absence of any warranty; and give all 191 | recipients a copy of this License along with the Program. 
192 | 193 | You may charge any price or no price for each copy that you convey, 194 | and you may offer support or warranty protection for a fee. 195 | 196 | 5. Conveying Modified Source Versions. 197 | 198 | You may convey a work based on the Program, or the modifications to 199 | produce it from the Program, in the form of source code under the 200 | terms of section 4, provided that you also meet all of these conditions: 201 | 202 | a) The work must carry prominent notices stating that you modified 203 | it, and giving a relevant date. 204 | 205 | b) The work must carry prominent notices stating that it is 206 | released under this License and any conditions added under section 207 | 7. This requirement modifies the requirement in section 4 to 208 | "keep intact all notices". 209 | 210 | c) You must license the entire work, as a whole, under this 211 | License to anyone who comes into possession of a copy. This 212 | License will therefore apply, along with any applicable section 7 213 | additional terms, to the whole of the work, and all its parts, 214 | regardless of how they are packaged. This License gives no 215 | permission to license the work in any other way, but it does not 216 | invalidate such permission if you have separately received it. 217 | 218 | d) If the work has interactive user interfaces, each must display 219 | Appropriate Legal Notices; however, if the Program has interactive 220 | interfaces that do not display Appropriate Legal Notices, your 221 | work need not make them do so. 222 | 223 | A compilation of a covered work with other separate and independent 224 | works, which are not by their nature extensions of the covered work, 225 | and which are not combined with it such as to form a larger program, 226 | in or on a volume of a storage or distribution medium, is called an 227 | "aggregate" if the compilation and its resulting copyright are not 228 | used to limit the access or legal rights of the compilation's users 229 | beyond what the individual works permit. Inclusion of a covered work 230 | in an aggregate does not cause this License to apply to the other 231 | parts of the aggregate. 232 | 233 | 6. Conveying Non-Source Forms. 234 | 235 | You may convey a covered work in object code form under the terms 236 | of sections 4 and 5, provided that you also convey the 237 | machine-readable Corresponding Source under the terms of this License, 238 | in one of these ways: 239 | 240 | a) Convey the object code in, or embodied in, a physical product 241 | (including a physical distribution medium), accompanied by the 242 | Corresponding Source fixed on a durable physical medium 243 | customarily used for software interchange. 244 | 245 | b) Convey the object code in, or embodied in, a physical product 246 | (including a physical distribution medium), accompanied by a 247 | written offer, valid for at least three years and valid for as 248 | long as you offer spare parts or customer support for that product 249 | model, to give anyone who possesses the object code either (1) a 250 | copy of the Corresponding Source for all the software in the 251 | product that is covered by this License, on a durable physical 252 | medium customarily used for software interchange, for a price no 253 | more than your reasonable cost of physically performing this 254 | conveying of source, or (2) access to copy the 255 | Corresponding Source from a network server at no charge. 
256 | 257 | c) Convey individual copies of the object code with a copy of the 258 | written offer to provide the Corresponding Source. This 259 | alternative is allowed only occasionally and noncommercially, and 260 | only if you received the object code with such an offer, in accord 261 | with subsection 6b. 262 | 263 | d) Convey the object code by offering access from a designated 264 | place (gratis or for a charge), and offer equivalent access to the 265 | Corresponding Source in the same way through the same place at no 266 | further charge. You need not require recipients to copy the 267 | Corresponding Source along with the object code. If the place to 268 | copy the object code is a network server, the Corresponding Source 269 | may be on a different server (operated by you or a third party) 270 | that supports equivalent copying facilities, provided you maintain 271 | clear directions next to the object code saying where to find the 272 | Corresponding Source. Regardless of what server hosts the 273 | Corresponding Source, you remain obligated to ensure that it is 274 | available for as long as needed to satisfy these requirements. 275 | 276 | e) Convey the object code using peer-to-peer transmission, provided 277 | you inform other peers where the object code and Corresponding 278 | Source of the work are being offered to the general public at no 279 | charge under subsection 6d. 280 | 281 | A separable portion of the object code, whose source code is excluded 282 | from the Corresponding Source as a System Library, need not be 283 | included in conveying the object code work. 284 | 285 | A "User Product" is either (1) a "consumer product", which means any 286 | tangible personal property which is normally used for personal, family, 287 | or household purposes, or (2) anything designed or sold for incorporation 288 | into a dwelling. In determining whether a product is a consumer product, 289 | doubtful cases shall be resolved in favor of coverage. For a particular 290 | product received by a particular user, "normally used" refers to a 291 | typical or common use of that class of product, regardless of the status 292 | of the particular user or of the way in which the particular user 293 | actually uses, or expects or is expected to use, the product. A product 294 | is a consumer product regardless of whether the product has substantial 295 | commercial, industrial or non-consumer uses, unless such uses represent 296 | the only significant mode of use of the product. 297 | 298 | "Installation Information" for a User Product means any methods, 299 | procedures, authorization keys, or other information required to install 300 | and execute modified versions of a covered work in that User Product from 301 | a modified version of its Corresponding Source. The information must 302 | suffice to ensure that the continued functioning of the modified object 303 | code is in no case prevented or interfered with solely because 304 | modification has been made. 305 | 306 | If you convey an object code work under this section in, or with, or 307 | specifically for use in, a User Product, and the conveying occurs as 308 | part of a transaction in which the right of possession and use of the 309 | User Product is transferred to the recipient in perpetuity or for a 310 | fixed term (regardless of how the transaction is characterized), the 311 | Corresponding Source conveyed under this section must be accompanied 312 | by the Installation Information. 
But this requirement does not apply 313 | if neither you nor any third party retains the ability to install 314 | modified object code on the User Product (for example, the work has 315 | been installed in ROM). 316 | 317 | The requirement to provide Installation Information does not include a 318 | requirement to continue to provide support service, warranty, or updates 319 | for a work that has been modified or installed by the recipient, or for 320 | the User Product in which it has been modified or installed. Access to a 321 | network may be denied when the modification itself materially and 322 | adversely affects the operation of the network or violates the rules and 323 | protocols for communication across the network. 324 | 325 | Corresponding Source conveyed, and Installation Information provided, 326 | in accord with this section must be in a format that is publicly 327 | documented (and with an implementation available to the public in 328 | source code form), and must require no special password or key for 329 | unpacking, reading or copying. 330 | 331 | 7. Additional Terms. 332 | 333 | "Additional permissions" are terms that supplement the terms of this 334 | License by making exceptions from one or more of its conditions. 335 | Additional permissions that are applicable to the entire Program shall 336 | be treated as though they were included in this License, to the extent 337 | that they are valid under applicable law. If additional permissions 338 | apply only to part of the Program, that part may be used separately 339 | under those permissions, but the entire Program remains governed by 340 | this License without regard to the additional permissions. 341 | 342 | When you convey a copy of a covered work, you may at your option 343 | remove any additional permissions from that copy, or from any part of 344 | it. (Additional permissions may be written to require their own 345 | removal in certain cases when you modify the work.) You may place 346 | additional permissions on material, added by you to a covered work, 347 | for which you have or can give appropriate copyright permission. 348 | 349 | Notwithstanding any other provision of this License, for material you 350 | add to a covered work, you may (if authorized by the copyright holders of 351 | that material) supplement the terms of this License with terms: 352 | 353 | a) Disclaiming warranty or limiting liability differently from the 354 | terms of sections 15 and 16 of this License; or 355 | 356 | b) Requiring preservation of specified reasonable legal notices or 357 | author attributions in that material or in the Appropriate Legal 358 | Notices displayed by works containing it; or 359 | 360 | c) Prohibiting misrepresentation of the origin of that material, or 361 | requiring that modified versions of such material be marked in 362 | reasonable ways as different from the original version; or 363 | 364 | d) Limiting the use for publicity purposes of names of licensors or 365 | authors of the material; or 366 | 367 | e) Declining to grant rights under trademark law for use of some 368 | trade names, trademarks, or service marks; or 369 | 370 | f) Requiring indemnification of licensors and authors of that 371 | material by anyone who conveys the material (or modified versions of 372 | it) with contractual assumptions of liability to the recipient, for 373 | any liability that these contractual assumptions directly impose on 374 | those licensors and authors. 
375 | 376 | All other non-permissive additional terms are considered "further 377 | restrictions" within the meaning of section 10. If the Program as you 378 | received it, or any part of it, contains a notice stating that it is 379 | governed by this License along with a term that is a further 380 | restriction, you may remove that term. If a license document contains 381 | a further restriction but permits relicensing or conveying under this 382 | License, you may add to a covered work material governed by the terms 383 | of that license document, provided that the further restriction does 384 | not survive such relicensing or conveying. 385 | 386 | If you add terms to a covered work in accord with this section, you 387 | must place, in the relevant source files, a statement of the 388 | additional terms that apply to those files, or a notice indicating 389 | where to find the applicable terms. 390 | 391 | Additional terms, permissive or non-permissive, may be stated in the 392 | form of a separately written license, or stated as exceptions; 393 | the above requirements apply either way. 394 | 395 | 8. Termination. 396 | 397 | You may not propagate or modify a covered work except as expressly 398 | provided under this License. Any attempt otherwise to propagate or 399 | modify it is void, and will automatically terminate your rights under 400 | this License (including any patent licenses granted under the third 401 | paragraph of section 11). 402 | 403 | However, if you cease all violation of this License, then your 404 | license from a particular copyright holder is reinstated (a) 405 | provisionally, unless and until the copyright holder explicitly and 406 | finally terminates your license, and (b) permanently, if the copyright 407 | holder fails to notify you of the violation by some reasonable means 408 | prior to 60 days after the cessation. 409 | 410 | Moreover, your license from a particular copyright holder is 411 | reinstated permanently if the copyright holder notifies you of the 412 | violation by some reasonable means, this is the first time you have 413 | received notice of violation of this License (for any work) from that 414 | copyright holder, and you cure the violation prior to 30 days after 415 | your receipt of the notice. 416 | 417 | Termination of your rights under this section does not terminate the 418 | licenses of parties who have received copies or rights from you under 419 | this License. If your rights have been terminated and not permanently 420 | reinstated, you do not qualify to receive new licenses for the same 421 | material under section 10. 422 | 423 | 9. Acceptance Not Required for Having Copies. 424 | 425 | You are not required to accept this License in order to receive or 426 | run a copy of the Program. Ancillary propagation of a covered work 427 | occurring solely as a consequence of using peer-to-peer transmission 428 | to receive a copy likewise does not require acceptance. However, 429 | nothing other than this License grants you permission to propagate or 430 | modify any covered work. These actions infringe copyright if you do 431 | not accept this License. Therefore, by modifying or propagating a 432 | covered work, you indicate your acceptance of this License to do so. 433 | 434 | 10. Automatic Licensing of Downstream Recipients. 435 | 436 | Each time you convey a covered work, the recipient automatically 437 | receives a license from the original licensors, to run, modify and 438 | propagate that work, subject to this License. 
You are not responsible 439 | for enforcing compliance by third parties with this License. 440 | 441 | An "entity transaction" is a transaction transferring control of an 442 | organization, or substantially all assets of one, or subdividing an 443 | organization, or merging organizations. If propagation of a covered 444 | work results from an entity transaction, each party to that 445 | transaction who receives a copy of the work also receives whatever 446 | licenses to the work the party's predecessor in interest had or could 447 | give under the previous paragraph, plus a right to possession of the 448 | Corresponding Source of the work from the predecessor in interest, if 449 | the predecessor has it or can get it with reasonable efforts. 450 | 451 | You may not impose any further restrictions on the exercise of the 452 | rights granted or affirmed under this License. For example, you may 453 | not impose a license fee, royalty, or other charge for exercise of 454 | rights granted under this License, and you may not initiate litigation 455 | (including a cross-claim or counterclaim in a lawsuit) alleging that 456 | any patent claim is infringed by making, using, selling, offering for 457 | sale, or importing the Program or any portion of it. 458 | 459 | 11. Patents. 460 | 461 | A "contributor" is a copyright holder who authorizes use under this 462 | License of the Program or a work on which the Program is based. The 463 | work thus licensed is called the contributor's "contributor version". 464 | 465 | A contributor's "essential patent claims" are all patent claims 466 | owned or controlled by the contributor, whether already acquired or 467 | hereafter acquired, that would be infringed by some manner, permitted 468 | by this License, of making, using, or selling its contributor version, 469 | but do not include claims that would be infringed only as a 470 | consequence of further modification of the contributor version. For 471 | purposes of this definition, "control" includes the right to grant 472 | patent sublicenses in a manner consistent with the requirements of 473 | this License. 474 | 475 | Each contributor grants you a non-exclusive, worldwide, royalty-free 476 | patent license under the contributor's essential patent claims, to 477 | make, use, sell, offer for sale, import and otherwise run, modify and 478 | propagate the contents of its contributor version. 479 | 480 | In the following three paragraphs, a "patent license" is any express 481 | agreement or commitment, however denominated, not to enforce a patent 482 | (such as an express permission to practice a patent or covenant not to 483 | sue for patent infringement). To "grant" such a patent license to a 484 | party means to make such an agreement or commitment not to enforce a 485 | patent against the party. 486 | 487 | If you convey a covered work, knowingly relying on a patent license, 488 | and the Corresponding Source of the work is not available for anyone 489 | to copy, free of charge and under the terms of this License, through a 490 | publicly available network server or other readily accessible means, 491 | then you must either (1) cause the Corresponding Source to be so 492 | available, or (2) arrange to deprive yourself of the benefit of the 493 | patent license for this particular work, or (3) arrange, in a manner 494 | consistent with the requirements of this License, to extend the patent 495 | license to downstream recipients. 
"Knowingly relying" means you have 496 | actual knowledge that, but for the patent license, your conveying the 497 | covered work in a country, or your recipient's use of the covered work 498 | in a country, would infringe one or more identifiable patents in that 499 | country that you have reason to believe are valid. 500 | 501 | If, pursuant to or in connection with a single transaction or 502 | arrangement, you convey, or propagate by procuring conveyance of, a 503 | covered work, and grant a patent license to some of the parties 504 | receiving the covered work authorizing them to use, propagate, modify 505 | or convey a specific copy of the covered work, then the patent license 506 | you grant is automatically extended to all recipients of the covered 507 | work and works based on it. 508 | 509 | A patent license is "discriminatory" if it does not include within 510 | the scope of its coverage, prohibits the exercise of, or is 511 | conditioned on the non-exercise of one or more of the rights that are 512 | specifically granted under this License. You may not convey a covered 513 | work if you are a party to an arrangement with a third party that is 514 | in the business of distributing software, under which you make payment 515 | to the third party based on the extent of your activity of conveying 516 | the work, and under which the third party grants, to any of the 517 | parties who would receive the covered work from you, a discriminatory 518 | patent license (a) in connection with copies of the covered work 519 | conveyed by you (or copies made from those copies), or (b) primarily 520 | for and in connection with specific products or compilations that 521 | contain the covered work, unless you entered into that arrangement, 522 | or that patent license was granted, prior to 28 March 2007. 523 | 524 | Nothing in this License shall be construed as excluding or limiting 525 | any implied license or other defenses to infringement that may 526 | otherwise be available to you under applicable patent law. 527 | 528 | 12. No Surrender of Others' Freedom. 529 | 530 | If conditions are imposed on you (whether by court order, agreement or 531 | otherwise) that contradict the conditions of this License, they do not 532 | excuse you from the conditions of this License. If you cannot convey a 533 | covered work so as to satisfy simultaneously your obligations under this 534 | License and any other pertinent obligations, then as a consequence you may 535 | not convey it at all. For example, if you agree to terms that obligate you 536 | to collect a royalty for further conveying from those to whom you convey 537 | the Program, the only way you could satisfy both those terms and this 538 | License would be to refrain entirely from conveying the Program. 539 | 540 | 13. Remote Network Interaction; Use with the GNU General Public License. 541 | 542 | Notwithstanding any other provision of this License, if you modify the 543 | Program, your modified version must prominently offer all users 544 | interacting with it remotely through a computer network (if your version 545 | supports such interaction) an opportunity to receive the Corresponding 546 | Source of your version by providing access to the Corresponding Source 547 | from a network server at no charge, through some standard or customary 548 | means of facilitating copying of software. 
This Corresponding Source 549 | shall include the Corresponding Source for any work covered by version 3 550 | of the GNU General Public License that is incorporated pursuant to the 551 | following paragraph. 552 | 553 | Notwithstanding any other provision of this License, you have 554 | permission to link or combine any covered work with a work licensed 555 | under version 3 of the GNU General Public License into a single 556 | combined work, and to convey the resulting work. The terms of this 557 | License will continue to apply to the part which is the covered work, 558 | but the work with which it is combined will remain governed by version 559 | 3 of the GNU General Public License. 560 | 561 | 14. Revised Versions of this License. 562 | 563 | The Free Software Foundation may publish revised and/or new versions of 564 | the GNU Affero General Public License from time to time. Such new versions 565 | will be similar in spirit to the present version, but may differ in detail to 566 | address new problems or concerns. 567 | 568 | Each version is given a distinguishing version number. If the 569 | Program specifies that a certain numbered version of the GNU Affero General 570 | Public License "or any later version" applies to it, you have the 571 | option of following the terms and conditions either of that numbered 572 | version or of any later version published by the Free Software 573 | Foundation. If the Program does not specify a version number of the 574 | GNU Affero General Public License, you may choose any version ever published 575 | by the Free Software Foundation. 576 | 577 | If the Program specifies that a proxy can decide which future 578 | versions of the GNU Affero General Public License can be used, that proxy's 579 | public statement of acceptance of a version permanently authorizes you 580 | to choose that version for the Program. 581 | 582 | Later license versions may give you additional or different 583 | permissions. However, no additional obligations are imposed on any 584 | author or copyright holder as a result of your choosing to follow a 585 | later version. 586 | 587 | 15. Disclaimer of Warranty. 588 | 589 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 590 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 591 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 592 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 593 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 594 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 595 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 596 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 597 | 598 | 16. Limitation of Liability. 599 | 600 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 601 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 602 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 603 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 604 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 605 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 606 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 607 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 608 | SUCH DAMAGES. 609 | 610 | 17. Interpretation of Sections 15 and 16. 
611 | 612 | If the disclaimer of warranty and limitation of liability provided 613 | above cannot be given local legal effect according to their terms, 614 | reviewing courts shall apply local law that most closely approximates 615 | an absolute waiver of all civil liability in connection with the 616 | Program, unless a warranty or assumption of liability accompanies a 617 | copy of the Program in return for a fee. 618 | 619 | END OF TERMS AND CONDITIONS 620 | 621 | How to Apply These Terms to Your New Programs 622 | 623 | If you develop a new program, and you want it to be of the greatest 624 | possible use to the public, the best way to achieve this is to make it 625 | free software which everyone can redistribute and change under these terms. 626 | 627 | To do so, attach the following notices to the program. It is safest 628 | to attach them to the start of each source file to most effectively 629 | state the exclusion of warranty; and each file should have at least 630 | the "copyright" line and a pointer to where the full notice is found. 631 | 632 | <one line to give the program's name and a brief idea of what it does.> 633 | Copyright (C) <year>  <name of author> 634 | 635 | This program is free software: you can redistribute it and/or modify 636 | it under the terms of the GNU Affero General Public License as published by 637 | the Free Software Foundation, either version 3 of the License, or 638 | (at your option) any later version. 639 | 640 | This program is distributed in the hope that it will be useful, 641 | but WITHOUT ANY WARRANTY; without even the implied warranty of 642 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 643 | GNU Affero General Public License for more details. 644 | 645 | You should have received a copy of the GNU Affero General Public License 646 | along with this program. If not, see <https://www.gnu.org/licenses/>. 647 | 648 | Also add information on how to contact you by electronic and paper mail. 649 | 650 | If your software can interact with users remotely through a computer 651 | network, you should also make sure that it provides a way for users to 652 | get its source. For example, if your program is a web application, its 653 | interface could display a "Source" link that leads users to an archive 654 | of the code. There are many ways you could offer source, and different 655 | solutions will be better for different programs; see section 13 for the 656 | specific requirements. 657 | 658 | You should also get your employer (if you work as a programmer) or school, 659 | if any, to sign a "copyright disclaimer" for the program, if necessary. 660 | For more information on this, and how to apply and follow the GNU AGPL, see 661 | <https://www.gnu.org/licenses/>. 662 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Object Detection and Attribute Recognition with Fields 2 | 3 | [PyTorch](https://pytorch.org/) implementation of the paper [Detecting 32 Pedestrian Attributes for Autonomous Vehicles](https://arxiv.org/abs/2012.02647) by Taylor Mordan (EPFL/VITA), Matthieu Cord (Sorbonne Université, valeo.ai), Patrick Pérez (valeo.ai) and Alexandre Alahi (EPFL/VITA). 4 | 5 | 6 | #### Abstract 7 | 8 | > Detecting 32 Pedestrian Attributes for Autonomous Vehicles 9 | > 10 | >Pedestrians are arguably one of the most safety-critical road users to consider for autonomous vehicles in urban areas. 11 | >In this paper, we address the problem of jointly detecting pedestrians and recognizing 32 pedestrian attributes from a single image.
12 | >These encompass visual appearance and behavior, and also include the forecasting of road crossing, which is a main safety concern. 13 | >For this, we introduce a Multi-Task Learning (MTL) model relying on a composite field framework, which achieves both goals in an efficient way. 14 | >Each field spatially locates pedestrian instances and aggregates attribute predictions over them. 15 | >This formulation naturally leverages spatial context, making it well suited to low resolution scenarios such as autonomous driving. 16 | >By increasing the number of attributes jointly learned, we highlight an issue related to the scales of gradients, which arises in MTL with numerous tasks. 17 | >We solve it by normalizing the gradients coming from different objective functions when they join at the fork in the network architecture during the backward pass, referred to as fork-normalization. 18 | >Experimental validation is performed on JAAD, a dataset providing numerous attributes for pedestrian analysis from autonomous vehicles, and shows competitive detection and attribute recognition results, as well as a more stable MTL training. 19 | 20 | ![detection_schema](docs/detection_attributes.png) 21 | 22 | The MTL-Fields model learns multiple fields for both object detection and attribute recognition in a Multi-Task Learning (MTL) fashion. 23 | Training is performed on full images, with a dedicated field and image-wise loss function for each task; at inference, a post-processing instance-wise decoding step yields a bounding box and all attributes for each detected instance. 24 | The model is applied to the JAAD dataset to detect up to 32 pedestrian attributes in an autonomous driving scenario. 25 | 26 | MTL-Fields also normalizes gradients during the backward pass (fork-normalization) to address the gradient-scale issues that arise when learning numerous tasks. 27 | 28 | 29 | ### Table of Contents 30 | 31 | - [Installation](#installation) 32 | - [Dataset](#dataset) 33 | - [Interfaces](#interfaces) 34 | - [Training](#training) 35 | - [Evaluation](#evaluation) 36 | - [Project structure](#project-structure) 37 | - [License](#license) 38 | - [Citation](#citation) 39 | - [Acknowledgements](#acknowledgements) 40 | 41 | 42 | ## Installation 43 | 44 | Clone this repository in order to use it. 45 | ``` 46 | # To clone the repository using HTTPS 47 | git clone https://github.com/vita-epfl/detection-attributes-fields 48 | cd detection-attributes-fields/ 49 | ``` 50 | 51 | All dependencies can be found in the `requirements.txt` file. 52 | ``` 53 | # To install dependencies 54 | pip3 install -r requirements.txt 55 | ``` 56 | 57 | This project has been tested with Python 3.7.7, PyTorch 1.9.1, CUDA 10.2 and OpenPifPaf 0.13.0. 58 | 59 | 60 | ## Dataset 61 | 62 | This project uses the [JAAD](http://data.nvision2.eecs.yorku.ca/JAAD_dataset/) dataset for training and evaluation. 63 | 64 | Please refer to the JAAD documentation to download the dataset. 65 | 66 | 67 | ## Interfaces 68 | 69 | This project is implemented as an [OpenPifPaf](https://github.com/openpifpaf/openpifpaf) plugin module. 70 | As such, it benefits from all the core capabilities offered by OpenPifPaf, and only implements the additional functions it needs. 71 | 72 | All the commands can be run through OpenPifPaf's interface using subparsers. 73 | Help can be obtained for any of them with the option `--help`. 74 | More information can be found in the [OpenPifPaf documentation](https://openpifpaf.github.io/intro.html).
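
As a quick check that the plugin is visible to OpenPifPaf, the snippet below can be run from an environment where this repository is on the Python path. It is a minimal sketch, assuming your OpenPifPaf version exposes the `DATAMODULES` registry used for datamodule registration (as in the 0.13 series this project was tested with):
```
import openpifpaf

# OpenPifPaf looks for installed packages whose name starts with
# 'openpifpaf_' and calls their register() function, which adds the
# 'jaad' datamodule to the registry.
print('jaad' in openpifpaf.DATAMODULES)  # expected: True
```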
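
The fork-normalization mentioned above (and selected with the `--fork-normalization-*` options in the training command below) can be pictured with a short PyTorch sketch. This is a conceptual illustration only, not the implementation shipped in `models/mtl_grad_fork_norm.py`; it uses a plain division by the number of tasks, whereas the repository supports other operations such as the `power` one used in the training command:
```
import torch

def fork_normalize(shared_features, n_tasks):
    # Gradients from all task heads accumulate into a single gradient at
    # the fork during the backward pass; rescaling that gradient keeps its
    # magnitude comparable to the single-task case.
    if shared_features.requires_grad:
        shared_features.register_hook(lambda grad: grad / n_tasks)
    return shared_features

# Toy usage: one shared tensor feeding two task losses.
trunk_out = torch.randn(1, 16, requires_grad=True)
features = fork_normalize(trunk_out, n_tasks=2)
loss = features.sum() + (features ** 2).sum()  # stand-ins for two task losses
loss.backward()  # trunk_out.grad now holds the rescaled summed gradient
```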
75 | 76 | 77 | ## Training 78 | 79 | Training is done using the subparser `openpifpaf.train`. 80 | 81 | Training on JAAD with all attributes can be run with the command: 82 | ``` 83 | python3 -m openpifpaf.train \ 84 | --output <path/to/output> \ 85 | --dataset jaad \ 86 | --jaad-root-dir <path/to/JAAD> \ 87 | --jaad-subset default \ 88 | --jaad-training-set train \ 89 | --jaad-validation-set val \ 90 | --log-interval 10 \ 91 | --val-interval 1 \ 92 | --epochs 5 \ 93 | --batch-size 4 \ 94 | --lr 0.0005 \ 95 | --lr-warm-up-start-epoch -1 \ 96 | --weight-decay 5e-4 \ 97 | --momentum 0.95 \ 98 | --basenet fn-resnet50 \ 99 | --pifpaf-pretraining \ 100 | --detection-bias-prior 0.01 \ 101 | --jaad-head-upsample 2 \ 102 | --jaad-pedestrian-attributes all \ 103 | --fork-normalization-operation power \ 104 | --fork-normalization-duplicates 35 \ 105 | --lambdas 7.0 7.0 7.0 7.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 \ 106 | --attribute-regression-loss l1 \ 107 | --attribute-focal-gamma 2 \ 108 | --auto-tune-mtl 109 | ``` 110 | Arguments should be adapted to your setup as needed. 111 | 112 | More information about the options can be obtained with the command: 113 | ``` 114 | python3 -m openpifpaf.train --help 115 | ``` 116 | 117 | 118 | ## Evaluation 119 | 120 | Evaluation of a checkpoint is done using the subparser `openpifpaf.eval`. 121 | 122 | Evaluation on JAAD with all attributes can be run with the command: 123 | ``` 124 | python3 -m openpifpaf.eval \ 125 | --output <path/to/output> \ 126 | --dataset jaad \ 127 | --jaad-root-dir <path/to/JAAD> \ 128 | --jaad-subset default \ 129 | --jaad-testing-set test \ 130 | --checkpoint <path/to/checkpoint> \ 131 | --batch-size 1 \ 132 | --jaad-head-upsample 2 \ 133 | --jaad-pedestrian-attributes all \ 134 | --head-consolidation filter_and_extend \ 135 | --decoder instancedecoder:0 \ 136 | --decoder-s-threshold 0.2 \ 137 | --decoder-optics-min-cluster-size 10 \ 138 | --decoder-optics-epsilon 5.0 \ 139 | --decoder-optics-cluster-threshold 0.5 140 | ``` 141 | Arguments should be adapted to your setup as needed. 142 | 143 | With the option `--write-predictions`, a JSON file with predictions is written as an additional output. 144 | 145 | With the option `--show-final-image`, images with predictions displayed on them are written to the folder given by the option `--save-all <folder>`. 146 | To also display ground-truth annotations, add the option `--show-final-ground-truth`. 147 | 148 | More information about the options can be obtained with the command: 149 | ``` 150 | python3 -m openpifpaf.eval --help 151 | ``` 152 | 153 | 154 | ## Project structure 155 | 156 | The code is organized as follows: 157 | ``` 158 | openpifpaf_detection_attributes/ 159 | ├── datasets/ 160 | │   ├── jaad/ 161 | │   ├── (+ common files for datasets) 162 | │   └── (add new datasets here) 163 | └── models/ 164 |     ├── mtlfields/ 165 |     ├── (+ common files for models) 166 |     └── (add new models here) 167 | ``` 168 | 169 | 170 | ## License 171 | 172 | This project is built upon [OpenPifPaf](https://openpifpaf.github.io/intro.html) and shares the AGPL License. 173 | 174 | This software is also available for commercial licensing via the EPFL Technology Transfer 175 | Office (https://tto.epfl.ch/, info.tto@epfl.ch).
176 | 177 | 178 | ## Citation 179 | 180 | If you use this project in your research, please cite the corresponding paper: 181 | ```text 182 | @article{mordan2021detecting, 183 | title={Detecting 32 Pedestrian Attributes for Autonomous Vehicles}, 184 | author={Mordan, Taylor and Cord, Matthieu and P{\'e}rez, Patrick and Alahi, Alexandre}, 185 | journal={IEEE Transactions on Intelligent Transportation Systems (T-ITS)}, 186 | year={2021}, 187 | doi={10.1109/TITS.2021.3107587} 188 | } 189 | ``` 190 | 191 | 192 | ## Acknowledgements 193 | 194 | We would like to thank Valeo for funding our work, and Sven Kreiss for the OpenPifPaf Plugin architecture. 195 | -------------------------------------------------------------------------------- /docs/detection_attributes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vita-epfl/detection-attributes-fields/6e83eec5914dd464fb15e79a3d1d79ab44012f18/docs/detection_attributes.png -------------------------------------------------------------------------------- /openpifpaf_detection_attributes/__init__.py: -------------------------------------------------------------------------------- 1 | from . import datasets 2 | from . import models 3 | 4 | 5 | def register(): 6 | datasets.register() 7 | models.register() 8 | -------------------------------------------------------------------------------- /openpifpaf_detection_attributes/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | from . import jaad 2 | 3 | 4 | def register(): 5 | jaad.register() 6 | -------------------------------------------------------------------------------- /openpifpaf_detection_attributes/datasets/annotation.py: -------------------------------------------------------------------------------- 1 | from abc import abstractmethod 2 | from typing import Dict 3 | 4 | import openpifpaf 5 | 6 | from .attribute import ObjectType 7 | 8 | 9 | class AnnotationAttr(openpifpaf.annotation.Base): 10 | """Annotation class for a detected instance.""" 11 | 12 | object_type = None 13 | attribute_metas = None 14 | 15 | 16 | def __init__(self, **kwargs): 17 | self.id = kwargs['id'] if 'id' in kwargs else None 18 | self.ignore_eval = kwargs['ignore_eval'] if 'ignore_eval' in kwargs else None 19 | self.attributes = {} 20 | for meta in self.attribute_metas: 21 | if meta['attribute'] in kwargs: 22 | self.attributes[meta['attribute']] = kwargs[meta['attribute']] 23 | 24 | 25 | @abstractmethod 26 | def inverse_transform(self, meta): 27 | """Inverse data augmentation to get annotations on original images. 28 | Needs to be implemented for every type of object. 
29 | """ 30 | raise NotImplementedError 31 | 32 | 33 | def json_data(self): 34 | return {'object_type': self.object_type.name, **self.attributes} 35 | 36 | 37 | """List of annotations for every dataset and object type.""" 38 | OBJECT_ANNOTATIONS: Dict[str, Dict[ObjectType, AnnotationAttr]] = {} 39 | -------------------------------------------------------------------------------- /openpifpaf_detection_attributes/datasets/attribute.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | from typing import Dict 3 | 4 | 5 | class ObjectType(Enum): 6 | """Enum type for categories of objects.""" 7 | 8 | def __repr__(self): 9 | return '<%s.%s>' % (self.__class__.__name__, self.name) 10 | 11 | 12 | def __new__(cls): 13 | value = len(cls.__members__) + 1 14 | obj = object.__new__(cls) 15 | obj._value_ = value 16 | return obj 17 | 18 | 19 | """List of object types for every dataset.""" 20 | OBJECT_TYPES: Dict[str, ObjectType] = {} 21 | """List of attribute meta information for every dataset and object type.""" 22 | ATTRIBUTE_METAS: Dict[str, Dict[ObjectType, list]] = {} 23 | 24 | 25 | def get_attribute_metas(dataset: str, 26 | attributes: Dict[ObjectType, list]): 27 | assert dataset in OBJECT_TYPES 28 | assert dataset in ATTRIBUTE_METAS 29 | att_metas = [] 30 | for object_type in OBJECT_TYPES[dataset]: 31 | if ( 32 | (object_type in attributes) 33 | and (object_type in ATTRIBUTE_METAS[dataset]) 34 | ): 35 | att_metas += [{'object_type': object_type, **am} 36 | for am in ATTRIBUTE_METAS[dataset][object_type] if ( 37 | (am['attribute'] in attributes[object_type]) 38 | or (am['group'] in attributes[object_type]) 39 | or ('all' in attributes[object_type]) 40 | )] 41 | return att_metas 42 | -------------------------------------------------------------------------------- /openpifpaf_detection_attributes/datasets/encoder.py: -------------------------------------------------------------------------------- 1 | from abc import abstractmethod 2 | from enum import auto, Enum 3 | import logging 4 | from typing import Dict 5 | 6 | from .attribute import ObjectType 7 | from .headmeta import AttributeMeta 8 | 9 | 10 | LOG = logging.getLogger(__name__) 11 | 12 | 13 | class AnnotationRescaler: 14 | """Rescale images and annotations based on stride of network. 15 | 16 | Args: 17 | stride (int): Factor to divide dimensions by. 18 | object_type (ObjectType): Category of object annotated. 19 | """ 20 | 21 | def __init__(self, stride: int, object_type: ObjectType): 22 | self.stride = stride 23 | self.object_type = object_type 24 | 25 | 26 | def valid_area(self, meta): 27 | if 'valid_area' not in meta: 28 | return None 29 | 30 | return ( 31 | meta['valid_area'][0] / self.stride, 32 | meta['valid_area'][1] / self.stride, 33 | meta['valid_area'][2] / self.stride, 34 | meta['valid_area'][3] / self.stride, 35 | ) 36 | 37 | 38 | @abstractmethod 39 | def objects(self, anns): 40 | """Rescale and return object annotations of given type. 41 | Needs to be implemented for every object type. 42 | """ 43 | raise NotImplementedError 44 | 45 | 46 | def width_height(self, width_height_original): 47 | return [round((width_height_original[0]-1) / self.stride + 1), 48 | round((width_height_original[1]-1) / self.stride + 1)] 49 | 50 | 51 | class AttributeEncoder: 52 | """Convert annotations to target feature maps. 53 | 54 | Args: 55 | meta (AttributeMeta): Description of the attribute. 56 | rescaler (AnnotationRescaler): Rescaler corresponding to object type. 
57 | """ 58 | 59 | def __init__(self, 60 | meta: AttributeMeta, 61 | rescaler: AnnotationRescaler = None, 62 | **kwargs): 63 | self.meta = meta 64 | self.rescaler = rescaler 65 | self.__dict__.update(kwargs) 66 | 67 | 68 | def __call__(self, image, anns, meta): 69 | generator = ATTRIBUTE_GENERATORS[self.meta.dataset][self.meta.object_type] 70 | return generator(self)(image, anns, meta) 71 | 72 | 73 | class AttributeGenerator: 74 | """Compute target feature map for an attribute. 75 | 76 | Args: 77 | config (AttributeEncoder): Meta information about how to handle the 78 | attribute. 79 | """ 80 | 81 | rescaler_class = AnnotationRescaler 82 | 83 | 84 | def __init__(self, config: AttributeEncoder): 85 | self.config = config 86 | self.rescaler = config.rescaler or self.rescaler_class( 87 | config.meta.stride, config.meta.object_type) 88 | 89 | 90 | def __call__(self, image, anns, meta): 91 | width_height_original = image.shape[2:0:-1] 92 | 93 | objects = self.rescaler.objects(anns) 94 | new_width_height = self.rescaler.width_height(width_height_original) 95 | valid_area = self.rescaler.valid_area(meta) 96 | LOG.debug('valid area: %s', valid_area) 97 | 98 | encoding = self.generate_encoding(objects, new_width_height, valid_area) 99 | return encoding 100 | 101 | 102 | @abstractmethod 103 | def generate_encoding(self, objects, width_height, valid_area): 104 | """Compute targets from annotations.""" 105 | raise NotImplementedError 106 | 107 | 108 | """List of generatpr for every dataset and object type.""" 109 | ATTRIBUTE_GENERATORS: Dict[str, Dict[ObjectType, AttributeGenerator]] = {} 110 | -------------------------------------------------------------------------------- /openpifpaf_detection_attributes/datasets/generators.py: -------------------------------------------------------------------------------- 1 | import copy 2 | 3 | import numpy as np 4 | from openpifpaf.utils import mask_valid_area 5 | import torch 6 | 7 | from .encoder import AnnotationRescaler, AttributeGenerator 8 | 9 | 10 | class BoxAnnotationRescaler(AnnotationRescaler): 11 | """AnnotationRescaler for objects defined with bounding boxes.""" 12 | 13 | def objects(self, anns): 14 | objs = [copy.deepcopy(ann) for ann in anns 15 | if ann['object_type'] is self.object_type] 16 | for obj in objs: 17 | obj['box'] /= self.stride 18 | obj['center'] /= self.stride 19 | obj['width'] /= self.stride 20 | obj['height'] /= self.stride 21 | return objs 22 | 23 | 24 | class BoxAttributeGenerator(AttributeGenerator): 25 | """AttributeGenerator for objects defined with bounding boxes.""" 26 | 27 | rescaler_class = BoxAnnotationRescaler 28 | 29 | 30 | def generate_encoding(self, objects, width_height, valid_area): 31 | self.init_fields(width_height) 32 | self.fill(objects) 33 | encodings = self.fields(valid_area) 34 | return encodings 35 | 36 | 37 | def init_fields(self, width_height): 38 | init_value = np.nan if self.config.meta.only_on_instance else 0. 
39 | assert self.config.meta.n_channels > 0 40 | n_targets = (1 if self.config.meta.is_classification 41 | else self.config.meta.n_channels) 42 | self.targets = np.full( 43 | (n_targets, width_height[1], width_height[0]), 44 | init_value, 45 | dtype=np.float32, 46 | ) 47 | self.previous_distances = np.full((width_height[1], width_height[0]), 48 | np.inf, dtype=np.float32) 49 | self.previous_bottoms = np.full((width_height[1], width_height[0]), 50 | -1., dtype=np.float32) 51 | 52 | 53 | def fill(self, objects): 54 | for obj in objects: 55 | self.fill_object(obj) 56 | 57 | 58 | def fill_object(self, obj): 59 | x_start = int(np.round(obj['box'][0])) 60 | x_end = int(np.round(obj['box'][0] + obj['box'][2]) + 1) 61 | y_start = int(np.round(obj['box'][1])) 62 | y_end = int(np.round(obj['box'][1] + obj['box'][3]) + 1) 63 | mask_size = [x_end - x_start, y_end - y_start] 64 | 65 | target_mask = self.target_mask(obj, mask_size) 66 | 67 | v_center = np.stack(( 68 | np.linspace( 69 | obj['center'][0] - np.round(obj['box'][0]), 70 | obj['center'][0] - np.round(obj['box'][0] + obj['box'][2]), 71 | mask_size[0], 72 | ).reshape(1,-1).repeat(mask_size[1], axis=0), 73 | np.linspace( 74 | obj['center'][1] - np.round(obj['box'][1]), 75 | obj['center'][1] - np.round(obj['box'][1] + obj['box'][3]), 76 | mask_size[1], 77 | ).reshape(-1,1).repeat(mask_size[0], axis=1), 78 | ), axis=0) 79 | d_center = np.linalg.norm(v_center, ord=2, axis=0) 80 | t = self.targets[:, y_start:y_end, x_start:x_end] 81 | pd = self.previous_distances[y_start:y_end, x_start:x_end] 82 | pb = self.previous_bottoms[y_start:y_end, x_start:x_end] 83 | 84 | if (t.shape[1] <= 0) or (t.shape[2] <= 0): 85 | return 86 | 87 | # No learning on heavily occluded or ignored instances 88 | if ( 89 | (obj['occlusion'] > self.config.occlusion_level) 90 | or obj['ignore_eval'] 91 | ): 92 | if not self.config.meta.only_on_instance: 93 | t[t==0.] 
= np.nan 94 | return 95 | 96 | valid_mask = ( 97 | (pd > d_center) 98 | | ((pd == d_center) & (pb < obj['box'][1]+obj['box'][3])) 99 | ) 100 | t[ 101 | np.expand_dims(valid_mask, axis=0).repeat(t.shape[0], axis=0) 102 | ] = target_mask[ 103 | np.expand_dims(valid_mask, axis=0).repeat(target_mask.shape[0], 104 | axis=0) 105 | ] 106 | pd[valid_mask] = d_center[valid_mask] 107 | pb[valid_mask] = obj['box'][1] + obj['box'][3] 108 | 109 | 110 | def target_mask(self, obj, mask_size): 111 | val = obj[self.config.meta.attribute] 112 | 113 | if self.config.meta.is_scalar: 114 | if val is None: 115 | val = np.nan 116 | target = np.full((1, mask_size[1], mask_size[0]), 117 | val, dtype=np.float32) 118 | if self.config.meta.mean is not None: 119 | target -= self.config.meta.mean 120 | if self.config.meta.std is not None: 121 | target /= self.config.meta.std 122 | else: # vectorial attribute 123 | if val is None: 124 | val = [np.nan, np.nan] 125 | target = np.stack(( 126 | np.linspace( 127 | val[0] - np.round(obj['box'][0]), 128 | val[0] - np.round(obj['box'][0] + obj['box'][2]), 129 | mask_size[0], 130 | ).reshape(1,-1).repeat(mask_size[1], axis=0), 131 | np.linspace( 132 | val[1] - np.round(obj['box'][1]), 133 | val[1] - np.round(obj['box'][1] + obj['box'][3]), 134 | mask_size[1], 135 | ).reshape(-1,1).repeat(mask_size[0], axis=1), 136 | ), axis=0) 137 | if self.config.meta.mean is not None: 138 | target[0,:,:] -= self.config.meta.mean[0] 139 | target[1,:,:] -= self.config.meta.mean[1] 140 | if self.config.meta.std is not None: 141 | target[0,:,:] /= self.config.meta.std[0] 142 | target[1,:,:] /= self.config.meta.std[1] 143 | 144 | return target 145 | 146 | 147 | def fields(self, valid_area): 148 | mask_valid_area(self.targets, valid_area, fill_value=np.nan) 149 | return torch.from_numpy(self.targets) 150 | -------------------------------------------------------------------------------- /openpifpaf_detection_attributes/datasets/headmeta.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from typing import Dict, List, Union 3 | 4 | import openpifpaf 5 | 6 | from .attribute import ObjectType 7 | 8 | 9 | @dataclass 10 | class AttributeMeta(openpifpaf.headmeta.Base): 11 | """Meta information about an attribute. 12 | 13 | Args: 14 | object_type (ObjectType): Type of object annotated. 15 | attribute (str): Name of attribute. 16 | group (str): Group of attribute. 17 | only_on_instance (bool): Compute targets only on instances. 18 | is_classification (bool): Classification or regression attribute. 19 | is_scalar (bool): Scalar or vectorial attribute. 20 | is_spatial (bool): Attribute affected by stride. 21 | n_channels (int): Number of channels for annotations. 22 | mean (Union[float, List[float]]): Mean of attribute for normalization. 23 | std (Union[float, List[float]]): Standard deviation of attribute for 24 | normalization. 25 | default (Union[int, float, List[float]]): Default prediction for 26 | classification evaluation. 27 | labels (Dict[int, str]): Names of classes.
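        Example (editor's sketch; values mirror the JAAD 'will_cross' entry):
            AttributeMeta('attribute-will_cross', 'jaad',
                          object_type=JaadType.PEDESTRIAN,
                          attribute='will_cross', group='intention',
                          only_on_instance=True, is_classification=True,
                          is_scalar=True, is_spatial=False,
                          n_channels=1, default=0)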
28 | """ 29 | 30 | object_type: ObjectType 31 | attribute: str 32 | group: str 33 | only_on_instance: bool 34 | is_classification: bool 35 | is_scalar: bool 36 | is_spatial: bool 37 | n_channels: int 38 | mean: Union[float, List[float]] = None 39 | std: Union[float, List[float]] = None 40 | default: Union[int, float, List[float]] = None 41 | labels: Dict[int, str] = None 42 | 43 | 44 | @property 45 | def n_fields(self): 46 | return 1 47 | -------------------------------------------------------------------------------- /openpifpaf_detection_attributes/datasets/jaad/__init__.py: -------------------------------------------------------------------------------- 1 | import openpifpaf 2 | 3 | from .annotation import JAAD_OBJECT_ANNOTATIONS 4 | from .attribute import JaadType, JAAD_ATTRIBUTE_METAS 5 | from .datamodule import Jaad 6 | from .encoder import JAAD_ATTRIBUTE_GENERATORS 7 | from .. import annotation 8 | from .. import attribute 9 | from .. import encoder 10 | from .. import painter 11 | 12 | 13 | def register(): 14 | openpifpaf.DATAMODULES['jaad'] = Jaad 15 | openpifpaf.PAINTERS['JaadPedestrianAnnotation'] = painter.BoxPainter 16 | 17 | attribute.OBJECT_TYPES['jaad'] = JaadType 18 | attribute.ATTRIBUTE_METAS['jaad'] = JAAD_ATTRIBUTE_METAS 19 | encoder.ATTRIBUTE_GENERATORS['jaad'] = JAAD_ATTRIBUTE_GENERATORS 20 | annotation.OBJECT_ANNOTATIONS['jaad'] = JAAD_OBJECT_ANNOTATIONS 21 | -------------------------------------------------------------------------------- /openpifpaf_detection_attributes/datasets/jaad/annotation.py: -------------------------------------------------------------------------------- 1 | import copy 2 | 3 | from .attribute import JaadType, JAAD_ATTRIBUTE_METAS 4 | from .. import annotation 5 | 6 | 7 | class JaadPedestrianAnnotation(annotation.AnnotationAttr): 8 | """Annotation class for pedestrians from dataset JAAD.""" 9 | 10 | object_type = JaadType.PEDESTRIAN 11 | attribute_metas = JAAD_ATTRIBUTE_METAS[JaadType.PEDESTRIAN] 12 | 13 | 14 | def inverse_transform(self, meta): 15 | pred = copy.deepcopy(self) 16 | 17 | atts = pred.attributes 18 | 19 | # Horizontal flip 20 | if meta['hflip']: 21 | w = meta['width_height'][0] 22 | if atts['center'] is not None: 23 | atts['center'][0] = -atts['center'][0] + (w - 1) 24 | atts['bag_left_side'], atts['bag_right_side'] = ( 25 | atts['bag_right_side'], atts['bag_left_side']) 26 | atts['pose_left'], atts['pose_right'] = ( 27 | atts['pose_right'], atts['pose_left']) 28 | 29 | # Offset and scale 30 | if atts['center'] is not None: 31 | atts['center'][0] = (atts['center'][0] + meta['offset'][0]) / meta['scale'][0] 32 | atts['center'][1] = (atts['center'][1] + meta['offset'][1]) / meta['scale'][1] 33 | if atts['width'] is not None: 34 | atts['width'] /= meta['scale'][0] 35 | if atts['height'] is not None: 36 | atts['height'] /= meta['scale'][1] 37 | 38 | return pred 39 | 40 | 41 | JAAD_OBJECT_ANNOTATIONS = { 42 | JaadType.PEDESTRIAN: JaadPedestrianAnnotation, 43 | } 44 | -------------------------------------------------------------------------------- /openpifpaf_detection_attributes/datasets/jaad/attribute.py: -------------------------------------------------------------------------------- 1 | from .. 
import attribute 2 | 3 | 4 | class JaadType(attribute.ObjectType): 5 | """Object types for JAAD.""" 6 | PEDESTRIAN = () 7 | 8 | 9 | JAAD_ATTRIBUTE_METAS = { 10 | JaadType.PEDESTRIAN: [ 11 | # Detection 12 | {'attribute': 'confidence', 'group': 'detection', 'only_on_instance': False, 'is_classification': True, 'is_scalar': True, 'is_spatial': False, 'n_channels': 1}, 13 | {'attribute': 'center', 'group': 'detection', 'only_on_instance': True, 'is_classification': False, 'is_scalar': False, 'is_spatial': True, 'n_channels': 2, 'std': [2.7, 5.9]}, 14 | {'attribute': 'height', 'group': 'detection', 'only_on_instance': True, 'is_classification': False, 'is_scalar': True, 'is_spatial': True, 'n_channels': 1, 'default': 17.5, 'mean': 17.5, 'std': 8.9}, 15 | {'attribute': 'width', 'group': 'detection', 'only_on_instance': True, 'is_classification': False, 'is_scalar': True, 'is_spatial': True, 'n_channels': 1, 'default': 7.7, 'mean': 7.7, 'std': 4.4}, 16 | # Intention 17 | {'attribute': 'will_cross', 'group': 'intention', 'only_on_instance': True, 'is_classification': True, 'is_scalar': True, 'is_spatial': False, 'n_channels': 1, 'default': 0}, 18 | {'attribute': 'time_to_crossing', 'group': 'intention', 'only_on_instance': True, 'is_classification': False, 'is_scalar': True, 'is_spatial': False, 'n_channels': 1, 'default': -2.4, 'mean': -2.4, 'std': 2.8}, 19 | # Behavior 20 | {'attribute': 'is_crossing', 'group': 'behavior', 'only_on_instance': True, 'is_classification': True, 'is_scalar': True, 'is_spatial': False, 'n_channels': 1, 'default': 0}, 21 | {'attribute': 'look', 'group': 'behavior', 'only_on_instance': True, 'is_classification': True, 'is_scalar': True, 'is_spatial': False, 'n_channels': 1, 'default': 0}, 22 | {'attribute': 'walk', 'group': 'behavior', 'only_on_instance': True, 'is_classification': True, 'is_scalar': True, 'is_spatial': False, 'n_channels': 1, 'default': 1}, 23 | {'attribute': 'motion_direction', 'group': 'behavior', 'only_on_instance': True, 'is_classification': True, 'is_scalar': True, 'is_spatial': False, 'n_channels': 2, 'default': 0, 'labels': {0: 'lateral', 1: 'longitudinal'}}, 24 | {'attribute': 'pose_back', 'group': 'behavior', 'only_on_instance': True, 'is_classification': True, 'is_scalar': True, 'is_spatial': False, 'n_channels': 1, 'default': 0}, 25 | {'attribute': 'pose_front', 'group': 'behavior', 'only_on_instance': True, 'is_classification': True, 'is_scalar': True, 'is_spatial': False, 'n_channels': 1, 'default': 0}, 26 | {'attribute': 'pose_left', 'group': 'behavior', 'only_on_instance': True, 'is_classification': True, 'is_scalar': True, 'is_spatial': False, 'n_channels': 1, 'default': 0}, 27 | {'attribute': 'pose_right', 'group': 'behavior', 'only_on_instance': True, 'is_classification': True, 'is_scalar': True, 'is_spatial': False, 'n_channels': 1, 'default': 0}, 28 | {'attribute': 'group_size', 'group': 'behavior', 'only_on_instance': True, 'is_classification': True, 'is_scalar': True, 'is_spatial': False, 'n_channels': 4, 'default': 1, 'labels': {0: '1', 1: '2', 2: '3', 3: '4+'}}, 29 | {'attribute': 'reaction', 'group': 'behavior', 'only_on_instance': True, 'is_classification': True, 'is_scalar': True, 'is_spatial': False, 'n_channels': 4, 'default': 0, 'labels': {0: 'none', 1: 'clear_path', 2: 'speed_up', 3: 'slow_down'}}, 30 | # Appearance 31 | {'attribute': 'gender', 'group': 'appearance', 'only_on_instance': True, 'is_classification': True, 'is_scalar': True, 'is_spatial': False, 'n_channels': 2, 'default': 0, 'labels': {0: 'female', 1: 
'male'}}, 32 | {'attribute': 'backpack', 'group': 'appearance', 'only_on_instance': True, 'is_classification': True, 'is_scalar': True, 'is_spatial': False, 'n_channels': 1, 'default': 0}, 33 | {'attribute': 'bag_elbow', 'group': 'appearance', 'only_on_instance': True, 'is_classification': True, 'is_scalar': True, 'is_spatial': False, 'n_channels': 1, 'default': 0}, 34 | {'attribute': 'bag_hand', 'group': 'appearance', 'only_on_instance': True, 'is_classification': True, 'is_scalar': True, 'is_spatial': False, 'n_channels': 1, 'default': 0}, 35 | {'attribute': 'bag_left_side', 'group': 'appearance', 'only_on_instance': True, 'is_classification': True, 'is_scalar': True, 'is_spatial': False, 'n_channels': 1, 'default': 0}, 36 | {'attribute': 'bag_right_side', 'group': 'appearance', 'only_on_instance': True, 'is_classification': True, 'is_scalar': True, 'is_spatial': False, 'n_channels': 1, 'default': 0}, 37 | {'attribute': 'bag_shoulder', 'group': 'appearance', 'only_on_instance': True, 'is_classification': True, 'is_scalar': True, 'is_spatial': False, 'n_channels': 1, 'default': 0}, 38 | {'attribute': 'cap', 'group': 'appearance', 'only_on_instance': True, 'is_classification': True, 'is_scalar': True, 'is_spatial': False, 'n_channels': 1, 'default': 0}, 39 | {'attribute': 'clothes_below_knee', 'group': 'appearance', 'only_on_instance': True, 'is_classification': True, 'is_scalar': True, 'is_spatial': False, 'n_channels': 1, 'default': 0}, 40 | {'attribute': 'clothes_lower_dark', 'group': 'appearance', 'only_on_instance': True, 'is_classification': True, 'is_scalar': True, 'is_spatial': False, 'n_channels': 1, 'default': 1}, 41 | {'attribute': 'clothes_upper_dark', 'group': 'appearance', 'only_on_instance': True, 'is_classification': True, 'is_scalar': True, 'is_spatial': False, 'n_channels': 1, 'default': 1}, 42 | {'attribute': 'clothes_lower_light', 'group': 'appearance', 'only_on_instance': True, 'is_classification': True, 'is_scalar': True, 'is_spatial': False, 'n_channels': 1, 'default': 0}, 43 | {'attribute': 'clothes_upper_light', 'group': 'appearance', 'only_on_instance': True, 'is_classification': True, 'is_scalar': True, 'is_spatial': False, 'n_channels': 1, 'default': 0}, 44 | {'attribute': 'hood', 'group': 'appearance', 'only_on_instance': True, 'is_classification': True, 'is_scalar': True, 'is_spatial': False, 'n_channels': 1, 'default': 0}, 45 | {'attribute': 'object', 'group': 'appearance', 'only_on_instance': True, 'is_classification': True, 'is_scalar': True, 'is_spatial': False, 'n_channels': 1, 'default': 0}, 46 | {'attribute': 'phone', 'group': 'appearance', 'only_on_instance': True, 'is_classification': True, 'is_scalar': True, 'is_spatial': False, 'n_channels': 1, 'default': 0}, 47 | {'attribute': 'stroller_cart', 'group': 'appearance', 'only_on_instance': True, 'is_classification': True, 'is_scalar': True, 'is_spatial': False, 'n_channels': 1, 'default': 0}, 48 | {'attribute': 'sunglasses', 'group': 'appearance', 'only_on_instance': True, 'is_classification': True, 'is_scalar': True, 'is_spatial': False, 'n_channels': 1, 'default': 0}, 49 | {'attribute': 'age', 'group': 'appearance', 'only_on_instance': True, 'is_classification': True, 'is_scalar': True, 'is_spatial': False, 'n_channels': 3, 'default': 1, 'labels': {0: 'child/young', 1: 'adult', 2: 'senior'}}, 50 | # Not used 51 | #{'attribute': 'baby', 'group': 'notused', 'only_on_instance': True, 'is_classification': True, 'is_scalar': True, 'is_spatial': False, 'n_channels': 1, 'default': 0}, 52 | #{'attribute': 
'bicycle_motorcycle', 'group': 'notused', 'only_on_instance': True, 'is_classification': True, 'is_scalar': True, 'is_spatial': False, 'n_channels': 1, 'default': 0}, 53 | #{'attribute': 'hand_gesture', 'group': 'notused', 'only_on_instance': True, 'is_classification': True, 'is_scalar': True, 'is_spatial': False, 'n_channels': 1, 'default': 0}, 54 | #{'attribute': 'nod', 'group': 'notused', 'only_on_instance': True, 'is_classification': True, 'is_scalar': True, 'is_spatial': False, 'n_channels': 1, 'default': 0}, 55 | #{'attribute': 'umbrella', 'group': 'notused', 'only_on_instance': True, 'is_classification': True, 'is_scalar': True, 'is_spatial': False, 'n_channels': 1, 'default': 0}, 56 | ], 57 | } 58 | -------------------------------------------------------------------------------- /openpifpaf_detection_attributes/datasets/jaad/datamodule.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | import torch 4 | import openpifpaf 5 | 6 | from .attribute import JaadType 7 | from .dataset import JaadDataset 8 | from . import transforms 9 | from .. import annotation 10 | from .. import attribute 11 | from .. import encoder 12 | from .. import headmeta 13 | from .. import metrics as eval_metrics 14 | from .. import sampler 15 | 16 | 17 | class Jaad(openpifpaf.datasets.DataModule): 18 | """DataModule for dataset JAAD.""" 19 | 20 | debug = False 21 | pin_memory = False 22 | 23 | # General 24 | root_dir = 'data-jaad/' 25 | subset = 'default' 26 | train_set = 'train' 27 | val_set = 'val' 28 | test_set = 'test' 29 | subepochs = 1 30 | 31 | # Tasks 32 | pedestrian_attributes = ['detection'] 33 | occlusion_level = 1 34 | upsample_stride = 1 35 | 36 | # Pre-processing 37 | image_width = 961 38 | top_crop_ratio = 0.33 39 | image_height_stride = 16 40 | fast_scaling = True 41 | augmentation = True 42 | 43 | 44 | def __init__(self): 45 | super().__init__() 46 | self.compute_attributes() 47 | self.compute_head_metas() 48 | 49 | 50 | @classmethod 51 | def compute_attributes(cls): 52 | cls.attributes = { 53 | JaadType.PEDESTRIAN: cls.pedestrian_attributes, 54 | } 55 | 56 | 57 | @classmethod 58 | def compute_head_metas(cls): 59 | att_metas = attribute.get_attribute_metas(dataset='jaad', 60 | attributes=cls.attributes) 61 | cls.head_metas = [headmeta.AttributeMeta('attribute-'+am['attribute'], 62 | 'jaad', **am) 63 | for am in att_metas] 64 | for hm in cls.head_metas: 65 | hm.upsample_stride = cls.upsample_stride 66 | 67 | 68 | @classmethod 69 | def cli(cls, parser: argparse.ArgumentParser): 70 | group = parser.add_argument_group('data module Jaad') 71 | 72 | # General 73 | group.add_argument('--jaad-root-dir', 74 | default=cls.root_dir, 75 | help='root directory of jaad dataset') 76 | group.add_argument('--jaad-subset', 77 | default=cls.subset, 78 | choices=['default', 'all_videos', 'high_visibility'], 79 | help='subset of videos to consider') 80 | group.add_argument('--jaad-training-set', 81 | default=cls.train_set, 82 | choices=['train', 'trainval'], 83 | help='training set') 84 | group.add_argument('--jaad-validation-set', 85 | default=cls.val_set, 86 | choices=['val', 'test'], 87 | help='validation set') 88 | group.add_argument('--jaad-testing-set', 89 | default=cls.test_set, 90 | choices=['val', 'test'], 91 | help='testing set') 92 | group.add_argument('--jaad-subepochs', 93 | default=cls.subepochs, type=int, 94 | help='number of subepochs with sub-sampling') 95 | 96 | # Tasks 97 | group.add_argument('--jaad-pedestrian-attributes', 98 | 
default=cls.pedestrian_attributes, nargs='+', 99 | help='list of attributes to consider for pedestrians') 100 | group.add_argument('--jaad-occlusion-level', 101 | default=cls.occlusion_level, type=int, 102 | choices=[0, 1, 2], 103 | help='max level of occlusion to learn from') 104 | group.add_argument('--jaad-head-upsample', 105 | default=cls.upsample_stride, type=int, 106 | help='head upsample stride') 107 | 108 | # Pre-processing 109 | group.add_argument('--jaad-image-width', 110 | default=cls.image_width, type=int, 111 | help='width to rescale image to') 112 | group.add_argument('--jaad-top-crop-ratio', 113 | default=cls.top_crop_ratio, type=float, 114 | help='ratio of height to crop from top of image') 115 | group.add_argument('--jaad-image-height-stride', 116 | default=cls.image_height_stride, type=int, 117 | help='stride to compute height of image') 118 | assert cls.fast_scaling 119 | group.add_argument('--jaad-no-fast-scaling', 120 | dest='jaad_fast_scaling', 121 | default=True, action='store_false', 122 | help='do not use fast scaling algorithm') 123 | assert cls.augmentation 124 | group.add_argument('--jaad-no-augmentation', 125 | dest='jaad_augmentation', 126 | default=True, action='store_false', 127 | help='do not apply data augmentation') 128 | 129 | 130 | @classmethod 131 | def configure(cls, args: argparse.Namespace): 132 | # Extract global information 133 | cls.debug = args.debug 134 | cls.pin_memory = args.pin_memory 135 | 136 | # General 137 | cls.root_dir = args.jaad_root_dir 138 | cls.subset = args.jaad_subset 139 | cls.train_set = args.jaad_training_set 140 | cls.val_set = args.jaad_validation_set 141 | cls.test_set = args.jaad_testing_set 142 | cls.subepochs = args.jaad_subepochs 143 | 144 | # Tasks 145 | cls.pedestrian_attributes = args.jaad_pedestrian_attributes 146 | cls.compute_attributes() 147 | cls.occlusion_level = args.jaad_occlusion_level 148 | cls.upsample_stride = args.jaad_head_upsample 149 | cls.compute_head_metas() 150 | 151 | # Pre-processing 152 | cls.image_width = args.jaad_image_width 153 | cls.top_crop_ratio = args.jaad_top_crop_ratio 154 | cls.image_height_stride = args.jaad_image_height_stride 155 | cls.fast_scaling = args.jaad_fast_scaling 156 | cls.augmentation = args.jaad_augmentation 157 | 158 | 159 | def _common_preprocess_op(self): 160 | return [ 161 | transforms.NormalizeAnnotations(), 162 | transforms.RescaleAbsolute(self.image_width, 163 | fast=self.fast_scaling), 164 | transforms.CropTopOut(self.top_crop_ratio, 165 | self.image_height_stride), 166 | ] 167 | 168 | 169 | def _train_preprocess(self): 170 | if self.augmentation: 171 | data_augmentation_op = [ 172 | transforms.ZoomInOrOut(fast=self.fast_scaling), 173 | openpifpaf.transforms.RandomApply(transforms.HFlip(), 0.5), 174 | transforms.TRAIN_TRANSFORM, 175 | ] 176 | else: 177 | data_augmentation_op = [transforms.EVAL_TRANSFORM] 178 | 179 | encoders = [encoder.AttributeEncoder( 180 | head_meta, 181 | occlusion_level=self.occlusion_level, 182 | ) 183 | for head_meta in self.head_metas] 184 | 185 | return openpifpaf.transforms.Compose([ 186 | *self._common_preprocess_op(), 187 | *data_augmentation_op, 188 | openpifpaf.transforms.Encoders(encoders), 189 | ]) 190 | 191 | 192 | def _eval_preprocess(self): 193 | return openpifpaf.transforms.Compose([ 194 | *self._common_preprocess_op(), 195 | transforms.ToAnnotations(annotation.OBJECT_ANNOTATIONS['jaad']), 196 | transforms.EVAL_TRANSFORM, 197 | ]) 198 | 199 | 200 | def train_loader(self): 201 | train_data = JaadDataset( 202 | 
root_dir=self.root_dir, 203 | split=self.train_set, 204 | subset=self.subset, 205 | preprocess=self._train_preprocess(), 206 | ) 207 | subsampler = sampler.RegularSubSampler( 208 | len(train_data), 209 | subepochs=self.subepochs, 210 | shuffle=(not self.debug) and self.augmentation 211 | ) 212 | return torch.utils.data.DataLoader( 213 | train_data, 214 | batch_size=self.batch_size, 215 | sampler=subsampler, 216 | pin_memory=self.pin_memory, 217 | num_workers=self.loader_workers, 218 | drop_last=True, 219 | collate_fn=openpifpaf.datasets.collate_images_targets_meta, 220 | ) 221 | 222 | 223 | def val_loader(self): 224 | val_data = JaadDataset( 225 | root_dir=self.root_dir, 226 | split=self.val_set, 227 | subset=self.subset, 228 | preprocess=self._train_preprocess(), 229 | ) 230 | return torch.utils.data.DataLoader( 231 | val_data, 232 | batch_size=self.batch_size, 233 | shuffle=(not self.debug) and self.augmentation, 234 | pin_memory=self.pin_memory, 235 | num_workers=self.loader_workers, 236 | drop_last=True, 237 | collate_fn=openpifpaf.datasets.collate_images_targets_meta, 238 | ) 239 | 240 | 241 | def eval_loader(self): 242 | eval_data = JaadDataset( 243 | root_dir=self.root_dir, 244 | split=self.test_set, 245 | subset=self.subset, 246 | preprocess=self._eval_preprocess(), 247 | ) 248 | return torch.utils.data.DataLoader( 249 | eval_data, 250 | batch_size=self.batch_size, 251 | shuffle=False, 252 | pin_memory=self.pin_memory, 253 | num_workers=self.loader_workers, 254 | drop_last=False, 255 | collate_fn=openpifpaf.datasets.collate_images_anns_meta, 256 | ) 257 | 258 | 259 | def metrics(self): 260 | return [eval_metrics.InstanceDetection(self.head_metas)] 261 | -------------------------------------------------------------------------------- /openpifpaf_detection_attributes/datasets/jaad/dataset.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import sys 4 | from typing import Callable 5 | 6 | import openpifpaf 7 | from PIL import Image 8 | import torch.utils.data 9 | 10 | from .attribute import JaadType 11 | from . import transforms 12 | 13 | 14 | LOG = logging.getLogger(__name__) 15 | 16 | 17 | class JaadDataset(torch.utils.data.Dataset): 18 | """Dataset JAAD. 19 | 20 | Args: 21 | root_dir (str): Root directory of dataset. 22 | split (str: 'train', 'val', 'test'): Split of dataset. 23 | subset (str: 'default', 'all_videos', 'high_visibility'): Set of 24 | videos to use. 25 | preprocess (Callable): A function/transform that takes in the 26 | image and targets and transforms them.
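        Example (editor's sketch; assumes the JAAD repository is checked out
        under 'data-jaad/' with its images extracted):
            dataset = JaadDataset('data-jaad/', 'train', 'default')
            image, anns, meta = dataset[0]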
27 | """ 28 | 29 | def __init__(self, 30 | root_dir: str, 31 | split: str, 32 | subset: str, 33 | *, 34 | preprocess: Callable = None): 35 | super().__init__() 36 | sys.path.append(root_dir) 37 | from jaad_data import JAAD 38 | 39 | jaad = JAAD(data_path=root_dir) 40 | 41 | self.root_dir = root_dir 42 | if subset not in {'default', 'all_videos', 'high_visibility'}: 43 | raise ValueError('unknown subset {}'.format(subset)) 44 | self.subset = subset 45 | if split in {'train', 'val', 'test'}: 46 | list_videos = jaad._get_video_ids_split(split, subset=self.subset) 47 | elif split == 'trainval': 48 | list_videos = ( 49 | jaad._get_video_ids_split('train', subset=self.subset) 50 | + jaad._get_video_ids_split('val', subset=self.subset) 51 | ) 52 | else: 53 | raise ValueError('unknown split {}'.format(split)) 54 | self.split = split 55 | self.preprocess = preprocess or transforms.EVAL_TRANSFORM 56 | 57 | self.db = jaad.generate_database() 58 | self.idx_to_ids = [] 59 | for vid_id in list_videos: 60 | for img_id in range(self.db[vid_id]["num_frames"]): 61 | self.idx_to_ids.append({ 62 | 'video_name': vid_id, 63 | 'image_name': '{:05d}.png'.format(img_id), 64 | 'frame_id': img_id, 65 | }) 66 | 67 | LOG.info('JAAD {0} {1} images: {2}'.format(self.subset, self.split, 68 | len(self.idx_to_ids))) 69 | 70 | 71 | def __getitem__(self, index): 72 | ids = self.idx_to_ids[index] 73 | local_file_path = os.path.join(self.root_dir, 'images', 74 | ids['video_name'], ids['image_name']) 75 | with open(local_file_path, 'rb') as f: 76 | image = Image.open(f).convert('RGB') 77 | 78 | # Annotations 79 | anns = [] 80 | for ped_id in self.db[ids['video_name']]['ped_annotations']: 81 | if ids['frame_id'] not in (self.db[ids['video_name']] 82 | ['ped_annotations'] 83 | [ped_id] 84 | ['frames']): 85 | continue # ped not present in frame 86 | seq_id = (self.db[ids['video_name']] 87 | ['ped_annotations'] 88 | [ped_id] 89 | ['frames']).index(ids['frame_id']) 90 | 91 | ped = {} 92 | ped['object_type'] = JaadType.PEDESTRIAN 93 | ped['id'] = ped_id 94 | ped_anns = self.db[ids['video_name']]['ped_annotations'][ped_id] 95 | 96 | # General 97 | ped['confidence'] = 1 98 | ped['box'] = [ # x, y, w, h 99 | ped_anns['bbox'][seq_id][0], 100 | ped_anns['bbox'][seq_id][1], 101 | ped_anns['bbox'][seq_id][2]-ped_anns['bbox'][seq_id][0], 102 | ped_anns['bbox'][seq_id][3]-ped_anns['bbox'][seq_id][1], 103 | ] 104 | ped['center'] = [ped['box'][0]+.5*ped['box'][2], 105 | ped['box'][1]+.5*ped['box'][3]] 106 | ped['width'] = ped['box'][2] 107 | ped['height'] = ped['box'][3] 108 | ped['occlusion'] = ped_anns['occlusion'][seq_id] #0: no occlusion, 1: partial occlusion (>25%), 2: full occlusion (>75%) 109 | ped['with_behavior'] = True if ped_id[-1]=='b' else False 110 | ped['ignore_eval'] = True if ped_id[-1]=='p' else False 111 | 112 | # Crossing 113 | if 'cross' in ped_anns['behavior']: 114 | crossing_behavior = ped_anns['behavior']['cross'] 115 | crossing_behavior = [max(0,cb) for cb in crossing_behavior] # replace -1 by 0 116 | ped['is_crossing'] = crossing_behavior[seq_id] # 0: 'not-crossing', 1: 'crossing' 117 | ped['will_cross'] = 1 if any(crossing_behavior[seq_id:]) else 0 # 0: 'not-crossing', 1: 'crossing' 118 | else: 119 | ped['will_cross'] = 0 120 | ped['is_crossing'] = 0 121 | ped['frames_to_crossing'] = None 122 | ped['time_to_crossing'] = None 123 | if ped['will_cross'] == 1: 124 | cross_idx = next(t for t in range(len(crossing_behavior)) 125 | if crossing_behavior[t]==1) # start crossing 126 | assert crossing_behavior[cross_idx] == 1 
127 | # Only annotate if start of crossing is observed 128 | if ( 129 | (cross_idx > 0) 130 | and (ped_anns['frames'][cross_idx] - ped_anns['frames'][cross_idx-1] == 1) 131 | and (crossing_behavior[cross_idx-1] == 0) 132 | ): 133 | cross_frame = ped_anns['frames'][cross_idx] 134 | ped['frames_to_crossing'] = cross_frame - ids['frame_id'] 135 | ped['time_to_crossing'] = ped['frames_to_crossing'] / 30. # conversion to seconds at 30fps 136 | 137 | # Behavior 138 | for tag in ['hand_gesture', 'look', 'nod', 'reaction']: 139 | ped[tag] = ( 140 | int(ped_anns['behavior'][tag][seq_id]) 141 | if tag in ped_anns['behavior'] else None 142 | ) 143 | if (ped['hand_gesture'] is not None) and (ped['hand_gesture'] > 0): 144 | ped['hand_gesture'] = 1 # merge all reaction types 145 | ped['walk'] = ( # different name for action -> walk attribute 146 | int(ped_anns['behavior']['action'][seq_id]) 147 | if 'action' in ped_anns['behavior'] else None 148 | ) 149 | 150 | # Attributes 151 | for tag in ['age', 'gender', 'group_size', 'motion_direction']: 152 | ped[tag] = ( 153 | int(ped_anns['attributes'][tag]) 154 | if tag in ped_anns['attributes'] else None 155 | ) 156 | if ped['age'] is not None: # merge child/young 157 | ped['age'] -= 1 158 | if ped['age'] < 0: 159 | ped['age'] = 0 160 | if ped['gender'] is not None: # remove n/a 161 | ped['gender'] -= 1 162 | if ped['gender'] < 0: 163 | ped['gender'] = None 164 | if ped['motion_direction'] is not None: # remove n/a 165 | ped['motion_direction'] -= 1 166 | if ped['motion_direction'] < 0: 167 | ped['motion_direction'] = None 168 | if ped['group_size'] is not None: # limit at 4 or more 169 | ped['group_size'] -= 1 170 | if ped['group_size'] > 3: 171 | ped['group_size'] = 3 172 | 173 | # Appearance 174 | if ('frames' in ped_anns['appearance'] 175 | and ids['frame_id'] in ped_anns['appearance']['frames']): 176 | app_seq_id = ped_anns['appearance']['frames'].index(ids['frame_id']) 177 | else: 178 | app_seq_id = None 179 | for tag in ['baby', 'backpack', 'bag_elbow', 'bag_hand', 180 | 'bag_left_side', 'bag_right_side', 'bag_shoulder', 181 | 'bicycle_motorcycle', 'cap', 'clothes_below_knee', 182 | 'clothes_lower_dark', 'clothes_lower_light', 183 | 'clothes_upper_light', 'clothes_upper_dark', 'hood', 184 | 'object', 'phone', 'pose_back', 'pose_front', 185 | 'pose_left', 'pose_right', 'stroller_cart', 186 | 'sunglasses', 'umbrella']: 187 | ped[tag] = ( 188 | int(ped_anns['appearance'][tag][app_seq_id]) 189 | if (tag in ped_anns['appearance'] 190 | and app_seq_id is not None) 191 | else None 192 | ) 193 | 194 | # Add pedestrian 195 | anns.append(ped) 196 | 197 | meta = { 198 | 'dataset': 'jaad', 199 | 'dataset_index': index, 200 | 'video_name': ids['video_name'], 201 | 'image_name': ids['image_name'], 202 | 'frame_id': ids['frame_id'], 203 | 'image_id': ids['video_name'] + '/' + ids['image_name'], 204 | 'local_file_path': local_file_path, 205 | 'file_name': local_file_path, 206 | } 207 | 208 | # Preprocess image and annotations 209 | image, anns, meta = self.preprocess(image, anns, meta) 210 | 211 | LOG.debug(meta) 212 | 213 | return image, anns, meta 214 | 215 | 216 | def __len__(self): 217 | return len(self.idx_to_ids) 218 | -------------------------------------------------------------------------------- /openpifpaf_detection_attributes/datasets/jaad/encoder.py: -------------------------------------------------------------------------------- 1 | from .attribute import JaadType 2 | from .. 
import generators 3 | 4 | 5 | JAAD_ATTRIBUTE_GENERATORS = { 6 | JaadType.PEDESTRIAN: generators.BoxAttributeGenerator, 7 | } 8 | -------------------------------------------------------------------------------- /openpifpaf_detection_attributes/datasets/jaad/transforms.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import logging 3 | import warnings 4 | 5 | import numpy as np 6 | import openpifpaf 7 | import PIL 8 | import scipy.ndimage 9 | import torch 10 | import torchvision 11 | 12 | from .attribute import JaadType 13 | from .. import annotation 14 | 15 | 16 | LOG = logging.getLogger(__name__) 17 | 18 | 19 | def _scale(image, anns, meta, target_w, target_h, resample, *, fast=False): 20 | """target_w and target_h as integers. 21 | Internally, resample modes in Pillow are integer aliases: 22 | PIL.Image.BILINEAR = 2 23 | PIL.Image.BICUBIC = 3 24 | """ 25 | assert resample in (0, 2, 3) 26 | meta = copy.deepcopy(meta) 27 | anns = copy.deepcopy(anns) 28 | w, h = image.size 29 | 30 | # Scale image 31 | if fast: 32 | image = image.resize((target_w, target_h), resample) 33 | else: 34 | order = resample 35 | if order == 2: 36 | order = 1 37 | 38 | im_np = np.asarray(image) 39 | with warnings.catch_warnings(): 40 | warnings.simplefilter('ignore') 41 | im_np = scipy.ndimage.zoom(im_np, (target_h / h, target_w / w, 1), 42 | order=order) 43 | image = PIL.Image.fromarray(im_np) 44 | 45 | LOG.debug('before resize = (%f, %f), after = %s', w, h, image.size) 46 | assert image.size[0] == target_w 47 | assert image.size[1] == target_h 48 | 49 | # Rescale annotations 50 | x_scale = (image.size[0] - 1) / (w - 1) 51 | y_scale = (image.size[1] - 1) / (h - 1) 52 | for ann in anns: 53 | if ann['object_type'] is JaadType.PEDESTRIAN: 54 | ann['box'][0] *= x_scale 55 | ann['box'][1] *= y_scale 56 | ann['box'][2] *= x_scale 57 | ann['box'][3] *= y_scale 58 | ann['center'][0] *= x_scale 59 | ann['center'][1] *= y_scale 60 | ann['width'] *= x_scale 61 | ann['height'] *= y_scale 62 | 63 | # Adjust meta 64 | scale_factors = np.array((x_scale, y_scale)) 65 | LOG.debug('meta before resize: %s', meta) 66 | meta['offset'] *= scale_factors 67 | meta['scale'] *= scale_factors 68 | meta['valid_area'][:2] *= scale_factors 69 | meta['valid_area'][2:] *= scale_factors 70 | LOG.debug('meta after resize: %s', meta) 71 | 72 | return image, anns, meta 73 | 74 | 75 | class NormalizeAnnotations(openpifpaf.transforms.Preprocess): 76 | @staticmethod 77 | def normalize_annotations(anns): 78 | anns = copy.deepcopy(anns) 79 | 80 | for ann in anns: 81 | if isinstance(ann, annotation.AnnotationAttr): 82 | # Already converted to an annotation type 83 | continue 84 | 85 | if ann['object_type'] is JaadType.PEDESTRIAN: 86 | ann['box'] = np.asarray(ann['box'], dtype=np.float32) 87 | ann['center'] = np.asarray(ann['center'], dtype=np.float32) 88 | 89 | return anns 90 | 91 | 92 | def __call__(self, image, anns, meta): 93 | anns = self.normalize_annotations(anns) 94 | if meta is None: 95 | meta = {} 96 | 97 | # fill meta with defaults if not already present 98 | w, h = image.size 99 | meta_from_image = { 100 | 'offset': np.array((0.0, 0.0)), 101 | 'scale': np.array((1.0, 1.0)), 102 | 'rotation': {'angle': 0.0, 'width': None, 'height': None}, 103 | 'valid_area': np.array((0.0, 0.0, w - 1, h - 1)), 104 | 'hflip': False, 105 | 'width_height': np.array((w, h)), 106 | } 107 | for k, v in meta_from_image.items(): 108 | if k not in meta: 109 | meta[k] = v 110 | 111 | return image, anns, meta 112 | 113 | 114 | class
RescaleAbsolute(openpifpaf.transforms.Preprocess): 115 | def __init__(self, long_edge, *, fast=False, resample=PIL.Image.BICUBIC): 116 | self.long_edge = long_edge 117 | self.fast = fast 118 | self.resample = resample 119 | 120 | 121 | def __call__(self, image, anns, meta): 122 | w, h = image.size 123 | this_long_edge = self.long_edge 124 | if isinstance(this_long_edge, (tuple, list)): 125 | this_long_edge = torch.randint( 126 | int(this_long_edge[0]), 127 | int(this_long_edge[1]), (1,) 128 | ).item() 129 | 130 | s = this_long_edge / max(h, w) 131 | if h > w: 132 | target_w, target_h = int(w * s), int(this_long_edge) 133 | else: 134 | target_w, target_h = int(this_long_edge), int(h * s) 135 | return _scale(image, anns, meta, target_w, target_h, 136 | self.resample, fast=self.fast) 137 | 138 | 139 | class CropTopOut(openpifpaf.transforms.Preprocess): 140 | def __init__(self, top_ratio, height_stride=None): 141 | self.top_ratio = top_ratio 142 | self.height_stride = height_stride 143 | 144 | 145 | def __call__(self, image, anns, meta): 146 | meta = copy.deepcopy(meta) 147 | anns = copy.deepcopy(anns) 148 | original_valid_area = meta['valid_area'].copy() 149 | 150 | w, h = image.size 151 | y_offset = int(h * self.top_ratio) 152 | if self.height_stride is not None: 153 | new_h = h - y_offset 154 | new_h = self.height_stride * round((new_h-1)/self.height_stride) + 1 155 | y_offset = h - new_h 156 | LOG.debug('top crop offset %d', y_offset) 157 | ltrb = (0, y_offset, w, h) 158 | image = image.crop(ltrb) 159 | 160 | # Shift annotations 161 | for ann in anns: 162 | if ann['object_type'] is JaadType.PEDESTRIAN: 163 | ann['box'][1] -= y_offset 164 | ann['center'][1] -= y_offset 165 | 166 | ltrb = np.array(ltrb) 167 | meta['offset'] += ltrb[:2] 168 | 169 | new_wh = image.size 170 | LOG.debug('valid area before crop of %s: %s', ltrb, original_valid_area) 171 | # Process crops from left and top 172 | meta['valid_area'][:2] = np.maximum(0.0, original_valid_area[:2] - ltrb[:2]) 173 | # Process crops from right and bottom 174 | new_rb_corner = original_valid_area[:2] + original_valid_area[2:] - ltrb[:2] 175 | new_rb_corner = np.maximum(0.0, new_rb_corner) 176 | new_rb_corner = np.minimum(new_wh, new_rb_corner) 177 | meta['valid_area'][2:] = new_rb_corner - meta['valid_area'][:2] 178 | LOG.debug('valid area after crop: %s', meta['valid_area']) 179 | 180 | return image, anns, meta 181 | 182 | 183 | class ZoomInOrOut(openpifpaf.transforms.Preprocess): 184 | def __init__(self, scale_range=(0.95, 1.05), *, fast=False, 185 | resample=PIL.Image.BICUBIC): 186 | self.scale_range = scale_range 187 | self.fast = fast 188 | self.resample = resample 189 | 190 | 191 | def __call__(self, image, anns, meta): 192 | w, h = image.size 193 | scale_factor = ( 194 | self.scale_range[0] + 195 | torch.rand(1).item() * (self.scale_range[1] - self.scale_range[0]) 196 | ) 197 | new_w, new_h = round(w * scale_factor), round(h * scale_factor) 198 | image, anns, meta = _scale(image, anns, meta, new_w, new_h, 199 | self.resample, fast=self.fast) 200 | 201 | if scale_factor < 1.0: # pad image to original size 202 | x_offset = int(torch.randint(0, w - new_w + 1, (1,)).item()) 203 | y_offset = int(torch.randint(0, h - new_h + 1, (1,)).item()) 204 | ltrb = (x_offset, y_offset, w - new_w - x_offset, h - new_h - y_offset) 205 | image = torchvision.transforms.functional.pad( 206 | image, ltrb, fill=(124, 116, 104)) 207 | 208 | # Shift annotations 209 | for ann in anns: 210 | if ann['object_type'] is JaadType.PEDESTRIAN: 211 | ann['box'][0] += 
x_offset 212 | ann['box'][1] += y_offset 213 | ann['center'][0] += x_offset 214 | ann['center'][1] += y_offset 215 | ltrb = np.array(ltrb) 216 | meta['offset'] -= ltrb[:2] 217 | LOG.debug('valid area before pad with %s: %s', ltrb, meta['valid_area']) 218 | meta['valid_area'][:2] += ltrb[:2] 219 | LOG.debug('valid area after pad: %s', meta['valid_area']) 220 | 221 | elif scale_factor > 1.0: # crop image to original size 222 | x_offset = int(torch.randint(0, new_w - w + 1, (1,)).item()) 223 | y_offset = int(torch.randint(0, new_h - h + 1, (1,)).item()) 224 | ltrb = (x_offset, y_offset, x_offset + w, y_offset + h) 225 | image = image.crop(ltrb) 226 | 227 | # Shift and crop annotations 228 | for ann in anns: 229 | if ann['object_type'] is JaadType.PEDESTRIAN: 230 | ann['box'][0] -= x_offset 231 | ann['box'][1] -= y_offset 232 | ann['center'][0] -= x_offset 233 | ann['center'][1] -= y_offset 234 | if ann['box'][0] < 0: 235 | max_x = ann['box'][0] + ann['box'][2] 236 | ann['box'][0] = 0 237 | ann['box'][2] = max_x 238 | ann['center'][0] = .5*max_x 239 | ann['width'] = max_x 240 | if ann['box'][1] < 0: 241 | max_y = ann['box'][1] + ann['box'][3] 242 | ann['box'][1] = 0 243 | ann['box'][3] = max_y 244 | ann['center'][1] = .5*max_y 245 | ann['height'] = max_y 246 | if ann['box'][0] + ann['box'][2] > w - 1: 247 | new_width = w - 1 - ann['box'][0] 248 | ann['box'][2] = new_width 249 | ann['center'][0] = ann['box'][0] + .5*new_width 250 | ann['width'] = new_width 251 | if ann['box'][1] + ann['box'][3] > h - 1: 252 | new_height = h - 1 - ann['box'][1] 253 | ann['box'][3] = new_height 254 | ann['center'][1] = ann['box'][1] + .5*new_height 255 | ann['height'] = new_height 256 | # Remove annotation if out of bound 257 | anns = [ann for ann in anns if not ( 258 | ann['object_type'] is JaadType.PEDESTRIAN 259 | and ((ann['width'] < 5) or (ann['height'] < 5)) 260 | )] 261 | 262 | ltrb = np.array(ltrb) 263 | meta['offset'] += ltrb[:2] 264 | new_wh = image.size 265 | original_valid_area = meta['valid_area'].copy() 266 | LOG.debug('valid area before crop of %s: %s', ltrb, original_valid_area) 267 | # Process crops from left and top 268 | meta['valid_area'][:2] = np.maximum(0.0, original_valid_area[:2] - ltrb[:2]) 269 | # Process crops from right and bottom 270 | new_rb_corner = original_valid_area[:2] + original_valid_area[2:] - ltrb[:2] 271 | new_rb_corner = np.maximum(0.0, new_rb_corner) 272 | new_rb_corner = np.minimum(new_wh, new_rb_corner) 273 | meta['valid_area'][2:] = new_rb_corner - meta['valid_area'][:2] 274 | LOG.debug('valid area after crop: %s', meta['valid_area']) 275 | 276 | return image, anns, meta 277 | 278 | 279 | class HFlip(openpifpaf.transforms.Preprocess): 280 | def __call__(self, image, anns, meta): 281 | meta = copy.deepcopy(meta) 282 | anns = copy.deepcopy(anns) 283 | 284 | w, _ = image.size 285 | image = image.transpose(PIL.Image.FLIP_LEFT_RIGHT) 286 | for ann in anns: 287 | if ann['object_type'] is JaadType.PEDESTRIAN: 288 | ann['box'][0] = -(ann['box'][0] + ann['box'][2]) - 1.0 + w 289 | ann['center'][0] = -ann['center'][0] - 1.0 + w 290 | ann['bag_left_side'], ann['bag_right_side'] = ( 291 | ann['bag_right_side'], ann['bag_left_side']) 292 | ann['pose_left'], ann['pose_right'] = ( 293 | ann['pose_right'], ann['pose_left']) 294 | 295 | assert meta['hflip'] is False 296 | meta['hflip'] = True 297 | meta['valid_area'][0] = -(meta['valid_area'][0] + meta['valid_area'][2]) + w 298 | 299 | return image, anns, meta 300 | 301 | 302 | class ToAnnotations(openpifpaf.transforms.Preprocess): 303 
| def __init__(self, object_annotations): 304 | self.object_annotations = object_annotations 305 | 306 | 307 | def __call__(self, image, anns, meta): 308 | anns = [ 309 | self.object_annotations[ann['object_type']](**ann) 310 | for ann in anns 311 | ] 312 | return image, anns, meta 313 | 314 | 315 | def replaceNormalization(compose_transform): 316 | new_preprocess_list = [] 317 | for op in compose_transform.preprocess_list: 318 | if isinstance(op, openpifpaf.transforms.NormalizeAnnotations): 319 | new_preprocess_list.append(NormalizeAnnotations()) 320 | elif isinstance(op, openpifpaf.transforms.Compose): 321 | new_preprocess_list.append(replaceNormalization(op)) 322 | else: 323 | new_preprocess_list.append(op) 324 | return openpifpaf.transforms.Compose(new_preprocess_list) 325 | 326 | 327 | TRAIN_TRANSFORM = replaceNormalization(openpifpaf.transforms.TRAIN_TRANSFORM) 328 | EVAL_TRANSFORM = replaceNormalization(openpifpaf.transforms.EVAL_TRANSFORM) 329 | -------------------------------------------------------------------------------- /openpifpaf_detection_attributes/datasets/metrics.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import json 3 | import logging 4 | import math 5 | from typing import List 6 | import zipfile 7 | 8 | import openpifpaf 9 | 10 | from .headmeta import AttributeMeta 11 | 12 | 13 | LOG = logging.getLogger(__name__) 14 | 15 | 16 | def compute_iou(pred_c, pred_w, pred_h, gt_c, gt_w, gt_h): 17 | inter_box = [ 18 | max(pred_c[0] - .5*pred_w, gt_c[0] - .5*gt_w), 19 | max(pred_c[1] - .5*pred_h, gt_c[1] - .5*gt_h), 20 | min(pred_c[0] + .5*pred_w, gt_c[0] + .5*gt_w), 21 | min(pred_c[1] + .5*pred_h, gt_c[1] + .5*gt_h) 22 | ] 23 | inter_area = ( 24 | max(0., inter_box[2] - inter_box[0]) 25 | * max(0., inter_box[3] - inter_box[1]) 26 | ) 27 | pred_area = pred_w * pred_h 28 | gt_area = gt_w * gt_h 29 | iou = ( 30 | inter_area / (pred_area + gt_area - inter_area) 31 | if pred_area + gt_area - inter_area != 0 else 0. 32 | ) 33 | return iou 34 | 35 | 36 | def compute_ap(stats): 37 | tps = [tp for _, tp in sorted(zip(stats['score'], 38 | stats['tp']), 39 | key=lambda pair: pair[0], 40 | reverse=True)] 41 | fps = [fp for _, fp in sorted(zip(stats['score'], 42 | stats['fp']), 43 | key=lambda pair: pair[0], 44 | reverse=True)] 45 | cumsum = 0 46 | for idx, val in enumerate(tps): 47 | tps[idx] += cumsum 48 | cumsum += val 49 | cumsum = 0 50 | for idx, val in enumerate(fps): 51 | fps[idx] += cumsum 52 | cumsum += val 53 | recs = tps[:] 54 | for idx, val in enumerate(tps): 55 | recs[idx] = ( 56 | float(tps[idx]) / stats['n_gt'] 57 | if stats['n_gt'] != 0 else 0. 58 | ) 59 | precs = tps[:] 60 | for idx, val in enumerate(tps): 61 | precs[idx] = ( 62 | float(tps[idx]) / (tps[idx] + fps[idx]) 63 | if tps[idx] + fps[idx] != 0 else 0. 
64 | ) 65 | return average_precision(recs, precs) 66 | 67 | 68 | def average_precision(rec, prec): 69 | rec.insert(0, 0.0) # insert 0.0 at beginning of list 70 | rec.append(1.0) # insert 1.0 at end of list 71 | mrec = rec[:] 72 | prec.insert(0, 0.0) # insert 0.0 at beginning of list 73 | prec.append(0.0) # insert 0.0 at end of list 74 | mpre = prec[:] 75 | for i in range(len(mpre)-2, -1, -1): 76 | mpre[i] = max(mpre[i], mpre[i+1]) 77 | i_list = [] 78 | for i in range(1, len(mrec)): 79 | if mrec[i] != mrec[i-1]: 80 | i_list.append(i) 81 | ap = 0.0 82 | for i in i_list: 83 | ap += ((mrec[i]-mrec[i-1])*mpre[i]) 84 | return ap 85 | 86 | 87 | class InstanceDetection(openpifpaf.metric.base.Base): 88 | """Compute detection metrics from all detected instances for a list of 89 | attributes. 90 | 91 | Args: 92 | attribute_metas (List[AttributeMeta]): list of meta information about 93 | attributes. 94 | """ 95 | 96 | def __init__(self, attribute_metas: List[AttributeMeta]): 97 | self.attribute_metas = [am for am in attribute_metas 98 | if ((am.attribute == 'confidence') 99 | or (am.group != 'detection'))] 100 | assert len(self.attribute_metas) > 0 101 | 102 | self.det_stats = {} 103 | for att_meta in self.attribute_metas: 104 | if att_meta.is_classification: 105 | n_classes = max(att_meta.n_channels, 2) 106 | else: 107 | n_classes = 10 108 | self.det_stats[att_meta.attribute] = {'n_classes': n_classes} 109 | for cls in range(n_classes): 110 | self.det_stats[att_meta.attribute][cls] = { 111 | 'n_gt': 0, 'score': [], 'tp': [], 'fp': []} 112 | self.predictions = {} 113 | 114 | 115 | def accumulate(self, predictions, image_meta, *, ground_truth=None): 116 | # Store predictions for writing to file 117 | pred_data = [] 118 | for pred in predictions: 119 | pred_data.append(pred.json_data()) 120 | self.predictions[image_meta['image_id']] = pred_data 121 | 122 | # Compute metrics 123 | for att_meta in self.attribute_metas: 124 | self.accumulate_attribute(att_meta, predictions, image_meta, 125 | ground_truth=ground_truth) 126 | 127 | 128 | def accumulate_attribute(self, attribute_meta, predictions, image_meta, *, 129 | ground_truth=None): 130 | for cls in range(self.det_stats[attribute_meta.attribute]['n_classes']): 131 | det_stats = self.det_stats[attribute_meta.attribute][cls] 132 | 133 | # Initialize ground truths 134 | gt_match = {} 135 | for gt in ground_truth: 136 | if ( 137 | gt.ignore_eval 138 | or (gt.attributes[attribute_meta.attribute] is None) 139 | or (not attribute_meta.is_classification) 140 | or (int(gt.attributes[attribute_meta.attribute]) == cls) 141 | ): 142 | gt_match[gt.id] = False 143 | if ( 144 | (not gt.ignore_eval) 145 | and (gt.attributes[attribute_meta.attribute] is not None) 146 | ): 147 | det_stats['n_gt'] += 1 148 | 149 | # Rank predictions based on confidences 150 | ranked_preds = [] 151 | for pred in predictions: 152 | if ( 153 | (attribute_meta.attribute in pred.attributes) 154 | and (pred.attributes[attribute_meta.attribute] is not None) 155 | ): 156 | rpred = copy.deepcopy(pred) 157 | pred_score = pred.attributes[attribute_meta.attribute] 158 | pred_conf = pred.attributes['confidence'] 159 | if ( 160 | (attribute_meta.attribute == 'confidence') 161 | or (not attribute_meta.is_classification) 162 | ): 163 | rpred.attributes['score'] = pred_conf 164 | elif ( 165 | attribute_meta.is_classification 166 | and (attribute_meta.n_channels == 1) 167 | ): 168 | rpred.attributes['score'] = ( 169 | (cls*pred_score + (1-cls)*(1.-pred_score)) 170 | * pred_conf 171 | ) 172 | elif (
attribute_meta.is_classification 174 | and (attribute_meta.n_channels > 1) 175 | ): 176 | rpred.attributes['score'] = pred_score[cls] * pred_conf 177 | ranked_preds.append(rpred) 178 | ranked_preds.sort(key=lambda x:x.attributes['score'], reverse=True) 179 | 180 | # Match predictions with closest ground truths 181 | for pred in ranked_preds: 182 | max_iou = -1. 183 | match = None 184 | for gt in ground_truth: 185 | if ( 186 | (gt.id in gt_match) 187 | and ('width' in pred.attributes) 188 | and ('height' in pred.attributes) 189 | ): 190 | iou = compute_iou(pred.attributes['center'], pred.attributes['width'], 191 | pred.attributes['height'], 192 | gt.attributes['center'], gt.attributes['width'], 193 | gt.attributes['height']) 194 | else: 195 | iou = 0. 196 | if (iou > 0.5) and (iou >= max_iou): 197 | if ( 198 | (gt.attributes[attribute_meta.attribute] is None) 199 | or attribute_meta.is_classification 200 | or (abs(gt.attributes[attribute_meta.attribute] 201 | -pred.attributes[attribute_meta.attribute]) <= (cls+1)*.5) 202 | ): 203 | max_iou = iou 204 | match = gt 205 | 206 | # Classify predictions as True Positives or False Positives 207 | if match is not None: 208 | if ( 209 | (not match.ignore_eval) 210 | and (match.attributes[attribute_meta.attribute] is not None) 211 | ): 212 | if not gt_match[match.id]: 213 | # True positive 214 | det_stats['score'].append(pred.attributes['score']) 215 | det_stats['tp'].append(1) 216 | det_stats['fp'].append(0) 217 | 218 | gt_match[match.id] = True 219 | else: 220 | # False positive (multiple detections) 221 | det_stats['score'].append(pred.attributes['score']) 222 | det_stats['tp'].append(0) 223 | det_stats['fp'].append(1) 224 | else: 225 | # Ignore instance 226 | pass 227 | else: 228 | # False positive 229 | det_stats['score'].append(pred.attributes['score']) 230 | det_stats['tp'].append(0) 231 | det_stats['fp'].append(1) 232 | 233 | 234 | def stats(self): 235 | text_labels = [] 236 | stats = [] 237 | 238 | att_aps = [] 239 | for att_meta in self.attribute_metas: 240 | cls_aps = [] 241 | for cls in range(self.det_stats[att_meta.attribute]['n_classes']): 242 | cls_ap = compute_ap(self.det_stats[att_meta.attribute][cls]) 243 | cls_aps.append(cls_ap) 244 | if att_meta.attribute == 'confidence': 245 | text_labels.append('detection_AP') 246 | stats.append(cls_aps[1]) 247 | att_aps.append(cls_aps[1]) 248 | LOG.info('detection AP = {}'.format(cls_aps[1]*100)) 249 | else: 250 | text_labels.append(att_meta.attribute + '_AP') 251 | att_ap = sum(cls_aps) / len(cls_aps) 252 | stats.append(att_ap) 253 | att_aps.append(att_ap) 254 | LOG.info('{} AP = {}'.format(att_meta.attribute, att_ap*100)) 255 | text_labels.append('attribute_mAP') 256 | map = sum(att_aps) / len(att_aps) 257 | stats.append(map) 258 | LOG.info('attribute mAP = {}'.format(map*100)) 259 | 260 | data = { 261 | 'text_labels': text_labels, 262 | 'stats': stats, 263 | } 264 | return data 265 | 266 | 267 | def write_predictions(self, filename, *, additional_data=None): 268 | with open(filename + '.pred.json', 'w') as f: 269 | json.dump(self.predictions, f) 270 | LOG.info('wrote %s.pred.json', filename) 271 | with zipfile.ZipFile(filename + '.zip', 'w') as myzip: 272 | myzip.write(filename + '.pred.json', arcname='predictions.json') 273 | LOG.info('wrote %s.zip', filename) 274 | 275 | if additional_data: 276 | with open(filename + '.pred_meta.json', 'w') as f: 277 | json.dump(additional_data, f) 278 | LOG.info('wrote %s.pred_meta.json', filename) 279 |
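# Editor's sketch: a hedged sanity check of the helpers above, not part of
# the original module. Identical boxes give an IoU of 1, and a single
# correctly ranked true positive over one ground truth gives AP = 1:
#
#     >>> compute_iou([2., 2.], 2., 2., [2., 2.], 2., 2.)
#     1.0
#     >>> average_precision([1.0], [1.0])
#     1.0
#
# compute_ap builds exactly these recall/precision lists from the accumulated
# per-class 'score'/'tp'/'fp'/'n_gt' statistics before calling
# average_precision.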
-------------------------------------------------------------------------------- /openpifpaf_detection_attributes/datasets/painter.py: -------------------------------------------------------------------------------- 1 | import openpifpaf 2 | 3 | 4 | class BoxPainter(openpifpaf.show.DetectionPainter): 5 | """Painter for bounding boxes of detected instances. 6 | 7 | Args: 8 | xy_scale (float): Scale factor for display. 9 | """ 10 | 11 | def __init__(self, *, xy_scale: float = 1.0): 12 | super().__init__(xy_scale=xy_scale) 13 | 14 | 15 | def annotation(self, ax, ann, *, color=None, text=None, subtext=None): 16 | assert 'center' in ann.attributes 17 | assert 'width' in ann.attributes 18 | assert 'height' in ann.attributes 19 | anndet = openpifpaf.annotation.AnnotationDet([]).set(0, 0., 20 | [ann.attributes['center'][0]-.5*ann.attributes['width'], 21 | ann.attributes['center'][1]-.5*ann.attributes['height'], 22 | ann.attributes['width'], ann.attributes['height']]) 23 | 24 | if text is None: 25 | text = ann.object_type.name 26 | if subtext is None: 27 | if getattr(ann, 'id', None): # ground truth annotation 28 | subtext = ann.id 29 | elif 'confidence' in ann.attributes: # prediction 30 | subtext = '{:.0%}'.format(ann.attributes['confidence']) 31 | 32 | super().annotation(ax, anndet, color=color, text=text, subtext=subtext) 33 | -------------------------------------------------------------------------------- /openpifpaf_detection_attributes/datasets/sampler.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class RegularSubSampler(torch.utils.data.sampler.Sampler[int]): 5 | """Subsampler for video datasets. 6 | 7 | Images are subsampled with a regular step within each subepoch. 8 | Each epoch on the sampler (subepoch of the full dataset) corresponds to a 9 | different subset of the dataset, until all examples are seen. 10 | 11 | Args: 12 | data_size (int): Size of dataset to subsample. 13 | subepochs (int): Number of subepochs corresponding to the full dataset. 14 | shuffle (bool): Randomize order of examples.
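        Example (editor's sketch): with data_size=6 and subepochs=2, the
        first subepoch yields indices [0, 2, 4] and the second [1, 3, 5]
        (before shuffling), so two subepochs together cover the dataset once.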
15 | """ 16 | 17 | def __init__(self, 18 | data_size: int, 19 | subepochs: int = 1, 20 | shuffle: bool = False): 21 | self.data_size = data_size 22 | assert subepochs > 0 23 | self.subepochs = subepochs 24 | self.shuffle = shuffle 25 | 26 | self._subepoch = None 27 | self._subepoch_idx = None 28 | 29 | 30 | @property 31 | def num_samples(self): 32 | return self.data_size // self.subepochs 33 | 34 | 35 | def _new_epoch(self): 36 | self._subepoch_idx = 0 37 | if self.shuffle: 38 | self._subepoch_order = torch.randperm(self.subepochs) 39 | else: 40 | self._subepoch_order = torch.arange(self.subepochs) 41 | 42 | 43 | def _new_subepoch(self): 44 | if self._subepoch_idx is None: 45 | self._subepoch_idx = self.subepochs 46 | self._subepoch_idx += 1 47 | if self._subepoch_idx >= self.subepochs: 48 | self._new_epoch() 49 | 50 | self._subepoch = self._subepoch_order[self._subepoch_idx] 51 | 52 | 53 | def __iter__(self): 54 | self._new_subepoch() 55 | if self.shuffle: 56 | example_order = torch.randperm(self.num_samples) 57 | else: 58 | example_order = torch.arange(self.num_samples) 59 | example_order *= self.subepochs 60 | example_order += self._subepoch 61 | yield from example_order.tolist() 62 | 63 | 64 | def __len__(self): 65 | return self.num_samples 66 | -------------------------------------------------------------------------------- /openpifpaf_detection_attributes/models/__init__.py: -------------------------------------------------------------------------------- 1 | from . import mtlfields 2 | 3 | 4 | def register(): 5 | mtlfields.register() 6 | -------------------------------------------------------------------------------- /openpifpaf_detection_attributes/models/mtl_grad_fork_norm.py: -------------------------------------------------------------------------------- 1 | from random import randrange 2 | 3 | import torch 4 | 5 | 6 | class GradientForkNormalization(torch.autograd.Function): 7 | """Autograd function for MTL gradient fork-normalization layer.""" 8 | 9 | @staticmethod 10 | def forward(ctx, input_, normalization, duplicates): 11 | ctx.normalization = normalization 12 | ctx.save_for_backward(input_) 13 | output = tuple(input_.clone() for _ in range(duplicates)) 14 | return output 15 | 16 | 17 | @staticmethod 18 | def backward(ctx, *grad_output): 19 | grad_input = None 20 | if ctx.needs_input_grad[0]: 21 | input, = ctx.saved_tensors 22 | grad_input = torch.zeros_like(input) 23 | for n in range(grad_input.shape[0]): 24 | valid_gradout = [gradout[n] for gradout in grad_output if ( 25 | (gradout is not None) 26 | and (torch.norm(gradout[n].view(-1), p=2).item() > 1e-8) 27 | )] 28 | if len(valid_gradout) == 0: 29 | continue 30 | elif ctx.normalization == 'accumulation': 31 | grad_input[n] = sum(valid_gradout) 32 | elif ctx.normalization == 'average': 33 | grad_input[n] = sum(valid_gradout) / len(valid_gradout) 34 | elif ctx.normalization == 'power': 35 | grad_input[n] = sum(valid_gradout) / (len(valid_gradout)**.5) 36 | elif ctx.normalization == 'sample': 37 | grad_input[n] = valid_gradout[randrange(len(valid_gradout))] 38 | elif ctx.normalization == 'random': 39 | weights = torch.distributions.dirichlet.Dirichlet( 40 | torch.ones(len(valid_gradout))).sample() 41 | grad_input[n] = sum([g*w.item() 42 | for g, w in zip(valid_gradout, weights)]) 43 | return grad_input, None, None 44 | 45 | 46 | class MtlGradForkNorm(torch.nn.Module): 47 | """Multi-Task Learning Gradient Fork-Normalization layer. 48 | Normalize gradients joining at a fork during backward (forward pass left 49 | unchanged). 
50 | 51 | Args: 52 | normalization (str): Type of normalization ('accumulation', 'average', 53 | 'power', 'sample', 'random'). 54 | duplicates (int): Max number of branches to normalize for. 55 | """ 56 | 57 | def __init__(self, 58 | normalization: str = 'accumulation', 59 | duplicates: int = 1): 60 | super().__init__() 61 | if normalization not in ('accumulation', 'average', 'power', 62 | 'sample', 'random'): 63 | raise ValueError( 64 | 'unsupported normalization {}'.format(normalization)) 65 | self.normalization = normalization 66 | self.duplicates = duplicates 67 | 68 | 69 | def extra_repr(self): 70 | return 'normalization={}, duplicates={}'.format( 71 | self.normalization, self.duplicates) 72 | 73 | 74 | def forward(self, input_): 75 | return GradientForkNormalization.apply( 76 | input_, self.normalization, self.duplicates) 77 | -------------------------------------------------------------------------------- /openpifpaf_detection_attributes/models/mtlfields/__init__.py: -------------------------------------------------------------------------------- 1 | import openpifpaf 2 | 3 | from .basenetwork import ForkNormNetwork 4 | from .decoder import InstanceDecoder 5 | from .head import AttributeField 6 | from .loss import AttributeLoss 7 | from ...datasets import headmeta 8 | 9 | 10 | def register(): 11 | openpifpaf.BASE_TYPES.add(ForkNormNetwork) 12 | for backbone in list(openpifpaf.BASE_FACTORIES.keys()): 13 | openpifpaf.BASE_FACTORIES['fn-'+backbone] = (lambda backbone=backbone: 14 | ForkNormNetwork('fn-'+backbone, backbone)) 15 | openpifpaf.HEADS[headmeta.AttributeMeta] = AttributeField 16 | openpifpaf.DECODERS.add(InstanceDecoder) 17 | openpifpaf.LOSSES[headmeta.AttributeMeta] = AttributeLoss 18 | -------------------------------------------------------------------------------- /openpifpaf_detection_attributes/models/mtlfields/basenetwork.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import logging 3 | 4 | import openpifpaf 5 | 6 | from .. import mtl_grad_fork_norm 7 | 8 | 9 | LOG = logging.getLogger(__name__) 10 | 11 | 12 | class ForkNormNetwork(openpifpaf.network.basenetworks.BaseNetwork): 13 | """Backbone network with fork-normalization before prediction head 14 | networks. 15 | 16 | Args: 17 | name (str): Name of network. 18 | backbone_name (str): Name of base network (without fork_normalization). 
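
    An illustrative invocation (flag values hypothetical): once the plugin
    is registered, every openpifpaf backbone is also exposed under an 'fn-'
    prefix, e.g.:

        python3 -m openpifpaf.train --basenet=fn-resnet50 \
            --fork-normalization-operation=average \
            --fork-normalization-duplicates=2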
19 | """ 20 | 21 | pifpaf_pretraining = False 22 | fork_normalization_operation = 'accumulation' 23 | fork_normalization_duplicates = 1 24 | 25 | 26 | def __init__(self, name: str, backbone_name: str): 27 | if self.pifpaf_pretraining: 28 | # Load pre-trained weights 29 | LOG.info('Loading weights from OpenPifPaf trained model') 30 | network_factory = openpifpaf.network.Factory() 31 | network_factory.checkpoint = backbone_name 32 | pretrained_net, _ = network_factory.from_checkpoint() 33 | backbone = pretrained_net.base_net 34 | else: 35 | # Build from scratch 36 | backbone = openpifpaf.BASE_FACTORIES[backbone_name]() 37 | super().__init__(name, 38 | stride=backbone.stride, 39 | out_features=backbone.out_features) 40 | self.backbone_name = backbone_name 41 | self.backbone = backbone 42 | self.fork_normalization = mtl_grad_fork_norm.MtlGradForkNorm( 43 | normalization=self.fork_normalization_operation, 44 | duplicates=self.fork_normalization_duplicates, 45 | ) 46 | 47 | 48 | @classmethod 49 | def cli(cls, parser: argparse.ArgumentParser): 50 | group = parser.add_argument_group('Fork-Normalized Network') 51 | group.add_argument('--pifpaf-pretraining', 52 | dest='pifpaf_pretraining', action='store_true', 53 | default=False, 54 | help='initialization from PifPaf pretrained model') 55 | group.add_argument('--fork-normalization-operation', 56 | default=cls.fork_normalization_operation, 57 | choices=['accumulation', 'average', 'power', 58 | 'sample', 'random'], 59 | help='operation for fork-normalization') 60 | group.add_argument('--fork-normalization-duplicates', 61 | default=cls.fork_normalization_duplicates, type=int, 62 | help='max number of branches to fork-normalize for') 63 | 64 | 65 | @classmethod 66 | def configure(cls, args: argparse.Namespace): 67 | cls.pifpaf_pretraining = args.pifpaf_pretraining 68 | cls.fork_normalization_operation = args.fork_normalization_operation 69 | cls.fork_normalization_duplicates = args.fork_normalization_duplicates 70 | 71 | 72 | def forward(self, *args): 73 | x = args[0] 74 | x = self.backbone(x) 75 | x = self.fork_normalization(x) 76 | return x 77 | -------------------------------------------------------------------------------- /openpifpaf_detection_attributes/models/mtlfields/decoder.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import logging 3 | import time 4 | from typing import List 5 | 6 | import numpy as np 7 | import openpifpaf 8 | from scipy.special import softmax 9 | import torch 10 | 11 | from .. import optics 12 | from ...datasets import annotation 13 | from ...datasets import attribute 14 | from ...datasets import headmeta 15 | 16 | 17 | LOG = logging.getLogger(__name__) 18 | 19 | 20 | class InstanceDecoder(openpifpaf.decoder.decoder.Decoder): 21 | """Decoder to convert predicted fields to sets of instance detections. 22 | 23 | Args: 24 | dataset (str): Dataset name. 25 | object_type (ObjectType): Type of object detected. 26 | attribute_metas (List[AttributeMeta]): List of meta information about 27 | predicted attributes. 
28 | """ 29 | 30 | # General 31 | dataset = None 32 | object_type = None 33 | 34 | # Clustering detections 35 | s_threshold = 0.2 36 | optics_min_cluster_size = 10 37 | optics_epsilon = 5.0 38 | optics_cluster_threshold = 0.5 39 | 40 | 41 | def __init__(self, 42 | dataset: str, 43 | object_type: attribute.ObjectType, 44 | attribute_metas: List[headmeta.AttributeMeta]): 45 | super().__init__() 46 | self.dataset = dataset 47 | self.object_type = object_type 48 | self.annotation = annotation.OBJECT_ANNOTATIONS[self.dataset][self.object_type] 49 | for meta in attribute_metas: 50 | assert meta.dataset == self.dataset 51 | assert meta.object_type is self.object_type 52 | self.attribute_metas = attribute_metas 53 | 54 | 55 | @classmethod 56 | def cli(cls, parser: argparse.ArgumentParser): 57 | group = parser.add_argument_group('InstanceDecoder') 58 | 59 | # Clustering detections 60 | group.add_argument('--decoder-s-threshold', 61 | default=cls.s_threshold, type=float, 62 | help='threshold for field S') 63 | group.add_argument('--decoder-optics-min-cluster-size', 64 | default=cls.optics_min_cluster_size, type=int, 65 | help='minimum size of clusters in OPTICS') 66 | group.add_argument('--decoder-optics-epsilon', 67 | default=cls.optics_epsilon, type=float, 68 | help='maximum radius of cluster in OPTICS') 69 | group.add_argument('--decoder-optics-cluster-threshold', 70 | default=cls.optics_cluster_threshold, type=float, 71 | help='threshold to separate clusters in OPTICS') 72 | 73 | 74 | @classmethod 75 | def configure(cls, args: argparse.Namespace): 76 | # Clustering detections 77 | cls.s_threshold = args.decoder_s_threshold 78 | cls.optics_min_cluster_size = args.decoder_optics_min_cluster_size 79 | cls.optics_epsilon = args.decoder_optics_epsilon 80 | cls.optics_cluster_threshold = args.decoder_optics_cluster_threshold 81 | 82 | 83 | @classmethod 84 | def factory(self, head_metas: List[openpifpaf.headmeta.Base]): 85 | decoders = [] 86 | for dataset in attribute.OBJECT_TYPES: 87 | for object_type in attribute.OBJECT_TYPES[dataset]: 88 | meta_list = [meta for meta in head_metas 89 | if ( 90 | isinstance(meta, headmeta.AttributeMeta) 91 | and (meta.dataset == dataset) 92 | and (meta.object_type is object_type) 93 | )] 94 | if len(meta_list) > 0: 95 | decoders.append(InstanceDecoder(dataset=dataset, 96 | object_type=object_type, 97 | attribute_metas=meta_list)) 98 | return decoders 99 | 100 | 101 | def __call__(self, fields, initial_annotations=None): 102 | start = time.perf_counter() 103 | 104 | # Conversion to numpy if needed 105 | fields = [f.numpy() if torch.is_tensor(f) else f for f in fields] 106 | 107 | # Field S 108 | s_meta = [meta for meta in self.attribute_metas 109 | if meta.attribute == 'confidence'] 110 | assert len(s_meta) == 1 111 | s_meta = s_meta[0] 112 | s_field = fields[s_meta.head_index].copy() 113 | conf_field = 1. / (1. 
114 | s_mask = conf_field > self.s_threshold 115 | 116 | # Field V 117 | v_meta = [meta for meta in self.attribute_metas 118 | if meta.attribute == 'center'] 119 | assert len(v_meta) == 1 120 | v_meta = v_meta[0] 121 | v_field = fields[v_meta.head_index].copy() 122 | if v_meta.std is not None: 123 | v_field[0] *= v_meta.std[0] 124 | v_field[1] *= v_meta.std[1] 125 | if v_meta.mean is not None: 126 | v_field[0] += v_meta.mean[0] 127 | v_field[1] += v_meta.mean[1] 128 | 129 | # OPTICS clustering 130 | point_list = [] 131 | for y in range(s_mask.shape[1]): 132 | for x in range(s_mask.shape[2]): 133 | if s_mask[0,y,x]: 134 | point = optics.Point(x, y, v_field[0,y,x], v_field[1,y,x]) 135 | point_list.append(point) 136 | 137 | clustering = optics.Optics(point_list, 138 | self.optics_min_cluster_size, 139 | self.optics_epsilon) 140 | clustering.run() 141 | clusters = clustering.cluster(self.optics_cluster_threshold) 142 | 143 | # Predictions for all instances 144 | predictions = [] 145 | for cluster in clusters: 146 | attributes = {} 147 | for meta in self.attribute_metas: 148 | att = self.cluster_vote(fields[meta.head_index], cluster, 149 | meta, conf_field) 150 | attributes[meta.attribute] = att 151 | 152 | pred = self.annotation(**attributes) 153 | predictions.append(pred) 154 | 155 | LOG.info('predictions %d, %.3fs', 156 | len(predictions), time.perf_counter()-start) 157 | 158 | return predictions 159 | 160 | 161 | def cluster_vote(self, field, cluster, meta, conf_field): 162 | field = field.copy() 163 | 164 | if meta.std is not None: 165 | field *= (meta.std if meta.n_channels == 1 166 | else np.expand_dims(meta.std, (1,2))) 167 | if meta.mean is not None: 168 | field += (meta.mean if meta.n_channels == 1 169 | else np.expand_dims(meta.mean, (1,2))) 170 | 171 | pred = np.array([0.]*field.shape[0]) 172 | norm = 0. 173 | for pt in cluster.points: 174 | if meta.is_scalar: # scalar field 175 | val = field[:, pt.y, pt.x] 176 | else: # vectorial field 177 | val = np.array([pt.x, pt.y]) + field[:, pt.y, pt.x] 178 | conf = ( 179 | conf_field[0, pt.y, pt.x] if meta.attribute != 'confidence' 180 | else 1. 181 | ) 182 | pred += val * conf 183 | norm += conf 184 | pred = pred / norm if norm != 0. else 0. 185 | 186 | if meta.is_spatial: 187 | pred *= meta.stride 188 | if meta.n_channels == 1: 189 | if meta.is_classification: 190 | pred = 1. / (1. + np.exp(-pred)) 191 | pred = pred[0] 192 | else: 193 | if meta.is_classification: 194 | pred = softmax(pred) 195 | pred = pred.tolist() 196 | 197 | return pred 198 | -------------------------------------------------------------------------------- /openpifpaf_detection_attributes/models/mtlfields/head.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import logging 3 | import math 4 | 5 | import openpifpaf 6 | import torch 7 | 8 | from ...datasets import headmeta 9 | 10 | 11 | LOG = logging.getLogger(__name__) 12 | 13 | 14 | class AttributeField(openpifpaf.network.heads.HeadNetwork): 15 | """Prediction head network for attributes. 16 | 17 | Args: 18 | meta (AttributeMeta): Meta information on attribute to predict. 19 | in_features (int): Number of features as input to the head network.
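
    A shape walk-through (illustrative numbers): with n_channels=1 and
    upsample_stride=2, the 1x1 convolution outputs 1 * 2**2 = 4 channels,
    and PixelShuffle(2) rearranges a (N, 4, H, W) map into (N, 1, 2H, 2W)
    before the border crop in forward(). When --detection-bias-prior p is
    set, the bias of the confidence head is initialized to -log((1 - p) / p)
    so that initial sigmoid outputs start near p.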
20 | """ 21 | 22 | # Convolutions 23 | detection_bias_prior = None 24 | 25 | 26 | def __init__(self, 27 | meta: headmeta.AttributeMeta, 28 | in_features: int): 29 | super().__init__(meta, in_features) 30 | 31 | LOG.debug('%s config: dataset %s, attribute %s', 32 | meta.name, meta.dataset, meta.attribute) 33 | 34 | # Convolutions 35 | out_features = meta.n_channels * meta.upsample_stride**2 36 | self.conv = torch.nn.Conv2d(in_features, out_features, 37 | kernel_size=1, padding=0, dilation=1) 38 | if ( 39 | (self.detection_bias_prior is not None) 40 | and (meta.attribute == 'confidence') 41 | ): 42 | assert ( 43 | (self.detection_bias_prior > 0.) 44 | and (self.detection_bias_prior < 1.) 45 | ) 46 | self.conv.bias.data.fill_(-math.log( 47 | (1. - self.detection_bias_prior) / self.detection_bias_prior)) 48 | 49 | # Upsampling 50 | assert meta.upsample_stride >= 1 51 | self.upsample_op = None 52 | if meta.upsample_stride > 1: 53 | self.upsample_op = torch.nn.PixelShuffle(meta.upsample_stride) 54 | 55 | 56 | @classmethod 57 | def cli(cls, parser: argparse.ArgumentParser): 58 | group = parser.add_argument_group('AttributeField') 59 | 60 | # Convolutions 61 | group.add_argument('--detection-bias-prior', 62 | default=cls.detection_bias_prior, type=float, 63 | help='prior bias for detection') 64 | 65 | 66 | @classmethod 67 | def configure(cls, args: argparse.Namespace): 68 | # Convolutions 69 | cls.detection_bias_prior = args.detection_bias_prior 70 | 71 | 72 | def forward(self, x): 73 | if isinstance(x, (list, tuple)): 74 | x = x[self.meta.head_index] 75 | x = self.conv(x) 76 | 77 | # Upsampling 78 | if self.upsample_op is not None: 79 | x = self.upsample_op(x) 80 | low_cut = (self.meta.upsample_stride - 1) // 2 81 | high_cut = math.ceil((self.meta.upsample_stride - 1) / 2.0) 82 | if self.training: 83 | # Negative axes not supported by ONNX TensorRT 84 | x = x[:, :, low_cut:-high_cut, low_cut:-high_cut] 85 | else: 86 | # The int() forces the tracer to use static shape 87 | x = x[:, :, 88 | low_cut:int(x.shape[2]) - high_cut, 89 | low_cut:int(x.shape[3]) - high_cut] 90 | 91 | return x 92 | -------------------------------------------------------------------------------- /openpifpaf_detection_attributes/models/mtlfields/loss.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import logging 3 | 4 | import torch 5 | 6 | from ...datasets import headmeta 7 | 8 | 9 | LOG = logging.getLogger(__name__) 10 | 11 | 12 | class AttributeLoss(torch.nn.Module): 13 | """Loss function for attribute fields. 14 | 15 | Args: 16 | head_meta (AttributeMeta): Meta information on attribute to predict. 
17 | """ 18 | 19 | regression_loss = 'l1' 20 | focal_gamma = 0.0 21 | 22 | 23 | def __init__(self, head_meta: headmeta.AttributeMeta): 24 | super().__init__() 25 | self.meta = head_meta 26 | self.field_names = ['{}.{}'.format(head_meta.dataset, 27 | head_meta.name)] 28 | self.previous_loss = None 29 | 30 | LOG.debug('attribute loss for %s: %s, %d channels', 31 | self.meta.attribute, 32 | ('classification' if self.meta.is_classification 33 | else 'regression'), 34 | self.meta.n_channels) 35 | 36 | 37 | @property 38 | def loss_function(self): 39 | if self.meta.is_classification: 40 | if self.meta.n_channels == 1: 41 | return torch.nn.BCEWithLogitsLoss(reduction='none') 42 | elif self.meta.n_channels > 1: 43 | loss_module = torch.nn.CrossEntropyLoss(reduction='none') 44 | return lambda x, t: loss_module( 45 | x, t.to(torch.long).squeeze(1)).unsqueeze(1) 46 | else: 47 | raise Exception('error in attribute classification format:' 48 | ' size {}'.format(self.meta.n_channels)) 49 | else: 50 | if self.regression_loss == 'l1': 51 | return torch.nn.L1Loss(reduction='none') 52 | elif self.regression_loss == 'l2': 53 | return torch.nn.MSELoss(reduction='none') 54 | elif self.regression_loss == 'smoothl1': 55 | return torch.nn.SmoothL1Loss(reduction='none') 56 | else: 57 | raise Exception('unknown attribute regression loss type {}' 58 | ''.format(self.regression_loss)) 59 | 60 | 61 | @classmethod 62 | def cli(cls, parser: argparse.ArgumentParser): 63 | group = parser.add_argument_group('AttributeLoss') 64 | group.add_argument('--attribute-regression-loss', 65 | default=cls.regression_loss, 66 | choices=['l1', 'l2', 'smoothl1'], 67 | help='type of regression loss for attributes') 68 | group.add_argument('--attribute-focal-gamma', 69 | default=cls.focal_gamma, type=float, 70 | help='use focal loss for attributes with the given' 71 | ' gamma') 72 | 73 | 74 | @classmethod 75 | def configure(cls, args: argparse.Namespace): 76 | cls.regression_loss = args.attribute_regression_loss 77 | cls.focal_gamma = args.attribute_focal_gamma 78 | 79 | 80 | def forward(self, *args): 81 | LOG.debug('loss for %s', self.field_names) 82 | 83 | x, t = args 84 | loss = self.compute_loss(x, t) 85 | 86 | if (loss is not None) and (not torch.isfinite(loss).item()): 87 | raise Exception('found a loss that is not finite: {}, prev: {}' 88 | ''.format(loss, self.previous_loss)) 89 | self.previous_loss = float(loss.item()) if loss is not None else None 90 | 91 | return [loss] 92 | 93 | 94 | def compute_loss(self, x, t): 95 | if t is None: 96 | return None 97 | 98 | c_x = x.shape[1] 99 | x = x.permute(0,2,3,1).reshape(-1, c_x) 100 | c_t = t.shape[1] 101 | t = t.permute(0,2,3,1).reshape(-1, c_t) 102 | 103 | mask = torch.isnan(t).any(1).bitwise_not_() 104 | if not torch.any(mask): 105 | return None 106 | 107 | x = x[mask, :] 108 | t = t[mask, :] 109 | loss = self.loss_function(x, t) 110 | 111 | if (self.focal_gamma != 0) and self.meta.is_classification: 112 | if self.meta.n_channels == 1: # BCE 113 | focal = torch.sigmoid(x) 114 | focal = torch.where(t < 0.5, focal, 1. - focal) 115 | else: # CE 116 | focal = torch.nn.functional.softmax(x, dim=1) 117 | focal = 1. 
- focal.gather(1, t.to(torch.long)) 118 | loss = loss * focal.pow(self.focal_gamma) 119 | 120 | loss = loss.mean() 121 | return loss 122 | -------------------------------------------------------------------------------- /openpifpaf_detection_attributes/models/optics.py: -------------------------------------------------------------------------------- 1 | """Adapted from https://github.com/ranandalon/mtl/blob/master/src/OPTICS.py 2 | 3 | BSD 2-Clause License 4 | 5 | Copyright (c) 2019, ranandalon 6 | All rights reserved. 7 | 8 | Redistribution and use in source and binary forms, with or without 9 | modification, are permitted provided that the following conditions are met: 10 | 11 | * Redistributions of source code must retain the above copyright notice, this 12 | list of conditions and the following disclaimer. 13 | 14 | * Redistributions in binary form must reproduce the above copyright notice, 15 | this list of conditions and the following disclaimer in the documentation 16 | and/or other materials provided with the distribution. 17 | 18 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
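
Usage sketch (illustrative; 'votes' is a hypothetical list of
(x, y, vx, vy) tuples built from thresholded field cells):

    points = [Point(x, y, vx, vy) for (x, y, vx, vy) in votes]
    o = Optics(points, min_cluster_size=10, epsilon=5.0)
    o.run()                    # order points by reachability
    clusters = o.cluster(0.5)  # split where reachability exceeds 0.5
    centers = [c.centroid() for c in clusters]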
28 | """ 29 | 30 | 31 | import numpy as np 32 | 33 | 34 | class Point(): 35 | def __init__(self, x, y, vx, vy): 36 | self.x = x 37 | self.y = y 38 | self.vx = vx 39 | self.vy = vy 40 | self.cx = x + vx 41 | self.cy = y + vy 42 | self.cd = None # core distance 43 | self.rd = None # reachability distance 44 | self.processed = False 45 | 46 | 47 | def distance(self, point): 48 | return np.sqrt((point.cx - self.cx)**2 + (point.cy - self.cy)**2) 49 | 50 | 51 | class Cluster: 52 | def __init__(self, points): 53 | self.points = points 54 | 55 | 56 | def centroid(self): 57 | center = [sum([p.cx for p in self.points]) / len(self.points), 58 | sum([p.cy for p in self.points]) / len(self.points)] 59 | return center 60 | 61 | 62 | class Optics(): 63 | def __init__(self, pts_list, min_cluster_size, epsilon): 64 | self.pts = pts_list 65 | self.min_cluster_size = min_cluster_size 66 | self.max_radius = epsilon 67 | 68 | 69 | def _setup(self): 70 | for p in self.pts: 71 | p.rd = None 72 | p.processed = False 73 | self.unprocessed = [p for p in self.pts] 74 | self.ordered = [] 75 | 76 | 77 | def _core_distance(self, point, neighbors): 78 | if point.cd is not None: 79 | return point.cd 80 | if len(neighbors) >= self.min_cluster_size - 1: 81 | sorted_neighbors = sorted([n.distance(point) for n in neighbors]) 82 | point.cd = sorted_neighbors[self.min_cluster_size - 2] 83 | return point.cd 84 | 85 | 86 | def _neighbors(self, point): 87 | return [p for p in self.pts 88 | if (p is not point) and (p.distance(point) <= self.max_radius)] 89 | 90 | 91 | def _processed(self, point): 92 | point.processed = True 93 | self.unprocessed.remove(point) 94 | self.ordered.append(point) 95 | 96 | 97 | def _update(self, neighbors, point, seeds): 98 | for n in neighbors: 99 | if not n.processed: 100 | new_rd = max(point.cd, point.distance(n)) 101 | if n.rd is None: 102 | n.rd = new_rd 103 | seeds.append(n) 104 | elif new_rd < n.rd: 105 | n.rd = new_rd 106 | 107 | 108 | def run(self): 109 | self._setup() 110 | while self.unprocessed: 111 | point = self.unprocessed[0] 112 | self._processed(point) 113 | point_neighbors = self._neighbors(point) 114 | if self._core_distance(point, point_neighbors) is not None: 115 | seeds = [] 116 | self._update(point_neighbors, point, seeds) 117 | while (seeds): 118 | seeds.sort(key=lambda n: n.rd) 119 | n = seeds.pop(0) 120 | self._processed(n) 121 | n_neighbors = self._neighbors(n) 122 | if self._core_distance(n, n_neighbors) is not None: 123 | self._update(n_neighbors, n, seeds) 124 | return self.ordered 125 | 126 | 127 | def cluster(self, cluster_threshold): 128 | clusters = [] 129 | separators = [] 130 | for i in range(len(self.ordered)): 131 | this_i = i 132 | next_i = i + 1 133 | this_p = self.ordered[i] 134 | if this_p.rd is not None: 135 | this_rd = this_p.rd 136 | else: 137 | this_rd = float('infinity') 138 | if this_rd > cluster_threshold: 139 | separators.append(this_i) 140 | separators.append(len(self.ordered)) 141 | 142 | for i in range(len(separators) - 1): 143 | start = separators[i] 144 | end = separators[i + 1] 145 | if end - start >= self.min_cluster_size: 146 | clusters.append(Cluster(self.ordered[start:end])) 147 | return clusters 148 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy 2 | cython 3 | matplotlib 4 | scikit-learn 5 | opencv-python 6 | torch 7 | torchvision 8 | openpifpaf>=0.13.0 9 | 
--------------------------------------------------------------------------------
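The pinned dependencies in requirements.txt can be installed from the
repository root with a typical pip invocation:

    pip3 install -r requirements.txt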