├── LICENSE ├── README.md ├── c2_processing.py ├── detect.py ├── fts_processing.py ├── main.py ├── requirements.txt ├── test.py ├── track_quality.py ├── tracker.py ├── train.py └── visualize.py /LICENSE: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | Preamble 9 | 10 | The GNU General Public License is a free, copyleft license for 11 | software and other kinds of works. 12 | 13 | The licenses for most software and other practical works are designed 14 | to take away your freedom to share and change the works. By contrast, 15 | the GNU General Public License is intended to guarantee your freedom to 16 | share and change all versions of a program--to make sure it remains free 17 | software for all its users. We, the Free Software Foundation, use the 18 | GNU General Public License for most of our software; it applies also to 19 | any other work released this way by its authors. You can apply it to 20 | your programs, too. 21 | 22 | When we speak of free software, we are referring to freedom, not 23 | price. Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | them if you wish), that you receive source code or can get it if you 26 | want it, that you can change the software or use pieces of it in new 27 | free programs, and that you know you can do these things. 28 | 29 | To protect your rights, we need to prevent others from denying you 30 | these rights or asking you to surrender the rights. Therefore, you have 31 | certain responsibilities if you distribute copies of the software, or if 32 | you modify it: responsibilities to respect the freedom of others. 
33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must pass on to the recipients the same 36 | freedoms that you received. You must make sure that they, too, receive 37 | or can get the source code. And you must show them these terms so they 38 | know their rights. 39 | 40 | Developers that use the GNU GPL protect your rights with two steps: 41 | (1) assert copyright on the software, and (2) offer you this License 42 | giving you legal permission to copy, distribute and/or modify it. 43 | 44 | For the developers' and authors' protection, the GPL clearly explains 45 | that there is no warranty for this free software. For both users' and 46 | authors' sake, the GPL requires that modified versions be marked as 47 | changed, so that their problems will not be attributed erroneously to 48 | authors of previous versions. 49 | 50 | Some devices are designed to deny users access to install or run 51 | modified versions of the software inside them, although the manufacturer 52 | can do so. This is fundamentally incompatible with the aim of 53 | protecting users' freedom to change the software. The systematic 54 | pattern of such abuse occurs in the area of products for individuals to 55 | use, which is precisely where it is most unacceptable. Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products. If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 60 | 61 | Finally, every program is threatened constantly by software patents. 62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. 
To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". "Licensees" and 82 | "recipients" may be individuals or organizations. 83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 
102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 
133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. 
You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 
196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 
229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 
256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. You need not require recipients to copy the 279 | Corresponding Source along with the object code. If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 
287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 
317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. 
If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 
386 | those licensors and authors. 387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 
421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. 
If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. The 475 | work thus licensed is called the contributor's "contributor version". 476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 
486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. "Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 
512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. 
If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 
578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 583 | 584 | Later license versions may give you additional or different 585 | permissions. However, no additional obligations are imposed on any 586 | author or copyright holder as a result of your choosing to follow a 587 | later version. 588 | 589 | 15. Disclaimer of Warranty. 590 | 591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 599 | 600 | 16. Limitation of Liability. 601 | 602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 610 | SUCH DAMAGES. 611 | 612 | 17. Interpretation of Sections 15 and 16. 
613 | 614 | If the disclaimer of warranty and limitation of liability provided 615 | above cannot be given local legal effect according to their terms, 616 | reviewing courts shall apply local law that most closely approximates 617 | an absolute waiver of all civil liability in connection with the 618 | Program, unless a warranty or assumption of liability accompanies a 619 | copy of the Program in return for a fee. 620 | 621 | END OF TERMS AND CONDITIONS 622 | 623 | How to Apply These Terms to Your New Programs 624 | 625 | If you develop a new program, and you want it to be of the greatest 626 | possible use to the public, the best way to achieve this is to make it 627 | free software which everyone can redistribute and change under these terms. 628 | 629 | To do so, attach the following notices to the program. It is safest 630 | to attach them to the start of each source file to most effectively 631 | state the exclusion of warranty; and each file should have at least 632 | the "copyright" line and a pointer to where the full notice is found. 633 | 634 | 635 | Copyright (C) 636 | 637 | This program is free software: you can redistribute it and/or modify 638 | it under the terms of the GNU General Public License as published by 639 | the Free Software Foundation, either version 3 of the License, or 640 | (at your option) any later version. 641 | 642 | This program is distributed in the hope that it will be useful, 643 | but WITHOUT ANY WARRANTY; without even the implied warranty of 644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 645 | GNU General Public License for more details. 646 | 647 | You should have received a copy of the GNU General Public License 648 | along with this program. If not, see . 649 | 650 | Also add information on how to contact you by electronic and paper mail. 
651 | 652 | If the program does terminal interaction, make it output a short 653 | notice like this when it starts in an interactive mode: 654 | 655 | Copyright (C) 656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 657 | This is free software, and you are welcome to redistribute it 658 | under certain conditions; type `show c' for details. 659 | 660 | The hypothetical commands `show w' and `show c' should show the appropriate 661 | parts of the General Public License. Of course, your program's commands 662 | might be different; for a GUI interface, you would use an "about box". 663 | 664 | You should also get your employer (if you work as a programmer) or school, 665 | if any, to sign a "copyright disclaimer" for the program, if necessary. 666 | For more information on this, and how to apply and follow the GNU GPL, see 667 | . 668 | 669 | The GNU General Public License does not permit incorporating your program 670 | into proprietary programs. If your program is a subroutine library, you 671 | may consider it more useful to permit linking proprietary applications with 672 | the library. If this is what you want to do, use the GNU Lesser General 673 | Public License instead of this License. But first, please read 674 | . 675 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # soho-comet-pipeline 2 | Automated SOHO LASCO C2 comet search 3 | 4 | ## Demo 5 | ![image](https://s2.loli.net/2022/07/27/MxfN2zrgXQDEdIc.png) 6 | *** 7 |
## Features and functions
1. Batch pre-processing
2. Comet detection
3. Sends possible comet candidates to your email address
## How to use

```bash
pip install -r requirements.txt

python main.py
```

Note:
Lines 20-39 of main.py (the user configuration section) should be configured before running the script.
27 | 28 | ## Credit 29 | The majority of the detection algorithm comes from Daniel Parrott (the author of Tycho Tracker),
30 | which I have integrated and modified, and I would like to express my gratitude to Mr. Parrott. 31 | 32 | 33 | 34 | -------------------------------------------------------------------------------- /c2_processing.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created October 2021 5 | 6 | PURPOSE: 7 | This code provides two very basic image processing algorithms that 8 | are effective for removing the excess background signal (known as the "focorona") 9 | from raw LASCO data. These are established, publicly released algorithms that are 10 | well-known to the heliophysics community. 11 | 12 | Details of the two enclosed algorithms - "median subtraction" and "running difference" 13 | are provided as notes/comments in the code below. 14 | 15 | USAGE: 16 | Place a series (3 or more) of LASCO C2 .fts files into a 'C2DATA' folder 17 | Run 18 | >>> from c2_processing import c2_process 19 | >>> c2_process( ) 20 | - or - 21 | >>> c2_process( rundiff=True ) 22 | OUTPUT: 23 | - A series of processed png files will be created in the 'C2DATA' folder 24 | 25 | *** Please note: *** 26 | There is very minimal fault-tolerance in this code. It is provided simply to 27 | illustrate the basic algorithms, but does not perform any checks for bad, missing, 28 | or incorrectly shaped data. There exist many different methods/packages/functions 29 | to achieve the same, or similar, results as shown here, with differing levels of 30 | efficiency. 31 | 32 | DEPENDENCIES / REQUIRED PACKAGES: 33 | matplotlib 34 | numpy 35 | scipy 36 | astropy 37 | datetime 38 | glob 39 | 40 | @author: Dr. 
Karl Battams 41 | """ 42 | 43 | import matplotlib.pyplot as plt 44 | import glob 45 | import numpy as np 46 | from astropy.io import fits 47 | from scipy.signal import medfilt2d 48 | import datetime 49 | import torch 50 | import torch.nn.functional as F 51 | import os 52 | 53 | ######################################################################## 54 | # Function to perform median subtraction and save resulting files as PNGs 55 | ######################################################################## 56 | 57 | def med_subtract( indata, dst_path, dateinfo, kern_sz = 25 , annotate_words=True): 58 | dpi = 80 # dots per inch 59 | width = 1024 # image size (assuming 1024x1024 images) 60 | height = 1024 # image size (assuming 1024x1024 images) 61 | kern = kern_sz # size of smoothing kernel <- User defined 62 | 63 | imgmin = -3. # MAX value for display <- User defined 64 | imgmax = 3. # MIN value for display <- User defined 65 | 66 | figsize = width / float(dpi), height / float(dpi) # Set output image size 67 | numfiles = indata.shape[2] # For loop counter 68 | 69 | """ ## Note about median subtraction ## 70 | The process of median subtraction uses medfilt2d to create a 'smoothed' version of the 71 | image and then subtracts that smoothed image from the original, effectively operating 72 | like a high-pass filter. The size of the smoothing kernel (default = 25) is variable. 73 | """ 74 | plt.ioff() # Turn off interactive plotting 75 | 76 | # Create images 77 | for i in range( numfiles ): 78 | outname = dst_path + dateinfo[i].strftime('%Y%m%d_%H%M_C2_medfilt.png') 79 | if os.path.exists(outname): 80 | continue 81 | print("Writing image %i of %i with median-subtraction" % (i+1,numfiles)) 82 | medsub = indata[:,:,i] - medfilt2d(indata[:,:,i], kernel_size=kern) # Apply filter; see note above 83 | # The following commands just set up a figure with no borders, and writes the image to a png. 
84 | fig = plt.figure(figsize=figsize) 85 | ax = fig.add_axes([0, 0, 1, 1]) 86 | ax.axis('off') 87 | ax.imshow(np.fliplr(medsub), vmin=imgmin,vmax=imgmax,cmap='gray', interpolation='nearest',origin='lower') 88 | if annotate_words: 89 | ax.annotate(dateinfo[i].strftime('%Y/%m/%d %H:%M'), xy=(10,10), xytext=(320, 1010),color='cyan', size=30, ha='right') 90 | ax.set(xlim=[0, width], ylim=[height, 0], aspect=1) 91 | fig.savefig(outname, dpi=dpi, transparent=True) 92 | plt.close() 93 | 94 | print("Median filtering process complete.") 95 | return 1 96 | 97 | 98 | ######################################################################## 99 | # Function to perform running difference and save resulting files as PNGs 100 | ######################################################################## 101 | 102 | def rdiff( indata, dateinfo ): 103 | 104 | """ ## Note about running difference ## 105 | The process of running difference involves subtracting the previous image 106 | in a sequence from the current image. This process removes static structures 107 | but emphasizes features in motion. 108 | """ 109 | 110 | #Perform running difference. See note above. 111 | rdiff = np.diff(indata, axis=2) 112 | 113 | # truncate date information as running difference "loses" the first image 114 | dateinfo = dateinfo[1:] 115 | 116 | # Write PNGS files 117 | dpi = 80 # dots per inch 118 | width = 1024 # image size (assuming 1024x1024 images) 119 | height = 1024 # image size (assuming 1024x1024 images) 120 | imgmin = -3. # MAX value for display <- User defined 121 | imgmax = 3. 
# MIN value for display <- User defined 122 | 123 | figsize = width / float(dpi), height / float(dpi) # Set output image size 124 | numfiles = rdiff.shape[2] # For loop counter 125 | 126 | plt.ioff() # Turn off interactive plotting 127 | # Create images 128 | for i in range( numfiles ): 129 | print("Writing image %i of %i with running-difference processing" % (i+1,numfiles)) 130 | 131 | # The following commands just set up a figure with no borders, and writes the image to a png. 132 | fig = plt.figure(figsize=figsize) 133 | ax = fig.add_axes([0, 0, 1, 1]) 134 | ax.axis('off') 135 | ax.imshow(np.fliplr(rdiff[:,:,i]), vmin=imgmin,vmax=imgmax,cmap='gray', interpolation='nearest',origin='lower') 136 | ax.annotate(dateinfo[i].strftime('%Y/%m/%d %H:%M'), xy=(10,10), xytext=(320, 1010),color='cyan', size=30, ha='right') 137 | outname='./C2DATA/'+dateinfo[i].strftime('%Y%m%d_%H%M_C2_rdiff.png') 138 | ax.set(xlim=[0, width], ylim=[height, 0], aspect=1) 139 | fig.savefig(outname, dpi=dpi, transparent=True) 140 | plt.close() 141 | 142 | print("Running Difference process complete.") 143 | return 1 144 | 145 | ################################################################################### 146 | # MAIN (control) routine to read/prepare data and call desired processing algorithm 147 | ################################################################################### 148 | 149 | 150 | def c2_process(fts_path, dst_path, rundiff=False, annotate_words=True, batch_mode=False): 151 | 152 | os.makedirs(dst_path) if not os.path.isdir(dst_path) else None 153 | dst_path_list = os.listdir(dst_path) 154 | if not batch_mode: 155 | for file in dst_path_list: 156 | os.remove(dst_path + file) 157 | 158 | # Gather list of file names 159 | lasco_files = sorted(glob.glob('{}*.fts'.format(fts_path))) 160 | 161 | # number of files 162 | nf = len(lasco_files) 163 | 164 | # Create 3D data cube to hold data, assuming all LASCO C2 data have 165 | # array sizes of 1024x1024 pixels. 
166 | data_cube = np.empty((1024,1024,nf)) 167 | 168 | # Create an empty list to hold date/time values, which are later used for output png filenames 169 | dates_times = [] 170 | 171 | for i in range(nf): 172 | # read image and header from FITS file 173 | img,hdr = fits.getdata(lasco_files[i], header=True) 174 | 175 | # Normalize by exposure time (a good practice for LASCO data) 176 | img = img.astype('float64') / hdr['EXPTIME'] 177 | 178 | if img.shape[0] == 512 and img.shape[1] == 512: 179 | img = torch.from_numpy(img) 180 | img = F.interpolate(img[None, None, :, :], size=(1024, 1024), mode='bilinear')[0, 0, :, :] 181 | img = img.numpy() 182 | 183 | data_cube[:, :, i] = img 184 | 185 | # Retrieve image date/time from header; store as datetime object 186 | dates_times.append( datetime.datetime.strptime( hdr['DATE-OBS']+' '+hdr['TIME-OBS'], '%Y/%m/%d %H:%M:%S.%f') ) 187 | print('processing med_subtract...') 188 | # Call processing routine. Defaults to median subtraction. 189 | if rundiff: 190 | _ = rdiff( data_cube, dates_times ) 191 | else: 192 | _ = med_subtract( data_cube, dst_path, dates_times , annotate_words=annotate_words) -------------------------------------------------------------------------------- /detect.py: -------------------------------------------------------------------------------- 1 | """ 2 | File: detect.py 3 | Note: This code generates detections from raw image data 4 | Date: 2022-02-26 5 | Author: D. 
Parrott 6 | """ 7 | 8 | import sys 9 | import pickle 10 | import os 11 | import math 12 | import random 13 | from math import sqrt 14 | from skimage import data 15 | from skimage.feature import blob_dog, blob_log, blob_doh 16 | from skimage.color import rgb2gray 17 | import numpy as np 18 | from astropy.io import fits 19 | from astropy.time import Time 20 | from scipy.signal import medfilt2d 21 | from scipy import ndimage 22 | from numba import jit 23 | from numba import njit 24 | from numba import typed 25 | from numba import types 26 | import matplotlib.pyplot as plt 27 | import cv2 28 | import faulthandler; faulthandler.enable() 29 | import multiprocessing 30 | import warnings 31 | import track_quality 32 | 33 | warnings.filterwarnings("ignore") 34 | 35 | 36 | # Define some constants. 37 | SOHO_NUM_DELTAS_AWAY=2.75 38 | SOHO_IMG_STATISTIC_OFFSET=150 39 | THRESHOLD_GRID_SIZE=32 40 | 41 | 42 | # Class definitions 43 | class MyFITSImg: 44 | pass 45 | 46 | class MyTrack: 47 | pass 48 | 49 | class MyDetect: 50 | pass 51 | 52 | class MySeq: 53 | pass 54 | 55 | # ComputeImgMedian 56 | # Input: 57 | # img: A given image to be evaluated 58 | # width: Image width in pixels 59 | # height: Image height in pixels 60 | # Output: 61 | # Image background level 62 | def ComputeImgMedian(img,width,height): 63 | off = SOHO_IMG_STATISTIC_OFFSET 64 | listValues=[] 65 | j = off 66 | while (j=width or q<0 or q>=height): 170 | a=a+1 171 | continue 172 | 173 | v1 = img[q][p] 174 | 175 | if (v1 > pk_val): 176 | pk_val=v1 177 | 178 | sum += v1 179 | a=a+1 180 | 181 | b=b+1 182 | sum -= pk_val 183 | 184 | if (sum < 0): 185 | sum = 0 186 | 187 | if (sum > 65535): 188 | sum = 65535 189 | 190 | out[j][i] = sum 191 | 192 | i = i + 1 193 | 194 | j = j + 1 195 | return out 196 | 197 | # ComputeStatistics 198 | # Input: 199 | # listValues: A list of values 200 | # Output: 201 | # val_25: The 25% order statistic 202 | # val_50: The 50% order statistic 203 | # val_75: The 75% order statistic 204 | 
#@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit 205 | #@jit(complex128[:](float64,float64[:],float64)) 206 | #@jit('Tuple((int64,int64,int64))(int64[:])',nopython=True) 207 | #@njit() 208 | @jit(nopython=True) 209 | def ComputeStatistics(listValues): 210 | 211 | val_25 = 0 212 | val_50 = 0 213 | val_75 = 0 214 | 215 | # numba does not work with numpy sort 216 | #listValues = np.sort(listValues) 217 | listValues.sort() 218 | slen = len(listValues) 219 | 220 | idx_25 = int(0.25 * slen) 221 | idx_50 = int(0.50 * slen) 222 | idx_75 = int(0.75 * slen) 223 | 224 | for i in range(slen): 225 | 226 | if (idx_25 == i): 227 | val_25 = listValues[i] 228 | 229 | if (idx_50 == i): 230 | val_50 = listValues[i] 231 | 232 | if (idx_75 == i): 233 | val_75 = listValues[i] 234 | 235 | return (val_25, val_50, val_75) 236 | 237 | # CreateThresholdMap 238 | # Input: 239 | # img: A given image to be evaluated 240 | # width: Image width in pixels 241 | # height: Image height in pixels 242 | # grid_size: Size of grid cells, in pixels 243 | # Output: 244 | # A new image, with pixels set to respective threshold values 245 | @jit(nopython=True) 246 | def CreateThresholdMap(img, width, height, grid_size): 247 | 248 | # Create a new image, initialized to 0 249 | out = np.full_like(img, 0) 250 | 251 | j=0 252 | while (j=width or q<0 or q>=height): 268 | a = a + 1 269 | continue 270 | 271 | v1 = img[q][p] 272 | listValues.append(v1) 273 | 274 | a = a + 1 275 | 276 | b = b + 1 277 | 278 | stats = ComputeStatistics(listValues) 279 | fPct25 = stats[0] 280 | fPct50 = stats[1] 281 | fPct75 = stats[2] 282 | 283 | fDelta = fPct75 - fPct25 284 | fThreshold = fPct50 + SOHO_NUM_DELTAS_AWAY * (fDelta) 285 | 286 | nThreshold = int(fThreshold + 0.5) 287 | 288 | if (nThreshold < 0): 289 | nThreshold = 0 290 | 291 | if (nThreshold > 65535): 292 | nThreshold = 65535 293 | 294 | b=0 295 | while (b=width or q<0 or q>=height): 303 | a = a + 1 304 | continue 305 | 306 | out[q][p] 
= nThreshold 307 | 308 | a = a + 1 309 | 310 | b = b + 1 311 | 312 | i = i + grid_size 313 | 314 | j = j + grid_size 315 | return out 316 | 317 | # InterpolateMap 318 | # Input: 319 | # img: A given image to evaluate 320 | # width: Image width in pixels 321 | # height: Image height in pixels 322 | # grid_size: Size of grid cells, in pixels 323 | # Output: 324 | # A modified threshold map, selecting maximum threshold from adjacent cells 325 | @jit(nopython=True) 326 | def InterpolateMap(img, width, height, grid_size): 327 | 328 | # Create a new image, initialized to 0 329 | out = np.full_like(img, 0) 330 | 331 | j=0 332 | while (j=width or q<0 or q>=height): 355 | a = a + 1 356 | continue 357 | 358 | v1 = img[q][p] 359 | 360 | if (v1 > pk_val): 361 | pk_val=v1 362 | 363 | a = a+1 364 | 365 | b = b+1 366 | 367 | out[j][i] = pk_val 368 | i = i+1 369 | j = j+1 370 | 371 | return out 372 | 373 | # FilterSinglePixels 374 | # Input: 375 | # img: A given image to be evaluated 376 | # width: Image width in pixels 377 | # height: Image height in pixels 378 | # Modifies: 379 | # Image pixels are set to 0 if there are no adjacent pixels with non-zero values 380 | @jit(nopython=True) 381 | def FilterSinglePixels(img,width,height): 382 | 383 | j=0 384 | while (j=width or q<0 or q>=height): 397 | a = a + 1 398 | continue 399 | 400 | v1 = img[q][p] 401 | 402 | if (v1 > 0): 403 | nCount = nCount + 1 404 | 405 | a = a+1 406 | 407 | b=b+1 408 | 409 | if (nCount < 2): 410 | img[j][i] = 0 411 | 412 | i = i+1 413 | j = j+1 414 | 415 | # GetBounds 416 | # Input: 417 | # img: A given image to be evaluated 418 | # width: Image width in pixels 419 | # height: Image height in pixels 420 | # i: Current x-coordinate 421 | # j: Current y-coordinate 422 | # visited: A 2d array indicating which pixels have been visited 423 | # res: A result structure comprised of counts, and the upper-left and lower-right corner locations 424 | # Output: 425 | # res: A result structure comprised of counts, and 
the upper-left and lower-right corner locations 426 | @jit(nopython=True) 427 | def GetBounds(img, width, height, i, j, visited, res): 428 | 429 | if (i<0 or i>=width or j<0 or j>=height): 430 | return res 431 | 432 | if (img[j][i] <= 0): 433 | return res 434 | 435 | if (visited[j][i] > 0): 436 | return res 437 | 438 | # Mark the pixel as having been visited 439 | visited[j][i] = 1 440 | 441 | # Increment the count of consolidated pixels 442 | res[0] = res[0] + 1 443 | 444 | if (i < res[1]): 445 | res[1] = i 446 | if (i > res[3]): 447 | res[3] = i 448 | if (j < res[2]): 449 | res[2] = j 450 | if (j > res[4]): 451 | res[4] = j 452 | 453 | res = GetBounds(img, width, height, i-1, j-1, visited, res) 454 | res = GetBounds(img, width, height, i+1, j-1, visited, res) 455 | res = GetBounds(img, width, height, i+1, j+1, visited, res) 456 | res = GetBounds(img, width, height, i-1, j+1, visited, res) 457 | 458 | res = GetBounds(img, width, height, i+0, j-1, visited, res) 459 | res = GetBounds(img, width, height, i+1, j+0, visited, res) 460 | res = GetBounds(img, width, height, i+0, j+1, visited, res) 461 | res = GetBounds(img, width, height, i-1, j+0, visited, res) 462 | 463 | return res 464 | 465 | # ComputeCentroid 466 | # Input: 467 | # img: A given image to be evaluated 468 | # width: Image width in pixels 469 | # height: Image height in pixels 470 | # x1: The X-coordinate of the upper-left bounds of the object 471 | # y1: The Y-coordinate of the upper-left bounds of the object 472 | # x2: The X-coordinate of the lower-right bounds of the object 473 | # y2: The Y-coordinate of the lower-right bounds of the object 474 | # Output: 475 | # nCtrX: The X-coordinate of the object center 476 | # nCtrY: The Y-coordinate of the object center 477 | @jit(nopython=True) 478 | def ComputeCentroid(img, width, height, x1, y1, x2, y2): 479 | 480 | fCountX = 0; 481 | fCountY = 0; 482 | fSumX = 0; 483 | fSumY = 0; 484 | 485 | j = y1 486 | while (j<=y2): 487 | 488 | i = x1 489 | while (i 
<= x2): 490 | 491 | if (i<0 or i>=width or j<0 or j>=height): 492 | i = i + 1 493 | continue 494 | 495 | v1 = img[j][i] 496 | 497 | fSumX += i * v1 498 | fCountX += v1 499 | 500 | fSumY += j * v1 501 | fCountY += v1 502 | 503 | i = i + 1 504 | 505 | j = j + 1 506 | 507 | fCtrX = fSumX / max(1, fCountX) 508 | fCtrY = fSumY / max(1, fCountY) 509 | 510 | nCtrX = int(fCtrX + 0.5) 511 | nCtrY = int(fCtrY + 0.5) 512 | 513 | if (nCtrX<0): 514 | nCtrX=0 515 | 516 | if (nCtrX>=width): 517 | nCtrX = width-1 518 | 519 | if (nCtrY<0): 520 | nCtrY=0 521 | 522 | if (nCtrY>=height): 523 | nCtrY = height-1 524 | 525 | return (nCtrX, nCtrY) 526 | 527 | # ConsolidatePixels 528 | # Input: 529 | # img: The image to evaluate 530 | # width: Image width, in pixels 531 | # height: Image height, in pixels 532 | # Output: 533 | # A new image, after having performed the consolidation routine. 534 | @jit(nopython=True) 535 | def ConsolidatePixels(img, width, height): 536 | 537 | # Create a new image, initialized to 0 538 | out = np.full_like(img, 0) 539 | 540 | # Create a map to indicate visited cells, initialized to 0 541 | visited = np.full_like(img, 0) 542 | 543 | j=0 544 | while (j 0): 555 | i = i + 1 556 | continue 557 | 558 | nCount = 0 559 | x1 = i 560 | y1 = j 561 | x2 = i 562 | y2 = j 563 | 564 | res = [nCount, x1, y1, x2, y2] 565 | 566 | res = GetBounds(img, width, height, i, j, visited, res) 567 | 568 | nCount = res[0] 569 | x1 = res[1] 570 | y1 = res[2] 571 | x2 = res[3] 572 | y2 = res[4] 573 | 574 | centroid = ComputeCentroid(img, width, height, x1, y1, x2, y2) 575 | 576 | ctr_x = centroid[0] 577 | ctr_y = centroid[1] 578 | 579 | if (nCount < 0): 580 | nCount = 0 581 | 582 | if (nCount > 65535): 583 | nCount = 65535 584 | 585 | out[ctr_y][ctr_x] = nCount 586 | 587 | i = i + 1 588 | 589 | j = j + 1 590 | 591 | return out 592 | 593 | # AllocEmptyMapList 594 | # Input: 595 | # w: Map horizontal dimensions 596 | # h: Map vertical dimensions 597 | # Output: 598 | # A map of empty 
detection lists. 599 | def AllocEmptyMapList(w, h): 600 | return [ [ [] for i in range(w) ] for j in range(h) ] 601 | 602 | # GenerateMapListDetections 603 | # Input: 604 | # img: An image to be evaluated 605 | # width: Image width, in pixels 606 | # height: Image height, in pixels 607 | # grid_size: Size of grid cells, in pixels 608 | # Output: 609 | # A map of detection lists, each populated with detections in their respective cells. 610 | def GenerateMapListDetections(img, width, height, grid_size): 611 | 612 | w = int(width / grid_size) + 1 613 | h = int(height/ grid_size) + 1 614 | 615 | map_detect_list = AllocEmptyMapList(w, h) 616 | 617 | j=0 618 | while (jtext

39 | """ 40 | 41 | 42 | def create_interest_region(answer_txt_path, input_process_path, output_path): 43 | image_size = 200 44 | font_pad_size = 25 45 | font_size = 16 46 | os.makedirs(output_path) if not os.path.isdir(output_path) else None 47 | 48 | answer_txt = open(answer_txt_path) 49 | answer_txt_lines = answer_txt.readlines() 50 | answer_txt.close() 51 | if len(answer_txt_lines) == 0: 52 | print('没有检测到彗星') 53 | return -1 54 | max_confidence = 0 55 | max_confidence_index = -1 56 | for index, line in enumerate(answer_txt_lines): 57 | line = line.split('\n')[0] 58 | if len(line) == 0: 59 | continue 60 | now_confidence = float(line.split(',')[-1]) 61 | if now_confidence > max_confidence: 62 | max_confidence = now_confidence 63 | max_confidence_index = index 64 | if max_confidence < search_threshold: 65 | print('检测出最大置信度 = {:.3f} < 当前检测阈值 = {}, 已过滤'.format(max_confidence, search_threshold)) 66 | return -1 67 | 68 | select_line = answer_txt_lines[max_confidence_index].split('\n')[0] 69 | answer = select_line.split(',') 70 | 71 | fts_x_y_dict = dict() 72 | fts_x_y_dict['fts'] = [] 73 | fts_x_y_dict['x'] = [] 74 | fts_x_y_dict['y'] = [] 75 | i = 0 76 | while i < len(answer) - 1: 77 | if answer[i] == '.': 78 | i += 1 79 | continue 80 | if 'fts' in answer[i]: 81 | fts_x_y_dict['fts'].append(answer[i].split('\\')[-1]) 82 | fts_x_y_dict['x'].append(int(answer[i+1])) 83 | fts_x_y_dict['y'].append(int(answer[i+2])) 84 | # print(fts_x_y_dict['fts'][-1], fts_x_y_dict['x'][-1], fts_x_y_dict['y'][-1]) 85 | i += 3 86 | else: 87 | raise ValueError('1') 88 | 89 | process_png_list = os.listdir(input_process_path) # './real-time data/220813_process/' 90 | process_png_list = [i for i in process_png_list if 'medfilt.png' in i] 91 | process_png_list.sort() 92 | assert len(fts_x_y_dict['fts']) == len(process_png_list) 93 | 94 | for i in tqdm(range(len(process_png_list))): 95 | process_image = Image.open(input_process_path + process_png_list[i]) 96 | process_image_np = 
np.array(process_image) 97 | process_image_pad_100 = np.uint8(np.zeros((1024 + image_size, 1024 + image_size, 4)) + 255) 98 | process_image_pad_100[image_size//2: 1024 + image_size//2, image_size//2: 1024 + image_size//2, :] = process_image_np 99 | x, y = round(fts_x_y_dict['x'][i]), round(fts_x_y_dict['y'][i]) 100 | if not (0 <= x <= 1024 and 0 <= y <= 1024): 101 | continue 102 | r = ((x - 512) ** 2 + (y - 512) ** 2) ** 0.5 103 | if r < 200: 104 | continue 105 | choose_x, choose_y = 1024 + image_size - (x + image_size // 2), y + image_size // 2 106 | image_x_y = process_image_pad_100[choose_y - image_size // 2: choose_y + image_size // 2, 107 | choose_x - image_size // 2: choose_x + image_size // 2, :] 108 | image_x_y_pad = np.uint8(np.zeros((font_pad_size + image_size, image_size, 4)) + 0) 109 | image_x_y_pad[font_pad_size: font_pad_size + image_size, :, :] = image_x_y 110 | image_x_y = image_x_y_pad 111 | image_x_y = Image.fromarray(image_x_y) 112 | image_font = ImageFont.truetype('msyh.ttc', font_size) 113 | image_draw = ImageDraw.Draw(image_x_y) 114 | # 右上角转换为左上角 x->1024-x 115 | image_information = '{}: {},{}'.format(process_png_list[i].split('_C2')[0], 1024 - x, y) 116 | image_draw.text((0, 0), image_information, font=image_font, fill="#000000") 117 | image_x_y.save('{}{}.png'.format(output_path, image_information.replace(': ', '-'))) 118 | return max_confidence 119 | 120 | 121 | def send_email(attach_file_path): 122 | 123 | msg = MIMEMultipart() 124 | msg['From'] = Header('{}'.format(from_name)) 125 | msg['To'] = Header(to_name) 126 | 127 | msg['Subject'] = Header(subject, 'utf-8') 128 | msg.attach(MIMEText(html_msg, 'html', 'utf-8')) 129 | 130 | png_file_list = os.listdir(attach_file_path) 131 | if len(png_file_list) == 0: 132 | return 133 | 134 | for png_file in png_file_list: 135 | att = MIMEText(open(attach_file_path + png_file, 'rb').read(), 'base64', 'utf-8') 136 | att["Content-Type"] = 'application/octet-stream' 137 | att["Content-Disposition"] = 
'attachment; filename="{}"'.format(png_file) 138 | msg.attach(att) 139 | 140 | try: 141 | smtpobj = smtplib.SMTP_SSL(smtp_server) 142 | smtpobj.connect(smtp_server, port_num) 143 | smtpobj.login(from_addr, password) 144 | smtpobj.sendmail(from_addr, to_addr, msg.as_string()) 145 | print("email sent successfully") 146 | except smtplib.SMTPException: 147 | print("send email failed") 148 | finally: 149 | smtpobj.quit() 150 | 151 | 152 | def search_comet_from_fts_file(fts_file_path, process_file_path, output_path): 153 | c2_process(fts_path=fts_file_path, dst_path=process_file_path, rundiff=False, annotate_words=False) # 预处理 154 | test(folder_in=fts_file_path, output_file=process_file_path + 'output.txt') # 通过fts文件计算彗星位置 155 | flag = create_interest_region(answer_txt_path=process_file_path + 'output.txt', input_process_path=process_file_path, 156 | output_path=output_path) # 将预处理结果进行彗星位置截取 157 | if flag == -1: 158 | print('没有检测到彗星或检测置信度过低,不发送邮件') 159 | else: 160 | send_email(output_path) 161 | 162 | 163 | def search_comet_from_fts_file_batch_mode(fts_file_path, output_path, batch_size): 164 | assert batch_size >= 5 165 | fts_file_list = os.listdir(fts_file_path) 166 | fts_file_list = [i for i in fts_file_list if '.fts' in i] 167 | fts_file_list.sort() 168 | comet_data = fits.getdata(fts_file_path + fts_file_list[0], header=True)[1]['DATE-OBS'].replace('/', '') 169 | for i in range(len(fts_file_list) // batch_size + 1): 170 | batch_fts_file_list = fts_file_list[i * batch_size: (i + 1) * batch_size] 171 | if len(batch_fts_file_list) < 5: 172 | continue 173 | batch_output_path = '{}/{:03d}_{}_/'.format(output_path, i + 1, comet_data) 174 | os.makedirs(batch_output_path) if not os.path.isdir(batch_output_path) else None 175 | for file in batch_fts_file_list: 176 | if not os.path.exists(batch_output_path + file): 177 | shutil.copyfile(fts_file_path + file, batch_output_path + file) 178 | c2_process(fts_path=batch_output_path, dst_path=batch_output_path, 179 | rundiff=False, 
annotate_words=False, batch_mode=True) # 预处理 180 | test(folder_in=batch_output_path, output_file=batch_output_path + 'output.txt') # 通过fts文件计算彗星位置 181 | max_confidence = create_interest_region(answer_txt_path=batch_output_path + 'output.txt', 182 | input_process_path=batch_output_path, 183 | output_path=batch_output_path) # 将预处理结果进行彗星位置截取 184 | if max_confidence == -1: 185 | shutil.rmtree(batch_output_path) 186 | else: 187 | now_files = os.listdir(batch_output_path) 188 | for file in now_files: 189 | if 'medfilt.png' in file or 'fts' in file or 'txt' in file: 190 | os.remove(batch_output_path + file) 191 | os.rename(batch_output_path, '{}{:.2f}/'.format(batch_output_path[:-1], max_confidence)) 192 | send_email('{}{:.2f}/'.format(batch_output_path[:-1], max_confidence)) 193 | 194 | 195 | def download(url, filename): 196 | try: 197 | wget.download(url, out=filename) 198 | except: 199 | print('网络不佳, 重新下载...') 200 | download(url, filename) 201 | 202 | 203 | def get_url_text(url): 204 | try: 205 | r = requests.get(url, timeout=10) 206 | r.raise_for_status() # 如果状态码不是200,产生异常 207 | r.encoding = 'utf-8' # 字符编码格式改成 utf-8 208 | text = r.text 209 | except: 210 | print('网页读取异常, 重新尝试') 211 | text = get_url_text(url) 212 | return text 213 | 214 | 215 | def download_fts_file_from_nasa(root_path, url): 216 | 217 | fts_file_list = [] 218 | url_text = get_url_text(url) 219 | url_text_lines = url_text.split('\n') 220 | for line in url_text_lines: 221 | if 'fts' in line: 222 | fts_file_list.append(line.split('href="')[-1].split('">')[0]) 223 | date = url.split('/')[-3] 224 | dst_path = root_path + date + '/' # './real-time data/220813/' 225 | os.makedirs(dst_path) if not os.path.isdir(dst_path) else None 226 | new_download_flag = False 227 | for fts_file in tqdm(fts_file_list, desc='download {} fts_file'.format(date)): 228 | if os.path.exists(dst_path + fts_file): 229 | continue 230 | download(url + fts_file, dst_path + fts_file) 231 | new_download_flag = True 232 | tmp_file_list = 
os.listdir(dst_path) 233 | tmp_file_list = [i for i in tmp_file_list if 'tmp' in i] 234 | if len(tmp_file_list) != 0: 235 | for tmp_file in tmp_file_list: 236 | os.remove(dst_path + tmp_file) 237 | return dst_path, len(fts_file_list), new_download_flag 238 | 239 | 240 | def auto_download_from_nasa(root_url, root_download_path, start_date): 241 | total_date = [] 242 | url_text = get_url_text(root_url) 243 | url_text_lines = url_text.split('\n') 244 | for line in url_text_lines: 245 | if 'folder.gif' in line: 246 | total_date.append(int(line.split('href="')[-1].split('/')[0])) 247 | last_date_fts_file_num = None 248 | for date in total_date: 249 | if date < start_date: 250 | continue 251 | date_url = root_url + str(date) + '/c2/' 252 | dst_path, last_date_fts_file_num, new_download_flag = download_fts_file_from_nasa(root_download_path, date_url) 253 | if last_date_fts_file_num >= 6 and new_download_flag: 254 | search_comet_from_fts_file(dst_path, dst_path[:-1] + '_process/', dst_path[:-1] + '_process_results/') 255 | return total_date[-1], last_date_fts_file_num 256 | 257 | 258 | def auto_search_comet(root_url, root_download_path, start_date): 259 | while 1: 260 | last_date, last_date_file_num = auto_download_from_nasa(root_url, root_download_path, start_date) 261 | start_date = last_date 262 | time.sleep(sleep_time) 263 | 264 | 265 | if __name__ == '__main__': 266 | if run_flag == 0: 267 | auto_search_comet(root_url='https://umbra.nascom.nasa.gov/pub/lasco/lastimage/level_05/', 268 | root_download_path='./real-time data/', 269 | start_date=start_date) 270 | elif run_flag == 1: 271 | search_comet_from_fts_file(fts_file_path=fts_file_path, 272 | process_file_path=process_file_path, 273 | output_path=output_path) 274 | elif run_flag == 2: 275 | search_comet_from_fts_file_batch_mode(fts_file_path=fts_file_path, 276 | output_path=output_path, 277 | batch_size=batch_size) 278 | else: 279 | raise ValueError('error run_flag') 280 | 
-------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | astropy==4.3.1 2 | matplotlib==3.4.3 3 | numba==0.54.1 4 | numpy==1.20.3 5 | opencv_python==4.5.5.64 6 | Pillow==9.2.0 7 | scikit_image==0.18.3 8 | scipy==1.7.1 9 | skimage==0.0 10 | torch==1.11.0 11 | tqdm==4.62.3 12 | -------------------------------------------------------------------------------- /test.py: -------------------------------------------------------------------------------- 1 | """ 2 | File: test.py 3 | Note: This is the main driver code to load images and generate results 4 | Date: 2022-02-26 5 | Author: D. Parrott 6 | """ 7 | 8 | 9 | 10 | import sys 11 | import pickle 12 | import os 13 | import math 14 | import random 15 | from math import sqrt 16 | from skimage import data 17 | from skimage.feature import blob_dog, blob_log, blob_doh 18 | from skimage.color import rgb2gray 19 | import numpy as np 20 | from astropy.io import fits 21 | from astropy.time import Time 22 | from scipy.signal import medfilt2d 23 | from scipy import ndimage 24 | from numba import jit 25 | from numba import njit 26 | from numba import typed 27 | from numba import types 28 | import matplotlib.pyplot as plt 29 | import cv2 30 | import faulthandler; faulthandler.enable() 31 | import multiprocessing 32 | import warnings 33 | import track_quality 34 | import detect 35 | import tracker 36 | 37 | warnings.filterwarnings("ignore") 38 | 39 | 40 | 41 | 42 | # Define some constants. 
43 | SOHO_MAX_TRACKS=250000 44 | SOHO_MAX_SEC_PER_PIXEL=140 45 | SOHO_MIN_SEC_PER_PIXEL=36 # 44 46 | SOHO_FIT_ORDER_CUTOFF=0.97 47 | SOHO_FIT_ORDER=1 48 | SOHO_NUM_TRACKS_OUTPUT=2 49 | SOHO_PCT_SAME_DETECTS=0.60 50 | THRESHOLD_GRID_SIZE=32 51 | R2D=(180 / 3.1415926535) 52 | 53 | # Class definitions 54 | class MyFITSImg: 55 | pass 56 | 57 | class MyTrack: 58 | pass 59 | 60 | class MyDetect: 61 | pass 62 | 63 | class MySeq: 64 | pass 65 | 66 | # normalizeDate 67 | # Input: A string formatted YYYY/MM/DD HH:MM:SS.sss 68 | # Output: A string formatted YYYY-MM-DD HH:MM:SS.sss 69 | def normalizeDate(date): 70 | newDate = '-'.join(str.zfill(elem,2) for elem in date.split('/')) 71 | return newDate 72 | 73 | # compute_elapsed_time_in_sec2 74 | # Input: Two time objects 75 | # Output: Elapsed time, floating point, in seconds. 76 | def compute_elapsed_time_in_sec2(dtime_a, dtime_b): 77 | delta_time = (dtime_b - dtime_a) 78 | dt_float = delta_time.to_value('sec', subfmt='float') 79 | return dt_float 80 | 81 | # Enable to see debug info 82 | DEBUG = False 83 | 84 | # GenerateResultString2 85 | # Input: 86 | # seq_full: A full image sequence 87 | # track: A given track object 88 | # subset_offset: The offset associated with the current image subset 89 | # listDateObs_full: List of DateObs for the full image sequence 90 | # Output: 91 | # A string containing the specified track information and quality score 92 | def GenerateResultString2(seq_full, track, subset_offset, listDateObs_full): 93 | 94 | num_img = len(seq_full["images"]) 95 | result = seq_full["ID"] + "," 96 | 97 | fQuality = track.fGlobalQuality 98 | 99 | slen = len(listDateObs_full) 100 | fDateObs_First = listDateObs_full[0] 101 | fDateObs_Last = listDateObs_full[slen-1] 102 | 103 | fDeltaTimeInSec = fDateObs_Last - fDateObs_First 104 | fDeltaTimeInHours = fDeltaTimeInSec / 3600.0 105 | 106 | if (fDeltaTimeInHours > 24.0 and slen>40): 107 | # This is considered a 'long' dataset (spanning more than 24 hours) 108 | # 
Compensate for the likelihood that track positions will be inaccurate beyond that time. 109 | fQuality *= 0.25 110 | 111 | for t in range(num_img): 112 | xy = tracker.ComputeTrackPosition(track, t-subset_offset, listDateObs_full[t]) 113 | x = int(xy[0]+0.5) 114 | y = int(xy[1]+0.5) 115 | imgid = seq_full["path"][t].split("/")[-1] 116 | result += imgid + "," + repr(x) + "," + repr(y) + "," 117 | 118 | result += repr(fQuality) + "\n" 119 | 120 | return result 121 | 122 | # GenerateResultString 123 | # Input: 124 | # seq_full: A full image sequence 125 | # listTracks: A list of tracks eligible for output to the result file 126 | # subset_offset: The offset associated with the current image subset 127 | # listDateObs_full: List of DateObs for the full image sequence 128 | # Output: 129 | # The requested tracks for output to the result file 130 | def GenerateResultString(seq_full, listTracks, subset_offset, listDateObs_full): 131 | 132 | slen = len(listTracks) 133 | 134 | result = "" 135 | 136 | for t in range(slen): 137 | if (t>=SOHO_NUM_TRACKS_OUTPUT): 138 | break 139 | 140 | cur_track = listTracks[t] 141 | result += GenerateResultString2(seq_full, cur_track, subset_offset, listDateObs_full) 142 | 143 | return result 144 | 145 | # explore_sequence 146 | # Input: 147 | # seq_full: The full image sequence 148 | # seq: The current image sequence 149 | # subset_offset: The offset associated with the current image subset 150 | # listDateObs_full: List of DateObs for the full image sequence 151 | # Output: 152 | # A result string to be output to the result file 153 | def explore_sequence(seq_full, seq, subset_offset, listDateObs_full): 154 | """ 155 | Extract the comets from a given sequence 156 | """ 157 | if DEBUG: 158 | print("Sequence: " + seq["ID"]) 159 | # number of images 160 | numImg = len(seq["path"]) 161 | if DEBUG: 162 | print("Number of images: "+str(numImg)) 163 | 164 | img_list = [] 165 | 166 | width = 1024 167 | height = 1024 168 | 169 | # Create 3D data cube 
to hold data, assuming all data have 170 | # array sizes of 1024x1024 pixels. 171 | data_cube = np.empty((width,height,numImg)) 172 | 173 | timestamps = [0] * numImg 174 | 175 | 176 | depoch = Time('1970-01-01T00:00:00.0', scale='utc', format='isot') 177 | 178 | for i in range(numImg): 179 | 180 | 181 | img = MyFITSImg() 182 | img.fullpath = seq["path"][i] 183 | img.hdulist = fits.open(img.fullpath) 184 | img.width = img.hdulist[0].header['NAXIS1'] 185 | img.height = img.hdulist[0].header['NAXIS2'] 186 | img.datestring = img.hdulist[0].header['DATE-OBS'] 187 | img.timestring = img.hdulist[0].header['TIME-OBS'] 188 | my_date_str = img.datestring + ' ' + img.timestring 189 | newDate = normalizeDate(my_date_str) 190 | dtime = Time(newDate, scale='utc', format='iso') 191 | img.fDateObs = compute_elapsed_time_in_sec2(depoch, dtime) 192 | img.data = img.hdulist[0].data 193 | img.img_detections = [] 194 | img.map_detections = [] 195 | img_list.append(img) 196 | 197 | #ymd = epoch_seconds_to_gregorian_date(img.fDateObs) 198 | #print(repr(i)+") "+repr(ymd[0])+"-"+repr(ymd[1])+"-"+repr(ymd[2])) 199 | #print("subset_offset="+repr(subset_offset)) 200 | 201 | #if (i>0): 202 | #delta_time = img_list[i].fDateObs - img_list[i-1].fDateObs 203 | #print("delta_time="+repr(delta_time)) 204 | 205 | for i in range(numImg): 206 | # read image and header from FITS file 207 | img, hdr = fits.getdata(seq["path"][i], header=True) 208 | 209 | # Collect timestamps 210 | timestamps[i] = hdr["MID_TIME"] 211 | 212 | # Store array into datacube (3D array) 213 | data_cube[:,:,i] = img 214 | 215 | # Floating point not desired -- convert to integer units 216 | data_cube = data_cube.astype(int) 217 | 218 | # Compute a normalization factor by which the images will be multiplied. 
219 | normalizationFactor = detect.ComputeNormalizationFactor(data_cube, width, height) 220 | 221 | # Scale each image by the normalization factor 222 | detect.ScaleImages(data_cube, width, height, normalizationFactor) 223 | 224 | # Compute an average stack of the images. 225 | StackImgZeroMotion = np.mean(data_cube, axis=2) 226 | 227 | # Subtract the average stack from each image. 228 | detect.SubtractStackFromImages(data_cube, StackImgZeroMotion, width, height) 229 | 230 | # Truncate each value to 0 (no negatives) 231 | data_cube[data_cube < 0] = 0 232 | 233 | # Generate detections from each image 234 | detect.CreateDetections(data_cube, img_list) 235 | 236 | # Generate tracks from the detections 237 | listTracks = tracker.CreateTracks(img_list, width, height, THRESHOLD_GRID_SIZE) 238 | 239 | listDateObs=[] 240 | listImgDetections=[] 241 | slen = len(img_list) 242 | for t in range(slen): 243 | listDateObs.append(img_list[t].fDateObs) 244 | listImgDetections.append(img_list[t].img_detections) 245 | 246 | #print("(BEFORE) Num tracks="+repr(len(listTracks))) 247 | 248 | if (len(listTracks)>SOHO_MAX_TRACKS): 249 | return "" 250 | 251 | # Reduce tracks 252 | listTracks = tracker.ReduceTracks(listTracks, img_list, listDateObs, listImgDetections, width, height) 253 | listTracks = tracker.CullToTopNTracks(listTracks, 50) 254 | listTracks = tracker.ConsolidateTracks(listTracks, numImg) 255 | tracker.FinalizeMotion(listTracks, numImg) 256 | 257 | #print("(AFTER) Num tracks=" + repr(len(listTracks))) 258 | 259 | #PrintTracks(listTracks, img_list) 260 | 261 | result = GenerateResultString(seq_full, listTracks, subset_offset, listDateObs_full) 262 | 263 | return result 264 | 265 | # GenerateSequences 266 | # Input: 267 | # seq_full: The full image sequence 268 | # Output: 269 | # A list of sequences to be explored 270 | def GenerateSequences(seq_full): 271 | 272 | listSequences = [] 273 | numImg = len(seq_full["path"]) 274 | 275 | depoch = Time('1970-01-01T00:00:00.0', 
scale='utc', format='isot') 276 | 277 | prev_dateobs = 0 278 | delta_time = 0 279 | max_delta_time = 6*3600 280 | max_img_per_seq = 50 281 | min_img_per_seq = 5 282 | 283 | cur_seq = MySeq() 284 | cur_seq.seq = {"ID": "none", "images": [], "path": []} 285 | cur_seq.subset_offset = 0 286 | 287 | cur_num_img = 0 288 | 289 | for t in range(numImg): 290 | 291 | fullpath = seq_full["path"][t] 292 | hdulist = fits.open(fullpath) 293 | datestring = hdulist[0].header['DATE-OBS'] 294 | timestring = hdulist[0].header['TIME-OBS'] 295 | my_date_str = datestring + ' ' + timestring 296 | newDate = normalizeDate(my_date_str) 297 | dtime = Time(newDate, scale='utc', format='iso') 298 | fDateObs = compute_elapsed_time_in_sec2(depoch, dtime) 299 | 300 | if (0==t): 301 | prev_dateobs = fDateObs 302 | 303 | delta_time = fDateObs - prev_dateobs 304 | 305 | if (delta_time < max_delta_time and cur_num_img < max_img_per_seq): 306 | cur_num_img += 1 307 | cur_seq.seq["ID"] = seq_full["ID"] 308 | cur_seq.seq["images"].append(seq_full["images"][t]) 309 | cur_seq.seq["path"].append(seq_full["path"][t]) 310 | else: 311 | # Either the delta time (gap) has been exceeded 312 | # or there are too many images for the current sequence 313 | 314 | # Add the current sequence to the list of sequences 315 | if (cur_num_img>=min_img_per_seq): 316 | listSequences.append(cur_seq) 317 | 318 | # Construct new sequence 319 | cur_seq = MySeq() 320 | cur_seq.seq = {"ID": seq_full["ID"], "images": [seq_full["images"][t]], "path": [seq_full["path"][t]]} 321 | cur_seq.subset_offset = t 322 | 323 | cur_num_img = 1 324 | 325 | prev_dateobs = fDateObs 326 | 327 | # Add any remaining sequence 328 | if (cur_num_img>=min_img_per_seq): 329 | listSequences.append(cur_seq) 330 | 331 | return listSequences 332 | 333 | # GenerateListDateObs 334 | # Input: 335 | # seq: A given sequence of images 336 | # Output: 337 | # A list of DateObs pertaining to the given image sequence 338 | def GenerateListDateObs(seq): 339 | 340 | 
depoch = Time('1970-01-01T00:00:00.0', scale='utc', format='isot') 341 | listDateObs = [] 342 | numImg = len(seq["images"]) 343 | 344 | for t in range(numImg): 345 | fullpath = seq["path"][t] 346 | hdulist = fits.open(fullpath) 347 | datestring = hdulist[0].header['DATE-OBS'] 348 | timestring = hdulist[0].header['TIME-OBS'] 349 | my_date_str = datestring + ' ' + timestring 350 | newDate = normalizeDate(my_date_str) 351 | dtime = Time(newDate, scale='utc', format='iso') 352 | fDateObs = compute_elapsed_time_in_sec2(depoch, dtime) 353 | listDateObs.append(fDateObs) 354 | 355 | return listDateObs 356 | 357 | # process_sequence 358 | # Input: 359 | # seq_full: The full image sequence to be explored 360 | # Output: 361 | # The result string(s) associated with the entire image sequence 362 | def process_sequence(seq_full): 363 | 364 | # Find comets in the sequence and return only the longest matched one 365 | result = [] 366 | slen = len(seq_full["images"]) 367 | if slen < 5: 368 | #Ignore short sequences. 
369 | print(seq_full["progress"]) 370 | return result 371 | 372 | track_quality.PopulateGridDirection() 373 | 374 | listDateObs_full = GenerateListDateObs(seq_full) 375 | listSequences = GenerateSequences(seq_full) 376 | 377 | nseq = len(listSequences) 378 | 379 | for t in range(nseq): 380 | 381 | cur_seq = listSequences[t] 382 | seq = cur_seq.seq 383 | subset_offset = cur_seq.subset_offset 384 | 385 | try: 386 | bestComet = explore_sequence(seq_full, seq, subset_offset, listDateObs_full) 387 | if (0!=len(bestComet)): 388 | result.append(bestComet) 389 | except Exception as e: 390 | print("Error: "+str(e)) 391 | pass 392 | 393 | print(seq_full["progress"]) 394 | return result 395 | 396 | 397 | def test(folder_in, output_file): 398 | print('calculate location...') 399 | data_set = [] 400 | # Scan folder for all sequences 401 | for (dirpath, dirnames, filenames) in os.walk(folder_in): 402 | dirnames.sort() 403 | 404 | seq = {} 405 | cometID = os.path.relpath(dirpath, folder_in) 406 | seq["ID"] = cometID 407 | images = [] 408 | paths = [] 409 | for filename in filenames: 410 | ext = os.path.splitext(filename)[1] 411 | if ext == '.fts': 412 | images.append(filename) 413 | paths.append(os.path.join(dirpath, filename)) 414 | 415 | images.sort() 416 | paths.sort() 417 | seq["images"] = images 418 | seq["path"] = paths 419 | if len(images) > 0: 420 | data_set.append(seq) 421 | 422 | for i, s in enumerate(data_set): 423 | s["progress"] = "Completed " + s["ID"] + " " + str(i + 1) + "/" + str(len(data_set)) 424 | 425 | pool = multiprocessing.Pool() 426 | result_async = [pool.apply_async(process_sequence, args=(s,)) for s in data_set] 427 | results = [r.get() for r in result_async] 428 | if os.path.exists(output_file): 429 | os.remove(output_file) 430 | with open(output_file, 'w') as f: 431 | for r in results: 432 | if len(r) > 0: 433 | f.writelines(r) 434 | f.flush() 435 | print('calculate finish.') 436 | 437 | 438 | if __name__ == "__main__": 439 | 
# (continuation of test.py's `__main__` guard; the `if` line is in the previous chunk)
    # Example invocation (paths from the original author's machine):
    #######################################################################################
    # python test.py D:\研究生\杂活\2022-7-23彗星搜索\challenge_data\cmt0030\ D:\研究生\杂活\2022-7-23彗星搜索\output\220716_c2\output.csv
    folder_in = sys.argv[1]
    output_file = sys.argv[2]
    # NOTE(review): the command-line arguments are parsed but test(folder_in,
    # output_file) is never invoked here -- presumably an oversight; confirm
    # before relying on `python test.py ...` doing any work.
-------------------------------------------------------------------------------- /track_quality.py: --------------------------------------------------------------------------------
"""
File: track_quality.py
Note: This code computes track quality from various attributes (speed, direction, location)
Date: 2022-02-26
Author: D. Parrott
"""

import sys
import pickle
import os
import math
import random
from math import sqrt
from skimage import data
from skimage.feature import blob_dog, blob_log, blob_doh
from skimage.color import rgb2gray
import numpy as np
from astropy.io import fits
from astropy.time import Time
from scipy.signal import medfilt2d
from scipy import ndimage
from numba import jit
from numba import njit
from numba import typed
from numba import types
import matplotlib.pyplot as plt
import cv2
import faulthandler; faulthandler.enable()
import multiprocessing
import warnings

warnings.filterwarnings("ignore")

# Class definitions
# Plain attribute container; fields are attached dynamically by the tracker.
class MyTrack:
    pass

# Model of track directions for each grid section
# (16 sections, keys 0..15; each maps to a list of [lo, hi] ranges in degrees)
m_listGridDirections = { 0:[], 1:[], 2:[], 3:[],
                         4:[], 5:[], 6:[], 7:[],
                         8:[], 9:[], 10:[], 11:[],
                         12:[], 13:[], 14:[], 15:[] }

# PopulateGridDirection
# Input:
#   None
# Modifies:
#   m_listGridDirections: List of likely track directions for each grid section
#   Track directions are encoded as a simple range of directions, in degrees
def PopulateGridDirection():
    """Fill m_listGridDirections with the allowed [lo, hi] degree ranges.

    NOTE(review): not idempotent -- each call appends the ranges again, and
    process_sequence() calls this once per sequence; verify duplicates in the
    lists are harmless to the consumers of m_listGridDirections.
    """
    m_listGridDirections[0].append([-55, -34])
    m_listGridDirections[0].append([30, 50])
    m_listGridDirections[0].append([64, 90])

    m_listGridDirections[1].append([30, 120])

    m_listGridDirections[2].append([42, 82])
    m_listGridDirections[2].append([100, 135])

    m_listGridDirections[3].append([-154, -123])
    m_listGridDirections[3].append([67, 150])

    m_listGridDirections[4].append([23, 41])
    m_listGridDirections[4].append([47, 78])

    m_listGridDirections[5].append([-180, 180])

    m_listGridDirections[6].append([-180, 180])

    m_listGridDirections[7].append([-146, -115])
    m_listGridDirections[7].append([75, 156])

    m_listGridDirections[8].append([-180, 180])

    m_listGridDirections[9].append([-180, 180])

    m_listGridDirections[10].append([-180, 180])

    m_listGridDirections[11].append([-180, 180])

    m_listGridDirections[12].append([-86, -28])

    m_listGridDirections[13].append([-110, -70])
    m_listGridDirections[13].append([-56, -30])
    m_listGridDirections[13].append([30, 53])
    m_listGridDirections[13].append([76, 108])

    m_listGridDirections[14].append([-130, -98])
    m_listGridDirections[14].append([-82, -48])

    m_listGridDirections[15].append([-153, -62])
    m_listGridDirections[15].append([110, 144])
    m_listGridDirections[15].append([165, 177])

# ComputeTrackQuality_SunMotionVector_Jan
# Input:
#   track: A given track to evaluate
# Output:
#   Whether or not the track has a statistically likely sun motion vector for the month of January
def ComputeTrackQuality_SunMotionVector_Jan(track):

    # By default
    bDesired = True

    # Implausible: vector in [-0.80, -0.20].
    if (track.fSunMotionVector >= -0.80 and
        track.fSunMotionVector <= -0.20):
        bDesired = False

    # Implausible: vector >= 0.70.
    if (track.fSunMotionVector >= 0.70):
        bDesired = False

    return bDesired

# ComputeTrackQuality_SunMotionVector_Feb
# Input:
#   track: A given track to evaluate
# Output:
#   Whether or not the track has a statistically likely sun motion vector for the month of February
def ComputeTrackQuality_SunMotionVector_Feb(track):
    """True iff the sun-motion vector is plausible for February.

    Implausible: v in [-0.70, -0.25] or v >= 0.20.
    """
    v = track.fSunMotionVector
    return not (-0.70 <= v <= -0.25 or v >= 0.20)

def ComputeTrackQuality_SunMotionVector_Mar(track):
    """True iff the sun-motion vector is plausible for March.

    Implausible: v in [-0.85, -0.12] or v >= 0.00.
    """
    v = track.fSunMotionVector
    return not (-0.85 <= v <= -0.12 or v >= 0.00)

def ComputeTrackQuality_SunMotionVector_Apr(track):
    """True iff the sun-motion vector is plausible for April.

    Implausible: v in [-0.80, 0.10] or v >= 0.40.
    """
    v = track.fSunMotionVector
    return not (-0.80 <= v <= 0.10 or v >= 0.40)

def ComputeTrackQuality_SunMotionVector_May(track):
    """True iff the sun-motion vector is plausible for May.

    Implausible: v in [-0.72, 0.73].
    """
    v = track.fSunMotionVector
    return not (-0.72 <= v <= 0.73)

def ComputeTrackQuality_SunMotionVector_Jun(track):
    """True iff the sun-motion vector is plausible for June.

    Implausible: v <= -0.92 or v >= -0.72.
    """
    v = track.fSunMotionVector
    return not (v <= -0.92 or v >= -0.72)

def ComputeTrackQuality_SunMotionVector_Jul(track):
    """True iff the sun-motion vector is plausible for July.

    Implausible: v in [-0.80, 0.00] or v >= 0.50.
    """
    v = track.fSunMotionVector
    return not (-0.80 <= v <= 0.00 or v >= 0.50)

def ComputeTrackQuality_SunMotionVector_Aug(track):
    """True iff the sun-motion vector is plausible for August.

    Implausible: v in [0.10, 0.70] or v >= 0.90.
    """
    v = track.fSunMotionVector
    return not (0.10 <= v <= 0.70 or v >= 0.90)

def ComputeTrackQuality_SunMotionVector_Sep(track):
    """True iff the sun-motion vector is plausible for September.

    Implausible: v in [-0.80, -0.20] or v >= 0.10.
    """
    v = track.fSunMotionVector
    return not (-0.80 <= v <= -0.20 or v >= 0.10)

def ComputeTrackQuality_SunMotionVector_Oct(track):
    """True iff the sun-motion vector is plausible for October.

    Implausible: v in [-0.80, 0.05] or v >= 0.30.
    """
    v = track.fSunMotionVector
    return not (-0.80 <= v <= 0.05 or v >= 0.30)

def ComputeTrackQuality_SunMotionVector_Nov(track):
    """True iff the sun-motion vector is plausible for November.

    Implausible: v >= -0.72.
    """
    return not (track.fSunMotionVector >= -0.72)

def ComputeTrackQuality_SunMotionVector_Dec(track):
    """True iff the sun-motion vector is plausible for December.

    Implausible: v <= -0.93 or v >= -0.72.
    """
    v = track.fSunMotionVector
    return not (v <= -0.93 or v >= -0.72)

def ComputeTrackQuality_SunMotionVector_All(track):
    """True iff the sun-motion vector is plausible for any month.

    Implausible: v >= -0.72.
    """
    return not (track.fSunMotionVector >= -0.72)

def ComputeTrackQuality_SunMotionVector(track):
    """Set track.bFlaggedSunMotionVector when the sun-motion vector is
    implausible for the track's month (track.nMonthIndex, 0 = January).

    An out-of-range month index is treated as implausible, as before.
    """
    idx = track.nMonthIndex
    if idx == 0:
        plausible = ComputeTrackQuality_SunMotionVector_Jan(track)
    elif idx == 1:
        plausible = ComputeTrackQuality_SunMotionVector_Feb(track)
    elif idx == 2:
        plausible = ComputeTrackQuality_SunMotionVector_Mar(track)
    elif idx == 3:
        plausible = ComputeTrackQuality_SunMotionVector_Apr(track)
    elif idx == 4:
        plausible = ComputeTrackQuality_SunMotionVector_May(track)
    elif idx == 5:
        plausible = ComputeTrackQuality_SunMotionVector_Jun(track)
    elif idx == 6:
        plausible = ComputeTrackQuality_SunMotionVector_Jul(track)
    elif idx == 7:
        plausible = ComputeTrackQuality_SunMotionVector_Aug(track)
    elif idx == 8:
        plausible = ComputeTrackQuality_SunMotionVector_Sep(track)
    elif idx == 9:
        plausible = ComputeTrackQuality_SunMotionVector_Oct(track)
    elif idx == 10:
        plausible = ComputeTrackQuality_SunMotionVector_Nov(track)
    elif idx == 11:
        plausible = ComputeTrackQuality_SunMotionVector_Dec(track)
    else:
        plausible = False
    if not plausible:
        track.bFlaggedSunMotionVector = True

# ComputeTrackQuality_Direction_Jan
# Input:
#   track: A given track to evaluate
# Output:
#   Whether or not the track has a statistically likely direction
# for the month of January
def ComputeTrackQuality_Direction_Jan(track):
    """True iff the track direction (degrees) is plausible for January.

    Implausible: d in [-110, 10] or d >= 110.
    """
    d = track.fDirection
    return not (-110.0 <= d <= 10.0 or d >= 110.0)

def ComputeTrackQuality_Direction_Feb(track):
    """True iff the track direction is plausible for February.

    Implausible: d in [-135, -10] or d in [55, 140].
    """
    d = track.fDirection
    return not (-135.0 <= d <= -10.0 or 55.0 <= d <= 140.0)

def ComputeTrackQuality_Direction_Mar(track):
    """True iff the track direction is plausible for March.

    Implausible: d <= -150, d in [-110, 20], or d >= 55.
    """
    d = track.fDirection
    return not (d <= -150.0 or -110.0 <= d <= 20.0 or d >= 55.0)

def ComputeTrackQuality_Direction_Apr(track):
    """True iff the track direction is plausible for April.

    Implausible: d <= -150, d in [-110, 20], or d >= 80.
    """
    d = track.fDirection
    return not (d <= -150.0 or -110.0 <= d <= 20.0 or d >= 80.0)

def ComputeTrackQuality_Direction_May(track):
    """True iff the track direction is plausible for May.

    Implausible: d <= -140, d in [-95, 40], or d in [110, 165].
    """
    d = track.fDirection
    return not (d <= -140.0 or -95.0 <= d <= 40.0 or 110.0 <= d <= 165.0)

def ComputeTrackQuality_Direction_Jun(track):
    """True iff the track direction is plausible for June.

    Implausible: d <= -110, d in [-55, 65], or d >= 130.
    """
    d = track.fDirection
    return not (d <= -110.0 or -55.0 <= d <= 65.0 or d >= 130.0)

def ComputeTrackQuality_Direction_Jul(track):
    """True iff the track direction is plausible for July.

    Implausible: d <= -80, d in [-30, 105], or d >= 143.
    """
    d = track.fDirection
    return not (d <= -80.0 or -30.0 <= d <= 105.0 or d >= 143.0)

def ComputeTrackQuality_Direction_Aug(track):
    """True iff the track direction is plausible for August.

    Implausible: d <= -140, d in [-110, -45], d in [-10, 95], or d >= 175.
    """
    d = track.fDirection
    return not (d <= -140 or -110.0 <= d <= -45.0 or -10.0 <= d <= 95.0 or d >= 175.0)

def ComputeTrackQuality_Direction_Sep(track):
    """True iff the track direction is plausible for September.

    Implausible: d <= -50, d in [-30, 127], d in [150, 168], or d >= 179.
    """
    d = track.fDirection
    return not (d <= -50.0 or -30.0 <= d <= 127.0 or 150.0 <= d <= 168.0 or d >= 179.0)

def ComputeTrackQuality_Direction_Oct(track):
    """True iff the track direction is plausible for October.

    Implausible: d <= -70, d in [-30, 116], or d >= 145.
    """
    d = track.fDirection
    return not (d <= -70.0 or -30.0 <= d <= 116.0 or d >= 145.0)

def ComputeTrackQuality_Direction_Nov(track):
    """True iff the track direction is plausible for November.

    Implausible: d <= -100, d in [-45, 87], or d >= 138.
    """
    d = track.fDirection
    return not (d <= -100.0 or -45.0 <= d <= 87.0 or d >= 138.0)

def ComputeTrackQuality_Direction_Dec(track):
    """True iff the track direction is plausible for December.

    Implausible: d <= -130, d in [-75, 47], or d >= 110.
    """
    d = track.fDirection
    return not (d <= -130.0 or -75.0 <= d <= 47.0 or d >= 110.0)

def ComputeTrackQuality_Direction_All(track):
    """True iff the track direction is plausible for any month.

    Implausible: d <= -155, d in [-35, 24], or d >= 144.
    """
    d = track.fDirection
    return not (d <= -155.0 or -35.0 <= d <= 24.0 or d >= 144.0)

# ComputeTrackQuality_Direction
# Input:
#   track: A given track to be evaluated
# Modifies:
#   track.bFlaggedDirection
# (the month dispatch continues past this chunk; kept in the original style
#  so the remaining elif clauses attach unchanged)
def ComputeTrackQuality_Direction(track):

    # By default
    bHasDesiredDirection = False

    idx = track.nMonthIndex

    if (0 == idx):
        bHasDesiredDirection = ComputeTrackQuality_Direction_Jan(track)
    elif (1 == idx):
        bHasDesiredDirection = ComputeTrackQuality_Direction_Feb(track)
elif (2 == idx): 679 | bHasDesiredDirection = ComputeTrackQuality_Direction_Mar(track) 680 | elif (3 == idx): 681 | bHasDesiredDirection = ComputeTrackQuality_Direction_Apr(track) 682 | elif (4 == idx): 683 | bHasDesiredDirection = ComputeTrackQuality_Direction_May(track) 684 | elif (5 == idx): 685 | bHasDesiredDirection = ComputeTrackQuality_Direction_Jun(track) 686 | elif (6 == idx): 687 | bHasDesiredDirection = ComputeTrackQuality_Direction_Jul(track) 688 | elif (7 == idx): 689 | bHasDesiredDirection = ComputeTrackQuality_Direction_Aug(track) 690 | elif (8 == idx): 691 | bHasDesiredDirection = ComputeTrackQuality_Direction_Sep(track) 692 | elif (9 == idx): 693 | bHasDesiredDirection = ComputeTrackQuality_Direction_Oct(track) 694 | elif (10 == idx): 695 | bHasDesiredDirection = ComputeTrackQuality_Direction_Nov(track) 696 | elif (11 == idx): 697 | bHasDesiredDirection = ComputeTrackQuality_Direction_Dec(track) 698 | 699 | if (bHasDesiredDirection): 700 | return 701 | 702 | track.bFlaggedDirection = True 703 | 704 | # ComputeTrackQuality_GridSection_Jan 705 | # Input: 706 | # track: A given track to be evaluated 707 | # Output: 708 | # Whether or not the track has a statistically likely grid section for the month of January 709 | def ComputeTrackQuality_GridSection_Jan(track): 710 | 711 | # By default 712 | bDesired = False 713 | 714 | if (3==track.nGridSection or 715 | 4==track.nGridSection or 716 | 6==track.nGridSection or 717 | 7==track.nGridSection or 718 | 8==track.nGridSection or 719 | 11==track.nGridSection or 720 | 13==track.nGridSection or 721 | 14==track.nGridSection or 722 | 15==track.nGridSection): 723 | bDesired=True 724 | return bDesired 725 | 726 | # ComputeTrackQuality_GridSection_Feb 727 | # Input: 728 | # track: A given track to be evaluated 729 | # Output: 730 | # Whether or not the track has a statistically likely grid section for the month of February 731 | def ComputeTrackQuality_GridSection_Feb(track): 732 | 733 | # By default 734 | bDesired 
= False 735 | 736 | if (0==track.nGridSection or 737 | 1==track.nGridSection or 738 | 3==track.nGridSection or 739 | 4==track.nGridSection or 740 | 5==track.nGridSection or 741 | 7==track.nGridSection or 742 | 9==track.nGridSection or 743 | 10==track.nGridSection or 744 | 11==track.nGridSection or 745 | 12==track.nGridSection or 746 | 13==track.nGridSection or 747 | 14==track.nGridSection or 748 | 15==track.nGridSection): 749 | bDesired=True 750 | return bDesired 751 | 752 | # ComputeTrackQuality_GridSection_Mar 753 | # Input: 754 | # track: A given track to be evaluated 755 | # Output: 756 | # Whether or not the track has a statistically likely grid section for the month of March 757 | def ComputeTrackQuality_GridSection_Mar(track): 758 | 759 | # By default 760 | bDesired = False 761 | 762 | if (0==track.nGridSection or 763 | 1==track.nGridSection or 764 | 3==track.nGridSection or 765 | 4==track.nGridSection or 766 | 12==track.nGridSection or 767 | 15==track.nGridSection): 768 | bDesired=True 769 | return bDesired 770 | 771 | # ComputeTrackQuality_GridSection_Apr 772 | # Input: 773 | # track: A given track to be evaluated 774 | # Output: 775 | # Whether or not the track has a statistically likely grid section for the month of April 776 | def ComputeTrackQuality_GridSection_Apr(track): 777 | 778 | # By default 779 | bDesired = False 780 | 781 | if (1==track.nGridSection or 782 | 15==track.nGridSection): 783 | bDesired=True 784 | return bDesired 785 | 786 | # ComputeTrackQuality_GridSection_May 787 | # Input: 788 | # track: A given track to be evaluated 789 | # Output: 790 | # Whether or not the track has a statistically likely grid section for the month of May 791 | def ComputeTrackQuality_GridSection_May(track): 792 | 793 | # By default 794 | bDesired = False 795 | 796 | if (1==track.nGridSection or 797 | 2==track.nGridSection or 798 | 3==track.nGridSection or 799 | 13==track.nGridSection or 800 | 14==track.nGridSection or 801 | 15==track.nGridSection): 802 | 
bDesired=True 803 | return bDesired 804 | 805 | # ComputeTrackQuality_GridSection_Jun 806 | # Input: 807 | # track: A given track to be evaluated 808 | # Output: 809 | # Whether or not the track has a statistically likely grid section for the month of June 810 | def ComputeTrackQuality_GridSection_Jun(track): 811 | 812 | # By default 813 | bDesired = False 814 | 815 | if (3==track.nGridSection or 816 | 7==track.nGridSection or 817 | 12==track.nGridSection or 818 | 13==track.nGridSection): 819 | bDesired=True 820 | return bDesired 821 | 822 | # ComputeTrackQuality_GridSection_Jul 823 | # Input: 824 | # track: A given track to be evaluated 825 | # Output: 826 | # Whether or not the track has a statistically likely grid section for the month of July 827 | def ComputeTrackQuality_GridSection_Jul(track): 828 | 829 | # By default 830 | bDesired = False 831 | 832 | if (3==track.nGridSection or 833 | 7==track.nGridSection or 834 | 12==track.nGridSection or 835 | 14==track.nGridSection or 836 | 15==track.nGridSection): 837 | bDesired=True 838 | return bDesired 839 | 840 | # ComputeTrackQuality_GridSection_Aug 841 | # Input: 842 | # track: A given track to be evaluated 843 | # Output: 844 | # Whether or not the track has a statistically likely grid section for the month of August 845 | def ComputeTrackQuality_GridSection_Aug(track): 846 | 847 | # By default 848 | bDesired = False 849 | 850 | if (0==track.nGridSection or 851 | 4==track.nGridSection or 852 | 7==track.nGridSection or 853 | 11==track.nGridSection or 854 | 12==track.nGridSection or 855 | 15==track.nGridSection): 856 | bDesired=True 857 | return bDesired 858 | 859 | # ComputeTrackQuality_GridSection_Sep 860 | # Input: 861 | # track: A given track to be evaluated 862 | # Output: 863 | # Whether or not the track has a statistically likely grid section for the month of September 864 | def ComputeTrackQuality_GridSection_Sep(track): 865 | 866 | # By default 867 | bDesired = False 868 | 869 | if (3==track.nGridSection 
or 870 | 7==track.nGridSection or 871 | 12==track.nGridSection or 872 | 13==track.nGridSection or 873 | 15==track.nGridSection): 874 | bDesired=True 875 | return bDesired 876 | 877 | # ComputeTrackQuality_GridSection_Oct 878 | # Input: 879 | # track: A given track to be evaluated 880 | # Output: 881 | # Whether or not the track has a statistically likely grid section for the month of October 882 | def ComputeTrackQuality_GridSection_Oct(track): 883 | 884 | # By default 885 | bDesired = False 886 | 887 | if (3==track.nGridSection or 888 | 7==track.nGridSection or 889 | 12==track.nGridSection or 890 | 13==track.nGridSection or 891 | 15==track.nGridSection): 892 | bDesired=True 893 | return bDesired 894 | 895 | # ComputeTrackQuality_GridSection_Nov 896 | # Input: 897 | # track: A given track to be evaluated 898 | # Output: 899 | # Whether or not the track has a statistically likely grid section for the month of November 900 | def ComputeTrackQuality_GridSection_Nov(track): 901 | 902 | # By default 903 | bDesired = False 904 | 905 | if (1==track.nGridSection or 906 | 2==track.nGridSection or 907 | 3==track.nGridSection or 908 | 13==track.nGridSection or 909 | 14==track.nGridSection or 910 | 15==track.nGridSection): 911 | bDesired=True 912 | return bDesired 913 | 914 | # ComputeTrackQuality_GridSection_Dec 915 | # Input: 916 | # track: A given track to be evaluated 917 | # Output: 918 | # Whether or not the track has a statistically likely grid section for the month of December 919 | def ComputeTrackQuality_GridSection_Dec(track): 920 | 921 | # By default 922 | bDesired = False 923 | 924 | if (1==track.nGridSection or 925 | 4==track.nGridSection or 926 | 15==track.nGridSection): 927 | bDesired=True 928 | return bDesired 929 | 930 | # ComputeTrackQuality_GridSection_All 931 | # Input: 932 | # track: A given track to be evaluated 933 | # Output: 934 | # Whether or not the track has a statistically likely grid section for any month 935 | def 
ComputeTrackQuality_GridSection_All(track): 936 | 937 | # By default 938 | bDesired = False 939 | 940 | if (1 == track.nGridSection or 941 | 2 == track.nGridSection or 942 | 3 == track.nGridSection or 943 | 7 == track.nGridSection or 944 | 12 == track.nGridSection or 945 | 13 == track.nGridSection or 946 | 14 == track.nGridSection or 947 | 15 == track.nGridSection): 948 | bDesired=True 949 | return bDesired 950 | 951 | # ComputeTrackQuality_GridSection 952 | # Input: 953 | # track: A given track to be evaluated 954 | # Modifies: 955 | # track.bFlaggedGridSection 956 | def ComputeTrackQuality_GridSection(track): 957 | 958 | # By default 959 | bHasDesiredGridSection = False 960 | 961 | idx = track.nMonthIndex 962 | 963 | if (0 == idx): 964 | bHasDesiredGridSection = ComputeTrackQuality_GridSection_Jan(track) 965 | elif (1 == idx): 966 | bHasDesiredGridSection = ComputeTrackQuality_GridSection_Feb(track) 967 | elif (2 == idx): 968 | bHasDesiredGridSection = ComputeTrackQuality_GridSection_Mar(track) 969 | elif (3 == idx): 970 | bHasDesiredGridSection = ComputeTrackQuality_GridSection_Apr(track) 971 | elif (4 == idx): 972 | bHasDesiredGridSection = ComputeTrackQuality_GridSection_May(track) 973 | elif (5 == idx): 974 | bHasDesiredGridSection = ComputeTrackQuality_GridSection_Jun(track) 975 | elif (6 == idx): 976 | bHasDesiredGridSection = ComputeTrackQuality_GridSection_Jul(track) 977 | elif (7 == idx): 978 | bHasDesiredGridSection = ComputeTrackQuality_GridSection_Aug(track) 979 | elif (8 == idx): 980 | bHasDesiredGridSection = ComputeTrackQuality_GridSection_Sep(track) 981 | elif (9 == idx): 982 | bHasDesiredGridSection = ComputeTrackQuality_GridSection_Oct(track) 983 | elif (10 == idx): 984 | bHasDesiredGridSection = ComputeTrackQuality_GridSection_Nov(track) 985 | elif (11 == idx): 986 | bHasDesiredGridSection = ComputeTrackQuality_GridSection_Dec(track) 987 | 988 | if (bHasDesiredGridSection): 989 | return 990 | 991 | track.bFlaggedGridSection = True 992 | 993 | # 
# ---------------------------------------------------------------------------
# Monthly velocity filters (January through September).
#
# Each ComputeTrackQuality_Velocity_<Mon>(track) returns True when
# track.fVelocity lies within the inclusive [lo, hi] range that is
# statistically likely for that month (original test was
# "False if v < lo or v > hi", which is the same inclusive interval).
# ---------------------------------------------------------------------------

# ComputeTrackQuality_Velocity_Jan
def ComputeTrackQuality_Velocity_Jan(track):
    return 34.7 <= track.fVelocity <= 97.7

# ComputeTrackQuality_Velocity_Feb
def ComputeTrackQuality_Velocity_Feb(track):
    return 34.7 <= track.fVelocity <= 97.7

# ComputeTrackQuality_Velocity_Mar
def ComputeTrackQuality_Velocity_Mar(track):
    return 68.9 <= track.fVelocity <= 97.7

# ComputeTrackQuality_Velocity_Apr
def ComputeTrackQuality_Velocity_Apr(track):
    return 45 <= track.fVelocity <= 83

# ComputeTrackQuality_Velocity_May
def ComputeTrackQuality_Velocity_May(track):
    return 37.6 <= track.fVelocity <= 65.1

# ComputeTrackQuality_Velocity_Jun
def ComputeTrackQuality_Velocity_Jun(track):
    return 33 <= track.fVelocity <= 52

# ComputeTrackQuality_Velocity_Jul
def ComputeTrackQuality_Velocity_Jul(track):
    return 40 <= track.fVelocity <= 90

# ComputeTrackQuality_Velocity_Aug
def ComputeTrackQuality_Velocity_Aug(track):
    return 34.7 <= track.fVelocity <= 97.7

# ComputeTrackQuality_Velocity_Sep
def ComputeTrackQuality_Velocity_Sep(track):
    return 77 <= track.fVelocity <= 100

# ComputeTrackQuality_Velocity_Oct
# Input:
#   track: A given track to be evaluated
# Output:
# Whether or not the given track has a statistically likely velocity
# for the month of October.  Bounds are inclusive (original test was
# "False if v < lo or v > hi").
def ComputeTrackQuality_Velocity_Oct(track):
    return 59 <= track.fVelocity <= 90

# ComputeTrackQuality_Velocity_Nov
# Likely November velocity range, inclusive.
def ComputeTrackQuality_Velocity_Nov(track):
    return 43 <= track.fVelocity <= 75

# ComputeTrackQuality_Velocity_Dec
# Likely December velocity range, inclusive.
def ComputeTrackQuality_Velocity_Dec(track):
    return 44 <= track.fVelocity <= 83

# ComputeTrackQuality_Velocity
# Input:
#   track: A given track to be evaluated
# Modifies:
#   track.bFlaggedVelocity (only ever set to True here, never cleared)
def ComputeTrackQuality_Velocity(track):

    handlers = (
        ComputeTrackQuality_Velocity_Jan,
        ComputeTrackQuality_Velocity_Feb,
        ComputeTrackQuality_Velocity_Mar,
        ComputeTrackQuality_Velocity_Apr,
        ComputeTrackQuality_Velocity_May,
        ComputeTrackQuality_Velocity_Jun,
        ComputeTrackQuality_Velocity_Jul,
        ComputeTrackQuality_Velocity_Aug,
        ComputeTrackQuality_Velocity_Sep,
        ComputeTrackQuality_Velocity_Oct,
        ComputeTrackQuality_Velocity_Nov,
        ComputeTrackQuality_Velocity_Dec,
    )

    idx = track.nMonthIndex

    # Out-of-range month index => not desired => flagged, matching the
    # original if/elif chain's fall-through default.
    if 0 <= idx <= 11:
        bHasDesiredVelocity = handlers[idx](track)
    else:
        bHasDesiredVelocity = False

    if not bHasDesiredVelocity:
        track.bFlaggedVelocity = True

# ComputeTrackQuality_GridDirection
# Input:
#   track: A given track to be evaluated
# Modifies:
#   track.bFlaggedGridDirection (only ever set to True here, never cleared)
def ComputeTrackQuality_GridDirection(track):

    # m_listGridDirections is a module-level table (defined elsewhere in
    # this file) mapping a grid-section index to a list of inclusive
    # [lo, hi] direction range pairs.  The track passes if its direction
    # falls inside any range listed for its grid section.
    for pair in m_listGridDirections[track.nGridSection]:
        if pair[0] <= track.fDirection <= pair[1]:
            return

    track.bFlaggedGridDirection = True

# ---------------------------------------------------------------------------
# /tracker.py: (module header/docstring follows)
# ---------------------------------------------------------------------------
"""
File: tracker.py
Note: This code handles track creation and reduction
Date: 2022-02-26
Author: D. Parrott
"""

import sys
import pickle
import os
import math
import random
from math import sqrt
from skimage import data
from skimage.feature import blob_dog, blob_log, blob_doh
from skimage.color import rgb2gray
import numpy as np
from astropy.io import fits
from astropy.time import Time
from scipy.signal import medfilt2d
from scipy import ndimage
from numba import jit
from numba import njit
from numba import typed
from numba import types
import matplotlib.pyplot as plt
import cv2
import faulthandler; faulthandler.enable()
import multiprocessing
import warnings
import track_quality
import detect

warnings.filterwarnings("ignore")




# Define some constants.
SOHO_MAX_TRACKS=250000          # Hard cap on the number of candidate tracks kept
SOHO_MAX_SEC_PER_PIXEL=140      # Slowest allowed apparent motion (sec per pixel)
SOHO_MIN_SEC_PER_PIXEL=36 # 44  # Fastest allowed apparent motion (sec per pixel)
SOHO_FIT_ORDER_CUTOFF=0.97
SOHO_FIT_ORDER=1
SOHO_NUM_TRACKS_OUTPUT=2
SOHO_PCT_SAME_DETECTS=0.60
THRESHOLD_GRID_SIZE=32
R2D=(180 / 3.1415926535)        # Radians-to-degrees conversion factor


# Class definitions
# These are plain attribute bags: all fields are assigned dynamically at the
# point of use (e.g. CreateTracks3 populates MyTrack fields one by one).
class MyFITSImg:
    """Container for one FITS image; fields (e.g. fDateObs) assigned by the loader."""
    pass

class MyTrack:
    """Container for one candidate track; fields assigned in CreateTracks3."""
    pass

class MyDetect:
    """Container for one detection."""
    pass

class MySeq:
    """Container for one image sequence."""
    pass

# compute_elapsed_time_in_sec2
# Input: Two time objects
# Output: Elapsed time, floating point, in seconds.
def compute_elapsed_time_in_sec2(dtime_a, dtime_b):
    """Return (dtime_b - dtime_a) in seconds as a float.

    NOTE(review): relies on the difference supporting
    .to_value('sec', subfmt='float') — i.e. astropy Time/TimeDelta
    objects; confirm against callers.
    """
    delta_time = (dtime_b - dtime_a)
    dt_float = delta_time.to_value('sec', subfmt='float')
    return dt_float

# AddTrackVelocity
# Input:
#   listPixelsPerHourX: A list of DateObs:PixelsPerHourX pairs
#   listPixelsPerHourY: A list of DateObs:PixelsPerHourY pairs
#   listX: A list of DateObs:X pairs
#   listY: A list of DateObs:Y pairs
#   fDateObs: The DateObs associated with the detection velocity to be added
#   fTimeElapsedInSec: The timespan, in seconds, associated with the deltaX and deltaY values
#   fDeltaX: The deltaX value for computation of the detection velocity
#   fDeltaY: The deltaY value for computation of the detection velocity
#   detect: The detection object containing the X,Y values
# Output:
#   No object returned.
# Modifies:
#   listPixelsPerHourX: Adds new DateObs:PixelsPerHourX pair.
#   listPixelsPerHourY: Adds new DateObs:PixelsPerHourY pair.
#   listX: Adds new DateObs:X pair
#   listY: Adds new DateObs:Y pair
@njit
def AddTrackVelocity(listPixelsPerHourX, listPixelsPerHourY, listX, listY, fDateObs, fTimeElapsedInSec, fDeltaX, fDeltaY, detect):

    fX = detect[0]
    fY = detect[1]

    # Guard against a zero/near-zero timespan to avoid division by zero.
    if (abs(fTimeElapsedInSec) > 0.1):
        fElapsedTimeInHours = fTimeElapsedInSec / 3600.0

        fPixelsPerHourX = fDeltaX / fElapsedTimeInHours
        fPixelsPerHourY = fDeltaY / fElapsedTimeInHours
    else:
        fPixelsPerHourX = 0
        fPixelsPerHourY = 0

    # Each list is a flat sequence of (DateObs, value) pairs, hence the
    # two appends per list.
    listPixelsPerHourX.append(types.double(fDateObs))
    listPixelsPerHourX.append(types.double(fPixelsPerHourX))

    listPixelsPerHourY.append(types.double(fDateObs))
    listPixelsPerHourY.append(types.double(fPixelsPerHourY))

    listX.append(types.double(fDateObs))
    listX.append(types.double(fX))

    listY.append(types.double(fDateObs))
    listY.append(types.double(fY))

# CreateTracks3
# Input:
#   width: Image width in pixels
#   height: Image height in pixels
#   detect1: A starting detection, from the first image in the current image pair
#   map2: The detection map from the second image in the current image pair
#   fTimeElapsedInSec: The timespan between the first and second images in the current image pair
#   listTracks: A list of candidate tracks
#   fMinDistance: The minimum allowed distance, in pixels, that an object must travel given the current image pair
#   fMaxDistance: The maximum allowed distance, in pixels, that an object can travel given the current image pair
#   idx_img1: The index associated with the first image in the pair
#   idx_img2: The index associated with the second image in the pair
#   img_list: A list of images
#   grid_size: The size of the map grid cells, in pixels
# Modifies:
#   listTracks: New candidate tracks are added
def CreateTracks3(width, height, detect1, map2, fTimeElapsedInSec, listTracks, fMinDistance, fMaxDistance, idx_img1, idx_img2, img_list, grid_size):

    i = detect1[0]
    j = detect1[1]

    # Dimensions of the grid-cell map.
    w = int(width / grid_size) + 1
    h = int(height / grid_size) + 1

    # Grid cell containing the starting detection.
    c = int(i / (grid_size))
    d = int(j / (grid_size))

    # Loop-invariant DateObs lookups hoisted out of the scan below
    # (the original re-read img_list[idx_img2].fDateObs per candidate).
    fDateObs1 = img_list[idx_img1].fDateObs
    fDateObs2 = img_list[idx_img2].fDateObs

    # Scan the 3x3 neighborhood of grid cells centered on (c, d).
    for b in range(3):
        for a in range(3):
            p = c - 1 + a
            q = d - 1 + b

            # Skip cells that fall outside the map.
            if (p < 0 or p >= w or q < 0 or q >= h):
                continue

            for detect2 in map2[q][p]:

                fDeltaX = detect2[0] - i
                fDeltaY = detect2[1] - j

                fDistance = sqrt(fDeltaX * fDeltaX + fDeltaY * fDeltaY)

                # Reject pairings whose implied motion is outside the
                # allowed [fMinDistance, fMaxDistance] travel range.
                if (fDistance < fMinDistance or fDistance > fMaxDistance):
                    continue

                # Build a new candidate track seeded by this detection pair.
                new_track = MyTrack()
                new_track.listDetectIdentifiers = typed.List.empty_list(types.int64)
                new_track.listPixelsPerHourX = typed.List.empty_list(types.double)
                new_track.listPixelsPerHourY = typed.List.empty_list(types.double)
                new_track.listX = typed.List.empty_list(types.double)
                new_track.listY = typed.List.empty_list(types.double)
                new_track.vectorPositions = []
                new_track.vecCoeffX = []
                new_track.vecCoeffY = []
                AddTrackVelocity(new_track.listPixelsPerHourX,
                                 new_track.listPixelsPerHourY,
                                 new_track.listX,
                                 new_track.listY,
                                 fDateObs2,
                                 fTimeElapsedInSec,
                                 fDeltaX,
                                 fDeltaY,
                                 detect2)

                # Detection identifier encodes (image index, x, y) in one int.
                # (The original also computed an identifier for detect1 but
                # never used it; that dead code has been removed.)
                nDetectID2 = (idx_img2 * width * height) + (detect2[0] + (width * detect2[1]))
                new_track.fDateObsFirstConfirmedDetection = -1
                new_track.fDateObsLastConfirmedDetection = -1
                new_track.listDetectIdentifiers.append(nDetectID2)
                new_track.listDetectCounts = typed.List.empty_list(types.int64)
                new_track.fSourceImgDateObs = fDateObs1
                new_track.first_confirmed_idx_img = 99999
                new_track.first_confirmed_x = -1
                new_track.first_confirmed_y = -1
                new_track.last_confirmed_idx_img = -1
                new_track.last_confirmed_x = -1
                new_track.last_confirmed_y = -1
                new_track.source_img_idx = idx_img1
                new_track.source_img_x = i
                new_track.source_img_y = j
                new_track.bMarkedForDeletion = False
                new_track.nNumCombinedTracks = 1
                new_track.nNumDetectsAt2 = 0
                new_track.nNumDetectsAt3 = 0
                new_track.nNumDetectsAt4 = 0
                new_track.nNumDetectsAt5 = 0
                new_track.nNumDetectsAt6 = 0
                new_track.nNumDetectsAt7 = 0
                new_track.nNumDetectsAt8 = 0
                new_track.nNumGT0 = 0
                new_track.nNumGT1 = 0
                new_track.nNumGT2 = 0
                new_track.nNumGT3 = 0
                new_track.nNumGT5 = 0
                new_track.nNumGT7 = 0
                new_track.fSunMotionVector=-1
                new_track.fDirection=-1
                new_track.fGlobalQuality = 0
                new_track.fQuality = 0
                new_track.fFit_R2 = 0
                new_track.median_delta_x = 0
                new_track.median_delta_y = 0
                new_track.median_x = 0
                new_track.median_y = 0
                new_track.nMonthIndex = -1
                new_track.fVelocity = 0
                new_track.bFlaggedSunMotionVector = False
                new_track.bFlaggedDirection = False
                new_track.bFlaggedGridSection = False
                new_track.bFlaggedVelocity = False
                new_track.bFlaggedGridDirection = False
                listTracks.append(new_track)

# CreateTracks2:
# Input:
#   map1: The detection map for the first image in the current image pair
#   map2: The detection map for the second image in the current image pair
#   width: The image width, in pixels
#   height: The image height, in pixels
#   fTimeElapsedInSec: The timespan between the two images, in seconds.
249 | # listTracks: The current list of track candidates 250 | # idx_img1: The index associated with the first image in the current image pair 251 | # idx_img2: The index associated with the second image in the current image pair 252 | # img_list: The list of images for the current image sequence 253 | # grid_size: The size of the map grid cells, in pixels 254 | # Modifies: 255 | # listTracks: New candidate tracks are added 256 | def CreateTracks2(map1, map2, width, height, fTimeElapsedInSec, listTracks, idx_img1, idx_img2, img_list, grid_size): 257 | 258 | # Compute min and max distance for the given elapsed time 259 | fMinDistance = fTimeElapsedInSec / (SOHO_MAX_SEC_PER_PIXEL) # E.g., 140sec/pixel => 5.1"/min at 11.9"/px resolution 260 | fMaxDistance = fTimeElapsedInSec / (SOHO_MIN_SEC_PER_PIXEL) # E.g., 44sec/pixel => 16"/min at 11.9"/px resolution 261 | 262 | w = int(width / grid_size) + 1 263 | h = int(height / grid_size) + 1 264 | 265 | j = 0 266 | while (jSOHO_MAX_TRACKS): 305 | break 306 | 307 | t = t + 1 308 | 309 | return listTracks 310 | 311 | # GetDetection2 312 | # Input: 313 | # listValues: A list containing DateObs:Value pairs 314 | # bPrevious: When set to TRUE, initially only looks for detections that occur prior to the given timestamp 315 | # fDateObs_Current: The timestamp associated with the current detection 316 | # Output: 317 | # fBestDateObs: The DateObs associated with the closest matching detection 318 | # fBestValue: The value associated with the closest matching detection 319 | @njit 320 | def GetDetection2(listValues, bPrevious, fDateObs_Current): 321 | 322 | fBestDateObs=0 323 | fBestValue=0 324 | fDateObs=0 325 | fValue=0 326 | 327 | fMinDeltaTime = 1e9 328 | bFound = False 329 | 330 | slen = len(listValues) 331 | 332 | t=0 333 | while (t= fDateObs_Current - 0.1): 339 | t += 2 340 | continue 341 | 342 | fDeltaTime = abs(fDateObs_Current - fDateObs) 343 | 344 | if (fDeltaTime < fMinDeltaTime): 345 | fMinDeltaTime = fDeltaTime 346 | 
fBestDateObs = fDateObs 347 | fBestValue = fValue 348 | bFound=True 349 | 350 | t += 2 351 | 352 | if (bFound): 353 | return (fBestDateObs, fBestValue) 354 | 355 | # Could not find a data point in the past -- use the nearest data point 356 | fMinDeltaTime = 1e9 357 | 358 | t = 0 359 | while (t fDateObsLastConfirmedDetection): 434 | fDateObsLastConfirmedDetection = fDateObs 435 | 436 | return (fDateObsFirstConfirmedDetection, fDateObsLastConfirmedDetection) 437 | 438 | # TrackPresentOnImg 439 | # Summary: 440 | # Identifies whether or not a track is present on a given image. 441 | # If so, the associated detection on that image is added to the track. 442 | # The input/output fields would normally be contained within an object, 443 | # however, Numba requires primitive data types in order to work correctly. 444 | # Input: 445 | # source_img_idx: The index of the image from which the track was initially created 446 | # listPixelsPerHourX: A list of detection X-movements, in pixels/hour 447 | # listPixelsPerHourY: A list of detection Y-movements, in pixels/hour 448 | # listX: A list of detection X coordinates 449 | # listY: A list of detection Y coordinates 450 | # listDetectIdentifiers: A list of detection identifiers 451 | # fDateObsFirstConfirmedDetection: The DateObs of the first confirmed detection 452 | # fDateObsLastConfirmedDetection: The DateObs of the last confirmed detection 453 | # nNumDetectsAt2: The current number of detections found at 2 pixels within expected position 454 | # nNumDetectsAt3: The current number of detections found at 3 pixels within expected position 455 | # nNumDetectsAt4: The current number of detections found at 4 pixels within expected position 456 | # nNumDetectsAt5: The current number of detections found at 5 pixels within expected position 457 | # nNumDetectsAt6: The current number of detections found at 6 pixels within expected position 458 | # nNumDetectsAt7: The current number of detections found at 7 pixels within expected 
# nNumDetectsAt8: The current number of detections found at 8 pixels within expected position
# listDetectCounts: The list of detection cluster sizes, in pixel counts
# num_img: The number of images in the current sequence
# idx_img: The index of the current image being evaluated
# width: Image width, in pixels
# height: Image height, in pixels
# listDateObs: The list of DateObs associated with the current image sequence
# listImgDetections: The list of detection images
# update_counts: Whether or not to update the detection count statistics
# Output:
# The same flat track-state fields, updated.  The state is passed and
# returned as primitives/flat tuples because Numba requires primitive
# data types in order to work correctly.
@njit
def TrackPresentOnImg(source_img_idx,
                      listPixelsPerHourX,
                      listPixelsPerHourY,
                      listX,
                      listY,
                      listDetectIdentifiers,
                      fDateObsFirstConfirmedDetection,
                      fDateObsLastConfirmedDetection,
                      first_confirmed_idx_img,
                      last_confirmed_idx_img,
                      nNumDetectsAt2,
                      nNumDetectsAt3,
                      nNumDetectsAt4,
                      nNumDetectsAt5,
                      nNumDetectsAt6,
                      nNumDetectsAt7,
                      nNumDetectsAt8,
                      listDetectCounts,
                      num_img,
                      idx_img,
                      width,
                      height,
                      listDateObs,
                      listImgDetections,
                      update_counts):

    fDateObs_Current = listDateObs[idx_img]

    # Use the motion of the previous detection to predict where the current
    # detection should be located
    res = GetDetection(listPixelsPerHourX, listPixelsPerHourY, listX, listY, True, fDateObs_Current)

    fDateObs_Adjacent = res[0]
    fPixelsPerHourX = res[1]
    fPixelsPerHourY = res[2]
    fX = res[3]
    fY = res[4]

    fElapsedTimeInSec = fDateObs_Current - fDateObs_Adjacent
    fElapsedTimeInHours = fElapsedTimeInSec / 3600.0

    fExpectedX = fX + (fElapsedTimeInHours * fPixelsPerHourX)
    fExpectedY = fY + (fElapsedTimeInHours * fPixelsPerHourY)

    # Expected position, rounded to the nearest pixel
    i = int(fExpectedX + 0.5)
    j = int(fExpectedY + 0.5)

    fMaxDistance = 5.0  # 10.0
    r = 10
    win = (2 * r) + 1  # scan-window edge length (renamed from 'len' -- shadowed the builtin)

    fClosestDistance = 1e9
    found_v1 = 0  # BUGFIX: ensure found_v1 is always bound even if no pixel qualifies
    found_i = -1
    found_j = -1

    # Scan a (2r+1) x (2r+1) window centred on the expected position and keep
    # the closest detection pixel within fMaxDistance.
    b = 0
    while (b < win):
        a = 0
        while (a < win):

            # NOTE(review): the window coordinates below were reconstructed from
            # context (they centre the window on (i, j), consistent with the
            # fDeltaX = p - i / fDeltaY = q - j computation) -- confirm against VCS
            p = i - r + a
            q = j - r + b

            if (p < 0 or p >= width or q < 0 or q >= height):
                a = a + 1
                continue

            v1 = listImgDetections[idx_img][q][p]

            if (v1 <= 0):
                a = a + 1
                continue

            fDeltaX = p - i
            fDeltaY = q - j

            fDistance = sqrt(fDeltaX * fDeltaX + fDeltaY * fDeltaY)

            if (fDistance > fMaxDistance):
                a = a + 1
                continue

            if (fDistance < fClosestDistance):
                fClosestDistance = fDistance
                found_v1 = v1
                found_i = p
                found_j = q

            a = a + 1

        b = b + 1

    if (found_i < 0 or
        found_j < 0):
        # No detection near the expected position -- return the state unchanged
        return (source_img_idx,
                listPixelsPerHourX,
                listPixelsPerHourY,
                listX,
                listY,
                listDetectIdentifiers,
                fDateObsFirstConfirmedDetection,
                fDateObsLastConfirmedDetection,
                first_confirmed_idx_img,
                last_confirmed_idx_img,
                nNumDetectsAt2,
                nNumDetectsAt3,
                nNumDetectsAt4,
                nNumDetectsAt5,
                nNumDetectsAt6,
                nNumDetectsAt7,
                nNumDetectsAt8,
                listDetectCounts)

    if (update_counts):

        # Statistics pass: record how tightly the detection matches the prediction
        if (idx_img < first_confirmed_idx_img):
            first_confirmed_idx_img = idx_img

        if (idx_img > last_confirmed_idx_img):
            last_confirmed_idx_img = idx_img

        if (fClosestDistance < 2.0):
            nNumDetectsAt2 = nNumDetectsAt2 + 1
        elif (fClosestDistance < 3.0):
            nNumDetectsAt3 = nNumDetectsAt3 + 1
        elif (fClosestDistance < 4.0):
            nNumDetectsAt4 = nNumDetectsAt4 + 1
        elif (fClosestDistance < 5.0):
            nNumDetectsAt5 = nNumDetectsAt5 + 1
        elif (fClosestDistance < 6.0):
            nNumDetectsAt6 = nNumDetectsAt6 + 1
        elif (fClosestDistance < 7.0):
            nNumDetectsAt7 = nNumDetectsAt7 + 1
        elif (fClosestDistance < 8.0):
            nNumDetectsAt8 = nNumDetectsAt8 + 1
    else:

        # NOTE(review): this else was reconstructed as pairing with
        # `if (update_counts)` (collection pass adds the detection to the track);
        # pairing it with the distance chain would make it dead code since
        # fMaxDistance caps fClosestDistance at 5.0 -- confirm against VCS
        nDetectID = (idx_img * width * height) + (found_i + width * found_j)
        bResult = AddDetectID(listDetectIdentifiers, nDetectID)

        if (bResult):
            # Detection not already in list -- incorporate the detection into the track velocity
            detect = [found_i, found_j]
            AddTrackVelocity(listPixelsPerHourX,
                             listPixelsPerHourY,
                             listX,
                             listY,
                             fDateObs_Current,
                             fElapsedTimeInSec,
                             found_i - fX,
                             found_j - fY,
                             detect)

            # Widen the confirmed-detection interval to cover both timestamps
            res = UpdateTrackDetectionIntervals(fDateObsFirstConfirmedDetection, fDateObsLastConfirmedDetection, fDateObs_Current)
            fDateObsFirstConfirmedDetection = res[0]
            fDateObsLastConfirmedDetection = res[1]

            res = UpdateTrackDetectionIntervals(fDateObsFirstConfirmedDetection, fDateObsLastConfirmedDetection, fDateObs_Adjacent)
            fDateObsFirstConfirmedDetection = res[0]
            fDateObsLastConfirmedDetection = res[1]

            listDetectCounts.append(types.int64(found_v1))

    return (source_img_idx,
            listPixelsPerHourX,
            listPixelsPerHourY,
            listX,
            listY,
            listDetectIdentifiers,
            fDateObsFirstConfirmedDetection,
            fDateObsLastConfirmedDetection,
            first_confirmed_idx_img,
            last_confirmed_idx_img,
            nNumDetectsAt2,
            nNumDetectsAt3,
            nNumDetectsAt4,
            nNumDetectsAt5,
            nNumDetectsAt6,
            nNumDetectsAt7,
            nNumDetectsAt8,
            listDetectCounts)

# CollectDetections
# Summary:
# Collects detects from each image that can be associated with the current track
# The input/output fields would normally be contained within an object,
# however, Numba requires primitive data types in order to work correctly.
# Input:
# The same flat track state consumed by TrackPresentOnImg:
#   source_img_idx:  index of the image the track was created from
#   listPixelsPerHourX / listPixelsPerHourY:  detection motion, pixels/hour
#   listX / listY:  detection coordinates
#   listDetectIdentifiers:  detection identifiers
#   fDateObsFirstConfirmedDetection / fDateObsLastConfirmedDetection
#   first_confirmed_idx_img / last_confirmed_idx_img
#   nNumDetectsAt2 .. nNumDetectsAt8:  counts of detections found within
#       2..8 pixels of the expected position
#   listDetectCounts:  detection cluster sizes, in pixels
#   num_img / width / height:  sequence length and image dimensions
#   listDateObs:  DateObs per image
#   listImgDetections:  detection images
#   update_counts:  whether to update the detection count statistics
# Output:
# (source_img_idx, fDateObsFirstConfirmedDetection,
#  fDateObsLastConfirmedDetection, first_confirmed_idx_img,
#  last_confirmed_idx_img, nNumDetectsAt2 .. nNumDetectsAt8)
@njit
def CollectDetections(source_img_idx,
                      listPixelsPerHourX,
                      listPixelsPerHourY,
                      listX,
                      listY,
                      listDetectIdentifiers,
                      fDateObsFirstConfirmedDetection,
                      fDateObsLastConfirmedDetection,
                      first_confirmed_idx_img,
                      last_confirmed_idx_img,
                      nNumDetectsAt2,
                      nNumDetectsAt3,
                      nNumDetectsAt4,
                      nNumDetectsAt5,
                      nNumDetectsAt6,
                      nNumDetectsAt7,
                      nNumDetectsAt8,
                      listDetectCounts,
                      num_img,
                      width,
                      height,
                      listDateObs,
                      listImgDetections,
                      update_counts):

    # Visit every image exactly once, wrapping around from the source image.
    cur_idx = source_img_idx

    while (True):

        state = TrackPresentOnImg(source_img_idx,
                                  listPixelsPerHourX,
                                  listPixelsPerHourY,
                                  listX,
                                  listY,
                                  listDetectIdentifiers,
                                  fDateObsFirstConfirmedDetection,
                                  fDateObsLastConfirmedDetection,
                                  first_confirmed_idx_img,
                                  last_confirmed_idx_img,
                                  nNumDetectsAt2,
                                  nNumDetectsAt3,
                                  nNumDetectsAt4,
                                  nNumDetectsAt5,
                                  nNumDetectsAt6,
                                  nNumDetectsAt7,
                                  nNumDetectsAt8,
                                  listDetectCounts,
                                  num_img,
                                  cur_idx,
                                  width,
                                  height,
                                  listDateObs,
                                  listImgDetections,
                                  update_counts)

        # Carry the (possibly updated) state into the next iteration
        (source_img_idx,
         listPixelsPerHourX,
         listPixelsPerHourY,
         listX,
         listY,
         listDetectIdentifiers,
         fDateObsFirstConfirmedDetection,
         fDateObsLastConfirmedDetection,
         first_confirmed_idx_img,
         last_confirmed_idx_img,
         nNumDetectsAt2,
         nNumDetectsAt3,
         nNumDetectsAt4,
         nNumDetectsAt5,
         nNumDetectsAt6,
         nNumDetectsAt7,
         nNumDetectsAt8,
         listDetectCounts) = state

        cur_idx = (cur_idx + 1) % num_img

        if (cur_idx == source_img_idx):
            # Wrapped back to the source image -- every image has been evaluated
            break

    return (source_img_idx,
            fDateObsFirstConfirmedDetection,
            fDateObsLastConfirmedDetection,
            first_confirmed_idx_img,
            last_confirmed_idx_img,
            nNumDetectsAt2,
            nNumDetectsAt3,
            nNumDetectsAt4,
            nNumDetectsAt5,
            nNumDetectsAt6,
            nNumDetectsAt7,
            nNumDetectsAt8)

# ComputeDetectionVelocityInOrder
# Summary:
# Updates the detection velocities so that they are computed in order of image index
# Input:
# listDetectIdentifiers: A list of detection identifiers
# listDateObs: The list of DateObs associated with the current image sequence
# Output:
# listPixelsPerHourX: A list of DateObs:PixelsPerHourX pairs
# listPixelsPerHourY: A list of DateObs:PixelsPerHourY pairs
# listX: A list of DateObs:X pairs
# listY: A list of DateObs:Y pairs
@njit
def ComputeDetectionVelocityInOrder(listDetectIdentifiers, listDateObs):

    # Sorting the identifiers orders detections by image index, since the
    # image index occupies the identifier's high bits.
    listDetectIdentifiers.sort()

    listPixelsPerHourX = typed.List.empty_list(types.double)
    listPixelsPerHourY = typed.List.empty_list(types.double)
    listX = typed.List.empty_list(types.double)
    listY = typed.List.empty_list(types.double)

    prev_idx_img = 0
    prev_x = 0
    prev_y = 0

    for pos in range(len(listDetectIdentifiers)):

        nDetectID = listDetectIdentifiers[pos]
        # Unpack identifier: id = idx_img*1024*1024 + y*1024 + x
        idx_img = int((nDetectID) / (1024*1024))
        x = nDetectID % 1024
        y = int(nDetectID / 1024) % 1024

        if (pos == 0):
            # First detection -- nothing to difference against yet
            prev_idx_img = idx_img
            prev_x = x
            prev_y = y
            continue

        fDateObsCur = listDateObs[idx_img]
        fTimeElapsedInSec = fDateObsCur - listDateObs[prev_idx_img]

        detect = [x, y]
        AddTrackVelocity(listPixelsPerHourX,
                         listPixelsPerHourY,
                         listX,
                         listY,
                         fDateObsCur,
                         fTimeElapsedInSec,
                         x - prev_x,
                         y - prev_y,
                         detect)

        prev_idx_img = idx_img
        prev_x = x
        prev_y = y

    return (listPixelsPerHourX, listPixelsPerHourY, listX, listY)

# ComputeTrackMotionCoefficients
# Summary:
# This function was used to compute a 1st or 2nd order fit for a given track
# No longer used, since such fitting allowed more false tracks than the current
# approach of enforcing proximity limits on a set of detections.
# Input:
# track: A given track to evaluate
# img_list: The list of images for the current image sequence
# debug: A flag used for debugging purposes
# Modifies:
# Track vector coefficients X and Y
def ComputeTrackMotionCoefficients(track, img_list, debug):

    vecTime = []
    vecX = []
    vecY = []

    for nDetectID in track.listDetectIdentifiers:
        # Unpack identifier: id = idx_img*1024*1024 + y*1024 + x
        idx_img = int((nDetectID) / (1024*1024))
        vecTime.append(img_list[idx_img].fDateObs)
        vecX.append(nDetectID % 1024)
        vecY.append(int(nDetectID / 1024) % 1024)

    if (debug):
        # Dump the raw samples instead of fitting
        for t in range(len(vecX)):
            print(" ("+repr(vecTime[t])+", " +repr(vecX[t])+", "+repr(vecY[t])+")")
        return

    track.vecCoeffX.clear()
    track.vecCoeffY.clear()

    track.vecCoeffX = np.polyfit(vecTime, vecX, SOHO_FIT_ORDER)
    track.vecCoeffY = np.polyfit(vecTime, vecY, SOHO_FIT_ORDER)

# GetXYForTrackImgIndex
# Input:
# track: A given track to be evaluated
# idx_img: The index of the image on which to return the track position
# Output:
# Found: Whether or not the track was found on the given image
# x: The X coordinate of the track on the given image
# y: The Y coordinate of the track on the given image
def GetXYForTrackImgIndex(track, idx_img):

    for nDetectID in track.listDetectIdentifiers:

        if (int((nDetectID)/(1024*1024)) != idx_img):
            continue

        # Identifier belongs to this image -- unpack its coordinates
        return (True, nDetectID % 1024, int(nDetectID / 1024) % 1024)

    return (False, 0, 0)

# ComputeTrackPosition
# Input:
# track: A given track to evaluate
# idx_img: The image on which the track position is to be computed
# fDateObs: The DateObs associated with the image on which the track position is to be computed
# Output:
# X: The computed X position of the track
# Y: The computed Y position of the track
def ComputeTrackPosition(track, idx_img, fDateObs):

    if (idx_img >= 0):
        found, x, y = GetXYForTrackImgIndex(track, idx_img)
        if (found):
            # An actual detection exists on this image -- return its XY
            return (x, y)

    # Detection not found -- extrapolate from the nearest known motion sample
    res = GetDetection(track.listPixelsPerHourX,
                       track.listPixelsPerHourY,
                       track.listX,
                       track.listY,
                       False,
                       fDateObs)

    fDateObs_Source = res[0]
    fPixelsPerHourX = res[1]
    fPixelsPerHourY = res[2]
    fX = res[3]
    fY = res[4]

    fElapsedTimeInHours = (fDateObs - fDateObs_Source) / 3600.0

    return (fX + (fElapsedTimeInHours * fPixelsPerHourX),
            fY + (fElapsedTimeInHours * fPixelsPerHourY))

# ComputeTrackPositions
# Input:
# track: A given track to be evaluated
# num_img: The number of images in the current sequence
# img_list: The list of images in the current sequence
# Modifies:
# track.vectorPositions: The track positions across the image sequence
def ComputeTrackPositions(track, num_img, img_list):

    xy = ComputeTrackPosition(track, track.first_confirmed_idx_img, track.fDateObsFirstConfirmedDetection)
    track.first_confirmed_x = int(xy[0] + 0.5)
    track.first_confirmed_y = int(xy[1] + 0.5)

    xy = ComputeTrackPosition(track, track.last_confirmed_idx_img, track.fDateObsLastConfirmedDetection)
    track.last_confirmed_x = int(xy[0] + 0.5)
    track.last_confirmed_y = int(xy[1] + 0.5)

    track.vectorPositions.clear()

    # One rounded (x, y) per image in the sequence
    for idx in range(num_img):
        px, py = ComputeTrackPosition(track, idx, img_list[idx].fDateObs)
        track.vectorPositions.append((int(px + 0.5), int(py + 0.5)))

# ComputeDistanceToSun
# Input:
# fX: A given X coordinate
# fY: A given Y coordinate
# Output:
# fDistance: The distance to the center of the image from the given X,Y location
def ComputeDistanceToSun(fX, fY):

    # The Sun sits at the image centre (512, 512)
    fDeltaX = fX - 512
    fDeltaY = fY - 512

    return sqrt(fDeltaX * fDeltaX + fDeltaY * fDeltaY)

# ComputeTrackSunMotionVector
# Input:
# track: A given track to be evaluated
# Modifies:
# track.fSunMotionVector: The motion vector of the track in the Sun direction.
def ComputeTrackSunMotionVector(track):

    # Compute the motion vector using the first and last confirmed detections
    fX1 = track.first_confirmed_x
    fY1 = track.first_confirmed_y
    fX2 = track.last_confirmed_x
    fY2 = track.last_confirmed_y

    fDeltaX = fX2 - fX1
    fDeltaY = fY2 - fY1
    fDistance = sqrt(fDeltaX * fDeltaX + fDeltaY * fDeltaY)

    fDeltaSunDistance = ComputeDistanceToSun(fX2, fY2) - ComputeDistanceToSun(fX1, fY1)

    # Guard against a degenerate (near-zero) travel distance
    if (abs(fDistance) > 0.01):
        track.fSunMotionVector = fDeltaSunDistance / fDistance
    else:
        track.fSunMotionVector = 0

# def GetAverageTrackVelocity(track, num_img, img_list):
#
#     fDateObs_First = img_list[0].fDateObs
#     fDateObs_Last = img_list[num_img - 1].fDateObs
#
#     res = GetDetection(track, False, fDateObs_First)
#     fDateObs_Found = res[0]
#     fPixelsPerHourX_1 = res[1]
#     fPixelsPerHourY_1 = res[2]
#     fX_1 = res[3]
#     fY_1 = res[4]
#
#     res = GetDetection(track, False, fDateObs_Last)
#     fDateObs_Found = res[0]
#     fPixelsPerHourX_2 = res[1]
#     fPixelsPerHourY_2 = res[2]
#     fX_2 = res[3]
#     fY_2 = res[4]
#
#     fPixelsPerHourX = 0.5 * (fPixelsPerHourX_1 + fPixelsPerHourX_2)
#     fPixelsPerHourY = 0.5 * (fPixelsPerHourY_1 + fPixelsPerHourY_2)
#
#     return (fPixelsPerHourX, fPixelsPerHourY)

# GetAverageTrackVelocity2
# Input:
# track: A given track to be evaluated
# num_img: The number of images in the current sequence
# img_list: The list of images in the current sequence
# Output:
# fPixelsPerHourX: The X-motion of the track, in pixels/hour
# fPixelsPerHourY: The Y-motion of the track, in pixels/hour
def GetAverageTrackVelocity2(track, num_img, img_list):

    fTimeElapsedInSec = track.fDateObsLastConfirmedDetection - track.fDateObsFirstConfirmedDetection

    if (abs(fTimeElapsedInSec) <= 0.1):
        # Degenerate interval -- no measurable motion
        return (0, 0)

    fTimeElapsedInHours = fTimeElapsedInSec / 3600.0

    return ((track.last_confirmed_x - track.first_confirmed_x) / fTimeElapsedInHours,
            (track.last_confirmed_y - track.first_confirmed_y) / fTimeElapsedInHours)

# ComputeTrackDirection
# Input:
# track: A given track to be evaluated
# num_img: The number of images in the current sequence
# img_list: The list of images in the current sequence
# Modifies:
# track.fDirection: The computed direction of the track in the image
def ComputeTrackDirection(track, num_img, img_list):

    # Direction angle (degrees) of the average velocity vector
    vel = GetAverageTrackVelocity2(track, num_img, img_list)
    track.fDirection = R2D * math.atan2(vel[1], vel[0])

# ComputeTrackGridSection
# Input:
# track: A given track to be evaluated
# Modifies:
# track.nGridSection: The location of the track in the image grid (integer in [0,15])
def ComputeTrackGridSection(track):

    # Midpoint of the confirmed portion of the track, normalized to [0, 1]
    fX = (0.5 * (track.first_confirmed_x + track.last_confirmed_x)) / 1024.0
    fY = (0.5 * (track.first_confirmed_y + track.last_confirmed_y)) / 1024.0

    nX = int(4.0 * fX + 0.5)
    nY = int(4.0 * fY + 0.5)

    # Clamp into the 4x4 grid
    if (nX < 0):
        nX = 0
    elif (nX >= 4):
        nX = 3

    if (nY < 0):
        nY = 0
    elif (nY >= 4):
        nY = 3

    track.nGridSection = nX + (4*nY)

# epoch_seconds_to_gregorian_date
# Summary:
# Helper function to convert UNIX timestamp to year/month/day
# Used to compute the month index of a given track
# Sourced from:
# https://stackoverflow.com/questions/35796786/how-to-calculate-the-current-month-from-python-time-time
# Input:
# eseconds: A UNIX timestamp (epoch of 1970-01-01 00:00:00)
# Output:
# Y: Gregorian calendar year
# M: Gregorian calendar month
# D: Gregorian calendar day
def epoch_seconds_to_gregorian_date(eseconds):
    # Algorithm parameters for the Gregorian calendar (Richards' algorithm)
    y = 4716; j = 1401; m = 2; n = 12; r = 4; p = 1461
    v = 3; u = 5; s = 153; w = 2; B = 274277; C = -38

    # Julian day number, rounded
    J = int(0.5 + eseconds / 86400.0 + 2440587.5)

    f = J + j + (((4 * J + B) // 146097) * 3) // 4 + C
    e = r * f + v
    g = (e % p) // r
    h = u * g + w
    D = (h % s) // u + 1
    M = (h // s + m) % n + 1
    Y = (e // p) - y + (n + m - M) // n

    return Y, M, D

# ComputeTrackMonthIndex
# Input:
# track: A given track to be evaluated
# Modifies:
# track.nMonthIndex: The month index associated with the given track
def ComputeTrackMonthIndex(track):

    # Month of the first confirmed detection, 0-based (January == 0)
    ymd = epoch_seconds_to_gregorian_date(track.fDateObsFirstConfirmedDetection)
    track.nMonthIndex = ymd[1] - 1

# ComputeTrackVelocity
# Input:
# track: A given track to be evaluated
# num_img: The number of images in the current sequence
# img_list: The list of images in the current sequence
# Modifies:
# track.fVelocity: The average track velocity from first to last confirmed detection
def ComputeTrackVelocity(track, num_img, img_list):

    # Magnitude of the average velocity vector, in pixels/hour
    vx, vy = GetAverageTrackVelocity2(track, num_img, img_list)
    track.fVelocity = sqrt(vx * vx + vy * vy)

# ComputeTrackAttributes
# Input:
# track: A given track to be evaluated
# num_img: The number of images in the current sequence
# img_list: The list of images in the current sequence
# Modifies:
# Track sun motion vector, direction, grid section, velocity and month index
def ComputeTrackAttributes(track, num_img, img_list):

    ComputeTrackSunMotionVector(track)
    ComputeTrackDirection(track, num_img, img_list)
    ComputeTrackGridSection(track)
    ComputeTrackVelocity(track, num_img, img_list)
    ComputeTrackMonthIndex(track)

# ComputeR2
# Summary:
# Was used as part of evaluating the quality of a track
# No longer used since false tracks could still have high R2 fit
# Input:
# vecTime: A vector of timestamps
# vecCoord: A vector of coordinates
# vecCoeff: A vector of coefficients for fitting
# Output:
# The R2 value for the requested fit
def ComputeR2(vecTime, vecCoord, vecCoeff):

    # Mean of the observed coordinates
    fCount = 0
    fSum = 0
    for t in range(len(vecCoord)):
        fSum += vecCoord[t]
        fCount += 1.0
    fAvg = fSum / fCount

    fSumSqResiduals = 0
    fSumSquares = 0

    for t in range(len(vecCoord)):

        fDelta = vecCoord[t] - fAvg
        fSumSquares += fDelta*fDelta

        fDateObs = vecTime[t]

        # Evaluate the fitted polynomial at this timestamp
        if (2==SOHO_FIT_ORDER):
            fComputedValue = vecCoeff[2] + vecCoeff[1] * fDateObs + (vecCoeff[0] * fDateObs * fDateObs)
        elif (1==SOHO_FIT_ORDER):
            fComputedValue = vecCoeff[1] + vecCoeff[0] * fDateObs

        fDelta = vecCoord[t] - fComputedValue
        fSumSqResiduals += fDelta*fDelta

    # Constant data has no variance to explain
    if (abs(fSumSquares) < 0.000000000001):
        return 0

    return 1.0 - (fSumSqResiduals / fSumSquares)

# ComputeTrackQuality_Fit
# Input:
# track: A given track to evaluate
# img_list: A list of images in the current sequence
# Output:
# The best fit of the track in its X- and Y-motion
def ComputeTrackQuality_Fit(track, img_list):

    vecTime = []
    vecX = []
    vecY = []

    for nDetectID in track.listDetectIdentifiers:

        # Unpack identifier: id = idx_img*1024*1024 + y*1024 + x
        idx_img = int(nDetectID / (1024*1024))
        vecTime.append(img_list[idx_img].fDateObs)
        vecX.append(nDetectID % 1024)
        vecY.append(int(nDetectID / 1024) % 1024)

    fR2_X = ComputeR2(vecTime, vecX, track.vecCoeffX)
    fR2_Y = ComputeR2(vecTime, vecY, track.vecCoeffY)

    # The weaker of the two axis fits bounds the track quality
    return min(fR2_X, fR2_Y)

# ComputeFilteredSum
# Input:
# listValues: A list of values to be evaluated
# Output:
# A summation with the lowest and highest values omitted
def ComputeFilteredSum(listValues):

    # NOTE: sorts listValues in place
    listValues.sort()
    idx_last = len(listValues) - 1

    out = 0
    for pos in range(len(listValues)):
        # Skip the smallest (pos 0) and largest (idx_last) entries
        if (pos == 0):
            continue
        if (pos == idx_last):
            break
        out += listValues[pos]

    return out

# ComputeTrackQuality
# Input:
# track: A given track to be evaluated
# num_img: The number of images in the current sequence
# img_list: The list of images in the current sequence
# Modifies:
# track.fGlobalQuality: The global quality score of the track
# track.fQuality: The quality of the track without modifiers (no longer used)
def ComputeTrackQuality(track, num_img, img_list):

    fR2 = 0  # ComputeTrackQuality_Fit(track, img_list) -- fit-based quality retired
    track.fFit_R2 = fR2

    fNumImg = num_img

    # Weighted count of how tightly detections follow the predicted path,
    # normalized so a perfect track scores 1.0
    fQuality = 4.0 * (track.nNumGT0)
    fQuality += 3.0 * (track.nNumGT1)
    fQuality += 2.0 * (track.nNumGT2)
    fQuality += 1.0 * (track.nNumGT3)
    fQuality /= 4.0 * fNumImg

    track_quality.ComputeTrackQuality_SunMotionVector(track)
    track_quality.ComputeTrackQuality_Direction(track)
    track_quality.ComputeTrackQuality_GridSection(track)
    track_quality.ComputeTrackQuality_Velocity(track)
    track_quality.ComputeTrackQuality_GridDirection(track)

    # Compute global track quality: blend path quality with detection statistics
    nFilteredSum = ComputeFilteredSum(track.listDetectCounts)
    fFilteredSum = min(100, nFilteredSum)
    fFilteredSum /= 100.0

    fDetectCount = min(20, len(track.listDetectIdentifiers))
    fDetectCount /= 20.0

    # Legacy R2 modifier (retained for reference; no longer feeds the score)
    fQualityR2 = fR2
    fQualityR2 -= 0.990
    fQualityR2 /= 0.01

    if (fQualityR2 < 0.0):
        fQualityR2 = 0.0
    elif (fQualityR2 > 1.0):
        fQualityR2 = 1.0

    track.fGlobalQuality = 0.80 * fQuality + 0.10 * fFilteredSum + 0.10 * fDetectCount

    # Penalize tracks that trip any of the heuristic flags
    if (track.bFlaggedSunMotionVector):
        track.fGlobalQuality *= 0.70 # 0.50

    if (track.bFlaggedDirection):
        track.fGlobalQuality *= 0.70 # 0.50

    if (track.bFlaggedGridSection):
        track.fGlobalQuality *= 0.80

    if (track.bFlaggedVelocity):
        track.fGlobalQuality *= 0.75

    if (track.bFlaggedGridDirection):
        track.fGlobalQuality *= 0.75

    if (track.nNumGT5 > 0):
        track.fGlobalQuality *= 0.50

    if (abs(track.median_delta_y) < 2):
        track.fGlobalQuality *= 0.50

    track.fQuality = fQuality

# IsEdgeTrack
# Input:
# track: A given track to be evaluated
# num_img: The number of images in the current sequence
# img_list: The list of images in the current sequence
# Output:
# Whether or not the track moves along the edges of the images
def IsEdgeTrack(track, num_img, img_list):

    width = 1024
    height = 1024
    fPixelsPerHourX, fPixelsPerHourY = GetAverageTrackVelocity2(track, num_img, img_list)

    # Compute how far the track moves in four hours
    fTimeElapsedInHours = 4.0
    fDeltaX = fTimeElapsedInHours * (fPixelsPerHourX)
    fDeltaY = fTimeElapsedInHours * (fPixelsPerHourY)

    fMinMovement = 4.0

    # A nearly-stationary track hugging any image border is an edge artifact
    if (-10 <= track.source_img_x <= 10 and abs(fDeltaX) < fMinMovement):
        return True

    if (width - 10 <= track.source_img_x <= width + 10 and abs(fDeltaX) < fMinMovement):
        return True

    if (-10 <= track.source_img_y <= 10 and abs(fDeltaY) < fMinMovement):
        return True

    if (height - 10 <= track.source_img_y <= height + 10 and abs(fDeltaY) < fMinMovement):
        return True

    return False

# ValidateTrackDetections
# Input:
# track: A given track to be evaluated
# num_img: The number of images in the current sequence
# img_list: The list of images in the current sequence
# width: The image width, in pixels
# height: The image height, in pixels
# Modifies:
# track.median_delta_x / track.median_delta_y / track.median_x / track.median_y
# track.nNumGT0 / nNumGT1 / nNumGT2 / nNumGT3 / nNumGT5 / nNumGT7
def ValidateTrackDetections(track, num_img, img_list, width, height):

    track.nNumGT0 = 0
    track.nNumGT1 = 0
    track.nNumGT2 = 0
    track.nNumGT3 = 0
    track.nNumGT5 = 0
    track.nNumGT7 = 0

    listDetects = []

    slen = len(track.listDetectIdentifiers)

    for t in range(slen):

        nDetectID = track.listDetectIdentifiers[t]
        idx_img = int(nDetectID / int(1024*1024))
        x = nDetectID % 1024
        y = int(nDetectID / 1024) % 1024

        new_detect = MyDetect()
        new_detect.idx_img = idx_img
        new_detect.x = x
        new_detect.y = y
        listDetects.append(new_detect)

    listDetects.sort(key=lambda x: x.idx_img, reverse=False)

    track.listDetectIdentifiers.clear()

    slen = len(listDetects)

    for t in range(slen):
        cur_detect = listDetects[t]
        idx_img = cur_detect.idx_img
        x = cur_detect.x
        y = cur_detect.y

nDetectID = (idx_img * width * height) + (x + width * y) 1492 | track.listDetectIdentifiers.append(nDetectID) 1493 | 1494 | listX=[] 1495 | listY=[] 1496 | listDeltaX=[] 1497 | listDeltaY=[] 1498 | 1499 | fPixelsPerHourX=0 1500 | fPixelsPerHourY=0 1501 | fExpectedX=0 1502 | fExpectedY=0 1503 | bHasValidSpeed = False 1504 | fPrevDateObs = 0 1505 | prev_x = 0 1506 | prev_y = 0 1507 | 1508 | slen = len(track.listDetectIdentifiers) 1509 | 1510 | for t in range(slen): 1511 | 1512 | nDetectID = track.listDetectIdentifiers[t] 1513 | idx_img = int(nDetectID / int(1024*1024)) 1514 | fDateObs = img_list[idx_img].fDateObs 1515 | x = nDetectID % 1024 1516 | y = int(nDetectID / 1024) % 1024 1517 | 1518 | listX.append(x) 1519 | listY.append(y) 1520 | 1521 | if (0 == t): 1522 | fPrevDateObs = fDateObs 1523 | prev_x = x 1524 | prev_y = y 1525 | continue 1526 | 1527 | listDeltaX.append((x - prev_x)) 1528 | listDeltaY.append((y - prev_y)) 1529 | 1530 | fElapsedTimeInSec = fDateObs - fPrevDateObs 1531 | 1532 | if (abs(fElapsedTimeInSec)>0.1): 1533 | fElapsedTimeInHours = fElapsedTimeInSec / 3600.0 1534 | else: 1535 | fElapsedTimeInHours = 0.1 / 3600.0 1536 | 1537 | if (bHasValidSpeed): 1538 | fExpectedX = prev_x + fPixelsPerHourX * fElapsedTimeInHours 1539 | fExpectedY = prev_y + fPixelsPerHourY * fElapsedTimeInHours 1540 | 1541 | fDeltaX = x - fExpectedX 1542 | fDeltaY = y - fExpectedY 1543 | fDistance = sqrt(fDeltaX * fDeltaX + fDeltaY * fDeltaY) 1544 | 1545 | if (fDistance > 7.0): 1546 | track.nNumGT7 += 1 1547 | elif (fDistance > 5.0): 1548 | track.nNumGT5 += 1 1549 | elif (fDistance > 3.0): 1550 | track.nNumGT3 += 1 1551 | elif (fDistance > 2.0): 1552 | track.nNumGT2 += 1 1553 | elif (fDistance > 1.0): 1554 | track.nNumGT1 += 1 1555 | elif (fDistance > 0.0): 1556 | track.nNumGT0 += 1 1557 | 1558 | fDeltaX = x - prev_x 1559 | fDeltaY = y - prev_y 1560 | 1561 | fPixelsPerHourX = fDeltaX / fElapsedTimeInHours 1562 | fPixelsPerHourY = fDeltaY / fElapsedTimeInHours 1563 | 
bHasValidSpeed = True 1564 | 1565 | fPrevDateObs = fDateObs 1566 | prev_x = x 1567 | prev_y = y 1568 | 1569 | track.median_delta_x = np.median(listDeltaX) 1570 | track.median_delta_y = np.median(listDeltaY) 1571 | track.median_x = np.median(listX) 1572 | track.median_y = np.median(listY) 1573 | 1574 | # ShouldKeepTrack 1575 | # Input: 1576 | # track: A given track to be evaluated 1577 | # img_list: The list of images in the current sequence 1578 | # listDateObs: The list of DateObs associated with the images 1579 | # width: Image width, in pixels 1580 | # height: Image height, in pixels 1581 | # Output: 1582 | # Whether or not the given track should be retained 1583 | def ShouldKeepTrack(track, img_list, listDateObs, listImgDetections, width, height): 1584 | 1585 | num_img = len(img_list) 1586 | 1587 | # First pass -- identify detections along the track and update track velocity accordingly 1588 | res = CollectDetections(track.source_img_idx, 1589 | track.listPixelsPerHourX, 1590 | track.listPixelsPerHourY, 1591 | track.listX, 1592 | track.listY, 1593 | track.listDetectIdentifiers, 1594 | track.fDateObsFirstConfirmedDetection, 1595 | track.fDateObsLastConfirmedDetection, 1596 | track.first_confirmed_idx_img, 1597 | track.last_confirmed_idx_img, 1598 | track.nNumDetectsAt2, 1599 | track.nNumDetectsAt3, 1600 | track.nNumDetectsAt4, 1601 | track.nNumDetectsAt5, 1602 | track.nNumDetectsAt6, 1603 | track.nNumDetectsAt7, 1604 | track.nNumDetectsAt8, 1605 | track.listDetectCounts, num_img, width, height, listDateObs, listImgDetections, False) 1606 | 1607 | track.source_img_idx = res[0] 1608 | track.fDateObsFirstConfirmedDetection = res[1] 1609 | track.fDateObsLastConfirmedDetection = res[2] 1610 | track.first_confirmed_idx_img = res[3] 1611 | track.last_confirmed_idx_img = res[4] 1612 | track.nNumDetectsAt2 = res[5] 1613 | track.nNumDetectsAt3 = res[6] 1614 | track.nNumDetectsAt4 = res[7] 1615 | track.nNumDetectsAt5 = res[8] 1616 | track.nNumDetectsAt6 = res[9] 1617 | 
track.nNumDetectsAt7 = res[10] 1618 | track.nNumDetectsAt8 = res[11] 1619 | 1620 | if (len(track.listDetectIdentifiers) < 4): 1621 | return False 1622 | 1623 | res = ComputeDetectionVelocityInOrder(track.listDetectIdentifiers, listDateObs) 1624 | 1625 | track.listPixelsPerHourX = res[0] 1626 | track.listPixelsPerHourY = res[1] 1627 | track.listX = res[2] 1628 | track.listY = res[3] 1629 | 1630 | # Second pass -- run once more with the updated track velocity 1631 | # ComputeTrackMotionCoefficients(track, img_list, False) 1632 | res = CollectDetections(track.source_img_idx, 1633 | track.listPixelsPerHourX, 1634 | track.listPixelsPerHourY, 1635 | track.listX, 1636 | track.listY, 1637 | track.listDetectIdentifiers, 1638 | track.fDateObsFirstConfirmedDetection, 1639 | track.fDateObsLastConfirmedDetection, 1640 | track.first_confirmed_idx_img, 1641 | track.last_confirmed_idx_img, 1642 | track.nNumDetectsAt2, 1643 | track.nNumDetectsAt3, 1644 | track.nNumDetectsAt4, 1645 | track.nNumDetectsAt5, 1646 | track.nNumDetectsAt6, 1647 | track.nNumDetectsAt7, 1648 | track.nNumDetectsAt8, 1649 | track.listDetectCounts, num_img, width, height, listDateObs, listImgDetections, True) 1650 | 1651 | track.source_img_idx = res[0] 1652 | track.fDateObsFirstConfirmedDetection = res[1] 1653 | track.fDateObsLastConfirmedDetection = res[2] 1654 | track.first_confirmed_idx_img = res[3] 1655 | track.last_confirmed_idx_img = res[4] 1656 | track.nNumDetectsAt2 = res[5] 1657 | track.nNumDetectsAt3 = res[6] 1658 | track.nNumDetectsAt4 = res[7] 1659 | track.nNumDetectsAt5 = res[8] 1660 | track.nNumDetectsAt6 = res[9] 1661 | track.nNumDetectsAt7 = res[10] 1662 | track.nNumDetectsAt8 = res[11] 1663 | 1664 | if (num_img <= 6): 1665 | # Must have at least three detections at 2 or better 1666 | # And one detection at 3 or better 1667 | if (track.nNumDetectsAt2 + track.nNumDetectsAt3 < 4 or 1668 | track.nNumDetectsAt2 < 3): 1669 | return False 1670 | else: 1671 | # Must have at least three detections at 2 
or better 1672 | # And two detections at 3 or better 1673 | if (track.nNumDetectsAt2 + track.nNumDetectsAt3 < 5 or 1674 | track.nNumDetectsAt2 < 3): 1675 | return False 1676 | 1677 | ValidateTrackDetections(track, num_img, img_list, width, height) 1678 | 1679 | if (track.nNumGT7 > 0): 1680 | return False 1681 | 1682 | # Compute the track positions 1683 | ComputeTrackPositions(track, num_img, img_list) 1684 | 1685 | # Compute track attributes 1686 | ComputeTrackAttributes(track, num_img, img_list) 1687 | 1688 | # Compute track quality 1689 | ComputeTrackQuality(track, num_img, img_list) 1690 | 1691 | #if (track.fFit_R2 < SOHO_FIT_ORDER_CUTOFF): 1692 | # return False 1693 | 1694 | if (IsEdgeTrack(track, num_img, img_list)): 1695 | return False 1696 | 1697 | return True 1698 | 1699 | # ReduceTracks 1700 | # Input: 1701 | # listTracks: A list of tracks to be evaluated 1702 | # img_list: The list of images in the current sequence 1703 | # listDateObs: The list of DateObs associated with the images 1704 | # listImgDetections: The list of detection images 1705 | # width: Image width, in pixels 1706 | # height: Image height, in pixels 1707 | # Output: 1708 | # A new list of tracks, after having performed track reduction. 1709 | def ReduceTracks(listTracks, img_list, listDateObs, listImgDetections, width, height): 1710 | 1711 | listTracks[:] = [trk for trk in listTracks if ShouldKeepTrack(trk, img_list, listDateObs, listImgDetections, width, height)] 1712 | 1713 | return listTracks 1714 | 1715 | # PrintTracks 1716 | # Input: 1717 | # listTracks: A list of track to be printed 1718 | # img_list: The list of images for the current sequence 1719 | # Output: 1720 | # Prints track information to stdout 1721 | def PrintTracks(listTracks, img_list): 1722 | 1723 | slen = len(listTracks) 1724 | 1725 | for t in range(slen): 1726 | track = listTracks[t] 1727 | print(repr(t+1)+". 
("+repr(track.source_img_x)+", "+repr(track.source_img_y)+")" + " Q="+repr(track.fGlobalQuality) + " R2="+repr(track.fFit_R2)) 1728 | 1729 | num_pos = len(track.vectorPositions) 1730 | for q in range(num_pos): 1731 | xy = track.vectorPositions[q] 1732 | print(" ("+repr(xy[0])+", "+repr(xy[1])+")") 1733 | 1734 | if (0==t): 1735 | ComputeTrackMotionCoefficients(track, img_list, True) 1736 | 1737 | # CullToTopNTracks 1738 | # Input: 1739 | # list_tracks: A list of tracks 1740 | # N: The number of tracks to be retained 1741 | # Output: 1742 | # A new list of tracks, preserving only the top N 1743 | def CullToTopNTracks(list_tracks, N): 1744 | newlist = sorted(list_tracks, key=lambda x: x.fGlobalQuality, reverse=True) 1745 | return newlist[:N] 1746 | 1747 | # ConsolidateTracks2 1748 | # Input: 1749 | # track1: First track 1750 | # track2: Second track 1751 | # num_img: The number of images in the current sequence 1752 | # Output: 1753 | # Whether or not the two tracks are considered equivalent 1754 | def ConsolidateTracks2(track1, track2, num_img): 1755 | 1756 | nNumSameDetectIdentifiers = 0 1757 | 1758 | slen1 = len(track1.listDetectIdentifiers) 1759 | slen2 = len(track2.listDetectIdentifiers) 1760 | 1761 | for t in range(slen1): 1762 | 1763 | nDetectID1 = track1.listDetectIdentifiers[t] 1764 | 1765 | for z in range(slen2): 1766 | 1767 | nDetectID2 = track2.listDetectIdentifiers[z] 1768 | 1769 | if (nDetectID1 == nDetectID2): 1770 | nNumSameDetectIdentifiers = nNumSameDetectIdentifiers + 1 1771 | 1772 | nNumSameDetects = max(3, min(5, int(SOHO_PCT_SAME_DETECTS * num_img))) 1773 | 1774 | if (nNumSameDetectIdentifiers < nNumSameDetects): 1775 | # Not the same track 1776 | return False 1777 | 1778 | # Tracks 1 and 2 are considered the same 1779 | 1780 | # Mark track 2 for deletion 1781 | track2.bMarkedForDeletion = True 1782 | 1783 | # Incorporate the motion of track 2 into track 1 1784 | for t in range(num_img): 1785 | xy1 = track1.vectorPositions[t] 1786 | xy2 = 
track2.vectorPositions[t] 1787 | 1788 | x1 = xy1[0] 1789 | y1 = xy1[1] 1790 | 1791 | x2 = xy2[0] 1792 | y2 = xy2[1] 1793 | 1794 | x3 = x1 + x2 1795 | y3 = y1 + y2 1796 | 1797 | xy3 = [] 1798 | xy3.append(x3) 1799 | xy3.append(y3) 1800 | 1801 | track1.vectorPositions[t] = xy3 1802 | 1803 | # Update the number of combined tracks 1804 | track1.nNumCombinedTracks = track1.nNumCombinedTracks + 1 1805 | 1806 | # ConsolidateTracks 1807 | # Input: 1808 | # list_tracks: A list of tracks to be consolidated 1809 | # num_img: The number of images in the current sequence 1810 | # Output: 1811 | # A new list of tracks, after performing consolidation. 1812 | def ConsolidateTracks(list_tracks, num_img): 1813 | 1814 | slen = len(list_tracks) 1815 | 1816 | for t in range(slen): 1817 | 1818 | track1 = list_tracks[t] 1819 | 1820 | if (track1.bMarkedForDeletion): 1821 | continue 1822 | 1823 | z = t+1 1824 | 1825 | while (z0: 23 | train_dt.append(seq) 24 | 25 | print(train_dt) 26 | 27 | -------------------------------------------------------------------------------- /visualize.py: -------------------------------------------------------------------------------- 1 | assert __name__ == "__main__" 2 | """ 3 | Tool to visualize a solution file. Images will be saved out in the current folder. 
4 | Use and modify as you want 5 | Usage: 6 | python3 visualize.py /data/train /data/solution.csv 7 | """ 8 | 9 | import sys 10 | import pickle 11 | import os 12 | import math 13 | import random 14 | from math import sqrt 15 | from skimage import data 16 | from skimage.feature import blob_dog, blob_log, blob_doh 17 | from skimage.color import rgb2gray 18 | import numpy as np 19 | from astropy.io import fits 20 | from scipy.signal import medfilt2d 21 | import matplotlib.pyplot as plt 22 | import cv2 23 | 24 | def explore_sequence(seq): 25 | print(seq["ID"]) 26 | # number of images 27 | numImg = len(seq["path"]) 28 | print("Images: "+str(numImg)) 29 | 30 | width = 1024 31 | height = 1024 32 | 33 | # Create 3D data cube to hold data, assuming all data have 34 | # array sizes of 1024x1024 pixels. 35 | data_cube = np.empty((width,height,numImg)) 36 | 37 | for i in range(numImg): 38 | # read image and header from FITS file 39 | img, hdr = fits.getdata(seq["path"][i], header=True) 40 | 41 | # Normalize by exposure time (a good practice for LASCO data) 42 | img = img.astype('float64') / hdr['EXPTIME'] 43 | 44 | # Store array into datacube (3D array) 45 | data_cube[:,:,i] = img - medfilt2d(img, kernel_size=9) 46 | 47 | rdiff = np.diff(data_cube, axis=2) 48 | print(seq["truth"]) 49 | 50 | for i in range(numImg-1): 51 | 52 | medsub = -rdiff[:,:,i] 53 | medsub = cv2.min(medsub, 10.) 54 | medsub = cv2.max(medsub, -10.) 55 | medsub = (medsub + 10.) * 255. / 20. 
56 | medsub = np.uint8(medsub) 57 | (T, mask) = cv2.threshold(medsub, 190, 255, cv2.THRESH_BINARY) 58 | medsub = cv2.bitwise_and(medsub, mask) 59 | 60 | # mask out sun 61 | cv2.circle(medsub, (512,512), 190, (0,0,0), cv2.FILLED) 62 | 63 | blobs_log = blob_log(medsub, max_sigma=5, min_sigma=1, num_sigma=5, threshold=0.1) 64 | if len(blobs_log)>0: 65 | blobs_log[:, 2] = blobs_log[:, 2] * sqrt(2) 66 | 67 | for blob in blobs_log: 68 | y, x, r = blob 69 | cv2.circle(medsub, (int(float(x)),int(float(y))), int(r)+3, (255,255,255), 1) 70 | 71 | # Draw lines 72 | for j in range(numImg-1): 73 | if seq["images"][j] in seq["truth"]: 74 | xy1 = seq["truth"][seq["images"][j]] 75 | if seq["images"][j+1] in seq["truth"]: 76 | xy2 = seq["truth"][seq["images"][j+1]] 77 | cv2.line(medsub, (int(float(xy1[0])),int(float(xy1[1]))), (int(float(xy2[0])),int(float(xy2[1]))), (255,255,255)) 78 | 79 | if seq["images"][i] in seq["truth"]: 80 | xy = seq["truth"][seq["images"][i]] 81 | cv2.circle(medsub, (int(float(xy[0])),int(float(xy[1]))), 10, (255,255,255), 1) 82 | cv2.imwrite(seq["ID"]+"_"+str(i)+".png", medsub) 83 | 84 | # folder for the set 85 | folder_in = sys.argv[1] 86 | # ground truth file to be visualized 87 | comet_filename = sys.argv[2] 88 | 89 | data_set = [] 90 | with open(comet_filename, 'r') as f: 91 | lines = f.readlines() 92 | for line in lines: 93 | tokens = line.split(',') 94 | seq = {} 95 | seq["ID"] = tokens[0] 96 | images = [] 97 | paths = [] 98 | truths = {} 99 | for i in range( (len(tokens)-2)//3 ): 100 | images.append(tokens[1+i*3]) 101 | paths.append(os.path.join(folder_in, tokens[0], tokens[1+i*3])) 102 | truths[tokens[1+i*3]] = [float(tokens[2+i*3]),tokens[3+i*3]] 103 | images.sort() 104 | paths.sort() 105 | seq["images"] = images 106 | seq["path"] = paths 107 | seq["truth"] = truths 108 | if len(images)>0: 109 | data_set.append(seq) 110 | 111 | for s in data_set: 112 | explore_sequence(s) 113 | 
--------------------------------------------------------------------------------