├── .gitignore
├── LICENSE
├── README.md
├── examples
│   ├── FicoHeloc.ipynb
│   ├── GAMINet-bike-share.ipynb
│   ├── GAMINet-demo.ipynb
│   ├── TF-Pytorch-Check.ipynb
│   ├── bike_share_hour
│   │   ├── bike_share_hour.csv
│   │   ├── data_types.json
│   │   └── readme.txt
│   ├── cocircle.ipynb
│   ├── credit_default
│   │   ├── TaiwanCreditDataset.xls
│   │   ├── credit_data_processed.csv
│   │   ├── credit_default.names
│   │   ├── data_types.json
│   │   ├── load.py
│   │   └── undocumented values
│   ├── dataset.py
│   ├── fico
│   │   ├── .ipynb_checkpoints
│   │   │   └── preprocess-checkpoint.ipynb
│   │   ├── data_types.json
│   │   ├── fico.csv
│   │   ├── heloc_data_dictionary-2.xlsx
│   │   ├── heloc_dataset_v1.csv
│   │   ├── load.py
│   │   ├── preprocess.ipynb
│   │   └── test_file1.csv
│   ├── friedman.ipynb
│   ├── results
│   │   ├── demo.eps
│   │   ├── demo.png
│   │   ├── s1_feature.png
│   │   ├── s1_local.png
│   │   ├── s1_regu_plot.png
│   │   └── s1_traj_plot.png
│   └── twiwan credit.ipynb
├── gaminet
│   ├── __init__.py
│   ├── api.py
│   ├── base.py
│   ├── dataloader.py
│   ├── interpret.py
│   ├── layers.py
│   ├── lib
│   │   ├── lib_ebm_native_linux_x64.so
│   │   ├── lib_ebm_native_mac_x64.dylib
│   │   └── lib_ebm_native_win_x64.dll
│   └── utils.py
└── setup.py

/.gitignore:
--------------------------------------------------------------------------------
examples/.ipynb_checkpoints/*
examples/__pycache__/*
scripts/*
.ipynb_checkpoints/*
gaminet/__pycache__/*
interpret/*.ipynb
build/*
gaminet.egg-info/*
dist/*

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
                    GNU GENERAL PUBLIC LICENSE
                       Version 3, 29 June 2007

 Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.

                            Preamble

  The GNU General Public License is a free, copyleft license for
software and other kinds of works.

  The licenses for most software and other practical works are designed
to take away your freedom to share and change the works.  By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.  We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors.  You can apply it to
your programs, too.

  When we speak of free software, we are referring to freedom, not
price.  Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.

  To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights.  Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.

  For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received.  You must make sure that they, too, receive
or can get the source code.  And you must show them these terms so they
know their rights.
39 | 40 | Developers that use the GNU GPL protect your rights with two steps: 41 | (1) assert copyright on the software, and (2) offer you this License 42 | giving you legal permission to copy, distribute and/or modify it. 43 | 44 | For the developers' and authors' protection, the GPL clearly explains 45 | that there is no warranty for this free software. For both users' and 46 | authors' sake, the GPL requires that modified versions be marked as 47 | changed, so that their problems will not be attributed erroneously to 48 | authors of previous versions. 49 | 50 | Some devices are designed to deny users access to install or run 51 | modified versions of the software inside them, although the manufacturer 52 | can do so. This is fundamentally incompatible with the aim of 53 | protecting users' freedom to change the software. The systematic 54 | pattern of such abuse occurs in the area of products for individuals to 55 | use, which is precisely where it is most unacceptable. Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products. If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 60 | 61 | Finally, every program is threatened constantly by software patents. 62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". "Licensees" and 82 | "recipients" may be individuals or organizations. 83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 
102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 
163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. 
This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. You need not require recipients to copy the 279 | Corresponding Source along with the object code. If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 
287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 
344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 386 | those licensors and authors. 387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 
408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. 
For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. The 475 | work thus licensed is called the contributor's "contributor version". 476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. "Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 
520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 
  If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.

  Later license versions may give you additional or different
permissions.  However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.

  15. Disclaimer of Warranty.

  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

  16. Limitation of Liability.

  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.

  17. Interpretation of Sections 15 and 16.

  If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.

                     END OF TERMS AND CONDITIONS

            How to Apply These Terms to Your New Programs

  If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.

  To do so, attach the following notices to the program.  It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.

    <one line to give the program's name and a brief idea of what it does.>
    Copyright (C) <year>  <name of author>

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <https://www.gnu.org/licenses/>.

Also add information on how to contact you by electronic and paper mail.

  If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:

    <program>  Copyright (C) <year>  <name of author>
    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
    This is free software, and you are welcome to redistribute it
    under certain conditions; type `show c' for details.

The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License.  Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".

  You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.

  The GNU General Public License does not permit incorporating your program
into proprietary programs.  If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library.  If this is what you want to do, use the GNU Lesser General
Public License instead of this License.  But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# GAMI-Net PyTorch version

Generalized additive models with structured interactions - PyTorch version.

## Installation

```shell
pip install git+https://github.com/SelfExplainML/GamiNet-PyTorch
```

## Citations

```latex
@article{yang2021gami,
  title={GAMI-Net: An Explainable Neural Network based on Generalized Additive Models with Structured Interactions},
  author={Yang, Zebin and Zhang, Aijun and Sudjianto, Agus},
  journal={Pattern Recognition},
  volume={120},
  pages={108192},
  year={2021}
}
```
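## Usage

The snippet below sketches a basic regression workflow. It assumes the scikit-learn style constructor/`fit`/`predict` interface used in the example notebooks; calling the constructor with defaults is an assumption here, and the demo notebooks pass additional options such as `meta_info`. See `examples/GAMINet-demo.ipynb` for a complete, tested walkthrough.

```python
import numpy as np

from gaminet import GAMINetRegressor

# Toy regression data; the demos scale features to [0, 1] with MinMaxScaler.
np.random.seed(0)
x = np.random.uniform(0, 1, size=(1000, 5))
y = (8 * (x[:, 0] - 0.5) ** 2                     # main effect
     + 3 * np.sin(2 * np.pi * x[:, 1] * x[:, 2])  # pairwise interaction
     + 0.1 * np.random.randn(1000))

model = GAMINetRegressor()  # assumed defaults; see the demos for full options
model.fit(x, y)
pred = model.predict(x)
print("train RMSE:", np.sqrt(np.mean((pred - y) ** 2)))
```

The interpretation helpers imported in the notebooks (`gaminet.utils.global_visualize_density`, `gaminet.utils.local_visualize`, etc.) operate on the fitted model; see the demo notebooks for their exact arguments.

--------------------------------------------------------------------------------
/examples/TF-Pytorch-Check.ipynb:
--------------------------------------------------------------------------------
1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | "ExecuteTime": { 8 | "end_time": "2022-04-03T02:42:01.439988Z", 9 | "start_time": "2022-04-03T02:41:59.900308Z" 10 | }, 11 | "scrolled": true 12 | }, 13 | "outputs": [], 14 | "source": [ 15 | "%matplotlib inline\n", 16 | "\n", 17 | "import os\n", 18 | "import sys\n", 19 | "\n", 20 | "PACKAGE_PARENT = '..'\n", 21 | "sys.path.append(PACKAGE_PARENT)\n", 22 | "\n", 23 | "import torch\n", 24 | "import numpy as np\n", 25 | "from sklearn.preprocessing import MinMaxScaler\n", 26 | "from sklearn.model_selection import train_test_split\n", 27 | "\n", 28 | "from gaminet import GAMINetRegressor\n", 29 | "from gaminet.utils import local_visualize\n", 30 | "from gaminet.utils import global_visualize_density\n", 31 | "from gaminet.utils import feature_importance_visualize\n", 32 | "from gaminet.utils import plot_trajectory\n", 33 | "from gaminet.utils import plot_regularization" 34 | ] 35 | }, 36 | { 37 | "cell_type": "markdown", 38 | "metadata": {}, 39 | "source": [ 40 | "## Load data" 41 | ]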
}, 43 | { 44 | "cell_type": "code", 45 | "execution_count": 2, 46 | "metadata": { 47 | "ExecuteTime": { 48 | "end_time": "2022-04-03T02:42:03.637759Z", 49 | "start_time": "2022-04-03T02:42:01.441368Z" 50 | } 51 | }, 52 | "outputs": [], 53 | "source": [ 54 | "def metric_wrapper(metric, scaler):\n", 55 | " def wrapper(label, pred):\n", 56 | " return metric(label, pred, scaler=scaler)\n", 57 | " return wrapper\n", 58 | "\n", 59 | "def rmse(label, pred, scaler):\n", 60 | " pred = scaler.inverse_transform(pred.reshape([-1, 1]))\n", 61 | " label = scaler.inverse_transform(label.reshape([-1, 1]))\n", 62 | " return np.sqrt(np.mean((pred - label)**2))\n", 63 | "\n", 64 | "def data_generator1(datanum, dist=\"uniform\", random_state=0):\n", 65 | " \n", 66 | " nfeatures = 100\n", 67 | " np.random.seed(random_state)\n", 68 | " x = np.random.uniform(0, 1, [datanum, nfeatures])\n", 69 | " x1, x2, x3, x4, x5, x6 = [x[:, [i]] for i in range(6)]\n", 70 | "\n", 71 | " def cliff(x1, x2):\n", 72 | " # x1: -20,20\n", 73 | " # x2: -10,5\n", 74 | " x1 = (2 * x1 - 1) * 20\n", 75 | " x2 = (2 * x2 - 1) * 7.5 - 2.5\n", 76 | " term1 = -0.5 * x1 ** 2 / 100\n", 77 | " term2 = -0.5 * (x2 + 0.03 * x1 ** 2 - 3) ** 2\n", 78 | " y = 10 * np.exp(term1 + term2)\n", 79 | " return y\n", 80 | "\n", 81 | " y = (8 * (x1 - 0.5) ** 2\n", 82 | " + 0.1 * np.exp(-8 * x2 + 4)\n", 83 | " + 3 * np.sin(2 * np.pi * x3 * x4) + cliff(x5, x6)\n", 84 | " ).reshape([-1,1]) + 1 * np.random.normal(0, 1, [datanum, 1])\n", 85 | "\n", 86 | " task_type = \"Regression\"\n", 87 | " meta_info = {\"X\" + str(i + 1):{'type':'continuous'} for i in range(nfeatures)}\n", 88 | " meta_info.update({'Y':{'type':'target'}}) \n", 89 | " for i, (key, item) in enumerate(meta_info.items()):\n", 90 | " if item['type'] == 'target':\n", 91 | " sy = MinMaxScaler((0, 1))\n", 92 | " y = sy.fit_transform(y)\n", 93 | " meta_info[key]['scaler'] = sy\n", 94 | " else:\n", 95 | " sx = MinMaxScaler((0, 1))\n", 96 | " sx.fit([[0], [1]])\n", 97 | " x[:,[i]] = sx.transform(x[:,[i]])\n", 98 | " meta_info[key]['scaler'] = sx\n", 99 | "\n", 100 | " train_x, test_x, train_y, test_y = train_test_split(x, y, test_size=0.2, random_state=random_state)\n", 101 | " return train_x, test_x, train_y, test_y, task_type, meta_info, metric_wrapper(rmse, sy)\n", 102 | "\n", 103 | "random_state = 0\n", 104 | "train_x, test_x, train_y, test_y, task_type, meta_info, get_metric = data_generator1(datanum=1000000, random_state=random_state)" 105 | ] 106 | }, 107 | { 108 | "cell_type": "markdown", 109 | "metadata": { 110 | "ExecuteTime": { 111 | "end_time": "2022-04-03T02:38:14.163923Z", 112 | "start_time": "2022-04-03T02:38:14.162077Z" 113 | } 114 | }, 115 | "source": [ 116 | "# Compare pytorch and tensorflow GAM" 117 | ] 118 | }, 119 | { 120 | "cell_type": "code", 121 | "execution_count": 3, 122 | "metadata": { 123 | "ExecuteTime": { 124 | "end_time": "2022-04-03T02:42:03.657084Z", 125 | "start_time": "2022-04-03T02:42:03.638916Z" 126 | } 127 | }, 128 | "outputs": [ 129 | { 130 | "data": { 131 | "text/plain": [ 132 | "tensor([-0.0842, -0.0735, -0.0710, -0.1213, 0.0126, -0.1070, -0.2776, -0.0694,\n", 133 | " -0.2091, 0.1363], grad_fn=)" 134 | ] 135 | }, 136 | "execution_count": 3, 137 | "metadata": {}, 138 | "output_type": "execute_result" 139 | } 140 | ], 141 | "source": [ 142 | "import torch\n", 143 | "\n", 144 | "\n", 145 | "class TensorLayer(torch.nn.Module):\n", 146 | "\n", 147 | " def __init__(self, n_subnets, subnet_arch, n_input_nodes, activation_func, device):\n", 148 | " super().__init__()\n", 149 
| "\n", 150 | " self.device = device\n", 151 | " self.n_subnets = n_subnets\n", 152 | " self.n_input_nodes = n_input_nodes\n", 153 | " self.activation_func = activation_func\n", 154 | " self.n_hidden_layers = len(subnet_arch)\n", 155 | "\n", 156 | " all_biases = [] \n", 157 | " all_weights = []\n", 158 | " n_hidden_nodes_prev = n_input_nodes\n", 159 | " for i, n_hidden_nodes in enumerate(subnet_arch + [1]):\n", 160 | " if i == 0:\n", 161 | " w = torch.nn.Parameter(torch.empty(size=(n_subnets, n_hidden_nodes_prev, n_hidden_nodes),\n", 162 | " dtype=torch.float, requires_grad=True, device=device))\n", 163 | " b = torch.nn.Parameter(torch.empty(size=(n_subnets, n_hidden_nodes),\n", 164 | " dtype=torch.float, requires_grad=True, device=device))\n", 165 | " elif i == self.n_hidden_layers:\n", 166 | " w = torch.nn.Parameter(torch.empty(size=(n_subnets, n_hidden_nodes_prev, 1),\n", 167 | " dtype=torch.float, requires_grad=True, device=device))\n", 168 | " b = torch.nn.Parameter(torch.empty(size=(n_subnets, 1),\n", 169 | " dtype=torch.float, requires_grad=True, device=device))\n", 170 | " else:\n", 171 | " w = torch.nn.Parameter(torch.empty(size=(n_subnets, n_hidden_nodes_prev, n_hidden_nodes),\n", 172 | " dtype=torch.float, requires_grad=True, device=device))\n", 173 | " b = torch.nn.Parameter(torch.empty(size=(n_subnets, n_hidden_nodes),\n", 174 | " dtype=torch.float, requires_grad=True, device=device))\n", 175 | " n_hidden_nodes_prev = n_hidden_nodes\n", 176 | " torch.nn.init.zeros_(b)\n", 177 | " for j in range(n_subnets):\n", 178 | " torch.nn.init.orthogonal_(w[j])\n", 179 | " all_biases.append(b)\n", 180 | " all_weights.append(w)\n", 181 | " self.all_biases = torch.nn.ParameterList(all_biases)\n", 182 | " self.all_weights = torch.nn.ParameterList(all_weights)\n", 183 | "\n", 184 | " def individual_forward(self, inputs, idx):\n", 185 | "\n", 186 | " xs = inputs\n", 187 | " for i in range(self.n_hidden_layers):\n", 188 | " xs = self.activation_func(torch.matmul(xs, self.all_weights[i][idx]) + self.all_biases[i][idx])\n", 189 | " outputs = torch.matmul(xs, self.all_weights[-1][idx]) + self.all_biases[-1][idx]\n", 190 | " return outputs\n", 191 | "\n", 192 | " def forward(self, inputs):\n", 193 | "\n", 194 | " xs = torch.unsqueeze(torch.transpose(inputs, 0, 1), 2)\n", 195 | " for i in range(self.n_hidden_layers):\n", 196 | " xs = self.activation_func(torch.matmul(xs, self.all_weights[i])\n", 197 | " + torch.reshape(self.all_biases[i], [self.n_subnets, 1, -1]))\n", 198 | "\n", 199 | " outputs = torch.matmul(xs, self.all_weights[-1]) + torch.reshape(self.all_biases[-1], [self.n_subnets, 1, -1])\n", 200 | " outputs = torch.squeeze(torch.transpose(outputs, 0, 1), dim=2)\n", 201 | " outputs = outputs.sum(1)\n", 202 | " return outputs\n", 203 | "\n", 204 | "random_state = 0\n", 205 | "np.random.seed(random_state)\n", 206 | "torch.manual_seed(random_state)\n", 207 | "net = TensorLayer(n_subnets=5, subnet_arch=[10], n_input_nodes=1, activation_func=torch.nn.ReLU(), device=\"cpu\")\n", 208 | "coefs = [[net.all_weights[0][i].detach().numpy().copy(), net.all_weights[1][i].detach().numpy().copy()] for i in range(5)]\n", 209 | "net.forward(torch.tensor(train_x[:10, :5], dtype=torch.float32))" 210 | ] 211 | }, 212 | { 213 | "cell_type": "code", 214 | "execution_count": 4, 215 | "metadata": { 216 | "ExecuteTime": { 217 | "end_time": "2022-04-03T02:42:03.675409Z", 218 | "start_time": "2022-04-03T02:42:03.658185Z" 219 | } 220 | }, 221 | "outputs": [ 222 | { 223 | "name": "stdout", 224 | "output_type": "stream", 
225 | "text": [ 226 | "[ 0.077 0.1 0.124 0.051 0.149 0.053 -0.112 0.072 -0.029 0.291]\n", 227 | "[0.233 0.264 0.312 0.217 0.277 0.205 0.047 0.209 0.145 0.435]\n", 228 | "[0.373 0.41 0.477 0.367 0.388 0.342 0.191 0.333 0.301 0.564]\n", 229 | "[0.479 0.52 0.603 0.482 0.474 0.448 0.302 0.425 0.421 0.66 ]\n", 230 | "[0.539 0.583 0.674 0.549 0.521 0.509 0.367 0.479 0.489 0.713]\n", 231 | "[0.555 0.599 0.694 0.568 0.533 0.528 0.386 0.494 0.509 0.725]\n", 232 | "[0.539 0.582 0.674 0.552 0.518 0.513 0.372 0.481 0.493 0.708]\n", 233 | "[0.502 0.541 0.628 0.512 0.485 0.476 0.336 0.448 0.453 0.669]\n", 234 | "[0.453 0.488 0.566 0.457 0.442 0.426 0.286 0.404 0.399 0.619]\n", 235 | "[0.398 0.428 0.498 0.396 0.395 0.37 0.23 0.354 0.339 0.564]\n" 236 | ] 237 | } 238 | ], 239 | "source": [ 240 | "from gaminet.dataloader import FastTensorDataLoader\n", 241 | "loss_fn = torch.nn.MSELoss(reduction=\"none\")\n", 242 | "opt = torch.optim.Adam(list(net.parameters()), lr=0.01)\n", 243 | "for epoch in range(10):\n", 244 | " net.train()\n", 245 | " opt.zero_grad(set_to_none=True)\n", 246 | " batch_xx = torch.tensor(train_x[:100, :5], dtype=torch.float32)\n", 247 | " batch_yy = torch.tensor(train_y[:100], dtype=torch.float32).ravel()\n", 248 | " pred = net(batch_xx).ravel()\n", 249 | " loss = torch.mean(loss_fn(pred, batch_yy))\n", 250 | " loss.backward()\n", 251 | " opt.step()\n", 252 | " print(net(torch.tensor(train_x[:10, :5], dtype=torch.float32)).ravel().detach().numpy().round(3))" 253 | ] 254 | }, 255 | { 256 | "cell_type": "code", 257 | "execution_count": 5, 258 | "metadata": { 259 | "ExecuteTime": { 260 | "end_time": "2022-04-03T02:42:06.381597Z", 261 | "start_time": "2022-04-03T02:42:03.676457Z" 262 | } 263 | }, 264 | "outputs": [ 265 | { 266 | "name": "stderr", 267 | "output_type": "stream", 268 | "text": [ 269 | "2022-04-03 10:42:04.035523: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcudart.so.11.0'; dlerror: libcudart.so.11.0: cannot open shared object file: No such file or directory\n", 270 | "2022-04-03 10:42:04.035549: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.\n" 271 | ] 272 | }, 273 | { 274 | "name": "stdout", 275 | "output_type": "stream", 276 | "text": [ 277 | "[-0.08416221 -0.07353798 -0.07099413 -0.12131885 0.01259092 -0.10699715\n", 278 | " -0.2775642 -0.06943712 -0.20914906 0.13627838]\n" 279 | ] 280 | }, 281 | { 282 | "name": "stderr", 283 | "output_type": "stream", 284 | "text": [ 285 | "2022-04-03 10:42:06.359079: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcuda.so.1'; dlerror: libcuda.so.1: cannot open shared object file: No such file or directory\n", 286 | "2022-04-03 10:42:06.359102: W tensorflow/stream_executor/cuda/cuda_driver.cc:269] failed call to cuInit: UNKNOWN ERROR (303)\n", 287 | "2022-04-03 10:42:06.359116: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:156] kernel driver does not appear to be running on this host (iZwz989gvg9q1cbx1wnjqlZ): /proc/driver/nvidia/version does not exist\n", 288 | "2022-04-03 10:42:06.359312: I tensorflow/core/platform/cpu_feature_guard.cc:151] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA\n", 289 | "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n" 290 | ] 291 | } 292 | ], 293 | 
"source": [ 294 | "import tensorflow as tf\n", 295 | "from tensorflow.keras import layers\n", 296 | "\n", 297 | "class NumerNet(tf.keras.layers.Layer):\n", 298 | "\n", 299 | " def __init__(self, subnet_arch, activation_func, weight_init, subnet_id):\n", 300 | " super(NumerNet, self).__init__()\n", 301 | " self.layers = []\n", 302 | " self.subnet_arch = subnet_arch\n", 303 | " self.activation_func = activation_func\n", 304 | " self.subnet_id = subnet_id\n", 305 | " for nodes in self.subnet_arch:\n", 306 | " self.layers.append(layers.Dense(nodes, activation=self.activation_func,\n", 307 | " kernel_initializer=tf.keras.initializers.Constant(weight_init[0])))\n", 308 | " self.output_layer = layers.Dense(1, activation=tf.identity, kernel_initializer=tf.keras.initializers.Constant(weight_init[1]))\n", 309 | "\n", 310 | " def call(self, inputs):\n", 311 | "\n", 312 | " x = inputs\n", 313 | " for dense_layer in self.layers:\n", 314 | " x = dense_layer(x)\n", 315 | " output = self.output_layer(x)\n", 316 | " return output\n", 317 | "\n", 318 | "\n", 319 | "class MainEffectBlock(tf.keras.layers.Layer):\n", 320 | "\n", 321 | " def __init__(self, n_subnets, subnet_arch, activation_func):\n", 322 | " super(MainEffectBlock, self).__init__()\n", 323 | "\n", 324 | " self.n_subnets = n_subnets\n", 325 | " self.subnet_arch = subnet_arch\n", 326 | " self.activation_func = activation_func\n", 327 | " self.subnets = []\n", 328 | " for i in range(self.n_subnets):\n", 329 | " self.subnets.append(NumerNet(self.subnet_arch, self.activation_func, weight_init=coefs[i], subnet_id=i))\n", 330 | "\n", 331 | " def call(self, inputs):\n", 332 | "\n", 333 | " self.subnet_outputs = []\n", 334 | " for i in range(self.n_subnets):\n", 335 | " subnet = self.subnets[i]\n", 336 | " subnet_output = subnet(tf.gather(inputs, [i], axis=1))\n", 337 | " self.subnet_outputs.append(subnet_output)\n", 338 | " output = tf.reshape(tf.squeeze(tf.stack(self.subnet_outputs, 1)), [-1, self.n_subnets])\n", 339 | " output = tf.reduce_sum(output, 1)\n", 340 | " return output\n", 341 | "\n", 342 | "tfnet = MainEffectBlock(5, [10], activation_func=tf.nn.relu)\n", 343 | "print(tfnet.__call__(train_x[:10, :5]).numpy())" 344 | ] 345 | }, 346 | { 347 | "cell_type": "code", 348 | "execution_count": 6, 349 | "metadata": { 350 | "ExecuteTime": { 351 | "end_time": "2022-04-03T02:42:06.530659Z", 352 | "start_time": "2022-04-03T02:42:06.382732Z" 353 | } 354 | }, 355 | "outputs": [ 356 | { 357 | "name": "stdout", 358 | "output_type": "stream", 359 | "text": [ 360 | "[ 0.077 0.1 0.124 0.051 0.149 0.053 -0.112 0.072 -0.029 0.291]\n", 361 | "[0.233 0.264 0.312 0.217 0.277 0.205 0.047 0.209 0.145 0.435]\n", 362 | "[0.373 0.41 0.477 0.367 0.388 0.342 0.191 0.333 0.301 0.564]\n", 363 | "[0.479 0.52 0.603 0.482 0.474 0.448 0.302 0.425 0.421 0.66 ]\n", 364 | "[0.539 0.583 0.674 0.549 0.521 0.509 0.367 0.479 0.489 0.713]\n", 365 | "[0.555 0.599 0.694 0.568 0.533 0.528 0.386 0.494 0.509 0.725]\n", 366 | "[0.539 0.582 0.674 0.552 0.518 0.513 0.372 0.481 0.493 0.708]\n", 367 | "[0.502 0.541 0.628 0.512 0.485 0.476 0.336 0.448 0.453 0.669]\n", 368 | "[0.453 0.488 0.566 0.457 0.442 0.426 0.286 0.404 0.399 0.619]\n", 369 | "[0.398 0.428 0.498 0.396 0.395 0.37 0.23 0.354 0.339 0.564]\n" 370 | ] 371 | } 372 | ], 373 | "source": [ 374 | "optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)\n", 375 | "loss_fn = tf.keras.losses.MeanSquaredError()\n", 376 | "for epoch in range(10):\n", 377 | " batch_xx = train_x[:100, :5]\n", 378 | " batch_yy = train_y[:100].ravel()\n", 379 | 
" with tf.GradientTape() as tape:\n", 380 | " pred = tfnet.__call__(batch_xx)\n", 381 | " total_loss = loss_fn(batch_yy, pred)\n", 382 | " grads = tape.gradient(total_loss, tfnet.trainable_weights)\n", 383 | " optimizer.apply_gradients(zip(grads, tfnet.trainable_weights))\n", 384 | " print(tfnet.__call__(train_x[:10, :5]).numpy().round(3))" 385 | ] 386 | } 387 | ], 388 | "metadata": { 389 | "kernelspec": { 390 | "display_name": "py39", 391 | "language": "python", 392 | "name": "py39" 393 | }, 394 | "language_info": { 395 | "codemirror_mode": { 396 | "name": "ipython", 397 | "version": 3 398 | }, 399 | "file_extension": ".py", 400 | "mimetype": "text/x-python", 401 | "name": "python", 402 | "nbconvert_exporter": "python", 403 | "pygments_lexer": "ipython3", 404 | "version": "3.9.10" 405 | }, 406 | "latex_envs": { 407 | "LaTeX_envs_menu_present": true, 408 | "autoclose": false, 409 | "autocomplete": true, 410 | "bibliofile": "biblio.bib", 411 | "cite_by": "apalike", 412 | "current_citInitial": 1, 413 | "eqLabelWithNumbers": true, 414 | "eqNumInitial": 1, 415 | "hotkeys": { 416 | "equation": "Ctrl-E", 417 | "itemize": "Ctrl-I" 418 | }, 419 | "labels_anchors": false, 420 | "latex_user_defs": false, 421 | "report_style_numbering": false, 422 | "user_envs_cfg": false 423 | }, 424 | "varInspector": { 425 | "cols": { 426 | "lenName": 16, 427 | "lenType": 16, 428 | "lenVar": 40 429 | }, 430 | "kernels_config": { 431 | "python": { 432 | "delete_cmd_postfix": "", 433 | "delete_cmd_prefix": "del ", 434 | "library": "var_list.py", 435 | "varRefreshCmd": "print(var_dic_list())" 436 | }, 437 | "r": { 438 | "delete_cmd_postfix": ") ", 439 | "delete_cmd_prefix": "rm(", 440 | "library": "var_list.r", 441 | "varRefreshCmd": "cat(var_dic_list()) " 442 | } 443 | }, 444 | "types_to_exclude": [ 445 | "module", 446 | "function", 447 | "builtin_function_or_method", 448 | "instance", 449 | "_Feature" 450 | ], 451 | "window_display": false 452 | } 453 | }, 454 | "nbformat": 4, 455 | "nbformat_minor": 2 456 | } 457 | -------------------------------------------------------------------------------- /examples/bike_share_hour/data_types.json: -------------------------------------------------------------------------------- 1 | {"season":{"type":"categorical"}, 2 | "yr":{"type":"categorical"}, 3 | "mnth":{"type":"categorical"}, 4 | "hr":{"type":"continuous"}, 5 | "holiday":{"type":"categorical"}, 6 | "weekday":{"type":"categorical"}, 7 | "workingday":{"type":"categorical"}, 8 | "weathersit":{"type":"categorical"}, 9 | "temp":{"type":"continuous"}, 10 | "atemp":{"type":"continuous"}, 11 | "hum":{"type":"continuous"}, 12 | "windspeed":{"type":"continuous"}, 13 | "cnt":{"type":"target"}} -------------------------------------------------------------------------------- /examples/bike_share_hour/readme.txt: -------------------------------------------------------------------------------- 1 | ========================================== 2 | Bike Sharing Dataset 3 | ========================================== 4 | 5 | Hadi Fanaee-T 6 | 7 | Laboratory of Artificial Intelligence and Decision Support (LIAAD), University of Porto 8 | INESC Porto, Campus da FEUP 9 | Rua Dr. Roberto Frias, 378 10 | 4200 - 465 Porto, Portugal 11 | 12 | 13 | ========================================= 14 | Background 15 | ========================================= 16 | 17 | Bike sharing systems are new generation of traditional bike rentals where whole process from membership, rental and return 18 | back has become automatic. 
Through these systems, user is able to easily rent a bike from a particular position and return 19 | back at another position. Currently, there are about over 500 bike-sharing programs around the world which is composed of 20 | over 500 thousands bicycles. Today, there exists great interest in these systems due to their important role in traffic, 21 | environmental and health issues. 22 | 23 | Apart from interesting real world applications of bike sharing systems, the characteristics of data being generated by 24 | these systems make them attractive for the research. Opposed to other transport services such as bus or subway, the duration 25 | of travel, departure and arrival position is explicitly recorded in these systems. This feature turns bike sharing system into 26 | a virtual sensor network that can be used for sensing mobility in the city. Hence, it is expected that most of important 27 | events in the city could be detected via monitoring these data. 28 | 29 | ========================================= 30 | Data Set 31 | ========================================= 32 | Bike-sharing rental process is highly correlated to the environmental and seasonal settings. For instance, weather conditions, 33 | precipitation, day of week, season, hour of the day, etc. can affect the rental behaviors. The core data set is related to 34 | the two-year historical log corresponding to years 2011 and 2012 from Capital Bikeshare system, Washington D.C., USA which is 35 | publicly available in http://capitalbikeshare.com/system-data. We aggregated the data on two hourly and daily basis and then 36 | extracted and added the corresponding weather and seasonal information. Weather information are extracted from http://www.freemeteo.com. 37 | 38 | ========================================= 39 | Associated tasks 40 | ========================================= 41 | 42 | - Regression: 43 | Predication of bike rental count hourly or daily based on the environmental and seasonal settings. 44 | 45 | - Event and Anomaly Detection: 46 | Count of rented bikes are also correlated to some events in the town which easily are traceable via search engines. 47 | For instance, query like "2012-10-30 washington d.c." in Google returns related results to Hurricane Sandy. Some of the important events are 48 | identified in [1]. Therefore the data can be used for validation of anomaly or event detection algorithms as well. 49 | 50 | 51 | ========================================= 52 | Files 53 | ========================================= 54 | 55 | - Readme.txt 56 | - hour.csv : bike sharing counts aggregated on hourly basis. Records: 17379 hours 57 | - day.csv - bike sharing counts aggregated on daily basis. Records: 731 days 58 | 59 | 60 | ========================================= 61 | Dataset characteristics 62 | ========================================= 63 | Both hour.csv and day.csv have the following fields, except hr which is not available in day.csv 64 | 65 | - instant: record index 66 | - dteday : date 67 | - season : season (1:springer, 2:summer, 3:fall, 4:winter) 68 | - yr : year (0: 2011, 1:2012) 69 | - mnth : month (1 to 12) 70 | - hr : hour (0 to 23) 71 | - holiday : weather day is holiday or not (extracted from http://dchr.dc.gov/page/holiday-schedule) 72 | - weekday : day of the week 73 | - workingday : if day is neither weekend nor holiday is 1, otherwise is 0. 
74 | - weathersit :
75 | - 1: Clear, Few clouds, Partly cloudy
76 | - 2: Mist + Cloudy, Mist + Broken clouds, Mist + Few clouds, Mist
77 | - 3: Light Snow, Light Rain + Thunderstorm + Scattered clouds, Light Rain + Scattered clouds
78 | - 4: Heavy Rain + Ice Pellets + Thunderstorm + Mist, Snow + Fog
79 | - temp : Normalized temperature in Celsius. The values are divided by 41 (max)
80 | - atemp: Normalized feeling temperature in Celsius. The values are divided by 50 (max)
81 | - hum: Normalized humidity. The values are divided by 100 (max)
82 | - windspeed: Normalized wind speed. The values are divided by 67 (max)
83 | - casual: count of casual users
84 | - registered: count of registered users
85 | - cnt: count of total rental bikes including both casual and registered
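86 | 
87 | Note (editor's addition, not part of the original dataset documentation): the
88 | normalized fields above can be mapped back to raw units by multiplying by the
89 | documented maxima. A minimal sketch in Python, assuming hour.csv has been
90 | loaded with pandas and uses the column names listed above:
91 | 
92 |     import pandas as pd
93 |     df = pd.read_csv("hour.csv")
94 |     df["temp_c"] = df["temp"] * 41          # temperature in Celsius
95 |     df["atemp_c"] = df["atemp"] * 50        # feeling temperature in Celsius
96 |     df["hum_pct"] = df["hum"] * 100         # relative humidity in percent
97 |     df["windspeed_raw"] = df["windspeed"] * 67
98 | 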
99 | =========================================
100 | License
101 | =========================================
102 | Use of this dataset in publications must cite the following publication:
103 | 
104 | [1] Fanaee-T, Hadi, and Gama, Joao, "Event labeling combining ensemble detectors and background knowledge", Progress in Artificial Intelligence (2013): pp. 1-15, Springer Berlin Heidelberg, doi:10.1007/s13748-013-0040-3.
105 | 
106 | @article{
107 | year={2013},
108 | issn={2192-6352},
109 | journal={Progress in Artificial Intelligence},
110 | doi={10.1007/s13748-013-0040-3},
111 | title={Event labeling combining ensemble detectors and background knowledge},
112 | url={http://dx.doi.org/10.1007/s13748-013-0040-3},
113 | publisher={Springer Berlin Heidelberg},
114 | keywords={Event labeling; Event detection; Ensemble learning; Background knowledge},
115 | author={Fanaee-T, Hadi and Gama, Joao},
116 | pages={1-15}
117 | }
118 | 
119 | =========================================
120 | Contact
121 | =========================================
122 | 
123 | For further information about this dataset please contact Hadi Fanaee-T (hadi.fanaee@fe.up.pt)
124 | 
--------------------------------------------------------------------------------
/examples/credit_default/TaiwanCreditDataset.xls:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SelfExplainML/GamiNet-PyTorch/54598abdcd97ffd4f8e0d74930fe6a25b62d08b2/examples/credit_default/TaiwanCreditDataset.xls
--------------------------------------------------------------------------------
/examples/credit_default/credit_default.names:
--------------------------------------------------------------------------------
1 | X1: Amount of the given credit (NT dollar): it includes both the individual consumer credit and his/her family (supplementary) credit.
2 | X2: Gender (1 = male; 2 = female).
3 | X3: Education (1 = graduate school; 2 = university; 3 = high school; 4 = others).
4 | X4: Marital status (1 = married; 2 = single; 3 = others).
5 | X5: Age (year).
6 | X6 - X11: History of past payment. We tracked the past monthly payment records (from April to September, 2005) as follows: X6 = the repayment status in September, 2005; X7 = the repayment status in August, 2005; . . .; X11 = the repayment status in April, 2005. The measurement scale for the repayment status is: -1 = pay duly; 1 = payment delay for one month; 2 = payment delay for two months; . . .; 8 = payment delay for eight months; 9 = payment delay for nine months and above.
7 | X12-X17: Amount of bill statement (NT dollar). X12 = amount of bill statement in September, 2005; X13 = amount of bill statement in August, 2005; . . .; X17 = amount of bill statement in April, 2005.
8 | X18-X23: Amount of previous payment (NT dollar). X18 = amount paid in September, 2005; X19 = amount paid in August, 2005; . . .; X23 = amount paid in April, 2005.
9 | 
10 | 
--------------------------------------------------------------------------------
/examples/credit_default/data_types.json:
--------------------------------------------------------------------------------
1 | {"Given Credit":{"type":"continuous"},
2 | "Gender":{"type":"categorical"},
3 | "Education":{"type":"categorical"},
4 | "Marital":{"type":"categorical"},
5 | "Age":{"type":"continuous"},
6 | "PAY1":{"type":"continuous"},
7 | "PAY2":{"type":"continuous"},
8 | "PAY3":{"type":"continuous"},
9 | "PAY4":{"type":"continuous"},
10 | "PAY5":{"type":"continuous"},
11 | "PAY6":{"type":"continuous"},
12 | "BILL_AMT1":{"type":"continuous"},
13 | "BILL_AMT2":{"type":"continuous"},
14 | "BILL_AMT3":{"type":"continuous"},
15 | "BILL_AMT4":{"type":"continuous"},
16 | "BILL_AMT5":{"type":"continuous"},
17 | "BILL_AMT6":{"type":"continuous"},
18 | "PAY_AMT1":{"type":"continuous"},
19 | "PAY_AMT2":{"type":"continuous"},
20 | "PAY_AMT3":{"type":"continuous"},
21 | "PAY_AMT4":{"type":"continuous"},
22 | "PAY_AMT5":{"type":"continuous"},
23 | "PAY_AMT6":{"type":"continuous"},
24 | "Default Payment":{"type":"target"}}
--------------------------------------------------------------------------------
/examples/credit_default/load.py:
--------------------------------------------------------------------------------
1 | import json
2 | import numpy as np
3 | import pandas as pd
4 | from sklearn.model_selection import train_test_split
5 | from sklearn.preprocessing import OrdinalEncoder, MinMaxScaler
6 | 
7 | 
8 | def load_credit_default(random_state=0):
9 | 
10 |     data = pd.read_excel('./data/credit_default/default of credit card clients.xls', header=1)
11 |     meta_info = json.load(open('./data/credit_default/data_types.json'))
12 |     payment_list = ['BILL_AMT1', 'BILL_AMT2', 'BILL_AMT3', 'BILL_AMT4', 'BILL_AMT5', 'BILL_AMT6',
13 |                     'PAY_AMT1', 'PAY_AMT2', 'PAY_AMT3', 'PAY_AMT4', 'PAY_AMT5', 'PAY_AMT6']
14 |     data.loc[:,payment_list] = (np.sign(data.loc[:,payment_list]).values * np.log10(np.abs(data.loc[:,payment_list]) + 1))
15 |     x, y = data.iloc[:,1:-1].values, data.iloc[:,[-1]].values
16 | 
17 |     xx = np.zeros(x.shape)
18 |     task_type = 'Classification'
19 |     for i, (key, item) in enumerate(meta_info.items()):
20 |         if item['type'] == 'target':
21 |             enc = OrdinalEncoder()
22 |             enc.fit(y)
23 |             y = enc.transform(y)
24 |             meta_info[key]['values'] = enc.categories_[0].tolist()
25 |         elif item['type'] == 'categorical':
26 |             enc = OrdinalEncoder()
27 |             enc.fit(x[:,[i]])
28 |             ordinal_feature = enc.transform(x[:,[i]])
29 |             xx[:,[i]] = ordinal_feature
30 |             meta_info[key]['values'] = enc.categories_[0].tolist()
31 |         else:
32 |             sx = MinMaxScaler((0, 1))
33 |             xx[:,[i]] = sx.fit_transform(x[:,[i]])
34 |             meta_info[key]['scaler'] = sx
35 | 
36 |     train_x, test_x, train_y, test_y = train_test_split(xx.astype(np.float32), y, test_size=0.2, random_state=random_state)
37 | 
38 |     meta_info = {'LIMIT_BAL':{'type':'continuous'},
39 |                  'PAY_0':{'type':'continuous'},
40 |                  'PAY_2':{'type':'continuous'},
41 |                  'PAY_3':{'type':'continuous'},
42 |                  'PAY_4':{'type':'continuous'},
43 |                  'PAY_5':{'type':'continuous'},
44 |                  'PAY_6':{'type':'continuous'},
45 |                  'BILL_AMT1':{'type':'continuous'},
46 |                  'BILL_AMT2':{'type':'continuous'},
47 |                  'BILL_AMT3':{'type':'continuous'},
48 |                  'BILL_AMT4':{'type':'continuous'},
49 |                  'BILL_AMT5':{'type':'continuous'},
50 |                  'BILL_AMT6':{'type':'continuous'},
51 |                  'PAY_AMT1':{'type':'continuous'},
52 |                  'PAY_AMT2':{'type':'continuous'},
53 |                  'PAY_AMT3':{'type':'continuous'},
54 |                  'PAY_AMT456':{'type':'continuous'},
55 |                  'FLAG_UTIL_RAT1':{'type':'categorical'},
56 |                  'UTIL_RAT1':{'type':'continuous'},
57 |                  'UTIL_RAT_AVG':{'type':'continuous'},
58 |                  'UTIL_RAT_RANGE':{'type':'continuous'},
59 |                  'UTIL_RAT_MAX':{'type':'continuous'},
60 |                  'FLAG_PAY_RAT1':{'type':'categorical'},
61 |                  'PAY_RAT1':{'type':'continuous'},
62 |                  'PAY_RAT_AVG':{'type':'continuous'},
63 |                  'PAY_RAT_RANGE':{'type':'continuous'},
64 |                  'PAY_RAT_MAX':{'type':'continuous'},
65 |                  'Default Payment':{'type':'target'}}
66 | 
67 |     data = pd.read_csv('./data/credit_default/credit_data_processed.csv', index_col=[0])
68 |     x, y = data.loc[:,list(meta_info.keys())[:-1]].values, data.loc[:,['default.payment.next.month']].values
69 | 
70 |     xx = np.zeros(x.shape)
71 |     task_type = 'Classification'
72 |     for i, (key, item) in enumerate(meta_info.items()):
73 |         if item['type'] == 'target':
74 |             enc = OrdinalEncoder()
75 |             enc.fit(y)
76 |             y = enc.transform(y)
77 |             meta_info[key]['values'] = enc.categories_[0].tolist()
78 |         elif item['type'] == 'categorical':
79 |             enc = OrdinalEncoder()
80 |             enc.fit(x[:,[i]])
81 |             ordinal_feature = enc.transform(x[:,[i]])
82 |             xx[:,[i]] = ordinal_feature
83 |             meta_info[key]['values'] = enc.categories_[0].tolist()
84 |         else:
85 |             sx = MinMaxScaler((0, 1))
86 |             xx[:,[i]] = sx.fit_transform(x[:,[i]])
87 |             meta_info[key]['scaler'] = sx
88 | 
89 |     train_x, test_x, train_y, test_y = train_test_split(xx.astype(np.float32), y, test_size=0.2, random_state=random_state)
90 |     return train_x, test_x, train_y, test_y, task_type, meta_info
--------------------------------------------------------------------------------
/examples/credit_default/undocumented values:
--------------------------------------------------------------------------------
1 | I emailed the professor who created the data set; his responses are listed here.
2 | 
3 | Below is the response regarding the values used for fields X6:X11
4 | 
5 | "This research employed a binary variable, default payment (Yes = 1, No = 0), as the response variable. This study reviewed the literature and used the following 23 variables as explanatory variables:
6 | 
7 | X1: Amount of the given credit (NT dollar): it includes both the individual consumer credit and his/her family (supplementary) credit.
8 | 
9 | X2: Gender (1 = male; 2 = female).
10 | 
11 | X3: Education (1 = graduate school; 2 = university; 3 = high school; 0, 4, 5, 6 = others).
12 | 
13 | X4: Marital status (1 = married; 2 = single; 3 = divorce; 0=others).
14 | 
15 | X5: Age (year).
16 | 
17 | X6 - X11: History of past payment. We tracked the past monthly payment records (from April to September, 2005) as follows: X6 = the repayment status in September, 2005; X7 = the repayment status in August, 2005; . . .; X11 = the repayment status in April, 2005. The measurement scale for the repayment status is:
18 | 
19 | -2: No consumption; -1: Paid in full; 0: The use of revolving credit; 1 = payment delay for one month; 2 = payment delay for two months; . . .; 8 = payment delay for eight months; 9 = payment delay for nine months and above.
20 | 
21 | X12-X17: Amount of bill statement (NT dollar). X12 = amount of bill statement in September, 2005; X13 = amount of bill statement in August, 2005; . . .; X17 = amount of bill statement in April, 2005.
22 | 
23 | X18-X23: Amount of previous payment (NT dollar). X18 = amount paid in September, 2005; X19 = amount paid in August, 2005; . . .; X23 = amount paid in April, 2005.
24 | 
25 | Y: client's behavior; Y=0 then not default, Y=1 then default"
26 | 
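27 | Added note (editor's, not part of the email above): these undocumented codes
28 | can be made explicit before modeling. A minimal sketch in Python, assuming
29 | data is a pandas DataFrame read from the xls file above, with the
30 | repayment-status columns named PAY_0 and PAY_2 through PAY_6:
31 | 
32 |     status_labels = {-2: "no consumption", -1: "paid in full", 0: "revolving credit"}
33 |     for col in ["PAY_0", "PAY_2", "PAY_3", "PAY_4", "PAY_5", "PAY_6"]:
34 |         # delays of 1-9 months keep their numeric meaning; special codes get labels
35 |         data[col + "_label"] = data[col].map(status_labels).fillna("payment delay")
36 | 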
--------------------------------------------------------------------------------
/examples/dataset.py:
--------------------------------------------------------------------------------
1 | import json
2 | import numpy as np
3 | import pandas as pd
4 | from sklearn.datasets import fetch_california_housing
5 | from sklearn.model_selection import train_test_split
6 | from sklearn.preprocessing import OrdinalEncoder, MinMaxScaler
7 | 
8 | 
9 | def metric_wrapper(metric, scaler):
10 |     def wrapper(label, pred):
11 |         return metric(label, pred, scaler=scaler)
12 |     return wrapper
13 | 
14 | def rmse(label, pred, scaler):
15 |     pred = scaler.inverse_transform(pred.reshape([-1, 1]))
16 |     label = scaler.inverse_transform(label.reshape([-1, 1]))
17 |     return np.sqrt(np.mean((pred - label)**2))
18 | 
19 | 
20 | def get_bike_share(random_state=0):
21 | 
22 |     # we use the hourly dataset and predict the total count (cnt)
23 |     task_type = "Regression"
24 |     data = pd.read_csv("./bike_share_hour/bike_share_hour.csv", index_col=[0])
25 |     meta_info = json.load(open("./bike_share_hour/data_types.json"))
26 |     x, y = data.iloc[:,1:-3].values, data.iloc[:,[-1]].values
27 |     xx = np.zeros((x.shape[0], x.shape[1]), dtype=np.float32)
28 |     for i, (key, item) in enumerate(meta_info.items()):
29 |         if item['type'] == 'target':
30 |             sy = MinMaxScaler((0, 1))
31 |             y = sy.fit_transform(y)
32 |             meta_info[key]['scaler'] = sy
33 |         elif item['type'] == 'categorical':
34 |             enc = OrdinalEncoder()
35 |             xx[:,[i]] = enc.fit_transform(x[:,[i]])
36 |             meta_info[key]['values'] = []
37 |             for item in enc.categories_[0].tolist():
38 |                 try:
39 |                     if item == int(item):
40 |                         meta_info[key]['values'].append(str(int(item)))
41 |                     else:
42 |                         meta_info[key]['values'].append(str(item))
43 |                 except ValueError:
44 |                     meta_info[key]['values'].append(str(item))
45 |         else:
46 |             sx = MinMaxScaler((0, 1))
47 |             xx[:,[i]] = sx.fit_transform(x[:,[i]])
48 |             meta_info[key]['scaler'] = sx
49 |     selected_features = ['season', 'mnth', 'hr', 'weekday', 'workingday', 'weathersit', 'temp', 'hum', 'windspeed', 'cnt']
50 |     meta_info = {key: meta_info[key] for key in selected_features}
51 | 
52 |     train_x, test_x, train_y, test_y = train_test_split(xx.astype(np.float32)[:, [0, 2, 3, 5, 6, 7, 8, 10, 11]],
53 |                                                         y.astype(np.float32),
54 |                                                         test_size=0.2, random_state=random_state)
55 |     return train_x, test_x, train_y, test_y, task_type, meta_info, metric_wrapper(rmse,sy)
56 | 
57 | 
58 | def get_credit_default(random_state=0):
59 | 
60 |     meta_info = {'LIMIT_BAL':{'type':'continuous'},
61 |                  'PAY_0':{'type':'continuous'},
62 |                  'PAY_2':{'type':'continuous'},
63 |                  'PAY_3':{'type':'continuous'},
64 |                  'PAY_4':{'type':'continuous'},
65 |                  'PAY_5':{'type':'continuous'},
66 |                  'PAY_6':{'type':'continuous'},
67 |                  'BILL_AMT1':{'type':'continuous'},
68 |                  'BILL_AMT2':{'type':'continuous'},
69 |                  'BILL_AMT3':{'type':'continuous'},
70 |                  'BILL_AMT4':{'type':'continuous'},
71 |                  'BILL_AMT5':{'type':'continuous'},
72 |                  'BILL_AMT6':{'type':'continuous'},
73 |                  'PAY_AMT1':{'type':'continuous'},
74 |                  'PAY_AMT2':{'type':'continuous'},
75 |                  'PAY_AMT3':{'type':'continuous'},
76 |                  'PAY_AMT456':{'type':'continuous'},
77 |                  'FLAG_UTIL_RAT1':{'type':'categorical'},
78 |                  'UTIL_RAT1':{'type':'continuous'},
79 |                  'UTIL_RAT_AVG':{'type':'continuous'},
80 |                  'UTIL_RAT_RANGE':{'type':'continuous'},
81 |                  'UTIL_RAT_MAX':{'type':'continuous'},
82 |                  'FLAG_PAY_RAT1':{'type':'categorical'},
83 |                  'PAY_RAT1':{'type':'continuous'},
84 |                  'PAY_RAT_AVG':{'type':'continuous'},
85 |                  'PAY_RAT_RANGE':{'type':'continuous'},
86 |                  'PAY_RAT_MAX':{'type':'continuous'},
87 |                  'Default Payment':{'type':'target'}}
88 | 
89 |     data = pd.read_csv('./credit_default/credit_data_processed.csv', index_col=[0])
90 |     x, y = data.loc[:,list(meta_info.keys())[:-1]].values, data.loc[:,['default.payment.next.month']].values
91 | 
92 |     xx = np.zeros(x.shape)
93 |     task_type = 'Classification'
94 |     for i, (key, item) in enumerate(meta_info.items()):
95 |         if item['type'] == 'target':
96 |             enc = OrdinalEncoder()
97 |             enc.fit(y)
98 |             y = enc.transform(y)
99 |             meta_info[key]['values'] = enc.categories_[0].tolist()
100 |         elif item['type'] == 'categorical':
101 |             enc = OrdinalEncoder()
102 |             enc.fit(x[:,[i]])
103 |             ordinal_feature = enc.transform(x[:,[i]])
104 |             xx[:,[i]] = ordinal_feature
105 |             meta_info[key]['values'] = enc.categories_[0].tolist()
106 |         else:
107 |             sx = MinMaxScaler((0, 1))
108 |             xx[:,[i]] = sx.fit_transform(x[:,[i]])
109 |             meta_info[key]['scaler'] = sx
110 | 
111 |     train_x, test_x, train_y, test_y = train_test_split(xx.astype(np.float32), y, test_size=0.2, random_state=random_state)
112 |     return train_x, test_x, train_y, test_y, task_type, meta_info
113 | 
114 | 
115 | def get_california_housing(random_state=0):
116 | 
117 |     task_type = "Regression"
118 |     cal_housing = fetch_california_housing()
119 |     sx = MinMaxScaler((0, 1))
120 |     sy = MinMaxScaler((0, 1))
121 |     xx = sx.fit_transform(cal_housing.data)
122 |     yy = sy.fit_transform(cal_housing.target.reshape(-1, 1))
123 | 
124 |     get_metric = metric_wrapper(rmse, sy)
125 |     meta_info = {name: {"type": "continuous"} for name in cal_housing.feature_names}
126 |     meta_info.update({cal_housing.target_names[0]: {"type": "target"}})
127 |     train_x, test_x, train_y, test_y = train_test_split(xx, yy, test_size=0.2, random_state=random_state)
128 |     return train_x, test_x, train_y, test_y, task_type, meta_info, metric_wrapper(rmse,sy)
--------------------------------------------------------------------------------
/examples/fico/data_types.json:
--------------------------------------------------------------------------------
1 | {"ExternalRiskEstimate":{"type":"continuous"},
2 | "MSinceOldestTradeOpen":{"type":"continuous"},
3 | "MSinceMostRecentTradeOpen":{"type":"continuous"},
4 | "AverageMInFile":{"type":"continuous"},
5 | "NumSatisfactoryTrades":{"type":"continuous"},
6 | "NumTrades60Ever2DerogPubRec":{"type":"continuous"},
7 | "NumTrades90Ever2DerogPubRec":{"type":"continuous"},
8 | "PercentTradesNeverDelq":{"type":"continuous"},
9 | "MSinceMostRecentDelq":{"type":"continuous"},
10 | "MaxDelq2PublicRecLast12M":{"type":"continuous"},
11 | "MaxDelqEver":{"type":"continuous"},
12 | "NumTotalTrades":{"type":"continuous"},
13 | "NumTradesOpeninLast12M":{"type":"continuous"},
14 | "PercentInstallTrades":{"type":"continuous"},
15 | "MSinceMostRecentInqexcl7days":{"type":"continuous"},
16 | "NumInqLast6M":{"type":"continuous"},
17 | "NumInqLast6Mexcl7days":{"type":"continuous"},
18 | "NetFractionRevolvingBurden":{"type":"continuous"},
"NetFractionInstallBurden":{"type":"continuous"}, 20 | "NumRevolvingTradesWBalance":{"type":"continuous"}, 21 | "NumInstallTradesWBalance":{"type":"continuous"}, 22 | "NumBank2NatlTradesWHighUtilization":{"type":"continuous"}, 23 | "PercentTradesWBalance":{"type":"continuous"}, 24 | "RiskPerformance":{"type":"target"}} 25 | -------------------------------------------------------------------------------- /examples/fico/heloc_data_dictionary-2.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SelfExplainML/GamiNet-PyTorch/54598abdcd97ffd4f8e0d74930fe6a25b62d08b2/examples/fico/heloc_data_dictionary-2.xlsx -------------------------------------------------------------------------------- /examples/fico/load.py: -------------------------------------------------------------------------------- 1 | import json 2 | import numpy as np 3 | import pandas as pd 4 | 5 | def load_fico_challange(path="./"): 6 | 7 | data = pd.read_csv(path + "heloc_dataset_v1.csv") 8 | meta_info = json.load(open(path + "data_types.json")) 9 | data = data.replace(-9, np.nan).replace(-8, np.nan).replace(-7, np.nan) 10 | 11 | imp = SimpleImputer(missing_values=np.nan, strategy="median") 12 | imp.fit(data.iloc[:,1:]) 13 | data.iloc[:,1:] = imp.transform(data.iloc[:,1:]) 14 | x, y = data.iloc[:,1:].values, data.iloc[:,[0]].values 15 | return x, y, "Regression", meta_info 16 | -------------------------------------------------------------------------------- /examples/fico/preprocess.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | "ExecuteTime": { 8 | "end_time": "2020-05-27T04:16:46.019095Z", 9 | "start_time": "2020-05-27T04:08:26.163560Z" 10 | } 11 | }, 12 | "outputs": [ 13 | { 14 | "name": "stdout", 15 | "output_type": "stream", 16 | "text": [ 17 | "Column being fixed: 1\n", 18 | "Column being fixed: 8\n", 19 | "Column being fixed: 14\n", 20 | "Column being fixed: 17\n", 21 | "Column being fixed: 18\n", 22 | "Column being fixed: 19\n", 23 | "Column being fixed: 20\n", 24 | "Column being fixed: 21\n", 25 | "Column being fixed: 22\n" 26 | ] 27 | } 28 | ], 29 | "source": [ 30 | "# --- Imports section --- \n", 31 | "import numpy as np\n", 32 | "import pandas as pd\n", 33 | "from sklearn.preprocessing import StandardScaler\n", 34 | "from sklearn import datasets, linear_model, preprocessing\n", 35 | "import copy\n", 36 | "\n", 37 | "class ModelError(Exception):\n", 38 | "\tpass\n", 39 | "\n", 40 | "class Data_Cleaner():\n", 41 | "\n", 42 | "\tdef __init__ (self, file_name, data = None):\n", 43 | "\t# --- Retrieves the data from CSV or array, as well as basic organisation ---\n", 44 | "\n", 45 | "\t\t# -- Get data from CSV or given array --\n", 46 | "\t\tif (data == None):\n", 47 | "\t\t\tself.data_set = pd.read_csv(file_name).values\n", 48 | "\n", 49 | "\t\telse:\n", 50 | "\t\t\tself.data_set = data\n", 51 | "\n", 52 | "\t\t# -- Converting target to binary --\n", 53 | "\t\tnp.place(self.data_set, self.data_set == \"Bad\", 0)\n", 54 | "\t\tnp.place(self.data_set, self.data_set == \"Good\", 1)\n", 55 | "\n", 56 | "\t\t# -- Creating Model Variable -- \n", 57 | "\t\tself.model = None\n", 58 | "\n", 59 | "\t\t# -- Creating an Order Column --\n", 60 | "\t\torder = np.arange(self.data_set.shape[0])\n", 61 | "\t\torder = order.reshape((order.shape[0],1))\n", 62 | "\n", 63 | "\t\t# -- Scale and Split --\n", 64 | "\t\t# self.y = 
self.data_set[:,:1]\n", 65 | "\t\t# scaler = StandardScaler()\n", 66 | "\t\t# self.X = scaler.fit_transform(self.data_set[:,1:])\n", 67 | "\n", 68 | "\t\tself.y = self.data_set[:,:1]\n", 69 | "\t\tself.X = self.data_set[:,1:]\n", 70 | "\n", 71 | "\n", 72 | "\t\t# -- Needs to be retained for inserting new samples\n", 73 | "\t\t# self.mean = scaler.mean_\n", 74 | "\t\t# self.scale = scaler.scale_\n", 75 | "\n", 76 | "\t\t# -- Assiging general useful variables --\n", 77 | "\t\tself.num_samples , self.num_features = self.X.shape\n", 78 | "\n", 79 | "\t\t# -- Add the Order Column -- \n", 80 | "\t\tself.X = np.append(order,self.X,axis=1)\n", 81 | "\t\tself.y = np.append(order,self.y,axis=1)\n", 82 | "\n", 83 | "\tdef shift(self):\n", 84 | "\t# --- Perform the shift for the two categorical features --- \n", 85 | "\n", 86 | "\t\t# -- Shift is hardcoded based on requirements -- \n", 87 | "\t\tfirst_col = self.X[:,10]\n", 88 | "\t\tnp.place(first_col, first_col == 1, 100) # hold value\n", 89 | "\t\tnp.place(first_col, first_col == 6, 1)\n", 90 | "\t\tnp.place(first_col, first_col == 5, 1)\n", 91 | "\t\tnp.place(first_col, first_col == 4, 6)\n", 92 | "\t\tnp.place(first_col, first_col == 3, 5)\n", 93 | "\t\tnp.place(first_col, first_col == 2, 4)\n", 94 | "\t\tnp.place(first_col, first_col == 100, 3)\n", 95 | "\t\tnp.place(first_col, first_col == 0, 2)\n", 96 | "\t\tnp.place(first_col, first_col == 8, 0)\n", 97 | "\t\tnp.place(first_col, first_col == 9, 0)\n", 98 | "\n", 99 | "\t\tsecond_col= self.X[:,11]\n", 100 | "\t\tnp.place(second_col, second_col == 1, 0)\n", 101 | "\t\tnp.place(second_col, second_col == 9, 0)\n", 102 | "\t\tnp.place(second_col, second_col == 7, 1)\n", 103 | "\t\tnp.place(second_col, second_col == 8, 7)\n", 104 | "\n", 105 | "\t\tself.X[:,10] = first_col\n", 106 | "\t\tself.X[:,11] = second_col\n", 107 | "\n", 108 | "\tdef __scaled_row(self,row,scaler):\n", 109 | "\t# --- Returns the Row Scaled ---\n", 110 | "\t\tmean = scaler.mean_\n", 111 | "\t\tscale = scaler.scale_\n", 112 | "\t\tscld = []\n", 113 | "\t\tfor k in range(row.shape[0]):\n", 114 | "\t\t\tscld.append((row[k] - mean[k])/scale[k])\n", 115 | "\t\tscld = np.array(scld)\n", 116 | "\n", 117 | "\t\treturn scld\n", 118 | "\t \n", 119 | "\tdef __masked_arr(self,orig_array, mask):\n", 120 | "\t# --- Returns XOR of Array and Mask --- \n", 121 | "\t\tmasked_array = []\n", 122 | "\n", 123 | "\t\tfor i in range(len(orig_array)):\n", 124 | "\t\t\trow = []\n", 125 | "\t\t\tfor j in range(len(orig_array[0])):\n", 126 | "\t\t\t\tif mask[j] != 0:\n", 127 | "\t\t\t\t\trow.append(orig_array[i][j])\n", 128 | "\t\t\tmasked_array.append(row)\n", 129 | "\n", 130 | "\t\tmasked_array = np.array(masked_array)\n", 131 | "\n", 132 | "\t\treturn masked_array\n", 133 | "\n", 134 | "\tdef __euc_distance(self,row1, row2):\n", 135 | "\t# --- Returns Euclidian Distance between Rows --- \n", 136 | "\t\tdist = 0\n", 137 | "\t\tfor i in range(len(row1)):\n", 138 | "\t\t\tt = (row1[i]-row2[i])**2\n", 139 | "\t\t\tdist += t\n", 140 | "\t\tdist = np.sqrt(dist)\n", 141 | "\t\treturn dist\n", 142 | "\n", 143 | "\tdef __predict_feature_weighted(self,row, good_data_masked, no_neighbours, orig_array, ft_idx):\n", 144 | "\t# --- Returns the single special value replaced by kNN imputation using weights---\n", 145 | "\n", 146 | "\t\tdistances = []\n", 147 | "\t\t# -- Loops through the good data with no special values -- \n", 148 | "\t\t\t# - Good data has the changing feature removed -\n", 149 | "\t\tfor i in range(len(good_data_masked)):\t\n", 150 | 
"\t\t\tdistances.append(self.__euc_distance(row, good_data_masked[i]))\n", 151 | "\n", 152 | "\t\tdistances = np.array(distances)\n", 153 | "\t\tmax_dist = np.max(distances)\n", 154 | "\t \n", 155 | "\t\t# -- Sorts the first no_neigbours features --\n", 156 | "\t\tidx = np.argpartition(distances, no_neighbours)\n", 157 | "\n", 158 | "\t\tvalues = []\n", 159 | "\t\tmin_dists = []\n", 160 | "\t \n", 161 | "\t\t# -- Retrieving values with which to replace -- \n", 162 | "\t\tfor i in range(no_neighbours):\n", 163 | "\t\t\tvalues.append(orig_array[idx[i]][ft_idx])\n", 164 | "\t\t\tmin_dists.append(distances[idx[i]])\n", 165 | "\n", 166 | "\t\tvalues = np.array(values) \n", 167 | "\t\tmin_dists = np.array(min_dists)\n", 168 | "\n", 169 | "\t\t# -- Assigning the weights -- \n", 170 | "\t\tweights = []\n", 171 | "\t\tfor i in min_dists:\n", 172 | "\t\t\tweights.append(1 - (i/max_dist))\n", 173 | "\t \n", 174 | "\t # -- Calculating final result -- \n", 175 | "\t\timputed_val = 0\n", 176 | "\t\tfor i in range(len(weights)):\n", 177 | "\t\t\timputed_val += weights[i] * values[i]\n", 178 | "\t \n", 179 | "\t\treturn imputed_val \n", 180 | "\n", 181 | "\tdef __predict_feature_mean(self,row, good_data_masked, no_neighbours, orig_array, ft_idx):\n", 182 | "\t# --- Returns the single special value replaced by kNN imputation using the mean ---\n", 183 | "\n", 184 | "\t\tdistances = []\n", 185 | "\t\t# -- Loops through the good data with no special values -- \n", 186 | "\t \t# - Good data has the changing feature removed -\n", 187 | "\t\tfor i in range(len(good_data_masked)):\n", 188 | "\t\t\tdistances.append(self.__euc_distance(row,good_data_masked[i]))\n", 189 | "\t\tdistances = np.array(distances)\n", 190 | "\t \n", 191 | "\t\t# -- Sorts the first no_neigbours features --\n", 192 | "\t\tidx = np.argpartition(distances, no_neighbours)\n", 193 | "\n", 194 | "\t\tvalues = []\n", 195 | "\t\tmin_dists = []\n", 196 | "\t \n", 197 | "\t\t# -- Retrieving values with which to replace -- \n", 198 | "\t\tfor i in range(no_neighbours):\n", 199 | "\t\t\tvalues.append(orig_array[idx[i]][ft_idx])\n", 200 | "\t\t\tmin_dists.append(distances[idx[i]])\n", 201 | "\n", 202 | "\t\tvalues = np.array(values) \n", 203 | "\t\tmin_dists = np.array(min_dists)\n", 204 | "\t \n", 205 | "\t\t# -- Calculating final result -- \n", 206 | "\t\timputed_val = 0\n", 207 | "\t\tfor i in range(len(values)):\n", 208 | "\t\t\timputed_val += values[i]\n", 209 | "\n", 210 | "\t\timputed_val = imputed_val/len(values)\n", 211 | "\n", 212 | "\t\treturn imputed_val\n", 213 | "\n", 214 | "\tdef __remove_row_with_vals(self, data, target, vals):\n", 215 | "\t# --- Returns the data/target without the rows that have any instance of vals list ---\n", 216 | "\t\tremoved_data = []\n", 217 | "\t\tremoved_target = []\n", 218 | "\n", 219 | "\t\trow_no = 0 \n", 220 | "\t\tfor row in data:\n", 221 | "\t\t\tfor col in row:\n", 222 | "\t\t\t\tif (col in vals):\n", 223 | "\t\t\t\t\tremoved_data.append(data[row_no])\n", 224 | "\t\t\t\t\tdata = np.delete(data, row_no, 0)\n", 225 | "\n", 226 | "\t\t\t\t\tremoved_target.append(target[row_no])\n", 227 | "\t\t\t\t\ttarget = np.delete(target, row_no, 0) \n", 228 | "\t\t\t\t\trow_no -= 1\n", 229 | "\t\t\t\t\tbreak\n", 230 | "\t\t\trow_no += 1\n", 231 | "\n", 232 | "\t\tremoved_data = np.array(removed_data)\n", 233 | "\t\tremoved_target = np.array(removed_target)\n", 234 | "\n", 235 | "\t\treturn data, target, removed_data, removed_target\n", 236 | "\n", 237 | "\tdef __remove_col_with_vals(self, data, vals):\n", 238 | "\t# 
--- Returns the data without the coloumns that have the desired special values ---\n", 239 | "\t\tno_cols = data.shape[1]\n", 240 | "\t\tno_rows = data.shape[0]\n", 241 | "\t\trow = 0\n", 242 | "\t\twhile (no_rows > row):\n", 243 | "\t\t\tcol = 0\n", 244 | "\t\t\twhile (no_cols > col):\n", 245 | "\t\t\t\tif (data[row][col] in vals):\n", 246 | "\t\t\t\t\tdata = np.delete(data, col, 1)\n", 247 | "\t\t\t\t\tno_cols -= 1\n", 248 | "\t\t\t\telse:\n", 249 | "\t\t\t\t\tcol += 1\n", 250 | "\t\t\trow += 1 \n", 251 | "\t\treturn data\n", 252 | "\n", 253 | "\tdef __predict_values_lin_reg(self,X_tr,y_tr,X_test):\n", 254 | "\t# --- Uses linear regression to extrapolate values ---\n", 255 | "\t\tmodel = linear_model.LinearRegression()\n", 256 | "\t\tmodel.fit(X_tr, y_tr)\n", 257 | "\t\tpred = model.predict(X_test)\n", 258 | "\t\treturn pred\n", 259 | "\n", 260 | "\tdef __data_spliter(self,all_data,target_col,target_val):\n", 261 | "\t# --- Splits the data such to identify target col --- \n", 262 | "\t\ttarget_col += 1\n", 263 | "\n", 264 | "\t\ty = all_data[:,target_col:target_col+1]\n", 265 | "\t\tX = np.delete(all_data,target_col,1)\n", 266 | "\t \n", 267 | "\t\t# -- Will hold the X for the y values that need to be predicted--\n", 268 | "\t\tX_target = np.zeros((1,X.shape[1]))\n", 269 | "\n", 270 | "\t\trow_no = 0 \n", 271 | "\t\t# -- Finds the rows with a target val -- \n", 272 | "\t\tfor val in y:\n", 273 | "\t\t\tif (val[0] == target_val):\n", 274 | "\t\t\t\tX_target = np.append(X_target,X[row_no:row_no+1,:],axis=0)\n", 275 | "\t\t\t\tX = np.delete(X, row_no, 0)\n", 276 | "\t\t\t\ty = np.delete(y, row_no, 0) \n", 277 | "\t\t\telse:\n", 278 | "\t\t\t\trow_no += 1\n", 279 | "\n", 280 | "\t\tX_target = np.delete(X_target,0,0)\n", 281 | "\t \n", 282 | "\t\treturn X,y,X_target # Note that the order column is still attached\n", 283 | "\n", 284 | "\tdef __combine_parts_inorder(self,X,y,X_target,y_target,target_col):\n", 285 | "\t# --- Combines all the small parts into a single data matrix ---\n", 286 | "\t\ttarget_col += 1 # To account for the order column\n", 287 | "\n", 288 | "\t\ty_target = y_target.reshape((y_target.shape[0],1))\n", 289 | "\t\ty_full = np.append(y_target,y,axis=0)\n", 290 | "\t\tX_full = np.append(X_target,X,axis=0)\n", 291 | "\n", 292 | "\t\tdata = np.append(X_full[:,:target_col],y_full,axis=1)\n", 293 | "\t\tdata = np.append(data,X_full[:,target_col:],axis=1)\n", 294 | "\t\treturn data\n", 295 | "\n", 296 | "\tdef __average_each_feature(self,X):\n", 297 | "\t# --- Finds the mean values for each feature ---\n", 298 | "\n", 299 | "\t\tX_target = np.zeros((1,X.shape[1]))\n", 300 | "\t \n", 301 | "\t\tfor i in range(X.shape[1]):\n", 302 | "\t\t\tcol = X[:,i]\n", 303 | "\t\t\tcol = np.mean(col,axis=0)\n", 304 | "\t\t\tX_target[:,i] = col\n", 305 | "\t \n", 306 | "\t\treturn X_target\n", 307 | "\n", 308 | "\tdef __process_and_predict(self,all_data,target_col,target_val,exclude=None,model=\"linear\"):\n", 309 | "\t\t# -- Split data --\n", 310 | "\t\tX,y,X_target = self.__data_spliter(all_data,target_col,target_val)\n", 311 | "\t\t# -- Record order columns -- \n", 312 | "\n", 313 | "\t\torder_data = X[:,0:1]\n", 314 | "\t\torder_target = X_target[:,0:1]\n", 315 | "\t \n", 316 | "\t # -- Remove certain columns --\n", 317 | "\t\tif (exclude != None or exclude == []):\n", 318 | "\t\t\ty_tr = np.copy(y)\n", 319 | "\t\t\tX_tr = self.__remove_col_with_vals(X,exclude)\n", 320 | "\t\t\tX_tr = np.delete(X_tr,0,axis=1) # Removes the order column\n", 321 | "\t\t\tX_pred = 
self.__remove_col_with_vals(X_target,exclude) # The x used to predict\n", 322 | "\t\t\tX_pred = np.delete(X_pred,0,axis=1)\n", 323 | "\n", 324 | "\n", 325 | "\t\telse:\n", 326 | "\t\t\ty_tr = np.copy(y)\n", 327 | "\t\t\tX_tr = np.delete(X,0,axis=1) # Removes the order column\n", 328 | "\t\t\tX_pred = np.delete(X_target,0,axis=1)\n", 329 | "\n", 330 | "\n", 331 | "\t # -- Run regression --\n", 332 | "\t\tif (model == \"linear\"):\n", 333 | "\t\t\ty_target = self.__predict_values_lin_reg(X_tr,y_tr,X_pred)\n", 334 | "\n", 335 | "\t\telif (model == \"polynomial\"):\n", 336 | "\t\t\tpass\n", 337 | "\n", 338 | "\t\telif (model == \"special\"):\n", 339 | "\t\t\tX_avg = self.__average_each_feature(X_pred)\n", 340 | "\t\t\tpred = self.__predict_values_lin_reg(X_tr,y_tr,X_avg)\n", 341 | "\t \n", 342 | "\t\telse:\n", 343 | "\t\t\traise ModelError(\"Model currently not available\")\n", 344 | "\t \n", 345 | "\t\tfinal_data = self.__combine_parts_inorder(X,y,X_target,y_target,target_col)\n", 346 | "\t\treturn final_data\n", 347 | "\t# --- Processes the data and uses linear regression to extrapolate --- \n", 348 | "\n", 349 | "\tdef remove_8(self, kNN, prediction_type):\n", 350 | "\t# --- Removes all the -8 values using kNN imputation ---\n", 351 | "\t\t# -- Remove the order column -- \n", 352 | "\t\torder = self.X[:,0]\n", 353 | "\t\torder = order.reshape((order.shape[0],1))\n", 354 | "\t\tself.X = np.delete(self.X, 0, axis = 1)\n", 355 | "\n", 356 | "\t\t# -- Removes all special values (-7,-8,-9) --\n", 357 | "\t\tX_good, hold1, hold2, hold3 = self.__remove_row_with_vals(self.X, self.y, [-7,-8,-9])\n", 358 | "\n", 359 | "\t\tscaler = StandardScaler()\n", 360 | "\t\tX_good_scaled = scaler.fit_transform(X_good)\n", 361 | "\n", 362 | "\t\t# -- Create a copy of the data matrix X to edit -- \n", 363 | "\t\tX_no_8 = np.copy(self.X)\n", 364 | "\n", 365 | "\t\tcols_with_8 = [1,8,14,17,18,19,20,21,22]\n", 366 | "\n", 367 | "\t\t# -- Fixing each -8 column -- \n", 368 | "\t\tfor fix_col in cols_with_8:\n", 369 | "\t\t\tprint(\"Column being fixed:\", str(fix_col))\n", 370 | "\t\t\t# -- Looping through all samples -- \n", 371 | "\t\t\tfor row in range(self.num_samples):\n", 372 | "\n", 373 | "\t\t\t\tif self.X[row][fix_col] == -8:\n", 374 | "\t\t\t\t\trow_to_comp = []\n", 375 | "\t\t\t\t\tmask = []\n", 376 | "\t\t\t\t\tscaled = self.__scaled_row(self.X[row],scaler)\n", 377 | "\n", 378 | "\t\t\t\t\t# -- Looping through each value --\n", 379 | "\t\t\t\t\tfor col in range(self.num_features):\n", 380 | "\t\t\t\t\t\tif self.X[row][col] >= 0:\n", 381 | "\t\t\t\t\t\t\tmask.append(1)\n", 382 | "\t\t\t\t\t\t\trow_to_comp.append(scaled[col])\n", 383 | "\t\t\t\t\t\telse:\n", 384 | "\t\t\t\t\t\t\tmask.append(0)\n", 385 | "\n", 386 | "\t\t\t\t\trow_to_comp = np.array(row_to_comp)\n", 387 | "\t\t\t\t\tmask = np.array(mask)\n", 388 | "\t\t \n", 389 | "\t\t\t\t\t# -- Getting the array of samples without special values in the good datasets-- \n", 390 | "\t\t\t\t\tX_good_masked = self.__masked_arr(X_good_scaled, mask)\n", 391 | "\n", 392 | "\t\t\t\t\tif (prediction_type == \"mean\"):\n", 393 | "\t\t\t\t\t\timputed = self.__predict_feature_mean(row_to_comp, X_good_masked, kNN, X_good_scaled, fix_col)\n", 394 | "\n", 395 | "\t\t\t\t\telif (prediction_type == \"weighted\"):\n", 396 | "\t\t\t\t\t\timputed = self.__predict_feature_weighted(row_to_comp, X_good_masked, kNN, X_good_scaled, fix_col)\n", 397 | "\t\t\t\t\t\n", 398 | "\t\t\t\t\tX_no_8[row][fix_col] = imputed*scaler.scale_[fix_col] + scaler.mean_[fix_col]\n", 399 | "\n", 400 | 
"\t\tself.X = X_no_8\n", 401 | "\n", 402 | "\t\t# -- Add back order column -- \n", 403 | "\t\tself.X = np.append(order,self.X,axis=1)\n", 404 | "\n", 405 | "\tdef remove_all_9(self):\n", 406 | "\t# --- Removes the columns with all -9 values -- \n", 407 | "\t\tself.rem_X = []\n", 408 | "\t\tself.rem_y = []\n", 409 | "\t\trow_no = 0 \n", 410 | "\t\tfor row in self.X:\n", 411 | "\t\t\tfor col_i in range(1,row.shape[0]):\n", 412 | "\t\t\t\tif (row[col_i] == -9):\n", 413 | "\t\t\t\t\tremove = True\n", 414 | "\t\t\t\telse:\n", 415 | "\t\t\t\t\tremove = False\n", 416 | "\t\t\t\t\tbreak\n", 417 | "\t\t\tif remove:\n", 418 | "\t\t\t\tself.rem_X.append(self.X[row_no])\n", 419 | "\t\t\t\tself.X = np.delete(self.X, row_no, 0)\n", 420 | "\n", 421 | "\t\t\t\tself.rem_y.append(self.y[row_no])\n", 422 | "\t\t\t\tself.y = np.delete(self.y, row_no, 0) \n", 423 | "\n", 424 | "\t\t\telse:\n", 425 | "\t\t\t\trow_no += 1\n", 426 | "\n", 427 | "\t\tself.rem_X = np.array(self.rem_X)\n", 428 | "\t\tself.rem_y = np.array(self.rem_y)\n", 429 | "\n", 430 | "\tdef remove_9(self):\n", 431 | "\t# --- Removes the -9 values by using linear regression ---\n", 432 | "\t\tself.X = self.__process_and_predict(self.X,0,-9,[-7])\n", 433 | "\n", 434 | "\tdef remove_7_est(self):\n", 435 | "\t# --- Removes the -7 values by using an approximated value ---\n", 436 | "\t\tvalue_replace = 150\n", 437 | "\t\tnp.place(self.X, self.X == -7, value_replace)\n", 438 | "\n", 439 | "\tdef output_all_data(self):\n", 440 | "\t# --- Combines all the data into a single array in order and outputs it ---\n", 441 | "\t\tall_X = np.append(self.X,self.rem_X,axis=0)\n", 442 | "\t\tall_y = np.append(self.y,self.rem_y,axis=0)\n", 443 | "\n", 444 | "\t\tall_X = all_X[all_X[:,0].argsort()]\n", 445 | "\t\tall_y = all_y[all_y[:,0].argsort()]\n", 446 | "\n", 447 | "\t\tall_X = np.delete(all_X, 0, axis=1) \n", 448 | "\t\tall_y = np.delete(all_y, 0, axis=1) \n", 449 | "\t\t\n", 450 | "\t\tdata_output = np.append(all_y,all_X,axis=1)\n", 451 | "\n", 452 | "\t\treturn data_output\n", 453 | "\n", 454 | "\tdef output_to_CSV(self, filename):\n", 455 | "\t# --- Outputs the data to a CSV according to assigned filename --- \n", 456 | "\t\tdata_output = self.output_all_data()\n", 457 | "\n", 458 | "\t\tnp.savetxt(filename, data_output.astype(int), fmt='%i', delimiter=\",\")\n", 459 | "\n", 460 | "\tdef revert_to_original(self):\n", 461 | "\t# --- Allows to retrieve the original dataset ---\n", 462 | "\t\tself.__init__(\"pass\",self.data_set)\n", 463 | "\n", 464 | "testing123 = Data_Cleaner(\"./heloc_dataset_v1.csv\")\n", 465 | "testing123.shift()\n", 466 | "testing123.remove_8(5,\"mean\")\n", 467 | "testing123.remove_all_9()\n", 468 | "testing123.remove_9()\n", 469 | "testing123.remove_7_est()\n", 470 | "testing123.output_to_CSV(\"test_file1.csv\")" 471 | ] 472 | }, 473 | { 474 | "cell_type": "code", 475 | "execution_count": 69, 476 | "metadata": { 477 | "ExecuteTime": { 478 | "end_time": "2020-05-28T02:34:55.972490Z", 479 | "start_time": "2020-05-28T02:34:55.852496Z" 480 | } 481 | }, 482 | "outputs": [], 483 | "source": [ 484 | "pd.DataFrame(np.hstack([testing123.X, testing123.y])).to_csv(\"fico.csv\")" 485 | ] 486 | }, 487 | { 488 | "cell_type": "code", 489 | "execution_count": 74, 490 | "metadata": { 491 | "ExecuteTime": { 492 | "end_time": "2020-05-28T02:36:57.680689Z", 493 | "start_time": "2020-05-28T02:36:57.624935Z" 494 | } 495 | }, 496 | "outputs": [], 497 | "source": [ 498 | "data = pd.read_csv(\"fico.csv\", index_col=[0, 1])\n", 499 | "x, y = 
data.iloc[:,0:].values, data.iloc[:,[-1]].values" 500 | ] 501 | } 502 | ], 503 | "metadata": { 504 | "kernelspec": { 505 | "display_name": "Python (tf2)", 506 | "language": "python", 507 | "name": "tf2" 508 | }, 509 | "language_info": { 510 | "codemirror_mode": { 511 | "name": "ipython", 512 | "version": 3 513 | }, 514 | "file_extension": ".py", 515 | "mimetype": "text/x-python", 516 | "name": "python", 517 | "nbconvert_exporter": "python", 518 | "pygments_lexer": "ipython3", 519 | "version": "3.6.8" 520 | }, 521 | "latex_envs": { 522 | "LaTeX_envs_menu_present": true, 523 | "autoclose": false, 524 | "autocomplete": true, 525 | "bibliofile": "biblio.bib", 526 | "cite_by": "apalike", 527 | "current_citInitial": 1, 528 | "eqLabelWithNumbers": true, 529 | "eqNumInitial": 1, 530 | "hotkeys": { 531 | "equation": "Ctrl-E", 532 | "itemize": "Ctrl-I" 533 | }, 534 | "labels_anchors": false, 535 | "latex_user_defs": false, 536 | "report_style_numbering": false, 537 | "user_envs_cfg": false 538 | }, 539 | "varInspector": { 540 | "cols": { 541 | "lenName": 16, 542 | "lenType": 16, 543 | "lenVar": 40 544 | }, 545 | "kernels_config": { 546 | "python": { 547 | "delete_cmd_postfix": "", 548 | "delete_cmd_prefix": "del ", 549 | "library": "var_list.py", 550 | "varRefreshCmd": "print(var_dic_list())" 551 | }, 552 | "r": { 553 | "delete_cmd_postfix": ") ", 554 | "delete_cmd_prefix": "rm(", 555 | "library": "var_list.r", 556 | "varRefreshCmd": "cat(var_dic_list()) " 557 | } 558 | }, 559 | "types_to_exclude": [ 560 | "module", 561 | "function", 562 | "builtin_function_or_method", 563 | "instance", 564 | "_Feature" 565 | ], 566 | "window_display": false 567 | } 568 | }, 569 | "nbformat": 4, 570 | "nbformat_minor": 2 571 | } 572 | -------------------------------------------------------------------------------- /examples/results/demo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SelfExplainML/GamiNet-PyTorch/54598abdcd97ffd4f8e0d74930fe6a25b62d08b2/examples/results/demo.png -------------------------------------------------------------------------------- /examples/results/s1_feature.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SelfExplainML/GamiNet-PyTorch/54598abdcd97ffd4f8e0d74930fe6a25b62d08b2/examples/results/s1_feature.png -------------------------------------------------------------------------------- /examples/results/s1_local.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SelfExplainML/GamiNet-PyTorch/54598abdcd97ffd4f8e0d74930fe6a25b62d08b2/examples/results/s1_local.png -------------------------------------------------------------------------------- /examples/results/s1_regu_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SelfExplainML/GamiNet-PyTorch/54598abdcd97ffd4f8e0d74930fe6a25b62d08b2/examples/results/s1_regu_plot.png -------------------------------------------------------------------------------- /examples/results/s1_traj_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SelfExplainML/GamiNet-PyTorch/54598abdcd97ffd4f8e0d74930fe6a25b62d08b2/examples/results/s1_traj_plot.png -------------------------------------------------------------------------------- /gaminet/__init__.py: 
-------------------------------------------------------------------------------- 1 | from .api import GAMINetRegressor, GAMINetClassifier 2 | 3 | __all__ = ["GAMINetRegressor", "GAMINetClassifier"] 4 | 5 | __version__ = '1.0.0' 6 | __author__ = 'Zebin Yang and Aijun Zhang' 7 | -------------------------------------------------------------------------------- /gaminet/api.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | from sklearn.utils import column_or_1d 4 | from sklearn.utils.extmath import softmax 5 | from sklearn.preprocessing import LabelBinarizer 6 | from sklearn.model_selection import train_test_split 7 | from sklearn.utils.validation import check_is_fitted 8 | from sklearn.base import RegressorMixin, ClassifierMixin 9 | 10 | from pygam.terms import TermList 11 | from pygam import LinearGAM, s, te 12 | 13 | from .base import GAMINet 14 | 15 | 16 | class GAMINetRegressor(GAMINet, RegressorMixin): 17 | 18 | def __init__(self, meta_info=None, interact_num=10, 19 | subnet_size_main_effect=(20,), subnet_size_interaction=(20, 20), activation_func="ReLU", 20 | max_epochs=(1000, 1000, 1000), learning_rates=(1e-3, 1e-3, 1e-4), early_stop_thres=("auto", "auto", "auto"), 21 | batch_size=200, batch_size_inference=10000, max_iter_per_epoch=100, val_ratio=0.2, 22 | warm_start=True, gam_sample_size=5000, mlp_sample_size=1000, 23 | heredity=True, reg_clarity=0.1, loss_threshold=0.0, 24 | reg_mono=0.1, mono_increasing_list=(), mono_decreasing_list=(), mono_sample_size=1000, 25 | boundary_clip=True, normalize=True, verbose=False, n_jobs=10, device="cpu", random_state=0): 26 | 27 | super(GAMINetRegressor, self).__init__(loss_fn=torch.nn.MSELoss(reduction="none"), 28 | meta_info=meta_info, 29 | interact_num=interact_num, 30 | subnet_size_main_effect=subnet_size_main_effect, 31 | subnet_size_interaction=subnet_size_interaction, 32 | activation_func=activation_func, 33 | max_epochs=max_epochs, 34 | learning_rates=learning_rates, 35 | early_stop_thres=early_stop_thres, 36 | batch_size=batch_size, 37 | batch_size_inference=batch_size_inference, 38 | max_iter_per_epoch=max_iter_per_epoch, 39 | val_ratio=val_ratio, 40 | warm_start=warm_start, 41 | gam_sample_size=gam_sample_size, 42 | mlp_sample_size=mlp_sample_size, 43 | heredity=heredity, 44 | reg_clarity=reg_clarity, 45 | loss_threshold=loss_threshold, 46 | reg_mono=reg_mono, 47 | mono_sample_size=mono_sample_size, 48 | mono_increasing_list=mono_increasing_list, 49 | mono_decreasing_list=mono_decreasing_list, 50 | boundary_clip=boundary_clip, 51 | normalize=normalize, 52 | verbose=verbose, 53 | n_jobs=n_jobs, 54 | device=device, 55 | random_state=random_state) 56 | 57 | def _more_tags(self): 58 | """ 59 | Internal function for skipping some sklearn estimator checks. 60 | """ 61 | return {"_xfail_checks": {"check_sample_weights_invariance": 62 | ("zero sample_weight is not equivalent to removing samples")}} 63 | 64 | def _validate_input(self, x, y, sample_weight): 65 | """ 66 | Internal function for validating the inputs of the fit function. 67 | 68 | Samples with zero sample_weight are removed. 69 | Sample_weight would be normalized, such that the sum equals sample size. 70 | Will raise an error if only one sample is given. 71 | 72 | Parameters 73 | ---------- 74 | x : np.ndarray of shape (n_samples, n_features) 75 | Data features. 76 | y : np.ndarray of shape (n_samples, ) 77 | Target response. 78 | sample_weight : np.ndarray of shape (n_samples, ) 79 | Sample weight. 
80 | 
81 | Returns
82 | -------
83 | x : np.ndarray of shape (n_samples, n_features)
84 | Data features.
85 | y : np.ndarray of shape (n_samples, )
86 | Target response.
87 | sample_weight : np.ndarray of shape (n_samples, )
88 | Sample weight.
89 | """
90 | x, y = self._validate_data(x, y, y_numeric=True)
91 | if y.ndim == 2 and y.shape[1] == 1:
92 | y = column_or_1d(y, warn=True)
93 | if sample_weight is None:
94 | sample_weight = np.ones(x.shape[0])
95 | else:
96 | sample_weight = np.asarray(sample_weight)
97 | if sample_weight.shape[0] != x.shape[0]:
98 | raise ValueError("sample_weight shape mismatches the input")
99 | valid_idx = np.where(sample_weight > 0)[0]
100 | x, y, sample_weight = x[valid_idx], y[valid_idx], sample_weight[valid_idx]
101 | if np.sum(sample_weight) > 0:
102 | sample_weight = x.shape[0] * sample_weight.ravel() / np.sum(sample_weight)
103 | if x.shape[0] == 1:
104 | raise ValueError("n_samples=1")
105 | return x, y.ravel(), sample_weight.ravel()
106 | 
107 | def _build_teacher_main_effect(self):
108 | """
109 | Internal function for fitting a spline-based additive model
110 | for the main effects.
111 | 
112 | It works as follows.
113 | 1) Subsample at most self.gam_sample_size data from the training set.
114 | 2) Fit a B-spline GAM on all input features; to keep it
115 | scalable for a large number of features, the number of
116 | knots per spline is adaptively adjusted from 10 down to 2,
117 | according to the number of features.
118 | 3) Wrap the partial function of each effect and the
119 | intercept.
120 | 
121 | Returns
122 | -------
123 | surrogate_estimator : object
124 | List of wrapped functions, each element is a fitted effect.
125 | intercept : float
126 | Fitted intercept.
127 | """
128 | 
129 | x = self.training_generator_.tensors[0].cpu().numpy()
130 | y = self.training_generator_.tensors[1].cpu().numpy()
131 | sw = self.training_generator_.tensors[2].cpu().numpy()
132 | if self.gam_sample_size >= x.shape[0]:
133 | xx, yy, swsw = x, y, sw
134 | else:
135 | _, xx, _, yy, _, swsw = train_test_split(x, y, sw, test_size=self.gam_sample_size, random_state=self.random_state)
136 | 
137 | termlist = TermList()
138 | n_splines = max(11 - np.ceil(self.n_features_ / 100).astype(int), 2)
139 | for idx in range(self.n_features_):
140 | termlist += s(idx, n_splines=n_splines, spline_order=1, lam=0.6)
141 | 
142 | gam = LinearGAM(termlist)
143 | gam.fit((xx - self.mu_list_.cpu().numpy()) / self.std_list_.cpu().numpy(), yy, weights=swsw)
144 | 
145 | def margial_effect(i):
146 | return lambda x: gam.partial_dependence(i, x)
147 | 
148 | intercept = gam.coef_[-1]
149 | surrogate_estimator = [margial_effect(i) for i in range(self.n_features_)]
150 | return surrogate_estimator, intercept
151 | 
152 | def _build_teacher_interaction(self):
153 | """
154 | Internal function for fitting a spline-based additive interaction model.
155 | 
156 | It works as follows.
157 | 1) Subsample at most self.gam_sample_size data from the training set.
158 | 2) Get the residual with respect to the fitted main-effect networks;
159 | in the classification case, the residual is y_label - pred_proba.
160 | 3) Fit a tensor-product spline GAM for the selected interactions; to keep it
161 | scalable for a large number of interactions, the number of knots
162 | per spline is adaptively adjusted from 10 down to 2, according
163 | to the number of interactions.
164 | 4) Wrap the partial function of each effect and intercept. 165 | 166 | Returns 167 | ------- 168 | surrogate_estimator : object 169 | List of wrapped functions, each element is a fitted effect. 170 | intercept : float 171 | Fitted intercept. 172 | """ 173 | x = self.training_generator_.tensors[0].cpu().numpy() 174 | y = self.training_generator_.tensors[1].cpu().numpy() 175 | sw = self.training_generator_.tensors[2].cpu().numpy() 176 | if self.gam_sample_size >= x.shape[0]: 177 | xx, yy, swsw = x, y, sw 178 | else: 179 | _, xx, _, yy, _, swsw = train_test_split(x, y, sw, test_size=self.gam_sample_size, random_state=self.random_state) 180 | residual = yy - self.get_aggregate_output(xx, main_effect=True, interaction=False).detach().cpu().numpy().ravel() 181 | 182 | termlist = TermList() 183 | n_splines = max(11 - np.ceil(self.n_interactions_ / 10).astype(int), 2) 184 | for i, (idx1, idx2) in enumerate(self.interaction_list_): 185 | termlist += te(s(idx1, n_splines=n_splines, spline_order=1, lam=0.6), 186 | s(idx2, n_splines=n_splines, spline_order=1, lam=0.6)) 187 | 188 | gam = LinearGAM(termlist) 189 | gam.fit((xx - self.mu_list_.cpu().numpy()) / self.std_list_.cpu().numpy(), residual, weights=swsw) 190 | 191 | def margial_effect(i): 192 | return lambda x: gam.partial_dependence(i, x) 193 | 194 | intercept = gam.coef_[-1] 195 | surrogate_estimator = [margial_effect(i) for i in range(self.n_interactions_)] 196 | return surrogate_estimator, intercept 197 | 198 | def _get_interaction_list(self, x, y, w, scores, feature_names, feature_types): 199 | """ 200 | Internal function for screening interactions in regression setting. 201 | 202 | Returns 203 | ------- 204 | interaction_list : list of int 205 | List of paired tuple index, each indicating the feature index. 206 | """ 207 | num_classes = -1 208 | model_type = "regression" 209 | interaction_list = self._interaction_screening(x, y.astype(np.float64), w, scores, feature_names, feature_types, 210 | model_type, num_classes) 211 | return interaction_list 212 | 213 | def fit(self, x, y, sample_weight=None): 214 | """ 215 | Fit GAMINetRegressor model. 216 | 217 | Parameters 218 | ---------- 219 | x : np.ndarray of shape (n_samples, n_features) 220 | Data features. 221 | y : np.ndarray of shape (n_samples, ) 222 | Target response. 223 | sample_weight : np.ndarray of shape (n_samples, ) 224 | Sample weight. 225 | 226 | Returns 227 | ------- 228 | self : object 229 | Fitted Estimator. 230 | """ 231 | self._init_fit(x, y, sample_weight) 232 | return self._fit() 233 | 234 | def predict(self, x, main_effect=True, interaction=True): 235 | """ 236 | Returns numpy array of predicted values. 237 | 238 | Parameters 239 | ---------- 240 | x : np.ndarray of shape (n_samples, n_features) 241 | Data features. 242 | main_effect : boolean 243 | Whether to include main effects, default to True. 244 | interaction : boolean 245 | Whether to include interactions, default to True. 246 | 247 | Returns 248 | ------- 249 | pred: np.ndarray of shape (n_samples, ) 250 | numpy array of predicted values. 
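
Example (illustrative; assumes `est` is a fitted GAMINetRegressor and
`x` a 2-D numpy array):

    pred = est.predict(x)                          # full model
    pred_main = est.predict(x, interaction=False)  # main effects only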
251 | """ 252 | check_is_fitted(self) 253 | x = self._validate_data(x) 254 | pred = self.get_aggregate_output(x, main_effect=main_effect, interaction=interaction).detach().cpu().numpy().ravel() 255 | return pred 256 | 257 | 258 | class GAMINetClassifier(GAMINet, ClassifierMixin): 259 | 260 | def __init__(self, meta_info=None, interact_num=10, 261 | subnet_size_main_effect=(20,), subnet_size_interaction=(20, 20), activation_func="ReLU", 262 | max_epochs=(1000, 1000, 1000), learning_rates=(1e-3, 1e-3, 1e-4), early_stop_thres=("auto", "auto", "auto"), 263 | batch_size=200, batch_size_inference=10000, max_iter_per_epoch=100, val_ratio=0.2, 264 | warm_start=True, gam_sample_size=5000, mlp_sample_size=1000, 265 | heredity=True, reg_clarity=0.1, loss_threshold=0.0, 266 | reg_mono=0.1, mono_increasing_list=(), mono_decreasing_list=(), mono_sample_size=1000, 267 | boundary_clip=True, normalize=True, verbose=False, n_jobs=10, device="cpu", random_state=0): 268 | 269 | super(GAMINetClassifier, self).__init__(loss_fn=torch.nn.BCEWithLogitsLoss(reduction="none"), 270 | meta_info=meta_info, 271 | interact_num=interact_num, 272 | subnet_size_main_effect=subnet_size_main_effect, 273 | subnet_size_interaction=subnet_size_interaction, 274 | activation_func=activation_func, 275 | max_epochs=max_epochs, 276 | learning_rates=learning_rates, 277 | early_stop_thres=early_stop_thres, 278 | batch_size=batch_size, 279 | batch_size_inference=batch_size_inference, 280 | max_iter_per_epoch=max_iter_per_epoch, 281 | val_ratio=val_ratio, 282 | warm_start=warm_start, 283 | gam_sample_size=gam_sample_size, 284 | mlp_sample_size=mlp_sample_size, 285 | heredity=heredity, 286 | reg_clarity=reg_clarity, 287 | loss_threshold=loss_threshold, 288 | reg_mono=reg_mono, 289 | mono_sample_size=mono_sample_size, 290 | mono_increasing_list=mono_increasing_list, 291 | mono_decreasing_list=mono_decreasing_list, 292 | boundary_clip=boundary_clip, 293 | normalize=normalize, 294 | verbose=verbose, 295 | n_jobs=n_jobs, 296 | device=device, 297 | random_state=random_state) 298 | 299 | def _more_tags(self): 300 | """ 301 | Internal function for skipping some sklearn estimator checks. 302 | """ 303 | return {"binary_only": True, 304 | "_xfail_checks": {"check_sample_weights_invariance": 305 | ("zero sample_weight is not equivalent to removing samples")}} 306 | 307 | def _validate_input(self, x, y, sample_weight): 308 | """ 309 | Internal function for validating the inputs of the fit function. 310 | 311 | Samples with zero sample_weight are removed. 312 | Sample_weight would be normalized, such that the sum equals sample size. 313 | Will raise an error if only one sample is given. 314 | The target label would be encoded as 0 and 1. 315 | 316 | Parameters 317 | ---------- 318 | x : np.ndarray of shape (n_samples, n_features) 319 | Data features. 320 | y : np.ndarray of shape (n_samples, ) 321 | Target response. 322 | sample_weight : np.ndarray of shape (n_samples, ) 323 | Sample weight. 324 | 325 | Returns 326 | ------- 327 | x : np.ndarray of shape (n_samples, n_features) 328 | Data features. 329 | y : np.ndarray of shape (n_samples, ) 330 | Target response. 331 | sample_weight : np.ndarray of shape (n_samples, ) 332 | Sample weight. 
333 | """ 334 | x, y = self._validate_data(x, y) 335 | if y.ndim == 2 and y.shape[1] == 1: 336 | y = column_or_1d(y, warn=False) 337 | if sample_weight is None: 338 | sample_weight = np.ones(x.shape[0]) 339 | else: 340 | sample_weight = np.asarray(sample_weight) 341 | if sample_weight.shape[0] != x.shape[0]: 342 | raise ValueError("sample_weight shape mismatches the input") 343 | valid_idx = np.where(sample_weight > 0)[0] 344 | x, y, sample_weight = x[valid_idx], y[valid_idx], sample_weight[valid_idx] 345 | if np.sum(sample_weight) > 0: 346 | sample_weight = x.shape[0] * sample_weight.ravel() / np.sum(sample_weight) 347 | if x.shape[0] == 1: 348 | raise ValueError("n_samples=1") 349 | 350 | self.label_binarizer_ = LabelBinarizer() 351 | self.label_binarizer_.fit(y) 352 | self.classes_ = self.label_binarizer_.classes_ 353 | if len(self.classes_) > 2: 354 | raise ValueError("multi-classification not supported") 355 | y = self.label_binarizer_.transform(y) * 1.0 356 | return x, y.ravel(), sample_weight.ravel() 357 | 358 | def _build_teacher_main_effect(self): 359 | """ 360 | Internal function for fiting a spline based additive model. 361 | 362 | It works as follows. 363 | 1) Subsample at most self.gam_sample_size data from training set. 364 | 2) Fit a B-spline GAM for all input features, to make it 365 | scalable for large number of interactions, the number of knots 366 | in spline is adaptively adjusted from 10 to 2, according to 367 | the number of features. 368 | 3) Wrap the partial function of each effect and intercept. 369 | 370 | Returns 371 | ------- 372 | surrogate_estimator : object 373 | List of wrapped functions, each element is a fitted effect. 374 | intercept : float 375 | Fitted intercept. 376 | """ 377 | x = self.training_generator_.tensors[0].cpu().numpy() 378 | y = self.training_generator_.tensors[1].cpu().numpy() * 4 - 2 379 | sw = self.training_generator_.tensors[2].cpu().numpy() 380 | if self.gam_sample_size >= x.shape[0]: 381 | xx, yy, swsw = x, y, sw 382 | else: 383 | _, xx, _, yy, _, swsw = train_test_split(x, y, sw, 384 | test_size=self.gam_sample_size, stratify=y, random_state=self.random_state) 385 | 386 | termlist = TermList() 387 | n_splines = max(11 - np.ceil(self.n_features_ / 100).astype(int), 2) 388 | for idx in range(self.n_features_): 389 | termlist += s(idx, n_splines=n_splines, spline_order=1, lam=0.6) 390 | 391 | gam = LinearGAM(termlist) 392 | gam.fit((xx - self.mu_list_.cpu().numpy()) / self.std_list_.cpu().numpy(), 393 | yy, weights=swsw) 394 | 395 | def margial_effect(i): 396 | return lambda x: gam.partial_dependence(i, x) 397 | 398 | intercept = gam.coef_[-1] 399 | surrogate_estimator = [margial_effect(i) for i in range(self.n_features_)] 400 | return surrogate_estimator, intercept 401 | 402 | def _build_teacher_interaction(self): 403 | """ 404 | Internal function for fiting a spline based additive interaction model. 405 | 406 | It works as follows. 407 | 1) Subsample at most self.gam_sample_size data from training set. 408 | 2) Get the residual with respect to the fitted main effect networks, 409 | for classification case, the residual is y_label - pred_proba. 410 | 3) Fit a tensor-product spline GAM for selected interactions, to make it 411 | scalable for large number of interactions, the number of knots 412 | in spline is adaptively adjusted from 10 to 2, according to 413 | the number of interactions. 414 | 4) Wrap the partial function of each effect and intercept. 
415 | 
416 | Returns
417 | -------
418 | surrogate_estimator : object
419 | List of wrapped functions, each element is a fitted effect.
420 | intercept : float
421 | Fitted intercept.
422 | """
423 | x = self.training_generator_.tensors[0].cpu().numpy()
424 | y = self.training_generator_.tensors[1].cpu().numpy()
425 | sw = self.training_generator_.tensors[2].cpu().numpy()
426 | if self.gam_sample_size >= x.shape[0]:
427 | xx, yy, swsw = x, y, sw
428 | else:
429 | _, xx, _, yy, _, swsw = train_test_split(x, y, sw,
430 | test_size=self.gam_sample_size, stratify=y, random_state=self.random_state)
431 | 
432 | pred = self.get_aggregate_output(xx, main_effect=True,
433 | interaction=False).detach().cpu().numpy().ravel()
434 | pred_proba = softmax(np.vstack([-pred, pred]).T / 2, copy=False)[:, 1]
435 | residual = yy - pred_proba
436 | 
437 | termlist = TermList()
438 | n_splines = max(11 - np.ceil(self.n_interactions_ / 10).astype(int), 2)
439 | for i, (idx1, idx2) in enumerate(self.interaction_list_):
440 | termlist += te(s(idx1, n_splines=n_splines, spline_order=1, lam=0.6),
441 | s(idx2, n_splines=n_splines, spline_order=1, lam=0.6))
442 | 
443 | gam = LinearGAM(termlist)
444 | gam.fit((xx - self.mu_list_.cpu().numpy()) / self.std_list_.cpu().numpy(),
445 | residual, weights=swsw)
446 | 
447 | def margial_effect(i):
448 | return lambda x: gam.partial_dependence(i, x)
449 | 
450 | intercept = gam.coef_[-1]
451 | surrogate_estimator = [margial_effect(i) for i in range(self.n_interactions_)]
452 | return surrogate_estimator, intercept
453 | 
454 | def _get_interaction_list(self, x, y, w, scores, feature_names, feature_types):
455 | """
456 | Internal function for screening interactions in classification setting.
457 | 
458 | Returns
459 | -------
460 | interaction_list : list of int
461 | List of paired tuple index, each indicating the feature index.
462 | """
463 | num_classes = 2
464 | model_type = "classification"
465 | 
466 | interaction_list = self._interaction_screening(x, y.astype(np.int64), w,
467 | scores, feature_names, feature_types, model_type, num_classes)
468 | return interaction_list
469 | 
470 | def fit(self, x, y, sample_weight=None):
471 | """
472 | Fit GAMINetClassifier model.
473 | 
474 | Parameters
475 | ----------
476 | x : np.ndarray of shape (n_samples, n_features)
477 | Data features.
478 | y : np.ndarray of shape (n_samples, )
479 | Target response.
480 | sample_weight : np.ndarray of shape (n_samples, )
481 | Sample weight.
482 | 
483 | Returns
484 | -------
485 | self : object
486 | Fitted Estimator.
487 | """
488 | self._init_fit(x, y, sample_weight, stratified=True)
489 | return self._fit()
490 | 
491 | def decision_function(self, x, main_effect=True, interaction=True):
492 | """
493 | Returns numpy array of raw predicted values before the softmax.
494 | 
495 | Parameters
496 | ----------
497 | x : np.ndarray of shape (n_samples, n_features)
498 | Data features.
499 | main_effect : boolean
500 | Whether to include main effects, default to True.
501 | interaction : boolean
502 | Whether to include interactions, default to True.
503 | 
504 | Returns
505 | -------
506 | pred : np.ndarray of shape (n_samples, )
507 | numpy array of raw predicted values.
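
Note: with raw score f returned here, predict_proba computes
softmax([-f, f] / 2), which equals sigmoid(f) for the positive class,
so f > 0 corresponds to predicting the positive class.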
508 | """ 509 | check_is_fitted(self) 510 | x = self._validate_data(x) 511 | pred = self.get_aggregate_output(x, main_effect=main_effect, 512 | interaction=interaction).detach().cpu().numpy().ravel() 513 | return pred 514 | 515 | def predict_proba(self, x, main_effect=True, interaction=True): 516 | """ 517 | Returns numpy array of predicted probabilities of each class. 518 | 519 | Parameters 520 | ---------- 521 | x : np.ndarray of shape (n_samples, n_features) 522 | Data features. 523 | main_effect : boolean 524 | Whether to include main effects, default to True. 525 | interaction : boolean 526 | Whether to include interactions, default to True. 527 | 528 | Returns 529 | ------- 530 | pred_proba : np.ndarray of shape (n_samples, 2) 531 | numpy array of predicted proba values. 532 | """ 533 | pred = self.decision_function(x, main_effect=main_effect, interaction=interaction) 534 | pred_proba = softmax(np.vstack([-pred, pred]).T / 2, copy=False) 535 | return pred_proba 536 | 537 | def predict(self, x, main_effect=True, interaction=True): 538 | """ 539 | Returns numpy array of predicted class. 540 | 541 | Parameters 542 | ---------- 543 | x : np.ndarray of shape (n_samples, n_features) 544 | Data features 545 | main_effect : boolean 546 | Whether to include main effects, default to True. 547 | interaction : boolean 548 | Whether to include interactions, default to True. 549 | 550 | Returns 551 | ------- 552 | pred : np.ndarray of shape (n_samples, ) 553 | numpy array of predicted class values. 554 | """ 555 | pred_proba = self.predict_proba(x, main_effect=main_effect, interaction=interaction)[:, 1] 556 | pred = np.array(pred_proba > 0.5, dtype=np.int) 557 | return self.label_binarizer_.inverse_transform(pred) 558 | -------------------------------------------------------------------------------- /gaminet/dataloader.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class FastTensorDataLoader: 5 | """ 6 | A DataLoader-like object for a set of tensors that can be much faster than 7 | TensorDataset + DataLoader because dataloader grabs individual indices of 8 | the dataset and calls cat (slow). 9 | Source: https://discuss.pytorch.org/t/dataloader-much-slower-than-manual-batching/27014/6 10 | """ 11 | def __init__(self, *tensors, batch_size=32, shuffle=False): 12 | """ 13 | Initialize a FastTensorDataLoader. 14 | :param *tensors: tensors to store. Must have the same length @ dim 0. 15 | :param batch_size: batch size to load. 16 | :param shuffle: if True, shuffle the data *in-place* whenever an 17 | iterator is created out of this object. 18 | :returns: A FastTensorDataLoader. 
19 | """ 20 | assert all(t.shape[0] == tensors[0].shape[0] for t in tensors) 21 | self.tensors = tensors 22 | 23 | self.dataset_len = self.tensors[0].shape[0] 24 | self.batch_size = batch_size 25 | self.shuffle = shuffle 26 | 27 | # Calculate # batches 28 | n_batches, remainder = divmod(self.dataset_len, self.batch_size) 29 | if remainder > 0: 30 | n_batches += 1 31 | self.n_batches = n_batches 32 | 33 | def __iter__(self): 34 | if self.shuffle: 35 | r = torch.randperm(self.dataset_len) 36 | self.tensors = [t[r] for t in self.tensors] 37 | self.i = 0 38 | return self 39 | 40 | def __next__(self): 41 | if self.i >= self.dataset_len: 42 | raise StopIteration 43 | batch = tuple(t[self.i: self.i + self.batch_size] for t in self.tensors) 44 | self.i += self.batch_size 45 | return batch 46 | 47 | def __len__(self): 48 | return self.n_batches 49 | -------------------------------------------------------------------------------- /gaminet/layers.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | class TensorLayer(torch.nn.Module): 5 | 6 | def __init__(self, n_subnets, subnet_arch, n_input_nodes, activation_func, device): 7 | super().__init__() 8 | 9 | self.device = device 10 | self.n_subnets = n_subnets 11 | self.n_input_nodes = n_input_nodes 12 | self.activation_func = activation_func 13 | self.n_hidden_layers = len(subnet_arch) 14 | 15 | all_biases = [] 16 | all_weights = [] 17 | n_hidden_nodes_prev = n_input_nodes 18 | for i, n_hidden_nodes in enumerate(subnet_arch + [1]): 19 | if i == 0: 20 | w = torch.nn.Parameter(torch.empty(size=(n_subnets, n_hidden_nodes_prev, n_hidden_nodes), 21 | dtype=torch.float, requires_grad=True, device=device)) 22 | b = torch.nn.Parameter(torch.empty(size=(n_subnets, n_hidden_nodes), 23 | dtype=torch.float, requires_grad=True, device=device)) 24 | elif i == self.n_hidden_layers: 25 | w = torch.nn.Parameter(torch.empty(size=(n_subnets, n_hidden_nodes_prev, 1), 26 | dtype=torch.float, requires_grad=True, device=device)) 27 | b = torch.nn.Parameter(torch.empty(size=(n_subnets, 1), 28 | dtype=torch.float, requires_grad=True, device=device)) 29 | else: 30 | w = torch.nn.Parameter(torch.empty(size=(n_subnets, n_hidden_nodes_prev, n_hidden_nodes), 31 | dtype=torch.float, requires_grad=True, device=device)) 32 | b = torch.nn.Parameter(torch.empty(size=(n_subnets, n_hidden_nodes), 33 | dtype=torch.float, requires_grad=True, device=device)) 34 | n_hidden_nodes_prev = n_hidden_nodes 35 | torch.nn.init.zeros_(b) 36 | for j in range(n_subnets): 37 | torch.nn.init.orthogonal_(w[j]) 38 | all_biases.append(b) 39 | all_weights.append(w) 40 | self.all_biases = torch.nn.ParameterList(all_biases) 41 | self.all_weights = torch.nn.ParameterList(all_weights) 42 | 43 | def individual_forward(self, inputs, idx): 44 | 45 | xs = inputs 46 | for i in range(self.n_hidden_layers): 47 | xs = self.activation_func(torch.matmul(xs, self.all_weights[i][idx]) + 48 | self.all_biases[i][idx]) 49 | outputs = torch.matmul(xs, self.all_weights[-1][idx]) + self.all_biases[-1][idx] 50 | return outputs 51 | 52 | def forward(self, inputs): 53 | 54 | xs = inputs 55 | for i in range(self.n_hidden_layers): 56 | xs = self.activation_func(torch.matmul(xs, self.all_weights[i]) + 57 | torch.reshape(self.all_biases[i], [self.n_subnets, 1, -1])) 58 | 59 | outputs = (torch.matmul(xs, self.all_weights[-1]) + 60 | torch.reshape(self.all_biases[-1], [self.n_subnets, 1, -1])) 61 | outputs = torch.squeeze(torch.transpose(outputs, 0, 1), dim=2) 62 | return outputs 
63 | 64 | 65 | class UnivariateOneHotEncodingLayer(torch.nn.Module): 66 | 67 | def __init__(self, num_classes_list, device): 68 | 69 | super(UnivariateOneHotEncodingLayer, self).__init__() 70 | 71 | self.class_bias = [] 72 | self.global_bias = [] 73 | self.num_classes_list = num_classes_list 74 | for i in range(len(num_classes_list)): 75 | cb = torch.nn.Parameter(torch.empty(size=(num_classes_list[i], 1), 76 | dtype=torch.float, requires_grad=True, device=device)) 77 | gb = torch.nn.Parameter(torch.empty(size=(1, 1), 78 | dtype=torch.float, requires_grad=False, device=device)) 79 | torch.nn.init.zeros_(cb) 80 | torch.nn.init.zeros_(gb) 81 | self.class_bias.append(cb) 82 | self.global_bias.append(gb) 83 | 84 | def forward(self, inputs, sample_weight=None, training=False): 85 | 86 | output = [] 87 | for i in range(len(self.num_classes_list)): 88 | dummy = torch.nn.functional.one_hot(inputs[:, i].to(torch.int64), 89 | num_classes=self.num_classes_list[i]).to(torch.float) 90 | output.append(torch.matmul(dummy, self.class_bias[i]) + self.global_bias[i]) 91 | output = torch.squeeze(torch.hstack(output)) 92 | return output 93 | 94 | 95 | class pyGAMNet(torch.nn.Module): 96 | 97 | def __init__(self, nfeature_index_list, cfeature_index_list, num_classes_list, 98 | subnet_arch, activation_func, device): 99 | 100 | super(pyGAMNet, self).__init__() 101 | 102 | self.device = device 103 | self.nfeature_index_list = nfeature_index_list 104 | self.cfeature_index_list = cfeature_index_list 105 | self.num_classes_list = num_classes_list 106 | self.subnet_arch = subnet_arch 107 | self.activation_func = activation_func 108 | 109 | if len(self.nfeature_index_list) > 0: 110 | self.nsubnets = TensorLayer(len(nfeature_index_list), subnet_arch, 111 | 1, activation_func, device) 112 | if len(self.cfeature_index_list) > 0: 113 | self.csubnets = UnivariateOneHotEncodingLayer(num_classes_list, device) 114 | 115 | def forward(self, inputs): 116 | 117 | output = torch.zeros(size=(inputs.shape[0], inputs.shape[1]), dtype=torch.float, device=self.device) 118 | if len(self.nfeature_index_list) > 0: 119 | ntensor_inputs = torch.unsqueeze(torch.transpose(inputs[:, 120 | self.nfeature_index_list], 0, 1), 2) 121 | output[:, self.nfeature_index_list] = self.nsubnets(ntensor_inputs) 122 | if len(self.cfeature_index_list) > 0: 123 | ctensor_inputs = inputs[:, self.cfeature_index_list] 124 | output[:, self.cfeature_index_list] = self.csubnets(ctensor_inputs) 125 | return output 126 | 127 | 128 | class pyInteractionNet(torch.nn.Module): 129 | 130 | def __init__(self, interaction_list, nfeature_index_list, cfeature_index_list, num_classes_list, 131 | subnet_arch, activation_func, device): 132 | super(pyInteractionNet, self).__init__() 133 | 134 | self.interaction_list = interaction_list 135 | self.n_interactions = len(interaction_list) 136 | self.nfeature_index_list = nfeature_index_list 137 | self.cfeature_index_list = cfeature_index_list 138 | self.num_classes_list = num_classes_list 139 | self.subnet_arch = subnet_arch 140 | self.activation_func = activation_func 141 | self.device = device 142 | 143 | self.n_inputs1 = [] 144 | self.n_inputs2 = [] 145 | for i in range(self.n_interactions): 146 | if self.interaction_list[i][0] in self.cfeature_index_list: 147 | self.n_inputs1.append(self.num_classes_list[ 148 | self.cfeature_index_list.index(self.interaction_list[i][0])]) 149 | else: 150 | self.n_inputs1.append(1) 151 | 152 | if self.interaction_list[i][1] in self.cfeature_index_list: 153 | 
self.n_inputs2.append(self.num_classes_list[ 154 | self.cfeature_index_list.index(self.interaction_list[i][1])]) 155 | else: 156 | self.n_inputs2.append(1) 157 | 158 | self.max_n_inputs = max([self.n_inputs1[i] + self.n_inputs2[i] 159 | for i in range(self.n_interactions)]) 160 | self.subnets = TensorLayer(self.n_interactions, subnet_arch, self.max_n_inputs, 161 | activation_func, device) 162 | 163 | def preprocessing(self, inputs): 164 | 165 | preprocessed_inputs = [] 166 | for i in range(self.n_interactions): 167 | interact_input_list = [] 168 | idx1 = self.interaction_list[i][0] 169 | idx2 = self.interaction_list[i][1] 170 | if self.interaction_list[i][0] in self.cfeature_index_list: 171 | interact_input1 = torch.nn.functional.one_hot(inputs[:, idx1].to(torch.int64), 172 | num_classes=self.n_inputs1[i]).to(torch.float) 173 | interact_input_list.append(interact_input1) 174 | else: 175 | interact_input_list.append(inputs[:, [idx1]]) 176 | if self.interaction_list[i][1] in self.cfeature_index_list: 177 | interact_input2 = torch.nn.functional.one_hot(inputs[:, idx2].to(torch.int64), 178 | num_classes=self.n_inputs2[i]).to(torch.float) 179 | interact_input_list.append(interact_input2) 180 | else: 181 | interact_input_list.append(inputs[:, [idx2]]) 182 | 183 | if (self.n_inputs1[i] + self.n_inputs2[i]) < self.max_n_inputs: 184 | interact_input_list.append(torch.zeros(size=(inputs.shape[0], 185 | self.max_n_inputs - (self.n_inputs1[i] + self.n_inputs2[i])), 186 | dtype=torch.float, requires_grad=True, device=self.device)) 187 | preprocessed_inputs.append(torch.hstack(interact_input_list)) 188 | preprocessed_inputs = torch.hstack(preprocessed_inputs) 189 | return preprocessed_inputs 190 | 191 | def forward(self, inputs): 192 | 193 | tensor_inputs = torch.transpose(torch.reshape(self.preprocessing(inputs), 194 | [-1, self.n_interactions, self.max_n_inputs]), 0, 1) 195 | subnet_output = self.subnets(tensor_inputs) 196 | return subnet_output 197 | 198 | 199 | class pyGAMINet(torch.nn.Module): 200 | 201 | def __init__(self, nfeature_index_list, cfeature_index_list, num_classes_list, 202 | subnet_size_main_effect, subnet_size_interaction, activation_func, 203 | heredity, mono_increasing_list, mono_decreasing_list, 204 | boundary_clip, min_value, max_value, mu_list, std_list, device): 205 | 206 | super(pyGAMINet, self).__init__() 207 | 208 | self.n_features = len(nfeature_index_list) + len(cfeature_index_list) 209 | self.nfeature_index_list = nfeature_index_list 210 | self.cfeature_index_list = cfeature_index_list 211 | self.num_classes_list = num_classes_list 212 | self.subnet_size_main_effect = subnet_size_main_effect 213 | self.subnet_size_interaction = subnet_size_interaction 214 | self.activation_func = activation_func 215 | self.heredity = heredity 216 | self.mono_increasing_list = mono_increasing_list 217 | self.mono_decreasing_list = mono_decreasing_list 218 | 219 | self.boundary_clip = boundary_clip 220 | self.min_value = min_value 221 | self.max_value = max_value 222 | self.mu_list = mu_list 223 | self.std_list = std_list 224 | 225 | self.device = device 226 | self.interaction_status = False 227 | self.main_effect_blocks = pyGAMNet(nfeature_index_list=nfeature_index_list, 228 | cfeature_index_list=cfeature_index_list, 229 | num_classes_list=num_classes_list, 230 | subnet_arch=subnet_size_main_effect, 231 | activation_func=activation_func, 232 | device=device) 233 | self.main_effect_weights = torch.nn.Parameter(torch.empty(size=(self.n_features, 1), 234 | dtype=torch.float, 
requires_grad=True, device=device)) 235 | self.main_effect_switcher = torch.nn.Parameter(torch.empty(size=(self.n_features, 1), 236 | dtype=torch.float, requires_grad=False, device=device)) 237 | 238 | self.output_bias = torch.nn.Parameter(torch.empty(size=(1, ), 239 | dtype=torch.float, requires_grad=True, device=device)) 240 | torch.nn.init.zeros_(self.output_bias) 241 | torch.nn.init.ones_(self.main_effect_switcher) 242 | torch.nn.init.ones_(self.main_effect_weights) 243 | 244 | def init_interaction_blocks(self, interaction_list): 245 | 246 | if len(interaction_list) > 0: 247 | self.interaction_status = True 248 | self.n_interactions = len(interaction_list) 249 | self.interaction_blocks = pyInteractionNet(interaction_list=interaction_list, 250 | nfeature_index_list=self.nfeature_index_list, 251 | cfeature_index_list=self.cfeature_index_list, 252 | num_classes_list=self.num_classes_list, 253 | subnet_arch=self.subnet_size_interaction, 254 | activation_func=self.activation_func, 255 | device=self.device) 256 | self.interaction_weights = torch.nn.Parameter(torch.empty(size=(self.n_interactions, 1), 257 | dtype=torch.float, requires_grad=True, device=self.device)) 258 | self.interaction_switcher = torch.nn.Parameter(torch.empty(size=(self.n_interactions, 1), 259 | dtype=torch.float, requires_grad=False, device=self.device)) 260 | torch.nn.init.ones_(self.interaction_switcher) 261 | torch.nn.init.ones_(self.interaction_weights) 262 | 263 | def get_mono_loss(self, inputs, outputs=None, monotonicity=False, sample_weight=None): 264 | 265 | mono_loss = torch.tensor(0.0, requires_grad=True) 266 | if not monotonicity: 267 | return mono_loss 268 | 269 | grad = torch.autograd.grad(outputs=torch.sum(outputs), 270 | inputs=inputs, create_graph=True)[0] 271 | 272 | if sample_weight is not None: 273 | if len(self.mono_increasing_list) > 0: 274 | mono_loss = mono_loss + torch.mean(torch.nn.ReLU()( 275 | -grad[:, self.mono_increasing_list]) * sample_weight.reshape(-1, 1)) 276 | if len(self.mono_decreasing_list) > 0: 277 | mono_loss = mono_loss + torch.mean(torch.nn.ReLU()( 278 | grad[:, self.mono_decreasing_list]) * sample_weight.reshape(-1, 1)) 279 | else: 280 | if len(self.mono_increasing_list) > 0: 281 | mono_loss = mono_loss + torch.mean(torch.nn.ReLU()( 282 | -grad[:, self.mono_increasing_list])) 283 | if len(self.mono_decreasing_list) > 0: 284 | mono_loss = mono_loss + torch.mean(torch.nn.ReLU()( 285 | grad[:, self.mono_decreasing_list])) 286 | return mono_loss 287 | 288 | def get_clarity_loss(self, main_effect_outputs=None, interaction_outputs=None, 289 | sample_weight=None, clarity=False): 290 | 291 | clarity_loss = torch.tensor(0.0, requires_grad=True) 292 | if main_effect_outputs is None: 293 | return clarity_loss 294 | if interaction_outputs is None: 295 | return clarity_loss 296 | if not clarity: 297 | return clarity_loss 298 | 299 | for i, (k1, k2) in enumerate(self.interaction_blocks.interaction_list): 300 | if sample_weight is not None: 301 | clarity_loss = clarity_loss + torch.abs((main_effect_outputs[:, k1] * 302 | interaction_outputs[:, i] * sample_weight.ravel()).mean()) 303 | clarity_loss = clarity_loss + torch.abs((main_effect_outputs[:, k2] * 304 | interaction_outputs[:, i] * sample_weight.ravel()).mean()) 305 | else: 306 | clarity_loss = clarity_loss + torch.abs((main_effect_outputs[:, k1] 307 | * interaction_outputs[:, i]).mean()) 308 | clarity_loss = clarity_loss + torch.abs((main_effect_outputs[:, k2] 309 | * interaction_outputs[:, i]).mean()) 310 | return clarity_loss 311 | 312 
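
# In brief (an informal sketch, not package API): get_mono_loss differentiates
# the model output w.r.t. its inputs and penalizes gradients of the wrong sign,
# e.g. for an increasing feature j it adds mean(relu(-d output / d x_j));
# get_clarity_loss pushes each interaction to be nearly orthogonal to its two
# parent main effects by penalizing |mean(main_effect_outputs[:, k] *
# interaction_outputs[:, i])| for each parent k of interaction i:
#
#     grad = torch.autograd.grad(outputs.sum(), inputs, create_graph=True)[0]
#     mono_penalty = torch.nn.functional.relu(-grad[:, increasing_idx]).mean()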
| def forward_main_effect(self, inputs): 313 | 314 | inputs = torch.max(torch.min(inputs, self.max_value), self.min_value) if self.boundary_clip else inputs 315 | inputs = (inputs - self.mu_list) / self.std_list 316 | main_effect_weights = self.main_effect_switcher * self.main_effect_weights 317 | outputs = self.main_effect_blocks(inputs) * main_effect_weights.ravel() 318 | return outputs 319 | 320 | def forward_interaction(self, inputs): 321 | 322 | inputs = torch.max(torch.min(inputs, self.max_value), self.min_value) if self.boundary_clip else inputs 323 | inputs = (inputs - self.mu_list) / self.std_list 324 | interaction_weights = self.interaction_switcher * self.interaction_weights 325 | outputs = self.interaction_blocks(inputs) * interaction_weights.ravel() 326 | return outputs 327 | 328 | def forward(self, inputs, sample_weight=None, main_effect=True, interaction=True, 329 | clarity=False, monotonicity=False): 330 | 331 | main_effect_outputs = None 332 | interaction_outputs = None 333 | inputs.requires_grad = True 334 | outputs = self.output_bias * torch.ones(inputs.shape[0], 1, device=self.device) 335 | inputs = torch.max(torch.min(inputs, self.max_value), self.min_value) if self.boundary_clip else inputs 336 | inputs = (inputs - self.mu_list) / self.std_list 337 | if main_effect: 338 | main_effect_weights = self.main_effect_switcher * self.main_effect_weights 339 | main_effect_outputs = self.main_effect_blocks(inputs) * main_effect_weights.ravel() 340 | outputs = outputs + main_effect_outputs.sum(1, keepdim=True) 341 | if interaction and self.interaction_status: 342 | interaction_weights = self.interaction_switcher * self.interaction_weights 343 | interaction_outputs = self.interaction_blocks(inputs) * interaction_weights.ravel() 344 | outputs = outputs + interaction_outputs.sum(1, keepdim=True) 345 | 346 | self.mono_loss = self.get_mono_loss(inputs, outputs, monotonicity, sample_weight) 347 | self.clarity_loss = self.get_clarity_loss(main_effect_outputs, interaction_outputs, 348 | sample_weight, clarity) 349 | return outputs 350 | -------------------------------------------------------------------------------- /gaminet/lib/lib_ebm_native_linux_x64.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SelfExplainML/GamiNet-PyTorch/54598abdcd97ffd4f8e0d74930fe6a25b62d08b2/gaminet/lib/lib_ebm_native_linux_x64.so -------------------------------------------------------------------------------- /gaminet/lib/lib_ebm_native_mac_x64.dylib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SelfExplainML/GamiNet-PyTorch/54598abdcd97ffd4f8e0d74930fe6a25b62d08b2/gaminet/lib/lib_ebm_native_mac_x64.dylib -------------------------------------------------------------------------------- /gaminet/lib/lib_ebm_native_win_x64.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SelfExplainML/GamiNet-PyTorch/54598abdcd97ffd4f8e0d74930fe6a25b62d08b2/gaminet/lib/lib_ebm_native_win_x64.dll -------------------------------------------------------------------------------- /gaminet/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | 4 | import matplotlib 5 | from matplotlib import gridspec 6 | from matplotlib import pyplot as plt 7 | from matplotlib.ticker import MaxNLocator 8 | 9 | 10 | def plot_regularization(data_dict_logs, 
log_scale=True, folder="./results/", name="regularization_path", save_eps=False, save_png=False): 11 | """ 12 | Helper function for visualizing regularization path. 13 | 14 | Parameters 15 | ---------- 16 | data_dict_logs : dict 17 | Dictionary containing regularization path information. 18 | log_scale : boolean 19 | Whether to use log scale for y-axis. 20 | folder : str 21 | The path of folder to save figure, by default "./". 22 | name : str 23 | Name of the file, by default "regularization_path". 24 | save_png : boolean 25 | Whether to save the plot in PNG format, by default False. 26 | save_eps : boolean 27 | Whether to save the plot in EPS format, by default False. 28 | """ 29 | 30 | main_loss = data_dict_logs["main_effect_val_loss"] 31 | inter_loss = data_dict_logs["interaction_val_loss"] 32 | active_main_effect_index = data_dict_logs["active_main_effect_index"] 33 | active_interaction_index = data_dict_logs["active_interaction_index"] 34 | 35 | fig = plt.figure(figsize=(14, 4)) 36 | if len(main_loss) > 0: 37 | ax1 = plt.subplot(1, 2, 1) 38 | ax1.plot(np.arange(0, len(main_loss), 1), main_loss) 39 | ax1.axvline(np.argmin(main_loss), linestyle="dotted", color="red") 40 | ax1.axvline(len(active_main_effect_index), linestyle="dotted", color="red") 41 | ax1.plot(np.argmin(main_loss), np.min(main_loss), "*", markersize=12, color="red") 42 | ax1.plot(len(active_main_effect_index), main_loss[len(active_main_effect_index)], "o", markersize=8, color="red") 43 | ax1.set_xlabel("Number of Main Effects", fontsize=12) 44 | ax1.set_xlim(-0.5, len(main_loss) - 0.5) 45 | ax1.xaxis.set_major_locator(MaxNLocator(integer=True)) 46 | if log_scale: 47 | ax1.set_yscale("log") 48 | ax1.set_yticks((10 ** np.linspace(np.log10(np.nanmin(main_loss)), np.log10(np.nanmax(main_loss)), 5)).round(5)) 49 | ax1.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter()) 50 | ax1.get_yaxis().set_minor_formatter(matplotlib.ticker.NullFormatter()) 51 | ax1.set_ylabel("Validation Loss (Log Scale)", fontsize=12) 52 | else: 53 | ax1.set_yticks((np.linspace(np.nanmin(main_loss), np.nanmax(main_loss), 5)).round(5)) 54 | ax1.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter()) 55 | ax1.get_yaxis().set_minor_formatter(matplotlib.ticker.NullFormatter()) 56 | ax1.set_ylabel("Validation Loss", fontsize=12) 57 | 58 | if len(inter_loss) > 0: 59 | ax2 = plt.subplot(1, 2, 2) 60 | ax2.plot(np.arange(0, len(inter_loss), 1), inter_loss) 61 | ax2.axvline(np.argmin(inter_loss), linestyle="dotted", color="red") 62 | ax2.axvline(len(active_interaction_index), linestyle="dotted", color="red") 63 | ax2.plot(np.argmin(inter_loss), np.min(inter_loss), "*", markersize=12, color="red") 64 | ax2.plot(len(active_interaction_index), inter_loss[len(active_interaction_index)], "o", markersize=8, color="red") 65 | ax2.set_xlabel("Number of Interactions", fontsize=12) 66 | ax2.set_xlim(-0.5, len(inter_loss) - 0.5) 67 | ax2.xaxis.set_major_locator(MaxNLocator(integer=True)) 68 | if log_scale: 69 | ax2.set_yscale("log") 70 | ax2.set_yticks((10 ** np.linspace(np.log10(np.nanmin(inter_loss)), np.log10(np.nanmax(inter_loss)), 5)).round(5)) 71 | ax2.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter()) 72 | ax2.get_yaxis().set_minor_formatter(matplotlib.ticker.NullFormatter()) 73 | ax2.set_ylabel("Validation Loss (Log Scale)", fontsize=12) 74 | else: 75 | ax2.set_yticks((np.linspace(np.nanmin(inter_loss), np.nanmax(inter_loss), 5)).round(5)) 76 | ax2.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter()) 77 
| ax2.get_yaxis().set_minor_formatter(matplotlib.ticker.NullFormatter()) 78 | ax2.set_ylabel("Validation Loss", fontsize=12) 79 | plt.show() 80 | 81 | save_path = folder + name 82 | if save_eps: 83 | if not os.path.exists(folder): 84 | os.makedirs(folder) 85 | fig.savefig("%s.eps" % save_path, bbox_inches="tight", dpi=100) 86 | if save_png: 87 | if not os.path.exists(folder): 88 | os.makedirs(folder) 89 | fig.savefig("%s.png" % save_path, bbox_inches="tight", dpi=100) 90 | 91 | 92 | def plot_trajectory(data_dict_logs, log_scale=True, folder="./", name="loss_trajectory", save_eps=False, save_png=False): 93 | """ 94 | Helper function for visualizing loss trajectory. 95 | 96 | Parameters 97 | ---------- 98 | data_dict_logs : dict 99 | Dictionary containing loss trajectory information. 100 | log_scale : boolean 101 | Whether to use log scale for y-axis. 102 | folder : str 103 | The path of folder to save figure, by default "./". 104 | name : str 105 | Name of the file, by default "trajectory_plot". 106 | save_png : boolean 107 | Whether to save the plot in PNG format, by default False. 108 | save_eps : boolean 109 | Whether to save the plot in EPS format, by default False. 110 | """ 111 | t1, t2, t3 = [data_dict_logs["err_train_main_effect_training"], 112 | data_dict_logs["err_train_interaction_training"], data_dict_logs["err_train_tuning"]] 113 | v1, v2, v3 = [data_dict_logs["err_val_main_effect_training"], 114 | data_dict_logs["err_val_interaction_training"], data_dict_logs["err_val_tuning"]] 115 | 116 | if len(t1) + len(t2) + len(t3) == 0: 117 | return 118 | 119 | fig = plt.figure(figsize=(14, 4)) 120 | ax1 = plt.subplot(1, 2, 1) 121 | ax1.plot(np.arange(1, len(t1) + 1, 1), t1, color="r") 122 | ax1.plot(np.arange(len(t1) + 1, len(t1 + t2) + 1, 1), t2, color="b") 123 | ax1.plot(np.arange(len(t1 + t2) + 1, len(t1 + t2 + t3) + 1, 1), t3, color="y") 124 | if log_scale: 125 | ax1.set_yscale("log") 126 | ax1.set_yticks((10 ** np.linspace(np.log10(np.nanmin(t1 + t2 + t3)), np.log10(np.nanmax(t1 + t2 + t3)), 5)).round(5)) 127 | ax1.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter()) 128 | ax1.get_yaxis().set_minor_formatter(matplotlib.ticker.NullFormatter()) 129 | ax1.set_xlabel("Number of Epochs", fontsize=12) 130 | ax1.set_ylabel("Training Loss (Log Scale)", fontsize=12) 131 | else: 132 | ax1.set_yticks((np.linspace(np.nanmin(t1 + t2), np.nanmax(t1 + t2), 5)).round(5)) 133 | ax1.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter()) 134 | ax1.get_yaxis().set_minor_formatter(matplotlib.ticker.NullFormatter()) 135 | ax1.set_xlabel("Number of Epochs", fontsize=12) 136 | ax1.set_ylabel("Training Loss", fontsize=12) 137 | 138 | ax1.legend(["Stage 1: Training Main Effects", "Stage 2: Training Interactions", "Stage 3: Fine Tuning"]) 139 | 140 | ax2 = plt.subplot(1, 2, 2) 141 | ax2.plot(np.arange(1, len(v1) + 1, 1), v1, color="r") 142 | ax2.plot(np.arange(len(v1) + 1, len(v1 + v2) + 1, 1), v2, color="b") 143 | ax2.plot(np.arange(len(v1 + v2) + 1, len(v1 + v2 + v3) + 1, 1), v3, color="y") 144 | if log_scale: 145 | ax2.set_yscale("log") 146 | ax2.set_yticks((10 ** np.linspace(np.log10(np.nanmin(v1 + v2 + v3)), np.log10(np.nanmax(v1 + v2 + v3)), 5)).round(5)) 147 | ax2.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter()) 148 | ax2.get_yaxis().set_minor_formatter(matplotlib.ticker.NullFormatter()) 149 | ax2.set_xlabel("Number of Epochs", fontsize=12) 150 | ax2.set_ylabel("Validation Loss (Log Scale)", fontsize=12) 151 | else: 152 | 
ax2.set_yticks((np.linspace(np.nanmin(v1 + v2 + v3), np.nanmax(v1 + v2 + v3), 5)).round(5))
153 | ax2.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
154 | ax2.get_yaxis().set_minor_formatter(matplotlib.ticker.NullFormatter())
155 | ax2.set_xlabel("Number of Epochs", fontsize=12)
156 | ax2.set_ylabel("Validation Loss", fontsize=12)
157 | ax2.legend(["Stage 1: Training Main Effects", "Stage 2: Training Interactions", "Stage 3: Fine Tuning"])
158 | plt.show()
159 | 
160 | save_path = folder + name
161 | if save_eps:
162 | if not os.path.exists(folder):
163 | os.makedirs(folder)
164 | fig.savefig("%s.eps" % save_path, bbox_inches="tight", dpi=100)
165 | if save_png:
166 | if not os.path.exists(folder):
167 | os.makedirs(folder)
168 | fig.savefig("%s.png" % save_path, bbox_inches="tight", dpi=100)
169 | 
170 | 
171 | def feature_importance_visualize(feature_importance, feature_names, folder="./", name="feature_importance", save_png=False, save_eps=False):
172 | """
173 | Helper function for visualizing feature importance.
174 | 
175 | Parameters
176 | ----------
177 | feature_importance : np.ndarray of shape (n_features, )
178 | Feature importance based on Shapley value.
179 | feature_names : list of str of shape (n_features, )
180 | Feature name list.
181 | folder : str
182 | The path of folder to save figure, by default "./".
183 | name : str
184 | Name of the file, by default "feature_importance".
185 | save_png : boolean
186 | Whether to save the plot in PNG format, by default False.
187 | save_eps : boolean
188 | Whether to save the plot in EPS format, by default False.
189 | """
190 | all_ir = []
191 | all_names = []
192 | for fname, importance in zip(feature_names, feature_importance):  # "fname" avoids shadowing the output file name used below
193 | if importance > 0:
194 | all_ir.append(importance)
195 | all_names.append(fname)
196 | 
197 | max_ids = len(all_names)
198 | if max_ids > 0:
199 | fig = plt.figure(figsize=(0.4 + 0.65 * max_ids, 4))
200 | ax = plt.axes()
201 | ax.bar(np.arange(len(all_ir)), [ir for ir, _ in sorted(zip(all_ir, all_names))][::-1])
202 | ax.set_xticks(np.arange(len(all_ir)))
203 | ax.set_xticklabels([name for _, name in sorted(zip(all_ir, all_names))][::-1], rotation=60)
204 | plt.ylim(0, np.max(all_ir) + 0.05)
205 | plt.xlim(-1, len(all_names))
206 | plt.title("Feature Importance")
207 | 
208 | save_path = folder + name
209 | if save_eps:
210 | if not os.path.exists(folder):
211 | os.makedirs(folder)
212 | fig.savefig("%s.eps" % save_path, bbox_inches="tight", dpi=100)
213 | if save_png:
214 | if not os.path.exists(folder):
215 | os.makedirs(folder)
216 | fig.savefig("%s.png" % save_path, bbox_inches="tight", dpi=100)
217 | 
218 | 
219 | def effect_importance_visualize(data_dict_global, folder="./", name="effect_importance", save_png=False, save_eps=False):
220 | """
221 | Helper function for visualizing effect importance.
222 | 
223 | Parameters
224 | ----------
225 | data_dict_global : dict
226 | Dictionary with global explanation information.
227 | folder : str
228 | The path of folder to save figure, by default "./".
229 | name : str
230 | Name of the file, by default "effect_importance".
231 | save_png : boolean
232 | Whether to save the plot in PNG format, by default False.
233 | save_eps : boolean
234 | Whether to save the plot in EPS format, by default False.
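
Example (illustrative; only the "importance" entries are read by this
function):

    data_dict_global = {"X1": {"importance": 0.6},
                        "X1 x X2": {"importance": 0.4}}
    effect_importance_visualize(data_dict_global, folder="./results/",
                                name="effects", save_png=True)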

def effect_importance_visualize(data_dict_global, folder="./", name="effect_importance", save_png=False, save_eps=False):
    """
    Helper function for visualizing effect importance.

    Parameters
    ----------
    data_dict_global : dict
        Dictionary with global explanation information.
    folder : str
        The path of the folder in which the figure is saved, by default "./".
    name : str
        Name of the file, by default "effect_importance".
    save_png : boolean
        Whether to save the plot in PNG format, by default False.
    save_eps : boolean
        Whether to save the plot in EPS format, by default False.
    """
    all_ir = []
    all_names = []
    # Keep only the effects (main effects and interactions) with positive importance.
    for key, item in data_dict_global.items():
        if item["importance"] > 0:
            all_ir.append(item["importance"])
            all_names.append(key)

    max_ids = len(all_names)
    if max_ids > 0:
        fig = plt.figure(figsize=(0.4 + 0.65 * max_ids, 4))
        ax = plt.axes()
        ax.bar(np.arange(len(all_ir)), [ir for ir, _ in sorted(zip(all_ir, all_names))][::-1])
        ax.set_xticks(np.arange(len(all_ir)))
        ax.set_xticklabels([n for _, n in sorted(zip(all_ir, all_names))][::-1], rotation=60)
        plt.ylim(0, np.max(all_ir) + 0.05)
        plt.xlim(-1, len(all_names))
        plt.title("Effect Importance")

        save_path = folder + name
        if save_eps:
            if not os.path.exists(folder):
                os.makedirs(folder)
            fig.savefig("%s.eps" % save_path, bbox_inches="tight", dpi=100)
        if save_png:
            if not os.path.exists(folder):
                os.makedirs(folder)
            fig.savefig("%s.png" % save_path, bbox_inches="tight", dpi=100)
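
# Usage sketch (illustrative; not part of the original module). Only the
# "importance" field of each entry is read here; a real data_dict_global, as
# produced by the model's global explanation step, carries many more keys.
def _demo_effect_importance_visualize():
    data_dict_global = {"x1": {"importance": 0.5},
                        "x2": {"importance": 0.3},
                        "x1 vs. x2": {"importance": 0.2}}
    effect_importance_visualize(data_dict_global, folder="./results/", save_png=True)
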

def global_visualize_density(data_dict_global, main_effect_num=None, interaction_num=None, cols_per_row=4,
                             save_png=False, save_eps=False, folder="./", name="global_explain"):
    """
    Helper function for visualizing the global explanation with density plots.

    Parameters
    ----------
    data_dict_global : dict
        Dictionary with global explanation information.
    main_effect_num : int or None
        The number of top main effects to show, by default None.
        If None, all main effects are shown.
    interaction_num : int or None
        The number of top interactions to show, by default None.
        If None, all interactions are shown.
    cols_per_row : int
        The number of subfigures per row, by default 4.
    folder : str
        The path of the folder in which the figure is saved, by default "./".
    name : str
        Name of the file, by default "global_explain".
    save_png : boolean
        Whether to save the plot in PNG format, by default False.
    save_eps : boolean
        Whether to save the plot in EPS format, by default False.
    """
    maineffect_count = 0
    component_scales = []
    for key, item in data_dict_global.items():
        component_scales.append(item["importance"])
        if item["type"] != "pairwise":
            maineffect_count += 1

    # Select the active effects (those contributing to a positive cumulative
    # importance), in descending order; main effects precede interactions.
    component_scales = np.array(component_scales)
    sorted_index = np.argsort(component_scales)
    active_index = sorted_index[component_scales[sorted_index].cumsum() > 0][::-1]
    active_univariate_index = active_index[active_index < maineffect_count][:main_effect_num]
    active_interaction_index = active_index[active_index >= maineffect_count][:interaction_num]
    max_ids = len(active_univariate_index) + len(active_interaction_index)

    if max_ids == 0:
        return

    idx = 0
    fig = plt.figure(figsize=(6 * cols_per_row, 4.6 * int(np.ceil(max_ids / cols_per_row))))
    outer = gridspec.GridSpec(int(np.ceil(max_ids / cols_per_row)), cols_per_row, wspace=0.25, hspace=0.35)
    for indice in active_univariate_index:

        feature_name = list(data_dict_global.keys())[indice]
        if data_dict_global[feature_name]["type"] == "continuous":

            # Top panel: fitted shape function; bottom panel: input density.
            inner = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=outer[idx], wspace=0.1, hspace=0.1, height_ratios=[6, 1])
            ax1 = plt.Subplot(fig, inner[0])
            ax1.plot(data_dict_global[feature_name]["inputs"], data_dict_global[feature_name]["outputs"])
            ax1.set_xticklabels([])
            fig.add_subplot(ax1)

            ax2 = plt.Subplot(fig, inner[1])
            # Bar centers are the midpoints of the histogram bin edges.
            xint = ((np.array(data_dict_global[feature_name]["density"]["names"][1:])
                     + np.array(data_dict_global[feature_name]["density"]["names"][:-1])) / 2).reshape(-1)
            ax2.bar(xint, data_dict_global[feature_name]["density"]["scores"], width=xint[1] - xint[0])
            ax2.get_shared_x_axes().join(ax1, ax2)
            ax2.set_yticklabels([])
            ax2.autoscale()
            fig.add_subplot(ax2)

        elif data_dict_global[feature_name]["type"] == "categorical":

            inner = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=outer[idx],
                                                     wspace=0.1, hspace=0.1, height_ratios=[6, 1])
            ax1 = plt.Subplot(fig, inner[0])
            ax1.bar(np.arange(len(data_dict_global[feature_name]["inputs"])),
                    data_dict_global[feature_name]["outputs"])
            ax1.set_xticklabels([])
            fig.add_subplot(ax1)

            ax2 = plt.Subplot(fig, inner[1])
            ax2.bar(np.arange(len(data_dict_global[feature_name]["density"]["names"])),
                    data_dict_global[feature_name]["density"]["scores"])
            ax2.get_shared_x_axes().join(ax1, ax2)
            ax2.autoscale()
            ax2.set_xticks(data_dict_global[feature_name]["input_ticks"])
            ax2.set_xticklabels(data_dict_global[feature_name]["input_labels"])
            ax2.set_yticklabels([])
            fig.add_subplot(ax2)

        idx = idx + 1
        if len(str(ax2.get_xticks())) > 60:
            ax2.xaxis.set_tick_params(rotation=20)
        # The title reports the effect's relative importance as a percentage.
        ax1.set_title(feature_name + " (" + str(np.round(100 * data_dict_global[feature_name]["importance"], 1)) + "%)", fontsize=12)

    for indice in active_interaction_index:

        feature_name = list(data_dict_global.keys())[indice]
        axis_extent = data_dict_global[feature_name]["axis_extent"]

        ax_main = plt.Subplot(fig, outer[idx])
        interact_plot = ax_main.imshow(data_dict_global[feature_name]["outputs"], interpolation="nearest",
                                       aspect="auto", extent=axis_extent)

        if data_dict_global[feature_name]["xtype"] == "categorical":
            ax_main.set_xticks(data_dict_global[feature_name]["input1_ticks"])
            ax_main.set_xticklabels(data_dict_global[feature_name]["input1_labels"])
        if data_dict_global[feature_name]["ytype"] == "categorical":
            ax_main.set_yticks(data_dict_global[feature_name]["input2_ticks"])
            ax_main.set_yticklabels(data_dict_global[feature_name]["input2_labels"])

        # Choose the colorbar precision from the response range of the heatmap.
        response_precision = max(int(- np.log10(np.max(data_dict_global[feature_name]["outputs"])
                                     - np.min(data_dict_global[feature_name]["outputs"]))) + 2, 0)
        ax_main.set_title(feature_name + " (" + str(np.round(100 * data_dict_global[feature_name]["importance"], 1)) + "%)", fontsize=12)
        fig.add_subplot(ax_main)
        fig.colorbar(interact_plot, ax=ax_main, orientation="vertical",
                     format="%0." + str(response_precision) + "f", use_gridspec=True)
        idx = idx + 1
        if len(str(ax_main.get_xticks())) > 60:
            ax_main.xaxis.set_tick_params(rotation=20)

    if max_ids > 0:
        save_path = folder + name
        if save_eps:
            if not os.path.exists(folder):
                os.makedirs(folder)
            fig.savefig("%s.eps" % save_path, bbox_inches="tight", dpi=100)
        if save_png:
            if not os.path.exists(folder):
                os.makedirs(folder)
            fig.savefig("%s.png" % save_path, bbox_inches="tight", dpi=100)
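
# Usage sketch (illustrative; not part of the original module): a minimal
# data_dict_global with a single continuous main effect, spelling out the
# fields global_visualize_density expects ("density" holds the histogram of
# the raw inputs: "names" are the bin edges, "scores" the bin frequencies).
def _demo_global_visualize_density():
    grid = np.linspace(0, 1, 20)
    data_dict_global = {"x1": {"type": "continuous",
                               "importance": 1.0,
                               "inputs": grid,
                               "outputs": np.sin(2 * np.pi * grid),
                               "density": {"names": np.linspace(0, 1, 11),
                                           "scores": np.full(10, 0.1)}}}
    global_visualize_density(data_dict_global, cols_per_row=1, folder="./results/", name="demo", save_png=True)
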

def global_visualize_wo_density(data_dict_global, main_effect_num=None, interaction_num=None, cols_per_row=4,
                                save_png=False, save_eps=False, folder="./", name="global_explain"):
    """
    Helper function for visualizing the global explanation without density plots.

    Parameters
    ----------
    data_dict_global : dict
        Dictionary with global explanation information.
    main_effect_num : int or None
        The number of top main effects to show, by default None.
        If None, all main effects are shown.
    interaction_num : int or None
        The number of top interactions to show, by default None.
        If None, all interactions are shown.
    cols_per_row : int
        The number of subfigures per row, by default 4.
    folder : str
        The path of the folder in which the figure is saved, by default "./".
    name : str
        Name of the file, by default "global_explain".
    save_png : boolean
        Whether to save the plot in PNG format, by default False.
    save_eps : boolean
        Whether to save the plot in EPS format, by default False.
    """
    maineffect_count = 0
    component_scales = []
    for key, item in data_dict_global.items():
        component_scales.append(item["importance"])
        if item["type"] != "pairwise":
            maineffect_count += 1

    component_scales = np.array(component_scales)
    sorted_index = np.argsort(component_scales)
    active_index = sorted_index[component_scales[sorted_index].cumsum() > 0][::-1]
    active_univariate_index = active_index[active_index < maineffect_count][:main_effect_num]
    active_interaction_index = active_index[active_index >= maineffect_count][:interaction_num]
    max_ids = len(active_univariate_index) + len(active_interaction_index)

    if max_ids == 0:
        # Nothing active to plot; mirror the early return of global_visualize_density.
        return

    idx = 0
    fig = plt.figure(figsize=(5.2 * cols_per_row, 4 * int(np.ceil(max_ids / cols_per_row))))
    outer = gridspec.GridSpec(int(np.ceil(max_ids / cols_per_row)), cols_per_row, wspace=0.25, hspace=0.35)
    for indice in active_univariate_index:

        feature_name = list(data_dict_global.keys())[indice]
        if data_dict_global[feature_name]["type"] == "continuous":

            ax1 = plt.Subplot(fig, outer[idx])
            ax1.plot(data_dict_global[feature_name]["inputs"], data_dict_global[feature_name]["outputs"])
            ax1.set_title(feature_name, fontsize=12)
            fig.add_subplot(ax1)
            if len(str(ax1.get_xticks())) > 80:
                ax1.xaxis.set_tick_params(rotation=20)

        elif data_dict_global[feature_name]["type"] == "categorical":

            ax1 = plt.Subplot(fig, outer[idx])
            ax1.bar(np.arange(len(data_dict_global[feature_name]["inputs"])),
                    data_dict_global[feature_name]["outputs"])
            ax1.set_title(feature_name, fontsize=12)
            ax1.set_xticks(data_dict_global[feature_name]["input_ticks"])
            ax1.set_xticklabels(data_dict_global[feature_name]["input_labels"])
            fig.add_subplot(ax1)

        idx = idx + 1
        if len(str(ax1.get_xticks())) > 60:
            ax1.xaxis.set_tick_params(rotation=20)
        # The final title also reports the effect's relative importance.
        ax1.set_title(feature_name + " (" + str(np.round(100 * data_dict_global[feature_name]["importance"], 1)) + "%)", fontsize=12)

    for indice in active_interaction_index:

        feature_name = list(data_dict_global.keys())[indice]
        axis_extent = data_dict_global[feature_name]["axis_extent"]

        ax_main = plt.Subplot(fig, outer[idx])
        interact_plot = ax_main.imshow(data_dict_global[feature_name]["outputs"], interpolation="nearest",
                                       aspect="auto", extent=axis_extent)

        if data_dict_global[feature_name]["xtype"] == "categorical":
            ax_main.set_xticks(data_dict_global[feature_name]["input1_ticks"])
            ax_main.set_xticklabels(data_dict_global[feature_name]["input1_labels"])
        if data_dict_global[feature_name]["ytype"] == "categorical":
            ax_main.set_yticks(data_dict_global[feature_name]["input2_ticks"])
            ax_main.set_yticklabels(data_dict_global[feature_name]["input2_labels"])

        # Choose the colorbar precision from the response range of the heatmap.
        response_precision = max(int(- np.log10(np.max(data_dict_global[feature_name]["outputs"])
                                     - np.min(data_dict_global[feature_name]["outputs"]))) + 2, 0)
        ax_main.set_title(feature_name + " (" + str(np.round(100 * data_dict_global[feature_name]["importance"], 1)) + "%)", fontsize=12)
        fig.add_subplot(ax_main)
        fig.colorbar(interact_plot, ax=ax_main, orientation="vertical",
                     format="%0." + str(response_precision) + "f", use_gridspec=True)

        idx = idx + 1
        if len(str(ax_main.get_xticks())) > 60:
            ax_main.xaxis.set_tick_params(rotation=20)

    if max_ids > 0:
        save_path = folder + name
        if save_eps:
            if not os.path.exists(folder):
                os.makedirs(folder)
            fig.savefig("%s.eps" % save_path, bbox_inches="tight", dpi=100)
        if save_png:
            if not os.path.exists(folder):
                os.makedirs(folder)
            fig.savefig("%s.png" % save_path, bbox_inches="tight", dpi=100)
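
# Usage sketch (illustrative; not part of the original module): a single
# pairwise interaction rendered as a heatmap. "outputs" is a 2-D response
# surface and "axis_extent" gives [x_min, x_max, y_min, y_max] for imshow.
def _demo_global_visualize_wo_density():
    xx, yy = np.meshgrid(np.linspace(0, 1, 20), np.linspace(0, 1, 20))
    data_dict_global = {"x1 vs. x2": {"type": "pairwise",
                                      "importance": 1.0,
                                      "xtype": "continuous",
                                      "ytype": "continuous",
                                      "axis_extent": [0, 1, 0, 1],
                                      "outputs": xx * yy}}
    global_visualize_wo_density(data_dict_global, cols_per_row=1, folder="./results/", name="demo", save_png=True)
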

def local_visualize(data_dict_local, folder="./", name="local_explain", save_png=False, save_eps=False):
    """
    Helper function for visualizing a local explanation.

    Parameters
    ----------
    data_dict_local : dict
        Dictionary with local explanation information.
    folder : str
        The path of the folder in which the figure is saved, by default "./".
    name : str
        Name of the file, by default "local_explain".
    save_png : boolean
        Whether to save the plot in PNG format, by default False.
    save_eps : boolean
        Whether to save the plot in EPS format, by default False.
    """
    # Sort the active effects by the magnitude of their contribution to this prediction.
    idx = np.argsort(np.abs(data_dict_local["scores"][data_dict_local["active_indice"]]))[::-1]

    max_ids = len(data_dict_local["active_indice"])
    fig = plt.figure(figsize=(round((len(data_dict_local["active_indice"]) + 1) * 0.6), 4))
    plt.bar(np.arange(len(data_dict_local["active_indice"])), data_dict_local["scores"][data_dict_local["active_indice"]][idx])
    plt.xticks(np.arange(len(data_dict_local["active_indice"])),
               data_dict_local["effect_names"][data_dict_local["active_indice"]][idx], rotation=60)

    if "actual" in data_dict_local.keys():
        title = "Predicted: %0.4f | Actual: %0.4f" % (data_dict_local["predicted"], data_dict_local["actual"])
    else:
        title = "Predicted: %0.4f" % (data_dict_local["predicted"])
    plt.title(title, fontsize=12)

    if max_ids > 0:
        save_path = folder + name
        if save_eps:
            if not os.path.exists(folder):
                os.makedirs(folder)
            fig.savefig("%s.eps" % save_path, bbox_inches="tight", dpi=100)
        if save_png:
            if not os.path.exists(folder):
                os.makedirs(folder)
            fig.savefig("%s.png" % save_path, bbox_inches="tight", dpi=100)
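
# Usage sketch (illustrative; not part of the original module). "scores" holds
# the additive contribution of every effect to one prediction; "active_indice"
# selects the effects that are actually active in the fitted model.
def _demo_local_visualize():
    data_dict_local = {"scores": np.array([0.6, -0.3, 0.0, 0.1]),
                       "effect_names": np.array(["x1", "x2", "x3", "x1 vs. x2"]),
                       "active_indice": np.array([0, 1, 3]),
                       "predicted": 0.4,
                       "actual": 1.0}
    local_visualize(data_dict_local, folder="./results/", name="s1_local", save_png=True)
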
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
from setuptools import setup

package_data = {
    "gaminet": [
        "lib/lib_ebm_native_win_x64.dll",
        "lib/lib_ebm_native_linux_x64.so",
        "lib/lib_ebm_native_mac_x64.dylib",
        "lib/lib_ebm_native_win_x64.pdb"
    ]
}

setup(name='gaminet',
      version='1.0.0',
      description='PyTorch version of GAMINet; it was done when I was a PhD student at HKU',
      url='https://github.com/ZebinYang/GAMINet-Pytorch',
      author='Zebin Yang',
      author_email='yangzb2010@connect.hku.hk',
      license='GPL',
      packages=['gaminet'],
      package_data=package_data,
      install_requires=['matplotlib>=3.1.3', 'numpy>=1.15.2', 'scikit-learn>=1.0.2',
                        'joblib', 'pygam', 'tqdm', 'torch>=1.9'],
      zip_safe=False)
--------------------------------------------------------------------------------
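Note: since this is a standard setuptools layout, the package can be installed from the repository root with "pip install ." (or "pip install -e ." for an editable development install).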