├── .gitignore ├── LICENSE ├── README.md ├── dpmmpython ├── __init__.py ├── build_datasets.ipynb ├── dpmmwrapper.ipynb ├── dpmmwrapper.py ├── dpmmwrapper.pyc ├── install.py ├── priors.py ├── release.py ├── replication.py ├── replication_full.py └── replication_short.py ├── examples ├── clustering_example.ipynb └── multi_process.ipynb └── setup.py /.gitignore: -------------------------------------------------------------------------------- 1 | .py[cod] 2 | 3 | # C extensions 4 | *.so 5 | 6 | # Packages 7 | *.egg 8 | *.egg-info 9 | dist 10 | build 11 | eggs 12 | parts 13 | bin 14 | var 15 | sdist 16 | develop-eggs 17 | .installed.cfg 18 | lib 19 | lib64 20 | __pycache__ 21 | *.pyc 22 | # Installer logs 23 | pip-log.txt 24 | 25 | # Unit test / coverage reports 26 | .coverage 27 | .pytest_cache 28 | .tox 29 | nosetests.xml 30 | 31 | # Translations 32 | *.mo 33 | 34 | # Mr Developer 35 | .mr.developer.cfg 36 | .project 37 | .pydevproject 38 | 39 | .ropeproject 40 | # PyCharm 41 | .idea/* 42 | 43 | # created by distutils during build process 44 | MANIFEST 45 | 46 | # Mac Os 47 | .DS_Store 48 | 49 | 50 | .vscode* -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 2, June 1991 3 | 4 | Copyright (C) 1989, 1991 Free Software Foundation, Inc., 5 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 6 | Everyone is permitted to copy and distribute verbatim copies 7 | of this license document, but changing it is not allowed. 8 | 9 | Preamble 10 | 11 | The licenses for most software are designed to take away your 12 | freedom to share and change it. By contrast, the GNU General Public 13 | License is intended to guarantee your freedom to share and change free 14 | software--to make sure the software is free for all its users. This 15 | General Public License applies to most of the Free Software 16 | Foundation's software and to any other program whose authors commit to 17 | using it. (Some other Free Software Foundation software is covered by 18 | the GNU Lesser General Public License instead.) You can apply it to 19 | your programs, too. 20 | 21 | When we speak of free software, we are referring to freedom, not 22 | price. Our General Public Licenses are designed to make sure that you 23 | have the freedom to distribute copies of free software (and charge for 24 | this service if you wish), that you receive source code or can get it 25 | if you want it, that you can change the software or use pieces of it 26 | in new free programs; and that you know you can do these things. 27 | 28 | To protect your rights, we need to make restrictions that forbid 29 | anyone to deny you these rights or to ask you to surrender the rights. 30 | These restrictions translate to certain responsibilities for you if you 31 | distribute copies of the software, or if you modify it. 32 | 33 | For example, if you distribute copies of such a program, whether 34 | gratis or for a fee, you must give the recipients all the rights that 35 | you have. You must make sure that they, too, receive or can get the 36 | source code. And you must show them these terms so they know their 37 | rights. 38 | 39 | We protect your rights with two steps: (1) copyright the software, and 40 | (2) offer you this license which gives you legal permission to copy, 41 | distribute and/or modify the software. 42 | 43 | Also, for each author's protection and ours, we want to make certain 44 | that everyone understands that there is no warranty for this free 45 | software. If the software is modified by someone else and passed on, we 46 | want its recipients to know that what they have is not the original, so 47 | that any problems introduced by others will not reflect on the original 48 | authors' reputations. 49 | 50 | Finally, any free program is threatened constantly by software 51 | patents. We wish to avoid the danger that redistributors of a free 52 | program will individually obtain patent licenses, in effect making the 53 | program proprietary. To prevent this, we have made it clear that any 54 | patent must be licensed for everyone's free use or not licensed at all. 55 | 56 | The precise terms and conditions for copying, distribution and 57 | modification follow. 58 | 59 | GNU GENERAL PUBLIC LICENSE 60 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 61 | 62 | 0. This License applies to any program or other work which contains 63 | a notice placed by the copyright holder saying it may be distributed 64 | under the terms of this General Public License. The "Program", below, 65 | refers to any such program or work, and a "work based on the Program" 66 | means either the Program or any derivative work under copyright law: 67 | that is to say, a work containing the Program or a portion of it, 68 | either verbatim or with modifications and/or translated into another 69 | language. (Hereinafter, translation is included without limitation in 70 | the term "modification".) Each licensee is addressed as "you". 71 | 72 | Activities other than copying, distribution and modification are not 73 | covered by this License; they are outside its scope. The act of 74 | running the Program is not restricted, and the output from the Program 75 | is covered only if its contents constitute a work based on the 76 | Program (independent of having been made by running the Program). 77 | Whether that is true depends on what the Program does. 78 | 79 | 1. You may copy and distribute verbatim copies of the Program's 80 | source code as you receive it, in any medium, provided that you 81 | conspicuously and appropriately publish on each copy an appropriate 82 | copyright notice and disclaimer of warranty; keep intact all the 83 | notices that refer to this License and to the absence of any warranty; 84 | and give any other recipients of the Program a copy of this License 85 | along with the Program. 86 | 87 | You may charge a fee for the physical act of transferring a copy, and 88 | you may at your option offer warranty protection in exchange for a fee. 89 | 90 | 2. You may modify your copy or copies of the Program or any portion 91 | of it, thus forming a work based on the Program, and copy and 92 | distribute such modifications or work under the terms of Section 1 93 | above, provided that you also meet all of these conditions: 94 | 95 | a) You must cause the modified files to carry prominent notices 96 | stating that you changed the files and the date of any change. 97 | 98 | b) You must cause any work that you distribute or publish, that in 99 | whole or in part contains or is derived from the Program or any 100 | part thereof, to be licensed as a whole at no charge to all third 101 | parties under the terms of this License. 102 | 103 | c) If the modified program normally reads commands interactively 104 | when run, you must cause it, when started running for such 105 | interactive use in the most ordinary way, to print or display an 106 | announcement including an appropriate copyright notice and a 107 | notice that there is no warranty (or else, saying that you provide 108 | a warranty) and that users may redistribute the program under 109 | these conditions, and telling the user how to view a copy of this 110 | License. (Exception: if the Program itself is interactive but 111 | does not normally print such an announcement, your work based on 112 | the Program is not required to print an announcement.) 113 | 114 | These requirements apply to the modified work as a whole. If 115 | identifiable sections of that work are not derived from the Program, 116 | and can be reasonably considered independent and separate works in 117 | themselves, then this License, and its terms, do not apply to those 118 | sections when you distribute them as separate works. But when you 119 | distribute the same sections as part of a whole which is a work based 120 | on the Program, the distribution of the whole must be on the terms of 121 | this License, whose permissions for other licensees extend to the 122 | entire whole, and thus to each and every part regardless of who wrote it. 123 | 124 | Thus, it is not the intent of this section to claim rights or contest 125 | your rights to work written entirely by you; rather, the intent is to 126 | exercise the right to control the distribution of derivative or 127 | collective works based on the Program. 128 | 129 | In addition, mere aggregation of another work not based on the Program 130 | with the Program (or with a work based on the Program) on a volume of 131 | a storage or distribution medium does not bring the other work under 132 | the scope of this License. 133 | 134 | 3. You may copy and distribute the Program (or a work based on it, 135 | under Section 2) in object code or executable form under the terms of 136 | Sections 1 and 2 above provided that you also do one of the following: 137 | 138 | a) Accompany it with the complete corresponding machine-readable 139 | source code, which must be distributed under the terms of Sections 140 | 1 and 2 above on a medium customarily used for software interchange; or, 141 | 142 | b) Accompany it with a written offer, valid for at least three 143 | years, to give any third party, for a charge no more than your 144 | cost of physically performing source distribution, a complete 145 | machine-readable copy of the corresponding source code, to be 146 | distributed under the terms of Sections 1 and 2 above on a medium 147 | customarily used for software interchange; or, 148 | 149 | c) Accompany it with the information you received as to the offer 150 | to distribute corresponding source code. (This alternative is 151 | allowed only for noncommercial distribution and only if you 152 | received the program in object code or executable form with such 153 | an offer, in accord with Subsection b above.) 154 | 155 | The source code for a work means the preferred form of the work for 156 | making modifications to it. For an executable work, complete source 157 | code means all the source code for all modules it contains, plus any 158 | associated interface definition files, plus the scripts used to 159 | control compilation and installation of the executable. However, as a 160 | special exception, the source code distributed need not include 161 | anything that is normally distributed (in either source or binary 162 | form) with the major components (compiler, kernel, and so on) of the 163 | operating system on which the executable runs, unless that component 164 | itself accompanies the executable. 165 | 166 | If distribution of executable or object code is made by offering 167 | access to copy from a designated place, then offering equivalent 168 | access to copy the source code from the same place counts as 169 | distribution of the source code, even though third parties are not 170 | compelled to copy the source along with the object code. 171 | 172 | 4. You may not copy, modify, sublicense, or distribute the Program 173 | except as expressly provided under this License. Any attempt 174 | otherwise to copy, modify, sublicense or distribute the Program is 175 | void, and will automatically terminate your rights under this License. 176 | However, parties who have received copies, or rights, from you under 177 | this License will not have their licenses terminated so long as such 178 | parties remain in full compliance. 179 | 180 | 5. You are not required to accept this License, since you have not 181 | signed it. However, nothing else grants you permission to modify or 182 | distribute the Program or its derivative works. These actions are 183 | prohibited by law if you do not accept this License. Therefore, by 184 | modifying or distributing the Program (or any work based on the 185 | Program), you indicate your acceptance of this License to do so, and 186 | all its terms and conditions for copying, distributing or modifying 187 | the Program or works based on it. 188 | 189 | 6. Each time you redistribute the Program (or any work based on the 190 | Program), the recipient automatically receives a license from the 191 | original licensor to copy, distribute or modify the Program subject to 192 | these terms and conditions. You may not impose any further 193 | restrictions on the recipients' exercise of the rights granted herein. 194 | You are not responsible for enforcing compliance by third parties to 195 | this License. 196 | 197 | 7. If, as a consequence of a court judgment or allegation of patent 198 | infringement or for any other reason (not limited to patent issues), 199 | conditions are imposed on you (whether by court order, agreement or 200 | otherwise) that contradict the conditions of this License, they do not 201 | excuse you from the conditions of this License. If you cannot 202 | distribute so as to satisfy simultaneously your obligations under this 203 | License and any other pertinent obligations, then as a consequence you 204 | may not distribute the Program at all. For example, if a patent 205 | license would not permit royalty-free redistribution of the Program by 206 | all those who receive copies directly or indirectly through you, then 207 | the only way you could satisfy both it and this License would be to 208 | refrain entirely from distribution of the Program. 209 | 210 | If any portion of this section is held invalid or unenforceable under 211 | any particular circumstance, the balance of the section is intended to 212 | apply and the section as a whole is intended to apply in other 213 | circumstances. 214 | 215 | It is not the purpose of this section to induce you to infringe any 216 | patents or other property right claims or to contest validity of any 217 | such claims; this section has the sole purpose of protecting the 218 | integrity of the free software distribution system, which is 219 | implemented by public license practices. Many people have made 220 | generous contributions to the wide range of software distributed 221 | through that system in reliance on consistent application of that 222 | system; it is up to the author/donor to decide if he or she is willing 223 | to distribute software through any other system and a licensee cannot 224 | impose that choice. 225 | 226 | This section is intended to make thoroughly clear what is believed to 227 | be a consequence of the rest of this License. 228 | 229 | 8. If the distribution and/or use of the Program is restricted in 230 | certain countries either by patents or by copyrighted interfaces, the 231 | original copyright holder who places the Program under this License 232 | may add an explicit geographical distribution limitation excluding 233 | those countries, so that distribution is permitted only in or among 234 | countries not thus excluded. In such case, this License incorporates 235 | the limitation as if written in the body of this License. 236 | 237 | 9. The Free Software Foundation may publish revised and/or new versions 238 | of the General Public License from time to time. Such new versions will 239 | be similar in spirit to the present version, but may differ in detail to 240 | address new problems or concerns. 241 | 242 | Each version is given a distinguishing version number. If the Program 243 | specifies a version number of this License which applies to it and "any 244 | later version", you have the option of following the terms and conditions 245 | either of that version or of any later version published by the Free 246 | Software Foundation. If the Program does not specify a version number of 247 | this License, you may choose any version ever published by the Free Software 248 | Foundation. 249 | 250 | 10. If you wish to incorporate parts of the Program into other free 251 | programs whose distribution conditions are different, write to the author 252 | to ask for permission. For software which is copyrighted by the Free 253 | Software Foundation, write to the Free Software Foundation; we sometimes 254 | make exceptions for this. Our decision will be guided by the two goals 255 | of preserving the free status of all derivatives of our free software and 256 | of promoting the sharing and reuse of software generally. 257 | 258 | NO WARRANTY 259 | 260 | 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY 261 | FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN 262 | OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES 263 | PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED 264 | OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 265 | MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS 266 | TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE 267 | PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, 268 | REPAIR OR CORRECTION. 269 | 270 | 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 271 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR 272 | REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, 273 | INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING 274 | OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED 275 | TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY 276 | YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER 277 | PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE 278 | POSSIBILITY OF SUCH DAMAGES. 279 | 280 | END OF TERMS AND CONDITIONS 281 | 282 | How to Apply These Terms to Your New Programs 283 | 284 | If you develop a new program, and you want it to be of the greatest 285 | possible use to the public, the best way to achieve this is to make it 286 | free software which everyone can redistribute and change under these terms. 287 | 288 | To do so, attach the following notices to the program. It is safest 289 | to attach them to the start of each source file to most effectively 290 | convey the exclusion of warranty; and each file should have at least 291 | the "copyright" line and a pointer to where the full notice is found. 292 | 293 | 294 | Copyright (C) 295 | 296 | This program is free software; you can redistribute it and/or modify 297 | it under the terms of the GNU General Public License as published by 298 | the Free Software Foundation; either version 2 of the License, or 299 | (at your option) any later version. 300 | 301 | This program is distributed in the hope that it will be useful, 302 | but WITHOUT ANY WARRANTY; without even the implied warranty of 303 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 304 | GNU General Public License for more details. 305 | 306 | You should have received a copy of the GNU General Public License along 307 | with this program; if not, write to the Free Software Foundation, Inc., 308 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 309 | 310 | Also add information on how to contact you by electronic and paper mail. 311 | 312 | If the program is interactive, make it output a short notice like this 313 | when it starts in an interactive mode: 314 | 315 | Gnomovision version 69, Copyright (C) year name of author 316 | Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 317 | This is free software, and you are welcome to redistribute it 318 | under certain conditions; type `show c' for details. 319 | 320 | The hypothetical commands `show w' and `show c' should show the appropriate 321 | parts of the General Public License. Of course, the commands you use may 322 | be called something other than `show w' and `show c'; they could even be 323 | mouse-clicks or menu items--whatever suits your program. 324 | 325 | You should also get your employer (if you work as a programmer) or your 326 | school, if any, to sign a "copyright disclaimer" for the program, if 327 | necessary. Here is a sample; alter the names: 328 | 329 | Yoyodyne, Inc., hereby disclaims all copyright interest in the program 330 | `Gnomovision' (which makes passes at compilers) written by James Hacker. 331 | 332 | , 1 April 1989 333 | Ty Coon, President of Vice 334 | 335 | This General Public License does not permit incorporating your program into 336 | proprietary programs. If your program is a subroutine library, you may 337 | consider it more useful to permit linking proprietary applications with the 338 | library. If this is what you want to do, use the GNU Lesser General 339 | Public License instead of this License. 340 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |
2 |

3 | DPGMM SubClusters 2d example 4 |

5 | 6 | ## DPMMSubClusters 7 | 8 | This package is a Python wrapper for the [DPMMSubClusters.jl](https://github.com/BGU-CS-VIL/DPMMSubClusters.jl) Julia package and for the [DPMMSubClusters_GPU](https://github.com/BGU-CS-VIL/DPMMSubClusters_GPU) CUDA/C++ package.
9 | 10 | The package is useful for fitting, in a scalable way, a mixture model with an unknown number of components. We currently support either Multinomial or Gaussian components, but additional types of components can be easily added, as long as they belong to an exponential family. 11 | 12 | 13 | 14 | ### Motivation 15 | 16 | Working on a subset of 100K images from ImageNet, containing 79 classes, we have created embeddings using [SWAV](https://github.com/facebookresearch/swav), and reduced the dimension to 128 using PCA. We have compared our method with the popular scikit-learn [GMM](https://scikit-learn.org/stable/modules/generated/sklearn.mixture.GaussianMixture.html) and [DPGMM](https://scikit-learn.org/stable/modules/generated/sklearn.mixture.BayesianGaussianMixture.html) with the following results: 17 |

18 | 19 | | Method | Timing (sec) | NMI (higher is better) | 20 | |-----------------------------------------------------|--------------|------------------------| 21 | | *Scikit-learn's GMM* (using EM, and given the True K) | 2523 | 0.695 | 22 | | *Scikit-learn's DPGMM* | 6108 | 0.683 | 23 | | DPMMpython (CPU Version) | 475 | 0.705 | 24 | 25 |

26 | 27 | 28 | ### Installation 29 | 30 | If you wish to use only the CPU version, you may skip all the GPU related steps. 31 | 32 | 1. Install Julia from: https://julialang.org/downloads/platform 33 | 2. Add our DPMMSubCluster package from within a Julia terminal via Julia package manager: 34 | ``` 35 | ] add DPMMSubClusters 36 | ``` 37 | 3. Add our dpmmpython package in python: pip install dpmmpython 38 | 4. Add Environment Variables: 39 | #### On Linux: 40 | 1. Add to the "PATH" environment variable the path to the Julia executable (e.g., in .bashrc add: export PATH =$PATH:$HOME/julia/julia-1.6.0/bin). 41 | #### On Windows: 42 | 1. Add to the "PATH" environment variable the path to the Julia executable (e.g., C:\Users\\AppData\Local\Programs\Julia\Julia-1.6.0\bin). 43 | 5. Install PyJulia from within a Python terminal: 44 | ``` 45 | import julia;julia.install(); 46 | ``` 47 | GPU Steps: 48 | 49 | 1. Install CUDA version 11.2 (or higher) from https://developer.nvidia.com/CUDA-downloads 50 | 2. git clone https://github.com/BGU-CS-VIL/DPMMSubClusters_GPU 51 | 3. Add Environment Variables: 52 | #### On Linux: 53 | 1. Add "CUDA_VERSION" with the value of the version of your CUDA installation (e.g., 11.6). 54 | 2. Make sure that CUDA_PATH exist. If it is missing add it with a path to CUDA (e.g., export CUDA_PATH=/usr/local/cuda-11.6/). 55 | 3. Make sure that the relevant CUDA paths are included in $PATH and $LD_LIBRARY_PATH (e.g., export PATH=/usr/local/cuda-11.6/bin:$PATH, export LD_LIBRARY_PATH=/usr/local/cuda- 56 | 11.6/lib64:$LD_LIBRARY_PATH). 57 | #### On Windows: 58 | 1. Add "CUDA_VERSION" with the value of the version of your CUDA installation (e.g., 11.6). 59 | 2. Make sure that CUDA_PATH exists. If it is missing add it with a path to CUDA (e.g., C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.6). 60 | 4. Install cmake if necessary. 61 | 62 | 5. For Windows only (optional, used on for debugging purposes): Install OpenCV 63 | 1. run Git Bash 64 | 2. cd /DPMMSubClusters 65 | 3. ./installOCV.sh 66 | 67 | ### Building 68 | For Windows for the CUDA/C++ package both of the build options below are viable. For Linux use 69 | Option 2. 70 | #### Option 1: 71 | DPMMSubClusters.sln - Solution file for Visual Studio 2019 72 | #### Option 2: 73 | CMakeLists.txt 74 | 1. Run in the terminal: 75 | ``` 76 | cd /DPMMSubClusters 77 | mkdir build 78 | cd build 79 | cmake -S ../ 80 | ``` 81 | 2. Build: 82 | * Windows: 83 | ```cmake --build . --config Release --target ALL_BUILD``` 84 | * Linux: ```cmake --build . --config Release --target all``` 85 | 86 | ### Post Build 87 | Add Environment Variable: 88 | * On Linux:
89 | Add "DPMM_GPU_FULL_PATH_TO_PACKAGE_IN_LINUX" with the value of the path to the binary of the package DPMMSubClusters_GPU.
90 | The path is: /DPMMSubClusters/DPMMSubClusters. 91 | * On Windows:
92 | Add "DPMM_GPU_FULL_PATH_TO_PACKAGE_IN_WINDOWS" with the value of the path to the exe of the package DPMMSubClusters_GPU.
93 | The path is: \DPMMSubClusters\build\Release 94 | \DPMMSubClusters.exe. 95 | 96 | End of GPU Steps 97 | 98 | ## Precompiled Binaries - 99 | [Windows](https://drive.google.com/file/d/1gQE6BWSseOEBW3xFTuahXJPIZI16uwj7/view?usp=sharing)
100 | [Linux](https://drive.google.com/file/d/1EWBqZG2jv4yH_O-BIwvDdn6gTJbF4mU4/view?usp=sharing)
101 | Both binaries were compiled with CUDA 11.2, note that you still need to have cuda and cudnn installed in order to use these. 102 | 103 | 104 | ### Usage Example: 105 | 106 | ``` 107 | from julia.api import Julia 108 | jl = Julia(compiled_modules=False) 109 | from dpmmpython.dpmmwrapper import DPMMPython 110 | from dpmmpython.priors import niw 111 | import numpy as np 112 | 113 | data,gt = DPMMPython.generate_gaussian_data(10000, 2, 10, 100.0) 114 | prior = niw(1,np.zeros(2),4,np.eye(2)) 115 | labels,_,results = DPMMPython.fit(data,100,prior = prior,verbose = True, gt = gt, gpu = False) 116 | 117 | ``` 118 | ``` 119 | Iteration: 1 || Clusters count: 1 || Log posterior: -71190.14226686998 || Vi score: 1.990707323192506 || NMI score: 6.69243345834295e-16 || Iter Time:0.004499912261962891 || Total time:0.004499912261962891 120 | Iteration: 2 || Clusters count: 1 || Log posterior: -71190.14226686998 || Vi score: 1.990707323192506 || NMI score: 6.69243345834295e-16 || Iter Time:0.0038819313049316406 || Total time:0.008381843566894531 121 | ... 122 | Iteration: 98 || Clusters count: 9 || Log posterior: -40607.39498126549 || Vi score: 0.11887067921133423 || NMI score: 0.9692247699387838 || Iter Time:0.015907764434814453 || Total time:0.5749104022979736 123 | Iteration: 99 || Clusters count: 9 || Log posterior: -40607.39498126549 || Vi score: 0.11887067921133423 || NMI score: 0.9692247699387838 || Iter Time:0.01072382926940918 || Total time:0.5856342315673828 124 | Iteration: 100 || Clusters count: 9 || Log posterior: -40607.39498126549 || Vi score: 0.11887067921133423 || NMI score: 0.9692247699387838 || Iter Time:0.010260820388793945 || Total time:0.5958950519561768 125 | ``` 126 | ``` 127 | predictions, probabilities = DPMMPython.predict(results[-1],data) 128 | ``` 129 | 130 | You can modify the number of processes by using `DPMMPython.add_procs(procs_count)`, note that you can only scale it upwards. 131 | 132 | #### Additional Examples: 133 | [Clustering](https://nbviewer.jupyter.org/github/BGU-CS-VIL/dpmmpython/blob/master/examples/clustering_example.ipynb) 134 |
135 | [Multi-Process](https://nbviewer.jupyter.org/github/BGU-CS-VIL/dpmmpython/blob/master/examples/multi_process.ipynb) 136 | 137 | 138 | #### Python 3.8/3.9 139 | If you are having problems with the above Python version, please update PyJulia and PyCall to the latest versions, this should fix it. 140 | 141 | ### Misc 142 | 143 | For any questions: dinari@post.bgu.ac.il 144 | 145 | Contributions, feature requests, suggestion etc.. are welcomed. 146 | 147 | If you use this code for your work, please cite the following works: 148 | 149 | ``` 150 | @inproceedings{dinari2019distributed, 151 | title={Distributed MCMC Inference in Dirichlet Process Mixture Models Using Julia}, 152 | author={Dinari, Or and Yu, Angel and Freifeld, Oren and Fisher III, John W}, 153 | booktitle={2019 19th IEEE/ACM International Symposium on Cluster, Cloud and Grid Computing (CCGRID)}, 154 | pages={518--525}, 155 | year={2019} 156 | } 157 | 158 | @article{dinari2022cpu, 159 | title={CPU-and GPU-based Distributed Sampling in Dirichlet Process Mixtures for Large-scale Analysis}, 160 | author={Dinari, Or and Zamir, Raz and Fisher III, John W and Freifeld, Oren}, 161 | journal={arXiv preprint arXiv:2204.08988}, 162 | year={2022} 163 | } 164 | ``` 165 | -------------------------------------------------------------------------------- /dpmmpython/__init__.py: -------------------------------------------------------------------------------- 1 | from .release import __version__ 2 | from .install import install 3 | import os 4 | import julia 5 | 6 | -------------------------------------------------------------------------------- /dpmmpython/build_datasets.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import tensorflow as tf\n", 10 | "import numpy as np" 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": 2, 16 | "metadata": { 17 | "collapsed": false, 18 | "pycharm": { 19 | "name": "#%%\n" 20 | } 21 | }, 22 | "outputs": [], 23 | "source": [ 24 | "mnist = tf.keras.datasets.mnist\n", 25 | "(train_images, train_labels), (test_images, test_labels) = mnist.load_data()\n", 26 | "train_images = train_images.reshape(60000, 784)\n", 27 | "np.save(\"mnist_images.npy\",train_images)\n", 28 | "np.save(\"mnist_labels.npy\",train_labels)" 29 | ] 30 | }, 31 | { 32 | "cell_type": "code", 33 | "execution_count": 3, 34 | "metadata": {}, 35 | "outputs": [], 36 | "source": [ 37 | "fashion_mnist = tf.keras.datasets.fashion_mnist\n", 38 | "(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()\n", 39 | "train_images = train_images.reshape(60000, 784)\n", 40 | "np.save(\"fashion_mnist_images.npy\",train_images)\n", 41 | "np.save(\"fashion_mnist_labels.npy\",train_labels)" 42 | ] 43 | }, 44 | { 45 | "cell_type": "code", 46 | "execution_count": 4, 47 | "metadata": {}, 48 | "outputs": [], 49 | "source": [ 50 | "cifar10 = tf.keras.datasets.cifar10\n", 51 | "(train_images, train_labels), (test_images, test_labels) = cifar10.load_data()\n", 52 | "train_images = train_images.reshape(50000, 32*32*3)\n", 53 | "np.save(\"cifar10_images.npy\",train_images)\n", 54 | "np.save(\"cifar10_labels.npy\",train_labels)" 55 | ] 56 | }, 57 | { 58 | "cell_type": "markdown", 59 | "source": [ 60 | "\"imagenet_short.csv\" can be downloaded from: https://drive.google.com/file/d/1_FgNQ5v9UnMSTbGduJjvue0a2p-EK2JZ/view?usp=sharing" 61 | ], 62 | "metadata": { 63 | "collapsed": false 64 | } 65 | }, 66 | { 67 | "cell_type": "code", 68 | "execution_count": 5, 69 | "metadata": { 70 | "collapsed": false, 71 | "pycharm": { 72 | "name": "#%%\n" 73 | } 74 | }, 75 | "outputs": [ 76 | { 77 | "name": "stdout", 78 | "output_type": "stream", 79 | "text": [ 80 | "[96. 96. 96. ... 40. 40. 40.]\n" 81 | ] 82 | } 83 | ], 84 | "source": [ 85 | "from numpy import genfromtxt\n", 86 | "data = genfromtxt('imagenet_short.csv', delimiter=',')\n", 87 | "np.save(\"imagenet64_images.npy\",data[:,0:64])\n", 88 | "np.save(\"imagenet64_labels.npy\",data[:,-1])\n", 89 | "print(data[:,-1])\n" 90 | ] 91 | }, 92 | { 93 | "cell_type": "code", 94 | "execution_count": 7, 95 | "metadata": { 96 | "collapsed": false, 97 | "pycharm": { 98 | "name": "#%%\n" 99 | } 100 | }, 101 | "outputs": [ 102 | { 103 | "name": "stdout", 104 | "output_type": "stream", 105 | "text": [ 106 | "(11314, 130107)\n" 107 | ] 108 | } 109 | ], 110 | "source": [ 111 | "from sklearn.datasets import fetch_20newsgroups_vectorized\n", 112 | "data, labels = fetch_20newsgroups_vectorized(subset='train',return_X_y=True,normalize=False)\n", 113 | "\n", 114 | "D = 20000\n", 115 | "print(data.shape)\n", 116 | "sum_row = data.sum(axis=0)\n", 117 | "sorted_sum_row = np.argsort(sum_row, axis=1)[0,::-1]\n", 118 | "indices = np.squeeze(np.asarray(sorted_sum_row[0,:D]))\n", 119 | "data_array = data[:,indices].toarray()\n", 120 | "np.save(\"20newsgroups\"+str(D)+\"_train.npy\",data_array)\n", 121 | "np.save(\"20newsgroups\"+str(D)+\"_labels.npy\",labels+1)" 122 | ] 123 | } 124 | ], 125 | "metadata": { 126 | "celltoolbar": "Raw Cell Format", 127 | "interpreter": { 128 | "hash": "34b414f817d4cb14ae50d2374de9fe6611a9d65cd3430da6f5e12d03a6418fd7" 129 | }, 130 | "kernelspec": { 131 | "display_name": "Python 3", 132 | "language": "python", 133 | "name": "python3" 134 | }, 135 | "language_info": { 136 | "codemirror_mode": { 137 | "name": "ipython", 138 | "version": 3 139 | }, 140 | "file_extension": ".py", 141 | "mimetype": "text/x-python", 142 | "name": "python", 143 | "nbconvert_exporter": "python", 144 | "pygments_lexer": "ipython3", 145 | "version": "3.8.11" 146 | } 147 | }, 148 | "nbformat": 4, 149 | "nbformat_minor": 2 150 | } -------------------------------------------------------------------------------- /dpmmpython/dpmmwrapper.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "source": [ 6 | "The real datasets that are used in this notebook can be created by build_datasets.ipynb" 7 | ], 8 | "metadata": { 9 | "collapsed": false 10 | } 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": null, 15 | "metadata": {}, 16 | "outputs": [], 17 | "source": [ 18 | "import os\n", 19 | "#Set the full path for the PMMSubClusters.exe in windows\n", 20 | "FULL_PATH_TO_PACKAGE_IN_WINDOWS = os.environ.get('DPMM_GPU_FULL_PATH_TO_PACKAGE_IN_WINDOWS')\n", 21 | "\n", 22 | "#Set the full path for the PMMSubClusters in Linux\n", 23 | "FULL_PATH_TO_PACKAGE_IN_LINUX = os.environ.get('DPMM_GPU_FULL_PATH_TO_PACKAGE_IN_LINUX')" 24 | ] 25 | }, 26 | { 27 | "cell_type": "code", 28 | "execution_count": null, 29 | "metadata": { 30 | "collapsed": false, 31 | "pycharm": { 32 | "name": "#%%\n" 33 | } 34 | }, 35 | "outputs": [], 36 | "source": [ 37 | "import platform\n", 38 | "\n", 39 | "if platform.system().startswith('Windows'):\n", 40 | " if FULL_PATH_TO_PACKAGE_IN_WINDOWS == None:\n", 41 | " print('Missing path for windows package. For example: FULL_PATH_TO_PACKAGE_IN_WINDOWS = \"C:/DPMMSubClusters.exe\"')\n", 42 | " assert(False)\n", 43 | "elif platform.system().startswith(\"Linux\"):\n", 44 | " if FULL_PATH_TO_PACKAGE_IN_LINUX == None:\n", 45 | " print('Missing path for linux package. For example: FULL_PATH_TO_PACKAGE_IN_LINUX = \"/home/user/bin/DPMMSubClusters\"')\n", 46 | " assert(False)" 47 | ] 48 | }, 49 | { 50 | "cell_type": "code", 51 | "execution_count": null, 52 | "metadata": { 53 | "collapsed": false, 54 | "pycharm": { 55 | "name": "#%%\n" 56 | } 57 | }, 58 | "outputs": [], 59 | "source": [ 60 | "import julia\n", 61 | "from julia.api import Julia\n", 62 | "jl = Julia(compiled_modules=False)\n", 63 | "\n", 64 | "# julia.install()\n", 65 | "from dpmmpython.priors import niw, multinomial\n", 66 | "from julia import DPMMSubClusters\n", 67 | "import numpy as np\n", 68 | "import platform\n", 69 | "import subprocess\n", 70 | "import json\n", 71 | "import pandas as pd\n", 72 | "from sklearn.mixture import GaussianMixture\n", 73 | "from sklearn.mixture import BayesianGaussianMixture\n", 74 | "from sklearn.metrics import normalized_mutual_info_score as nmi\n", 75 | "from tqdm import tqdm\n", 76 | "from time import time\n", 77 | "from datetime import datetime" 78 | ] 79 | }, 80 | { 81 | "cell_type": "code", 82 | "execution_count": null, 83 | "metadata": {}, 84 | "outputs": [], 85 | "source": [ 86 | "if not os.path.exists('results'):\n", 87 | " os.makedirs('results')" 88 | ] 89 | }, 90 | { 91 | "cell_type": "code", 92 | "execution_count": null, 93 | "metadata": {}, 94 | "outputs": [], 95 | "source": [ 96 | "class prior:\n", 97 | " def to_julia_prior(self):\n", 98 | " pass\n", 99 | "\n", 100 | " def get_type(self):\n", 101 | " pass\n", 102 | "\n", 103 | " def to_JSON(self):\n", 104 | " pass\n", 105 | "\n", 106 | "\n", 107 | "class niw(prior):\n", 108 | " def __init__(self, kappa, mu, nu, psi):\n", 109 | " if nu < len(mu):\n", 110 | " raise Exception('nu should be atleast the Dim')\n", 111 | " self.kappa = kappa\n", 112 | " self.mu = mu\n", 113 | " self.nu = nu\n", 114 | " self.psi = psi\n", 115 | "\n", 116 | " def to_julia_prior(self):\n", 117 | " return DPMMSubClusters.niw_hyperparams(self.kappa, self.mu, self.nu, self.psi)\n", 118 | "\n", 119 | " def get_type(self):\n", 120 | " return 'Gaussian'\n", 121 | "\n", 122 | " def to_JSON(self):\n", 123 | " j = {'k': self.kappa,\n", 124 | " 'm': self.mu.tolist(),\n", 125 | " 'v': self.nu,\n", 126 | " 'psi': self.psi.tolist()\n", 127 | " }\n", 128 | "\n", 129 | " return j\n", 130 | "\n", 131 | "\n", 132 | "class multinomial(prior):\n", 133 | " def __init__(self, alpha, dim=1):\n", 134 | " if isinstance(alpha, np.ndarray):\n", 135 | " self.alpha = alpha\n", 136 | " else:\n", 137 | " self.alpha = np.ones(dim) * alpha\n", 138 | "\n", 139 | " def to_julia_prior(self):\n", 140 | " return DPMMSubClusters.multinomial_hyper(self.alpha)\n", 141 | "\n", 142 | " def get_type(self):\n", 143 | " return 'Multinomial'\n", 144 | "\n", 145 | " def to_JSON(self):\n", 146 | " j = {'alpha': self.alpha.tolist()\n", 147 | " }\n", 148 | "\n", 149 | " return j" 150 | ] 151 | }, 152 | { 153 | "cell_type": "code", 154 | "execution_count": null, 155 | "metadata": {}, 156 | "outputs": [], 157 | "source": [ 158 | "class DPMMPython:\n", 159 | " \"\"\"\n", 160 | " Wrapper for the DPMMSubCluster Julia package\n", 161 | " \"\"\"\n", 162 | "\n", 163 | " @staticmethod\n", 164 | " def create_niw_prior(dim, mean_prior, mean_str, cov_prior, cov_str):\n", 165 | " \"\"\"\n", 166 | " Creates a gaussian prior, if cov_prior is a scalar, then creates an isotropic prior scaled to that, if its a matrix\n", 167 | " uses it as covariance\n", 168 | " :param dim: data dimension\n", 169 | " :param mean_prior: if a scalar, will create a vector scaled to that, if its a vector then use it as the prior mean\n", 170 | " :param mean_str: prior mean psuedo count\n", 171 | " :param cov_prior: if a scalar, will create an isotropic covariance scaled to cov_prior, if a matrix will use it as\n", 172 | " the covariance.\n", 173 | " :param cov_str: prior covariance psuedo counts\n", 174 | " :return: DPMMSubClusters.niw_hyperparams prior\n", 175 | " \"\"\"\n", 176 | " if isinstance(mean_prior, (int, float)):\n", 177 | " prior_mean = np.ones(dim) * mean_prior\n", 178 | " else:\n", 179 | " prior_mean = mean_prior\n", 180 | "\n", 181 | " if isinstance(cov_prior, (int, float)):\n", 182 | " prior_covariance = np.eye(dim) * cov_prior\n", 183 | " else:\n", 184 | " prior_covariance = cov_prior\n", 185 | " prior = niw(mean_str, prior_mean, dim + cov_str, prior_covariance)\n", 186 | " return prior\n", 187 | "\n", 188 | " @staticmethod\n", 189 | " def create_mnmm_prior(alpha, dim):\n", 190 | " prior = multinomial(alpha, dim)\n", 191 | " return prior\n", 192 | "\n", 193 | " @staticmethod\n", 194 | " def fit(data, alpha, prior=None,\n", 195 | " iterations=100, verbose=False,\n", 196 | " burnout=15, gt=None, outlier_weight=0, outlier_params=None, gpu=True, force_kernel = 2):\n", 197 | " \"\"\"\n", 198 | " Wrapper for DPMMSubClusters fit, reffer to \"https://bgu-cs-vil.github.io/DPMMSubClusters.jl/stable/usage/\" for specification\n", 199 | " Note that directly working with the returned clusters can be problematic software displaying the workspace (such as PyCharm debugger).\n", 200 | " :return: labels, clusters, sublabels\n", 201 | " \"\"\"\n", 202 | " if gpu == True:\n", 203 | " np.save(\"modelData.npy\", np.swapaxes(data, 0, 1))\n", 204 | "\n", 205 | " modelParams = {'alpha': alpha,\n", 206 | " 'iterations': iterations,\n", 207 | " 'use_verbose': verbose,\n", 208 | " 'burnout_period': burnout,\n", 209 | " 'force_kernel': force_kernel,\n", 210 | " 'outlier_mod': outlier_weight,\n", 211 | " 'outlier_hyper_params': outlier_params,\n", 212 | " 'hyper_params': prior.to_JSON()\n", 213 | " }\n", 214 | " if gt is not None:\n", 215 | " modelParams['gt'] = gt.tolist()\n", 216 | "\n", 217 | " with open('modelParams.json', 'w') as f:\n", 218 | " json.dump(modelParams, f)\n", 219 | "\n", 220 | " if platform.system().startswith('Windows'):\n", 221 | " process = subprocess.Popen([FULL_PATH_TO_PACKAGE_IN_WINDOWS,\n", 222 | " \"--prior_type=\" + prior.get_type(), \"--model_path=modelData.npy\",\n", 223 | " \"--params_path=modelParams.json\", \"--result_path=result.json\"])\n", 224 | " elif platform.system().startswith(\"Linux\"):\n", 225 | " process = subprocess.Popen(\n", 226 | " [FULL_PATH_TO_PACKAGE_IN_LINUX,\n", 227 | " \"--prior_type=\" + prior.get_type(), \"--model_path=modelData.npy\", \"--params_path=modelParams.json\",\n", 228 | " \"--result_path=result.json\"])\n", 229 | " else:\n", 230 | " print(f'Not support {platform.system()} OS')\n", 231 | "\n", 232 | " out, err = process.communicate()\n", 233 | " errcode = process.returncode\n", 234 | "\n", 235 | " process.kill()\n", 236 | " process.terminate()\n", 237 | "\n", 238 | " with open('result.json') as f:\n", 239 | " results_json = json.load(f)\n", 240 | "\n", 241 | " if \"error\" in results_json:\n", 242 | " print(f'Error:{results_json[\"error\"]}')\n", 243 | " return [], []\n", 244 | "\n", 245 | " os.remove(\"result.json\")\n", 246 | " return results_json[\"labels\"], None, [results_json[\"weights\"], results_json[\"iter_count\"]]\n", 247 | "\n", 248 | " else:\n", 249 | " if prior == None:\n", 250 | " results = DPMMSubClusters.fit(data, alpha, iters=iterations,\n", 251 | " verbose=verbose, burnout=burnout,\n", 252 | " gt=gt, outlier_weight=outlier_weight,\n", 253 | " outlier_params=outlier_params)\n", 254 | " else:\n", 255 | " results = DPMMSubClusters.fit(data, prior.to_julia_prior(), alpha, iters=iterations,\n", 256 | " verbose=verbose, burnout=burnout,\n", 257 | " gt=gt, outlier_weight=outlier_weight,\n", 258 | " outlier_params=outlier_params)\n", 259 | " return results[0],results[1],results[2:]\n", 260 | "\n", 261 | " @staticmethod\n", 262 | " def get_model_ll(points, labels, clusters):\n", 263 | " \"\"\"\n", 264 | " Wrapper for DPMMSubClusters cluster statistics\n", 265 | " :param points: data\n", 266 | " :param labels: labels\n", 267 | " :param clusters: vector of clusters distributions\n", 268 | " :return: vector with each cluster avg ll\n", 269 | " \"\"\"\n", 270 | " return DPMMSubClusters.cluster_statistics(points, labels, clusters)[0]\n", 271 | "\n", 272 | " @staticmethod\n", 273 | " def add_procs(procs_count):\n", 274 | " j = julia.Julia()\n", 275 | " j.eval('using Distributed')\n", 276 | " j.eval('addprocs(' + str(procs_count) + ')')\n", 277 | " j.eval('@everywhere using DPMMSubClusters')\n", 278 | " j.eval('@everywhere using LinearAlgebra')\n", 279 | " j.eval('@everywhere BLAS.set_num_threads(2)')\n", 280 | "\n", 281 | " @staticmethod\n", 282 | " def generate_gaussian_data(sample_count, dim, components, var):\n", 283 | " '''\n", 284 | " Wrapper for DPMMSubClusters cluster statistics\n", 285 | " :param sample_count: how much of samples\n", 286 | " :param dim: samples dimension\n", 287 | " :param components: number of components\n", 288 | " :param var: variance between componenets means\n", 289 | " :return: (data, gt)\n", 290 | " '''\n", 291 | " data = DPMMSubClusters.generate_gaussian_data(sample_count, dim, components, var)\n", 292 | " gt = data[1]\n", 293 | " data = data[0]\n", 294 | " return data, gt\n", 295 | "\n", 296 | " @staticmethod\n", 297 | " def generate_mnmm_data(sample_count, dim, components, trials):\n", 298 | " '''\n", 299 | " Wrapper for DPMMSubClusters cluster statistics\n", 300 | " :param sample_count: how much of samples\n", 301 | " :param dim: samples dimension\n", 302 | " :param components: number of components\n", 303 | " :param trials: draws from each vector\n", 304 | " :return: (data, gt)\n", 305 | " '''\n", 306 | " data = DPMMSubClusters.generate_mnmm_data(sample_count, dim, components, trials)\n", 307 | " gt = data[1]\n", 308 | " data = data[0]\n", 309 | " return data, gt" 310 | ] 311 | }, 312 | { 313 | "cell_type": "code", 314 | "execution_count": null, 315 | "metadata": {}, 316 | "outputs": [], 317 | "source": [ 318 | "from sklearn.decomposition import PCA\n", 319 | "\n", 320 | "def generate_gaussian_data(n_samples, d, k):\n", 321 | " if d > 4:\n", 322 | " return DPMMPython.generate_gaussian_data(n_samples, d, k, 0.1)\n", 323 | " else:\n", 324 | " return DPMMPython.generate_gaussian_data(n_samples, d, k, 100)\n", 325 | "\n", 326 | "def generate_mnmm_data(n_samples, d, k):\n", 327 | " print(f'start generate_mnmm_data: {datetime.now()}')\n", 328 | " return DPMMPython.generate_mnmm_data(n_samples, d, k, 50)\n", 329 | "\n", 330 | "def generate_mnist_data(n_samples, d, k):\n", 331 | " data = np.load('mnist_images.npy')\n", 332 | " pca = PCA(n_components=d)\n", 333 | " data = pca.fit(data).transform(data)\n", 334 | " data = data - data.mean(axis = 0)\n", 335 | " data = data / data.std(axis = 0)\n", 336 | " data = np.swapaxes(data, 0, 1)\n", 337 | " gt = np.load('mnist_labels.npy').flatten()\n", 338 | " return data, gt\n", 339 | "\n", 340 | "def generate_fashion_mnist_data(n_samples, d, k):\n", 341 | " data = np.load('fashion_mnist_images.npy')\n", 342 | " pca = PCA(n_components=d)\n", 343 | " data = pca.fit(data).transform(data)\n", 344 | " data = data - data.mean(axis = 0)\n", 345 | " data = data / data.std(axis = 0)\n", 346 | " data = np.swapaxes(data, 0, 1)\n", 347 | " gt = np.load('fashion_mnist_labels.npy')\n", 348 | " return data, gt\n", 349 | "\n", 350 | "def generate_imagenet64_data(n_samples, d, k):\n", 351 | " data = np.load('imagenet64_images.npy')\n", 352 | " data = np.swapaxes(data, 0, 1)\n", 353 | " gt = np.load('imagenet64_labels.npy')\n", 354 | " return data, gt\n", 355 | "\n", 356 | "def generate_20newsgroups20k_data(n_samples, d, k):\n", 357 | " data = np.load('20newsgroups20000_train.npy')\n", 358 | " data = np.swapaxes(data, 0, 1)\n", 359 | " gt = np.load('20newsgroups20000_labels.npy')\n", 360 | " return data, gt" 361 | ] 362 | }, 363 | { 364 | "cell_type": "code", 365 | "execution_count": null, 366 | "metadata": {}, 367 | "outputs": [], 368 | "source": [ 369 | "def run_test(n_samples, d, k, numIter = 10, max_iter = 100, model='',\n", 370 | " get_data=generate_gaussian_data, prior = None, prior_niw_if_none= True, alpha = 1, burnout = 15,\n", 371 | " force_kernel = 0, run_julia = True, run_cuda = True, run_sklearn = True):\n", 372 | " print(f'n_samples={n_samples}, d={d}, k={k}, numIter={numIter}, model={model}: {datetime.now()}')\n", 373 | " #Generate sample\n", 374 | " data, gt = get_data(n_samples, d, k)\n", 375 | " if prior == None:\n", 376 | " if prior_niw_if_none:\n", 377 | " prior = DPMMPython.create_niw_prior(d, 0, 1, 1, 1)\n", 378 | " else:\n", 379 | " prior = DPMMPython.create_mnmm_prior(1,d)\n", 380 | "\n", 381 | " df = pd.DataFrame()\n", 382 | " df.index.name = 'Iter'\n", 383 | "\n", 384 | " params_str = str(n_samples)+'_'+str(d)+'_'+str(k)\n", 385 | " for i in range(numIter):\n", 386 | " if run_julia:\n", 387 | " #Julia\n", 388 | " print(f'Julia...: {datetime.now()}')\n", 389 | " labels,_,more = DPMMPython.fit(data, alpha, iterations = max_iter, prior = prior, verbose = False, burnout = burnout, gt = gt, gpu=False)\n", 390 | " nmi_result = nmi(gt, labels)\n", 391 | " print(f'NMI:{nmi_result}')\n", 392 | " df['Time_elapse_'+params_str+'_Julia'+str(i)] = more[1]\n", 393 | " df['NMI_' + params_str + '_Julia' + str(i)] = nmi_result\n", 394 | "\n", 395 | " if run_cuda:\n", 396 | " #Cuda\n", 397 | " print(f'Cuda...: {datetime.now()}')\n", 398 | " labels,_,more = DPMMPython.fit(data, alpha, iterations = max_iter, prior = prior, verbose = True, burnout = burnout, gt = gt, gpu=True, force_kernel = force_kernel)\n", 399 | " nmi_result = nmi(gt, labels)\n", 400 | " print(f'NMI:{nmi_result}')\n", 401 | " df['NMI_'+params_str+'_Cuda'+str(i)] = nmi_result\n", 402 | " df['Time_elapse_'+params_str+'_Cuda' + str(i)] = more[1]\n", 403 | "\n", 404 | " if run_sklearn:\n", 405 | " # Sklearn GM\n", 406 | " print(f'Sklearn_GM......: {datetime.now()}')\n", 407 | " gm = GaussianMixture(n_components=k, random_state=0, max_iter=max_iter, verbose=0, verbose_interval=1000)\n", 408 | " tic = time()\n", 409 | " gm.fit(data.T)\n", 410 | " gmm_time = time() - tic\n", 411 | " labels_pred = gm.predict(data.T)\n", 412 | " nmi_result = nmi(gt, labels_pred)\n", 413 | " print(f'NMI:{nmi_result}')\n", 414 | " df['NMI_'+ params_str+'_Sklearn_GM' + str(i)] = nmi_result\n", 415 | " df['Time_elapse_'+params_str+'_Sklearn_GM' + str(i)] = gmm_time\n", 416 | "\n", 417 | " #Sklearn BGM\n", 418 | " print(f'Sklearn_BGM......: {datetime.now()}')\n", 419 | " if i > 1 and d > 64:\n", 420 | " print('Skip on this iteration... too slow')\n", 421 | " continue\n", 422 | " gm = BayesianGaussianMixture(n_components=k*5, random_state=0, max_iter=max_iter, verbose=0, verbose_interval=1000)\n", 423 | " tic = time()\n", 424 | " gm.fit(data.T)\n", 425 | " gmm_time = time() - tic\n", 426 | " labels_pred = gm.predict(data.T)\n", 427 | " nmi_result = nmi(gt, labels_pred)\n", 428 | " print(f'NMI:{nmi_result}')\n", 429 | " df['NMI_'+ params_str+'_Sklearn_BGM' + str(i)] = nmi_result\n", 430 | " df['Time_elapse_'+params_str+'_Sklearn_BGM' + str(i)] = gmm_time\n", 431 | "\n", 432 | " path = os.path.join('results','run_result_'+model+'_'+params_str+'.csv')\n", 433 | " df = df.reindex(sorted(df.columns), axis=1)\n", 434 | " df.to_csv(path)" 435 | ] 436 | }, 437 | { 438 | "cell_type": "code", 439 | "execution_count": null, 440 | "metadata": {}, 441 | "outputs": [], 442 | "source": [ 443 | "# run known datasets\n", 444 | "N = 60000\n", 445 | "D = 32\n", 446 | "K = 10\n", 447 | "repeats = 10\n", 448 | "max_iter = 100\n", 449 | "\n", 450 | "run_test(60000, D, K, repeats, max_iter = 300, model='mnist', get_data=generate_mnist_data, prior=DPMMPython.create_niw_prior(D, 0, 1, 1.46, 456.8))\n", 451 | "run_test(60000, D, K, repeats, max_iter = 200, model='fashion_mnist', get_data=generate_fashion_mnist_data, prior=DPMMPython.create_niw_prior(D, 0, 1, 1.46, 456.8))\n", 452 | "run_test(125000, 64, 100, repeats, max_iter = 200, model='imagenet64', get_data=generate_imagenet64_data, prior=DPMMPython.create_niw_prior(64, 0, 1, 0.177459, 720.139))\n", 453 | "run_test(11314, 20000, 20, repeats, max_iter = 100, model='20newsgroups10k', prior_niw_if_none=False, get_data=generate_20newsgroups20k_data, force_kernel=2, run_sklearn=False)\n", 454 | "print(f'Complete test: {datetime.now()}')\n" 455 | ] 456 | }, 457 | { 458 | "cell_type": "code", 459 | "execution_count": null, 460 | "metadata": { 461 | "pycharm": { 462 | "name": "#%%\n" 463 | } 464 | }, 465 | "outputs": [], 466 | "source": [ 467 | "# generate gaussian random data\n", 468 | "\n", 469 | "max_iter = 100\n", 470 | "repeats = 10\n", 471 | "\n", 472 | "for N in [1000,10000,100000,1000000]:\n", 473 | " for D in [2,4,8,16,32,64,128]:\n", 474 | " for K in [4,8,16,32]:\n", 475 | " run_test(N, D, K, repeats, max_iter = max_iter, model='generated_gaussian', get_data=generate_gaussian_data)" 476 | ] 477 | }, 478 | { 479 | "cell_type": "code", 480 | "execution_count": null, 481 | "metadata": {}, 482 | "outputs": [], 483 | "source": [ 484 | "# generate multinomial random data\n", 485 | "\n", 486 | "max_iter = 100\n", 487 | "repeats = 10\n", 488 | "for N in [1000,10000,100000,1000000]:\n", 489 | " for D in [4,8,16,32,64,128]:\n", 490 | " for K in [4,8,16,32]:\n", 491 | " if D >= K:\n", 492 | " run_test(N, D, K, repeats, max_iter = max_iter, model='generate_mnmm', prior_niw_if_none=False, get_data=generate_mnmm_data, run_sklearn=False)" 493 | ] 494 | }, 495 | { 496 | "cell_type": "code", 497 | "execution_count": null, 498 | "metadata": { 499 | "pycharm": { 500 | "name": "#%%\n" 501 | } 502 | }, 503 | "outputs": [], 504 | "source": [ 505 | "#Collect all csv files results to one file\n", 506 | "from os import listdir\n", 507 | "from os.path import isfile, join\n", 508 | "\n", 509 | "def calculate_nmi_mean(pd,str_to_search):\n", 510 | " filter_col = [col for col in pd if str_to_search in col and 'NMI' in col]\n", 511 | " sum_value = 0\n", 512 | " for i in range(len(filter_col)):\n", 513 | " sum_value += pd[filter_col[i]].iloc[-1]\n", 514 | " if len(filter_col) > 0:\n", 515 | " mean = sum_value/len(filter_col)\n", 516 | " else:\n", 517 | " mean = -1\n", 518 | " return mean\n", 519 | "\n", 520 | "def calculate_time_mean(pd,str_to_search):\n", 521 | " filter_col = [col for col in pd if str_to_search in col and 'Time_elapse' in col]\n", 522 | " sum_value = 0\n", 523 | " for i in range(len(filter_col)):\n", 524 | " if 'Sklearn' in str_to_search:\n", 525 | " sum_value += pd[filter_col[i]].iloc[-1]\n", 526 | " else:\n", 527 | " sum_value += pd[filter_col[i]].sum(axis=0)\n", 528 | " if len(filter_col) > 0:\n", 529 | " mean = sum_value/len(filter_col)\n", 530 | " else:\n", 531 | " mean = -1\n", 532 | " return mean\n", 533 | "\n", 534 | "def create_all_result_file(result_type, calculate_mean, model):\n", 535 | " columns_list = ['Params','Cuda','Julia','Sklearn_GM','Sklearn_BGM', 'X','D','K']\n", 536 | " df_all = pd.DataFrame(columns=columns_list)\n", 537 | "\n", 538 | " files = [f for f in listdir('results') if isfile(join('results', f))]\n", 539 | "\n", 540 | " for file in files:\n", 541 | " if 'run_result_'+model+'_' in file:\n", 542 | " pd_file = pd.read_csv(os.path.join('results',file),index_col=0)\n", 543 | " params = file.replace('.csv','').replace('run_result_'+model+'_','')\n", 544 | " params_list = params.split('_')\n", 545 | " new_row = pd.DataFrame([[params,\n", 546 | " calculate_mean(pd_file,'Cuda'),\n", 547 | " calculate_mean(pd_file,'Julia'),\n", 548 | " calculate_mean(pd_file,'Sklearn_GM'),\n", 549 | " calculate_mean(pd_file,'Sklearn_BGM'),\n", 550 | " params_list[0],\n", 551 | " params_list[1],\n", 552 | " params_list[2]\n", 553 | " ]], columns=columns_list)\n", 554 | " df_all = df_all.append(new_row, ignore_index=True)\n", 555 | " \n", 556 | " df_all = df_all.astype({'X': int, 'D': int, 'K': int})\n", 557 | " df_all = df_all.sort_values(by=['X','D','K'])\n", 558 | " path = os.path.join('results','run_result_all_'+model+'_'+result_type+'_table.csv')\n", 559 | " df_all.to_csv(path, index=False)\n", 560 | "\n", 561 | "create_all_result_file('NMI', calculate_nmi_mean, 'mnist')\n", 562 | "create_all_result_file('time', calculate_time_mean, 'mnist')\n", 563 | "create_all_result_file('NMI', calculate_nmi_mean, 'fashion_mnist')\n", 564 | "create_all_result_file('time', calculate_time_mean, 'fashion_mnist')\n", 565 | "create_all_result_file('NMI', calculate_nmi_mean, 'imagenet64')\n", 566 | "create_all_result_file('time', calculate_time_mean, 'imagenet64')\n", 567 | "create_all_result_file('NMI', calculate_nmi_mean, 'generated_gaussian')\n", 568 | "create_all_result_file('time', calculate_time_mean, 'generated_gaussian')\n", 569 | "create_all_result_file('NMI', calculate_nmi_mean, 'generate_mnmm')\n", 570 | "create_all_result_file('time', calculate_time_mean, 'generate_mnmm')\n", 571 | "create_all_result_file('NMI', calculate_nmi_mean, '20newsgroups10k')\n", 572 | "create_all_result_file('time', calculate_time_mean, '20newsgroups10k')" 573 | ] 574 | } 575 | ], 576 | "metadata": { 577 | "celltoolbar": "Raw Cell Format", 578 | "interpreter": { 579 | "hash": "34b414f817d4cb14ae50d2374de9fe6611a9d65cd3430da6f5e12d03a6418fd7" 580 | }, 581 | "kernelspec": { 582 | "display_name": "Python 3", 583 | "language": "python", 584 | "name": "python3" 585 | }, 586 | "language_info": { 587 | "codemirror_mode": { 588 | "name": "ipython", 589 | "version": 3 590 | }, 591 | "file_extension": ".py", 592 | "mimetype": "text/x-python", 593 | "name": "python", 594 | "nbconvert_exporter": "python", 595 | "pygments_lexer": "ipython3", 596 | "version": "3.8.11" 597 | } 598 | }, 599 | "nbformat": 4, 600 | "nbformat_minor": 2 601 | } -------------------------------------------------------------------------------- /dpmmpython/dpmmwrapper.py: -------------------------------------------------------------------------------- 1 | import julia 2 | julia.install() 3 | from dpmmpython.priors import niw, multinomial 4 | from julia import DPMMSubClusters 5 | import numpy as np 6 | import platform 7 | import subprocess 8 | import json 9 | import os 10 | 11 | 12 | class DPMMPython: 13 | """ 14 | Wrapper for the DPMMSubCluster Julia package 15 | """ 16 | 17 | @staticmethod 18 | def create_prior(dim,mean_prior,mean_str,cov_prior,cov_str): 19 | """ 20 | Creates a gaussian prior, if cov_prior is a scalar, then creates an isotropic prior scaled to that, if its a matrix 21 | uses it as covariance 22 | :param dim: data dimension 23 | :param mean_prior: if a scalar, will create a vector scaled to that, if its a vector then use it as the prior mean 24 | :param mean_str: prior mean psuedo count 25 | :param cov_prior: if a scalar, will create an isotropic covariance scaled to cov_prior, if a matrix will use it as 26 | the covariance. 27 | :param cov_str: prior covariance psuedo counts 28 | :return: DPMMSubClusters.niw_hyperparams prior 29 | """ 30 | if isinstance(mean_prior,(int,float)): 31 | prior_mean = np.ones(dim) * mean_prior 32 | else: 33 | prior_mean = mean_prior 34 | 35 | if isinstance(cov_prior, (int, float)): 36 | prior_covariance = np.eye(dim) * cov_prior 37 | else: 38 | prior_covariance = cov_prior 39 | prior =niw(mean_str,prior_mean,dim + cov_str, prior_covariance) 40 | return prior 41 | 42 | @staticmethod 43 | def fit(data,alpha, prior = None, 44 | iterations= 100, verbose = False, 45 | burnout=15, gt=None, outlier_weight=0, outlier_params=None,smart_splits=False, gpu=False, force_kernel = 2): 46 | """ 47 | Wrapper for DPMMSubClusters fit, reffer to "https://bgu-cs-vil.github.io/DPMMSubClusters.jl/stable/usage/" for specification 48 | Note that directly working with the returned clusters can be problematic software displaying the workspace (such as PyCharm debugger). 49 | :return: labels, clusters, sublabels 50 | """ 51 | if gpu == True: 52 | np.save("modelData.npy", np.swapaxes(data, 0, 1)) 53 | modelParams = {'alpha': alpha, 54 | 'iterations': iterations, 55 | 'use_verbose': verbose, 56 | 'burnout_period': burnout, 57 | 'force_kernel': force_kernel, 58 | 'outlier_mod': outlier_weight, 59 | 'outlier_hyper_params': outlier_params, 60 | 'hyper_params': prior.to_JSON() 61 | } 62 | if gt is not None: 63 | modelParams['gt'] = gt.tolist() 64 | with open('modelParams.json', 'w') as f: 65 | json.dump(modelParams, f) 66 | if platform.system().startswith('Windows'): 67 | FULL_PATH_TO_PACKAGE_IN_WINDOWS = os.environ.get('DPMM_GPU_FULL_PATH_TO_PACKAGE_IN_WINDOWS') 68 | process = subprocess.Popen([FULL_PATH_TO_PACKAGE_IN_WINDOWS, 69 | "--prior_type=" + prior.get_type(), "--model_path=modelData.npy", 70 | "--params_path=modelParams.json", "--result_path=result.json"]) 71 | elif platform.system().startswith("Linux"): 72 | FULL_PATH_TO_PACKAGE_IN_LINUX = os.environ.get('DPMM_GPU_FULL_PATH_TO_PACKAGE_IN_LINUX') 73 | process = subprocess.Popen( 74 | [FULL_PATH_TO_PACKAGE_IN_LINUX, 75 | "--prior_type=" + prior.get_type(), "--model_path=modelData.npy", "--params_path=modelParams.json", 76 | "--result_path=result.json"]) 77 | else: 78 | print(f'Not support {platform.system()} OS') 79 | out, err = process.communicate() 80 | errcode = process.returncode 81 | process.kill() 82 | process.terminate() 83 | with open('result.json') as f: 84 | results_json = json.load(f) 85 | if "error" in results_json: 86 | print(f'Error:{results_json["error"]}') 87 | return [], [] 88 | os.remove("result.json") 89 | return results_json["labels"], None, [results_json["weights"], results_json["iter_count"]] 90 | else: 91 | if prior == None: 92 | results = DPMMSubClusters.fit(data, alpha, iters=iterations, 93 | verbose=verbose, burnout=burnout, 94 | gt=gt, outlier_weight=outlier_weight, 95 | outlier_params=outlier_params, 96 | smart_splits=smart_splits) 97 | else: 98 | results = DPMMSubClusters.fit(data, prior.to_julia_prior(), alpha, iters=iterations, 99 | verbose=verbose, burnout=burnout, 100 | gt=gt, outlier_weight=outlier_weight, 101 | outlier_params=outlier_params, 102 | smart_splits=smart_splits) 103 | return results[0],results[1],results[2:] 104 | 105 | @staticmethod 106 | def get_model_ll(points,labels,clusters): 107 | """ 108 | Wrapper for DPMMSubClusters cluster statistics 109 | :param points: data 110 | :param labels: labels 111 | :param clusters: vector of clusters distributions 112 | :return: vector with each cluster avg ll 113 | """ 114 | return DPMMSubClusters.cluster_statistics(points,labels,clusters)[0] 115 | 116 | @staticmethod 117 | def add_procs(procs_count): 118 | j = julia.Julia() 119 | j.eval('using Distributed') 120 | j.eval('addprocs(' + str(procs_count) + ')') 121 | j.eval('@everywhere using DPMMSubClusters') 122 | j.eval('@everywhere using LinearAlgebra') 123 | j.eval('@everywhere BLAS.set_num_threads(2)') 124 | 125 | 126 | @staticmethod 127 | def generate_gaussian_data(sample_count,dim,components,var): 128 | ''' 129 | Wrapper for DPMMSubClusters cluster statistics 130 | :param sample_count: how much of samples 131 | :param dim: samples dimension 132 | :param components: number of components 133 | :param var: variance between componenets means 134 | :return: (data, gt) 135 | ''' 136 | data = DPMMSubClusters.generate_gaussian_data(sample_count, dim, components, var) 137 | gt = data[1] 138 | data = data[0] 139 | return data,gt 140 | 141 | @staticmethod 142 | def predict(model,data): 143 | ''' 144 | Given a DPMM Model (which is located in fit(...)[2][-1] for backwards compatibility), 145 | predict the clusters for a data. The predict is using each cluster predictive posterior, 146 | in contrary to the model itself during training, which sample from the posterior. 147 | :params model: a DPMM (Julia object) model, returned from fit 148 | :data: The data in which to predict, DxN (similar to the fit argument) 149 | :return: labels 150 | ''' 151 | return DPMMSubClusters.predict(model,data) 152 | 153 | 154 | if __name__ == "__main__": 155 | j = julia.Julia() 156 | data,gt = DPMMPython.generate_gaussian_data(10000, 2, 10, 100.0) 157 | prior = niw(kappa = 1, mu = np.ones(2)*0, nu = 3, psi = np.eye(2)) 158 | # labels_j,_,sub_labels= DPMMPython.fit(data, 100, prior = prior, verbose = True, gt = gt, gpu = False) 159 | labels_j,_,sub_labels = DPMMPython.fit(data, 100, prior = prior, verbose = True, gt = gt, gpu = True) 160 | -------------------------------------------------------------------------------- /dpmmpython/dpmmwrapper.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BGU-CS-VIL/dpmmpython/f1cb9eac6f0bed45933cc5cad515890152d9d7c2/dpmmpython/dpmmwrapper.pyc -------------------------------------------------------------------------------- /dpmmpython/install.py: -------------------------------------------------------------------------------- 1 | import julia 2 | import os 3 | import sys 4 | import wget 5 | import tarfile 6 | from julia.api import Julia 7 | 8 | 9 | 10 | def get_julia_path_from_dir(base_dir): 11 | dir_content = os.listdir(base_dir) 12 | julia_path = base_dir 13 | for item in dir_content: 14 | if os.path.isdir(os.path.join(julia_path,item)): 15 | julia_path = os.path.join(julia_path,item) 16 | break 17 | 18 | return os.path.join(julia_path,'bin','julia'),os.path.join(julia_path,'bin') 19 | 20 | 21 | 22 | def install(julia_download_path = 'https://julialang-s3.julialang.org/bin/linux/x64/1.4/julia-1.4.0-linux-x86_64.tar.gz', julia_target_path = None): 23 | ''' 24 | :param julia_download_path: Path for the julia download, you can modify for your preferred version 25 | :param julia_target_path: Specify where to install Julia, if not specified will install in $HOME$/julia 26 | ''' 27 | if julia_target_path == None: 28 | julia_target_path = os.path.join(os.path.expanduser("~"),'julia') 29 | if not os.path.isdir(julia_target_path): 30 | os.mkdir(julia_target_path) 31 | download_path = os.path.join(julia_target_path,'julia_install.tar.gz') 32 | print("Downloading Julia:") 33 | wget.download(julia_download_path, download_path) 34 | print("\nExtracting...") 35 | tar = tarfile.open(download_path,"r:gz") 36 | tar.extractall(julia_target_path) 37 | _, partial_path = get_julia_path_from_dir(julia_target_path) 38 | os.environ["PATH"] += os.pathsep + partial_path 39 | os.system("echo '# added by dpmmpython' >> ~/.bashrc") 40 | os.system("echo 'export PATH=\""+partial_path+":$PATH\"' >> ~/.bashrc") 41 | print("Configuring PyJulia") 42 | julia.install() 43 | julia.Julia(compiled_modules=False) 44 | print("Adding DPMMSubClusters package") 45 | from julia import Pkg 46 | Pkg.add("DPMMSubClusters") 47 | print("Please exit the shell and restart, before attempting to use the package") 48 | 49 | 50 | if __name__ == "__main__": 51 | install() -------------------------------------------------------------------------------- /dpmmpython/priors.py: -------------------------------------------------------------------------------- 1 | import julia 2 | from julia import DPMMSubClusters 3 | import numpy as np 4 | 5 | class prior: 6 | def to_julia_prior(self): 7 | pass 8 | 9 | def get_type(self): 10 | pass 11 | def to_JSON(self): 12 | pass 13 | class niw(prior): 14 | def __init__(self, kappa, mu, nu, psi): 15 | if nu < len(mu): 16 | raise Exception('nu should be atleast the Dim') 17 | self.kappa = kappa 18 | self.mu = mu 19 | self.nu = nu 20 | self.psi = psi 21 | 22 | 23 | def to_julia_prior(self): 24 | return DPMMSubClusters.niw_hyperparams(self.kappa,self.mu,self.nu, self.psi) 25 | 26 | def get_type(self): 27 | return 'Gaussian' 28 | 29 | def to_JSON(self): 30 | j = {'k': self.kappa, 31 | 'm': self.mu.tolist(), 32 | 'v': self.nu, 33 | 'psi': self.psi.tolist() 34 | } 35 | return j 36 | class multinomial(prior): 37 | def __init__(self, alpha,dim = 1): 38 | if isinstance(alpha,np.ndarray): 39 | self.alpha = alpha 40 | else: 41 | self.alpha = np.ones(dim)*alpha 42 | 43 | 44 | 45 | def to_julia_prior(self): 46 | return DPMMSubClusters.multinomial_hyper(self.alpha) 47 | def get_type(self): 48 | return 'Multinomial' 49 | def to_JSON(self): 50 | j = {'alpha': self.alpha.tolist() 51 | } 52 | return j -------------------------------------------------------------------------------- /dpmmpython/release.py: -------------------------------------------------------------------------------- 1 | # This file is executed via setup.py and imported via __init__.py 2 | 3 | __version__ = "0.1.10" 4 | # For Python versioning scheme, see: 5 | # https://www.python.org/dev/peps/pep-0440/#version-scheme -------------------------------------------------------------------------------- /dpmmpython/replication.py: -------------------------------------------------------------------------------- 1 | import os 2 | import platform 3 | import julia 4 | julia.install() 5 | from julia.api import Julia 6 | from dpmmpython.priors import niw, multinomial 7 | from julia import DPMMSubClusters 8 | import numpy as np 9 | import subprocess 10 | import json 11 | import pandas as pd 12 | from sklearn.mixture import GaussianMixture 13 | from sklearn.mixture import BayesianGaussianMixture 14 | from sklearn.metrics import normalized_mutual_info_score as nmi 15 | from time import time 16 | from datetime import datetime 17 | from sklearn.decomposition import PCA 18 | from os import listdir 19 | from os.path import isfile, join 20 | import tensorflow as tf 21 | from sklearn.datasets import fetch_20newsgroups_vectorized 22 | import gdown 23 | 24 | # Set the full path for the PMMSubClusters.exe in windows 25 | FULL_PATH_TO_PACKAGE_IN_WINDOWS = os.environ.get('DPMM_GPU_FULL_PATH_TO_PACKAGE_IN_WINDOWS') 26 | 27 | # Set the full path for the PMMSubClusters in Linux 28 | FULL_PATH_TO_PACKAGE_IN_LINUX = os.environ.get('DPMM_GPU_FULL_PATH_TO_PACKAGE_IN_LINUX') 29 | 30 | IS_SHORT = True 31 | 32 | class prior: 33 | def to_julia_prior(self): 34 | pass 35 | 36 | def get_type(self): 37 | pass 38 | 39 | def to_JSON(self): 40 | pass 41 | 42 | 43 | class niw(prior): 44 | def __init__(self, kappa, mu, nu, psi): 45 | if nu < len(mu): 46 | raise Exception('nu should be atleast the Dim') 47 | self.kappa = kappa 48 | self.mu = mu 49 | self.nu = nu 50 | self.psi = psi 51 | 52 | def to_julia_prior(self): 53 | return DPMMSubClusters.niw_hyperparams(self.kappa, self.mu, self.nu, self.psi) 54 | 55 | def get_type(self): 56 | return 'Gaussian' 57 | 58 | def to_JSON(self): 59 | j = {'k': self.kappa, 60 | 'm': self.mu.tolist(), 61 | 'v': self.nu, 62 | 'psi': self.psi.tolist() 63 | } 64 | 65 | return j 66 | 67 | 68 | class multinomial(prior): 69 | def __init__(self, alpha, dim=1): 70 | if isinstance(alpha, np.ndarray): 71 | self.alpha = alpha 72 | else: 73 | self.alpha = np.ones(dim) * alpha 74 | 75 | def to_julia_prior(self): 76 | return DPMMSubClusters.multinomial_hyper(self.alpha) 77 | 78 | def get_type(self): 79 | return 'Multinomial' 80 | 81 | def to_JSON(self): 82 | j = {'alpha': self.alpha.tolist() 83 | } 84 | 85 | return j 86 | 87 | 88 | class DPMMPython: 89 | """ 90 | Wrapper for the DPMMSubCluster Julia package 91 | """ 92 | 93 | @staticmethod 94 | def create_niw_prior(dim, cov_prior, cov_str, mean_prior = 0 , kappa = 1): 95 | """ 96 | Creates a gaussian prior, if cov_prior is a scalar, then creates an isotropic prior scaled to that, if its a matrix 97 | uses it as covariance 98 | :param dim: data dimension 99 | :param mean_prior: if a scalar, will create a vector scaled to that, if its a vector then use it as the prior mean 100 | :param kappa: prior mean psuedo count 101 | :param cov_prior: if a scalar, will create an isotropic covariance scaled to cov_prior, if a matrix will use it as 102 | the covariance. 103 | :param cov_str: prior covariance psuedo counts 104 | :return: DPMMSubClusters.niw_hyperparams prior 105 | """ 106 | if isinstance(mean_prior, (int, float)): 107 | prior_mean = np.ones(dim) * mean_prior 108 | else: 109 | prior_mean = mean_prior 110 | 111 | if isinstance(cov_prior, (int, float)): 112 | prior_covariance = np.eye(dim) * cov_prior 113 | else: 114 | prior_covariance = cov_prior 115 | prior = niw(kappa, prior_mean, dim + cov_str, prior_covariance) 116 | return prior 117 | 118 | @staticmethod 119 | def create_mnmm_prior(alpha, dim): 120 | prior = multinomial(alpha, dim) 121 | return prior 122 | 123 | @staticmethod 124 | def fit(data, alpha, prior=None, 125 | iterations=100, verbose=False, 126 | burnout=15, gt=None, outlier_weight=0, outlier_params=None, gpu=True, force_kernel=2): 127 | """ 128 | Wrapper for DPMMSubClusters fit, reffer to "https://bgu-cs-vil.github.io/DPMMSubClusters.jl/stable/usage/" for specification 129 | Note that directly working with the returned clusters can be problematic software displaying the workspace (such as PyCharm debugger). 130 | :return: labels, clusters, sublabels 131 | """ 132 | if gpu == True: 133 | np.save("modelData.npy", np.swapaxes(data, 0, 1)) 134 | 135 | modelParams = {'alpha': alpha, 136 | 'iterations': iterations, 137 | 'use_verbose': verbose, 138 | 'burnout_period': burnout, 139 | 'force_kernel': force_kernel, 140 | 'outlier_mod': outlier_weight, 141 | 'outlier_hyper_params': outlier_params, 142 | 'hyper_params': prior.to_JSON() 143 | } 144 | if gt is not None: 145 | modelParams['gt'] = gt.tolist() 146 | 147 | with open('modelParams.json', 'w') as f: 148 | json.dump(modelParams, f) 149 | 150 | if platform.system().startswith('Windows'): 151 | process = subprocess.Popen([FULL_PATH_TO_PACKAGE_IN_WINDOWS, 152 | "--prior_type=" + prior.get_type(), "--model_path=modelData.npy", 153 | "--params_path=modelParams.json", "--result_path=result.json"]) 154 | elif platform.system().startswith("Linux"): 155 | process = subprocess.Popen( 156 | [FULL_PATH_TO_PACKAGE_IN_LINUX, 157 | "--prior_type=" + prior.get_type(), "--model_path=modelData.npy", "--params_path=modelParams.json", 158 | "--result_path=result.json"]) 159 | else: 160 | print(f'Not support {platform.system()} OS') 161 | 162 | out, err = process.communicate() 163 | errcode = process.returncode 164 | 165 | process.kill() 166 | process.terminate() 167 | 168 | with open('result.json') as f: 169 | results_json = json.load(f) 170 | 171 | if "error" in results_json: 172 | print(f'Error:{results_json["error"]}') 173 | return [], [] 174 | 175 | os.remove("result.json") 176 | return results_json["labels"], None, [results_json["weights"], results_json["iter_count"]] 177 | 178 | else: 179 | if prior == None: 180 | results = DPMMSubClusters.fit(data, alpha, iters=iterations, 181 | verbose=verbose, burnout=burnout, 182 | gt=gt, outlier_weight=outlier_weight, 183 | outlier_params=outlier_params) 184 | else: 185 | results = DPMMSubClusters.fit(data, prior.to_julia_prior(), alpha, iters=iterations, 186 | verbose=verbose, burnout=burnout, 187 | gt=gt, outlier_weight=outlier_weight, 188 | outlier_params=outlier_params) 189 | return results[0], results[1], results[2:] 190 | 191 | @staticmethod 192 | def get_model_ll(points, labels, clusters): 193 | """ 194 | Wrapper for DPMMSubClusters cluster statistics 195 | :param points: data 196 | :param labels: labels 197 | :param clusters: vector of clusters distributions 198 | :return: vector with each cluster avg ll 199 | """ 200 | return DPMMSubClusters.cluster_statistics(points, labels, clusters)[0] 201 | 202 | @staticmethod 203 | def add_procs(procs_count): 204 | j = julia.Julia() 205 | j.eval('using Distributed') 206 | j.eval('addprocs(' + str(procs_count) + ')') 207 | j.eval('@everywhere using DPMMSubClusters') 208 | j.eval('@everywhere using LinearAlgebra') 209 | j.eval('@everywhere BLAS.set_num_threads(2)') 210 | 211 | @staticmethod 212 | def generate_gaussian_data(sample_count, dim, components, var): 213 | ''' 214 | Wrapper for DPMMSubClusters cluster statistics 215 | :param sample_count: how much of samples 216 | :param dim: samples dimension 217 | :param components: number of components 218 | :param var: variance between componenets means 219 | :return: (data, gt) 220 | ''' 221 | data = DPMMSubClusters.generate_gaussian_data(sample_count, dim, components, var) 222 | gt = data[1] 223 | data = data[0] 224 | return data, gt 225 | 226 | @staticmethod 227 | def generate_mnmm_data(sample_count, dim, components, trials): 228 | ''' 229 | Wrapper for DPMMSubClusters cluster statistics 230 | :param sample_count: how much of samples 231 | :param dim: samples dimension 232 | :param components: number of components 233 | :param trials: draws from each vector 234 | :return: (data, gt) 235 | ''' 236 | data = DPMMSubClusters.generate_mnmm_data(sample_count, dim, components, trials) 237 | gt = data[1] 238 | data = data[0] 239 | return data, gt 240 | 241 | def generate_gaussian_data(n_samples, d, k): 242 | if d > 4: 243 | return DPMMPython.generate_gaussian_data(n_samples, d, k, 0.1) 244 | else: 245 | return DPMMPython.generate_gaussian_data(n_samples, d, k, 100) 246 | 247 | 248 | def generate_mnmm_data(n_samples, d, k): 249 | print(f'start generate_mnmm_data: {datetime.now()}') 250 | return DPMMPython.generate_mnmm_data(n_samples, d, k, 50) 251 | 252 | 253 | def generate_mnist_data(n_samples, d, k): 254 | data = np.load('mnist_images.npy') 255 | pca = PCA(n_components=d) 256 | data = pca.fit(data).transform(data) 257 | data = data - data.mean(axis=0) 258 | data = data / data.std(axis=0) 259 | data = np.swapaxes(data, 0, 1) 260 | gt = np.load('mnist_labels.npy').flatten() 261 | return data, gt 262 | 263 | 264 | def generate_fashion_mnist_data(n_samples, d, k): 265 | data = np.load('fashion_mnist_images.npy') 266 | pca = PCA(n_components=d) 267 | data = pca.fit(data).transform(data) 268 | data = data - data.mean(axis=0) 269 | data = data / data.std(axis=0) 270 | data = np.swapaxes(data, 0, 1) 271 | gt = np.load('fashion_mnist_labels.npy') 272 | return data, gt 273 | 274 | 275 | def generate_imagenet64_data(n_samples, d, k): 276 | data = np.load('imagenet64_images.npy') 277 | data = np.swapaxes(data, 0, 1) 278 | gt = np.load('imagenet64_labels.npy') 279 | return data, gt 280 | 281 | 282 | def generate_20newsgroups20k_data(n_samples, d, k): 283 | data = np.load('20newsgroups20000_train.npy') 284 | data = np.swapaxes(data, 0, 1) 285 | gt = np.load('20newsgroups20000_labels.npy') 286 | return data, gt 287 | 288 | 289 | def run_test(n_samples, d, k, numIter=10, max_iter=100, model='', 290 | get_data=generate_gaussian_data, prior=None, prior_niw_if_none=True, alpha=1, burnout=15, 291 | force_kernel=0, run_julia=True, run_cuda=True, run_sklearn=True): 292 | print(f'n_samples={n_samples}, d={d}, k={k}, numIter={numIter}, model={model}: {datetime.now()}') 293 | # Generate sample 294 | data, gt = get_data(n_samples, d, k) 295 | if prior == None: 296 | if prior_niw_if_none: 297 | prior = DPMMPython.create_niw_prior(d, 1, 1, 0, 1) 298 | else: 299 | prior = DPMMPython.create_mnmm_prior(1, d) 300 | 301 | df = pd.DataFrame() 302 | df.index.name = 'Iter' 303 | 304 | params_str = str(n_samples) + '_' + str(d) + '_' + str(k) 305 | for i in range(numIter): 306 | if run_julia: 307 | # Julia 308 | print(f'Julia...: {datetime.now()}') 309 | labels, _, more = DPMMPython.fit(data, alpha, iterations=max_iter, prior=prior, verbose=False, 310 | burnout=burnout, gt=gt, gpu=False) 311 | nmi_result = nmi(gt, labels) 312 | print(f'NMI:{nmi_result}') 313 | df['Time_elapse_' + params_str + '_Julia' + str(i)] = more[1] 314 | df['NMI_' + params_str + '_Julia' + str(i)] = nmi_result 315 | 316 | if run_cuda: 317 | # Cuda 318 | print(f'Cuda...: {datetime.now()}') 319 | labels, _, more = DPMMPython.fit(data, alpha, iterations=max_iter, prior=prior, verbose=True, 320 | burnout=burnout, gt=gt, gpu=True, force_kernel=force_kernel) 321 | nmi_result = nmi(gt, labels) 322 | print(f'NMI:{nmi_result}') 323 | df['NMI_' + params_str + '_Cuda' + str(i)] = nmi_result 324 | df['Time_elapse_' + params_str + '_Cuda' + str(i)] = more[1] 325 | 326 | if run_sklearn: 327 | # Sklearn GM 328 | print(f'Sklearn_GM......: {datetime.now()}') 329 | gm = GaussianMixture(n_components=k, random_state=0, max_iter=max_iter, verbose=0, verbose_interval=1000) 330 | tic = time() 331 | gm.fit(data.T) 332 | gmm_time = time() - tic 333 | labels_pred = gm.predict(data.T) 334 | nmi_result = nmi(gt, labels_pred) 335 | print(f'NMI:{nmi_result}') 336 | df['NMI_' + params_str + '_Sklearn_GM' + str(i)] = nmi_result 337 | df['Time_elapse_' + params_str + '_Sklearn_GM' + str(i)] = gmm_time 338 | 339 | # Sklearn BGM 340 | print(f'Sklearn_BGM......: {datetime.now()}') 341 | if i > 1 and d > 64: 342 | print('Skip on this iteration... too slow') 343 | continue 344 | gm = BayesianGaussianMixture(n_components=k * 5, random_state=0, max_iter=max_iter, verbose=0, 345 | verbose_interval=1000) 346 | tic = time() 347 | gm.fit(data.T) 348 | gmm_time = time() - tic 349 | labels_pred = gm.predict(data.T) 350 | nmi_result = nmi(gt, labels_pred) 351 | print(f'NMI:{nmi_result}') 352 | df['NMI_' + params_str + '_Sklearn_BGM' + str(i)] = nmi_result 353 | df['Time_elapse_' + params_str + '_Sklearn_BGM' + str(i)] = gmm_time 354 | 355 | path = os.path.join('results', 'run_result_' + model + '_' + params_str + '.csv') 356 | df = df.reindex(sorted(df.columns), axis=1) 357 | df.to_csv(path) 358 | 359 | def calculate_nmi_mean(pd, str_to_search): 360 | filter_col = [col for col in pd if str_to_search in col and 'NMI' in col] 361 | sum_value = 0 362 | for i in range(len(filter_col)): 363 | sum_value += pd[filter_col[i]].iloc[-1] 364 | if len(filter_col) > 0: 365 | mean = sum_value / len(filter_col) 366 | else: 367 | mean = -1 368 | return mean 369 | 370 | 371 | def calculate_time_mean(pd, str_to_search): 372 | filter_col = [col for col in pd if str_to_search in col and 'Time_elapse' in col] 373 | sum_value = 0 374 | for i in range(len(filter_col)): 375 | if 'Sklearn' in str_to_search: 376 | sum_value += pd[filter_col[i]].iloc[-1] 377 | else: 378 | sum_value += pd[filter_col[i]].sum(axis=0) 379 | if len(filter_col) > 0: 380 | mean = sum_value / len(filter_col) 381 | else: 382 | mean = -1 383 | return mean 384 | 385 | 386 | def create_all_result_file(result_type, calculate_mean, model): 387 | columns_list = ['Params', 'Cuda', 'Julia', 'Sklearn_GM', 'Sklearn_BGM', 'X', 'D', 'K'] 388 | df_all = pd.DataFrame(columns=columns_list) 389 | 390 | files = [f for f in listdir('results') if isfile(join('results', f))] 391 | 392 | for file in files: 393 | if 'run_result_' + model + '_' in file: 394 | pd_file = pd.read_csv(os.path.join('results', file), index_col=0) 395 | params = file.replace('.csv', '').replace('run_result_' + model + '_', '') 396 | params_list = params.split('_') 397 | new_row = pd.DataFrame([[params, 398 | calculate_mean(pd_file, 'Cuda'), 399 | calculate_mean(pd_file, 'Julia'), 400 | calculate_mean(pd_file, 'Sklearn_GM'), 401 | calculate_mean(pd_file, 'Sklearn_BGM'), 402 | params_list[0], 403 | params_list[1], 404 | params_list[2] 405 | ]], columns=columns_list) 406 | df_all = df_all.append(new_row, ignore_index=True) 407 | 408 | df_all = df_all.astype({'X': int, 'D': int, 'K': int}) 409 | df_all = df_all.sort_values(by=['X', 'D', 'K']) 410 | path = os.path.join('results', 'run_result_all_' + model + '_' + result_type + '_table.csv') 411 | df_all.to_csv(path, index=False) 412 | 413 | def buildDB(): 414 | mnist = tf.keras.datasets.mnist 415 | (train_images, train_labels), (test_images, test_labels) = mnist.load_data() 416 | train_images = train_images.reshape(60000, 784) 417 | np.save("mnist_images.npy",train_images) 418 | np.save("mnist_labels.npy",train_labels) 419 | 420 | if not IS_SHORT: 421 | url = "https://drive.google.com/uc?id=1_FgNQ5v9UnMSTbGduJjvue0a2p-EK2JZ" 422 | output = "imagenet_short.csv" 423 | gdown.download(url, output=output, quiet=False) 424 | 425 | fashion_mnist = tf.keras.datasets.fashion_mnist 426 | (train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data() 427 | train_images = train_images.reshape(60000, 784) 428 | np.save("fashion_mnist_images.npy",train_images) 429 | np.save("fashion_mnist_labels.npy",train_labels) 430 | 431 | cifar10 = tf.keras.datasets.cifar10 432 | (train_images, train_labels), (test_images, test_labels) = cifar10.load_data() 433 | train_images = train_images.reshape(50000, 32*32*3) 434 | np.save("cifar10_images.npy",train_images) 435 | np.save("cifar10_labels.npy",train_labels) 436 | 437 | from numpy import genfromtxt 438 | data = genfromtxt('imagenet_short.csv', delimiter=',') 439 | np.save("imagenet64_images.npy",data[:,0:64]) 440 | np.save("imagenet64_labels.npy",data[:,-1]) 441 | print(data[:,-1]) 442 | 443 | data, labels = fetch_20newsgroups_vectorized(subset='train',return_X_y=True,normalize=False) 444 | 445 | D = 20000 446 | print(data.shape) 447 | sum_row = data.sum(axis=0) 448 | sorted_sum_row = np.argsort(sum_row, axis=1)[0,::-1] 449 | indices = np.squeeze(np.asarray(sorted_sum_row[0,:D])) 450 | data_array = data[:,indices].toarray() 451 | np.save("20newsgroups"+str(D)+"_train.npy",data_array) 452 | np.save("20newsgroups"+str(D)+"_labels.npy",labels+1) 453 | 454 | 455 | def run(is_short): 456 | global IS_SHORT 457 | IS_SHORT = is_short 458 | 459 | buildDB() 460 | if platform.system().startswith('Windows'): 461 | if FULL_PATH_TO_PACKAGE_IN_WINDOWS == None: 462 | print( 463 | 'Missing path for windows package. For example: FULL_PATH_TO_PACKAGE_IN_WINDOWS = "C:/DPMMSubClusters.exe"') 464 | assert (False) 465 | elif platform.system().startswith("Linux"): 466 | if FULL_PATH_TO_PACKAGE_IN_LINUX == None: 467 | print( 468 | 'Missing path for linux package. For example: FULL_PATH_TO_PACKAGE_IN_LINUX = "/home/user/bin/DPMMSubClusters"') 469 | assert (False) 470 | 471 | jl = Julia(compiled_modules=False) 472 | 473 | if not os.path.exists('results'): 474 | os.makedirs('results') 475 | 476 | # run known datasets 477 | D = 32 478 | K = 10 479 | if IS_SHORT: 480 | repeats = 1 481 | else: 482 | repeats = 10 483 | 484 | run_test(60000, D, K, repeats, max_iter=300, model='mnist', get_data=generate_mnist_data, 485 | prior=DPMMPython.create_niw_prior(D, 1.46, 456.8)) 486 | if not IS_SHORT: 487 | run_test(60000, D, K, repeats, max_iter=200, model='fashion_mnist', get_data=generate_fashion_mnist_data, 488 | prior=DPMMPython.create_niw_prior(D, 1.46, 456.8)) 489 | run_test(125000, 64, 100, repeats, max_iter=200, model='imagenet64', get_data=generate_imagenet64_data, 490 | prior=DPMMPython.create_niw_prior(64, 0.177459, 720.139)) 491 | run_test(11314, 20000, 20, repeats, max_iter=100, model='20newsgroups10k', prior_niw_if_none=False, 492 | get_data=generate_20newsgroups20k_data, force_kernel=2, run_sklearn=False) 493 | print(f'Complete test: {datetime.now()}') 494 | 495 | # generate gaussian random data 496 | 497 | max_iter = 100 498 | 499 | if IS_SHORT: 500 | repeats = 1 501 | N_range = [100000] 502 | D_range = [4] 503 | K_range = [4] 504 | else: 505 | repeats = 10 506 | N_range = [1000, 10000, 100000, 1000000] 507 | D_range = [2, 4, 8, 16, 32, 64, 128] 508 | K_range = [4, 8, 16, 32] 509 | 510 | for N in N_range: 511 | for D in D_range: 512 | for K in K_range: 513 | run_test(N, D, K, repeats, max_iter=max_iter, model='generated_gaussian', 514 | get_data=generate_gaussian_data) 515 | # generate multinomial random data 516 | 517 | max_iter = 100 518 | if IS_SHORT: 519 | repeats = 1 520 | N_range = [100000] 521 | D_range = [4] 522 | K_range = [4] 523 | else: 524 | repeats = 10 525 | N_range = [1000, 10000, 100000, 1000000] 526 | D_range = [4, 8, 16, 32, 64, 128] 527 | K_range = [4, 8, 16, 32] 528 | 529 | for N in N_range: 530 | for D in D_range: 531 | for K in K_range: 532 | if D >= K: 533 | run_test(N, D, K, repeats, max_iter=max_iter, model='generate_mnmm', prior_niw_if_none=False, 534 | get_data=generate_mnmm_data, run_sklearn=False) 535 | 536 | create_all_result_file('NMI', calculate_nmi_mean, 'mnist') 537 | create_all_result_file('time', calculate_time_mean, 'mnist') 538 | create_all_result_file('NMI', calculate_nmi_mean, 'generated_gaussian') 539 | create_all_result_file('time', calculate_time_mean, 'generated_gaussian') 540 | create_all_result_file('NMI', calculate_nmi_mean, 'generate_mnmm') 541 | create_all_result_file('time', calculate_time_mean, 'generate_mnmm') 542 | if not IS_SHORT: 543 | create_all_result_file('NMI', calculate_nmi_mean, 'fashion_mnist') 544 | create_all_result_file('time', calculate_time_mean, 'fashion_mnist') 545 | create_all_result_file('NMI', calculate_nmi_mean, 'imagenet64') 546 | create_all_result_file('time', calculate_time_mean, 'imagenet64') 547 | create_all_result_file('NMI', calculate_nmi_mean, '20newsgroups10k') 548 | create_all_result_file('time', calculate_time_mean, '20newsgroups10k') -------------------------------------------------------------------------------- /dpmmpython/replication_full.py: -------------------------------------------------------------------------------- 1 | import replication 2 | 3 | if __name__ == "__main__": 4 | replication.run(False) -------------------------------------------------------------------------------- /dpmmpython/replication_short.py: -------------------------------------------------------------------------------- 1 | import replication 2 | 3 | if __name__ == "__main__": 4 | replication.run(True) -------------------------------------------------------------------------------- /examples/multi_process.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "#### Multiprocessing example\n", 8 | "\n", 9 | "In this example, we will Distributed the computation over several processes" 10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": 1, 15 | "metadata": {}, 16 | "outputs": [], 17 | "source": [ 18 | "import numpy as np\n", 19 | "from matplotlib import pyplot as plt\n", 20 | "from dpmmpython.dpmmwrapper import DPMMPython\n", 21 | "from dpmmpython.priors import niw\n", 22 | "from time import time" 23 | ] 24 | }, 25 | { 26 | "cell_type": "markdown", 27 | "metadata": {}, 28 | "source": [ 29 | "Generate some high dimensional data" 30 | ] 31 | }, 32 | { 33 | "cell_type": "code", 34 | "execution_count": 2, 35 | "metadata": {}, 36 | "outputs": [], 37 | "source": [ 38 | "D = 128 # Dimension\n", 39 | "K = 20 # Number of Clusters\n", 40 | "N = 200000 #Number of points\n", 41 | "var_scale = 0.1 # The variance of the MV-Normal distribution where the clusters means are sampled from.\n", 42 | "data, labels = DPMMPython.generate_gaussian_data(N, D, K, var_scale)" 43 | ] 44 | }, 45 | { 46 | "cell_type": "code", 47 | "execution_count": 3, 48 | "metadata": {}, 49 | "outputs": [], 50 | "source": [ 51 | "prior = niw(1,np.zeros(D),D+3,np.eye(D)*1.0)\n", 52 | "alpha = 10.0" 53 | ] 54 | }, 55 | { 56 | "cell_type": "code", 57 | "execution_count": 6, 58 | "metadata": {}, 59 | "outputs": [], 60 | "source": [ 61 | "tic = time()\n", 62 | "results = DPMMPython.fit(data,alpha,prior = prior,iterations=200)\n", 63 | "toc = time()-tic" 64 | ] 65 | }, 66 | { 67 | "cell_type": "code", 68 | "execution_count": 7, 69 | "metadata": {}, 70 | "outputs": [ 71 | { 72 | "name": "stdout", 73 | "output_type": "stream", 74 | "text": [ 75 | "481.3736569881439\n" 76 | ] 77 | } 78 | ], 79 | "source": [ 80 | "print(toc)" 81 | ] 82 | }, 83 | { 84 | "cell_type": "code", 85 | "execution_count": 8, 86 | "metadata": {}, 87 | "outputs": [ 88 | { 89 | "name": "stderr", 90 | "output_type": "stream", 91 | "text": [ 92 | "/home/dinari/anaconda2/envs/py36/lib/python3.6/site-packages/julia/core.py:689: FutureWarning: Accessing `Julia().` to obtain Julia objects is deprecated. Use `from julia import Main; Main.` or `jl = Julia(); jl.eval('')`.\n", 93 | " FutureWarning,\n" 94 | ] 95 | } 96 | ], 97 | "source": [ 98 | "DPMMPython.add_procs(4)" 99 | ] 100 | }, 101 | { 102 | "cell_type": "code", 103 | "execution_count": 11, 104 | "metadata": {}, 105 | "outputs": [], 106 | "source": [ 107 | "tic = time()\n", 108 | "results = DPMMPython.fit(data,alpha,prior = prior,iterations=200)\n", 109 | "toc = time()-tic" 110 | ] 111 | }, 112 | { 113 | "cell_type": "code", 114 | "execution_count": 12, 115 | "metadata": {}, 116 | "outputs": [ 117 | { 118 | "name": "stdout", 119 | "output_type": "stream", 120 | "text": [ 121 | "320.60466051101685\n" 122 | ] 123 | } 124 | ], 125 | "source": [ 126 | "print(toc)" 127 | ] 128 | }, 129 | { 130 | "cell_type": "markdown", 131 | "metadata": {}, 132 | "source": [ 133 | "When working with large datasets, using multiple processes could increase performance." 134 | ] 135 | } 136 | ], 137 | "metadata": { 138 | "kernelspec": { 139 | "display_name": "Python 3.6.8 64-bit ('py36': conda)", 140 | "language": "python", 141 | "name": "python36864bitpy36conda05fe657eec56468098aba7ffec2c4b33" 142 | }, 143 | "language_info": { 144 | "codemirror_mode": { 145 | "name": "ipython", 146 | "version": 3 147 | }, 148 | "file_extension": ".py", 149 | "mimetype": "text/x-python", 150 | "name": "python", 151 | "nbconvert_exporter": "python", 152 | "pygments_lexer": "ipython3", 153 | "version": "3.6.8" 154 | } 155 | }, 156 | "nbformat": 4, 157 | "nbformat_minor": 2 158 | } 159 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import setuptools 2 | 3 | import os 4 | from io import open # for Python 2 (identical to builtin in Python 3) 5 | 6 | from setuptools import find_packages, setup 7 | 8 | 9 | def pyload(name): 10 | ns = {} 11 | with open(name, encoding="utf-8") as f: 12 | exec(compile(f.read(), name, "exec"), ns) 13 | return ns 14 | 15 | 16 | # In case it's Python 2: 17 | try: 18 | execfile 19 | except NameError: 20 | pass 21 | else: 22 | def pyload(path): 23 | ns = {} 24 | execfile(path, ns) 25 | return ns 26 | 27 | 28 | repo_root = os.path.abspath(os.path.dirname(__file__)) 29 | 30 | with open(os.path.join(repo_root, "README.md"), encoding="utf-8") as f: 31 | long_description = f.read() 32 | 33 | 34 | ns = pyload(os.path.join(repo_root, "dpmmpython", "release.py")) 35 | version = ns["__version__"] 36 | 37 | 38 | 39 | setup(name='dpmmpython', 40 | version=version, 41 | description="Python wrapper for DPMMSubClusters julia package", 42 | long_description=long_description, 43 | long_description_content_type="text/markdown", 44 | author='Or Dinari', 45 | author_email='dinari@post.bgu.ac.il', 46 | license='MIT', 47 | keywords='julia python', 48 | classifiers=[ 49 | # How mature is this project? Common values are 50 | # 3 - Alpha 51 | # 4 - Beta 52 | # 5 - Production/Stable 53 | 'Development Status :: 3 - Alpha', 54 | 55 | # Indicate who your project is intended for 56 | #'Intended Audience :: Developers', 57 | 58 | 'License :: OSI Approved :: MIT License', 59 | 60 | # Specify the Python versions you support here. In particular, ensure 61 | # that you indicate whether you support Python 2, Python 3 or both. 62 | 'Programming Language :: Python :: 2', 63 | 'Programming Language :: Python :: 2.7', 64 | 'Programming Language :: Python :: 3', 65 | 'Programming Language :: Python :: 3.4', 66 | 'Programming Language :: Python :: 3.5', 67 | 'Programming Language :: Python :: 3.6', 68 | 'Programming Language :: Python :: 3.7', 69 | ], 70 | url='https://github.com/BGU-CS-VIL/DPMMPython', 71 | project_urls={ 72 | "Source": "https://github.com/BGU-CS-VIL/DPMMPython", 73 | "Tracker": "https://github.com/BGU-CS-VIL/issues", 74 | "Documentation": "https://bgu-cs-vil.github.io/DPMMSubClusters.jl/latest/", 75 | }, 76 | packages=find_packages(), 77 | install_requires=[ 78 | 'julia','wget' 79 | ], 80 | extras_require={ 81 | # Update `ci/test-upload/tox.ini` when "test" is changed: 82 | "test": [ 83 | "numpy", 84 | "ipython", 85 | # pytest 4.4 for pytest.skip in doctest: 86 | # https://github.com/pytest-dev/pytest/pull/4927 87 | "pytest>=4.4", 88 | "mock", 89 | ], 90 | }, 91 | ) --------------------------------------------------------------------------------