├── .coveragerc ├── .github └── workflows │ ├── docs.yml │ └── tests.yml ├── .gitignore ├── .pip_readme.rst ├── LICENSE.txt ├── LICENSE_EXT.txt ├── ProxNest ├── __init__.py ├── logs.py ├── operators │ ├── __init__.py │ ├── proximal_operators.py │ ├── sensing_operators.py │ └── wavelet_operators.py ├── optimisations │ ├── __init__.py │ ├── l1_norm_prox.py │ ├── l2_ball_proj.py │ └── tv_norm_prox.py ├── sampling │ ├── __init__.py │ ├── proximal_nested.py │ └── resampling.py ├── tests │ ├── __init__.py │ ├── test_logs.py │ ├── test_operators.py │ ├── test_optimisations.py │ ├── test_sampling.py │ └── test_utils.py └── utils.py ├── README.rst ├── build_proxnest.sh ├── data ├── galaxy_image_256.npy └── galaxy_image_64.npy ├── docs ├── Makefile ├── _static │ └── css │ │ ├── custom.css │ │ └── custom_tabs.css ├── api │ ├── index.rst │ ├── l1_norm_prox.rst │ ├── l2_ball_proj.rst │ ├── logs.rst │ ├── prox_nested.rst │ ├── prox_ops.rst │ ├── resampling.rst │ ├── sense_ops.rst │ ├── tv_norm_prox.rst │ ├── utils.rst │ └── wav_ops.rst ├── assets │ ├── ProxNestLogo.png │ └── static_notebooks │ │ ├── galaxy_denoising.ipynb │ │ ├── galaxy_radio.ipynb │ │ └── gaussian_benchmark.ipynb ├── background │ └── index.rst ├── conf.py ├── index.rst ├── tutorials │ ├── galaxy_denoising.nblink │ ├── galaxy_radio.nblink │ └── gaussian_benchmark.nblink └── user_guide │ └── install.rst ├── logs └── logging.yaml ├── notebooks ├── galaxy_denoising.ipynb ├── galaxy_radio.ipynb └── gaussian_benchmark.ipynb ├── pytest.ini ├── requirements ├── requirements-core.txt ├── requirements-docs.txt ├── requirements-notebooks.txt └── requirements-tests.txt └── setup.py /.coveragerc: -------------------------------------------------------------------------------- 1 | [report] 2 | omit = 3 | *test_* 4 | *__init__* -------------------------------------------------------------------------------- /.github/workflows/docs.yml: -------------------------------------------------------------------------------- 1 | name: 
Docs 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | # Add following in for testing 8 | # - feature/deploy_docs 9 | 10 | jobs: 11 | build: 12 | 13 | runs-on: ubuntu-latest 14 | strategy: 15 | matrix: 16 | python-version: [3.9] 17 | 18 | steps: 19 | - name: Checkout Source 20 | uses: actions/checkout@v2.3.1 21 | 22 | - name: Set up Python ${{ matrix.python-version }} 23 | uses: actions/setup-python@v2 24 | with: 25 | python-version: ${{ matrix.python-version }} 26 | 27 | - name: Install package 28 | run: | 29 | sudo apt install pandoc 30 | python -m pip install --upgrade pip 31 | bash build_proxnest.sh 32 | 33 | - name: Build Documentation 34 | run: | 35 | cd docs && make html 36 | 37 | - name: Deploy 38 | if: github.ref == 'refs/heads/main' 39 | uses: JamesIves/github-pages-deploy-action@4.1.5 40 | with: 41 | branch: gh-pages # The branch the action should deploy to. 42 | folder: docs/_build/html # The folder the action should deploy. 43 | -------------------------------------------------------------------------------- /.github/workflows/tests.yml: -------------------------------------------------------------------------------- 1 | name: Tests 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | branches: 9 | - main 10 | 11 | jobs: 12 | build: 13 | 14 | runs-on: ubuntu-latest 15 | strategy: 16 | matrix: 17 | python-version: [3.9] 18 | 19 | steps: 20 | - name: Checkout Source 21 | uses: actions/checkout@v2.3.1 22 | 23 | - name: Set up Python ${{ matrix.python-version }} 24 | uses: actions/setup-python@v2 25 | with: 26 | python-version: ${{ matrix.python-version }} 27 | 28 | - name: Install package 29 | run: | 30 | sudo apt install pandoc 31 | python -m pip install --upgrade pip 32 | bash build_proxnest.sh 33 | 34 | - name: Run tests 35 | run: | 36 | pytest --cov-report term --cov=ProxNest --cov-config=.coveragerc 37 | codecov --token eb6c2b0b-4ad0-4c7d-9d8a-1d22cdf79673 38 | 39 | 
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.cache/ 2 | __pycache__/ 3 | *.c 4 | *.cpython* 5 | build/ 6 | _build/ 7 | *~ 8 | .DS_Store 9 | logs/*.log 10 | .coverage 11 | .test.dat 12 | notebooks/.ipynb_checkpoints/ 13 | notebooks/.temp.* 14 | .vscode/ 15 | docs/assets/static_notebooks/.ipynb_checkpoints/ 16 | docs/assets/static_notebooks/.temp.* 17 | dist/ 18 | ProxNest.egg-info/ 19 | -------------------------------------------------------------------------------- /.pip_readme.rst: -------------------------------------------------------------------------------- 1 | .. image:: https://img.shields.io/badge/GitHub-ProxNest-brightgreen.svg?style=flat 2 | :target: https://github.com/astro-informatics/proxnest 3 | .. image:: https://github.com/astro-informatics/proxnest/actions/workflows/tests.yml/badge.svg?branch=main 4 | :target: https://github.com/astro-informatics/proxnest/actions/workflows/tests.yml 5 | .. image:: https://github.com/astro-informatics/proxnest/actions/workflows/docs.yml/badge.svg 6 | :target: https://astro-informatics.github.io/proxnest 7 | .. image:: https://codecov.io/gh/astro-informatics/proxnest/branch/main/graph/badge.svg?token=oGowwdoMRN 8 | :target: https://codecov.io/gh/astro-informatics/proxnest 9 | .. image:: https://badge.fury.io/py/ProxNest.svg 10 | :target: https://badge.fury.io/py/ProxNest 11 | .. image:: https://img.shields.io/badge/License-GPL-blue.svg 12 | :target: http://perso.crans.org/besson/LICENSE.html 13 | .. 
image:: http://img.shields.io/badge/arXiv-2106.03646-orange.svg?style=flat 14 | :target: https://arxiv.org/abs/2106.03646 15 | 16 | ProxNest: Proximal nested sampling for high-dimensional Bayesian model selection 17 | ================================================================================================================= 18 | 19 | ``ProxNest`` is an open source, well tested and documented Python implementation of the *proximal nested sampling* algorithm (`Cai et al. 2022 `_) which is uniquely suited for sampling from very high-dimensional posteriors that are log-concave and potentially not smooth (*e.g.* Laplace priors). This is achieved by exploiting tools from proximal calculus and Moreau-Yosida regularisation (`Moreau 1962 `_) to efficiently sample from the prior subject to the hard likelihood constraint. The resulting Markov chain iterations include a gradient step, approximating (with arbitrary precision) an overdamped Langevin SDE that can scale to very high-dimensional applications. 20 | 21 | Basic Usage 22 | =========== 23 | 24 | The following is a straightforward example application to image denoising (Phi = I), regularised with Daubechies wavelets (DB6). 25 | 26 | .. code-block:: Python 27 | 28 | # Import relevant modules. 29 | import numpy as np 30 | import ProxNest 31 | 32 | # Load your data and set parameters. 33 | data = np.load() 34 | params = params # Parameters of the prior resampling optimisation problem. 35 | options = options # Options associated with the sampling strategy. 36 | 37 | # Construct your forward model (phi) and wavelet operators (psi). 38 | phi = ProxNest.operators.sensing_operators.Identity() 39 | psi = ProxNest.operators.wavelet_operators.db_wavelets(["db6"], 2, (dim, dim)) 40 | 41 | # Define proximal operators for both your likelihood and prior. 
42 | proxH = lambda x, T : ProxNest.operators.proximal_operators.l1_projection(x, T, delta, Psi=psi) 43 | proxB = lambda x, tau: ProxNest.optimisations.l2_ball_proj.sopt_fast_proj_B2(x, tau, params) 44 | 45 | # Write a lambda function to evaluate your likelihood term (here a Gaussian) 46 | LogLikeliL = lambda sol : - np.linalg.norm(y-phi.dir_op(sol), 'fro')**2/(2*sigma**2) 47 | 48 | # Perform proximal nested sampling 49 | BayEvi, XTrace = ProxNest.sampling.proximal_nested.ProxNestedSampling( 50 | np.abs(phi.adj_op(data)), LogLikeliL, proxH, proxB, params, options 51 | ) 52 | 53 | At this point you have recovered the tuple **BayEvi** and dict **Xtrace** which contain 54 | 55 | .. code-block:: python 56 | 57 | Live = options["samplesL"] # Number of live samples 58 | Disc = options["samplesD"] # Number of discarded samples 59 | 60 | # BayEvi is a tuple containing two values: 61 | BayEvi[0] = 'Estimate of Bayesian evidence (float).' 62 | BayEvi[1] = 'Variance of Bayesian evidence estimate (float).' 63 | 64 | # XTrace is a dictionary containing the np.ndarrays: 65 | XTrace['Liveset'] = 'Set of live samples (shape: Live, dim, dim).' 66 | XTrace['LivesetL'] = 'Likelihood of live samples (shape: Live).' 67 | 68 | XTrace['Discard'] = 'Set of discarded samples (shape: Disc, dim, dim).' 69 | XTrace['DiscardL'] = 'Likelihood of discarded samples (shape: Disc).' 70 | XTrace['DiscardW'] = 'Weights of discarded samples (shape: Disc).' 71 | 72 | XTrace['DiscardPostProb'] = 'Posterior probability of discarded samples (shape: Disc)' 73 | XTrace['DiscardPostMean'] = 'Posterior mean solution (shape: dim, dim)' 74 | 75 | from which one can perform *e.g.* Bayesian model comparison. 76 | 77 | Contributors 78 | ============ 79 | `Matthew Price `_, `Xiaohao Cai `_, `Jason McEwen `_, `Marcelo Pereyra `_, and contributors. 80 | 81 | Attribution 82 | =========== 83 | A BibTeX entry for ``ProxNest`` is: 84 | 85 | .. 
code-block:: 86 | 87 | @article{Cai:ProxNest:2021, 88 | author = {Cai, Xiaohao and McEwen, Jason~D. and Pereyra, Marcelo}, 89 | title = {"High-dimensional Bayesian model selection by proximal nested sampling"}, 90 | journal = {ArXiv}, 91 | eprint = {arXiv:2106.03646}, 92 | year = {2021} 93 | } 94 | 95 | License 96 | ======= 97 | 98 | ``ProxNest`` is released under the GPL-3 license (see `LICENSE.txt `_), subject to 99 | the non-commercial use condition (see `LICENSE_EXT.txt `_) 100 | 101 | .. code-block:: 102 | 103 | ProxNest 104 | Copyright (C) 2022 Matthew Price, Xiaohao Cai, Jason McEwen, Marcelo Pereyra & contributors 105 | 106 | This program is released under the GPL-3 license (see LICENSE.txt), 107 | subject to a non-commercial use condition (see LICENSE_EXT.txt). 108 | 109 | This program is distributed in the hope that it will be useful, 110 | but WITHOUT ANY WARRANTY; without even the implied warranty of 111 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 112 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | Preamble 9 | 10 | The GNU General Public License is a free, copyleft license for 11 | software and other kinds of works. 12 | 13 | The licenses for most software and other practical works are designed 14 | to take away your freedom to share and change the works. By contrast, 15 | the GNU General Public License is intended to guarantee your freedom to 16 | share and change all versions of a program--to make sure it remains free 17 | software for all its users. 
We, the Free Software Foundation, use the 18 | GNU General Public License for most of our software; it applies also to 19 | any other work released this way by its authors. You can apply it to 20 | your programs, too. 21 | 22 | When we speak of free software, we are referring to freedom, not 23 | price. Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | them if you wish), that you receive source code or can get it if you 26 | want it, that you can change the software or use pieces of it in new 27 | free programs, and that you know you can do these things. 28 | 29 | To protect your rights, we need to prevent others from denying you 30 | these rights or asking you to surrender the rights. Therefore, you have 31 | certain responsibilities if you distribute copies of the software, or if 32 | you modify it: responsibilities to respect the freedom of others. 33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must pass on to the recipients the same 36 | freedoms that you received. You must make sure that they, too, receive 37 | or can get the source code. And you must show them these terms so they 38 | know their rights. 39 | 40 | Developers that use the GNU GPL protect your rights with two steps: 41 | (1) assert copyright on the software, and (2) offer you this License 42 | giving you legal permission to copy, distribute and/or modify it. 43 | 44 | For the developers' and authors' protection, the GPL clearly explains 45 | that there is no warranty for this free software. For both users' and 46 | authors' sake, the GPL requires that modified versions be marked as 47 | changed, so that their problems will not be attributed erroneously to 48 | authors of previous versions. 
49 | 50 | Some devices are designed to deny users access to install or run 51 | modified versions of the software inside them, although the manufacturer 52 | can do so. This is fundamentally incompatible with the aim of 53 | protecting users' freedom to change the software. The systematic 54 | pattern of such abuse occurs in the area of products for individuals to 55 | use, which is precisely where it is most unacceptable. Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products. If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 60 | 61 | Finally, every program is threatened constantly by software patents. 62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". "Licensees" and 82 | "recipients" may be individuals or organizations. 83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. 
The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 
122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 
155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 
186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. 
This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 
256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. You need not require recipients to copy the 279 | Corresponding Source along with the object code. If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 
287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 
317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. 
If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 
386 | those licensors and authors. 387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 
421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. 
If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. The 475 | work thus licensed is called the contributor's "contributor version". 476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 
486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. "Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 
512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. 
If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 
578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 583 | 584 | Later license versions may give you additional or different 585 | permissions. However, no additional obligations are imposed on any 586 | author or copyright holder as a result of your choosing to follow a 587 | later version. 588 | 589 | 15. Disclaimer of Warranty. 590 | 591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 599 | 600 | 16. Limitation of Liability. 601 | 602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 610 | SUCH DAMAGES. 611 | 612 | 17. Interpretation of Sections 15 and 16. 
613 | 614 | If the disclaimer of warranty and limitation of liability provided 615 | above cannot be given local legal effect according to their terms, 616 | reviewing courts shall apply local law that most closely approximates 617 | an absolute waiver of all civil liability in connection with the 618 | Program, unless a warranty or assumption of liability accompanies a 619 | copy of the Program in return for a fee. 620 | 621 | END OF TERMS AND CONDITIONS 622 | 623 | How to Apply These Terms to Your New Programs 624 | 625 | If you develop a new program, and you want it to be of the greatest 626 | possible use to the public, the best way to achieve this is to make it 627 | free software which everyone can redistribute and change under these terms. 628 | 629 | To do so, attach the following notices to the program. It is safest 630 | to attach them to the start of each source file to most effectively 631 | state the exclusion of warranty; and each file should have at least 632 | the "copyright" line and a pointer to where the full notice is found. 633 | 634 | 635 | Copyright (C) 636 | 637 | This program is free software: you can redistribute it and/or modify 638 | it under the terms of the GNU General Public License as published by 639 | the Free Software Foundation, either version 3 of the License, or 640 | (at your option) any later version. 641 | 642 | This program is distributed in the hope that it will be useful, 643 | but WITHOUT ANY WARRANTY; without even the implied warranty of 644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 645 | GNU General Public License for more details. 646 | 647 | You should have received a copy of the GNU General Public License 648 | along with this program. If not, see . 649 | 650 | Also add information on how to contact you by electronic and paper mail. 
651 | 652 | If the program does terminal interaction, make it output a short 653 | notice like this when it starts in an interactive mode: 654 | 655 | Copyright (C) 656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 657 | This is free software, and you are welcome to redistribute it 658 | under certain conditions; type `show c' for details. 659 | 660 | The hypothetical commands `show w' and `show c' should show the appropriate 661 | parts of the General Public License. Of course, your program's commands 662 | might be different; for a GUI interface, you would use an "about box". 663 | 664 | You should also get your employer (if you work as a programmer) or school, 665 | if any, to sign a "copyright disclaimer" for the program, if necessary. 666 | For more information on this, and how to apply and follow the GNU GPL, see 667 | . 668 | 669 | The GNU General Public License does not permit incorporating your program 670 | into proprietary programs. If your program is a subroutine library, you 671 | may consider it more useful to permit linking proprietary applications with 672 | the library. If this is what you want to do, use the GNU Lesser General 673 | Public License instead of this License. But first, please read 674 | . 675 | -------------------------------------------------------------------------------- /LICENSE_EXT.txt: -------------------------------------------------------------------------------- 1 | NON-COMMERCIAL USE LICENSE CONDITION 2 | Version 1.0 3 | 4 | The Software is provided to you by the Licensor under the License, as defined 5 | below, subject to the following condition. 6 | 7 | The rights granted under the Licence apply to the use of the Software for 8 | academic, research and other similar non-commercial uses. 
def setup_logging(custom_yaml_path=None, default_level=logging.DEBUG):
    """Initialise and configure logging.

    Should be called at the beginning of code to initialise and configure the
    desired logging level. Logging levels can be ints in [0,50] where 10 is
    debug logging and 50 is critical logging.

    Args:

        custom_yaml_path (string): Complete pathname of desired yaml logging
            configuration. If empty will provide default logging config.

        default_level (int): Logging level at which to configure.

    Raises:

        ValueError: Raised if logging.yaml is not in ./logs/ directory.

    """
    # Repository root: two directories above the installed ProxNest package.
    repo_root = os.path.dirname(os.path.dirname(os.path.realpath(ProxNest.__file__)))

    if custom_yaml_path is None:
        path = os.path.join(repo_root, "logs", "logging.yaml")
    else:
        path = custom_yaml_path

    # The LOG_CFG environment variable overrides any path chosen above.
    value = os.getenv("LOG_CFG", None)
    if value:
        path = value

    if not os.path.exists(path):
        # Fall back to a bare-bones configuration before flagging the bad path.
        logging.basicConfig(level=default_level)
        raise ValueError("Logging config pathway incorrect.")

    with open(path, "rt") as f:
        config = yaml.safe_load(f.read())

    if custom_yaml_path is None:
        # Redirect the default file handlers to <repo>/logs/. The original
        # code assigned info_file_handler twice; one assignment suffices.
        for handler, filename in (
            ("info_file_handler", "info.log"),
            ("debug_file_handler", "debug.log"),
            ("critical_file_handler", "critical.log"),
        ):
            config["handlers"][handler]["filename"] = os.path.join(
                repo_root, "logs", filename
            )

    logging.config.dictConfig(config)
    critical_log("Using custom config from {}".format(path))


def debug_log(message):
    """Log a debug message (e.g. for background logs to assist debugging).

    Args:

        message: Message to log.

    """
    logger = logging.getLogger("ProxNest")
    logger.debug(message)


def warning_log(message):
    """Log a warning (e.g. for internal code warnings such as large dynamic
    ranges).

    Args:

        message: Warning to log.

    """
    logger = logging.getLogger("ProxNest")
    logger.warning(message)


def critical_log(message):
    """Log a critical message (e.g. core code failures etc).

    Args:

        message: Message to log.

    """
    logger = logging.getLogger("ProxNest")
    logger.critical(message)


def info_log(message):
    """Log an information message (e.g. evidence value printing, run completion
    etc).

    Args:

        message: Message to log.

    """
    logger = logging.getLogger("ProxNest")
    logger.info(message)
def soft_thresh(x, T, delta=2):
    r"""Compute the element-wise soft-thresholding of :math:`x`.

    Args:
        x (np.ndarray): Array to threshold.

        T (float): Soft-thresholding level (regularisation parameter)

        delta (float): Weighting parameter (default = 2).

    Returns:
        np.ndarray: Thresholded coefficients of :math:`x`.
    """
    # Shrink magnitudes towards zero by T*delta/2, clipping at zero.
    return np.sign(x) * np.maximum(np.abs(x) - T * delta / 2, 0)


def hard_thresh(x, T):
    r"""Compute the element-wise hard-thresholding of :math:`x`.

    Args:
        x (np.ndarray): Array to threshold.

        T (float): Hard-thresholding level (regularisation parameter)

    Returns:
        np.ndarray: Thresholded coefficients of :math:`x`.
    """
    # Keep entries with magnitude strictly above T; zero out the rest.
    # (The original docstring documented a `delta` parameter that does
    # not exist for hard-thresholding; it has been removed.)
    return x * (np.abs(x) > T).astype(float)
33 | """ 34 | return x * (np.abs(x) > T).astype(float) 35 | 36 | 37 | def l1_projection(x, T, delta, Psi=sense.Identity()): 38 | r"""Compute the l1 proximal operator wrt dictionary :math:`\Psi`. 39 | 40 | Args: 41 | x (np.ndarray): Array to threshold. 42 | 43 | T (float): Soft-thresholding level (regularisation parameter) 44 | 45 | delta (float): Weighting parameter. 46 | 47 | Psi (LinearOperator): Prior dictionary (default = Identity) 48 | 49 | Returns: 50 | np.ndarray: Thresholded coefficients of :math:`x`. 51 | """ 52 | u = Psi.dir_op(x) 53 | return x + Psi.adj_op(soft_thresh(u, T, delta) - u) 54 | 55 | 56 | def l2_projection(x, T, delta, Psi=sense.Identity()): 57 | r"""Compute the l2 gradient step wrt dictionary :math:`\Psi`. 58 | 59 | Args: 60 | x (np.ndarray): Array to threshold. 61 | 62 | T (float): Soft-thresholding level (regularisation parameter) 63 | 64 | delta (float): Weighting parameter. 65 | 66 | Psi (LinearOperator): Prior dictionary (default = Identity) 67 | 68 | Returns: 69 | np.ndarray: Thresholded coefficients of :math:`x`. 70 | """ 71 | return x - 2 * T * Psi.adj_op(Psi.dir_op(x)) * 2 * delta 72 | -------------------------------------------------------------------------------- /ProxNest/operators/sensing_operators.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from numpy.fft import fft2, ifft2, fftshift, ifftshift 3 | 4 | 5 | class Identity: 6 | """Identity sensing operator 7 | 8 | Notes: 9 | Implemented originally in optimus-primal. 10 | """ 11 | 12 | def dir_op(self, x): 13 | """Computes the forward operator of the identity class. 14 | 15 | Args: 16 | x (np.ndarray): Vector to apply identity to. 17 | 18 | Returns: 19 | np.ndarray: array of coefficients 20 | """ 21 | return x 22 | 23 | def adj_op(self, x): 24 | """Computes the forward adjoint operator of the identity class. 25 | 26 | Args: 27 | x (np.ndarray): Vector to apply identity to. 
class Identity:
    """Identity sensing operator

    Notes:
        Implemented originally in optimus-primal.
    """

    def dir_op(self, x):
        """Computes the forward operator of the identity class.

        Args:
            x (np.ndarray): Vector to apply identity to.

        Returns:
            np.ndarray: array of coefficients
        """
        return x

    def adj_op(self, x):
        """Computes the forward adjoint operator of the identity class.

        Args:
            x (np.ndarray): Vector to apply identity to.

        Returns:
            np.ndarray: array of coefficients
        """
        return x


class MaskedFourier:
    """
    Masked fourier sensing operator i.e. MRI/Radio imaging.
    """

    def __init__(self, dim, ratio):
        """Initialises the masked fourier sensing operator.

        Args:
            dim (int): Dimension of square pixel-space image.

            ratio (float): Fraction of measurements observed.
        """
        # Build a flat boolean mask whose first int(ratio * dim^2) entries
        # are observed, then shuffle so observed frequencies are scattered.
        flat_mask = np.full(dim**2, False)
        flat_mask[: int(ratio * dim**2)] = True
        np.random.shuffle(flat_mask)
        self.mask = flat_mask.reshape((dim, dim))
        self.shape = (dim, dim)

    def dir_op(self, x):
        """Computes the forward operator of the class.

        Args:
            x (np.ndarray): Vector to apply identity to.

        Returns:
            np.ndarray: array of coefficients
        """
        # Fourier transform followed by sub-sampling of observed modes.
        return self.__apply_mask(np.fft.fft2(x))

    def adj_op(self, x):
        """Computes the forward adjoint operator of the class.

        Args:
            x (np.ndarray): Vector to apply identity to.

        Returns:
            np.ndarray: array of coefficients
        """
        # Zero-fill unobserved modes then inverse Fourier transform.
        return np.fft.ifft2(self.__unmask(x))

    def __apply_mask(self, x):
        """Applies observational mask to image.

        Args:
            x (np.ndarray): Vector to apply mask to.

        Returns:
            np.ndarray: slice of masked coefficients
        """
        return x[self.mask]

    def __unmask(self, x):
        """Applies adjoint of observational mask to image.

        Args:
            x (np.ndarray): Vector to apply adjoint mask to.

        Returns:
            np.ndarray: Projection of masked coefficients onto image.
        """
        full = np.zeros(self.shape, dtype=complex)
        full[self.mask] = x
        return full
97 | """ 98 | xx = np.zeros(self.shape, dtype=complex) 99 | xx[self.mask] = x 100 | return xx 101 | -------------------------------------------------------------------------------- /ProxNest/operators/wavelet_operators.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pywt 3 | 4 | 5 | class db_wavelets: 6 | """Constructs a linear operator for abstract Daubechies Wavelets. 7 | 8 | Notes: 9 | Stripped back version of optimus-primal linear operator. 10 | """ 11 | 12 | def __init__(self, wav, levels, shape, axes=None): 13 | """Initialises Daubechies Wavelet linear operator class. 14 | 15 | Args: 16 | wav (string): Wavelet type (see https://tinyurl.com/5n7wzpmb). 17 | 18 | levels (list[int]): Wavelet levels (scales) to consider. 19 | 20 | shape (list[int]): Dimensionality of input to wavelet transform. 21 | 22 | axes (int): Which axes to perform wavelet transform (default = all axes). 23 | 24 | Raises: 25 | ValueError: Raised when levels are not positive definite. 26 | """ 27 | 28 | if np.any(levels <= 0): 29 | raise ValueError("Wavelet levels must be positive definite") 30 | if axes is None: 31 | axes = range(len(shape)) 32 | self.axes = axes 33 | self.wav = wav 34 | self.levels = np.array(levels, dtype=int) 35 | self.shape = shape 36 | self.coeff_slices = None 37 | self.coeff_shapes = None 38 | 39 | self.adj_op(self.dir_op(np.ones(shape))) 40 | 41 | def dir_op(self, x): 42 | r"""Evaluates the forward abstract wavelet transform of :math:`x`. 43 | 44 | Args: 45 | x (np.ndarray): Array to wavelet transform. 46 | 47 | Raises: 48 | ValueError: Raised when the shape of x is not even in every dimension. 49 | 50 | Returns: 51 | np.ndarray: Flattened array of wavelet coefficients. 
52 | """ 53 | if self.wav == "dirac": 54 | return np.ravel(x) 55 | 56 | if self.shape[0] % 2 == 1: 57 | raise ValueError("Signal shape should be even dimensions.") 58 | 59 | if len(self.shape) > 1: 60 | if self.shape[1] % 2 == 1: 61 | raise ValueError("Signal shape should be even dimensions.") 62 | 63 | coeffs = pywt.wavedecn( 64 | x, wavelet=self.wav, level=self.levels, mode="periodic", axes=self.axes 65 | ) 66 | arr, self.coeff_slices, self.coeff_shapes = pywt.ravel_coeffs( 67 | coeffs, axes=self.axes 68 | ) 69 | return arr 70 | 71 | def adj_op(self, x): 72 | r"""Evaluates the forward adjoint abstract wavelet transform of :math:`x`. 73 | 74 | Args: 75 | x (np.ndarray): Array to adjoint wavelet transform. 76 | 77 | Returns: 78 | np.ndarray: Array of pixel-space coefficients. 79 | """ 80 | if self.wav == "dirac": 81 | return np.reshape(x, self.shape) 82 | 83 | coeffs_from_arr = pywt.unravel_coeffs( 84 | x, self.coeff_slices, self.coeff_shapes, output_format="wavedecn" 85 | ) 86 | return pywt.waverecn( 87 | coeffs_from_arr, wavelet=self.wav, mode="periodic", axes=self.axes 88 | ) 89 | -------------------------------------------------------------------------------- /ProxNest/optimisations/__init__.py: -------------------------------------------------------------------------------- 1 | from . import l2_ball_proj 2 | from . import l1_norm_prox 3 | from . import tv_norm_prox 4 | -------------------------------------------------------------------------------- /ProxNest/optimisations/l1_norm_prox.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import ProxNest.operators as ops 3 | 4 | 5 | def l1_norm_prox(x, lamb, params): 6 | r"""Proximal operator associated with L1 norm. 7 | 8 | Compute the L1 proximal operator, i.e. solve 9 | 10 | .. 
def l1_norm_prox(x, lamb, params):
    r"""Proximal operator associated with L1 norm.

    Compute the L1 proximal operator, i.e. solve

    .. math::

        z^* = \min_{z} \frac{1}{2}||x - z||_2^2 + \lambda * ||\Psi^{\dagger} z||_1,

    where :math:`x` is the input vector and the solution :math:`z^*` is returned as sol.

    Args:
        x (np.ndarray): A sample position :math:`x` in the posterior space.

        lamb (float): Regularisation parameter.

        params (dict): Dictionary of parameters defining the optimisation.

    Returns:
        np.ndarray: Optimal solution :math:`z^*` of proximal operator.

    Notes:
        [1] M.J. Fadili and J-L. Starck, "Monotone operator splitting for optimization problems in sparse recovery" , IEEE ICIP, Cairo, Egypt, 2009.

        [2] Amir Beck and Marc Teboulle, "A Fast Iterative Shrinkage-Thresholding Algorithm for Linear Inverse Problems", SIAM Journal on Imaging Sciences 2 (2009), no. 1, 183--202.
    """

    # TIGHT FRAMES: closed-form solution via a single soft-thresholding step.
    if (params["tight"]) and (params["pos"] or params["reality"]):

        temp = params["Psi"].dir_op(x)
        sol = x + 1 / params["nu"] * params["Psi"].adj_op(
            ops.proximal_operators.soft_thresh(temp, lamb * params["nu"] * params["l1weights"]) - temp
        )
        dummy = params["Psi"].dir_op(sol)
        norm_l1 = np.sum(params["l1weights"] * np.abs(dummy))
        crit_L1 = "REL_OBJ"
        iter_L1 = 1

    # NON TIGHT FRAME CASE OR CONSTRAINT INVOLVED: iterate (dual forward-backward).
    else:

        # Initializations
        sol = x
        if params["pos"] or params["reality"]:
            sol = np.real(sol)

        dummy = params["Psi"].dir_op(sol)
        u_l1 = np.zeros(len(dummy))
        prev_obj = 0
        iter_L1 = 0

        # Soft-thresholding
        if params["verbose"] > 1:
            print(" Proximal L1 operator:")

        while 1:

            # Objective: 0.5 ||x - sol||_2^2 + lamb * weighted L1 norm.
            norm_l1 = np.sum(params["l1weights"] * np.abs(dummy))
            obj = 0.5 * np.linalg.norm(x - sol, 2) ** 2 + lamb * norm_l1
            # Guard against 0/0 -> NaN when the objective is exactly zero
            # (e.g. x = 0): the iterate is already optimal, so report
            # convergence instead of spinning to max_iter on NaN.
            rel_obj = np.abs(obj - prev_obj) / obj if obj > 0 else 0

            # Log
            if params["verbose"] > 1:
                print(
                    " Iter {}, prox_fval = {}, rel_fval = {}".format(
                        iter_L1, obj, rel_obj
                    )
                )

            # Stopping criterion. Label fixed to "REL_OBJ" for consistency
            # with the tight-frame branch (was a truncated "REL_OB").
            if rel_obj < params["rel_obj"]:
                crit_L1 = "REL_OBJ"
                break
            elif iter_L1 >= params["max_iter"]:
                crit_L1 = "MAX_IT"
                break

            # Soft-thresholding of the scaled dual residual.
            res = u_l1 * params["nu"] + dummy
            dummy = ops.proximal_operators.soft_thresh(res, lamb * params["nu"] * params["l1weights"])
            u_l1 = 1 / params["nu"] * (res - dummy)
            sol = x - params["Psi"].adj_op(u_l1)

            # Projection onto the non-negative orthant (positivity constraint).
            if params["pos"]:
                sol = np.real(sol)
                sol[sol < 0] = 0

            # Projection onto the real orthant (reality constraint).
            if params["reality"]:
                sol = np.real(sol)

            # Update
            prev_obj = obj
            iter_L1 = iter_L1 + 1
            dummy = params["Psi"].dir_op(sol)

    # Log after the projection onto the L2-ball
    if params["verbose"] >= 1:
        print(
            " prox_L1: prox_fval = {}, {}, iter = {}".format(norm_l1, crit_L1, iter_L1)
        )

    return sol
Starck, "Monotone operator splitting for optimization problems in sparse recovery" , IEEE ICIP, Cairo, Egypt, 2009. 27 | 28 | [2] Amir Beck and Marc Teboulle, "A Fast Iterative Shrinkage-Thresholding Algorithm for Linear Inverse Problems", SIAM Journal on Imaging Sciences 2 (2009), no. 1, 183--202. 29 | """ 30 | # Lambda function for scaling, used for tight frames only 31 | sc = lambda z: z*np.minimum(tau / np.linalg.norm(z), 1) 32 | 33 | # TIGHT FRAMES 34 | if (params["tight"]) and (params["pos"] or params["reality"]): 35 | 36 | temp = params["Phi"].dir_op(x) - params["y"] 37 | sol = x + 1 / params["nu"] * params["Phi"].adj_op(sc(temp) - temp) 38 | crit_B2 = "TOL_EPS" 39 | iter = 0 40 | u = 0 41 | 42 | # NON-TIGHT FRAMES 43 | else: 44 | 45 | # Initializations 46 | sol = x 47 | # u = params['u'] 48 | u = params["Phi"].dir_op(sol) 49 | v = u 50 | iter = 1 51 | told = 1 52 | 53 | # Tolerance onto the L2 ball 54 | epsilon_low = tau / (1 + params["tol"]) 55 | epsilon_up = tau / (1 - params["tol"]) 56 | 57 | # Check if we are in the L2 ball 58 | dummy = params["Phi"].dir_op(sol) 59 | norm_res = np.linalg.norm(params["y"] - dummy, 2) 60 | if norm_res <= epsilon_up: 61 | crit_B2 = "TOL_EPS" 62 | true = 0 63 | 64 | # Projection onto the L2-ball 65 | if params["verbose"] > 1: 66 | print(" Proj. 
B2:") 67 | 68 | while 1: 69 | 70 | # Residual 71 | res = params["Phi"].dir_op(sol) - params["y"] 72 | norm_res = np.linalg.norm(res) 73 | 74 | # Scaling for the projection 75 | res = u * params["nu"] + res 76 | norm_proj = np.linalg.norm(res) 77 | 78 | # Log 79 | if params["verbose"] > 1: 80 | print( 81 | " Iter {}, epsilon = {}, ||y - Phi(x)||_2 = {}".format( 82 | iter, tau, norm_res 83 | ) 84 | ) 85 | 86 | # Stopping criterion 87 | if (norm_res >= epsilon_low) and (norm_res <= epsilon_up): 88 | crit_B2 = "TOL_EPS" 89 | break 90 | elif iter >= params["max_iter"]: 91 | crit_B2 = "MAX_IT" 92 | break 93 | 94 | # Projection onto the L2 ball 95 | t = (1 + np.sqrt(1 + 4 * told**2)) / 2 96 | ratio = np.minimum(1, tau / norm_proj) 97 | u = v 98 | v = 1 / params["nu"] * (res - res * ratio) 99 | u = v + (told - 1) / t * (v - u) 100 | 101 | # Current estimate 102 | sol = x - params["Phi"].adj_op(u) 103 | 104 | # Projection onto the non-negative orthant (positivity constraint) 105 | if params["pos"]: 106 | sol = np.real(sol) 107 | sol[sol < 0] = 0 108 | 109 | # Projection onto the real orthant (reality constraint) 110 | if params["reality"]: 111 | sol = np.real(sol) 112 | 113 | # Increment iteration labels 114 | told = t 115 | iter = iter + 1 116 | 117 | # Log after the projection onto the L2-ball 118 | if params["verbose"] >= 1: 119 | temp = params["Phi"].dir_op(sol) 120 | print( 121 | " Proj. 
B2: epsilon = {}, ||y - Phi(x)||_2 = {}, {}, iter = {}".format( 122 | tau, np.linalg.norm(params["y"] - temp), crit_B2, iter 123 | ) 124 | ) 125 | 126 | return sol 127 | -------------------------------------------------------------------------------- /ProxNest/optimisations/tv_norm_prox.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def augmented_TV_norm_prox(x, lamb, params): 5 | r"""Compute the augmented total variation proximal operator 6 | 7 | Compute the TV proximal operator when an additional linear operator A is 8 | incorporated in the TV norm, i.e. solve 9 | 10 | .. math:: 11 | 12 | x^* = \min_{x} ||y - x||_2^2 + \lambda * ||A x||_{TV} 13 | 14 | where :math:`y` is the input vector and the solution :math:`x^*` is returned as sol. 15 | 16 | Args: 17 | x (np.ndarray): A sample position :math:`x` in the posterior space. 18 | 19 | lamb (float): Regularisation parameter. 20 | 21 | params (dict): Dictionary of parameters defining the optimisation. 22 | 23 | Returns: 24 | np.ndarray: Optimal solution :math:`x^*` of proximal operator. 25 | 26 | Notes: 27 | [1] A. Beck and M. Teboulle, "Fast gradient-based algorithms for constrained Total Variation Image Denoising and Deblurring Problems", IEEE Transactions on Image Processing, VOL. 18, NO. 11, 2419-2434, November 2009. 28 | """ 29 | return 0 30 | -------------------------------------------------------------------------------- /ProxNest/sampling/__init__.py: -------------------------------------------------------------------------------- 1 | from . import proximal_nested 2 | from . import resampling 3 | -------------------------------------------------------------------------------- /ProxNest/sampling/proximal_nested.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from tqdm import tqdm 3 | import ProxNest.logs as lg 4 | from . 
def ProxNestedSampling(X0, LikeliL, proxH, proxB, params, options):
    r"""Executes the proximal nested sampling algorithm

    Args:
        X0 (np.ndarray): initialisation of the sample chain.

        LikeliL (lambda): function to compute the likelihood value of a sample.

        proxH (lambda): proximity operator of the prior.

        proxB (lambda): proximity operator of the constraint :math:`\ell_2`-ball.

        params (dict): parameters for prior resampling subject to likelihood isocontour.

        options (dict): parameters about number of samples, thinning factor, burn-in numbers.

    Returns:
        tuple: (Evidence, sample trace).

    Notes:
        MATLAB version: Xiaohao Cai (21/02/2019)

        Python version: Matthew Price (9/05/2022)
    """
    sigma = options["sigma"]
    Phi = params["Phi"]
    y = params["y"]

    lg.info_log("Constructing lambda functions for resampling projections...")

    # Simulation setup
    # Use backward-forward splitting to approximate proxPi using proxH and gradF.
    # driftIniN: Moreau-Yosida approximated P-ULA drift for the prior alone
    # (used when populating the initial live set, no likelihood constraint).
    driftIniN = lambda X, delta, lamb: np.real(
        (1 - delta / (2 * lamb)) * X + delta / (2 * lamb) * proxH(X, lamb)
    )
    # drift: P-ULA drift combining the prior proximal step with the projection
    # onto the likelihood iso-contour ball of radius sqrt(2 * tau * sigma^2).
    drift = lambda X, delta, lamb, tau: np.real(
        (1 - delta / lamb) * X
        + delta
        / (2 * lamb)
        * (proxH(X, lamb) + proxB(X, np.sqrt(tau * 2 * sigma**2)))
    )

    # Initialize variables
    delta = options[
        "delta"
    ]  # delta controls the proposal variance, the step-length and Moreau approximation
    lamb = 5 * delta  # lamb \in [4*delta, 10*delta]
    Xcur = X0  # set initial state as current state
    # Initial likelihood threshold (scaled down so the first constraint ball is loose)
    tau_0 = -LikeliL(Xcur) * 1e-1

    lg.info_log("Allocating memory and populating initial live-samples...")

    # Initialise arrays to store samples
    # Indexing: sample, likelihood, weights
    NumLiveSetSamples = options["samplesL"]
    NumDiscardSamples = options["samplesD"]

    Xtrace = {}

    Xtrace["LiveSet"] = np.zeros((NumLiveSetSamples, Xcur.shape[0], Xcur.shape[1]))
    Xtrace["LiveSetL"] = np.zeros(NumLiveSetSamples)

    Xtrace["Discard"] = np.zeros((NumDiscardSamples, Xcur.shape[0], Xcur.shape[1]))
    Xtrace["DiscardL"] = np.zeros(NumDiscardSamples)
    Xtrace["DiscardW"] = np.zeros(NumDiscardSamples)
    Xtrace["DiscardPostProb"] = np.zeros(NumDiscardSamples)

    # Generate initialisation (burn the chain towards the typical set)
    j = 0
    for ii in tqdm(range(200), desc="ProxNest || Initialise"):
        # P-ULA -- MARKOV CHAIN generating initialisation
        Xcur = drift(Xcur, delta, lamb, tau_0) + np.sqrt(delta) * np.random.randn(
            Xcur.shape[0], Xcur.shape[1]
        )

    # Obtain samples from priors
    # NOTE(review): this range appears to yield only NumLiveSetSamples - 1
    # thinned samples, leaving Xtrace["LiveSet"][-1] zero-initialised --
    # confirm against the reference MATLAB implementation.
    for ii in tqdm(
        range(2, NumLiveSetSamples * options["thinning"] + options["burn"]),
        desc="ProxNest || Populate",
    ):

        # P-ULA -- MARKOV CHAIN generating live samples
        Xcur = driftIniN(Xcur, delta, lamb) + np.sqrt(delta) * np.random.randn(
            Xcur.shape[0], Xcur.shape[1]
        )

        # Save sample (with thinning)
        if (ii > options["burn"]) and not (
            (ii - options["burn"]) % options["thinning"]
        ):
            # Record the current sample in the live set and its likelihood
            Xtrace["LiveSet"][j] = Xcur
            Xtrace["LiveSetL"][j] = LikeliL(Xcur)

            j += 1

    lg.info_log("Executing primary nested resampling iterations...")

    # Reorder samples (smallest likelihood moved to the end)
    # TODO: Make this more efficient!
    Xtrace["LiveSet"], Xtrace["LiveSetL"] = resampling.reorder_samples(
        Xtrace["LiveSet"], Xtrace["LiveSetL"]
    )

    # Update samples using the proximal nested sampling technique
    for k in tqdm(range(NumDiscardSamples), desc="ProxNest || Sample"):
        # Compute the smallest threshold wrt live samples' likelihood
        tau = -Xtrace["LiveSetL"][-1]  # - 1e-2

        # Randomly select a sample in the live set as a starting point
        # NOTE(review): the trailing `- 1` makes index -1 (the sample about to
        # be discarded) reachable and index NumLiveSetSamples - 2 unreachable;
        # possibly an off-by-one from the 1-indexed MATLAB original -- confirm.
        indNewSample = (
            np.floor(np.random.rand() * (NumLiveSetSamples - 1)).astype(int) - 1
        )
        Xcur = Xtrace["LiveSet"][indNewSample]

        # Generate a new sample with likelihood larger than given threshold
        Xcur = drift(Xcur, delta, lamb, tau) + np.sqrt(delta) * np.random.randn(
            Xcur.shape[0], Xcur.shape[1]
        )

        # check if the new sample is inside l2-ball (metropolis-hasting); if
        # not, force the new sample into L2-ball
        if np.sum(np.sum(np.abs(y - Phi.dir_op(Xcur)) ** 2)) > tau * 2 * sigma**2:
            Xcur = proxB(Xcur, np.sqrt(tau * 2 * sigma**2))

        # Record the sample discarded and its likelihood
        Xtrace["Discard"][k] = Xtrace["LiveSet"][-1]
        Xtrace["DiscardL"][k] = Xtrace["LiveSetL"][-1]

        # Add the new sample to the live set and its likelihood
        Xtrace["LiveSet"][-1] = Xcur
        Xtrace["LiveSetL"][-1] = LikeliL(Xcur)

        # Reorder the live samples TODO: Make this more efficient!
        Xtrace["LiveSet"], Xtrace["LiveSetL"] = resampling.reorder_samples(
            Xtrace["LiveSet"], Xtrace["LiveSetL"]
        )

    lg.info_log(
        "Estimating Bayesian evidence (with variance), posterior probabilies, and posterior mean..."
    )

    # Bayesian evidence calculation
    BayEvi = np.zeros(2)
    # NOTE(review): this initial weight is immediately overwritten by the
    # k = 0 pass of the loop below (exp(-1/N) != 1/N) -- dead store, confirm intent.
    Xtrace["DiscardW"][0] = 1 / NumLiveSetSamples

    # Compute the sample weight (prior-volume shrinkage exp(-(k+1)/N))
    for k in tqdm(range(NumDiscardSamples), desc="ProxNest || Compute Weights"):
        Xtrace["DiscardW"][k] = np.exp(-(k + 1) / NumLiveSetSamples)

    # Compute the volume length for each sample using trapezium rule
    discardLen = np.zeros(NumDiscardSamples)
    discardLen[0] = (1 - np.exp(-2 / NumLiveSetSamples)) / 2

    for i in tqdm(
        range(1, NumDiscardSamples - 1), desc="ProxNest || Trapezium Integrate"
    ):
        discardLen[i] = (Xtrace["DiscardW"][i - 1] - Xtrace["DiscardW"][i + 1]) / 2

    discardLen[-1] = (
        np.exp(-(NumDiscardSamples - 1) / NumLiveSetSamples)
        - np.exp(-(NumDiscardSamples + 1) / NumLiveSetSamples)
    ) / 2
    # volume length of the last discarded sample

    liveSampleLen = np.exp(-(NumDiscardSamples) / NumLiveSetSamples)
    # volume length of the living sample

    # Apply the discarded sample for Bayesian evidence value computation
    vecDiscardLLen = Xtrace["DiscardL"] + np.log(discardLen)

    # Apply the final live set samples for Bayesian evidence value computation
    # (only used by the commented-out "Way 1" below; retained for reference)
    vecLiveSetLLen = Xtrace["LiveSetL"] + np.log(liveSampleLen / NumLiveSetSamples)

    # # ------- Way 1: using discarded and living samples --------
    # # Get the maximum value of the exponents for all the samples
    # maxAllSampleLLen = max(max(vecDiscardLLen),max(vecLiveSetLLen))

    # # Compute the Bayesian evidence value using discarded and living samples
    # BayEvi[0] = maxAllSampleLLen + np.log(np.sum(np.exp(vecDiscardLLen-maxAllSampleLLen)) + np.sum(np.exp(vecLiveSetLLen-maxAllSampleLLen)))

    # ------- Way 2: using discarded samples --------
    # Get the maximum value of the exponents for the discarded samples
    maxDiscardLLen = np.max(vecDiscardLLen)

    # Compute the Bayesian evidence value (log-sum-exp for numerical stability)
    BayEvi[0] = maxDiscardLLen + np.log(np.sum(np.exp(vecDiscardLLen - maxDiscardLLen)))

    # Estimate the error of the computed Bayesian evidence
    entropyH = 0

    for k in tqdm(range(NumDiscardSamples), desc="ProxNest || Estimate Variance"):
        temp1 = np.exp(Xtrace["DiscardL"][k] + np.log(discardLen[k]) - BayEvi[0])
        entropyH = entropyH + temp1 * (Xtrace["DiscardL"][k] - BayEvi[0])

    # Evaluate the evidence variance
    BayEvi[1] = np.sqrt(np.abs(entropyH) / NumLiveSetSamples)

    # Compute the posterior probability for each discarded sample
    for k in tqdm(range(NumDiscardSamples), desc="ProxNest || Compute Posterior Mean"):
        Xtrace["DiscardPostProb"][k] = np.exp(
            Xtrace["DiscardL"][k] + np.log(discardLen[k]) - BayEvi[0]
        )

    # Compute the posterior mean of the discarded samples -- optimal solution
    Xtrace["DiscardPostMean"] = np.zeros((Xcur.shape[0], Xcur.shape[1]))
    for k in range(NumDiscardSamples):
        Xtrace["DiscardPostMean"] += Xtrace["DiscardPostProb"][k] * Xtrace["Discard"][k]

    return BayEvi, Xtrace
def reorder_samples(samples, likelihood_values):
    r"""Find the sample with the smallest likelihood and move it to the end of the list.

    The swap is performed in place; the (possibly modified) arrays are also
    returned for convenience.

    Args:
        samples (np.ndarray): given sample list, shape (num_samples, ...).

        likelihood_values (np.ndarray): corresponding likelihoods, shape (num_samples,).

    Returns:
        tuple: Reordered version of (samples, likelihood_values).

    Notes:
        MATLAB version: Xiaohao Cai (30/01/2019)

        Python version: Matthew Price (10/05/2022)
    """
    # find the smallest likelihood and corresponding index
    minSamIdx = np.argmin(likelihood_values)

    # Swap the sample with the smallest likelihood to the end of the list.
    # Bug fix: the previous code took `tempSample = samples[minSamIdx]`, which
    # for a multi-dimensional array is a *view*; assigning
    # `samples[minSamIdx] = samples[-1]` then overwrote the viewed data, so the
    # minimum-likelihood sample was lost and the last sample duplicated.
    # Fancy indexing copies the rows and performs a correct simultaneous swap.
    samples[[minSamIdx, -1]] = samples[[-1, minSamIdx]]

    # Swap the likelihoods accordingly (same fancy-index swap for symmetry).
    likelihood_values[[minSamIdx, -1]] = likelihood_values[[-1, minSamIdx]]

    return samples, likelihood_values
10 | with pytest.raises(ValueError): 11 | lg.setup_logging(custom_yaml_path=dir_name) 12 | 13 | 14 | def test_general_logging(): 15 | 16 | lg.setup_logging() 17 | lg.critical_log("A random critical message") 18 | lg.debug_log("A random debug message") 19 | lg.warning_log("A random warning message") 20 | lg.info_log("A random warning message") 21 | -------------------------------------------------------------------------------- /ProxNest/tests/test_operators.py: -------------------------------------------------------------------------------- 1 | from ProxNest.operators import proximal_operators as prox_ops 2 | from ProxNest.operators import sensing_operators as sense_ops 3 | from ProxNest.operators import wavelet_operators as wav_ops 4 | import numpy as np 5 | import pytest 6 | 7 | 8 | def test_identity_linear_operator(): 9 | x = np.random.randn(10, 10) + 1j * np.random.randn(10, 10) 10 | phi = sense_ops.Identity() 11 | np.testing.assert_allclose(phi.dir_op(x), x, 1e-14) 12 | np.testing.assert_allclose(phi.adj_op(x), x, 1e-14) 13 | 14 | 15 | def test_maskedfourier_linear_operator(): 16 | ratio = 0.5 17 | rdim = 10 18 | fdim = int(rdim**2 * ratio) 19 | 20 | x = np.random.randn(rdim, rdim) + 1j * np.random.randn(rdim, rdim) 21 | y = np.random.randn(fdim) + 1j * np.random.randn(fdim) 22 | phi = sense_ops.MaskedFourier(rdim, ratio) 23 | 24 | xx = phi.dir_op(x) 25 | yy = phi.adj_op(y) 26 | 27 | a = abs(np.vdot(x, yy)) 28 | b = abs(np.vdot(y, xx)) 29 | 30 | assert a * rdim**2 == pytest.approx(b) 31 | 32 | 33 | def test_wavelet_linear_operator(): 34 | dim = 64 35 | x = np.random.randn(dim, dim) + 1j * np.random.randn(dim, dim) 36 | psi = wav_ops.db_wavelets("db6", levels=2, shape=(dim, dim)) 37 | xx = psi.dir_op(x) 38 | np.testing.assert_allclose(x, psi.adj_op(xx), 1e-8) 39 | 40 | 41 | def test_soft_thresholding(): 42 | x = np.random.randn(10, 10) + 1j * np.random.randn(10, 10) 43 | x_thresh = np.zeros_like(x) 44 | threshold = 0.1 45 | 46 | for i in range(10): 47 | for j 
in range(10): 48 | xx = x[i, j] 49 | xabs = np.abs(xx) 50 | if xabs - threshold > 0: 51 | x_thresh[i, j] = np.sign(xx) * (xabs - threshold) 52 | 53 | np.testing.assert_allclose(x_thresh, prox_ops.soft_thresh(x, threshold), 1e-14) 54 | 55 | 56 | def test_hard_thresholding(): 57 | x = np.random.randn(10, 10) + 1j * np.random.randn(10, 10) 58 | x_thresh = np.zeros_like(x) 59 | threshold = 0.1 60 | 61 | for i in range(10): 62 | for j in range(10): 63 | xx = x[i, j] 64 | xabs = np.abs(xx) 65 | if xabs - threshold > 0: 66 | x_thresh[i, j] = xx 67 | 68 | np.testing.assert_allclose(x_thresh, prox_ops.hard_thresh(x, threshold), 1e-14) 69 | 70 | 71 | def test_l1_projection(): 72 | x = np.random.randn(10, 10) + 1j * np.random.randn(10, 10) 73 | x_backproj = np.zeros_like(x) 74 | threshold = 0.1 75 | 76 | np.testing.assert_allclose( 77 | prox_ops.soft_thresh(x, threshold), 78 | prox_ops.l1_projection(x, threshold, 2), 79 | 1e-14, 80 | ) 81 | 82 | 83 | def test_l2_projection(): 84 | x = np.random.randn(10, 10) + 1j * np.random.randn(10, 10) 85 | 86 | np.testing.assert_allclose( 87 | np.zeros_like(x), prox_ops.l2_projection(x, 1, 0.25), 1e-14 88 | ) 89 | -------------------------------------------------------------------------------- /ProxNest/tests/test_optimisations.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import numpy as np 3 | import ProxNest.utils as utils 4 | import ProxNest.optimisations as opts 5 | from ProxNest.operators import sensing_operators as sense_ops 6 | 7 | 8 | @pytest.mark.parametrize("tight", [True, False]) 9 | @pytest.mark.parametrize("pos", [True, False]) 10 | def test_l2_ball_projection(tight: bool, pos: bool): 11 | # Define the noise level. 12 | sigma = 1 13 | 14 | # Create random truth and observation set. 
15 | x = np.ones((64, 64)) 16 | x0 = np.random.randn(64, 64) 17 | data = x + sigma * np.random.randn(64, 64) 18 | 19 | # Define likelihood lambda function and evaluate L2-ball radius tau. 20 | LogLikeliL = lambda sol: -np.linalg.norm(data - sol, "fro") ** 2 / (2 * sigma**2) 21 | tau = -LogLikeliL(x0) * 1e-1 22 | 23 | # Create a parameters structure. 24 | id = sense_ops.Identity() 25 | params = utils.create_parameters_dict(y=data, Phi=id, Psi=id, tight=tight, pos=pos, reality=True) 26 | 27 | # Evaluate the projection algorithm 28 | z = opts.l2_ball_proj.sopt_fast_proj_B2(x0, tau, params) 29 | assert np.linalg.norm(data - z) < tau 30 | 31 | 32 | @pytest.mark.parametrize("tight", [True, False]) 33 | @pytest.mark.parametrize("pos", [True, False]) 34 | def test_l1_norm_projection_extremes(tight: bool, pos: bool): 35 | # Create random signal 36 | x = np.random.randn(64, 64) 37 | 38 | # Create a parameters structure. 39 | id = sense_ops.Identity() 40 | params = utils.create_parameters_dict(tight=tight, Phi=id, Psi=id, pos=pos, reality=True) 41 | 42 | if tight: 43 | # Evaluate the l1-norm sub-iterations lambda=0 44 | z = opts.l1_norm_prox.l1_norm_prox(x, 0, params) 45 | np.testing.assert_allclose(z, x, 1e-14) 46 | 47 | # Evaluate the l1-norm sub-iterations lambda >> 0 48 | z = opts.l1_norm_prox.l1_norm_prox(x, 1e10, params) 49 | np.testing.assert_allclose(z, np.zeros_like(x), 1e-14) 50 | 51 | 52 | @pytest.mark.parametrize("tight", [True, False]) 53 | @pytest.mark.parametrize("pos", [True, False]) 54 | def test_l1_norm_projection_specific(tight: bool, pos: bool): 55 | # Create random signal 56 | lamb = 0.5 57 | x = np.ones((64, 64)) 58 | xpred = x / 2 59 | obj_pred = (3 / 8) * len(x.flatten("C")) 60 | 61 | # Create a parameters structure. 
62 | id = sense_ops.Identity() 63 | params = utils.create_parameters_dict(tight=tight, Phi=id, Psi=id, pos=pos, reality=True) 64 | 65 | # Evaluate the l1-norm sub-iterations 66 | z = opts.l1_norm_prox.l1_norm_prox(x, lamb, params) 67 | np.testing.assert_allclose(z, xpred, 1e-10) 68 | 69 | # Minimised solution 70 | obj_z = 0.5 * np.linalg.norm(x - z) ** 2 + lamb * np.linalg.norm(np.ravel(z), ord=1) 71 | assert obj_z == pytest.approx(obj_pred) 72 | -------------------------------------------------------------------------------- /ProxNest/tests/test_sampling.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | 4 | import ProxNest.utils as utils 5 | import ProxNest.sampling as sampling 6 | import ProxNest.optimisations as optimisations 7 | from ProxNest.operators import sensing_operators as sense_ops 8 | 9 | def test_against_analytic_gaussian(): 10 | """ Tests ProxNest against analytic Gaussian """ 11 | 12 | # A simple identity forward model and redundant dictionary 13 | id = sense_ops.Identity() 14 | sigma = 1 15 | iterations = 20 16 | delta = 1/2 17 | error = 0 18 | dim = 32 19 | image = np.random.rand(dim, 1) 20 | 21 | # Parameter dictionary associated with optimisation problem of resampling from the prior subject to the likelihood iso-ball 22 | params = utils.create_parameters_dict( 23 | y = image, # Measurements i.e. data 24 | Phi = id, # Forward model 25 | epsilon = 1e-3, # Radius of L2-ball of likelihood 26 | tight = True, # Is Phi a tight frame or not? 
27 | nu = 1, # Bound on the squared-norm of Phi 28 | tol = 1e-10, # Convergence tolerance of algorithm 29 | max_iter = 200, # Maximum number of iterations 30 | verbose = 0, # Verbosity level 31 | u = 0, # Initial vector for the dual problem 32 | pos = True, # Positivity flag 33 | reality = True # Reality flag 34 | ) 35 | 36 | # Options dictionary associated with the overall sampling algorithm 37 | options = utils.create_options_dict( 38 | samplesL = 2e3, # Number of live samples 39 | samplesD = 3e4, # Number of discarded samples 40 | thinning = 1e1, # Thinning factor (to mitigate correlations) 41 | delta = 1e-2, # Discretisation stepsize 42 | burn = 1e2, # Number of burn in samples 43 | sigma = sigma # Noise standard deviation of degraded image 44 | ) 45 | 46 | for iter in range(iterations): 47 | # Generate a vector drawn from a Uniform distribution 48 | image = np.random.rand(dim, 1) 49 | 50 | # Simulate some unit variance Gaussian noise on this random vector 51 | n = sigma*np.random.randn(dim, 1) 52 | image = image + n 53 | 54 | params["y"] = image 55 | 56 | # Lambda functions to evaluate cost function 57 | LogLikeliL = lambda sol : - np.linalg.norm(image-id.dir_op(sol))**2/(2*sigma**2) 58 | 59 | # Lambda function for L2-norm identity prior backprojection steps 60 | proxH = lambda x, T : x - 2*T*id.adj_op(id.dir_op(x))*2*delta 61 | 62 | # Lambda function for L2-ball likelihood projection during resampling 63 | proxB = lambda x, tau: optimisations.l2_ball_proj.sopt_fast_proj_B2(x, tau, params) 64 | 65 | # Select a starting position 66 | X0 = np.abs(id.adj_op(image)) 67 | 68 | # Perform proximal nested sampling 69 | NS_BayEvi, NS_Trace = sampling.proximal_nested.ProxNestedSampling(X0, LogLikeliL, proxH, proxB, params, options) 70 | rescaled_evidence_estimate = NS_BayEvi[0] + np.log(np.pi/delta)*(dim/2) 71 | 72 | detPar = 1/(2*delta+1/sigma**2) 73 | ySquare= np.linalg.norm(image,'fro')**2 74 | BayEvi_Val_gt_log = np.log(np.sqrt(((2*np.pi)**dim)*(detPar**dim))) + 
(-ySquare/(2*sigma**2)) + (detPar/2)*(ySquare/sigma**4) 75 | 76 | error += (rescaled_evidence_estimate - BayEvi_Val_gt_log)/BayEvi_Val_gt_log 77 | 78 | assert error / iterations == pytest.approx(0, abs=1, rel=1) -------------------------------------------------------------------------------- /ProxNest/tests/test_utils.py: -------------------------------------------------------------------------------- 1 | import ProxNest.utils as utils 2 | import pytest 3 | 4 | 5 | def test_parameter_dict_creation(): 6 | 7 | y = 0 8 | epsilon = 1e-3 9 | tight = True 10 | nu = 1 11 | tol = 1e-3 12 | max_iter = 200 13 | verbose = 1 14 | u = 0 15 | pos = False 16 | reality = False 17 | rel_obj = 0 18 | 19 | params = utils.create_parameters_dict( 20 | y=y, 21 | epsilon=epsilon, 22 | tight=tight, 23 | nu=nu, 24 | tol=tol, 25 | max_iter=max_iter, 26 | verbose=verbose, 27 | u=u, 28 | pos=pos, 29 | reality=reality, 30 | rel_obj=rel_obj, 31 | ) 32 | 33 | assert params["y"] == y 34 | assert params["epsilon"] == epsilon 35 | assert params["tight"] == tight 36 | assert params["nu"] == nu 37 | assert params["tol"] == tol 38 | assert params["max_iter"] == max_iter 39 | assert params["verbose"] == verbose 40 | assert params["u"] == u 41 | assert params["reality"] == reality 42 | assert params["pos"] == pos 43 | assert params["rel_obj"] == rel_obj 44 | 45 | 46 | def test_options_dict_creation(): 47 | 48 | samplesL = 1e3 49 | samplesD = 1e4 50 | thinning = 1e2 51 | delta = 1e-8 52 | burn = 1e2 53 | sigma = 1 54 | 55 | options = utils.create_options_dict( 56 | samplesL=samplesL, 57 | samplesD=samplesD, 58 | thinning=thinning, 59 | delta=delta, 60 | burn=burn, 61 | sigma=sigma, 62 | ) 63 | 64 | assert options["samplesL"] == samplesL 65 | assert options["samplesD"] == samplesD 66 | assert options["thinning"] == thinning 67 | assert options["delta"] == delta 68 | assert options["burn"] == burn 69 | assert options["sigma"] == sigma 70 | 
def create_parameters_dict(
    y=0,
    Phi=None,
    Psi=None,
    epsilon=1e-3,
    tight=True,
    nu=1,
    tol=1e-3,
    max_iter=200,
    verbose=1,
    u=0,
    pos=False,
    reality=False,
    l1weights=1,
    rel_obj=0,
):
    r"""Bundle the optimisation settings into a single parameters dictionary.

    Args:
        y (np.ndarray): Measurements (default = 0).

        Phi (linear operator): Sensing operator (default = None).

        Psi (linear operator): Redundant dictionary (default = None).

        epsilon (float): Radius of the :math:`\ell_2` ball (default = 1e-3).

        tight (bool): True if A is a tight frame or False otherwise (default = True).

        nu (float): Bound on the squared-norm of the operator A, i.e. :math:`||A x||^2 <= \nu ||x||^2` (default = 1).

        tol (float): Tolerance, i.e. the algorithms stops if :math:`\epsilon/(1-tol) <= ||y - A z||_2 <= \epsilon/(1+tol)` (default = 1e-3).

        max_iter (int): Maximum number of iterations (default: 200); coerced to int.

        verbose (int): Verbosity level (0 = no log, 1 = summary at convergence, 2 = print main steps; default = 1).

        u (np.ndarray): Initial vector for the dual problem, same dimension as y (default = 0).

        pos (bool): Positivity flag (True = positive solution, False (default) general case).

        reality (bool): Reality flag (True = real solution, False (default) = general complex case).

        l1weights (np.ndarray): Reweighting of thresholding of :math:`\ell_1`-norm (default = 1).

        rel_obj (float): Stopping criterion for :math:`\ell_1` proximal sub-iterations (default = 0).

    Returns:
        dict: Dictionary of parameters.
    """
    # A single dict literal keeps the key layout explicit; max_iter is coerced
    # to int so downstream range(...) loops are safe with float inputs.
    return {
        "y": y,
        "Phi": Phi,
        "Psi": Psi,
        "epsilon": epsilon,
        "tight": tight,
        "nu": nu,
        "tol": tol,
        "max_iter": int(max_iter),
        "verbose": verbose,
        "u": u,
        "pos": pos,
        "reality": reality,
        "l1weights": l1weights,
        "rel_obj": rel_obj,
    }


def create_options_dict(
    samplesL=1e3, samplesD=1e4, thinning=1e2, delta=1e-8, burn=1e2, sigma=1
):
    r"""Bundle the sampling settings into a single options dictionary.

    Args:
        samplesL (int): Number of live samples (default = 1e3).

        samplesD (int): Number of discarded samples (default = 1e4).

        thinning (int): Thinning factor (i.e. iterations per sample, default = 1e2).

        delta (float): Discretisation stepsize (< Lipschitz constant of :math:`\nabla F`, default = 1e-8).

        burn (int): Number of burn in samples to be discarded (default = 1e2).

        sigma (float): Noise std of degraded image (default = 1).

    Returns:
        dict: Dictionary of sampling options.
    """
    # Counts are conventionally passed as floats (e.g. 1e3), so coerce to int.
    return {
        "samplesL": int(samplesL),
        "samplesD": int(samplesD),
        "thinning": int(thinning),
        "delta": delta,
        "burn": int(burn),
        "sigma": sigma,
    }
|tests| image:: https://github.com/astro-informatics/proxnest/actions/workflows/tests.yml/badge.svg?branch=main 4 | :target: https://github.com/astro-informatics/proxnest/actions/workflows/tests.yml 5 | .. |docs| image:: https://github.com/astro-informatics/proxnest/actions/workflows/docs.yml/badge.svg 6 | :target: https://astro-informatics.github.io/proxnest 7 | .. |codecov| image:: https://codecov.io/gh/astro-informatics/proxnest/branch/main/graph/badge.svg?token=oGowwdoMRN 8 | :target: https://codecov.io/gh/astro-informatics/proxnest 9 | .. |pypi| image:: https://badge.fury.io/py/ProxNest.svg 10 | :target: https://badge.fury.io/py/ProxNest 11 | .. |licence| image:: https://img.shields.io/badge/License-GPL-blue.svg 12 | :target: http://perso.crans.org/besson/LICENSE.html 13 | .. |arxiv| image:: http://img.shields.io/badge/arXiv-2106.03646-orange.svg?style=flat 14 | :target: https://arxiv.org/abs/2106.03646 15 | 16 | |github| |tests| |docs| |codecov| |pypi| |licence| |arxiv| 17 | 18 | |logo| Proximal nested sampling for high-dimensional Bayesian model selection 19 | ================================================================================================================= 20 | 21 | .. |logo| raw:: html 22 | 23 | 24 | 25 | ``ProxNest`` is an open source, well tested and documented Python implementation of the *proximal nested sampling* framework (`Cai et al. 2022 `_) to compute the Bayesian model evidence or marginal likelihood in high-dimensional log-convex settings. Furthermore, non-smooth sparsity-promoting priors are also supported. 26 | 27 | This is achieved by exploiting tools from proximal calculus and Moreau-Yosida regularisation (`Moreau 1962 `_) to efficiently sample from the prior subject to the hard likelihood constraint. The resulting Markov chain iterations include a gradient step, approximating (with arbitrary precision) an overdamped Langevin SDE that can scale to very high-dimensional applications. 
28 | 29 | Basic Usage 30 | =========== 31 | 32 | The following is a straightforward example application to image denoising (Phi = I), regularised with Daubechies wavelets (DB6). 33 | 34 | .. code-block:: Python 35 | 36 | # Import relevant modules. 37 | import numpy as np 38 | import ProxNest 39 | 40 | # Load your data and set parameters. 41 | data = np.load() 42 | params = params # Parameters of the prior resampling optimisation problem. 43 | options = options # Options associated with the sampling strategy. 44 | 45 | # Construct your forward model (phi) and wavelet operators (psi). 46 | phi = ProxNest.operators.sensing_operators.Identity() 47 | psi = ProxNest.operators.wavelet_operators.db_wavelets(["db6"], 2, (dim, dim)) 48 | 49 | # Define proximal operators for both your likelihood and prior. 50 | proxH = lambda x, T : ProxNest.operators.proximal_operators.l1_projection(x, T, delta, Psi=psi) 51 | proxB = lambda x, tau: ProxNest.optimisations.l2_ball_proj.sopt_fast_proj_B2(x, tau, params) 52 | 53 | # Write a lambda function to evaluate your likelihood term (here a Gaussian) 54 | LogLikeliL = lambda sol : - np.linalg.norm(y-phi.dir_op(sol), 'fro')**2/(2*sigma**2) 55 | 56 | # Perform proximal nested sampling 57 | BayEvi, XTrace = ProxNest.sampling.proximal_nested.ProxNestedSampling( 58 | np.abs(phi.adj_op(data)), LogLikeliL, proxH, proxB, params, options 59 | ) 60 | 61 | At this point you have recovered the tuple **BayEvi** and dict **Xtrace** which contain 62 | 63 | .. code-block:: python 64 | 65 | Live = options["samplesL"] # Number of live samples 66 | Disc = options["samplesD"] # Number of discarded samples 67 | 68 | # BayEvi is a tuple containing two values: 69 | BayEvi[0] = 'Estimate of Bayesian evidence (float).' 70 | BayEvi[1] = 'Variance of Bayesian evidence estimate (float).' 71 | 72 | # XTrace is a dictionary containing the np.ndarrays: 73 | XTrace['Liveset'] = 'Set of live samples (shape: Live, dim, dim).' 
74 | XTrace['LivesetL'] = 'Likelihood of live samples (shape: Live).' 75 | 76 | XTrace['Discard'] = 'Set of discarded samples (shape: Disc, dim, dim).' 77 | XTrace['DiscardL'] = 'Likelihood of discarded samples (shape: Disc).' 78 | XTrace['DiscardW'] = 'Weights of discarded samples (shape: Disc).' 79 | 80 | XTrace['DiscardPostProb'] = 'Posterior probability of discarded samples (shape: Disc)' 81 | XTrace['DiscardPostMean'] = 'Posterior mean solution (shape: dim, dim)' 82 | 83 | from which one can perform *e.g.* Bayesian model comparison. 84 | 85 | Installation 86 | ============ 87 | 88 | Brief installation instructions are given below (for further details see the full installation documentation). 89 | 90 | Quick install (PyPi) 91 | -------------------- 92 | The ``ProxNest`` package can be installed by running 93 | 94 | .. code-block:: bash 95 | 96 | pip install ProxNest 97 | 98 | Install from source (GitHub) 99 | ---------------------------- 100 | The ``ProxNest`` package can also be installed from source by running 101 | 102 | .. code-block:: bash 103 | 104 | git clone https://github.com/astro-informatics/proxnest 105 | cd proxnest 106 | 107 | and running the install script, within the root directory, with one command 108 | 109 | .. code-block:: bash 110 | 111 | bash build_proxnest.sh 112 | 113 | To check the install has worked correctly run the unit tests with 114 | 115 | .. code-block:: bash 116 | 117 | pytest --black ProxNest/tests/ 118 | 119 | Contributors 120 | ============ 121 | `Matthew Price `_, `Xiaohao Cai `_, `Jason McEwen `_, `Marcelo Pereyra `_, and contributors. 122 | 123 | Attribution 124 | =========== 125 | A BibTeX entry for ``ProxNest`` is: 126 | 127 | .. code-block:: 128 | 129 | @article{Cai:ProxNest:2021, 130 | author = {Cai, Xiaohao and McEwen, Jason~D. 
and Pereyra, Marcelo}, 131 | title = {"High-dimensional Bayesian model selection by proximal nested sampling"}, 132 | journal = {ArXiv}, 133 | eprint = {arXiv:2106.03646}, 134 | year = {2021} 135 | } 136 | 137 | License 138 | ======= 139 | 140 | ``ProxNest`` is released under the GPL-3 license (see `LICENSE.txt `_), subject to 141 | the non-commercial use condition (see `LICENSE_EXT.txt `_) 142 | 143 | .. code-block:: 144 | 145 | ProxNest 146 | Copyright (C) 2022 Matthew Price, Xiaohao Cai, Jason McEwen, Marcelo Pereyra & contributors 147 | 148 | This program is released under the GPL-3 license (see LICENSE.txt), 149 | subject to a non-commercial use condition (see LICENSE_EXT.txt). 150 | 151 | This program is distributed in the hope that it will be useful, 152 | but WITHOUT ANY WARRANTY; without even the implied warranty of 153 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 154 | -------------------------------------------------------------------------------- /build_proxnest.sh: -------------------------------------------------------------------------------- 1 | # 2 | 3 | # Install core and extra requirements 4 | echo -ne 'Building Dependencies... \r' 5 | pip install -q -r requirements/requirements-core.txt 6 | echo -ne 'Building Dependencies... ######## (33%)\r' 7 | pip install -q -r requirements/requirements-docs.txt 8 | echo -ne 'Building Dependencies... ########### (66%)\r' 9 | pip install -q -r requirements/requirements-tests.txt 10 | echo -ne 'Building Dependencies... ################ (100%)\r' 11 | echo -ne '\n' 12 | 13 | # Install specific converter for building tutorial documentation 14 | conda install pandoc=1.19.2.1 -y 15 | 16 | # Build the proximal nested sampling package locally 17 | pip install -e . 
-------------------------------------------------------------------------------- /data/galaxy_image_256.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/astro-informatics/proxnest/bd12e29434b36815d5bf52a39b27391fc193a6ce/data/galaxy_image_256.npy -------------------------------------------------------------------------------- /data/galaxy_image_64.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/astro-informatics/proxnest/bd12e29434b36815d5bf52a39b27391fc193a6ce/data/galaxy_image_64.npy -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = "-Q" 6 | SPHINXBUILD = sphinx-build 7 | SOURCEDIR = . 8 | BUILDDIR = _build 9 | 10 | # Put it first so that "make" without argument is like "make help". 11 | help: 12 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 13 | 14 | .PHONY: help Makefile 15 | 16 | # Catch-all target: route all unknown targets to Sphinx using the new 17 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
18 | %: Makefile 19 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 20 | -------------------------------------------------------------------------------- /docs/_static/css/custom.css: -------------------------------------------------------------------------------- 1 | /* Change code blocks font and bump up font size slightly (normally 12px)*/ 2 | .rst-content pre.literal-block, .rst-content div[class^="highlight"] pre, .rst-content .linenodiv pre { 3 | font-family: 'Nunito', monospace !important; 4 | font-size: 14px !important; 5 | white-space: pre-wrap; 6 | } 7 | 8 | /* Change code descriptions and literal blocks (inline code via ``) to match the normal font size being used in the sphinx_RTD_theme text (normally 14px)*/ 9 | .rst-content dl:not(.docutils) tt.descname,.rst-content dl:not(.docutils) tt.descclassname,.rst-content dl:not(.docutils) tt.descname,.rst-content dl:not(.docutils) code.descname,.rst-content dl:not(.docutils) tt.descclassname,.rst-content dl:not(.docutils) code.descclassname, code.docutils { 10 | font-family: 'Nunito', monospace !important; 11 | font-size: 16px !important; 12 | } 13 | 14 | /* Change default max width from 800px to better suit max 150 line width in */ 15 | .wy-nav-content { 16 | max-width: 2000px !important; 17 | } 18 | 19 | .wy-side-nav-search .wy-dropdown > a img.logo, .wy-side-nav-search > a img.logo { 20 | width: 150px; 21 | } -------------------------------------------------------------------------------- /docs/_static/css/custom_tabs.css: -------------------------------------------------------------------------------- 1 | .sphinx-tabs { 2 | margin-bottom: 1rem; 3 | } 4 | 5 | [role="tablist"] { 6 | border-bottom: 0px solid white; 7 | } 8 | 9 | .sphinx-tabs-tab { 10 | position: relative; 11 | font-family: Lato,'Helvetica Neue',Arial,Helvetica,sans-serif; 12 | color: #1D5C87; 13 | line-height: 24px; 14 | margin: 0; 15 | font-size: 16px; 16 | font-weight: 400; 17 | font-color: #1D5C87; 18 | 
background-color: rgba(0, 0, 0, 0); 19 | border-radius: 0 0 0 0; 20 | border-bottom: 0; 21 | border: 0; 22 | padding: 1rem 1.5rem; 23 | margin-bottom: 0; 24 | } 25 | 26 | .sphinx-tabs-tab[aria-selected="true"] { 27 | font-weight: 800; 28 | border: 0px solid white; 29 | /*border-bottom: 0px solid white;*/ 30 | margin: -1px; 31 | background: rgba(156, 206, 240, 0.1); 32 | } 33 | 34 | .sphinx-tabs-tab[aria-selected="false"] { 35 | border-bottom: 0px solid white; 36 | } 37 | 38 | .sphinx-tabs-tab:focus { 39 | z-index: 1; 40 | outline-offset: 1px; 41 | } 42 | 43 | .sphinx-tabs-panel { 44 | position: relative; 45 | padding: 1rem; 46 | border: 0px solid white; 47 | margin: 0px -1px -1px -1px; 48 | border-radius: 0 0 0 0; 49 | /*border-top: 0;*/ 50 | /*background: white;*/ 51 | background: rgba(156, 206, 240, 0.1); 52 | } 53 | 54 | .sphinx-tabs-panel.code-tab { 55 | padding: 0.4rem; 56 | } 57 | 58 | .sphinx-tab img { 59 | margin-bottom: 24 px; 60 | } -------------------------------------------------------------------------------- /docs/api/index.rst: -------------------------------------------------------------------------------- 1 | ************************** 2 | Namespaces 3 | ************************** 4 | Automatically generated documentation for ``ProxNest`` APIs. All functionality is accessible through a pip installation of the ``ProxNest`` package. 5 | 6 | .. tabs:: 7 | 8 | .. tab:: Sampling 9 | 10 | .. tabs:: 11 | 12 | .. tab:: Proximal Nested 13 | 14 | .. include:: prox_nested.rst 15 | 16 | .. tab:: Sampling Utils 17 | 18 | .. include:: resampling.rst 19 | 20 | 21 | .. tab:: Optimisations 22 | 23 | .. tabs:: 24 | 25 | .. tab:: :math:`\ell_2`-ball proj 26 | 27 | .. include:: l2_ball_proj.rst 28 | 29 | .. tab:: :math:`\ell_1`-norm prox 30 | 31 | .. include:: l1_norm_prox.rst 32 | 33 | .. tab:: TV-norm prox 34 | 35 | .. include:: tv_norm_prox.rst 36 | 37 | .. tab:: Operators 38 | 39 | .. tabs:: 40 | 41 | .. tab:: Proximal 42 | 43 | .. 
include:: prox_ops.rst 44 | 45 | .. tab:: Wavelet 46 | 47 | .. include:: wav_ops.rst 48 | 49 | .. tab:: Sensing 50 | 51 | .. include:: sense_ops.rst 52 | 53 | .. tab:: Utils 54 | 55 | .. include:: utils.rst 56 | 57 | .. tab:: Logs 58 | 59 | .. include:: logs.rst -------------------------------------------------------------------------------- /docs/api/l1_norm_prox.rst: -------------------------------------------------------------------------------- 1 | .. automodule:: ProxNest.optimisations.l1_norm_prox 2 | :members: 3 | -------------------------------------------------------------------------------- /docs/api/l2_ball_proj.rst: -------------------------------------------------------------------------------- 1 | .. automodule:: ProxNest.optimisations.l2_ball_proj 2 | :members: 3 | -------------------------------------------------------------------------------- /docs/api/logs.rst: -------------------------------------------------------------------------------- 1 | .. automodule:: ProxNest.logs 2 | :members: 3 | -------------------------------------------------------------------------------- /docs/api/prox_nested.rst: -------------------------------------------------------------------------------- 1 | .. automodule:: ProxNest.sampling.proximal_nested 2 | :members: 3 | -------------------------------------------------------------------------------- /docs/api/prox_ops.rst: -------------------------------------------------------------------------------- 1 | .. automodule:: ProxNest.operators.proximal_operators 2 | :members: 3 | -------------------------------------------------------------------------------- /docs/api/resampling.rst: -------------------------------------------------------------------------------- 1 | .. automodule:: ProxNest.sampling.resampling 2 | :members: 3 | -------------------------------------------------------------------------------- /docs/api/sense_ops.rst: -------------------------------------------------------------------------------- 1 | .. 
automodule:: ProxNest.operators.sensing_operators 2 | :members: 3 | -------------------------------------------------------------------------------- /docs/api/tv_norm_prox.rst: -------------------------------------------------------------------------------- 1 | .. automodule:: ProxNest.optimisations.tv_norm_prox 2 | :members: 3 | -------------------------------------------------------------------------------- /docs/api/utils.rst: -------------------------------------------------------------------------------- 1 | .. automodule:: ProxNest.utils 2 | :members: 3 | -------------------------------------------------------------------------------- /docs/api/wav_ops.rst: -------------------------------------------------------------------------------- 1 | .. automodule:: ProxNest.operators.wavelet_operators 2 | :members: 3 | -------------------------------------------------------------------------------- /docs/assets/ProxNestLogo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/astro-informatics/proxnest/bd12e29434b36815d5bf52a39b27391fc193a6ce/docs/assets/ProxNestLogo.png -------------------------------------------------------------------------------- /docs/assets/static_notebooks/gaussian_benchmark.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "__Gaussian Benchmarking__\n", 8 | "---" 9 | ] 10 | }, 11 | { 12 | "cell_type": "code", 13 | "execution_count": 1, 14 | "metadata": {}, 15 | "outputs": [], 16 | "source": [ 17 | "import numpy as np\n", 18 | "import ProxNest.utils as utils\n", 19 | "import ProxNest.sampling as sampling\n", 20 | "import ProxNest.optimisations as optimisations\n", 21 | "import ProxNest.operators as operators" 22 | ] 23 | }, 24 | { 25 | "cell_type": "markdown", 26 | "metadata": {}, 27 | "source": [ 28 | "### Generate mock data " 29 | ] 30 | }, 31 | 
{ 32 | "cell_type": "code", 33 | "execution_count": 2, 34 | "metadata": {}, 35 | "outputs": [], 36 | "source": [ 37 | "# Dimension of Gaussian\n", 38 | "dimension = 200\n", 39 | "\n", 40 | "# A simple identity forward model and redundant dictionary\n", 41 | "phi = operators.sensing_operators.Identity()\n", 42 | "psi = operators.sensing_operators.Identity()\n", 43 | "\n", 44 | "# Generate a vector drawn from a Uniform distribution\n", 45 | "image = np.random.rand(dimension, 1)\n", 46 | "\n", 47 | "# Simulate some unit variance Gaussian noise on this random vector\n", 48 | "sigma = 1\n", 49 | "n = sigma*np.random.randn(dimension, 1)\n", 50 | "image = image + n" 51 | ] 52 | }, 53 | { 54 | "cell_type": "markdown", 55 | "metadata": {}, 56 | "source": [ 57 | "### Define parameters" 58 | ] 59 | }, 60 | { 61 | "cell_type": "code", 62 | "execution_count": 6, 63 | "metadata": {}, 64 | "outputs": [], 65 | "source": [ 66 | "# Define a regularisation parameter (this should be tuned for a given problem)\n", 67 | "delta = 1/2\n", 68 | "\n", 69 | "# Parameter dictionary associated with optimisation problem of resampling from the prior subject to the likelihood iso-ball\n", 70 | "params = utils.create_parameters_dict(\n", 71 | " y = image, # Measurements i.e. 
data\n", 72 | " Phi = phi, # Forward model\n", 73 | " epsilon = 1e-3, # Radius of L2-ball of likelihood \n", 74 | " tight = True, # Is Phi a tight frame or not?\n", 75 | " nu = 1, # Bound on the squared-norm of Phi\n", 76 | " tol = 1e-10, # Convergence tolerance of algorithm\n", 77 | " max_iter = 200, # Maximum number of iterations\n", 78 | " verbose = 0, # Verbosity level\n", 79 | " u = 0, # Initial vector for the dual problem\n", 80 | " pos = True, # Positivity flag\n", 81 | " reality = True # Reality flag\n", 82 | ")\n", 83 | "\n", 84 | "# Options dictionary associated with the overall sampling algorithm\n", 85 | "options = utils.create_options_dict(\n", 86 | " samplesL = 2e4, # Number of live samples\n", 87 | " samplesD = 3e5, # Number of discarded samples \n", 88 | " thinning = 1e1, # Thinning factor (to mitigate correlations)\n", 89 | " delta = 1e-2, # Discretisation stepsize\n", 90 | " burn = 1e2, # Number of burn in samples\n", 91 | " sigma = sigma # Noise standard deviation of degraded image\n", 92 | ")" 93 | ] 94 | }, 95 | { 96 | "cell_type": "markdown", 97 | "metadata": {}, 98 | "source": [ 99 | "### Create lambda functions" 100 | ] 101 | }, 102 | { 103 | "cell_type": "code", 104 | "execution_count": 7, 105 | "metadata": {}, 106 | "outputs": [], 107 | "source": [ 108 | "# Lambda functions to evaluate cost function\n", 109 | "LogLikeliL = lambda sol : - np.linalg.norm(image-phi.dir_op(sol))**2/(2*sigma**2)\n", 110 | "\n", 111 | "# Lambda function for L2-norm identity prior backprojection steps\n", 112 | "proxH = lambda x, T : x - 2*T*psi.adj_op(psi.dir_op(x))*2*delta\n", 113 | "\n", 114 | "# Lambda function for L2-ball likelihood projection during resampling\n", 115 | "proxB = lambda x, tau: optimisations.l2_ball_proj.sopt_fast_proj_B2(x, tau, params)" 116 | ] 117 | }, 118 | { 119 | "cell_type": "markdown", 120 | "metadata": {}, 121 | "source": [ 122 | "### Perform Proximal Nested Sampling" 123 | ] 124 | }, 125 | { 126 | "cell_type": "code", 127 | 
"execution_count": 8, 128 | "metadata": {}, 129 | "outputs": [ 130 | { 131 | "name": "stderr", 132 | "output_type": "stream", 133 | "text": [ 134 | "ProxNest || Initialise: 100%|██████████| 200/200 [00:00<00:00, 41102.49it/s]\n", 135 | "ProxNest || Populate: 100%|██████████| 200098/200098 [00:02<00:00, 90153.93it/s]\n", 136 | "ProxNest || Sample: 100%|██████████| 300000/300000 [00:13<00:00, 22818.77it/s]\n", 137 | "ProxNest || Compute Weights: 100%|██████████| 300000/300000 [00:00<00:00, 1758625.40it/s]\n", 138 | "ProxNest || Trapezium Integrate: 100%|██████████| 299998/299998 [00:00<00:00, 2324879.32it/s]\n", 139 | "ProxNest || Estimate Variance: 100%|██████████| 300000/300000 [00:00<00:00, 600760.56it/s]\n", 140 | "ProxNest || Compute Posterior Mean: 100%|██████████| 300000/300000 [00:00<00:00, 667114.42it/s]\n" 141 | ] 142 | } 143 | ], 144 | "source": [ 145 | "# Select a starting position\n", 146 | "X0 = np.abs(phi.adj_op(image))\n", 147 | "\n", 148 | "# Perform proximal nested sampling\n", 149 | "NS_BayEvi, NS_Trace = sampling.proximal_nested.ProxNestedSampling(X0, LogLikeliL, proxH, proxB, params, options)\n", 150 | "rescaled_evidence_estimate = NS_BayEvi[0] + np.log(np.pi/delta)*(dimension/2)" 151 | ] 152 | }, 153 | { 154 | "cell_type": "markdown", 155 | "metadata": {}, 156 | "source": [ 157 | "### Evaluate analytic evidence" 158 | ] 159 | }, 160 | { 161 | "cell_type": "code", 162 | "execution_count": 10, 163 | "metadata": {}, 164 | "outputs": [], 165 | "source": [ 166 | "detPar = 1/(2*delta+1/sigma**2)\n", 167 | "ySquare= np.linalg.norm(image,'fro')**2\n", 168 | "BayEvi_Val_gt_log = np.log(np.sqrt(((2*np.pi)**dimension)*(detPar**dimension))) + (-ySquare/(2*sigma**2)) + (detPar/2)*(ySquare/sigma**4)" 169 | ] 170 | }, 171 | { 172 | "cell_type": "markdown", 173 | "metadata": {}, 174 | "source": [ 175 | "### Compare evidence estimates" 176 | ] 177 | }, 178 | { 179 | "cell_type": "code", 180 | "execution_count": 12, 181 | "metadata": {}, 182 | "outputs": [ 183 | 
{ 184 | "name": "stdout", 185 | "output_type": "stream", 186 | "text": [ 187 | "44.97347444294445\n", 188 | "48.985383360556256\n" 189 | ] 190 | } 191 | ], 192 | "source": [ 193 | "print(rescaled_evidence_estimate)\n", 194 | "print(BayEvi_Val_gt_log)" 195 | ] 196 | } 197 | ], 198 | "metadata": { 199 | "kernelspec": { 200 | "display_name": "ProxNest", 201 | "language": "python", 202 | "name": "proxnest" 203 | }, 204 | "language_info": { 205 | "codemirror_mode": { 206 | "name": "ipython", 207 | "version": 3 208 | }, 209 | "file_extension": ".py", 210 | "mimetype": "text/x-python", 211 | "name": "python", 212 | "nbconvert_exporter": "python", 213 | "pygments_lexer": "ipython3", 214 | "version": "3.9.0" 215 | }, 216 | "orig_nbformat": 4 217 | }, 218 | "nbformat": 4, 219 | "nbformat_minor": 2 220 | } 221 | -------------------------------------------------------------------------------- /docs/background/index.rst: -------------------------------------------------------------------------------- 1 | ********************************************** 2 | Some background information 3 | ********************************************** 4 | Add some back ground text here 5 | 6 | A subtitle alluding to more details 7 | ======================================================= 8 | Some more details 9 | 10 | .. tabs:: 11 | 12 | .. tab:: Some 13 | 14 | .. tab:: Nice 15 | 16 | .. tab:: Tabs 17 | 18 | .. note:: A note for additional practical or theoretical considerations! 19 | 20 | .. warning:: A warning for something that could or has gone wrong! 21 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Configuration file for the Sphinx documentation builder. 4 | # 5 | # This file does only contain a selection of the most common options. 
For a 6 | # full list see the documentation: 7 | # http://www.sphinx-doc.org/en/master/config 8 | 9 | # -- Path setup -------------------------------------------------------------- 10 | 11 | # If extensions (or modules to document with autodoc) are in another directory, 12 | # add these directories to sys.path here. If the directory is relative to the 13 | # documentation root, use os.path.abspath to make it absolute, like shown here. 14 | 15 | import os 16 | import sys 17 | sys.path.insert(0, os.path.abspath('..')) 18 | 19 | 20 | # -- Project information ----------------------------------------------------- 21 | 22 | project = 'ProxNest' 23 | copyright = '2022, Xiaohao Cai, Jason McEwen, Marcelo Pereyra, Matthew Price' 24 | author = 'Xiaohao Cai, Jason McEwen, Marcelo Pereyra, Matthew Price' 25 | 26 | # The short X.Y version 27 | version = '0.0.1' 28 | # The full version, including alpha/beta/rc tags 29 | release = '0.0.1' 30 | 31 | 32 | # -- General configuration --------------------------------------------------- 33 | 34 | # If your documentation needs a minimal Sphinx version, state it here. 35 | # 36 | # needs_sphinx = '1.0' 37 | 38 | # Add any Sphinx extension module names here, as strings. They can be 39 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 40 | # ones. 
41 | extensions = [ 42 | 'nbsphinx_link', 43 | 'sphinx.ext.autodoc', 44 | 'sphinx.ext.napoleon', 45 | 'sphinx.ext.mathjax', 46 | 'sphinx.ext.githubpages', 47 | 'sphinx_rtd_theme', 48 | 'sphinx_rtd_dark_mode', 49 | 'nbsphinx', 50 | 'IPython.sphinxext.ipython_console_highlighting', 51 | 'sphinx_tabs.tabs', 52 | 'sphinx_git', 53 | 'sphinxcontrib.texfigure', 54 | 'sphinx.ext.autosectionlabel', 55 | ] 56 | 57 | nbsphinx_execute = 'never' 58 | napoleon_google_docstring = True 59 | napoleon_include_init_with_doc = True 60 | napoleon_numpy_docstring = False 61 | #autosummary_generate = True 62 | #autoclass_content = "class" 63 | #autodoc_default_flags = ["members", "no-special-members"] 64 | #always_document_param_types = False 65 | 66 | 67 | # Add any paths that contain templates here, relative to this directory. 68 | templates_path = ['_templates'] 69 | 70 | # The suffix(es) of source filenames. 71 | # You can specify multiple suffix as a list of string: 72 | # 73 | # source_suffix = ['.rst', '.md'] 74 | source_suffix = ['.rst', '.ipynb'] 75 | 76 | # The master toctree document. 77 | master_doc = 'index' 78 | 79 | # The language for content autogenerated by Sphinx. Refer to documentation 80 | # for a list of supported languages. 81 | # 82 | # This is also used if you do content translation via gettext catalogs. 83 | # Usually you set "language" from the command line for these cases. 84 | language = None 85 | 86 | # List of patterns, relative to source directory, that match files and 87 | # directories to ignore when looking for source files. 88 | # This pattern also affects html_static_path and html_extra_path. 89 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] 90 | 91 | # The name of the Pygments (syntax highlighting) style to use. 
92 | pygments_style = None 93 | default_dark_mode = False 94 | sphinx_tabs_disable_css_loading = True 95 | 96 | # -- Options for HTML output ------------------------------------------------- 97 | 98 | # The theme to use for HTML and HTML Help pages. See the documentation for 99 | # a list of builtin themes. 100 | # 101 | import sphinx_rtd_theme 102 | html_theme = "sphinx_rtd_theme" 103 | html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] 104 | 105 | # html_logo = "assets/placeholder_logo.svg" 106 | # html_logo = "assets/placeholder_logo.png" 107 | html_logo = "assets/ProxNestLogo.png" 108 | html_theme_options = { 109 | 'logo_only': True, 110 | 'display_version': True, 111 | # 'style_nav_header_background': '#C48EDC', 112 | } 113 | 114 | # Theme options are theme-specific and customize the look and feel of a theme 115 | # further. For a list of options available for each theme, see the 116 | # documentation. 117 | # 118 | #html_theme_options = {} 119 | 120 | # Add any paths that contain custom static files (such as style sheets) here, 121 | # relative to this directory. They are copied after the builtin static files, 122 | # so a file named "default.css" will overwrite the builtin "default.css". 123 | html_static_path = ['_static'] 124 | html_css_files = [ 125 | 'css/custom.css', 126 | 'css/custom_tabs.css', 127 | ] 128 | 129 | # Custom sidebar templates, must be a dictionary that maps document names 130 | # to template names. 131 | # 132 | # The default sidebars (for documents that don't match any pattern) are 133 | # defined by theme itself. Builtin themes are using these templates by 134 | # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', 135 | # 'searchbox.html']``. 136 | # 137 | # html_sidebars = {} 138 | 139 | 140 | # -- Options for HTMLHelp output --------------------------------------------- 141 | 142 | # Output file base name for HTML help builder. 
143 | htmlhelp_basename = 'ProxNestdoc' 144 | 145 | 146 | # -- Options for LaTeX output ------------------------------------------------ 147 | 148 | latex_elements = { 149 | # The paper size ('letterpaper' or 'a4paper'). 150 | # 151 | # 'papersize': 'letterpaper', 152 | 153 | # The font size ('10pt', '11pt' or '12pt'). 154 | # 155 | # 'pointsize': '10pt', 156 | 157 | # Additional stuff for the LaTeX preamble. 158 | # 159 | # 'preamble': '', 160 | 161 | # Latex figure (float) alignment 162 | # 163 | # 'figure_align': 'htbp', 164 | } 165 | 166 | # Grouping the document tree into LaTeX files. List of tuples 167 | # (source start file, target name, title, 168 | # author, documentclass [howto, manual, or own class]). 169 | latex_documents = [ 170 | (master_doc, 'ProxNest.tex', 'ProxNest Documentation', 171 | 'Author names', 'manual'), 172 | ] 173 | 174 | 175 | # -- Options for manual page output ------------------------------------------ 176 | 177 | # One entry per manual page. List of tuples 178 | # (source start file, name, description, authors, manual section). 179 | man_pages = [ 180 | (master_doc, 'proxnest', 'ProxNest Documentation', 181 | [author], 1) 182 | ] 183 | 184 | 185 | # -- Options for Texinfo output ---------------------------------------------- 186 | 187 | # Grouping the document tree into Texinfo files. List of tuples 188 | # (source start file, target name, title, author, 189 | # dir menu entry, description, category) 190 | texinfo_documents = [ 191 | (master_doc, 'ProxNest', 'ProxNest Documentation', 192 | author, 'ProxNest', 'Proximal nested sampling for high-dimensional Bayesian inference', 193 | 'Miscellaneous'), 194 | ] 195 | 196 | 197 | # -- Options for Epub output ------------------------------------------------- 198 | 199 | # Bibliographic Dublin Core info. 200 | epub_title = "proxnest" 201 | 202 | # The unique identifier of the text. This can be a ISBN number 203 | # or the project homepage. 
204 | # 205 | # epub_identifier = '' 206 | 207 | # A unique identification for the text. 208 | # 209 | # epub_uid = '' 210 | 211 | # A list of files that should not be packed into the epub file. 212 | epub_exclude_files = ['search.html'] 213 | 214 | suppress_warnings = [ 'autosectionlabel.*', 'autodoc','autodoc.import_object'] 215 | 216 | 217 | # -- Extension configuration ------------------------------------------------- 218 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | |GitHub| |Build Status| |CodeCov| |PyPi| |GPL license| |ArXiv| 2 | 3 | .. |GitHub| image:: https://img.shields.io/badge/GitHub-ProxNest-brightgreen.svg?style=flat 4 | :target: https://github.com/astro-informatics/proxnest 5 | .. |Build Status| image:: https://github.com/astro-informatics/proxnest/actions/workflows/tests.yml/badge.svg 6 | :target: https://github.com/astro-informatics/proxnest/actions/workflows/tests.yml 7 | .. |CodeCov| image:: https://codecov.io/gh/astro-informatics/proxnest/branch/main/graph/badge.svg?token=oGowwdoMRN 8 | :target: https://codecov.io/gh/astro-informatics/proxnest 9 | .. |PyPi| image:: https://badge.fury.io/py/ProxNest.svg 10 | :target: https://badge.fury.io/py/ProxNest 11 | .. |GPL License| image:: https://img.shields.io/badge/License-GPL-blue.svg 12 | :target: http://perso.crans.org/besson/LICENSE.html 13 | .. |ArXiv| image:: http://img.shields.io/badge/arXiv-2106.03646-orange.svg?style=flat 14 | :target: https://arxiv.org/abs/2106.03646 15 | 16 | ProxNest: Proximal nested sampling for high-dimensional Bayesian model selection 17 | ================================================================================= 18 | 19 | ``ProxNest`` is an open source, well tested and documented Python implementation of the *proximal nested sampling* algorithm (`Cai et al. 
2022 `_) which is uniquely suited for sampling from very high-dimensional posteriors that are log-concave and potentially not smooth (*e.g.* Laplace priors). This is achieved by exploiting tools from proximal calculus and Moreau-Yosida regularisation (`Moreau 1962 `_) to efficiently sample from the prior subject to the hard likelihood constraint. The resulting Markov chain iterations include a gradient step, approximating (with arbitrary precision) an overdamped Langevin SDE that can scale to very high-dimensional applications. 20 | 21 | Basic Usage 22 | =========== 23 | 24 | The following is a straightforward example application to image denoising (Phi = I), regularised with Daubechies wavelets (DB6). 25 | 26 | .. code-block:: Python 27 | 28 | # Import relevant modules. 29 | import numpy as np 30 | import ProxNest 31 | 32 | # Load your data and set parameters. 33 | data = np.load() 34 | params = params # Parameters of the prior resampling optimisation problem. 35 | options = options # Options associated with the sampling strategy. 36 | 37 | # Construct your forward model (phi) and wavelet operators (psi). 38 | phi = ProxNest.operators.sensing_operators.Identity() 39 | psi = ProxNest.operators.wavelet_operators.db_wavelets(["db6"], 2, (dim, dim)) 40 | 41 | # Define proximal operators for both your likelihood and prior. 
42 | proxH = lambda x, T : ProxNest.operators.proximal_operators.l1_projection(x, T, delta, Psi=psi) 43 | proxB = lambda x, tau: ProxNest.optimisations.l2_ball_proj.sopt_fast_proj_B2(x, tau, params) 44 | 45 | # Write a lambda function to evaluate your likelihood term (here a Gaussian) 46 | LogLikeliL = lambda sol : - np.linalg.norm(y-phi.dir_op(sol), 'fro')**2/(2*sigma**2) 47 | 48 | # Perform proximal nested sampling 49 | BayEvi, XTrace = ProxNest.sampling.proximal_nested.ProxNestedSampling( 50 | np.abs(phi.adj_op(data)), LogLikeliL, proxH, proxB, params, options 51 | ) 52 | 53 | At this point you have recovered the tuple **BayEvi** and dict **Xtrace** which contain 54 | 55 | .. code-block:: python 56 | 57 | Live = options["samplesL"] # Number of live samples 58 | Disc = options["samplesD"] # Number of discarded samples 59 | 60 | # BayEvi is a tuple containing two values: 61 | BayEvi[0] = 'Estimate of Bayesian evidence (float).' 62 | BayEvi[1] = 'Variance of Bayesian evidence estimate (float).' 63 | 64 | # XTrace is a dictionary containing the np.ndarrays: 65 | XTrace['Liveset'] = 'Set of live samples (shape: Live, dim, dim).' 66 | XTrace['LivesetL'] = 'Likelihood of live samples (shape: Live).' 67 | 68 | XTrace['Discard'] = 'Set of discarded samples (shape: Disc, dim, dim).' 69 | XTrace['DiscardL'] = 'Likelihood of discarded samples (shape: Disc).' 70 | XTrace['DiscardW'] = 'Weights of discarded samples (shape: Disc).' 71 | 72 | XTrace['DiscardPostProb'] = 'Posterior probability of discarded samples (shape: Disc)' 73 | XTrace['DiscardPostMean'] = 'Posterior mean solution (shape: dim, dim)' 74 | 75 | from which one can perform *e.g.* Bayesian model comparison. 76 | 77 | Referencing 78 | =========== 79 | A BibTeX entry for ``ProxNest`` is: 80 | 81 | .. code-block:: 82 | 83 | @article{Cai:ProxNest:2021, 84 | author = {Cai, Xiaohao and McEwen, Jason~D. 
and Pereyra, Marcelo}, 85 | title = {High-dimensional Bayesian model selection by proximal nested sampling}, 86 | journal = {ArXiv}, 87 | eprint = {arXiv:2106.03646}, 88 | year = {2021} 89 | } 90 | 91 | .. bibliography:: 92 | :notcited: 93 | :list: bullet 94 | 95 | .. toctree:: 96 | :hidden: 97 | :maxdepth: 2 98 | :caption: User Guide 99 | 100 | user_guide/install 101 | 102 | .. toctree:: 103 | :hidden: 104 | :maxdepth: 1 105 | :caption: Interactive Tutorials 106 | 107 | tutorials/gaussian_benchmark.nblink 108 | tutorials/galaxy_denoising.nblink 109 | tutorials/galaxy_radio.nblink 110 | 111 | .. toctree:: 112 | :hidden: 113 | :maxdepth: 2 114 | :caption: API 115 | 116 | api/index 117 | 118 | 119 | 120 | -------------------------------------------------------------------------------- /docs/tutorials/galaxy_denoising.nblink: -------------------------------------------------------------------------------- 1 | { 2 | "path": "../assets/static_notebooks/galaxy_denoising.ipynb" 3 | } -------------------------------------------------------------------------------- /docs/tutorials/galaxy_radio.nblink: -------------------------------------------------------------------------------- 1 | { 2 | "path": "../assets/static_notebooks/galaxy_radio.ipynb" 3 | } -------------------------------------------------------------------------------- /docs/tutorials/gaussian_benchmark.nblink: -------------------------------------------------------------------------------- 1 | { 2 | "path": "../assets/static_notebooks/gaussian_benchmark.ipynb" 3 | } -------------------------------------------------------------------------------- /docs/user_guide/install.rst: -------------------------------------------------------------------------------- 1 | .. _install: 2 | 3 | Installation 4 | ============ 5 | We recommend installing ``ProxNest`` through `PyPi `_ , however in some cases one may wish to install ``ProxNest`` directly from source, which is also relatively straightforward.
6 | 7 | Quick install (PyPi) 8 | -------------------- 9 | Install ``ProxNest`` from PyPi with a single command 10 | 11 | .. code-block:: bash 12 | 13 | pip install ProxNest 14 | 15 | Check that the package has installed by running 16 | 17 | .. code-block:: bash 18 | 19 | pip list 20 | 21 | and locate ProxNest. 22 | 23 | 24 | Install from source (GitHub) 25 | ---------------------------- 26 | 27 | When installing from source we recommend working within an existing conda environment, or creating a fresh conda environment to avoid any dependency conflicts, 28 | 29 | .. code-block:: bash 30 | 31 | conda create -n proxnest_env python=3.9 32 | conda activate proxnest_env 33 | 34 | Once within a fresh environment ``ProxNest`` may be installed by cloning the GitHub repository 35 | 36 | .. code-block:: bash 37 | 38 | git clone https://github.com/astro-informatics/proxnest 39 | cd proxnest 40 | 41 | and running the install script, within the root directory, with one command 42 | 43 | .. code-block:: bash 44 | 45 | bash build_proxnest.sh 46 | 47 | To check the install has worked correctly run the unit tests with 48 | 49 | .. code-block:: bash 50 | 51 | pytest --black ProxNest/tests/ 52 | 53 | .. note:: For installing from source a conda environment is required by the installation bash script, which is recommended, due to a pandoc dependency. 
54 | -------------------------------------------------------------------------------- /logs/logging.yaml: -------------------------------------------------------------------------------- 1 | # ============================================================= 2 | # Logging setup for ProxNest Software package (2022) 3 | # ============================================================= 4 | 5 | version: 1 6 | disable_existing_loggers: False 7 | formatters: 8 | simple: 9 | format: "[%(asctime)s] [%(name)s] [%(levelname)s]: %(message)s" 10 | colored: 11 | (): "colorlog.ColoredFormatter" 12 | datefmt: "%Y-%m-%d %H:%M:%S" 13 | format: "%(log_color)s[%(asctime)s] [%(name)s] [%(levelname)s]: %(message)s%(reset)s" 14 | log_colors: 15 | DEBUG: blue 16 | INFO: cyan 17 | WARNING: purple 18 | ERROR: red # was "orange" — not a valid colorlog colour; formatting an ERROR record raised KeyError 19 | CRITICAL: bold_red # bold_red keeps CRITICAL distinguishable from ERROR 20 | 21 | handlers: 22 | console: 23 | class: logging.StreamHandler 24 | level: INFO 25 | formatter: colored 26 | stream: ext://sys.stdout 27 | 28 | info_file_handler: 29 | class: logging.FileHandler 30 | level: INFO 31 | formatter: simple 32 | filename: /logs/info.log # NOTE(review): absolute /logs/*.log paths — presumably rewritten by ProxNest.logs before use; verify 33 | encoding: utf8 34 | 35 | debug_file_handler: 36 | class: logging.FileHandler 37 | level: DEBUG 38 | formatter: simple 39 | filename: /logs/debug.log 40 | encoding: utf8 41 | 42 | critical_file_handler: 43 | class: logging.FileHandler 44 | level: CRITICAL 45 | formatter: simple 46 | filename: /logs/critical.log 47 | encoding: utf8 48 | 49 | loggers: 50 | ProxNest: 51 | level: DEBUG 52 | handlers: [console, critical_file_handler, info_file_handler, debug_file_handler] 53 | propagate: no 54 | 55 | root: 56 | level: INFO 57 | handlers: [console, info_file_handler, debug_file_handler] 58 | ... -------------------------------------------------------------------------------- /notebooks/gaussian_benchmark.ipynb: 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "
\n", 8 | "\n", 9 | "# [`ProxNest`](https://github.com/astro-informatics/proxnest) - __Gaussian Benchmarking__ Interactive Tutorial\n", 10 | "---" 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": 1, 16 | "metadata": {}, 17 | "outputs": [], 18 | "source": [ 19 | "import numpy as np\n", 20 | "import ProxNest.utils as utils\n", 21 | "import ProxNest.sampling as sampling\n", 22 | "import ProxNest.optimisations as optimisations\n", 23 | "import ProxNest.operators as operators" 24 | ] 25 | }, 26 | { 27 | "cell_type": "markdown", 28 | "metadata": {}, 29 | "source": [ 30 | "### Generate mock data " 31 | ] 32 | }, 33 | { 34 | "cell_type": "code", 35 | "execution_count": 2, 36 | "metadata": {}, 37 | "outputs": [], 38 | "source": [ 39 | "# Dimension of Gaussian\n", 40 | "dimension = 200\n", 41 | "\n", 42 | "# A simple identity forward model and redundant dictionary\n", 43 | "phi = operators.sensing_operators.Identity()\n", 44 | "psi = operators.sensing_operators.Identity()\n", 45 | "\n", 46 | "# Generate a vector drawn from a Uniform distribution\n", 47 | "image = np.random.rand(dimension, 1)\n", 48 | "\n", 49 | "# Simulate some unit variance Gaussian noise on this random vector\n", 50 | "sigma = 1\n", 51 | "n = sigma*np.random.randn(dimension, 1)\n", 52 | "image = image + n" 53 | ] 54 | }, 55 | { 56 | "cell_type": "markdown", 57 | "metadata": {}, 58 | "source": [ 59 | "### Define parameters" 60 | ] 61 | }, 62 | { 63 | "cell_type": "code", 64 | "execution_count": 6, 65 | "metadata": {}, 66 | "outputs": [], 67 | "source": [ 68 | "# Define a regularisation parameter (this should be tuned for a given problem)\n", 69 | "delta = 1/2\n", 70 | "\n", 71 | "# Parameter dictionary associated with optimisation problem of resampling from the prior subject to the likelihood iso-ball\n", 72 | "params = utils.create_parameters_dict(\n", 73 | " y = image, # Measurements i.e. 
data\n", 74 | " Phi = phi, # Forward model\n", 75 | " epsilon = 1e-3, # Radius of L2-ball of likelihood \n", 76 | " tight = True, # Is Phi a tight frame or not?\n", 77 | " nu = 1, # Bound on the squared-norm of Phi\n", 78 | " tol = 1e-10, # Convergence tolerance of algorithm\n", 79 | " max_iter = 200, # Maximum number of iterations\n", 80 | " verbose = 0, # Verbosity level\n", 81 | " u = 0, # Initial vector for the dual problem\n", 82 | " pos = True, # Positivity flag\n", 83 | " reality = True # Reality flag\n", 84 | ")\n", 85 | "\n", 86 | "# Options dictionary associated with the overall sampling algorithm\n", 87 | "options = utils.create_options_dict(\n", 88 | " samplesL = 2e4, # Number of live samples\n", 89 | " samplesD = 3e5, # Number of discarded samples \n", 90 | " thinning = 1e1, # Thinning factor (to mitigate correlations)\n", 91 | " delta = 1e-2, # Discretisation stepsize\n", 92 | " burn = 1e2, # Number of burn in samples\n", 93 | " sigma = sigma # Noise standard deviation of degraded image\n", 94 | ")" 95 | ] 96 | }, 97 | { 98 | "cell_type": "markdown", 99 | "metadata": {}, 100 | "source": [ 101 | "### Create lambda functions" 102 | ] 103 | }, 104 | { 105 | "cell_type": "code", 106 | "execution_count": 7, 107 | "metadata": {}, 108 | "outputs": [], 109 | "source": [ 110 | "# Lambda functions to evaluate cost function\n", 111 | "LogLikeliL = lambda sol : - np.linalg.norm(image-phi.dir_op(sol))**2/(2*sigma**2)\n", 112 | "\n", 113 | "# Lambda function for L2-norm identity prior backprojection steps\n", 114 | "proxH = lambda x, T : x - 2*T*psi.adj_op(psi.dir_op(x))*2*delta\n", 115 | "\n", 116 | "# Lambda function for L2-ball likelihood projection during resampling\n", 117 | "proxB = lambda x, tau: optimisations.l2_ball_proj.sopt_fast_proj_B2(x, tau, params)" 118 | ] 119 | }, 120 | { 121 | "cell_type": "markdown", 122 | "metadata": {}, 123 | "source": [ 124 | "### Perform Proximal Nested Sampling" 125 | ] 126 | }, 127 | { 128 | "cell_type": "code", 129 | 
"execution_count": 8, 130 | "metadata": {}, 131 | "outputs": [ 132 | { 133 | "name": "stderr", 134 | "output_type": "stream", 135 | "text": [ 136 | "ProxNest || Initialise: 100%|██████████| 200/200 [00:00<00:00, 41102.49it/s]\n", 137 | "ProxNest || Populate: 100%|██████████| 200098/200098 [00:02<00:00, 90153.93it/s]\n", 138 | "ProxNest || Sample: 100%|██████████| 300000/300000 [00:13<00:00, 22818.77it/s]\n", 139 | "ProxNest || Compute Weights: 100%|██████████| 300000/300000 [00:00<00:00, 1758625.40it/s]\n", 140 | "ProxNest || Trapezium Integrate: 100%|██████████| 299998/299998 [00:00<00:00, 2324879.32it/s]\n", 141 | "ProxNest || Estimate Variance: 100%|██████████| 300000/300000 [00:00<00:00, 600760.56it/s]\n", 142 | "ProxNest || Compute Posterior Mean: 100%|██████████| 300000/300000 [00:00<00:00, 667114.42it/s]\n" 143 | ] 144 | } 145 | ], 146 | "source": [ 147 | "# Select a starting position\n", 148 | "X0 = np.abs(phi.adj_op(image))\n", 149 | "\n", 150 | "# Perform proximal nested sampling\n", 151 | "NS_BayEvi, NS_Trace = sampling.proximal_nested.ProxNestedSampling(X0, LogLikeliL, proxH, proxB, params, options)\n", 152 | "rescaled_evidence_estimate = NS_BayEvi[0] + np.log(np.pi/delta)*(dimension/2)" 153 | ] 154 | }, 155 | { 156 | "cell_type": "markdown", 157 | "metadata": {}, 158 | "source": [ 159 | "### Evaluate analytic evidence" 160 | ] 161 | }, 162 | { 163 | "cell_type": "code", 164 | "execution_count": 10, 165 | "metadata": {}, 166 | "outputs": [], 167 | "source": [ 168 | "detPar = 1/(2*delta+1/sigma**2)\n", 169 | "ySquare= np.linalg.norm(image,'fro')**2\n", 170 | "BayEvi_Val_gt_log = np.log(np.sqrt(((2*np.pi)**dimension)*(detPar**dimension))) + (-ySquare/(2*sigma**2)) + (detPar/2)*(ySquare/sigma**4)" 171 | ] 172 | }, 173 | { 174 | "cell_type": "markdown", 175 | "metadata": {}, 176 | "source": [ 177 | "### Compare evidence estimates" 178 | ] 179 | }, 180 | { 181 | "cell_type": "code", 182 | "execution_count": 12, 183 | "metadata": {}, 184 | "outputs": [ 185 | 
{ 186 | "name": "stdout", 187 | "output_type": "stream", 188 | "text": [ 189 | "44.97347444294445\n", 190 | "48.985383360556256\n" 191 | ] 192 | } 193 | ], 194 | "source": [ 195 | "print(rescaled_evidence_estimate)\n", 196 | "print(BayEvi_Val_gt_log)" 197 | ] 198 | } 199 | ], 200 | "metadata": { 201 | "kernelspec": { 202 | "display_name": "ProxNest", 203 | "language": "python", 204 | "name": "proxnest" 205 | }, 206 | "language_info": { 207 | "codemirror_mode": { 208 | "name": "ipython", 209 | "version": 3 210 | }, 211 | "file_extension": ".py", 212 | "mimetype": "text/x-python", 213 | "name": "python", 214 | "nbconvert_exporter": "python", 215 | "pygments_lexer": "ipython3", 216 | "version": "3.9.0" 217 | }, 218 | "orig_nbformat": 4 219 | }, 220 | "nbformat": 4, 221 | "nbformat_minor": 2 222 | } 223 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | filterwarnings = 3 | error 4 | ignore::UserWarning 5 | ignore::DeprecationWarning 6 | -------------------------------------------------------------------------------- /requirements/requirements-core.txt: -------------------------------------------------------------------------------- 1 | # Packages required for core functionality 2 | numpy 3 | colorlog 4 | pyyaml 5 | PyWavelets 6 | 7 | tqdm -------------------------------------------------------------------------------- /requirements/requirements-docs.txt: -------------------------------------------------------------------------------- 1 | sphinx==4.2.0 2 | nbsphinx-link==1.3.0 3 | sphinx-rtd-theme==1.0.0 4 | sphinx_toolbox==2.15.0 5 | sphinx-tabs==3.2.0 6 | sphinx-rtd-dark-mode==1.2.4 7 | sphinxcontrib-bibtex==2.4.1 8 | sphinx-git==11.0.0 9 | sphinxcontrib-texfigure==0.1.3 -------------------------------------------------------------------------------- /requirements/requirements-notebooks.txt: 
-------------------------------------------------------------------------------- 1 | matplotlib 2 | scikit-image -------------------------------------------------------------------------------- /requirements/requirements-tests.txt: -------------------------------------------------------------------------------- 1 | # Beautifiers 2 | black 3 | pytest-black 4 | 5 | # Testing 6 | pytest 7 | pytest-cov 8 | codecov 9 | 10 | # Notebooks 11 | ipython==7.16.1 12 | jupyter==1.0.0 13 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | from setuptools import setup 4 | 5 | import numpy 6 | 7 | # clean previous build 8 | for root, dirs, files in os.walk("./ProxNest/", topdown=False): 9 | for name in dirs: 10 | if name == "build": 11 | shutil.rmtree(name) 12 | 13 | from os import path 14 | 15 | this_directory = path.abspath(path.dirname(__file__)) 16 | 17 | 18 | def read_requirements(file): 19 | with open(file) as f: 20 | return f.read().splitlines() 21 | 22 | 23 | def read_file(file): 24 | with open(file) as f: 25 | return f.read() 26 | 27 | 28 | long_description = read_file(".pip_readme.rst") 29 | required = read_requirements("requirements/requirements-core.txt") 30 | 31 | include_dirs = [ 32 | numpy.get_include(), 33 | ] 34 | 35 | extra_link_args = [] 36 | 37 | setup( 38 | classifiers=[ 39 | "Programming Language :: Python :: 3.6", 40 | "Programming Language :: Python :: 3.7", 41 | "Programming Language :: Python :: 3.8", 42 | "Programming Language :: Python :: 3.9", 43 | "Operating System :: OS Independent", 44 | "Intended Audience :: Developers", 45 | "Intended Audience :: Science/Research", 46 | ], 47 | name="ProxNest", 48 | version="0.0.1", 49 | prefix=".", 50 | url="https://github.com/astro-informatics/proxnest", 51 | author="Xiaohao Cai, Jason McEwen, Marcelo Pereyra, Matthew Price", 52 | 
author_email="x.cai@soton.ac.uk", 53 | license="GNU General Public License v3 (GPLv3)", 54 | install_requires=required, 55 | description="Proximal nested sampling for high-dimensional Bayesian model selection", 56 | long_description_content_type="text/x-rst", 57 | long_description=long_description, 58 | packages=["ProxNest", "ProxNest.operators", "ProxNest.optimisations", "ProxNest.sampling"], 59 | ) 60 | --------------------------------------------------------------------------------