├── .github
└── images
│ ├── benchmark-spatialsir.gif
│ └── logo.png
├── .gitignore
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── docs
├── Makefile
├── make.bat
└── source
│ ├── beginnersguide.rst
│ ├── conf.py
│ ├── developers.rst
│ ├── hypothesis.png
│ ├── index.rst
│ ├── installation.rst
│ └── modules.rst
├── examples
├── README.md
├── amortized-ratio-estimation
│ └── likelihood-to-evidence-minimal.py
├── approximate-bayesian-computation
│ ├── abc_sequential-monte-carlo.ipynb
│ └── classical_abc.ipynb
├── markov-chain-monte-carlo
│ ├── aalr_metropolis_hastings.ipynb
│ └── metropolis_hastings.ipynb
└── neuromodulation
│ └── neuromodulated_ratio_estimator.ipynb
├── hypothesis
├── __init__.py
├── auto
│ ├── __init__.py
│ ├── inference
│ │ ├── __init__.py
│ │ ├── base.py
│ │ └── util.py
│ └── training
│ │ ├── __init__.py
│ │ ├── amortized_ratio_estimation.py
│ │ ├── base.py
│ │ └── util.py
├── benchmark
│ ├── __init__.py
│ ├── base.py
│ ├── biomoleculardocking
│ │ ├── __init__.py
│ │ ├── simulator.py
│ │ └── util.py
│ ├── catapult
│ │ ├── __init__.py
│ │ ├── simulator.py
│ │ └── util.py
│ ├── death
│ │ ├── __init__.py
│ │ ├── simulator.py
│ │ └── util.py
│ ├── environment.py
│ ├── mg1
│ │ ├── __init__.py
│ │ ├── simulator.py
│ │ └── util.py
│ ├── normal
│ │ ├── __init__.py
│ │ ├── environment.py
│ │ ├── simulator.py
│ │ └── util.py
│ ├── seismic
│ │ ├── __init__.py
│ │ ├── simulator.py
│ │ └── util.py
│ ├── sir
│ │ ├── __init__.py
│ │ ├── simulator.py
│ │ └── util.py
│ ├── spatialsir
│ │ ├── __init__.py
│ │ ├── simulator.py
│ │ └── util.py
│ ├── tractable
│ │ ├── __init__.py
│ │ ├── simulator.py
│ │ └── util.py
│ └── weinberg
│ │ ├── __init__.py
│ │ ├── simulator.py
│ │ └── util.py
├── bin
│ ├── __init__.py
│ ├── hpc
│ │ └── __init__.py
│ ├── io
│ │ ├── __init__.py
│ │ ├── merge.py
│ │ └── prune.py
│ └── ratio_estimation
│ │ ├── __init__.py
│ │ └── train.py
├── default.py
├── diagnostic
│ ├── __init__.py
│ ├── base.py
│ ├── density.py
│ └── ratio_estimator.py
├── engine
│ ├── __init__.py
│ └── engine.py
├── exception
│ ├── __init__.py
│ ├── intractable.py
│ ├── no_event_registration_exception.py
│ ├── no_such_event.py
│ ├── not_divisible_by_two.py
│ └── simulator.py
├── inference
│ ├── __init__.py
│ ├── abc.py
│ ├── abc_smc.py
│ ├── avo.py
│ ├── mcmc.py
│ ├── proposal_distribution.py
│ ├── ratio.py
│ └── transition_distribution.py
├── metric
│ ├── __init__.py
│ ├── base.py
│ ├── dataset_loss.py
│ ├── exponential_average.py
│ └── roc_auc.py
├── nn
│ ├── __init__.py
│ ├── amortized_ratio_estimation
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── densenet
│ │ │ ├── __init__.py
│ │ │ └── likelihood_to_evidence.py
│ │ ├── likelihood_to_evidence.py
│ │ ├── multi_layered_perceptron
│ │ │ ├── __init__.py
│ │ │ ├── base.py
│ │ │ ├── likelihood_to_evidence.py
│ │ │ └── mutual_information.py
│ │ ├── mutual_information.py
│ │ ├── resnet
│ │ │ ├── __init__.py
│ │ │ ├── base.py
│ │ │ └── likelihood_to_evidence.py
│ │ └── util.py
│ ├── densenet
│ │ ├── __init__.py
│ │ ├── default.py
│ │ ├── head.py
│ │ ├── network.py
│ │ └── util.py
│ ├── multi_layered_perceptron.py
│ ├── neuromodulation
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── controller.py
│ │ ├── elu.py
│ │ ├── relu.py
│ │ ├── selu.py
│ │ └── tanh.py
│ ├── resnet
│ │ ├── __init__.py
│ │ ├── default.py
│ │ ├── head.py
│ │ ├── network.py
│ │ └── util.py
│ └── util.py
├── rl
│ ├── __init__.py
│ ├── dqv.py
│ ├── environment.py
│ ├── experience_replay_buffer.py
│ └── reinforce.py
├── simulation
│ ├── __init__.py
│ ├── base.py
│ └── util.py
├── stat
│ ├── __init__.py
│ └── constraint.py
├── summary
│ ├── __init__.py
│ ├── mcmc.py
│ └── train.py
├── util
│ ├── __init__.py
│ ├── data
│ │ ├── __init__.py
│ │ ├── distribution_dataset.py
│ │ ├── numpy
│ │ │ ├── __init__.py
│ │ │ ├── dataset.py
│ │ │ ├── simulation_dataset.py
│ │ │ ├── storage.py
│ │ │ └── util.py
│ │ ├── pandas
│ │ │ ├── __init__.py
│ │ │ └── simulation_dataset.py
│ │ ├── simulation_tensor_dataset.py
│ │ └── simulator_dataset.py
│ ├── general.py
│ └── loss.py
├── visualization
│ ├── __init__.py
│ ├── mcmc.py
│ ├── nn.py
│ └── util.py
└── workflow
│ └── __init__.py
├── setup.py
└── tutorials
└── README.md
/.github/images/benchmark-spatialsir.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/montefiore-institute/hypothesis/29a2b7b4649db345d43a8d3bf98aa5d817b43f1b/.github/images/benchmark-spatialsir.gif
--------------------------------------------------------------------------------
/.github/images/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/montefiore-institute/hypothesis/29a2b7b4649db345d43a8d3bf98aa5d817b43f1b/.github/images/logo.png
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 | MANIFEST
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 | .pytest_cache/
49 |
50 | # Translations
51 | *.mo
52 | *.pot
53 |
54 | # Django stuff:
55 | *.log
56 | local_settings.py
57 | db.sqlite3
58 |
59 | # Flask stuff:
60 | instance/
61 | .webassets-cache
62 |
63 | # Scrapy stuff:
64 | .scrapy
65 |
66 | # Sphinx documentation
67 | docs/_build/
68 | docs/build/
69 |
70 | # PyBuilder
71 | target/
72 |
73 | # Jupyter Notebook
74 | .ipynb_checkpoints
75 |
76 | # pyenv
77 | .python-version
78 |
79 | # celery beat schedule file
80 | celerybeat-schedule
81 |
82 | # SageMath parsed files
83 | *.sage.py
84 |
85 | # Environments
86 | .env
87 | .venv
88 | env/
89 | venv/
90 | ENV/
91 | env.bak/
92 | venv.bak/
93 |
94 | # Spyder project settings
95 | .spyderproject
96 | .spyproject
97 |
98 | # Rope project settings
99 | .ropeproject
100 |
101 | # mkdocs documentation
102 | /site
103 |
104 | # mypy
105 | .mypy_cache/
106 |
107 | # macOS
108 | .DS_Store
109 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/montefiore-institute/hypothesis/29a2b7b4649db345d43a8d3bf98aa5d817b43f1b/CONTRIBUTING.md
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2018, Joeri Hermans
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 |
9 | * Redistributions of source code must retain the above copyright notice, this
10 | list of conditions and the following disclaimer.
11 |
12 | * Redistributions in binary form must reproduce the above copyright notice,
13 | this list of conditions and the following disclaimer in the documentation
14 | and/or other materials provided with the distribution.
15 |
16 | * Neither the name of the copyright holder nor the names of its
17 | contributors may be used to endorse or promote products derived from
18 | this software without specific prior written permission.
19 |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ![Hypothesis logo](.github/images/logo.png)
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 | A Python toolkit for (likelihood-free) inference and the mechanization of the scientific method.
11 |
12 | ## Installation
13 |
14 | ### From source
15 |
16 | ```sh
17 | git clone https://github.com/montefiore-ai/hypothesis
18 | cd hypothesis
19 | pip install -e .
20 | ```
21 |
22 | ## Inference
23 |
24 | ### AALR-MCMC
25 |
26 | TODO
27 |
28 | ### Adversarial Variational Optimization
29 |
30 | TODO
31 |
32 | ### Amortized ratio estimation
33 |
34 | TODO
35 |
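In the meantime, a minimal sketch adapted from `examples/amortized-ratio-estimation/likelihood-to-evidence-minimal.py` (the `normal` benchmark, layer sizes, and dataset sizes are illustrative):

```python
import torch

from hypothesis.auto.training import LikelihoodToEvidenceRatioEstimatorTrainer as Trainer
from hypothesis.benchmark.normal import Prior
from hypothesis.benchmark.normal import Simulator
from hypothesis.nn.amortized_ratio_estimation import LikelihoodToEvidenceRatioEstimatorMLP as RatioEstimator
from torch.utils.data import TensorDataset


@torch.no_grad()
def allocate_dataset(n):
    # Pair draws from the prior with the corresponding simulator outputs.
    prior = Prior()
    simulator = Simulator()
    inputs = prior.sample(torch.Size([n])).view(-1, 1)
    outputs = simulator(inputs).view(-1, 1)
    return TensorDataset(inputs, outputs)


estimator = RatioEstimator(
    activation=torch.nn.SELU,
    layers=[128, 128, 128],
    shape_inputs=(1,),
    shape_outputs=(1,))
optimizer = torch.optim.Adam(estimator.parameters())
trainer = Trainer(
    estimator=estimator,
    optimizer=optimizer,
    dataset_train=allocate_dataset(100000),
    dataset_test=allocate_dataset(1000),
    epochs=10)
summary = trainer.fit()
print(summary)
```
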
36 | ### Approximate Bayesian Computation
37 |
38 | TODO
39 |
40 | ### Approximate Bayesian Computation - Sequential Monte Carlo
41 |
42 | TODO
43 |
44 | ### Likelihood-free Inference by Ratio Estimation
45 |
46 | TODO
47 |
48 | ### Metropolis-Hastings
49 |
50 | TODO
51 |
52 | ## Benchmark problems
53 |
54 | ### M/G/1
55 |
56 | ```python
57 | from hypothesis.benchmark.mg1 import Simulator
58 | from hypothesis.benchmark.mg1 import Prior
59 |
60 | simulator = Simulator()
61 | prior = Prior()
62 |
63 | inputs = prior.sample((10,)) # Draw 10 samples from the prior.
64 | outputs = simulator(inputs)
65 | ```
66 |
67 | ### Biomolecular docking
68 |
69 | > :heavy_check_mark: Supports experimental design
70 |
71 | ```python
72 | from hypothesis.benchmark.biomoleculardocking import Simulator
73 | from hypothesis.benchmark.biomoleculardocking import Prior
74 |
75 | simulator = Simulator()
76 | prior = Prior()
77 |
78 | inputs = prior.sample((10,)) # Draw 10 samples from the prior.
79 | outputs = simulator(inputs)
80 |
81 | from hypothesis.benchmark.biomoleculardocking import PriorExperiment # Experimental design space
82 |
83 | prior_experiment = PriorExperiment()
84 | experimental_designs = prior_experiment.sample((10,))
85 |
86 | outputs = simulator(inputs, experimental_designs)
87 | ```
88 |
89 | ### Stochastic Death model
90 |
91 | > :heavy_check_mark: Supports experimental design
92 |
93 | ```python
94 | from hypothesis.benchmark.death import Simulator
95 | from hypothesis.benchmark.death import Prior
96 |
97 | simulator = Simulator()
98 | prior = Prior()
99 |
100 | inputs = prior.sample((10,)) # Draw 10 samples from the prior.
101 | outputs = simulator(inputs)
102 |
103 | from hypothesis.benchmark.death import PriorExperiment # Experimental design space
104 |
105 | prior_experiment = PriorExperiment()
106 | experimental_designs = prior_experiment.sample((10,))
107 |
108 | outputs = simulator(inputs, experimental_designs)
109 | ```
110 |
111 | ### Stochastic SIR (Susceptible-Infected-Recovered) model
112 |
113 | > :heavy_check_mark: Supports experimental design
114 |
115 | ```python
116 | from hypothesis.benchmark.sir import Simulator
117 | from hypothesis.benchmark.sir import Prior
118 |
119 | simulator = Simulator()
120 | prior = Prior()
121 |
122 | inputs = prior.sample((10,)) # Draw 10 samples from the prior.
123 | outputs = simulator(inputs)
124 |
125 | from hypothesis.benchmark.sir import PriorExperiment # Experimental design space
126 |
127 | prior_experiment = PriorExperiment()
128 | experimental_designs = prior_experiment.sample((10,))
129 |
130 | outputs = simulator(inputs, experimental_designs)
131 | ```
132 |
133 | ### Stochastic Spatial SIR (Susceptible-Infected-Recovered) model
134 |
135 | > :heavy_check_mark: Supports experimental design
136 |
137 | ![Spatial SIR benchmark](.github/images/benchmark-spatialsir.gif)
138 |
139 |
140 |
141 | ```python
142 | from hypothesis.benchmark.spatialsir import Simulator
143 | from hypothesis.benchmark.spatialsir import Prior
144 |
145 | simulator = Simulator()
146 | prior = Prior()
147 |
148 | inputs = prior.sample((10,)) # Draw 10 samples from the prior.
149 | outputs = simulator(inputs)
150 |
151 | from hypothesis.benchmark.spatialsir import PriorExperiment # Experimental design space
152 |
153 | prior_experiment = PriorExperiment()
154 | experimental_designs = prior_experiment.sample((10,))
155 |
156 | outputs = simulator(inputs, experimental_designs)
157 | ```
158 |
159 | ### Tractable
160 |
161 | ```python
162 | from hypothesis.benchmark.tractable import Simulator
163 | from hypothesis.benchmark.tractable import Prior
164 |
165 | simulator = Simulator()
166 | prior = Prior()
167 |
168 | inputs = prior.sample((10,)) # Draw 10 samples from the prior.
169 | outputs = simulator(inputs)
170 | ```
171 |
172 | ### Weinberg
173 |
174 | > :heavy_check_mark: Supports experimental design
175 |
176 | ```python
177 | from hypothesis.benchmark.weinberg import Simulator
178 | from hypothesis.benchmark.weinberg import Prior
179 |
180 | simulator = Simulator()
181 | prior = Prior()
182 |
183 | inputs = prior.sample((10,)) # Draw 10 samples from the prior.
184 | outputs = simulator(inputs)
185 |
186 | from hypothesis.benchmark.weinberg import PriorExperiment # Experimental design space
187 |
188 | prior_experiment = PriorExperiment()
189 | experimental_designs = prior_experiment.sample((10,))
190 |
191 | outputs = simulator(inputs, experimental_designs)
192 | ```
193 |
194 | ## License
195 |
196 | Hypothesis is BSD-style licensed, as found in the LICENSE file.
197 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = source
9 | BUILDDIR = build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=source
11 | set BUILDDIR=build
12 |
13 | if "%1" == "" goto help
14 |
15 | %SPHINXBUILD% >NUL 2>NUL
16 | if errorlevel 9009 (
17 | echo.
18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19 | echo.installed, then set the SPHINXBUILD environment variable to point
20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
21 | echo.may add the Sphinx directory to PATH.
22 | echo.
23 | echo.If you don't have Sphinx installed, grab it from
24 | echo.http://sphinx-doc.org/
25 | exit /b 1
26 | )
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/docs/source/beginnersguide.rst:
--------------------------------------------------------------------------------
1 | Beginner's guide
2 | ^^^^^^^^^^^^^^^^
3 |
--------------------------------------------------------------------------------
/docs/source/conf.py:
--------------------------------------------------------------------------------
1 | # Configuration file for the Sphinx documentation builder.
2 | #
3 | # This file only contains a selection of the most common options. For a full
4 | # list see the documentation:
5 | # http://www.sphinx-doc.org/en/master/config
6 |
7 | # -- Path setup --------------------------------------------------------------
8 |
9 | # If extensions (or modules to document with autodoc) are in another directory,
10 | # add these directories to sys.path here. If the directory is relative to the
11 | # documentation root, use os.path.abspath to make it absolute, like shown here.
12 |
13 | import os
14 | import sys
15 | sys.path.insert(0, os.path.abspath('../..'))
16 | sys.setrecursionlimit(1500)
17 |
18 |
19 | # -- Project information -----------------------------------------------------
20 |
21 | project = "Hypothesis"
22 | copyright = "2019, Joeri Hermans"
23 | author = "Joeri Hermans"
24 |
25 | # The full version, including alpha/beta/rc tags
26 | release = "0.0.3"
27 |
28 |
29 | # -- General configuration ---------------------------------------------------
30 |
31 | # Add any Sphinx extension module names here, as strings. They can be
32 | # extensions coming with Sphinx (named "sphinx.ext.*") or your custom
33 | # ones.
34 | extensions = [
35 | "sphinx.ext.autodoc",
36 | "sphinx.ext.coverage",
37 | "sphinx.ext.mathjax",
38 | "sphinx.ext.napoleon",
39 | "sphinx.ext.todo",
40 | "sphinx.ext.viewcode"
41 | ]
42 |
43 | # Add any paths that contain templates here, relative to this directory.
44 | templates_path = ["_templates"]
45 |
46 | # List of patterns, relative to source directory, that match files and
47 | # directories to ignore when looking for source files.
48 | # This pattern also affects html_static_path and html_extra_path.
49 | exclude_patterns = []
50 |
51 |
52 | # -- Options for todo extension ----------------------------------------------
53 |
54 | todo_include_todos = True
55 |
56 | # -- Options for HTML output -------------------------------------------------
57 |
58 | # The theme to use for HTML and HTML Help pages. See the documentation for
59 | # a list of builtin themes.
60 | #
61 | html_theme = "press"
62 |
63 | # Add any paths that contain custom static files (such as style sheets) here,
64 | # relative to this directory. They are copied after the builtin static files,
65 | # so a file named "default.css" will overwrite the builtin "default.css".
66 | html_static_path = ["_static"]
67 |
--------------------------------------------------------------------------------
/docs/source/developers.rst:
--------------------------------------------------------------------------------
1 | Developers
2 | ^^^^^^^^^^
3 |
4 | Code style
5 | ##########
6 |
7 | ``__init__.py``
8 | ---------------
9 |
--------------------------------------------------------------------------------
/docs/source/hypothesis.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/montefiore-institute/hypothesis/29a2b7b4649db345d43a8d3bf98aa5d817b43f1b/docs/source/hypothesis.png
--------------------------------------------------------------------------------
/docs/source/index.rst:
--------------------------------------------------------------------------------
1 | .. image:: hypothesis.png
2 |
3 | Hypothesis
4 | ^^^^^^^^^^
5 |
6 | .. toctree::
7 | :glob:
8 | :titlesonly:
9 | :maxdepth: 2
10 | :caption: Contents
11 | :hidden:
12 |
13 | *
14 |
15 | .. automodule:: hypothesis
16 |
17 | .. todolist::
18 |
--------------------------------------------------------------------------------
/docs/source/installation.rst:
--------------------------------------------------------------------------------
1 | Installation
2 | ^^^^^^^^^^^^
3 |
4 | Test
5 | ====
6 |
7 | t
8 |
--------------------------------------------------------------------------------
/docs/source/modules.rst:
--------------------------------------------------------------------------------
1 | Documentation
2 | ^^^^^^^^^^^^^
3 |
4 | .. automodule:: hypothesis
5 | :noindex:
6 | :members:
7 |
8 | ``hypothesis.benchmark``
9 | ========================
10 |
11 | .. automodule:: hypothesis.benchmark
12 | :members:
13 |
14 | ``hypothesis.benchmark.tractable``
15 | ----------------------------------
16 | .. automodule:: hypothesis.benchmark.tractable
17 | .. automodule:: hypothesis.benchmark.tractable.simulator
18 | :members:
19 |
20 |
21 | ``hypothesis.benchmark.mg1``
22 | ----------------------------
23 | .. automodule:: hypothesis.benchmark.mg1
24 | .. automodule:: hypothesis.benchmark.mg1.simulator
25 | :members:
26 |
27 | ``hypothesis.nn``
28 | =================
29 |
30 | .. automodule:: hypothesis.nn
31 |
32 | ``hypothesis.simulation``
33 | =========================
34 |
35 | .. automodule:: hypothesis.simulation
36 | :members:
37 |
38 | .. automodule:: hypothesis.simulation.base
39 | :members:
40 |
41 | ``hypothesis.summary``
42 | ======================
43 |
44 | ``hypothesis.util``
45 | ===================
46 | .. automodule:: hypothesis.util
47 | :members:
48 |
49 | ``hypothesis.util.data``
50 | ---------------------------
51 | .. automodule:: hypothesis.util.data
52 | :members:
53 | .. automodule:: hypothesis.util.data.simulator_dataset
54 | :members:
55 |
--------------------------------------------------------------------------------
/examples/README.md:
--------------------------------------------------------------------------------
1 | # Examples
2 |
3 | ## Markov chain Monte Carlo
--------------------------------------------------------------------------------
/examples/amortized-ratio-estimation/likelihood-to-evidence-minimal.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import hypothesis
3 | import matplotlib.pyplot as plt
4 | import numpy as np
5 | import torch
6 |
7 | from hypothesis.auto.training import LikelihoodToEvidenceRatioEstimatorTrainer as Trainer
8 | from hypothesis.benchmark.normal import Prior
9 | from hypothesis.benchmark.normal import Simulator
10 | from hypothesis.nn.amortized_ratio_estimation import LikelihoodToEvidenceRatioEstimatorMLP as RatioEstimator
11 | from hypothesis.visualization.util import make_square
12 | from torch.utils.data import TensorDataset
13 |
14 |
15 |
16 | def main(arguments):
17 | # Allocate the ratio estimator
18 | estimator = RatioEstimator(
19 | activation=torch.nn.SELU,
20 | layers=[128, 128, 128],
21 | shape_inputs=(1,),
22 | shape_outputs=(1,))
23 | estimator = estimator.to(hypothesis.accelerator)
24 | # Allocate the optimizer
25 | optimizer = torch.optim.Adam(estimator.parameters())
26 | # Allocate the trainer, or optimization procedure.
27 | trainer = Trainer(
28 | estimator=estimator,
29 | dataset_train=allocate_dataset_train(),
30 | dataset_test=allocate_dataset_test(),
31 | epochs=arguments.epochs,
32 | checkpoint=arguments.checkpoint,
33 | batch_size=arguments.batch_size,
34 | optimizer=optimizer)
35 | # Execute the optimization process.
36 | summary = trainer.fit()
37 | print(summary)
38 | # Plot the testing and training loss.
39 | figure, axes = plt.subplots(nrows=1, ncols=2, sharey=True)
40 | # Training
41 | ax = axes[0]
42 | loss = summary.train_losses(log=True)
43 | ax.plot(loss, lw=2, color="black")
44 | ax.set_xlabel("Gradient updates")
45 | ax.set_ylabel("Logarithmic loss")
46 | ax.set_title("Training loss")
47 | ax.minorticks_on()
48 | # Testing
49 | ax = axes[1]
50 | epochs = np.arange(summary.num_epochs()) + 1
51 | loss = summary.test_losses(log=True)
52 | ax.plot(epochs, loss, lw=2, color="black")
53 | ax.set_xlabel("Epochs")
54 | ax.set_title("Testing loss")
55 | # Square axes
56 | make_square(axes[0])
57 | make_square(axes[1])
58 | figure.tight_layout()
59 | plt.show()
60 |
61 |
62 | def allocate_dataset_train():
63 | return allocate_dataset(100000)
64 |
65 |
66 | def allocate_dataset_test():
67 | return allocate_dataset(1000)
68 |
69 |
70 | @torch.no_grad()
71 | def allocate_dataset(n):
72 | prior = Prior()
73 | simulator = Simulator()
74 | size = torch.Size([n])
75 | inputs = prior.sample(size).view(-1, 1)
76 | outputs = simulator(inputs).view(-1, 1)
77 |
78 | return TensorDataset(inputs, outputs)
79 |
80 |
81 | def parse_arguments():
82 | parser = argparse.ArgumentParser("Amortized Likelihood-to-evidence Ratio Estimation: minimal example")
83 | parser.add_argument("--batch-size", type=int, default=hypothesis.default.batch_size, help="Batch-size of the stochastic optimization.")
84 | parser.add_argument("--checkpoint", type=str, default=None, help="Path to store the checkpoints. If specified, checkpointing will be enabled.")
85 | parser.add_argument("--epochs", type=int, default=10, help="Number of data epochs.")
86 | arguments, _ = parser.parse_known_args()
87 |
88 | return arguments
89 |
90 |
91 | if __name__ == "__main__":
92 | arguments = parse_arguments()
93 | main(arguments)
94 |
--------------------------------------------------------------------------------
/hypothesis/__init__.py:
--------------------------------------------------------------------------------
1 | r"""Hypothesis is a python module for statistical inference and the
2 | mechanization of science.
3 |
4 | The package contains (approximate) inference algorithms to solve statistical
5 | problems. Utilities are provided for data loading, efficient
6 | simulation, visualization, fire-and-forget inference, and validation.
7 | """
8 |
9 | __version__ = "0.0.3"
10 | __author__ = [
11 | "Joeri Hermans"]
12 |
13 | __email__ = [
14 | "joeri.hermans@doct.uliege.be"]
15 |
16 |
17 | ################################################################################
18 | # Global variables
19 | ################################################################################
20 |
21 | import multiprocessing
22 | import torch
23 |
24 |
25 |
26 | cpu_count = multiprocessing.cpu_count()
27 | """int: Number of available processor cores.
28 |
29 | Variable will be initialized when ``hypothesis`` is loaded for the first time.
30 | """
31 |
32 |
33 |
34 | workers = cpu_count
35 | """int: Number of default workers.
36 |
37 | Default number of workers in Hypothesis.
38 | """
39 |
40 |
41 | def set_workers(n):
42 | r"""Sets the number of default parallel hypothesis workers."""
43 | assert(n >= 1)
44 | hypothesis.workers = n
45 |
46 |
47 |
48 | accelerator = torch.device("cuda" if torch.cuda.is_available() else "cpu")
49 | a = accelerator # Short name
50 | """torch.device: PyTorch device describing the accelerator backend.
51 |
52 | The variable will be initialized when ``hypothesis`` is loaded for the first
53 | time. It will check for the availability of a CUDA device. If a CUDA-enabled
54 | device is present, ``hypothesis`` will select the CUDA device defined in the
55 | ``CUDA_VISIBLE_DEVICES`` environment variable. If no such device is specified,
56 | the variable will default to GPU 0.
57 | """
58 |
59 |
60 | def disable_gpu():
61 | r"""Disables GPU acceleration. Hypothesis' accelerator will have been
62 | set to 'cpu'."""
63 | hypothesis.accelerator = "cpu"
64 | hypothesis.a = hypothesis.accelerator
65 |
66 | def enable_gpu():
67 | r"""Tries to enable GPU acceleration. If a GPU is present, a CUDA
68 | device will be set, else it will default to 'cpu'."""
69 | hypothesis.accelerator = torch.device("cuda" if torch.cuda.is_available() else "cpu")
70 | hypothesis.a = hypothesis.accelerator
71 |
72 | return hypothesis.accelerator
73 |
74 |
75 | def gpu_available():
76 | r"""Checks if GPU acceleration is available."""
77 | return hypothesis.accelerator != "cpu"
78 |
79 |
80 | ################################################################################
81 | # Hypothesis' defaults
82 | ################################################################################
83 |
84 | import hypothesis.default
85 |
--------------------------------------------------------------------------------
/hypothesis/auto/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/montefiore-institute/hypothesis/29a2b7b4649db345d43a8d3bf98aa5d817b43f1b/hypothesis/auto/__init__.py
--------------------------------------------------------------------------------
/hypothesis/auto/inference/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/montefiore-institute/hypothesis/29a2b7b4649db345d43a8d3bf98aa5d817b43f1b/hypothesis/auto/inference/__init__.py
--------------------------------------------------------------------------------
/hypothesis/auto/inference/base.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/montefiore-institute/hypothesis/29a2b7b4649db345d43a8d3bf98aa5d817b43f1b/hypothesis/auto/inference/base.py
--------------------------------------------------------------------------------
/hypothesis/auto/inference/util.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/montefiore-institute/hypothesis/29a2b7b4649db345d43a8d3bf98aa5d817b43f1b/hypothesis/auto/inference/util.py
--------------------------------------------------------------------------------
/hypothesis/auto/training/__init__.py:
--------------------------------------------------------------------------------
1 | from .base import BaseTrainer
2 | from .amortized_ratio_estimation import BaseAmortizedRatioEstimatorTrainer
3 | from .amortized_ratio_estimation import LikelihoodToEvidenceRatioEstimatorTrainer
4 | from .amortized_ratio_estimation import LikelihoodToEvidenceCriterion
5 | from .amortized_ratio_estimation import ConservativeLikelihoodToEvidenceCriterion
6 | from .amortized_ratio_estimation import create_trainer
7 |
--------------------------------------------------------------------------------
/hypothesis/auto/training/base.py:
--------------------------------------------------------------------------------
1 | import hypothesis
2 |
3 | from hypothesis.engine import Procedure
4 | from torch.utils.data import DataLoader
5 |
6 |
7 |
8 | class BaseTrainer(Procedure):
9 |
10 | def __init__(self,
11 | batch_size=hypothesis.default.batch_size,
12 | checkpoint=None,
13 | epochs=hypothesis.default.epochs,
14 | identifier=None,
15 | shuffle=True,
16 | workers=hypothesis.default.dataloader_workers):
17 | super(BaseTrainer, self).__init__()
18 | # Training hyperparameters
19 | self.batch_size = batch_size
20 | self.checkpoint_path = checkpoint
21 | self.dataloader_workers = workers
22 | self.epochs = epochs
23 | self.identifier = identifier
24 | self.shuffle = shuffle
25 | # Load the previously saved state.
26 | self._checkpoint_load()
27 |
28 | def _checkpoint_store(self):
29 | raise NotImplementedError
30 |
31 | def _checkpoint_load(self):
32 | raise NotImplementedError
33 |
34 | def _allocate_data_loader(self, dataset):
35 | return DataLoader(dataset,
36 | batch_size=self.batch_size,
37 | drop_last=True,
38 | num_workers=self.dataloader_workers,
39 | pin_memory=True,
40 | shuffle=self.shuffle)
41 |
42 | def _register_events(self):
43 | raise NotImplementedError
44 |
45 | def _summarize(self):
46 | raise NotImplementedError
47 |
48 | def checkpoint(self):
49 | raise NotImplementedError
50 |
51 | def fit(self):
52 | raise NotImplementedError
53 |
54 | def test(self):
55 | raise NotImplementedError
56 |
57 | def train(self):
58 | raise NotImplementedError
59 |
--------------------------------------------------------------------------------
/hypothesis/auto/training/util.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
--------------------------------------------------------------------------------
/hypothesis/benchmark/__init__.py:
--------------------------------------------------------------------------------
1 | r""""""
2 |
3 | from .environment import BenchmarkEnvironment
4 |
--------------------------------------------------------------------------------
/hypothesis/benchmark/base.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/montefiore-institute/hypothesis/29a2b7b4649db345d43a8d3bf98aa5d817b43f1b/hypothesis/benchmark/base.py
--------------------------------------------------------------------------------
/hypothesis/benchmark/biomoleculardocking/__init__.py:
--------------------------------------------------------------------------------
1 | r"""Based on the model described in
2 |
3 | Lyu, J., Wang, S., Balius, T. E., Singh, I., Levit, A., Moroz, Y. S., ... & Tolmachev, A. A. (2019). Ultra-large library docking for discovering new chemotypes. Nature, 566(7743), 224-229.
4 |
5 | https://www.nature.com/articles/s41586-019-0917-9
6 | """
7 |
8 | from .simulator import BiomolecularDockingSimulator as Simulator
9 | from .util import Prior
10 | from .util import PriorExperiment
11 | from .util import Truth
12 | from .util import log_likelihood
13 |
--------------------------------------------------------------------------------
/hypothesis/benchmark/biomoleculardocking/simulator.py:
--------------------------------------------------------------------------------
1 | import hypothesis
2 | import numpy as np
3 | import torch
4 |
5 | from hypothesis.simulation import Simulator as BaseSimulator
6 | from torch.distributions.bernoulli import Bernoulli
7 |
8 |
9 |
10 | class BiomolecularDockingSimulator(BaseSimulator):
11 |
12 | MIN_PSI = -75.0
13 | MAX_PSI = 0.0
14 | EXPERIMENTAL_SPACE = 100
15 |
16 | def __init__(self, default_experimental_design=torch.zeros(EXPERIMENTAL_SPACE)):
17 | super(BiomolecularDockingSimulator, self).__init__()
18 | self.default_experimental_design = default_experimental_design
19 |
20 | def simulate(self, theta, psi):
21 | bottom = theta[0].item()
22 | ee50 = theta[1].item()
23 | slope = theta[2].item()
24 | top = theta[3].item()
25 | n = len(psi)
26 | x = torch.zeros(n)
27 | for index in range(n):
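# Sigmoid dose-response: rate = bottom + (top - bottom) / (1 + exp(-(psi - ee50) * slope)).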
28 | rate = bottom + (
29 | (top - bottom)
30 | /
31 | (1 + (-(psi[index] - ee50) * slope).exp()))
32 | p = Bernoulli(rate)
33 | x[index] = p.sample()
34 |
35 | return x
36 |
37 | @torch.no_grad()
38 | def forward(self, inputs, experimental_configurations=None):
39 | outputs = []
40 |
41 | n = len(inputs)
42 | for index in range(n):
43 | theta = inputs[index]
44 | if experimental_configurations is not None:
45 | psi = experimental_configurations[index]
46 | x = self.simulate(theta, psi)
47 | else:
48 | x = self.simulate(theta, self.default_experimental_design)
49 | outputs.append(x.view(1, -1))
50 | outputs = torch.cat(outputs, dim=0)
51 |
52 | return outputs
53 |
--------------------------------------------------------------------------------
/hypothesis/benchmark/biomoleculardocking/util.py:
--------------------------------------------------------------------------------
1 | r"""Utilities for the Biomolecular Docking benchmark.
2 |
3 | """
4 |
5 | import torch
6 |
7 | from hypothesis.exception import IntractableException
8 | from torch.distributions.beta import Beta
9 | from torch.distributions.normal import Normal
10 |
11 |
12 |
13 | def Truth():
14 | raise NotImplementedError
15 |
16 |
17 | def log_likelihood(theta, x):
18 | raise IntractableException
19 |
20 |
21 |
22 | class Prior:
23 |
24 | def __init__(self):
25 | self.r_bottom = Beta(4, 96)
26 | self.r_ee50 = Normal(-50, 15 ** 2)
27 | self.r_slope = Normal(-0.15, 0.1 ** 2)
28 | self.r_top = Beta(25, 75)
29 |
30 | def sample(self, sample_shape=torch.Size()):
31 | bottom_samples = self.r_bottom.sample(sample_shape).view(-1, 1)
32 | ee50_samples = self.r_ee50.sample(sample_shape).view(-1, 1)
33 | slope_samples = self.r_slope.sample(sample_shape).view(-1, 1)
34 | top_samples = self.r_top.sample(sample_shape).view(-1, 1)
35 | samples = torch.cat([
36 | bottom_samples,
37 | ee50_samples,
38 | slope_samples,
39 | top_samples],
40 | dim=1)
41 |
42 | return samples
43 |
44 | def log_prob(self, sample):
45 | raise IntractableException
46 |
47 |
48 |
49 | class PriorExperiment(torch.distributions.uniform.Uniform):
50 |
51 | def __init__(self):
52 | lower = torch.ones(100) * -75
53 | upper = torch.zeros(100)
54 | super(PriorExperiment, self).__init__(lower, upper)
55 | def log_prob(self, sample):
56 | return super(PriorExperiment, self).log_prob(sample).mean()
57 |
--------------------------------------------------------------------------------
/hypothesis/benchmark/catapult/__init__.py:
--------------------------------------------------------------------------------
1 | from .simulator import CatapultSimulator as Simulator
2 | from .util import Prior
3 | from .util import PriorExperiment
4 |
--------------------------------------------------------------------------------
/hypothesis/benchmark/catapult/simulator.py:
--------------------------------------------------------------------------------
1 | import hypothesis
2 | import numpy as np
3 | import torch
4 |
5 | from hypothesis.simulation import Simulator as BaseSimulator
6 | from .util import PriorExperiment
7 |
8 |
9 |
10 | class CatapultSimulator(BaseSimulator):
11 |
12 | LAUNCH_ANGLE_LIMIT_HIGH = 1.5707963267948965
13 | LAUNCH_ANGLE_LIMIT_LOW = 0.0
14 |
15 | def __init__(self, limit=100.0, step_size=0.01, record_wind=False):
16 | super(CatapultSimulator, self).__init__()
17 | self.dt = step_size
18 | # self.prior_experiment = PriorExperiment()
19 | self.limit = limit # Observational limit in meters
20 | self.record_wind = record_wind
21 | self.planet_mass = 5.972 * 10**24 # Kilogram
22 | self.planet_radius = 6371000 # Meters
23 | self.air_density = 1.2
24 |
25 | def _get_projectile(self, psi):
26 | area = psi[0].item()
27 | mass = psi[1].item()
28 |
29 | return Projectile(area=area, mass=mass)
30 |
31 | def _get_launch_angle(self, psi):
32 | nominal_launch_angle = psi[2].item()
33 | launch_angle = nominal_launch_angle + np.random.normal() * 0.1
34 | # Check if the launch angle is valid (in radians).
35 | if launch_angle < self.LAUNCH_ANGLE_LIMIT_LOW:
36 | launch_angle = self.LAUNCH_ANGLE_LIMIT_LOW
37 | elif launch_angle > self.LAUNCH_ANGLE_LIMIT_HIGH:
38 | launch_angle = self.LAUNCH_ANGLE_LIMIT_HIGH
39 |
40 | return launch_angle
41 |
42 | def _get_launch_force(self, psi):
43 | launch_force = psi[3].item()
44 | #launch_force = launch_force + (np.random.normal() * 5) # Newton
45 | if launch_force < 10:
46 | launch_force = 10
47 |
48 | return launch_force
49 |
50 | def _get_wind(self):
51 | return np.random.normal() * 5 # Meters per second
52 |
53 | def simulate(self, theta, psi, trajectory=False):
54 | # Setup the initial conditions and simulator state
55 | positions = []
56 | G = theta.item() * (10 ** -11)
57 | v_nominal_wind = self._get_wind()
58 | launch_angle = self._get_launch_angle(psi)
59 | launch_force = self._get_launch_force(psi)
60 | projectile = self._get_projectile(psi)
61 |
62 | # Compute the initial launch force.
63 | force = np.zeros(2)
64 | force[0] = np.cos(launch_angle) * launch_force
65 | force[1] = np.sin(launch_angle) * launch_force
66 |
67 | # Compute the force due to acceleration
68 | force_gravitational = np.zeros(2)
69 | force_gravitational[1] = -projectile.mass * ((G * self.planet_mass) / self.planet_radius ** 2)
70 | positions.append(np.copy(projectile.position).reshape(1, 2))
71 |
72 | # Apply the launching force for a 0.1 second.
73 | n = int(0.1 / self.dt)
74 | for _ in range(n):
75 | projectile.apply(force, self.dt)
76 | positions.append(np.copy(projectile.position).reshape(1, 2))
77 |
78 | # Integrate until the projectile hits the ground.
79 | while not projectile.stopped() and np.abs(projectile.position[0]) <= self.limit:
80 | v_wind = v_nominal_wind + 0.01 * np.random.normal()
81 | dv_x = projectile.velocity[0]
82 | dv_y = projectile.velocity[1]
83 |
84 | # Force of the wind component.
85 | force_wind = np.zeros(2)
86 | force_wind[0] = np.sign(v_wind) * 0.5 * self.air_density * (projectile.area / projectile.mass) * (v_wind ** 2)
87 |
88 | # Force of the drag
89 | force_drag = np.zeros(2)
90 | force_drag[0] = np.sign(dv_x) * 0.5 * projectile.drag_coefficient * self.air_density * projectile.area * (dv_x ** 2)
91 | force_drag[1] = np.sign(dv_y) * 0.5 * projectile.drag_coefficient * self.air_density * projectile.area * (dv_y ** 2)
92 |
93 | # Compute net drag
94 | force = force_gravitational + force_wind - force_drag
95 | projectile.apply(force, self.dt)
96 |
97 | # Check if projectile is within limits
98 | x_position = projectile.position[0]
99 | if np.abs(x_position) > self.limit:
100 | x_position = np.sign(x_position) * self.limit
101 | positions.append(np.array([[x_position, 0]]))
102 | else:
103 | positions.append(np.copy(projectile.position).reshape(1, 2))
104 |
105 | positions = np.vstack(positions)
106 | if trajectory:
107 | return positions
108 | else:
109 | if self.record_wind:
110 | return np.array([v_nominal_wind, positions[-1][0]])
111 | else:
112 | return positions[-1][0]
113 |
114 | @torch.no_grad()
115 | def forward(self, inputs, experimental_configurations):
116 | outputs = []
117 |
118 | n = len(inputs)
119 | for index in range(n):
120 | theta = inputs[index].view(-1)
121 | psi = experimental_configurations[index].view(-1)
122 | output = self.simulate(theta, psi).reshape(1, -1)
123 | outputs.append(torch.from_numpy(output))
124 |
125 | return torch.cat(outputs, dim=0).float()
126 |
127 |
128 |
129 | class Projectile:
130 | r"""A spherical projectile."""
131 |
132 | def __init__(self, area=0.1, mass=1.0, drag_coefficient=0.05):
133 | self.position = np.zeros(2) # x -> distance, y -> height
134 | self.velocity = np.zeros(2)
135 | self.mass = mass # Kilogram
136 | self.drag_coefficient = drag_coefficient
137 | self.area = area # Meter
138 |
139 | def stopped(self):
140 | return self.position[1] < 0
141 |
142 | def apply(self, force, dt):
143 | impulse = force * dt
144 | dv = impulse / self.mass
145 | self.velocity += dv
146 | self.position += self.velocity * dt
147 |
--------------------------------------------------------------------------------
/hypothesis/benchmark/catapult/util.py:
--------------------------------------------------------------------------------
1 | r"""Utilities for the catapult simulator to infer the gravitational constant.
2 |
3 | """
4 |
5 | import hypothesis
6 | import numpy as np
7 | import torch
8 |
9 | from hypothesis.exception import IntractableException
10 |
11 |
12 |
13 | def Prior():
14 | lower = torch.tensor(1.0)
15 | lower = lower.to(hypothesis.accelerator)
16 | upper = torch.tensor(10.0)
17 | upper = upper.to(hypothesis.accelerator)
18 |
19 | return torch.distributions.uniform.Uniform(lower, upper)
20 |
21 |
22 | def PriorExperiment():
23 | lower = torch.tensor([0.1, 0.1, 0.0, 10.0])
24 | lower = lower.to(hypothesis.accelerator)
25 | upper = torch.tensor([1.0, 10.0, np.pi / 2, 1000.0])
26 | upper = upper.to(hypothesis.accelerator)
27 |
28 | return Uniform(lower, upper)
29 |
30 |
31 |
32 | class Uniform(torch.distributions.uniform.Uniform):
33 |
34 | r"""Used to initialize the prior over the experimental design space."""
35 | def __init__(self, lower, upper):
36 | super(Uniform, self).__init__(lower, upper)
37 |
38 | def log_prob(self, sample):
39 | return super(Uniform, self).log_prob(sample).sum()
40 |
--------------------------------------------------------------------------------
/hypothesis/benchmark/death/__init__.py:
--------------------------------------------------------------------------------
1 | from .simulator import DeathModelSimulator as Simulator
2 | from .util import Prior
3 | from .util import PriorExperiment
4 | from .util import Truth
5 | from .util import log_likelihood
6 |
--------------------------------------------------------------------------------
/hypothesis/benchmark/death/simulator.py:
--------------------------------------------------------------------------------
1 | import hypothesis
2 | import numpy as np
3 | import torch
4 |
5 | from hypothesis.simulation import Simulator as BaseSimulator
6 | from torch.distributions.binomial import Binomial
7 |
8 |
9 |
10 | class DeathModelSimulator(BaseSimulator):
11 |
12 | def __init__(self, population_size=1000, default_measurement_time=1.0, step_size=0.01):
13 | super(DeathModelSimulator, self).__init__()
14 | self.default_measurement_time = torch.tensor(default_measurement_time).float()
15 | self.population_size = int(population_size)
16 | self.step_size = float(step_size)
17 |
18 | def simulate(self, theta, psi):
19 | # theta = infection rate
20 | # psi = tau, the measurement time
21 | # sample = I(tau), the number of infected individuals at time tau
22 | infection_rate = theta.item()
23 | design = psi.item()
24 | I = 0
25 | t = 0.0
26 | n_steps = int(design / self.step_size)
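# Each step, the S remaining individuals transition with probability 1 - exp(-infection_rate * t), drawn from a Binomial.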
27 | for _ in range(n_steps):
28 | S = self.population_size - I
29 | if S == 0:
30 | break
31 | p_inf = 1 - np.exp(-infection_rate * t)
32 | delta_I = int(Binomial(S, p_inf).sample())
33 | I += delta_I
34 | t += self.step_size
35 |
36 | return torch.tensor(I).float()
37 |
38 | @torch.no_grad()
39 | def forward(self, inputs, experimental_configurations=None):
40 | outputs = []
41 |
42 | n = len(inputs)
43 | for index in range(n):
44 | theta = inputs[index]
45 | if experimental_configurations is not None:
46 | psi = experimental_configurations[index]
47 | x = self.simulate(theta, psi)
48 | else:
49 | x = self.simulate(theta, self.default_measurement_time)
50 | outputs.append(x.view(1, -1))
51 | outputs = torch.cat(outputs, dim=0)
52 |
53 | return outputs
54 |
--------------------------------------------------------------------------------
/hypothesis/benchmark/death/util.py:
--------------------------------------------------------------------------------
1 | r"""Utilities for the Death Model benchmark.
2 |
3 | """
4 |
5 | import torch
6 |
7 | from torch.distributions.binomial import Binomial
8 | from torch.distributions.normal import Normal
9 | from torch.distributions.uniform import Uniform
10 |
11 |
12 |
13 | def PriorExperiment():
14 | r"""Prior over the experimental design space (measurement time)."""
15 | return Uniform(0., 10.0)
16 |
17 |
18 | def Truth():
19 | return torch.tensor([1.])
20 |
21 |
22 | def log_likelihood(theta, x):
23 | raise NotImplementedError
24 |
25 |
26 |
27 | class Prior:
28 |
29 | def __init__(self):
30 | self.normal = Normal(1, 1)
31 | self.uniform = Uniform(0, 10)
32 |
33 | def _sample(self):
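# Rejection sampling: draw from Normal(1, 1) until the candidate falls inside the support of Uniform(0, 10).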
34 | sample = None
35 |
36 | neg_infinity = float("-inf")
37 | while sample is None:
38 | candidate = self.normal.sample()
39 | if self.uniform.log_prob(candidate) > neg_infinity:
40 | sample = candidate
41 | break
42 |
43 | return sample
44 |
45 | def sample(self, sample_shape=torch.Size()):
46 | samples = []
47 |
48 | if len(sample_shape) == 0:
49 | n = 1
50 | else:
51 | n = sample_shape[0]
52 | for _ in range(n):
53 | samples.append(self._sample().view(-1, 1))
54 |
55 | return torch.cat(samples, dim=0)
56 |
57 | def log_prob(self, sample):
58 | raise NotImplementedError
59 |
--------------------------------------------------------------------------------
/hypothesis/benchmark/environment.py:
--------------------------------------------------------------------------------
1 | import hypothesis
2 | import torch
3 |
4 | from hypothesis.rl import Environment as BaseEnvironment
5 |
6 |
7 |
8 | class BenchmarkEnvironment(BaseEnvironment):
9 |
10 | def __init__(self, simulator,
11 | prior,
12 | prior_experiment,
13 | entropy_estimator,
14 | max_experiments=10,
15 | truth=None):
16 | super(BenchmarkEnvironment, self).__init__()
17 | # Check if a simulation model has been specified
18 | if simulator is None:
19 | raise ValueError("A simulation model is required.")
20 | # Check if an entropy estimator has been specified.
21 | if entropy_estimator is None:
22 | raise ValueError("An entropy-estimator is required.")
23 | # Environment properties
24 | self.conducted_experiments = 0
25 | self.entropy_estimator = entropy_estimator
26 | self.max_experiments = max_experiments
27 | self.predefined_truth = truth
28 | self.prior = prior
29 | self.prior_experiment = prior_experiment
30 | self.simulator = simulator
31 | # Environment state
32 | self.reset()
33 |
34 | @torch.no_grad()
35 | def _perform_experiment(self, experiment):
36 | inputs = self.truth.view(1, -1)
37 | designs = experiment.view(1, -1)
38 | outputs = self.simulator(inputs=inputs, designs=designs)
39 |
40 | return outputs
41 |
42 | def _reward(self):
43 | r"""We negate the entropy as the reward needs to be maximized."""
44 | return -self.entropy_estimator(self.actions, self.observations)
45 |
46 | @torch.no_grad()
47 | def summary(self):
48 | return {
49 | "experiments": self.actions,
50 | "observations": self.observations,
51 | "rewards": self.rewards,
52 | "truth": self.truth.squeeze().numpy()}
53 |
54 | def step(self, action):
55 | assert(self.conducted_experiments < self.max_experiments)
56 | observation = self._perform_experiment(action)
57 | self.conducted_experiments += 1
58 | self.observations.append(observation.cpu().detach())
59 | self.actions.append(action.cpu().detach())
60 | reward = self._reward()
61 | self.rewards.append(reward.cpu().detach())
62 | done = (self.conducted_experiments >= self.max_experiments)
63 |
64 | return observation, reward, done, self.summary()
65 |
66 | @torch.no_grad()
67 | def reset(self):
68 | self.actions = []
69 | self.conducted_experiments = 0
70 | self.observations = []
71 | self.rewards = []
72 | if self.predefined_truth is None:
73 | self.truth = self.prior.sample().view(1, -1)
74 | else:
75 | self.truth = self.predefined_truth.view(1, -1)
76 |
--------------------------------------------------------------------------------
/hypothesis/benchmark/mg1/__init__.py:
--------------------------------------------------------------------------------
1 | from .simulator import MG1Simulator as Simulator
2 | from .util import Prior
3 | from .util import Truth
4 | from .util import log_likelihood
5 |
--------------------------------------------------------------------------------
/hypothesis/benchmark/mg1/simulator.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import numpy.random as rng
3 | import torch
4 |
5 | from hypothesis.simulation import Simulator
6 |
7 |
8 |
9 | class MG1Simulator(Simulator):
10 | r"""Simulation model of the M/G/1 queuing model.
11 |
12 | This model describes a queuing system of continuously arriving jobs by a
13 | single server. The time it takes to process every job is uniformly
14 | distributed in the interval :math:`[\theta_1, \theta_2]`. The arrival
15 | between two consecutive jobs is exponentially distributed according to
16 | the rate :math:`\theta_3`. That is, for
17 | every job :math:`i` we have the processing time :math:`p_i` , an arrival
18 | time :math:`a_i` and the time :math:`l_i` at which the job left the queue.
19 |
20 | Todo:
21 | Write method docs.
22 | """
23 |
24 | def __init__(self, percentiles=5, steps=50):
25 | super(MG1Simulator, self).__init__()
26 | self.num_percentiles = int(percentiles)
27 | self.num_steps = int(steps)
28 |
29 | def _generate(self, input):
30 | input = input.view(-1)
31 | p1 = input[0].item()
32 | p2 = input[1].item()
33 | p3 = input[2].item()
34 | # Service / processing time.
35 | sts = (p2 - p1) * rng.random(self.num_steps) + p1
36 | # Interarrival times.
37 | iats = -np.log(1.0 - rng.rand(self.num_steps)) / p3
38 | # Arrival times.
39 | ats = np.cumsum(iats)
40 | # Interdeparture and departure times.
41 | idts = np.empty(self.num_steps)
42 | dts = np.empty(self.num_steps)
43 | idts[0] = sts[0] + ats[0]
44 | dts[0] = idts[0]
45 | for i in range(1, self.num_steps):
46 | idts[i] = sts[i] + max(0.0, ats[i] - dts[i-1])
47 | dts[i] = dts[i-1] + idts[i]
48 | # Compute the observation.
49 | perc = np.linspace(0.0, 100.0, self.num_percentiles)
50 | stats = np.percentile(idts, perc)
51 |
52 | return torch.tensor(stats).float().view(1, -1)
53 |
54 | def forward(self, inputs):
55 | r""""""
56 | samples = []
57 |
58 | for input in inputs:
59 | x_out = self._generate(input)
60 | samples.append(x_out.view(1, -1))
61 |
62 | return torch.cat(samples, dim=0)
63 |
--------------------------------------------------------------------------------
/hypothesis/benchmark/mg1/util.py:
--------------------------------------------------------------------------------
1 | r"""Utilities for the M/G/1 benchmark.
2 |
3 | """
4 |
5 | import torch
6 | import torch.distributions.uniform
7 |
8 | from hypothesis.exception import IntractableException
9 |
10 |
11 | def Prior():
12 | lower = torch.tensor([0, 0, 0]).float()
13 | upper = torch.tensor([10, 10, 1/3]).float()
14 | return Uniform(lower, upper)
15 |
16 |
17 | def Truth():
18 | return torch.tensor([1, 5, .2]).float()
19 |
20 |
21 | def log_likelihood(theta, x):
22 | raise IntractableException
23 |
24 |
25 | class Uniform(torch.distributions.uniform.Uniform):
26 |
27 | def __init__(self, lower, upper):
28 | super(Uniform, self).__init__(lower, upper)
29 |
30 | def log_prob(self, sample):
31 | return super(Uniform, self).log_prob(sample).mean()
32 |
--------------------------------------------------------------------------------
/hypothesis/benchmark/normal/__init__.py:
--------------------------------------------------------------------------------
1 | from .environment import Environment
2 | from .simulator import NormalSimulator as Simulator
3 | from .util import Prior
4 | from .util import PriorExperiment
5 | from .util import Truth
6 | from .util import log_likelihood
7 |
--------------------------------------------------------------------------------
/hypothesis/benchmark/normal/environment.py:
--------------------------------------------------------------------------------
1 | import hypothesis
2 | import torch
3 |
4 | from .simulator import NormalSimulator as Simulator
5 | from .util import Prior
6 | from .util import PriorExperiment
7 | from hypothesis.benchmark import BenchmarkEnvironment
8 |
9 |
10 |
11 | class Environment(BenchmarkEnvironment):
12 |
13 | def __init__(self, entropy_estimator,
14 | max_experiments=10,
15 | truth=None):
16 | super(Environment, self).__init__(
17 | entropy_estimator=entropy_estimator,
18 | max_experiments=max_experiments,
19 | prior=Prior(),
20 | prior_experiment=PriorExperiment(),
21 | simulator=Simulator(),
22 | truth=truth)
23 |
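# Usage sketch (not part of the original module): a minimal sequential
# experimental-design loop. The entropy estimator below is a hypothetical
# placeholder; a real one maps the lists of performed designs and observations
# to a scalar tensor (see BenchmarkEnvironment._reward).
#
#   import torch
#   from hypothesis.benchmark.normal import Environment
#
#   def entropy_estimator(actions, observations):
#       return torch.zeros(1)  # Placeholder estimate.
#
#   environment = Environment(entropy_estimator=entropy_estimator, max_experiments=5)
#   done = False
#   while not done:
#       design = torch.ones(1, 1)  # Illustrative design (the measurement uncertainty).
#       observation, reward, done, summary = environment.step(design)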
--------------------------------------------------------------------------------
/hypothesis/benchmark/normal/simulator.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from hypothesis.simulation import Simulator
4 | from torch.distributions.normal import Normal
5 |
6 |
7 |
8 | class NormalSimulator(Simulator):
9 | r"""
10 |
11 | Todo:
12 | Write method docs.
13 | """
14 |
15 | def __init__(self, uncertainty=1):
16 | super(NormalSimulator, self).__init__()
17 | self.uncertainty = float(uncertainty)
18 |
19 | def forward(self, inputs, designs=None):
20 | if designs is None:
21 | designs = self.uncertainty
22 |
23 | return Normal(inputs, designs).sample()
24 |
--------------------------------------------------------------------------------
/hypothesis/benchmark/normal/util.py:
--------------------------------------------------------------------------------
1 | r"""Utilities for the normal benchmark.
2 |
3 | """
4 |
5 | import torch
6 |
7 | from torch.distributions.normal import Normal
8 |
9 |
10 |
11 | def Prior():
12 | return Uniform(-5, 5)
13 |
14 |
15 | def PriorExperiment():
16 | return Uniform(-5, 5)
17 |
18 |
19 | def Truth():
20 | return torch.tensor([0]).float()
21 |
22 |
23 | def log_likelihood(theta, x):
24 | return Normal(theta, 1).log_prob(x)
25 |
26 |
27 |
28 | class Uniform(torch.distributions.uniform.Uniform):
29 |
30 | def __init__(self, lower, upper):
31 | super(Uniform, self).__init__(lower, upper)
32 |
33 | def log_prob(self, sample):
34 | return super(Uniform, self).log_prob(sample).mean()
35 |
--------------------------------------------------------------------------------
/hypothesis/benchmark/seismic/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/montefiore-institute/hypothesis/29a2b7b4649db345d43a8d3bf98aa5d817b43f1b/hypothesis/benchmark/seismic/__init__.py
--------------------------------------------------------------------------------
/hypothesis/benchmark/seismic/simulator.py:
--------------------------------------------------------------------------------
1 | r"""Generate the physics of a hypothetical 2-D spherical world, and then generate
2 | seismic events and detections.
3 |
4 | Based on the codebase of: Nimar Arora https://github.com/nimar/seismic-2d/blob/master/generate.py
5 | """
6 |
7 | import hypothesis
8 | import numpy as np
9 | import torch
10 |
11 | from hypothesis.simulation import Simulator as BaseSimulator
12 |
13 |
14 |
15 | class SeismicSimulator(BaseSimulator):
16 |
17 | def __init__(self):
18 | super(SeismicSimulator, self).__init__()
19 |
20 | def forward(self, inputs, experimental_configurations=None):
21 | raise NotImplementedError
22 |
--------------------------------------------------------------------------------
/hypothesis/benchmark/seismic/util.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/montefiore-institute/hypothesis/29a2b7b4649db345d43a8d3bf98aa5d817b43f1b/hypothesis/benchmark/seismic/util.py
--------------------------------------------------------------------------------
/hypothesis/benchmark/sir/__init__.py:
--------------------------------------------------------------------------------
1 | from .simulator import SIRSimulator as Simulator
2 | from .util import Prior
3 | from .util import PriorExperiment
4 | from .util import Truth
5 | from .util import log_likelihood
6 |
--------------------------------------------------------------------------------
/hypothesis/benchmark/sir/simulator.py:
--------------------------------------------------------------------------------
1 | import hypothesis
2 | import numpy as np
3 | import torch
4 |
5 | from hypothesis.simulation import Simulator as BaseSimulator
6 | from torch.distributions.binomial import Binomial
7 |
8 |
9 |
10 | class SIRSimulator(BaseSimulator):
11 |
12 | def __init__(self, population_size=1000, default_measurement_time=1.0, step_size=0.01):
13 | super(SIRSimulator, self).__init__()
14 | self.default_measurement_time = torch.tensor(default_measurement_time).float()
15 | self.population_size = int(population_size)
16 | self.step_size = float(step_size)
17 |
18 | def simulate(self, theta, psi):
19 | # theta = [beta, gamma]
20 | # psi = tau
21 | # sample = [S(tau), I(tau), R(tau)]
22 | beta = theta[0].item()
23 | gamma = theta[1].item()
24 | psi = psi.item()
25 | S = self.population_size - 1
26 | I = 1
27 | R = 0
28 | n_steps = int(psi / self.step_size)
29 | for i in range(n_steps):
30 | if I == 0: # State will remain the same.
31 | break
32 | delta_I = int(Binomial(S, beta * I / self.population_size).sample())
33 | delta_R = int(Binomial(I, gamma).sample())
34 | S -= delta_I
35 | I = I + delta_I - delta_R
36 | R += delta_R
37 |
38 | return torch.tensor([S, I, R]).float()
39 |
40 | @torch.no_grad()
41 | def forward(self, inputs, experimental_configurations=None):
42 | outputs = []
43 |
44 | n = len(inputs)
45 | for index in range(n):
46 | theta = inputs[index]
47 | if experimental_configurations is not None:
48 | psi = experimental_configurations[index]
49 | x = self.simulate(theta, psi)
50 | else:
51 | x = self.simulate(theta, self.default_measurement_time)
52 | outputs.append(x.view(1, -1))
53 | outputs = torch.cat(outputs, dim=0)
54 |
55 | return outputs
56 |
--------------------------------------------------------------------------------
/hypothesis/benchmark/sir/util.py:
--------------------------------------------------------------------------------
1 | r"""Utilities for the SIR benchmark.
2 |
3 | """
4 |
5 | import torch
6 |
7 | from hypothesis.exception import IntractableException
8 |
9 |
10 |
11 | def Prior():
12 | r"""Prior over the infection and recovery rates."""
13 | lower = torch.tensor([0, 0]).float()
14 | upper = torch.tensor([0.5, 0.5]).float()
15 |
16 | return Uniform(lower, upper)
17 |
18 |
19 | def PriorExperiment():
20 | r"""Prior over the experimental design space (measurement time)."""
21 | return Uniform(0.1, 10.0)
22 |
23 |
24 | def Truth():
25 | return torch.tensor([0.15, 0.05])
26 |
27 |
28 | def log_likelihood(theta, x):
29 | raise IntractableException
30 |
31 |
32 |
33 | class Uniform(torch.distributions.uniform.Uniform):
34 |
35 | r"""Used to initialize the prior over the experimental design space."""
36 | def __init__(self, lower, upper):
37 | super(Uniform, self).__init__(lower, upper)
38 |
39 | def log_prob(self, sample):
40 | return super(Uniform, self).log_prob(sample).mean()
41 |
--------------------------------------------------------------------------------
/hypothesis/benchmark/spatialsir/__init__.py:
--------------------------------------------------------------------------------
1 | from .simulator import SpatialSIRSimulator as Simulator
2 | from .util import Prior
3 | from .util import PriorExperiment
4 | from .util import Truth
5 | from .util import log_likelihood
6 |
--------------------------------------------------------------------------------
/hypothesis/benchmark/spatialsir/simulator.py:
--------------------------------------------------------------------------------
1 | import hypothesis
2 | import numpy as np
3 | import torch
4 |
5 | from hypothesis.simulation import Simulator as BaseSimulator
6 | from scipy import signal
7 | from torch.distributions.poisson import Poisson
8 |
9 |
10 |
11 | class SpatialSIRSimulator(BaseSimulator):
12 |
13 | def __init__(self, initial_infections_rate=3, shape=(100, 100), default_measurement_time=1.0, step_size=0.01):
14 | super(SpatialSIRSimulator, self).__init__()
15 | self.default_measurement_time = default_measurement_time
16 | self.lattice_shape = shape
17 | self.p_initial_infections = Poisson(float(initial_infections_rate))
18 | self.simulation_step_size = step_size
19 |
20 | def _sample_num_initial_infections(self):
21 | return int(1 + self.p_initial_infections.sample().item())
22 |
23 | def simulate(self, theta, psi):
24 | # Extract the simulation parameters.
25 | beta = theta[0].item() # Infection rate
26 | gamma = theta[1].item() # Recovery rate
27 | # Allocate the data grids.
28 |         infected = np.zeros(self.lattice_shape, dtype=int)
29 |         recovered = np.zeros(self.lattice_shape, dtype=int)
30 |         kernel = np.ones((3, 3), dtype=int)
31 | # Seed the grid with the initial infections.
32 | num_initial_infections = self._sample_num_initial_infections()
33 | for _ in range(num_initial_infections):
34 | index_height = np.random.randint(0, self.lattice_shape[0])
35 | index_width = np.random.randint(0, self.lattice_shape[1])
36 | infected[index_height][index_width] = 1
37 |         # Derive the maximum number of simulation steps.
38 | simulation_steps = int(psi / self.simulation_step_size)
39 | susceptible = (1 - recovered) * (1 - infected)
40 | for _ in range(simulation_steps):
41 | if infected.sum() == 0:
42 | break
43 | # Infection
44 | potential = signal.convolve2d(infected, kernel, mode="same")
45 | potential *= susceptible
46 | potential = potential * beta / 8
47 |             next_infected = ((potential > np.random.uniform(size=self.lattice_shape)).astype(int) + infected) * (1 - recovered)
48 |             next_infected = (next_infected >= 1).astype(int)
49 | # Recover
50 | potential = infected * gamma
51 |             next_recovered = (potential > np.random.uniform(size=self.lattice_shape)).astype(int) + recovered
52 |             next_recovered = (next_recovered >= 1).astype(int)
53 | # Next parameters
54 | recovered = next_recovered
55 | infected = next_infected
56 | susceptible = (1 - recovered) * (1 - infected)
57 | # Convert to tensors
58 | susceptible = torch.from_numpy(susceptible).float().view(1, 1, self.lattice_shape[0], self.lattice_shape[1])
59 | infected = torch.from_numpy(infected).float().view(1, 1, self.lattice_shape[0], self.lattice_shape[1])
60 | recovered = torch.from_numpy(recovered).float().view(1, 1, self.lattice_shape[0], self.lattice_shape[1])
61 | image = torch.cat([susceptible, infected, recovered], dim=1)
62 |
63 | return image
64 |
65 | @torch.no_grad()
66 | def forward(self, inputs, experimental_configurations=None):
67 | outputs = []
68 |
69 | n = len(inputs)
70 | for index in range(n):
71 | theta = inputs[index]
72 | if experimental_configurations is not None:
73 | psi = experimental_configurations[index]
74 | x = self.simulate(theta, psi.item())
75 | else:
76 | x = self.simulate(theta, self.default_measurement_time)
77 | outputs.append(x)
78 |
79 | return torch.cat(outputs, dim=0).float()
80 |
--------------------------------------------------------------------------------
/hypothesis/benchmark/spatialsir/util.py:
--------------------------------------------------------------------------------
1 | r"""Utilities for the spatial SIR benchmark.
2 |
3 | """
4 |
5 | import torch
6 |
7 | from hypothesis.exception import IntractableException
8 |
9 |
10 |
11 | def Prior():
12 | r"""Prior over the infection and recovery rates."""
13 | lower = torch.tensor([0, 0]).float()
14 | upper = torch.tensor([1, 1]).float()
15 |
16 | return Uniform(lower, upper)
17 |
18 |
19 | def PriorExperiment():
20 | r"""Prior over the experimental design space (measurement time)."""
21 | return Uniform(0.1, 10.0)
22 |
23 |
24 | def Truth():
25 | return torch.tensor([0.15, 0.05])
26 |
27 |
28 | def log_likelihood(theta, x):
29 | raise IntractableException
30 |
31 |
32 |
33 | class Uniform(torch.distributions.uniform.Uniform):
34 |
35 | r"""Used to initialize the prior over the experimental design space."""
36 | def __init__(self, lower, upper):
37 | super(Uniform, self).__init__(lower, upper)
38 |
39 | def log_prob(self, sample):
40 | return super(Uniform, self).log_prob(sample).mean()
41 |
--------------------------------------------------------------------------------
/hypothesis/benchmark/tractable/__init__.py:
--------------------------------------------------------------------------------
1 | from .simulator import TractableSimulator as Simulator
2 | from .util import Prior
3 | from .util import Truth
4 | from .util import log_likelihood
5 |
--------------------------------------------------------------------------------
/hypothesis/benchmark/tractable/simulator.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from hypothesis.simulation import Simulator
4 | from torch.distributions.multivariate_normal import MultivariateNormal as Normal
5 |
6 |
7 |
8 | class TractableSimulator(Simulator):
9 | r"""
10 |
11 | Todo:
12 | Write method docs.
13 | """
14 |
15 | def __init__(self):
16 | super(TractableSimulator, self).__init__()
17 |
18 | def _generate(self, input):
19 | mean = torch.tensor([input[0], input[1]])
20 | scale = 1.0
21 | s_1 = input[2] ** 2
22 | s_2 = input[3] ** 2
23 | rho = input[4].tanh()
24 | covariance = torch.tensor([
25 | [scale * s_1 ** 2, scale * rho * s_1 * s_2],
26 | [scale * rho * s_1 * s_2, scale * s_2 ** 2]])
27 | normal = Normal(mean, covariance)
28 | x_out = normal.sample(torch.Size([4])).view(1, -1)
29 |
30 | return x_out
31 |
32 | def forward(self, inputs):
33 | r""""""
34 | samples = []
35 |
36 | for input in inputs:
37 | x_out = self._generate(input)
38 | samples.append(x_out.view(1, -1))
39 |
40 | return torch.cat(samples, dim=0)
41 |
--------------------------------------------------------------------------------
/hypothesis/benchmark/tractable/util.py:
--------------------------------------------------------------------------------
1 | r"""Utilities for the tractable benchmark.
2 |
3 | """
4 |
5 | import torch
6 |
7 | from torch.distributions.multivariate_normal import MultivariateNormal as Normal
8 |
9 |
10 |
11 | def Prior():
12 | lower = -3 * torch.ones(5).float()
13 | upper = 3 * torch.ones(5).float()
14 |
15 |     return Uniform(lower, upper)
16 |
17 |
18 | def Truth():
19 | truth = [0.7, -2.9, -1.0, -0.9, 0.6]
20 |
21 | return torch.tensor(truth).float()
22 |
23 |
24 | def log_likelihood(theta, x):
25 | with torch.no_grad():
26 | input = theta
27 | mean = torch.tensor([input[0], input[1]])
28 | scale = 1.0
29 | s_1 = input[2] ** 2
30 | s_2 = input[3] ** 2
31 | rho = input[4].tanh()
32 | covariance = torch.tensor([
33 | [scale * s_1 ** 2, scale * rho * s_1 * s_2],
34 | [scale * rho * s_1 * s_2, scale * s_2 ** 2]])
35 | normal = Normal(mean, covariance)
36 | m = x.view(-1, 2)
37 | log_likelihood = normal.log_prob(m).sum()
38 |
39 | return log_likelihood
40 |
41 |
42 |
43 | class Uniform(torch.distributions.uniform.Uniform):
44 |
45 | def __init__(self, lower, upper):
46 | super(Uniform, self).__init__(lower, upper)
47 |
48 | def log_prob(self, sample):
49 | return super(Uniform, self).log_prob(sample).mean()
50 |
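
A minimal sketch (not part of the repository): simulate at the ground truth and evaluate the tractable likelihood defined above; it assumes the exports listed in this benchmark's __init__.py.

    from hypothesis.benchmark.tractable import Prior, Simulator, Truth, log_likelihood

    theta = Truth()                              # [0.7, -2.9, -1.0, -0.9, 0.6]
    x = Simulator().forward(theta.view(1, -1))   # Shape (1, 8): four 2-d draws, flattened.
    print(Prior().log_prob(theta))               # Mean log-density of the uniform prior.
    print(log_likelihood(theta, x[0]))           # Tractable log-likelihood of the draw.
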
--------------------------------------------------------------------------------
/hypothesis/benchmark/weinberg/__init__.py:
--------------------------------------------------------------------------------
1 | from .simulator import WeinbergSimulator as Simulator
2 | from .util import Prior
3 | from .util import PriorExperiment
4 | from .util import Truth
5 | from .util import log_likelihood
6 |
--------------------------------------------------------------------------------
/hypothesis/benchmark/weinberg/simulator.py:
--------------------------------------------------------------------------------
1 | r"""This is a simulation of high energy particle collisions $e^+e^- \to \mu^+ \mu^-$.
2 | The angular distributions of the particles can be used to measure the Weinberg angle
3 | in the standard model of particle physics. If you get a PhD in particle physics,
4 | you may learn how to calculate these distributions, and from those equations you would
5 | learn that an effective way to infer this parameter is to run your particle accelerator
6 | with a beam energy just above or below half the $Z$ boson mass (i.e., the optimal $\phi$
7 | is just above or below 45 GeV).
8 |
9 | Adapted from https://github.com/cranmer/active_sciencing/blob/master/demo_weinberg.ipynb
10 |
11 | Original implementation by Lucas Heinrich and Kyle Cranmer
12 | """
13 |
14 | import hypothesis
15 | import numpy as np
16 | import torch
17 |
18 | from hypothesis.simulation import Simulator as BaseSimulator
19 |
20 |
21 |
22 | class WeinbergSimulator(BaseSimulator):
23 |
24 | MZ = int(90)
25 | GFNom = float(1)
26 |
27 | def __init__(self, default_beam_energy=45.0, num_samples=1):
28 | super(WeinbergSimulator, self).__init__()
29 | self.num_samples = int(num_samples)
30 | self.default_beam_energy = float(default_beam_energy)
31 |
32 | def _a_fb(self, sqrtshalf, gf):
33 | sqrts = sqrtshalf * 2.
34 | A_FB_EN = np.tanh((sqrts - self.MZ) / self.MZ * 10)
35 | A_FB_GF = gf / self.GFNom
36 |
37 | return 2 * A_FB_EN * A_FB_GF
38 |
39 | def _diffxsec(self, costheta, sqrtshalf, gf):
40 | norm = 2. * ((1. + 1. / 3.))
41 |
42 | return ((1 + costheta**2) + self._a_fb(sqrtshalf, gf) * costheta) / norm
43 |
44 | def simulate(self, theta, psi):
45 | # theta = gf
46 | # psi = sqrtshalf
47 | samples = []
48 |
49 | for _ in range(self.num_samples):
50 | sample = None
51 | x = np.linspace(-1, 1, 10000)
52 | maxval = np.max(self._diffxsec(x, psi, theta))
53 | while sample is None:
54 | xprop = np.random.uniform(-1, 1)
55 | ycut = np.random.random()
56 | yprop = self._diffxsec(xprop, psi, theta) / maxval
57 | if yprop / maxval < ycut:
58 | continue
59 | sample = xprop
60 | sample = torch.tensor(sample).view(1, 1)
61 | samples.append(sample)
62 |
63 | return torch.cat(samples, dim=1)
64 |
65 | @torch.no_grad()
66 | def forward(self, inputs, experimental_configurations=None):
67 | outputs = []
68 |
69 | n = len(inputs)
70 | for index in range(n):
71 | theta = inputs[index]
72 | if experimental_configurations is not None:
73 | psi = experimental_configurations[index]
74 | x = self.simulate(theta.item(), psi.item())
75 | else:
76 | x = self.simulate(theta.item(), self.default_beam_energy)
77 | outputs.append(x)
78 | outputs = torch.cat(outputs, dim=0)
79 |
80 | return outputs
81 |
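
A minimal sketch (not part of the repository) illustrating the experimental design dimension described in the module docstring: the same Fermi constant is simulated at beam energies below and above half the $Z$ boson mass. The chosen energies and sample count are illustrative.

    import torch

    from hypothesis.benchmark.weinberg import Simulator, Truth

    simulator = Simulator(num_samples=100)
    theta = Truth().repeat(2)             # The same Fermi constant, twice.
    psi = torch.tensor([42.0, 48.0])      # Beam energies below and above 45 GeV.
    x = simulator(theta.view(-1, 1), psi.view(-1, 1))
    print(x.shape)                        # torch.Size([2, 100]) of cos(theta) samples.
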
--------------------------------------------------------------------------------
/hypothesis/benchmark/weinberg/util.py:
--------------------------------------------------------------------------------
1 | r"""Utilities for the Weinberg benchmark.
2 |
3 | """
4 |
5 | import torch
6 |
7 | from hypothesis.exception import IntractableException
8 |
9 |
10 |
11 | def Prior():
12 | r"""Prior over the Fermi constant."""
13 | return Uniform(0.5, 1.5)
14 |
15 |
16 | def PriorExperiment():
17 | r"""Prior over the experimental design space (the beam-energy)."""
18 | return Uniform(40, 50)
19 |
20 |
21 | def Truth():
22 | return torch.tensor([1]).float()
23 |
24 |
25 | def log_likelihood(theta, x):
26 | raise IntractableException
27 |
28 |
29 |
30 | class Uniform(torch.distributions.uniform.Uniform):
31 |
32 | def __init__(self, lower, upper):
33 | super(Uniform, self).__init__(lower, upper)
34 |
35 | def log_prob(self, sample):
36 | return super(Uniform, self).log_prob(sample).mean()
37 |
--------------------------------------------------------------------------------
/hypothesis/bin/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/montefiore-institute/hypothesis/29a2b7b4649db345d43a8d3bf98aa5d817b43f1b/hypothesis/bin/__init__.py
--------------------------------------------------------------------------------
/hypothesis/bin/hpc/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/montefiore-institute/hypothesis/29a2b7b4649db345d43a8d3bf98aa5d817b43f1b/hypothesis/bin/hpc/__init__.py
--------------------------------------------------------------------------------
/hypothesis/bin/io/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/montefiore-institute/hypothesis/29a2b7b4649db345d43a8d3bf98aa5d817b43f1b/hypothesis/bin/io/__init__.py
--------------------------------------------------------------------------------
/hypothesis/bin/io/merge.py:
--------------------------------------------------------------------------------
1 | r"""A utility program to merge data files.
2 |
3 | Use-cases include merging batch simulations.
4 | """
5 |
6 | import argparse
7 | import glob
8 | import numpy as np
9 | import os
10 | import torch
11 |
12 | from hypothesis.util.data.numpy import merge as numpy_merge
13 |
14 |
15 |
16 | def main(arguments):
17 | procedure = select_extension_procedure(arguments)
18 | procedure(arguments)
19 |
20 |
21 | def procedure_numpy(arguments):
22 | files = fetch_input_files(arguments)
23 | numpy_merge(input_files=files,
24 | output_file=arguments.out,
25 | tempfile=arguments.tempfile,
26 | in_memory=arguments.in_memory,
27 | axis=arguments.dimension)
28 |
29 |
30 | def procedure_torch(arguments):
31 | raise NotImplementedError
32 |
33 |
34 | def fetch_input_files(arguments, delimiter=','):
35 | # Check if the user specified a list of input files
36 | if delimiter in arguments.files:
37 | files = arguments.files.split(delimiter)
38 | # Check if the specified file exists
39 | elif os.path.exists(arguments.files):
40 | files = [arguments.files]
41 | # The specified argument is a query
42 | else:
43 | query = arguments.files
44 | files = glob.glob(query)
45 | # Check if the list of files needs to be sorted.
46 | if arguments.sort:
47 | files.sort()
48 |
49 | return files
50 |
51 |
52 | def select_extension_procedure(arguments):
53 | extension = arguments.extension
54 | mappings = {
55 | "numpy": procedure_numpy,
56 | "torch": procedure_torch}
57 |     # Check if a valid extension has been manually defined
58 |     if extension in mappings.keys():
59 |         procedure = mappings[extension]
60 |     else:
61 |         procedure = None
62 |
63 | return procedure
64 |
65 |
66 | def parse_arguments():
67 | parser = argparse.ArgumentParser("Merge: merging data files for your convenience.")
68 | parser.add_argument("--dimension", type=int, default=0, help="Dimension in which to merge the data (default: 0).")
69 |     parser.add_argument("--extension", type=str, default=None, help="Data format to process; available options: numpy, torch (default: none).")
70 | parser.add_argument("--files", type=str, default=None, help="A list of files delimited by ',' or a glob pattern (default: none).")
71 | parser.add_argument("--in-memory", action="store_true", help="Processes all chunks in memory (default: false).")
72 | parser.add_argument("--out", type=str, default=None, help="Output path to store the result (default: none).")
73 | parser.add_argument("--sort", action="store_true", help="Sort the input files before processing (default: false).")
74 |     parser.add_argument("--tempfile", type=str, default=None, help="Path of the temporary file for intermediate results; only used for non in-memory operations (default: none).")
75 | arguments, _ = parser.parse_known_args()
76 | # Check if a proper extension has been specified.
77 | if select_extension_procedure(arguments) is None:
78 |         raise ValueError("The specified extension (" + str(arguments.extension) + ") does not exist.")
79 | # Check if files to merge have been specified.
80 | if arguments.files is None:
81 | raise ValueError("No input files have been specified.")
82 | # Check if an output path has been specified.
83 | if arguments.out is None:
84 | raise ValueError("No output path has been specified.")
85 |
86 | return arguments
87 |
88 |
89 | if __name__ == "__main__":
90 | arguments = parse_arguments()
91 | main(arguments)
92 |
--------------------------------------------------------------------------------
/hypothesis/bin/io/prune.py:
--------------------------------------------------------------------------------
1 | r"""A utility program to prune data files.
2 |
3 | """
4 |
5 | import argparse
6 | import numpy as np
7 | import os
8 | import shutil
9 | import torch
10 |
11 |
12 |
13 | def main(arguments):
14 | # Check if the file needs to be processed in memory.
15 |     if arguments.in_memory:
16 |         mmap_mode = None
17 |     else:
18 |         mmap_mode = 'r'
19 | data = np.load(arguments.in_file, mmap_mode=mmap_mode)
20 | data = np.delete(data, arguments.indices, arguments.dimension)
21 | np.save(arguments.out_file, data)
22 |
23 |
24 | def parse_arguments():
25 |     parser = argparse.ArgumentParser("Prune: pruning data files for your convenience.")
26 | parser.add_argument("--dimension", type=int, default=1, help="Data dimension to work in (default: 1).")
27 | parser.add_argument("--in-file", type=str, default=None, help="Path to the file to process (default: none).")
28 | parser.add_argument("--in-memory", action="store_true", help="Process the data in-memory (default: false).")
29 |     parser.add_argument("--indices", type=str, default=None, help="A comma-separated list of indices to remove (default: none).")
30 | parser.add_argument("--out-file", type=str, default=None, help="Path of the processed file (default: none).")
31 | arguments, _ = parser.parse_known_args()
32 | # Check if an input file has been specified.
33 | if arguments.in_file is None:
34 | raise ValueError("No input file has been specified.")
35 | # Check if an output file has been specified.
36 | if arguments.out_file is None:
37 | raise ValueError("No output file has been specified.")
38 | # Check if indices have been specified.
39 | if arguments.indices is None:
40 | raise ValueError("No indices have been specified.")
41 | arguments.indices = [int(index) for index in arguments.indices.split(',')]
42 |
43 | return arguments
44 |
45 |
46 | if __name__ == "__main__":
47 | arguments = parse_arguments()
48 | main(arguments)
49 |
--------------------------------------------------------------------------------
/hypothesis/bin/ratio_estimation/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/montefiore-institute/hypothesis/29a2b7b4649db345d43a8d3bf98aa5d817b43f1b/hypothesis/bin/ratio_estimation/__init__.py
--------------------------------------------------------------------------------
/hypothesis/bin/ratio_estimation/train.py:
--------------------------------------------------------------------------------
1 | """General training script fo Amortised Approximate Ratio Estimation (AARE)."""
2 |
3 | import argparse
4 | import hypothesis
5 | import importlib
6 | import numpy as np
7 | import os
8 | import torch
9 |
10 | from hypothesis.auto.training import LikelihoodToEvidenceRatioEstimatorTrainer as Trainer
11 | from hypothesis.auto.training import create_trainer
12 | from hypothesis.nn.amortized_ratio_estimation import BaseConservativeCriterion
13 | from hypothesis.nn.amortized_ratio_estimation import BaseCriterion
14 | from hypothesis.nn.amortized_ratio_estimation import BaseExperimentalCriterion
15 | from torch.optim.lr_scheduler import ReduceLROnPlateau
16 | from torch.optim.lr_scheduler import StepLR
17 | from torch.utils.data import TensorDataset
18 | from tqdm import tqdm
19 |
20 |
21 |
22 | def main(arguments):
23 | # Allocate the datasets
24 | dataset_test = allocate_dataset_test(arguments)
25 | dataset_train = allocate_dataset_train(arguments)
26 | # Allocate the ratio estimator
27 | estimator = allocate_estimator(arguments)
28 | # Check if the gradients have to be clipped.
29 | if arguments.clip_grad != 0.0:
30 | for p in estimator.parameters():
31 | p.register_hook(lambda grad: torch.clamp(grad, -arguments.clip_grad, arguments.clip_grad))
32 | # Allocate the optimizer
33 | optimizer = torch.optim.AdamW(
34 | estimator.parameters(),
35 | amsgrad=arguments.amsgrad,
36 | lr=arguments.lr,
37 | weight_decay=arguments.weight_decay)
38 | # Prepare the training criterion
39 | if arguments.conservativeness > 0.0:
40 | criterion = BaseConservativeCriterion(
41 | batch_size=arguments.batch_size,
42 | beta=arguments.conservativeness,
43 | denominator=arguments.denominator,
44 | estimator=estimator,
45 | logits=arguments.logits)
46 | else:
47 | criterion = BaseCriterion(
48 | batch_size=arguments.batch_size,
49 | denominator=arguments.denominator,
50 | estimator=estimator,
51 | logits=arguments.logits)
52 | # Check if the experimental settings have to be activated
53 | if arguments.experimental:
54 | criterion = BaseExperimentalCriterion(
55 | batch_size=arguments.batch_size,
56 | denominator=arguments.denominator,
57 | estimator=estimator,
58 | logits=arguments.logits)
59 | # Allocate the learning rate scheduler, if requested.
60 | if arguments.lrsched:
61 | if arguments.lrsched_every is None or arguments.lrsched_gamma is None:
62 | lr_scheduler = ReduceLROnPlateau(optimizer, verbose=True)
63 | else:
64 | lr_scheduler = StepLR(optimizer, step_size=arguments.lrsched_every, gamma=arguments.lrsched_gamma)
65 | else:
66 | lr_scheduler = None
67 | # Allocate the trainer
68 | Trainer = create_trainer(criterion, arguments.denominator)
69 | trainer = Trainer(
70 | accelerator=hypothesis.accelerator,
71 | batch_size=arguments.batch_size,
72 | criterion=criterion,
73 | dataset_test=dataset_test,
74 | dataset_train=dataset_train,
75 | epochs=arguments.epochs,
76 | estimator=estimator,
77 | lr_scheduler=lr_scheduler,
78 | shuffle=(not arguments.dont_shuffle),
79 | optimizer=optimizer,
80 | workers=arguments.workers)
81 | # Register the callbacks
82 | if arguments.show:
83 | # Callbacks
84 | progress_bar = tqdm(total=arguments.epochs)
85 | def report_test_loss(caller):
86 | trainer = caller
87 | current_epoch = trainer.current_epoch
88 | test_loss = trainer.losses_test[-1]
89 | progress_bar.set_description("Test loss %s" % test_loss)
90 | progress_bar.update(1)
91 | trainer.add_event_handler(trainer.events.epoch_complete, report_test_loss)
92 | # Run the optimization procedure
93 | summary = trainer.fit()
94 | if arguments.show:
95 | # Cleanup the progress bar
96 | progress_bar.close()
97 | print(summary)
98 | if arguments.out is None:
99 | return # No output directory has been specified, exit.
100 | # Create the directory if it does not exist.
101 | if not os.path.exists(arguments.out):
102 | os.mkdir(arguments.out)
103 | best_model_weights = summary.best_model()
104 | final_model_weights = summary.final_model()
105 | train_losses = summary.train_losses()
106 | test_losses = summary.test_losses()
107 | # Save the results.
108 | np.save(arguments.out + "/losses-train.npy", train_losses)
109 | np.save(arguments.out + "/losses-test.npy", test_losses)
110 | torch.save(best_model_weights, arguments.out + "/best-model.th")
111 | torch.save(final_model_weights, arguments.out + "/model.th")
112 | summary.save(arguments.out + "/result.summary")
113 |
114 |
115 | @torch.no_grad()
116 | def allocate_dataset_train(arguments):
117 | return load_class(arguments.data_train)()
118 |
119 |
120 | @torch.no_grad()
121 | def allocate_dataset_test(arguments):
122 | if arguments.data_test is not None:
123 | dataset = load_class(arguments.data_test)()
124 | else:
125 | dataset = None
126 |
127 | return dataset
128 |
129 |
130 | @torch.no_grad()
131 | def allocate_estimator(arguments):
132 | estimator = load_class(arguments.estimator)()
133 | # Check if we are able to allocate a data parallel model.
134 | if torch.cuda.device_count() > 1 and arguments.data_parallel:
135 | estimator = torch.nn.DataParallel(estimator)
136 | estimator = estimator.to(hypothesis.accelerator)
137 |
138 | return estimator
139 |
140 |
141 | def load_class(full_classname):
142 | if full_classname is None:
143 | raise ValueError("The specified classname cannot be `None`.")
144 | module_name, class_name = full_classname.rsplit('.', 1)
145 | module = __import__(module_name, fromlist=[class_name])
146 |
147 | return getattr(module, class_name)
148 |
149 |
150 | def parse_arguments():
151 | parser = argparse.ArgumentParser("Amortised Approximate Ratio Estimator training")
152 | # General settings
153 | parser.add_argument("--data-parallel", action="store_true", help="Enable data-parallel training if multiple GPU's are available (default: false).")
154 | parser.add_argument("--disable-gpu", action="store_true", help="Disable the usage of the GPU, not recommended. (default: false).")
155 | parser.add_argument("--out", type=str, default=None, help="Output directory (default: none).")
156 | parser.add_argument("--show", action="store_true", help="Show the progress and the final result (default: false).")
157 | parser.add_argument("--dont-shuffle", action="store_true", help="Disables shuffling of the batch loader (default: false).")
158 | parser.add_argument("--denominator", type=str, default="inputs|outputs", help="Random variables in the denominator and their (in)dependence relation (default: 'inputs|outputs').")
159 | # Optimization settings
160 | parser.add_argument("--amsgrad", action="store_true", help="Use AMSGRAD version of Adam (default: false).")
161 | parser.add_argument("--batch-size", type=int, default=64, help="Batch size (default: 64).")
162 | parser.add_argument("--conservativeness", type=float, default=0.0, help="Conservative term (default: 0.0).")
163 | parser.add_argument("--clip-grad", type=float, default=0.0, help="Value to clip the gradients with (default: 0.0 or no clipping).")
164 | parser.add_argument("--epochs", type=int, default=1, help="Number of epochs (default: 1).")
165 | parser.add_argument("--logits", action="store_true", help="Use the logit-trick for the minimization criterion (default: false).")
166 | parser.add_argument("--lr", type=float, default=0.001, help="Learning rate (default: 0.001).")
167 | parser.add_argument("--lrsched", action="store_true", help="Enable learning rate scheduling (default: false).")
168 | parser.add_argument("--lrsched-every", type=int, default=None, help="Schedule the learning rate every n epochs (default: none).")
169 | parser.add_argument("--lrsched-gamma", type=float, default=None, help="Learning rate scheduling stepsize (default: none).")
170 | parser.add_argument("--weight-decay", type=float, default=0.0, help="Weight decay (default: 0.0).")
171 | parser.add_argument("--workers", type=int, default=2, help="Number of concurrent data loaders (default: 2).")
172 | # Data settings
173 | parser.add_argument("--data-test", type=str, default=None, help="Full classname of the testing dataset (default: none, optional).")
174 | parser.add_argument("--data-train", type=str, default=None, help="Full classname of the training dataset (default: none).")
175 | # Ratio estimator settings
176 | parser.add_argument("--estimator", type=str, default=None, help="Full classname of the ratio estimator (default: none).")
177 | # Experimental settings
178 | parser.add_argument("--experimental", action="store_true", help="Enable experimental settings (default: false).")
179 | arguments, _ = parser.parse_known_args()
180 |
181 | return arguments
182 |
183 |
184 | if __name__ == "__main__":
185 | arguments = parse_arguments()
186 | main(arguments)
187 |
--------------------------------------------------------------------------------
/hypothesis/default.py:
--------------------------------------------------------------------------------
1 | r"""Hypothesis defaults.
2 | """
3 |
4 | import hypothesis
5 | import numpy as np
6 | import torch
7 |
8 |
9 |
10 | activation = torch.nn.ReLU
11 | r"""Default activation function in Hypothesis."""
12 |
13 | batch_size = 128
14 | r"""Default batch size."""
15 |
16 | dropout = 0.0
17 | r"""Default dropout setting."""
18 |
19 | epochs = 1
20 | r"""Default number of data epochs."""
21 |
22 | output_transform = "normalize"
23 | r"""Default output transformation for neural networks.
24 |
25 | For 1-dimensional outputs, this is equivalent to torch.nn.Sigmoid. Otherwise, this
26 | will reduce to torch.nn.Softmax.
27 | """
28 |
29 | trunk = (512, 512, 512)
30 | r"""Default trunk of large convolution models such as ResNet or DenseNet."""
31 |
32 | dependent_delimiter = ','
33 | r"""Split character indicating the dependence between random variables."""
34 |
35 | independent_delimiter = '|'
36 | r"""Split character indicating the independene between random variables."""
37 |
38 | dataloader_workers = 4
39 | r"""Default number of dataloader workers."""
40 |
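
A minimal sketch (not part of the repository) of how the two delimiters above can be used to parse a denominator string such as the "inputs|outputs" default of the ratio-estimation training script; the parse_denominator helper is hypothetical and only included for illustration.

    import hypothesis.default

    def parse_denominator(denominator):
        # Independent groups are separated by '|', dependent variables by ','.
        groups = denominator.split(hypothesis.default.independent_delimiter)
        return [group.split(hypothesis.default.dependent_delimiter) for group in groups]

    print(parse_denominator("inputs|outputs"))          # [['inputs'], ['outputs']]
    print(parse_denominator("inputs,designs|outputs"))  # [['inputs', 'designs'], ['outputs']]
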
--------------------------------------------------------------------------------
/hypothesis/diagnostic/__init__.py:
--------------------------------------------------------------------------------
1 | from .base import BaseDiagnostic
2 | from .density import DensityDiagnostic
3 |
--------------------------------------------------------------------------------
/hypothesis/diagnostic/base.py:
--------------------------------------------------------------------------------
1 | import hypothesis
2 |
3 |
4 |
5 | class BaseDiagnostic:
6 |
7 | def reset(self):
8 | raise NotImplementedError
9 |
10 | def test(self, **kwargs):
11 | raise NotImplementedError
12 |
--------------------------------------------------------------------------------
/hypothesis/diagnostic/density.py:
--------------------------------------------------------------------------------
1 | import hypothesis
2 | import numpy as np
3 | import torch
4 |
5 | from hypothesis.diagnostic import BaseDiagnostic
6 | from scipy.integrate import nquad
7 |
8 |
9 |
10 | class DensityDiagnostic(BaseDiagnostic):
11 |
12 | def __init__(self, space, epsilon=0.1):
13 | super(DensityDiagnostic, self).__init__()
14 | self.epsilon = epsilon
15 | self.areas = []
16 | self.results = []
17 | self.space = space
18 |
19 | def reset(self):
20 | self.areas = []
21 | self.results = []
22 |
23 | def test(self, function):
24 | area, _ = nquad(function, self.space)
25 | passed = abs(1 - area) <= self.epsilon
26 | self.areas.append(area)
27 | self.results.append(passed)
28 |
29 | return passed
30 |
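
A minimal usage sketch (not part of the repository): the diagnostic above passes when a candidate density integrates to approximately one over the given space.

    import numpy as np

    from hypothesis.diagnostic import DensityDiagnostic

    def standard_normal_pdf(x):
        return np.exp(-0.5 * x ** 2) / np.sqrt(2 * np.pi)

    diagnostic = DensityDiagnostic(space=[[-10.0, 10.0]], epsilon=0.1)
    print(diagnostic.test(standard_normal_pdf))  # True: the integrated area is within 0.1 of 1.
    print(diagnostic.areas[-1])                  # Approximately 1.0.
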
--------------------------------------------------------------------------------
/hypothesis/diagnostic/ratio_estimator.py:
--------------------------------------------------------------------------------
1 | import hypothesis
2 | import numpy as np
3 | import torch
4 |
5 | from hypothesis.metric import roc_auc_score
6 | from hypothesis.metric import roc_curve
7 |
--------------------------------------------------------------------------------
/hypothesis/engine/__init__.py:
--------------------------------------------------------------------------------
1 | from .engine import Procedure
2 |
--------------------------------------------------------------------------------
/hypothesis/engine/engine.py:
--------------------------------------------------------------------------------
1 | from hypothesis.exception import NoSuchEventException
2 | from hypothesis.exception import NoEventRegistrationException
3 |
4 |
5 |
6 | class Events: pass
7 |
8 |
9 |
10 | class Procedure:
11 | r""""""
12 |
13 | def __init__(self):
14 | self.hooks = {}
15 | self.events = Events()
16 | self.num_events = 0
17 | self._register_events()
18 |
19 | def _register_events(self):
20 | raise NoEventRegistrationException
21 |
22 | def _event_exists(self, event):
23 | return event in self.hooks.keys()
24 |
25 | def register_event(self, event):
26 | # Check if the event already exists.
27 | event = event.lower()
28 | if not hasattr(self.events, event):
29 | setattr(self.events, event, self.num_events)
30 | event_index = self.num_events
31 | self.hooks[event_index] = []
32 | self.num_events += 1
33 |
34 | def registered_events(self):
35 | return self.events
36 |
37 | def add_event_handler(self, event, f):
38 | # Check if the specified event exists.
39 | if not self._event_exists(event):
40 | raise NoSuchEventException
41 | self.hooks[event].append(f)
42 |
43 | def clear_event_handler(self, event):
44 | # Check if the specified event exists.
45 | if not self._event_exists(event):
46 | raise NoSuchEventException()
47 | self.hooks[event] = []
48 |
49 | def clear_event_handlers(self):
50 | for key in self.hooks.keys():
51 | self.clear_event_handler(key)
52 |
53 | def call_event(self, event, **kwargs):
54 | # Check if the specified event exists.
55 | if not self._event_exists(event):
56 | raise NoSuchEventException
57 | handlers = self.hooks[event]
58 | for handler in handlers:
59 | handler(self, **kwargs)
60 |
61 | def on(self, event):
62 | def wrap(f):
63 | self.add_event_handler(event, f)
64 |
65 | return wrap
66 |
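
A minimal sketch (not part of the repository) of the event mechanism above: a subclass registers its events in _register_events and fires them with call_event. The CountingProcedure class is purely illustrative.

    from hypothesis.engine import Procedure

    class CountingProcedure(Procedure):

        def _register_events(self):
            self.register_event("step_complete")

        def run(self, steps):
            for step in range(steps):
                self.call_event(self.events.step_complete, step=step)

    procedure = CountingProcedure()
    procedure.add_event_handler(procedure.events.step_complete,
        lambda caller, step: print("Completed step", step))
    procedure.run(3)
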
--------------------------------------------------------------------------------
/hypothesis/exception/__init__.py:
--------------------------------------------------------------------------------
1 | r""""""
2 |
3 | from .intractable import IntractableException
4 | from .no_event_registration_exception import NoEventRegistrationException
5 | from .no_such_event import NoSuchEventException
6 | from .not_divisible_by_two import NotDivisibleByTwoException
7 | from .simulator import SimulatorException
8 |
--------------------------------------------------------------------------------
/hypothesis/exception/intractable.py:
--------------------------------------------------------------------------------
1 | class IntractableException(Exception):
2 | r""""""
3 |
4 | def __init__(self, message=None):
5 | if message is None:
6 | message = "This evaluation is intractable!"
7 | super(IntractableException, self).__init__(message)
8 |
--------------------------------------------------------------------------------
/hypothesis/exception/no_event_registration_exception.py:
--------------------------------------------------------------------------------
1 | class NoEventRegistrationException(Exception):
2 | r""""""
3 |
4 | def __init__(self, message=None):
5 | if message is None:
6 | message = "No events were registered by the procedure."
7 | super(NoEventRegistrationException, self).__init__(message)
8 |
--------------------------------------------------------------------------------
/hypothesis/exception/no_such_event.py:
--------------------------------------------------------------------------------
1 | class NoSuchEventException(Exception):
2 | r""""""
3 |
4 | def __init__(self, message=None):
5 | if message is None:
6 | message = "The specified event is not registered."
7 | super(NoSuchEventException, self).__init__(message)
8 |
--------------------------------------------------------------------------------
/hypothesis/exception/not_divisible_by_two.py:
--------------------------------------------------------------------------------
1 | class NotDivisibleByTwoException(Exception):
2 | r""""""
3 |
4 | def __init__(self, message=None):
5 | # Check if a custom message has been specified.
6 | if message is None:
7 | message = "Not divisible by two!"
8 |         super(NotDivisibleByTwoException, self).__init__(message)
9 |
--------------------------------------------------------------------------------
/hypothesis/exception/simulator.py:
--------------------------------------------------------------------------------
1 | class SimulatorException(Exception):
2 | r""""""
3 |
4 | def __init__(self, message=None):
5 | if message is None:
6 | message = "Unspecified simulation error."
7 | super(SimulatorException, self).__init__(message)
8 |
--------------------------------------------------------------------------------
/hypothesis/inference/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/montefiore-institute/hypothesis/29a2b7b4649db345d43a8d3bf98aa5d817b43f1b/hypothesis/inference/__init__.py
--------------------------------------------------------------------------------
/hypothesis/inference/abc.py:
--------------------------------------------------------------------------------
1 | r"""Approximate Bayesian Computation"""
2 |
3 | import hypothesis
4 | import torch
5 |
6 | from hypothesis.engine import Procedure
7 | from torch.multiprocessing import Pool
8 |
9 |
10 |
11 | class ApproximateBayesianComputation(Procedure):
12 | r""""""
13 |
14 | def __init__(self, simulator, prior, summary, acceptor):
15 | super(ApproximateBayesianComputation, self).__init__()
16 | # Main classical ABC properties.
17 | self.acceptor = acceptor
18 | self.prior = prior
19 | self.simulator = simulator
20 | self.summary = summary
21 |
22 | def _register_events(self):
23 | # TODO Implement.
24 | pass
25 |
26 | def _draw_posterior_sample(self, summary_observation):
27 | sample = None
28 |
29 | while sample is None:
30 | prior_sample = self.prior.sample()
31 | x = self.simulator(prior_sample)
32 | s = self.summary(x)
33 | if self.acceptor(s, summary_observation):
34 | sample = prior_sample.unsqueeze(0)
35 |
36 | return sample
37 |
38 | def sample(self, observation, num_samples=1):
39 | samples = []
40 |
41 | summary_observation = self.summary(observation)
42 | for _ in range(num_samples):
43 | samples.append(self._draw_posterior_sample(summary_observation))
44 | samples = torch.cat(samples, dim=0)
45 |
46 | return samples
47 |
48 |
49 |
50 | class ParallelApproximateBayesianComputation:
51 |
52 | def __init__(self, abc, workers=2):
53 | super(ParallelApproximateBayesianComputation, self).__init__()
54 | self.abc = abc
55 | self.pool = Pool(processes=workers)
56 | self.workers = workers
57 |
58 | def _prepare_arguments(self, observation, num_samples):
59 | arguments = []
60 |
61 | inputs = torch.arange(num_samples)
62 | num_chunks = num_samples // self.workers
63 | if num_chunks == 0:
64 | num_chunks = 1
65 | chunks = inputs.split(num_chunks, dim=0)
66 | for chunk in chunks:
67 | a = (self.abc, observation, len(chunk))
68 | arguments.append(a)
69 |
70 | return arguments
71 |
72 | def sample(self, observation, num_samples=1):
73 | arguments = self._prepare_arguments(observation, num_samples)
74 | outputs = self.pool.map(self._sample, arguments)
75 | outputs = torch.cat(outputs, dim=0)
76 |
77 | return outputs
78 |
79 | def __del__(self):
80 | self.pool.close()
81 | del self.pool
82 | self.pool = None
83 |
84 | @staticmethod
85 | def _sample(arguments):
86 | abc, observation, n = arguments
87 |
88 | return abc.sample(observation, num_samples=n)
89 |
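
A minimal sketch (not part of the repository) wiring the rejection-ABC procedure above to the normal benchmark; the mean summary statistic and the acceptance threshold are illustrative choices, not fixed by the library.

    import torch

    from hypothesis.benchmark.normal import Prior, Simulator, Truth
    from hypothesis.inference.abc import ApproximateBayesianComputation

    abc = ApproximateBayesianComputation(
        simulator=Simulator(),
        prior=Prior(),
        summary=lambda x: x.mean(),                          # Summarize observations by their mean.
        acceptor=lambda s, s_obs: (s - s_obs).abs() < 0.1)   # Accept sufficiently close summaries.
    observation = Simulator()(Truth().repeat(100).view(-1, 1))
    posterior_samples = abc.sample(observation, num_samples=10)
    print(posterior_samples.shape)                           # torch.Size([10])
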
--------------------------------------------------------------------------------
/hypothesis/inference/abc_smc.py:
--------------------------------------------------------------------------------
1 | import hypothesis
2 | import numpy as np
3 | import torch
4 |
5 | from hypothesis.engine import Procedure
6 | from torch.distributions.multivariate_normal import MultivariateNormal
7 | from torch.distributions.normal import Normal
8 |
9 |
10 |
11 | class ApproximateBayesianComputationSequentialMonteCarlo(Procedure):
12 | r""""""
13 |
14 | def __init__(self, simulator, prior, summary, acceptor, particles=1000):
15 | super(ApproximateBayesianComputationSequentialMonteCarlo, self).__init__()
16 | # Main ABC SMC properties.
17 | self.acceptor = acceptor
18 | self.prior = prior
19 | self.simulator = simulator
20 | self.summary = summary
21 | self.num_particles = particles
22 | # Sampler state properties.
23 | self._reset()
24 |
25 | def _register_events(self):
26 | # TODO Implement.
27 | pass
28 |
29 | def _reset(self):
30 | self.covariance = None
31 | self.previous_covariance = None
32 | self.particles = []
33 | self.previous_particles = None
34 | self.weights = torch.ones(self.num_particles) / self.num_particles
35 | self.pertubator = None
36 |
37 | def _update_covariance(self):
38 | self.previous_covariance = self.covariance
39 | self.covariance = 2 * torch.from_numpy(np.cov(self.particles.numpy().T)).float()
40 |
41 | def _kernel_likelihood(self, particle_index):
42 | kernel = self.pertubator.__class__
43 | current_particle = self.particles[particle_index]
44 | previous_particle = self.previous_particles[particle_index]
45 | kernel = kernel(previous_particle, self.previous_covariance)
46 | likelihood = kernel.log_prob(current_particle).exp()
47 |
48 | return likelihood
49 |
50 | def _update_weights(self):
51 | # Compute the evidence.
52 | evidence = 0.0
53 | for index in range(self.num_particles):
54 | evidence += self.weights[index] * self._kernel_likelihood(index)
55 | # Compute the new weights.
56 | for index, particle in enumerate(self.particles):
57 | self.weights[index] = self.prior.log_prob(particle).exp() / evidence
58 | self.weights /= self.weights.sum()
59 |
60 | def _sample_from_prior(self, summary_observation):
61 | for particle_index in range(self.num_particles):
62 | sample = None
63 | while sample is None:
64 | prior_sample = self.prior.sample()
65 | x = self.simulator(prior_sample)
66 | s = self.summary(x)
67 |                 if self.acceptor(s, summary_observation):
68 | sample = prior_sample.unsqueeze(0)
69 | self.particles.append(sample)
70 | self.particles = torch.cat(self.particles, dim=0)
71 | self._update_covariance()
72 |
73 | def _sample_particle(self):
74 | indices = np.arange(self.num_particles)
75 | sampled_index = np.random.choice(indices, 1, p=self.weights.numpy())
76 |
77 | return self.particles[sampled_index]
78 |
79 | def _allocate_pertubator(self):
80 | dimensionality = self.covariance.dim()
81 | if dimensionality <= 1:
82 | pertubator = Normal(0, self.covariance)
83 | else:
84 | zeros = torch.zeros(dimensionality)
85 | pertubator = MultivariateNormal(zeros, covariance_matrix=self.covariance)
86 | self.pertubator = pertubator
87 |
88 | def _resample_particles(self, summary_observation):
89 | pertubator = self.pertubator
90 | self.previous_particles = self.particles.clone()
91 | for particle_index in range(self.num_particles):
92 | new_particle = None
93 | while new_particle is None:
94 | proposal = self._sample_particle()
95 | proposal = proposal + pertubator.sample()
96 | x = self.simulator(proposal)
97 | s = self.summary(x)
98 | if self.acceptor(s, summary_observation):
99 | new_particle = proposal
100 | self.particles[particle_index, :] = proposal
101 | self._update_covariance()
102 | self._update_weights()
103 |
104 | def sample(self, observation, num_samples=1):
105 | samples = []
106 |
107 | # Summarize the observation.
108 | summary_observation = self.summary(observation)
109 | # Initialize the particles and set initial weights.
110 | self._sample_from_prior(summary_observation)
111 | self._allocate_pertubator()
112 | samples.append(self.particles)
113 | num_samples -= self.num_particles
114 | while num_samples > 0:
115 | self._resample_particles(summary_observation)
116 | num_samples -= self.num_particles
117 | samples.append(self.particles.clone().view(-1, 1))
118 | samples = torch.cat(samples, dim=0)[num_samples:]
119 |
120 | return samples
121 |
--------------------------------------------------------------------------------
/hypothesis/inference/avo.py:
--------------------------------------------------------------------------------
1 | import hypothesis
2 | import torch
3 |
4 | from hypothesis.engine import Procedure
5 |
6 |
7 |
8 | class AdversarialVariationalOptimization(Procedure):
9 | r"""Adversarial Variational Optimization
10 |
11 | An implementation of arxiv.org/abs/1707.07113"""
12 |
13 | def __init__(self, simulator,
14 | discriminator,
15 | gamma=10.0,
16 | baseline=None):
17 | super(AdversarialVariationalOptimization, self).__init__()
18 | self.discriminator = discriminator
19 | self.simulator = simulator
20 | if not baseline:
21 | baseline = AVOBaseline(discriminator)
22 | self.baseline = baseline
23 | self.gamma = gamma
24 |
25 |     def optimize(self, proposal, observations, num_steps=1):
26 | raise NotImplementedError
27 |
--------------------------------------------------------------------------------
/hypothesis/inference/mcmc.py:
--------------------------------------------------------------------------------
1 | r"""Markov chain Monte Carlo methods for inference.
2 | """
3 |
4 | import hypothesis
5 | import numpy as np
6 | import torch
7 |
8 | from hypothesis.engine import Procedure
9 | from hypothesis.summary.mcmc import Chain
10 | from torch.distributions.multivariate_normal import MultivariateNormal
11 | from torch.distributions.normal import Normal
12 | from torch.multiprocessing import Pool
13 |
14 |
15 |
16 | class ParallelSampler:
17 |
18 | def __init__(self, sampler, chains=2, workers=torch.multiprocessing.cpu_count()):
19 | self.chains = chains
20 | self.sampler = sampler
21 | self.workers = workers
22 |
23 | def _prepare_arguments(self, observations, thetas, num_samples):
24 | arguments = []
25 |         for input in thetas:
26 |             arguments.append((self.sampler, observations, input, num_samples))
27 |
28 | return arguments
29 |
30 | def _prepare_inputs(self):
31 | inputs = []
32 | prior = self.sampler.prior
33 | for _ in range(self.chains):
34 | inputs.append(prior.sample())
35 |
36 | return inputs
37 |
38 | @torch.no_grad()
39 | def sample(self, observations, num_samples, thetas=None):
40 |         assert(thetas is None or len(thetas) == self.chains)
41 |         self.sampler.reset()
42 |         if thetas is None:
43 |             thetas = self._prepare_inputs()
44 |         pool = Pool(processes=self.workers)
45 |         arguments = self._prepare_arguments(observations, thetas, num_samples)
46 | chains = pool.map(self.sample_chain, arguments)
47 | del pool
48 |
49 | return chains
50 |
51 | @staticmethod
52 | def sample_chain(arguments):
53 | sampler, observations, input, num_samples = arguments
54 | chain = sampler.sample(observations, input, num_samples)
55 |
56 | return chain
57 |
58 |
59 |
60 | class MarkovChainMonteCarlo(Procedure):
61 | r""""""
62 |
63 | def __init__(self, prior):
64 | super(MarkovChainMonteCarlo, self).__init__()
65 | self.prior = prior
66 |
67 | def _register_events(self):
68 | pass # No events to register.
69 |
70 | def _step(self, theta, observations):
71 | raise NotImplementedError
72 |
73 | def reset(self):
74 | pass
75 |
76 | @torch.no_grad()
77 | def sample(self, observations, input, num_samples):
78 | r""""""
79 | acceptance_probabilities = []
80 | acceptances = []
81 | samples = []
82 | self.reset()
83 | input = input.view(1, -1)
84 | for sample_index in range(num_samples):
85 | input, acceptance_probability, acceptance = self._step(input, observations)
86 | input = input.view(1, -1)
87 | samples.append(input)
88 | acceptance_probabilities.append(acceptance_probability)
89 | acceptances.append(acceptance)
90 | samples = torch.cat(samples, dim=0)
91 | chain = Chain(samples, acceptance_probabilities, acceptances)
92 |
93 | return chain
94 |
95 |
96 |
97 | class MetropolisHastings(MarkovChainMonteCarlo):
98 | r""""""
99 |
100 | def __init__(self, prior, log_likelihood, transition):
101 | super(MetropolisHastings, self).__init__(prior)
102 | self.denominator = None
103 | self.log_likelihood = log_likelihood
104 | self.transition = transition
105 |
106 | def _step(self, input, observations):
107 | accepted = False
108 |
109 | input_next = self.transition.sample(input)
110 | lnl_input_next = self.log_likelihood(input_next, observations)
111 | numerator = self.prior.log_prob(input_next) + lnl_input_next
112 | if self.denominator is None:
113 | lnl_input = self.log_likelihood(input, observations)
114 | self.denominator = self.prior.log_prob(input) + lnl_input
115 | acceptance_ratio = (numerator - self.denominator)
116 | if not self.transition.is_symmetrical():
117 | raise NotImplementedError
118 | acceptance_probability = min([1, acceptance_ratio.exp().item()])
119 | u = np.random.uniform()
120 | if u <= acceptance_probability:
121 | accepted = True
122 | input = input_next
123 | self.denominator = numerator
124 |
125 | return input, acceptance_probability, accepted
126 |
127 | def reset(self):
128 | self.denominator = None
129 |
130 |
131 |
132 | class AALRMetropolisHastings(MarkovChainMonteCarlo):
133 |     r"""Amortized Approximate Likelihood Ratio Metropolis Hastings
134 |
135 | https://arxiv.org/abs/1903.04057
136 | """
137 |
138 | def __init__(self, prior, ratio_estimator, transition):
139 | super(AALRMetropolisHastings, self).__init__(prior)
140 | self.denominator = None
141 | self.prior = prior
142 | self.ratio_estimator = ratio_estimator
143 | self.transition = transition
144 |
145 | def _compute_ratio(self, input, outputs):
146 | num_observations = outputs.shape[0]
147 | inputs = input.repeat(num_observations, 1)
148 | inputs = inputs.to(hypothesis.accelerator)
149 | _, log_ratios = self.ratio_estimator(inputs=inputs, outputs=outputs)
150 |
151 | return log_ratios.sum().cpu()
152 |
153 | def _step(self, input, observations):
154 | accepted = False
155 |
156 | with torch.no_grad():
157 | input_next = self.transition.sample(input)
158 | lnl_input_next = self._compute_ratio(input_next, observations)
159 | numerator = self.prior.log_prob(input_next) + lnl_input_next
160 | if self.denominator is None:
161 | lnl_input = self._compute_ratio(input, observations)
162 | self.denominator = self.prior.log_prob(input) + lnl_input
163 | acceptance_ratio = (numerator - self.denominator)
164 | if not self.transition.is_symmetrical():
165 | raise NotImplementedError
166 | acceptance_probability = min([1, acceptance_ratio.exp().item()])
167 | u = np.random.uniform()
168 | if u <= acceptance_probability:
169 | accepted = True
170 | input = input_next
171 | self.denominator = numerator
172 |
173 | return input, acceptance_probability, accepted
174 |
175 | def reset(self):
176 | self.denominator = None
177 |
178 | @torch.no_grad()
179 | def sample(self, outputs, input, num_samples):
180 | assert(not self.ratio_estimator.training)
181 | outputs = outputs.to(hypothesis.accelerator)
182 | chain = super(AALRMetropolisHastings, self).sample(outputs, input, num_samples)
183 |
184 | return chain
185 |
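
A minimal sketch (not part of the repository): Metropolis-Hastings on the normal benchmark with its tractable likelihood, using the Normal transition kernel from hypothesis.inference.transition_distribution (listed below); the proposal scale and chain length are illustrative.

    from hypothesis.benchmark.normal import Prior, Simulator, Truth, log_likelihood
    from hypothesis.inference.mcmc import MetropolisHastings
    from hypothesis.inference.transition_distribution import Normal as NormalTransition

    observations = Simulator()(Truth().repeat(100).view(-1, 1))
    sampler = MetropolisHastings(
        prior=Prior(),
        log_likelihood=lambda theta, x: log_likelihood(theta, x).sum(),
        transition=NormalTransition(sigma=0.5))
    chain = sampler.sample(observations, Truth().view(1, -1), num_samples=1000)
    # chain is a hypothesis.summary.mcmc.Chain holding the samples and acceptance statistics.
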
--------------------------------------------------------------------------------
/hypothesis/inference/proposal_distribution.py:
--------------------------------------------------------------------------------
1 | r""""""
2 |
3 | import torch
4 |
5 | from torch.distributions.multivariate_normal import MultivariateNormal
6 | from torch.distributions.normal import Normal as NormalDistribution
7 |
--------------------------------------------------------------------------------
/hypothesis/inference/ratio.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/montefiore-institute/hypothesis/29a2b7b4649db345d43a8d3bf98aa5d817b43f1b/hypothesis/inference/ratio.py
--------------------------------------------------------------------------------
/hypothesis/inference/transition_distribution.py:
--------------------------------------------------------------------------------
1 | r""""""
2 |
3 | import hypothesis
4 | import numpy as np
5 | import torch
6 |
7 | from torch.distributions.multivariate_normal import MultivariateNormal as MultivariateNormalDistribution
8 | from torch.distributions.normal import Normal as NormalDistribution
9 | from torch.distributions.uniform import Uniform as UniformDistribution
10 |
11 |
12 |
13 | class Transition:
14 | r""""""
15 |
16 | def log_prob(self, xs, conditionals):
17 | raise NotImplementedError
18 |
19 | def sample(self, xs, samples=1):
20 | raise NotImplementedError
21 |
22 | def is_symmetrical(self):
23 | raise NotImplementedError
24 |
25 |
26 |
27 | class SymmetricalTransition(Transition):
28 |
29 | def is_symmetrical(self):
30 | return True
31 |
32 |
33 |
34 | class AsymmetricalTransition(Transition):
35 |
36 | def is_symmetrical(self):
37 | return False
38 |
39 |
40 |
41 | class Normal(SymmetricalTransition):
42 |
43 | def __init__(self, sigma):
44 | super(Normal, self).__init__()
45 | self.sigma = sigma
46 |
47 | def log_prob(self, mean, conditionals):
48 | normal = NormalDistribution(mean, self.sigma)
49 | log_probabilities = normal.log_prob(conditionals)
50 |         del normal
51 |
52 | return log_probabilities
53 |
54 | def sample(self, means, samples=1):
55 | with torch.no_grad():
56 |             means = means.view(-1, 1).to(hypothesis.accelerator)
57 | normal_samples = torch.randn(means.size(0), samples)
58 | normal_samples = normal_samples.to(hypothesis.accelerator)
59 | samples = (normal_samples * self.sigma) + means
60 |
61 | return samples
62 |
63 |
64 | class MultivariateNormal(SymmetricalTransition):
65 |
66 | def __init__(self, sigma):
67 | super(MultivariateNormal, self).__init__()
68 | self.sigma = sigma
69 | self.dimensionality = sigma.size(0)
70 |
71 | def log_prob(self, mean, conditionals):
72 | normal = MultivariateNormalDistribution(mean, self.sigma)
73 |
74 | return normal.log_prob(conditionals)
75 |
76 | def sample(self, means, samples=1):
77 | x = []
78 |
79 | with torch.no_grad():
80 | means = means.view(-1, self.dimensionality)
81 | mean_samples = torch.Size([samples])
82 | for mean in means:
83 | normal = MultivariateNormalDistribution(mean, self.sigma)
84 | x.append(normal.sample(mean_samples).view(-1, samples, self.dimensionality))
85 | x = torch.cat(x, dim=0).squeeze()
86 |
87 | return x
88 |
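Usage sketch (not part of the file above), assuming the `hypothesis` package is importable so that `hypothesis.accelerator` resolves to the active torch device; it shows how the symmetrical `Normal` transition proposes new chain states.

import hypothesis
import torch

from hypothesis.inference.transition_distribution import Normal

transition = Normal(sigma=0.5)
current = torch.zeros(1, 1).to(hypothesis.accelerator)  # Current chain state.
proposal = transition.sample(current)                   # One Gaussian perturbation per state.
log_q = transition.log_prob(current, proposal)          # Proposal density, symmetric in its arguments.
assert transition.is_symmetrical()                      # No Metropolis-Hastings correction required.
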
--------------------------------------------------------------------------------
/hypothesis/metric/__init__.py:
--------------------------------------------------------------------------------
1 | from .base import BaseMetric
2 | from .base import BaseValueMetric
3 | from .base import BaseStateMetric
4 | from .dataset_loss import DatasetLossMetric
5 | from .exponential_average import ExponentialAverageMetric
6 | from .roc_auc import AreaUnderCurveMetric
7 |
8 | from .roc_auc import roc_auc_score
9 | from .roc_auc import roc_curve
10 |
--------------------------------------------------------------------------------
/hypothesis/metric/base.py:
--------------------------------------------------------------------------------
1 | class BaseMetric:
2 | r""""""
3 |
4 | def update(self, value=None):
5 | raise NotImplementedError
6 |
7 | def reset(self):
8 | raise NotImplementedError
9 |
10 | def __getitem__(self, pattern):
11 | raise NotImplementedError
12 |
13 | def __len__(self):
14 | raise NotImplementedError
15 |
16 |
17 |
18 | class BaseValueMetric(BaseMetric):
19 | r""""""
20 |
21 | def __init__(self, initial_value=None):
22 | self.initial_value = initial_value
23 | self.current_value = initial_value
24 | self.history = []
25 | self.reset()
26 |
27 | def _set_current_value(self, value):
28 | self.history.append(value)
29 | self.current_value = value
30 |
31 | def reset(self):
32 | self.current_value = self.initial_value
33 | if self.initial_value is not None:
34 | self.history = [self.initial_value]
35 |
36 | def __getitem__(self, pattern):
37 | return self.history[pattern]
38 |
39 | def __len__(self):
40 | return len(self.history)
41 |
42 |
43 |
44 | class BaseStateMetric(BaseValueMetric):
45 | r""""""
46 |
47 |     def __init__(self, initial_value=None):
48 |         super(BaseStateMetric, self).__init__(initial_value)
49 |
50 |     def update(self):
51 |         # Subclasses recompute their state and store the result
52 |         # through `_set_current_value`.
53 |         raise NotImplementedError
54 |
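Minimal sketch of the state-metric contract above with a hypothetical `ConstantMetric` class (not in the repository): `update()` recomputes the value and stores it through `_set_current_value`, after which the history can be indexed.

from hypothesis.metric import BaseStateMetric

class ConstantMetric(BaseStateMetric):

    def update(self):
        # Recompute the state and store it; here simply a constant.
        self._set_current_value(42.0)

metric = ConstantMetric()
metric.update()
print(metric.current_value, metric[-1], len(metric))  # 42.0 42.0 1
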
--------------------------------------------------------------------------------
/hypothesis/metric/dataset_loss.py:
--------------------------------------------------------------------------------
1 | import hypothesis
2 | import torch
3 |
4 | from hypothesis.metric import BaseStateMetric
5 | from torch.utils.data import DataLoader
6 |
7 |
8 |
9 | def _default_batch_handler(model, criterion, batch):
10 | x, y = batch
11 | x = x.to(hypothesis.accelerator, non_blocking=True)
12 | y = y.to(hypothesis.accelerator, non_blocking=True)
13 | y_hat = model(x)
14 |     loss = criterion(y_hat, y)
15 |
16 | return loss
17 |
18 |
19 |
20 | class DatasetLossMetric(BaseStateMetric):
21 | r""""""
22 |
23 | def __init__(self, model, criterion, dataset,
24 | batch_size=32,
25 | workers=1,
26 | batch_handler=_default_batch_handler):
27 |         super(DatasetLossMetric, self).__init__()
28 |         self.batch_size = batch_size
29 |         self.criterion = criterion
30 |         self.dataset = dataset
31 |         self.model = model
32 |         self.workers = workers
33 |         self.process_batch = batch_handler
34 |
35 | def update(self):
36 | self.model.eval()
37 | losses = []
38 |
39 | with torch.no_grad():
40 |             data_loader = DataLoader(self.dataset,
41 |                 batch_size=self.batch_size,
42 |                 shuffle=True,
43 |                 num_workers=self.workers)
44 |             for batch in data_loader:
45 |                 loss = self.process_batch(self.model, self.criterion, batch)
46 |                 losses.append(loss.view(1).cpu())
47 |             del data_loader
48 |         losses = torch.cat(losses, dim=0)
49 |         loss = losses.mean().item()
50 |         self._set_current_value(loss)
51 |
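Usage sketch with a toy model and in-memory data (not from the repository); `workers=0` keeps the DataLoader single-process and the model is moved to `hypothesis.accelerator` so the default batch handler finds everything on one device.

import hypothesis
import torch

from hypothesis.metric import DatasetLossMetric
from torch.utils.data import TensorDataset

model = torch.nn.Linear(3, 1).to(hypothesis.accelerator)
criterion = torch.nn.MSELoss()
dataset = TensorDataset(torch.randn(64, 3), torch.randn(64, 1))
metric = DatasetLossMetric(model, criterion, dataset, batch_size=16, workers=0)
metric.update()                 # Evaluates the mean loss over the dataset.
print(metric.current_value)
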
--------------------------------------------------------------------------------
/hypothesis/metric/exponential_average.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 |
4 | from hypothesis.metric import BaseValueMetric
5 |
6 |
7 |
8 | class ExponentialAverageMetric(BaseValueMetric):
9 | r""""""
10 |
11 | def __init__(self, initial_value=None, decay=.99):
12 | super(ExponentialAverageMetric, self).__init__(initial_value)
13 | self.decay = decay
14 |
15 | def update(self, value):
16 | # Check if the current value was initialized.
17 | if self.current_value is not None:
18 | next_value = self.decay * value + (1 - self.decay) * self.current_value
19 | else:
20 | next_value = value
21 |         self._set_current_value(next_value)
22 |
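Usage sketch: smoothing a stream of loss values; the numbers are illustrative only.

from hypothesis.metric import ExponentialAverageMetric

metric = ExponentialAverageMetric(initial_value=1.0, decay=0.9)
for loss in [0.8, 0.7, 0.6]:
    metric.update(loss)
print(metric.current_value)  # Exponentially weighted combination of the stream.
print(len(metric))           # Initial value plus three updates.
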
--------------------------------------------------------------------------------
/hypothesis/metric/roc_auc.py:
--------------------------------------------------------------------------------
1 | import hypothesis
2 | import numpy as np
3 | import torch
4 |
5 | from hypothesis.metric import BaseStateMetric
6 | from sklearn.metrics import roc_auc_score as sklearn_roc_auc_score
7 | from sklearn.metrics import roc_curve as sklearn_roc_curve
8 | from torch.utils.data import DataLoader
9 |
10 |
11 |
12 | def roc_curve(predictions, targets):
13 | r""""""
14 | predictions = predictions.view(-1, 1).numpy()
15 | targets = targets.view(-1, 1).numpy()
16 | fpr, tpr, _ = sklearn_roc_curve(targets, predictions)
17 |
18 | return fpr, tpr
19 |
20 |
21 |
22 | def roc_auc_score(predictions, targets):
23 | r""""""
24 | predictions = predictions.view(-1, 1).numpy()
25 | targets = targets.view(-1, 1).numpy()
26 |
27 | return sklearn_roc_auc_score(targets, predictions)
28 |
29 |
30 |
31 | class AreaUnderCurveMetric(BaseStateMetric):
32 | r""""""
33 |
34 | def __init__(self, model, dataset, batch_size=32, workers=1):
35 | super(AreaUnderCurveMetric, self).__init__()
36 | self.batch_size = batch_size
37 | self.dataset = dataset
38 | self.model = model
39 | self.workers = workers
40 |
41 | def update(self):
42 | self.model.eval()
43 | predictions = []
44 | targets = []
45 |
46 | with torch.no_grad():
47 |             data_loader = DataLoader(self.dataset,
48 |                 batch_size=self.batch_size,
49 |                 shuffle=True,
50 |                 num_workers=self.workers)
51 | for x, y in data_loader:
52 | x = x.to(hypothesis.accelerator, non_blocking=True)
53 | y = y.to(hypothesis.accelerator, non_blocking=True)
54 | y_hat = self.model(x)
55 | predictions.append(y_hat.cpu())
56 | targets.append(y.cpu())
57 | del data_loader
58 | predictions = torch.cat(predictions, dim=0)
59 | targets = torch.cat(targets, dim=0)
60 |         auc = float(roc_auc_score(predictions, targets))
61 |         self._set_current_value(auc)
62 |
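Usage sketch of the functional helpers above with hand-crafted predictions; a perfectly ranked batch yields an AUC of 1.0.

import torch

from hypothesis.metric import roc_auc_score, roc_curve

predictions = torch.tensor([0.1, 0.9, 0.8, 0.3])
targets = torch.tensor([0.0, 1.0, 1.0, 0.0])
print(roc_auc_score(predictions, targets))  # 1.0
fpr, tpr = roc_curve(predictions, targets)  # Points of the ROC curve.
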
--------------------------------------------------------------------------------
/hypothesis/nn/__init__.py:
--------------------------------------------------------------------------------
1 | # Multi Layered Perceptron
2 | from .multi_layered_perceptron import MultiLayeredPerceptron
3 | from .multi_layered_perceptron import MultiLayeredPerceptron as MLP
4 | # Neuromodulation
5 | from .neuromodulation import NeuromodulatedELU
6 | from .neuromodulation import NeuromodulatedReLU
7 | from .neuromodulation import NeuromodulatedSELU
8 | from .neuromodulation import NeuromodulatedTanh
9 | from .neuromodulation import allocate_default_neuromodulation_controller
10 | from .neuromodulation import list_neuromodulated_modules
11 | # DenseNet
12 | from .densenet import DenseNetHead
13 | from .densenet import DenseNet
14 | # ResNet
15 | from .resnet import ResNetHead
16 | from .resnet import ResNet
17 |
--------------------------------------------------------------------------------
/hypothesis/nn/amortized_ratio_estimation/__init__.py:
--------------------------------------------------------------------------------
1 | from .base import RatioEstimatorEnsemble
2 | from .base import BaseRatioEstimator
3 | from .base import BaseCriterion
4 | from .base import BaseConservativeCriterion
5 | from .base import BaseExperimentalCriterion
6 | from .likelihood_to_evidence import BaseLikelihoodToEvidenceRatioEstimator
7 | from .likelihood_to_evidence import ConservativeLikelihoodToEvidenceCriterion
8 | from .likelihood_to_evidence import LikelihoodToEvidenceCriterion
9 |
10 | from .mutual_information import BaseMutualInformationRatioEstimator
11 | from .mutual_information import MutualInformationCriterion
12 | # Multi Layered Perceptron
13 | from .multi_layered_perceptron import LikelihoodToEvidenceRatioEstimatorMLP
14 | from .multi_layered_perceptron import LikelihoodToEvidenceRatioEstimatorNeuromodulatedMLP
15 | from .multi_layered_perceptron import MutualInformationRatioEstimatorMLP
16 | from .multi_layered_perceptron import MutualInformationRatioEstimatorNeuromodulatedMLP
17 | # DenseNet
18 | from .densenet import LikelihoodToEvidenceRatioEstimatorDenseNet
19 | # ResNet
20 | from .resnet import LikelihoodToEvidenceRatioEstimatorResNet
21 | from .util import build_ratio_estimator
22 |
--------------------------------------------------------------------------------
/hypothesis/nn/amortized_ratio_estimation/base.py:
--------------------------------------------------------------------------------
1 | import hypothesis
2 | import numpy as np
3 | import torch
4 |
5 |
6 |
7 | class BaseRatioEstimator(torch.nn.Module):
8 |
9 | def __init__(self):
10 | super(BaseRatioEstimator, self).__init__()
11 |
12 | def forward(self, **kwargs):
13 | log_ratios = self.log_ratio(**kwargs)
14 |
15 | return log_ratios.sigmoid(), log_ratios
16 |
17 | def log_ratio(self, **kwargs):
18 | raise NotImplementedError
19 |
20 |
21 |
22 | class RatioEstimatorEnsemble(BaseRatioEstimator):
23 |
24 | KEYWORD_REDUCE = "reduce"
25 |
26 | def __init__(self, estimators, reduce="mean"):
27 | super(RatioEstimatorEnsemble, self).__init__()
28 | self.estimators = estimators
29 | self.reduce = self._allocate_reduce(reduce)
30 |
31 | def reduce_as(self, reduce):
32 | self.reduce = self._allocate_reduce(reduce)
33 |
34 | def to(self, device):
35 | for index in range(len(self.estimators)):
36 | self.estimators[index] = self.estimators[index].to(device)
37 |
38 | return self
39 |
40 | def log_ratio(self, **kwargs):
41 | # Check if the 'reduce' keyword is an argument.
42 | if RatioEstimatorEnsemble.KEYWORD_REDUCE in kwargs.keys():
43 | reduce = kwargs[RatioEstimatorEnsemble.KEYWORD_REDUCE]
44 | del kwargs[RatioEstimatorEnsemble.KEYWORD_REDUCE]
45 | else:
46 | reduce = True # Default value
47 | # Estimate the log ratios
48 | log_ratios = []
49 | for estimator in self.estimators:
50 | log_ratios.append(estimator.log_ratio(**kwargs))
51 | log_ratios = torch.cat(log_ratios, dim=1)
52 | if reduce:
53 | log_ratios = self.reduce(log_ratios).view(-1, 1)
54 |
55 | return log_ratios
56 |
57 | @staticmethod
58 | def _allocate_reduce(f):
59 | reductions = {
60 | "mean": RatioEstimatorEnsemble._reduce_mean,
61 | "median": RatioEstimatorEnsemble._reduce_median}
62 | reduce = None
63 | if hasattr(f, "__call__"):
64 | return f
65 | else:
66 | return reductions[f]
67 |
68 | @staticmethod
69 | def _reduce_mean(log_ratios):
70 | return log_ratios.mean(dim=1)
71 |
72 | @staticmethod
73 | def _reduce_median(log_ratios):
74 | return log_ratios.median(dim=1).values
75 |
76 |
77 |
78 | class BaseCriterion(torch.nn.Module):
79 |
80 | def __init__(self,
81 | estimator,
82 | denominator,
83 | batch_size=hypothesis.default.batch_size,
84 | logits=False):
85 | super(BaseCriterion, self).__init__()
86 | if logits:
87 | self.criterion = torch.nn.BCEWithLogitsLoss()
88 | self._forward = self._forward_with_logits
89 | else:
90 | self.criterion = torch.nn.BCELoss()
91 | self._forward = self._forward_without_logits
92 | self.batch_size = batch_size
93 | self.estimator = estimator
94 | self.independent_random_variables = self._derive_independent_random_variables(denominator)
95 | self.ones = torch.ones(self.batch_size, 1)
96 | self.random_variables = self._derive_random_variables(denominator)
97 | self.zeros = torch.zeros(self.batch_size, 1)
98 |
99 | def _derive_random_variables(self, denominator):
100 | random_variables = denominator.replace(hypothesis.default.dependent_delimiter, " ") \
101 | .replace(hypothesis.default.independent_delimiter, " ") \
102 | .split(" ")
103 | random_variables.sort()
104 |
105 | return random_variables
106 |
107 | def _derive_independent_random_variables(self, denominator):
108 | groups = denominator.split(hypothesis.default.independent_delimiter)
109 | for index in range(len(groups)):
110 | groups[index] = groups[index].split(hypothesis.default.dependent_delimiter)
111 |
112 | return groups
113 |
114 | def _forward_without_logits(self, **kwargs):
115 | y_dependent, _ = self.estimator(**kwargs)
116 | for group in self.independent_random_variables:
117 | random_indices = torch.randperm(self.batch_size)
118 | for variable in group:
119 | kwargs[variable] = kwargs[variable][random_indices] # Make variable independent.
120 | y_independent, _ = self.estimator(**kwargs)
121 | loss = self.criterion(y_dependent, self.ones) + self.criterion(y_independent, self.zeros)
122 |
123 | return loss
124 |
125 | def _forward_with_logits(self, **kwargs):
126 | _, y_dependent = self.estimator(**kwargs)
127 | for group in self.independent_random_variables:
128 | random_indices = torch.randperm(self.batch_size)
129 | for variable in group:
130 | kwargs[variable] = kwargs[variable][random_indices] # Make variable independent.
131 | _, y_independent = self.estimator(**kwargs)
132 | loss = self.criterion(y_dependent, self.ones) + self.criterion(y_independent, self.zeros)
133 |
134 | return loss
135 |
136 | def variables(self):
137 | return self.random_variables
138 |
139 | def independent_variables(self):
140 | return self.independent_random_variables
141 |
142 | def to(self, device):
143 | self.criterion = self.criterion.to(device)
144 | self.ones = self.ones.to(device)
145 | self.zeros = self.zeros.to(device)
146 |
147 | return self
148 |
149 | def forward(self, **kwargs):
150 | return self._forward(**kwargs)
151 |
152 |
153 |
154 | class BaseConservativeCriterion(BaseCriterion):
155 |
156 | def __init__(self,
157 | estimator,
158 | denominator,
159 | batch_size=hypothesis.default.batch_size,
160 | beta=0.001,
161 | logits=False):
162 | super(BaseConservativeCriterion, self).__init__(
163 | estimator=estimator,
164 | denominator=denominator,
165 | batch_size=batch_size,
166 | logits=logits)
167 | self.beta = beta
168 |
169 | def _forward_without_logits(self, **kwargs):
170 | beta = self.beta
171 | y_dependent, _ = self.estimator(**kwargs)
172 | for group in self.independent_random_variables:
173 | random_indices = torch.randperm(self.batch_size)
174 | for variable in group:
175 | kwargs[variable] = kwargs[variable][random_indices] # Make variable independent.
176 | y_independent, _ = self.estimator(**kwargs)
177 | loss = ((1 - beta) * self.criterion(y_dependent, self.ones) + beta * self.criterion(y_independent, self.ones)) + self.criterion(y_independent, self.zeros)
178 |
179 | return loss
180 |
181 | def _forward_with_logits(self, **kwargs):
182 | beta = self.beta
183 | _, y_dependent = self.estimator(**kwargs)
184 | for group in self.independent_random_variables:
185 | random_indices = torch.randperm(self.batch_size)
186 | for variable in group:
187 | kwargs[variable] = kwargs[variable][random_indices] # Make variable independent.
188 | _, y_independent = self.estimator(**kwargs)
189 | loss = ((1 - beta) * self.criterion(y_dependent, self.ones) + beta * self.criterion(y_independent, self.ones)) + self.criterion(y_independent, self.zeros)
190 |
191 |
192 | return loss
193 |
194 |
195 | class BaseExperimentalCriterion(BaseCriterion):
196 |
197 | def __init__(self,
198 | estimator,
199 | denominator,
200 | batch_size=hypothesis.default.batch_size,
201 | beta=1.0,
202 | logits=False):
203 | super(BaseExperimentalCriterion, self).__init__(
204 | estimator=estimator,
205 | denominator=denominator,
206 | batch_size=batch_size,
207 | logits=logits)
208 | self.beta = beta
209 | self.base = np.log(4)
210 |
211 | def _forward_without_logits(self, **kwargs):
212 | y_dependent, log_ratios = self.estimator(**kwargs)
213 | for group in self.independent_random_variables:
214 | random_indices = torch.randperm(self.batch_size)
215 | for variable in group:
216 | kwargs[variable] = kwargs[variable][random_indices] # Make variable independent.
217 | y_independent, _ = self.estimator(**kwargs)
218 | loss = self.criterion(y_dependent, self.ones) + self.criterion(y_independent, self.zeros)
219 | loss = loss + self.beta * ((self.base - loss.detach()).abs() / 2 - log_ratios.mean()) ** 2
220 |
221 | return loss
222 |
223 | def _forward_with_logits(self, **kwargs):
224 | _, y_dependent = self.estimator(**kwargs)
225 | for group in self.independent_random_variables:
226 | random_indices = torch.randperm(self.batch_size)
227 | for variable in group:
228 | kwargs[variable] = kwargs[variable][random_indices] # Make variable independent.
229 | _, y_independent = self.estimator(**kwargs)
230 | loss = self.criterion(y_dependent, self.ones) + self.criterion(y_independent, self.zeros)
231 |         loss = loss + self.beta * ((self.base - loss.detach()).abs() / 2 - y_dependent.mean()) ** 2
232 |
233 | return loss
234 |
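Sketch of the ensemble reduction above with a hypothetical `ToyRatioEstimator` (a single linear layer, not part of the repository); the ensemble concatenates the member log ratios column-wise and optionally reduces them.

import torch

from hypothesis.nn.amortized_ratio_estimation import BaseRatioEstimator
from hypothesis.nn.amortized_ratio_estimation import RatioEstimatorEnsemble

class ToyRatioEstimator(BaseRatioEstimator):

    def __init__(self):
        super(ToyRatioEstimator, self).__init__()
        self.linear = torch.nn.Linear(2, 1)

    def log_ratio(self, inputs, outputs):
        return self.linear(torch.cat([inputs, outputs], dim=1))

ensemble = RatioEstimatorEnsemble([ToyRatioEstimator(), ToyRatioEstimator()], reduce="median")
inputs, outputs = torch.randn(8, 1), torch.randn(8, 1)
log_ratios = ensemble.log_ratio(inputs=inputs, outputs=outputs)                  # Shape (8, 1) after reduction.
stacked = ensemble.log_ratio(inputs=inputs, outputs=outputs, reduce=False)       # Shape (8, 2), one column per member.
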
--------------------------------------------------------------------------------
/hypothesis/nn/amortized_ratio_estimation/densenet/__init__.py:
--------------------------------------------------------------------------------
1 | from .likelihood_to_evidence import LikelihoodToEvidenceRatioEstimatorDenseNet
2 |
--------------------------------------------------------------------------------
/hypothesis/nn/amortized_ratio_estimation/densenet/likelihood_to_evidence.py:
--------------------------------------------------------------------------------
1 | import hypothesis
2 | import hypothesis.nn.densenet
3 | import torch
4 |
5 | from hypothesis.nn import DenseNetHead
6 | from hypothesis.nn import MultiLayeredPerceptron
7 | from hypothesis.nn.amortized_ratio_estimation import BaseLikelihoodToEvidenceRatioEstimator
8 | from hypothesis.nn.util import compute_dimensionality
9 |
10 |
11 |
12 | class LikelihoodToEvidenceRatioEstimatorDenseNet(BaseLikelihoodToEvidenceRatioEstimator):
13 |
14 | def __init__(self,
15 | shape_inputs,
16 | shape_outputs,
17 | activation=hypothesis.default.activation,
18 | batchnorm=hypothesis.nn.densenet.default.batchnorm,
19 | bottleneck_factor=hypothesis.nn.densenet.default.bottleneck_factor,
20 | channels=hypothesis.nn.densenet.default.channels,
21 | convolution_bias=hypothesis.nn.densenet.default.convolution_bias,
22 | depth=hypothesis.nn.densenet.default.depth,
23 | dropout=hypothesis.default.dropout,
24 | trunk_activation=None,
25 | trunk_dropout=None,
26 | trunk_layers=hypothesis.default.trunk):
27 | super(LikelihoodToEvidenceRatioEstimatorDenseNet, self).__init__()
28 | # Construct the convolutional DenseNet head.
29 | self.head = DenseNetHead(
30 | activation=activation,
31 | batchnorm=batchnorm,
32 | bottleneck_factor=bottleneck_factor,
33 | channels=channels,
34 | convolution_bias=convolution_bias,
35 | depth=depth,
36 | dropout=dropout,
37 | shape_xs=shape_outputs)
38 | # Compute the embedding dimensionality of the head.
39 | embedding_dim = self.head.embedding_dimensionality()
40 | # Check if custom trunk settings have been defined.
41 | if trunk_activation is None:
42 | trunk_activation = activation
43 | if trunk_dropout is None:
44 | trunk_dropout = dropout
45 | # Allocate the trunk.
46 | latent_dimensionality = compute_dimensionality(shape_inputs) + self.head.embedding_dimensionality()
47 | self.trunk = MultiLayeredPerceptron(
48 | shape_xs=(latent_dimensionality,),
49 | shape_ys=(1,),
50 | activation=trunk_activation,
51 | dropout=trunk_dropout,
52 | layers=trunk_layers,
53 | transform_output=None)
54 |
55 | def log_ratio(self, inputs, outputs):
56 | z_outputs = self.head(outputs)
57 | z = torch.cat([inputs, z_outputs], dim=1)
58 | log_ratios = self.trunk(z)
59 |
60 | return log_ratios
61 |
--------------------------------------------------------------------------------
/hypothesis/nn/amortized_ratio_estimation/likelihood_to_evidence.py:
--------------------------------------------------------------------------------
1 | import hypothesis
2 | import hypothesis.nn
3 | import torch
4 |
5 | from .base import BaseCriterion
6 | from .base import BaseConservativeCriterion
7 | from .base import BaseRatioEstimator
8 |
9 |
10 |
11 | DENOMINATOR = "inputs|outputs"
12 |
13 |
14 |
15 | class LikelihoodToEvidenceCriterion(BaseCriterion):
16 |
17 | def __init__(self,
18 | estimator,
19 | batch_size=hypothesis.default.batch_size,
20 | logits=False):
21 | super(LikelihoodToEvidenceCriterion, self).__init__(
22 | batch_size=batch_size,
23 | denominator=DENOMINATOR,
24 | estimator=estimator,
25 | logits=logits)
26 |
27 |
28 |
29 | class ConservativeLikelihoodToEvidenceCriterion(BaseConservativeCriterion):
30 |
31 | def __init__(self,
32 | estimator,
33 | beta=0.001,
34 | batch_size=hypothesis.default.batch_size,
35 | logits=False):
36 |         super(ConservativeLikelihoodToEvidenceCriterion, self).__init__(
37 |             batch_size=batch_size,
38 |             beta=beta,
39 |             denominator=DENOMINATOR,
40 |             estimator=estimator,
41 |             logits=logits)
42 |
43 |
44 | class BaseLikelihoodToEvidenceRatioEstimator(BaseRatioEstimator):
45 |
46 | def __init__(self):
47 | super(BaseLikelihoodToEvidenceRatioEstimator, self).__init__()
48 |
49 | def forward(self, inputs, outputs):
50 | log_ratios = self.log_ratio(inputs=inputs, outputs=outputs)
51 |
52 | return log_ratios.sigmoid(), log_ratios
53 |
54 | def log_ratio(self, inputs, outputs):
55 | raise NotImplementedError
56 |
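Training-step sketch for the criterion above, again with a hypothetical single-layer estimator; the criterion shuffles the independent variables internally to build the product-of-marginals batch, so only the joint batch has to be supplied.

import torch

from hypothesis.nn.amortized_ratio_estimation import BaseLikelihoodToEvidenceRatioEstimator
from hypothesis.nn.amortized_ratio_estimation import LikelihoodToEvidenceCriterion

class ToyRatioEstimator(BaseLikelihoodToEvidenceRatioEstimator):

    def __init__(self):
        super(ToyRatioEstimator, self).__init__()
        self.linear = torch.nn.Linear(2, 1)

    def log_ratio(self, inputs, outputs):
        return self.linear(torch.cat([inputs, outputs], dim=1))

estimator = ToyRatioEstimator()
criterion = LikelihoodToEvidenceCriterion(estimator, batch_size=16)
optimizer = torch.optim.Adam(estimator.parameters())
inputs, outputs = torch.randn(16, 1), torch.randn(16, 1)  # A joint batch of (inputs, outputs) pairs.
optimizer.zero_grad()
loss = criterion(inputs=inputs, outputs=outputs)
loss.backward()
optimizer.step()
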
--------------------------------------------------------------------------------
/hypothesis/nn/amortized_ratio_estimation/multi_layered_perceptron/__init__.py:
--------------------------------------------------------------------------------
1 | from .base import build_ratio_estimator
2 | from .likelihood_to_evidence import LikelihoodToEvidenceRatioEstimatorMLP
3 | from .likelihood_to_evidence import LikelihoodToEvidenceRatioEstimatorNeuromodulatedMLP
4 | from .mutual_information import MutualInformationRatioEstimatorMLP
5 | from .mutual_information import MutualInformationRatioEstimatorNeuromodulatedMLP
6 |
--------------------------------------------------------------------------------
/hypothesis/nn/amortized_ratio_estimation/multi_layered_perceptron/base.py:
--------------------------------------------------------------------------------
1 | import hypothesis
2 | import hypothesis.nn
3 | import torch
4 |
5 | from hypothesis.nn import MultiLayeredPerceptron as MLP
6 | from hypothesis.nn.amortized_ratio_estimation import BaseRatioEstimator
7 | from hypothesis.nn.util import compute_dimensionality
8 |
9 |
10 | def build_ratio_estimator(random_variables):
11 | # Flatten the shapes of the random variables
12 | for k in random_variables.keys():
13 | shape = random_variables[k]
14 | flattened_shape = (compute_dimensionality(shape),)
15 | random_variables[k] = flattened_shape
16 |
17 | class RatioEstimator(BaseRatioEstimator):
18 |
19 | def __init__(self,
20 | activation=hypothesis.default.activation,
21 | dropout=hypothesis.default.dropout,
22 | layers=hypothesis.default.trunk):
23 | super(RatioEstimator, self).__init__()
24 | shape_xs = (sum([compute_dimensionality(shape) for shape in random_variables.values()]),)
25 | self.mlp = MLP(
26 | activation=activation,
27 | dropout=dropout,
28 | layers=layers,
29 | shape_xs=shape_xs,
30 | shape_ys=(1,),
31 | transform_output=None)
32 |
33 | def log_ratio(self, **kwargs):
34 | tensors = [kwargs[k].view(-1, random_variables[k][0]) for k in random_variables]
35 | z = torch.cat(tensors, dim=1)
36 | log_ratios = self.mlp(z)
37 |
38 | return log_ratios
39 |
40 | return RatioEstimator
41 |
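Sketch of the factory above: the returned class is a fully specified MLP ratio estimator whose `log_ratio` keyword arguments match the keys of the (illustrative) random-variable dictionary.

import torch

from hypothesis.nn.amortized_ratio_estimation.multi_layered_perceptron import build_ratio_estimator

RatioEstimator = build_ratio_estimator({"inputs": (1,), "outputs": (2,)})
estimator = RatioEstimator()
_, log_ratios = estimator(inputs=torch.randn(8, 1), outputs=torch.randn(8, 2))  # Shape (8, 1).
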
--------------------------------------------------------------------------------
/hypothesis/nn/amortized_ratio_estimation/multi_layered_perceptron/likelihood_to_evidence.py:
--------------------------------------------------------------------------------
1 | import hypothesis
2 | import hypothesis.nn
3 | import torch
4 |
5 | from hypothesis.nn import MultiLayeredPerceptron
6 | from hypothesis.nn.amortized_ratio_estimation import BaseLikelihoodToEvidenceRatioEstimator
7 | from hypothesis.nn.neuromodulation import BaseNeuromodulatedModule
8 | from hypothesis.nn.neuromodulation import allocate_neuromodulated_activation
9 | from hypothesis.nn.neuromodulation import list_neuromodulated_modules
10 | from hypothesis.nn.util import compute_dimensionality
11 |
12 |
13 |
14 | class LikelihoodToEvidenceRatioEstimatorMLP(BaseLikelihoodToEvidenceRatioEstimator):
15 |
16 | def __init__(self,
17 | shape_inputs,
18 | shape_outputs,
19 | activation=hypothesis.default.activation,
20 | dropout=hypothesis.default.dropout,
21 | layers=hypothesis.default.trunk):
22 | super(LikelihoodToEvidenceRatioEstimatorMLP, self).__init__()
23 | dimensionality = compute_dimensionality(shape_inputs) + compute_dimensionality(shape_outputs)
24 | self.mlp = MultiLayeredPerceptron(
25 | shape_xs=(dimensionality,),
26 | shape_ys=(1,),
27 | activation=activation,
28 | dropout=dropout,
29 | layers=layers,
30 | transform_output=None)
31 |
32 | def log_ratio(self, inputs, outputs):
33 | features = torch.cat([inputs, outputs], dim=1)
34 |
35 | return self.mlp(features)
36 |
37 |
38 |
39 | class LikelihoodToEvidenceRatioEstimatorNeuromodulatedMLP(BaseLikelihoodToEvidenceRatioEstimator):
40 |
41 | def __init__(self,
42 | shape_outputs,
43 | controller_allocator,
44 | activation=hypothesis.default.activation,
45 | dropout=hypothesis.default.dropout,
46 | layers=hypothesis.default.trunk):
47 | super(LikelihoodToEvidenceRatioEstimatorNeuromodulatedMLP, self).__init__()
48 | # Allocate the neuromodulated activation.
49 | neuromodulated_activation = allocate_neuromodulated_activation(
50 | activation=activation,
51 | allocator=controller_allocator)
52 |         # Allocate the MLP with the neuromodulated activation.
53 | self.mlp = MultiLayeredPerceptron(
54 | shape_xs=shape_outputs,
55 | shape_ys=(1,),
56 | activation=neuromodulated_activation,
57 | dropout=dropout,
58 | layers=layers,
59 | transform_output=None)
60 | # List the neuromodulated modules.
61 | self.neuromodulated_modules = list_neuromodulated_modules(self)
62 |
63 | def log_ratio(self, inputs, outputs):
64 | for module in self.neuromodulated_modules:
65 | module.update(context=inputs)
66 |
67 | return self.mlp(outputs)
68 |
--------------------------------------------------------------------------------
/hypothesis/nn/amortized_ratio_estimation/multi_layered_perceptron/mutual_information.py:
--------------------------------------------------------------------------------
1 | import hypothesis
2 | import torch
3 |
4 | from hypothesis.nn import MultiLayeredPerceptron
5 | from hypothesis.nn.amortized_ratio_estimation import BaseMutualInformationRatioEstimator
6 | from hypothesis.nn.neuromodulation import BaseNeuromodulatedModule
7 | from hypothesis.nn.neuromodulation import allocate_neuromodulated_activation
8 | from hypothesis.nn.neuromodulation import list_neuromodulated_modules
9 | from hypothesis.nn.util import compute_dimensionality
10 |
11 |
12 |
13 | class MutualInformationRatioEstimatorMLP(BaseMutualInformationRatioEstimator):
14 |
15 | def __init__(self,
16 | shape_inputs,
17 | shape_outputs,
18 | activation=hypothesis.default.activation,
19 | dropout=hypothesis.default.dropout,
20 | layers=hypothesis.default.trunk):
21 | super(MutualInformationRatioEstimatorMLP, self).__init__()
22 | dimensionality = compute_dimensionality(shape_inputs) + compute_dimensionality(shape_outputs)
23 | self.mlp = MultiLayeredPerceptron(
24 | shape_xs=(dimensionality,),
25 | shape_ys=(1,),
26 | activation=activation,
27 | dropout=dropout,
28 | layers=layers,
29 | transform_output=None)
30 |
31 | def log_ratio(self, x, y):
32 | features = torch.cat([x, y], dim=1)
33 |
34 | return self.mlp(features)
35 |
36 |
37 |
38 | class MutualInformationRatioEstimatorNeuromodulatedMLP(BaseMutualInformationRatioEstimator):
39 |
40 | def __init__(self,
41 | shape_x,
42 | controller_allocator,
43 | activation=hypothesis.default.activation,
44 | dropout=hypothesis.default.dropout,
45 | layers=hypothesis.default.trunk):
46 | super(MutualInformationRatioEstimatorNeuromodulatedMLP, self).__init__()
47 | # Allocate the neuromodulated activation.
48 | neuromodulated_activation = allocate_neuromodulated_activation(
49 | activation=activation,
50 | allocator=controller_allocator)
51 |         # Allocate the MLP with the neuromodulated activation.
52 | self.mlp = MultiLayeredPerceptron(
53 | shape_xs=shape_x,
54 | shape_ys=(1,),
55 | activation=neuromodulated_activation,
56 | dropout=dropout,
57 | layers=layers,
58 | transform_output=None)
59 | # List the neuromodulated modules.
60 | self.neuromodulated_modules = list_neuromodulated_modules(self)
61 |
62 | def log_ratio(self, x, y):
63 | for module in self.neuromodulated_modules:
64 | module.update(context=y)
65 |
66 | return self.mlp(x)
67 |
--------------------------------------------------------------------------------
/hypothesis/nn/amortized_ratio_estimation/mutual_information.py:
--------------------------------------------------------------------------------
1 | import hypothesis
2 | import torch
3 |
4 | from .base import BaseCriterion
5 | from .base import BaseRatioEstimator
6 |
7 |
8 |
9 | class MutualInformationCriterion(BaseCriterion):
10 |
11 | DENOMINATOR = "x|y"
12 |
13 | def __init__(self,
14 | estimator,
15 | batch_size=hypothesis.default.batch_size,
16 | logits=False):
17 | super(MutualInformationCriterion, self).__init__(
18 | batch_size=batch_size,
19 |             denominator=MutualInformationCriterion.DENOMINATOR,
20 | estimator=estimator,
21 | logits=logits)
22 |
23 |
24 |
25 | class BaseMutualInformationRatioEstimator(BaseRatioEstimator):
26 |
27 | def __init__(self):
28 | super(BaseMutualInformationRatioEstimator, self).__init__()
29 |
30 | def forward(self, x, y):
31 | log_ratios = self.log_ratio(x=x, y=y)
32 |
33 | return log_ratios.sigmoid(), log_ratios
34 |
35 | def log_ratio(self, x, y):
36 | raise NotImplementedError
37 |
--------------------------------------------------------------------------------
/hypothesis/nn/amortized_ratio_estimation/resnet/__init__.py:
--------------------------------------------------------------------------------
1 | from .base import build_ratio_estimator
2 | from .likelihood_to_evidence import LikelihoodToEvidenceRatioEstimatorResNet
3 |
--------------------------------------------------------------------------------
/hypothesis/nn/amortized_ratio_estimation/resnet/base.py:
--------------------------------------------------------------------------------
1 | import hypothesis
2 | import hypothesis.nn
3 | import torch
4 |
5 | from hypothesis.nn import MultiLayeredPerceptron as MLP
6 | from hypothesis.nn import ResNetHead
7 | from hypothesis.nn.amortized_ratio_estimation import BaseRatioEstimator
8 | from hypothesis.nn.resnet.default import batchnorm as default_batchnorm
9 | from hypothesis.nn.resnet.default import channels as default_channels
10 | from hypothesis.nn.resnet.default import convolution_bias as default_convolution_bias
11 | from hypothesis.nn.resnet.default import depth as default_depth
12 | from hypothesis.nn.resnet.default import dilate as default_dilate
13 | from hypothesis.nn.resnet.default import groups as default_groups
14 | from hypothesis.nn.resnet.default import in_planes as default_in_planes
15 | from hypothesis.nn.resnet.default import width_per_group as default_width_per_group
16 | from hypothesis.nn.util import compute_dimensionality
17 |
18 |
19 |
20 | def build_ratio_estimator(random_variables, **kwargs):
21 | depth = kwargs.get("depth", default_depth)
22 | convolve_variable = kwargs.get("convolve", "outputs")
23 | trunk_variables = set(random_variables.keys()) - set([convolve_variable])
24 | trunk_random_variables = {}
25 | for k in trunk_variables:
26 | trunk_random_variables[k] = (-1, compute_dimensionality(random_variables[k]))
27 | if convolve_variable not in random_variables.keys():
28 | raise ValueError("No convolution random variable specified (default: outputs)!")
29 |
30 | class RatioEstimator(BaseRatioEstimator):
31 |
32 | def __init__(self,
33 | activation=hypothesis.default.activation,
34 | batchnorm=default_batchnorm,
35 | channels=default_channels,
36 | convolution_bias=default_convolution_bias,
37 | depth=depth,
38 | dilate=default_dilate,
39 | groups=default_groups,
40 | in_planes=default_in_planes,
41 | trunk_activation=None,
42 | trunk_dropout=hypothesis.default.dropout,
43 | trunk_layers=hypothesis.default.trunk,
44 | width_per_group=default_width_per_group):
45 | super(RatioEstimator, self).__init__()
46 | # Construct the convolutional ResNet head.
47 | self.head = ResNetHead(
48 |                 activation=activation,
49 | batchnorm=batchnorm,
50 | channels=channels,
51 | convolution_bias=convolution_bias,
52 | depth=depth,
53 | dilate=dilate,
54 | groups=groups,
55 | in_planes=in_planes,
56 | shape_xs=random_variables[convolve_variable],
57 | width_per_group=width_per_group)
58 | # Check if custom trunk settings have been defined.
59 | if trunk_activation is None:
60 | trunk_activation = activation
61 | # Construct the trunk of the network.
62 | self.embedding_dimensionality = self.head.embedding_dimensionality()
63 | dimensionality = self.embedding_dimensionality + sum([compute_dimensionality(random_variables[k]) for k in trunk_random_variables])
64 | self.trunk = MLP(
65 | shape_xs=(dimensionality,),
66 | shape_ys=(1,),
67 | activation=trunk_activation,
68 | dropout=trunk_dropout,
69 | layers=trunk_layers,
70 | transform_output=None)
71 |
72 | def log_ratio(self, **kwargs):
73 | z_head = self.head(kwargs[convolve_variable]).view(-1, self.embedding_dimensionality)
74 | tensors = [kwargs[k].view(v) for k, v in trunk_random_variables.items()]
75 | tensors.append(z_head)
76 | features = torch.cat(tensors, dim=1)
77 | log_ratios = self.trunk(features)
78 |
79 | return log_ratios
80 |
81 | return RatioEstimator
82 |
--------------------------------------------------------------------------------
/hypothesis/nn/amortized_ratio_estimation/resnet/likelihood_to_evidence.py:
--------------------------------------------------------------------------------
1 | import hypothesis
2 | import hypothesis.nn
3 | import torch
4 |
5 | from hypothesis.nn import MultiLayeredPerceptron
6 | from hypothesis.nn import ResNetHead
7 | from hypothesis.nn.amortized_ratio_estimation import BaseLikelihoodToEvidenceRatioEstimator
8 | from hypothesis.nn.resnet.default import batchnorm as default_batchnorm
9 | from hypothesis.nn.resnet.default import channels as default_channels
10 | from hypothesis.nn.resnet.default import convolution_bias as default_convolution_bias
11 | from hypothesis.nn.resnet.default import depth as default_depth
12 | from hypothesis.nn.resnet.default import dilate as default_dilate
13 | from hypothesis.nn.resnet.default import groups as default_groups
14 | from hypothesis.nn.resnet.default import in_planes as default_in_planes
15 | from hypothesis.nn.resnet.default import width_per_group as default_width_per_group
16 | from hypothesis.nn.util import compute_dimensionality
17 |
18 |
19 |
20 | class LikelihoodToEvidenceRatioEstimatorResNet(BaseLikelihoodToEvidenceRatioEstimator):
21 |
22 | def __init__(self,
23 | shape_inputs,
24 | shape_outputs,
25 | activation=hypothesis.default.activation,
26 | batchnorm=default_batchnorm,
27 | channels=default_channels,
28 | convolution_bias=default_convolution_bias,
29 | depth=default_depth,
30 | dilate=default_dilate,
31 | groups=default_groups,
32 | in_planes=default_in_planes,
33 | width_per_group=default_width_per_group,
34 | trunk_activation=None,
35 | trunk_dropout=hypothesis.default.dropout,
36 | trunk_layers=hypothesis.default.trunk):
37 | super(LikelihoodToEvidenceRatioEstimatorResNet, self).__init__()
38 | # Construct the convolutional ResNet head.
39 | self.head = ResNetHead(
40 |             activation=activation,
41 | batchnorm=batchnorm,
42 | channels=channels,
43 | convolution_bias=convolution_bias,
44 | depth=depth,
45 | dilate=dilate,
46 | groups=groups,
47 | in_planes=in_planes,
48 |             shape_xs=shape_outputs,
49 | width_per_group=width_per_group)
50 | # Check if custom trunk settings have been defined.
51 | if trunk_activation is None:
52 | trunk_activation = activation
53 | # Construct the trunk of the network.
54 | dimensionality = self.head.embedding_dimensionality() + compute_dimensionality(shape_inputs)
55 |         self.trunk = MultiLayeredPerceptron(
56 | shape_xs=(dimensionality,),
57 | shape_ys=(1,),
58 | activation=trunk_activation,
59 | dropout=trunk_dropout,
60 | layers=trunk_layers,
61 | transform_output=None)
62 |
63 | def log_ratio(self, inputs, outputs):
64 | z_head = self.head(outputs)
65 |         features = torch.cat([inputs, z_head], dim=1)
66 |
67 | return self.trunk(features)
68 |
--------------------------------------------------------------------------------
/hypothesis/nn/amortized_ratio_estimation/util.py:
--------------------------------------------------------------------------------
1 | import hypothesis
2 | import numpy as np
3 | import torch
4 |
5 |
6 |
7 | def build_ratio_estimator(architecture, variables, **kwargs):
8 | creator = architectures[architecture]
9 |
10 | return creator(architecture, variables, **kwargs)
11 |
12 |
13 | def build_mlp_ratio_estimator(architecture, variables, **kwargs):
14 | from hypothesis.nn.amortized_ratio_estimation.multi_layered_perceptron import build_ratio_estimator
15 | return build_ratio_estimator(variables)
16 |
17 |
18 | def build_resnet_ratio_estimator(architecture, variables, **kwargs):
19 | from hypothesis.nn.amortized_ratio_estimation.resnet import build_ratio_estimator
20 | return build_ratio_estimator(variables, **kwargs)
21 |
22 |
23 | def build_resnet_with_depth_ratio_estimator(architecture, variables, **kwargs):
24 | _, depth = architecture.split('-')
25 |     kwargs["depth"] = int(depth)
26 | return build_resnet_ratio_estimator(architecture, variables, **kwargs)
27 |
28 |
29 | def build_densenet_ratio_estimator(architecture, variables, **kwargs):
30 | from hypothesis.nn.amortized_ratio_estimation.densenet import build_ratio_estimator
31 | return build_ratio_estimator(variables, **kwargs)
32 |
33 |
34 | def build_densenet_with_depth_ratio_estimator(architecture, variables, **kwargs):
35 | raise NotImplementedError
36 |
37 |
38 | architectures = {
39 | # Multi-Layered Perceptron
40 | "mlp": build_mlp_ratio_estimator,
41 | # ResNet
42 | "resnet": build_resnet_ratio_estimator,
43 | "resnet-18": build_resnet_with_depth_ratio_estimator,
44 | "resnet-34": build_resnet_with_depth_ratio_estimator,
45 | "resnet-50": build_resnet_with_depth_ratio_estimator,
46 | "resnet-101": build_resnet_with_depth_ratio_estimator,
47 | "resnet-152": build_resnet_with_depth_ratio_estimator,
48 | # DenseNet
49 | "densenet": build_densenet_ratio_estimator,
50 | "densenet-121": build_densenet_with_depth_ratio_estimator,
51 | "densenet-161": build_densenet_with_depth_ratio_estimator,
52 | "densenet-169": build_densenet_with_depth_ratio_estimator,
53 | "densenet-201": build_densenet_with_depth_ratio_estimator}
54 |
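Sketch of the string-keyed dispatch above: "mlp" maps to the multi-layered perceptron builder and returns a class that still has to be instantiated; the shapes are illustrative.

import torch

from hypothesis.nn.amortized_ratio_estimation import build_ratio_estimator

RatioEstimator = build_ratio_estimator("mlp", {"inputs": (1,), "outputs": (10,)})
estimator = RatioEstimator()
_, log_ratios = estimator(inputs=torch.randn(4, 1), outputs=torch.randn(4, 10))
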
--------------------------------------------------------------------------------
/hypothesis/nn/densenet/__init__.py:
--------------------------------------------------------------------------------
1 | from .head import DenseNetHead
2 | from .network import DenseNet
3 |
--------------------------------------------------------------------------------
/hypothesis/nn/densenet/default.py:
--------------------------------------------------------------------------------
1 | depth = 121
2 | r"""Default DenseNet architecture (depth)."""
3 |
4 | batchnorm = True
5 | r"""Default batch normalization flag in Hypothesis."""
6 |
7 | bottleneck_factor = 4
8 | r"""Default bottleneck-factor in DenseNet architectures."""
9 |
10 | convolution_bias = False
11 | r"""Add biases in convolutions by default in Hypothesis."""
12 |
13 | channels = 3
14 | r"""Default number of data channels (e.g., channels in images)."""
15 |
--------------------------------------------------------------------------------
/hypothesis/nn/densenet/head.py:
--------------------------------------------------------------------------------
1 | import hypothesis
2 | import hypothesis.nn
3 | import hypothesis.nn.densenet
4 | import torch
5 |
6 | from .default import batchnorm as default_batchnorm
7 | from .default import bottleneck_factor as default_bottleneck_factor
8 | from .default import channels as default_channels
9 | from .default import convolution_bias as default_convolution_bias
10 | from .default import depth as default_depth
11 | from hypothesis.nn.densenet.util import load_configuration_121
12 | from hypothesis.nn.densenet.util import load_configuration_161
13 | from hypothesis.nn.densenet.util import load_configuration_169
14 | from hypothesis.nn.densenet.util import load_configuration_201
15 | from hypothesis.nn.densenet.util import load_modules
16 | from hypothesis.nn.util import compute_dimensionality
17 |
18 |
19 |
20 | class DenseNetHead(torch.nn.Module):
21 |
22 | def __init__(self,
23 | shape_xs,
24 | activation=hypothesis.default.activation,
25 | batchnorm=default_batchnorm,
26 | bottleneck_factor=default_bottleneck_factor,
27 | channels=default_channels,
28 | convolution_bias=default_convolution_bias,
29 | depth=default_depth,
30 | dropout=hypothesis.default.dropout):
31 | super(DenseNetHead, self).__init__()
32 | # Infer the dimensionality from the input shape.
33 | self.dimensionality = len(shape_xs)
34 | # Dimensionality and architecture properties.
35 | growth_rate, in_planes, config, modules = self._load_configuration(depth)
36 | self.module_convolution = modules[0]
37 | self.module_batchnorm = modules[1]
38 | self.module_maxpool = modules[2]
39 | self.module_average_pooling = modules[3]
40 | self.module_adaptive_average_pooling = modules[4]
41 | self.module_activation = activation
42 | # Network properties
43 | self.batchnorm = batchnorm
44 | self.channels = channels
45 | self.convolution_bias = convolution_bias
46 | self.in_planes = in_planes
47 | self.shape_xs = shape_xs
48 | # Network structure
49 | self.network_head = self._build_head()
50 | self.network_body = self._build_body(config, bottleneck_factor, dropout, growth_rate)
51 | self.embedding_dim = self._embedding_dimensionality()
52 |
53 | def _build_head(self):
54 | mappings = []
55 |
56 | # Convolution
57 | mappings.append(self.module_convolution(
58 | self.channels,
59 | self.in_planes,
60 | bias=self.convolution_bias,
61 | kernel_size=7,
62 | padding=3,
63 | stride=2))
64 | # Batch normalization
65 | if self.batchnorm:
66 | mappings.append(self.module_batchnorm(self.in_planes))
67 | # Activation
68 | mappings.append(self.module_activation())
69 | # Max pooling
70 | mappings.append(self.module_maxpool(
71 | kernel_size=3,
72 | padding=1,
73 | stride=2))
74 |
75 | return torch.nn.Sequential(*mappings)
76 |
77 | def _build_body(self, config, bottleneck_factor, dropout, growth_rate):
78 | mappings = []
79 | num_features = self.in_planes
80 | for index, num_layers in enumerate(config):
81 | # DenseBlock
82 | mappings.append(DenseBlock(
83 | activation=self.module_activation,
84 | batchnorm=self.batchnorm,
85 | bottleneck_factor=bottleneck_factor,
86 | dimensionality=self.dimensionality,
87 | dropout=dropout,
88 | growth_rate=growth_rate,
89 | num_input_features=num_features,
90 | num_layers=num_layers))
91 | num_features += num_layers * growth_rate
92 | # Transition
93 | if index != len(config) - 1:
94 | mappings.append(self._build_transition(
95 | input_features=num_features,
96 | output_features=num_features // 2))
97 | num_features = num_features // 2
98 | # Batch normalization
99 | if self.batchnorm:
100 | mappings.append(self.module_batchnorm(num_features))
101 | # Activation
102 | mappings.append(self.module_activation())
103 | # Adaptive average pooling
104 | pooling_shape = [1 for _ in range(self.dimensionality)]
105 | mappings.append(self.module_adaptive_average_pooling(pooling_shape))
106 |
107 | return torch.nn.Sequential(*mappings)
108 |
109 | def _build_transition(self, input_features, output_features):
110 | mappings = []
111 |
112 | # Batch normalization
113 | if self.batchnorm:
114 | mappings.append(self.module_batchnorm(input_features))
115 | # Activation
116 | mappings.append(self.module_activation())
117 | # Convolution
118 | mappings.append(self.module_convolution(
119 | input_features,
120 | output_features,
121 | bias=self.convolution_bias,
122 | kernel_size=1,
123 | stride=1))
124 | # Average pooling
125 | mappings.append(self.module_average_pooling(
126 | kernel_size=2,
127 | stride=2))
128 |
129 | return torch.nn.Sequential(*mappings)
130 |
131 | def _embedding_dimensionality(self):
132 | shape = (1, self.channels) + self.shape_xs
133 | with torch.no_grad():
134 | x = torch.randn(shape)
135 | latents = self.network_body(self.network_head(x)).view(-1)
136 | dimensionality = len(latents)
137 |
138 | return dimensionality
139 |
140 | def embedding_dimensionality(self):
141 | return self.embedding_dim
142 |
143 | def forward(self, x):
144 | z = self.network_head(x)
145 | z = self.network_body(z)
146 |
147 | return z.view(-1, self.embedding_dim) # Flatten
148 |
149 | def _load_configuration(self, depth):
150 | modules = load_modules(self.dimensionality)
151 | configurations = {
152 | 121: load_configuration_121,
153 | 161: load_configuration_161,
154 | 169: load_configuration_169,
155 | 201: load_configuration_201}
156 | growth_rate, input_features, config = configurations[depth]()
157 |
158 | return growth_rate, input_features, config, modules
159 |
160 |
161 |
162 | class DenseBlock(torch.nn.Module):
163 |
164 | def __init__(self, dimensionality,
165 | activation,
166 | batchnorm,
167 | bottleneck_factor,
168 | dropout,
169 | growth_rate,
170 | num_input_features,
171 | num_layers):
172 | super(DenseBlock, self).__init__()
173 | # Add the layers to the block.
174 | self.layers = torch.nn.ModuleList()
175 | for index in range(num_layers):
176 | self.layers.append(DenseLayer(
177 | activation=activation,
178 | batchnorm=batchnorm,
179 | bottleneck_factor=bottleneck_factor,
180 | dimensionality=dimensionality,
181 | dropout=dropout,
182 | growth_rate=growth_rate,
183 | num_input_features=num_input_features + index * growth_rate))
184 |
185 | def forward(self, x):
186 | features = [x]
187 | for layer in self.layers:
188 | features.append(layer(features))
189 |
190 | return torch.cat(features, dim=1)
191 |
192 |
193 |
194 | class DenseLayer(torch.nn.Module):
195 |
196 | def __init__(self, dimensionality,
197 | activation,
198 | batchnorm,
199 | bottleneck_factor,
200 | dropout,
201 | growth_rate,
202 | num_input_features):
203 | super(DenseLayer, self).__init__()
204 | # Load the modules depending on the dimensionality
205 | modules = load_modules(dimensionality)
206 | self.module_convolution = modules[0]
207 | self.module_batchnorm = modules[1]
208 | self.module_maxpool = modules[2]
209 | self.module_average_pooling = modules[3]
210 | self.module_activation = activation
211 | # Construct the dense layer
212 | self.network_mapping = self._build_mapping(
213 | batchnorm,
214 | bottleneck_factor,
215 | dropout,
216 | growth_rate,
217 | num_input_features)
218 |
219 | def _build_mapping(self, batchnorm, bottleneck_factor, dropout, growth_rate, num_input_features):
220 | mappings = []
221 |
222 | # Batch normalization
223 | if batchnorm:
224 | mappings.append(self.module_batchnorm(num_input_features))
225 | # Activation
226 | mappings.append(self.module_activation())
227 | # Convolution
228 | mappings.append(self.module_convolution(
229 | num_input_features,
230 | bottleneck_factor * growth_rate,
231 | kernel_size=1,
232 | stride=1,
233 | bias=False))
234 | # Normalization
235 | mappings.append(self.module_batchnorm(bottleneck_factor * growth_rate))
236 | # Activation
237 | mappings.append(self.module_activation())
238 | # Convolution
239 | mappings.append(self.module_convolution(
240 | bottleneck_factor * growth_rate,
241 | growth_rate,
242 | kernel_size=3,
243 | stride=1,
244 | padding=1,
245 | bias=False))
246 | # Dropout
247 | if dropout > 0:
248 | mappings.append(torch.nn.Dropout(p=dropout))
249 |
250 | return torch.nn.Sequential(*mappings)
251 |
252 | def forward(self, x):
253 | z = torch.cat(x, dim=1)
254 |
255 | return self.network_mapping(z)
256 |
--------------------------------------------------------------------------------
/hypothesis/nn/densenet/network.py:
--------------------------------------------------------------------------------
1 | import hypothesis
2 | import hypothesis.nn.densenet
3 | import torch
4 |
5 | from .default import batchnorm as default_batchnorm
6 | from .default import bottleneck_factor as default_bottleneck_factor
7 | from .default import channels as default_channels
8 | from .default import convolution_bias as default_convolution_bias
9 | from .default import depth as default_depth
10 | from hypothesis.nn import MLP
11 | from hypothesis.nn.densenet import DenseNetHead
12 |
13 |
14 |
15 | class DenseNet(torch.nn.Module):
16 |
17 | def __init__(self,
18 | shape_xs,
19 | shape_ys,
20 | activation=hypothesis.default.activation,
21 | batchnorm=default_batchnorm,
22 | bottleneck_factor=default_bottleneck_factor,
23 | channels=default_channels,
24 | convolution_bias=default_convolution_bias,
25 | depth=default_depth,
26 | dropout=hypothesis.default.dropout,
27 | trunk_activation=None,
28 | trunk_dropout=None,
29 | trunk_layers=hypothesis.default.trunk,
30 | transform_output="normalize"):
31 | super(DenseNet, self).__init__()
32 | # Compute the dimensionality of the inputs.
33 | self.dimensionality = len(shape_xs)
34 | # Construct the convolutional DenseNet head.
35 | self.head = DenseNetHead(
36 | activation=activation,
37 | batchnorm=batchnorm,
38 | bottleneck_factor=bottleneck_factor,
39 | channels=channels,
40 | convolution_bias=convolution_bias,
41 | depth=depth,
42 | dropout=dropout,
43 | shape_xs=shape_xs)
44 | # Compute the embedding dimensionality of the head.
45 | embedding_dim = self.head.embedding_dimensionality()
46 | # Check if custom trunk settings have been defined.
47 | if trunk_activation is None:
48 | trunk_activation = activation
49 | if trunk_dropout is None:
50 | trunk_dropout = dropout
51 | # Construct the trunk of the network.
52 | self.trunk = MLP(
53 | shape_xs=(embedding_dim,),
54 | shape_ys=shape_ys,
55 | activation=trunk_activation,
56 | dropout=trunk_dropout,
57 | layers=trunk_layers,
58 | transform_output=transform_output)
59 |
60 | def forward(self, x):
61 | z = self.head(x)
62 | y = self.trunk(z)
63 |
64 | return y
65 |
--------------------------------------------------------------------------------
/hypothesis/nn/densenet/util.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 |
4 |
5 | def load_modules(dimensionality):
6 | configurations = {
7 | 1: load_modules_1_dimensional,
8 | 2: load_modules_2_dimensional,
9 | 3: load_modules_3_dimensional}
10 |
11 | return configurations[dimensionality]()
12 |
13 |
14 | def load_modules_1_dimensional():
15 | c = torch.nn.Conv1d
16 | b = torch.nn.BatchNorm1d
17 | m = torch.nn.MaxPool1d
18 | a = torch.nn.AvgPool1d
19 | ap = torch.nn.AdaptiveAvgPool1d
20 |
21 | return c, b, m, a, ap
22 |
23 |
24 | def load_modules_2_dimensional():
25 | c = torch.nn.Conv2d
26 | b = torch.nn.BatchNorm2d
27 | m = torch.nn.MaxPool2d
28 | a = torch.nn.AvgPool2d
29 | ap = torch.nn.AdaptiveAvgPool2d
30 |
31 | return c, b, m, a, ap
32 |
33 |
34 | def load_modules_3_dimensional():
35 | c = torch.nn.Conv3d
36 | b = torch.nn.BatchNorm3d
37 | m = torch.nn.MaxPool3d
38 | a = torch.nn.AvgPool3d
39 | ap = torch.nn.AdaptiveAvgPool3d
40 |
41 | return c, b, m, a, ap
42 |
43 |
44 | def load_configuration_121():
45 | growth_rate = 32
46 | input_features = 64
47 | config = [6, 12, 24, 16]
48 |
49 | return growth_rate, input_features, config
50 |
51 |
52 | def load_configuration_161():
53 | growth_rate = 48
54 | input_features = 96
55 | config = [6, 12, 36, 24]
56 |
57 | return growth_rate, input_features, config
58 |
59 |
60 | def load_configuration_169():
61 | growth_rate = 32
62 | input_features = 64
63 | config = [6, 12, 32, 32]
64 |
65 | return growth_rate, input_features, config
66 |
67 |
68 | def load_configuration_201():
69 | growth_rate = 32
70 | input_features = 64
71 | config = [6, 12, 48, 32]
72 |
73 | return growth_rate, input_features, config
74 |
--------------------------------------------------------------------------------
/hypothesis/nn/multi_layered_perceptron.py:
--------------------------------------------------------------------------------
1 | r"""Multilayered Perceptron
2 |
3 | """
4 |
5 | import hypothesis
6 | import torch
7 |
8 | from hypothesis.nn.util import allocate_output_transform
9 | from hypothesis.nn.util import compute_dimensionality
10 |
11 |
12 |
13 | class MultiLayeredPerceptron(torch.nn.Module):
14 |
15 | def __init__(self, shape_xs, shape_ys,
16 | activation=hypothesis.default.activation,
17 | dropout=hypothesis.default.dropout,
18 | layers=hypothesis.default.trunk,
19 | transform_output="normalize"):
20 | super(MultiLayeredPerceptron, self).__init__()
21 | mappings = []
22 | dropout = float(dropout)
23 | # Dimensionality properties
24 | self.xs_dimensionality = compute_dimensionality(shape_xs)
25 | self.ys_dimensionality = compute_dimensionality(shape_ys)
26 | # Allocate input mapping
27 | mappings.append(torch.nn.Linear(self.xs_dimensionality, layers[0]))
28 | # Allocate internal network structure
29 | for index in range(1, len(layers)):
30 | mappings.append(self._make_layer(activation, dropout,
31 | layers[index - 1], layers[index]))
32 | # Allocate tail
33 | mappings.append(activation())
34 | mappings.append(torch.nn.Linear(layers[-1], self.ys_dimensionality))
35 | operation = allocate_output_transform(transform_output, self.ys_dimensionality)
36 | if operation is not None:
37 | mappings.append(operation)
38 | # Allocate sequential mapping
39 | self.mapping = torch.nn.Sequential(*mappings)
40 |
41 | def _make_layer(self, activation, dropout, num_a, num_b):
42 | mappings = []
43 |
44 | mappings.append(activation())
45 | if dropout > 0:
46 | mappings.append(torch.nn.Dropout(p=dropout))
47 | mappings.append(torch.nn.Linear(num_a, num_b))
48 |
49 | return torch.nn.Sequential(*mappings)
50 |
51 | def forward(self, xs):
52 | xs = xs.view(-1, self.xs_dimensionality)
53 | y = self.mapping(xs)
54 |
55 | return y
56 |
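Usage sketch: a small network with the default trunk and no output transform; shapes are illustrative.

import torch

from hypothesis.nn import MultiLayeredPerceptron

mlp = MultiLayeredPerceptron(shape_xs=(4,), shape_ys=(1,), transform_output=None)
y = mlp(torch.randn(16, 4))  # Shape (16, 1).
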
--------------------------------------------------------------------------------
/hypothesis/nn/neuromodulation/__init__.py:
--------------------------------------------------------------------------------
1 | from .base import BaseNeuromodulatedModule
2 | from .base import allocate_neuromodulated_activation
3 | from .base import list_neuromodulated_modules
4 | from .controller import allocate_default_neuromodulation_controller
5 | from .elu import NeuromodulatedELU
6 | from .relu import NeuromodulatedReLU
7 | from .selu import NeuromodulatedSELU
8 | from .tanh import NeuromodulatedTanh
9 |
--------------------------------------------------------------------------------
/hypothesis/nn/neuromodulation/base.py:
--------------------------------------------------------------------------------
1 | import hypothesis
2 | import torch
3 |
4 | from hypothesis.nn.util import list_modules_with_type
5 |
6 |
7 |
8 | def allocate_neuromodulated_activation(activation, allocator):
9 | class LambdaNeuromodulatedActivation(BaseNeuromodulatedModule):
10 |
11 | def __init__(self):
12 | super(LambdaNeuromodulatedActivation, self).__init__(
13 | controller=allocator(),
14 | activation=activation)
15 |
16 | return LambdaNeuromodulatedActivation
17 |
18 |
19 | def list_neuromodulated_modules(module):
20 | desired_type = BaseNeuromodulatedModule
21 |
22 | return list_modules_with_type(module, desired_type)
23 |
24 |
25 |
26 | class BaseNeuromodulatedModule(torch.nn.Module):
27 |
28 | def __init__(self, controller, activation=hypothesis.default.activation, **kwargs):
29 | super(BaseNeuromodulatedModule, self).__init__()
30 | self.activation = activation(**kwargs)
31 | self.bias = torch.randn(1, 1)
32 | self.controller = controller
33 |
34 | def forward(self, x, context=None):
35 | if context is not None:
36 | self.update(context)
37 |
38 | return self.activation(x + self.bias)
39 |
40 | def update(self, context):
41 | self.bias = self.controller(context)
42 |
--------------------------------------------------------------------------------
/hypothesis/nn/neuromodulation/controller.py:
--------------------------------------------------------------------------------
1 | import hypothesis
2 | import numpy as np
3 | import torch
4 |
5 | from hypothesis.nn import MLP
6 |
7 |
8 |
9 | def allocate_default_neuromodulation_controller(shape_context,
10 | activation=hypothesis.default.activation,
11 | dropout=hypothesis.default.dropout,
12 | layers=hypothesis.default.trunk):
13 | return DefaultNeuromodulationController(
14 | shape_context=shape_context,
15 | activation=activation,
16 | dropout=dropout,
17 | layers=layers)
18 |
19 |
20 |
21 | class DefaultNeuromodulationController(torch.nn.Module):
22 |
23 | def __init__(self, shape_context,
24 | activation=hypothesis.default.activation,
25 | dropout=hypothesis.default.dropout,
26 | layers=hypothesis.default.trunk):
27 | super(DefaultNeuromodulationController, self).__init__()
28 | self.mlp = MLP(
29 | shape_xs=shape_context,
30 | shape_ys=(1,),
31 | activation=activation,
32 | dropout=dropout,
33 | layers=layers,
34 | transform_output=None)
35 |
36 | def forward(self, x):
37 | return self.mlp(x)
38 |
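A minimal sketch wiring a neuromodulated activation to the default controller; the context and input shapes below are illustrative assumptions::

    import torch

    from hypothesis.nn.neuromodulation import NeuromodulatedReLU
    from hypothesis.nn.neuromodulation import allocate_default_neuromodulation_controller

    controller = allocate_default_neuromodulation_controller(shape_context=(5,))
    activation = NeuromodulatedReLU(controller=controller)
    x = torch.randn(8, 3)               # Pre-activations.
    context = torch.randn(8, 5)         # Context driving the modulation.
    y = activation(x, context=context)  # The controller maps the context to an additive bias.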
--------------------------------------------------------------------------------
/hypothesis/nn/neuromodulation/elu.py:
--------------------------------------------------------------------------------
1 | import hypothesis
2 | import numpy as np
3 | import torch
4 |
5 | from hypothesis.nn.neuromodulation import BaseNeuromodulatedModule
6 |
7 |
8 |
9 | class NeuromodulatedELU(BaseNeuromodulatedModule):
10 |
11 | def __init__(self, controller, inplace=False):
12 | super(NeuromodulatedELU, self).__init__(
13 | controller=controller,
14 | activation=torch.nn.ELU,
15 | **{"inplace": inplace})
16 |
--------------------------------------------------------------------------------
/hypothesis/nn/neuromodulation/relu.py:
--------------------------------------------------------------------------------
1 | import hypothesis
2 | import numpy as np
3 | import torch
4 |
5 | from hypothesis.nn.neuromodulation import BaseNeuromodulatedModule
6 |
7 |
8 |
9 | class NeuromodulatedReLU(BaseNeuromodulatedModule):
10 |
11 | def __init__(self, controller, inplace=False):
12 | super(NeuromodulatedReLU, self).__init__(
13 | controller=controller,
14 | activation=torch.nn.ReLU,
15 | **{"inplace": inplace})
16 |
--------------------------------------------------------------------------------
/hypothesis/nn/neuromodulation/selu.py:
--------------------------------------------------------------------------------
1 | import hypothesis
2 | import numpy as np
3 | import torch
4 |
5 | from hypothesis.nn.neuromodulation import BaseNeuromodulatedModule
6 |
7 |
8 |
9 | class NeuromodulatedSELU(BaseNeuromodulatedModule):
10 |
11 | def __init__(self, controller, inplace=False):
12 | super(NeuromodulatedSELU, self).__init__(
13 | controller=controller,
14 | activation=torch.nn.SELU,
15 | **{"inplace": inplace})
16 |
--------------------------------------------------------------------------------
/hypothesis/nn/neuromodulation/tanh.py:
--------------------------------------------------------------------------------
1 | import hypothesis
2 | import numpy as np
3 | import torch
4 |
5 | from hypothesis.nn.neuromodulation import BaseNeuromodulatedModule
6 |
7 |
8 |
9 | class NeuromodulatedTanh(BaseNeuromodulatedModule):
10 |
11 | def __init__(self, controller):
12 | super(NeuromodulatedTanh, self).__init__(
13 | controller=controller,
14 | activation=torch.nn.Tanh)
15 |
--------------------------------------------------------------------------------
/hypothesis/nn/resnet/__init__.py:
--------------------------------------------------------------------------------
1 | from .head import ResNetHead
2 | from .network import ResNet
3 |
--------------------------------------------------------------------------------
/hypothesis/nn/resnet/default.py:
--------------------------------------------------------------------------------
1 | depth = 18
2 | r"""Default ResNet architecture (depth)."""
3 |
4 | batchnorm = True
5 | r"""Default batch normalization flag in Hypothesis."""
6 |
7 | convolution_bias = False
8 | r"""Add biases in convolutions by default in Hypothesis."""
9 |
10 | channels = 3
11 | r"""Default number of data channels (e.g., channels in images)."""
12 |
13 | dilate = False
14 | r"""Default usage of dilated convolutions."""
15 |
16 | in_planes = 64
17 | r"""Default number of input feature maps."""
18 |
19 | width_per_group = 64
20 | r"""Default group width."""
21 |
22 | groups = 1
23 | r"""Default number of concurrent convolutional groups."""
24 |
--------------------------------------------------------------------------------
/hypothesis/nn/resnet/network.py:
--------------------------------------------------------------------------------
1 | import hypothesis
2 | import hypothesis.nn.resnet
3 | import torch
4 |
5 | from .default import batchnorm as default_batchnorm
6 | from .default import channels as default_channels
7 | from .default import convolution_bias as default_convolution_bias
8 | from .default import depth as default_depth
9 | from .default import dilate as default_dilate
10 | from .default import groups as default_groups
11 | from .default import in_planes as default_in_planes
12 | from .default import width_per_group as default_width_per_group
13 | from hypothesis.nn import MLP
14 | from hypothesis.nn.resnet import ResNetHead
15 |
16 |
17 |
18 | class ResNet(torch.nn.Module):
19 |
20 | def __init__(self,
21 | shape_xs,
22 | shape_ys,
23 | activation=hypothesis.default.activation,
24 | batchnorm=default_batchnorm,
25 | channels=default_channels,
26 | convolution_bias=default_convolution_bias,
27 | depth=default_depth,
28 | dilate=default_dilate,
29 | groups=default_groups,
30 | in_planes=default_in_planes,
31 | width_per_group=default_width_per_group,
32 | trunk_activation=None,
33 | trunk_dropout=hypothesis.default.dropout,
34 | trunk_layers=hypothesis.default.trunk,
35 | transform_output="normalize"):
36 | super(ResNet, self).__init__()
37 | # Compute the dimensionality of the inputs.
38 | self.dimensionality = len(shape_xs)
39 | # Construct the convolutional ResNet head.
40 | self.head = ResNetHead(
41 | activation=activation,
42 | batchnorm=batchnorm,
43 | channels=channels,
44 | convolution_bias=convolution_bias,
45 | depth=depth,
46 | dilate=dilate,
47 | groups=groups,
48 | in_planes=in_planes,
49 | shape_xs=shape_xs,
50 | width_per_group=width_per_group)
51 | # Check if custom trunk settings have been defined.
52 | if trunk_activation is None:
53 | trunk_activation = activation
54 | # Construct the trunk of the network.
55 | self.trunk = MLP(
56 | shape_xs=(self.head.embedding_dimensionality(),),
57 | shape_ys=shape_ys,
58 | activation=trunk_activation,
59 | dropout=trunk_dropout,
60 | layers=trunk_layers,
61 | transform_output=transform_output)
62 |
63 | def forward(self, x):
64 | z = self.head(x)
65 | y = self.trunk(z)
66 |
67 | return y
68 |
--------------------------------------------------------------------------------
/hypothesis/nn/resnet/util.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 |
4 |
5 | def load_modules(dimensionality):
6 | configurations = {
7 | 1: load_modules_1_dimensional,
8 | 2: load_modules_2_dimensional,
9 | 3: load_modules_3_dimensional}
10 |
11 | return configurations[dimensionality]()
12 |
13 |
14 | def load_modules_1_dimensional():
15 | c = torch.nn.Conv1d
16 | b = torch.nn.BatchNorm1d
17 | m = torch.nn.MaxPool1d
18 | a = torch.nn.AdaptiveAvgPool1d
19 |
20 | return c, b, m, a
21 |
22 |
23 | def load_modules_2_dimensional():
24 | c = torch.nn.Conv2d
25 | b = torch.nn.BatchNorm2d
26 | m = torch.nn.MaxPool2d
27 | a = torch.nn.AdaptiveAvgPool2d
28 |
29 | return c, b, m, a
30 |
31 |
32 | def load_modules_3_dimensional():
33 | c = torch.nn.Conv3d
34 | b = torch.nn.BatchNorm3d
35 | m = torch.nn.MaxPool3d
36 | a = torch.nn.AdaptiveAvgPool3d
37 |
38 | return c, b, m, a
39 |
--------------------------------------------------------------------------------
/hypothesis/nn/util.py:
--------------------------------------------------------------------------------
1 | r"""Utilities for hypothesis.nn."""
2 |
3 | import hypothesis
4 | import numpy as np
5 | import torch
6 |
7 | from hypothesis.util import is_iterable
8 |
9 |
10 |
11 | def allocate_output_transform(transformation, shape):
12 | mapping = None
13 | if is_iterable(shape):
14 | dimensionality = compute_dimensionality(shape)
15 | else:
16 | dimensionality = shape
17 | if transformation is "normalize":
18 | if dimensionality > 1:
19 | mapping = torch.nn.Softmax(dim=0)
20 | else:
21 | mapping = torch.nn.Sigmoid()
22 | elif transformation is not None:
23 | mapping = transformation()
24 |
25 | return mapping
26 |
27 |
28 | def compute_dimensionality(shape):
29 | dimensionality = 1
30 | for dim in shape:
31 | dimensionality *= dim
32 |
33 | return dimensionality
34 |
35 |
36 | def list_modules_with_type(module, type):
37 | selected_modules = []
38 | for m in list(module.modules()):
39 | if isinstance(m, type):
40 | selected_modules.append(m)
41 |
42 | return selected_modules
43 |
--------------------------------------------------------------------------------
/hypothesis/rl/__init__.py:
--------------------------------------------------------------------------------
1 | from .environment import Environment
2 | from .experience_replay_buffer import ExperienceReplayBuffer
3 |
--------------------------------------------------------------------------------
/hypothesis/rl/dqv.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/montefiore-institute/hypothesis/29a2b7b4649db345d43a8d3bf98aa5d817b43f1b/hypothesis/rl/dqv.py
--------------------------------------------------------------------------------
/hypothesis/rl/environment.py:
--------------------------------------------------------------------------------
1 | class Environment:
2 |
3 | def step(self, action):
4 | raise NotImplementedError
5 |
6 | def reset(self):
7 | raise NotImplementedError
8 |
--------------------------------------------------------------------------------
/hypothesis/rl/experience_replay_buffer.py:
--------------------------------------------------------------------------------
1 | import hypothesis
2 | import numpy as np
3 | import torch
4 |
5 | from torch.utils.data import Dataset
6 |
7 |
8 |
9 | class ExperienceReplayBuffer(Dataset):
10 |
11 | def __init__(self, num_stores=5):
12 | # Check if a valid number of stores has been specified
13 | if num_stores <= 0:
14 | raise ValueError("A valid number of data storages (> 0) needs to be specified.")
15 | # Replay buffer state
16 | self.num_storages = num_stores
17 | self.storages = [[] for _ in range(self.num_storages)]
18 |
19 | @torch.no_grad()
20 | def _retrieve(self, index):
21 | elements = []
22 | for storage_index in range(self.num_storages):
23 | elements.append(self.storages[storage_index][index].unsqueeze(dim=0))
24 |
25 | return tuple(elements)
26 |
27 | @torch.no_grad()
28 | def put(self, transition):
29 | for index in range(self.num_storages):
30 | self.storages[index].append(transition[index].squeeze().cpu())
31 |
32 | def size(self):
33 | return len(self.storages[0])
34 |
35 | @torch.no_grad()
36 | def sample(self, batch_size=hypothesis.default.batch_size):
37 | indices = np.random.randint(0, self.size(), size=batch_size)
38 | tensors = [[] for _ in range(self.num_storages)]
39 | experiences = [self._retrieve(index) for index in indices]
40 | for experience in experiences:
41 | for storage_index, element in enumerate(experience):
42 | tensors[storage_index].append(element)
43 | for index in range(self.num_storages):
44 | tensors[index] = torch.cat(tensors[index], dim=0)
45 |
46 | return tuple(tensors)
47 |
48 | def __getitem__(self, index):
49 | return self._retrieve(index)
50 |
51 | def __len__(self):
52 | return self.size()
53 |
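A minimal sketch storing and sampling (state, action, reward) transitions; the tensor shapes are illustrative assumptions::

    import torch

    from hypothesis.rl import ExperienceReplayBuffer

    buffer = ExperienceReplayBuffer(num_stores=3)  # One storage per transition element.
    for _ in range(100):
        transition = (torch.randn(1, 4), torch.randint(0, 2, (1, 1)), torch.randn(1, 1))
        buffer.put(transition)
    states, actions, rewards = buffer.sample(batch_size=32)  # Shapes: (32, 4), (32,), (32,).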
--------------------------------------------------------------------------------
/hypothesis/rl/reinforce.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/montefiore-institute/hypothesis/29a2b7b4649db345d43a8d3bf98aa5d817b43f1b/hypothesis/rl/reinforce.py
--------------------------------------------------------------------------------
/hypothesis/simulation/__init__.py:
--------------------------------------------------------------------------------
1 | r"""``hypothesis.simulation`` is a package consisting of the base simulator
2 | architecture and utilities to perform efficient simulations. Every forward
3 | model needs to be wrapped in a class which inherits from
4 | ``hypothesis.simulation.Simulator``.
5 | """
6 |
7 | from .base import Simulator
8 | from .base import ParallelSimulator
9 |
--------------------------------------------------------------------------------
/hypothesis/simulation/base.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from multiprocessing import Pool
4 |
5 |
6 |
7 | class Simulator(torch.nn.Module):
8 | r"""Base simulator class.
9 |
10 | A simulator defines the forward model.
11 |
12 | Example usage of a potential simulator implementation::
13 |
14 | simulator = MySimulator()
15 | inputs = prior.sample(torch.Size([10])) # Draw 10 samples from the prior.
16 | outputs = simulator(inputs)
17 | """
18 |
19 | def __init__(self):
20 | super(Simulator, self).__init__()
21 |
22 | def forward(self, inputs):
23 | r"""Defines the computation of the forward model at every call.
24 |
25 | Note:
26 | Should be overridden by all subclasses, which should also document the expected format of ``inputs``.
27 | """
28 | raise NotImplementedError
29 |
30 | def __del__(self):
31 | self.terminate()
32 |
33 | def terminate(self):
34 | r"""Terminates the simulator and cleans up possible contexts.
35 |
36 | Note:
37 | Should be overridden by subclasses with a simulator state
38 | requiring a graceful exit, for instance to release worker
39 | pools or open file handles.
40 | """
41 | pass
42 |
43 |
44 |
45 | class ParallelSimulator(Simulator):
46 |
47 | def __init__(self, simulator, workers=2):
48 | super(ParallelSimulator, self).__init__()
49 | self.simulator = simulator
50 | self.workers = workers
51 |
52 | @torch.no_grad()
53 | def _prepare_arguments(self, **kwargs):
54 | arguments = []
55 |
56 | # Determine the number of chunks
57 | rows = kwargs[list(kwargs.keys())[0]].shape[0]
58 | chunk_size = rows // self.workers
59 | if chunk_size == 0:
60 | chunk_size = 1
61 | for base in range(0, rows, chunk_size):
62 | argument = {}
63 | for k, v in kwargs.items():
64 | argument[k] = v[base:base + chunk_size]
65 | arguments.append((self.simulator, argument))
66 |
67 | return arguments
68 |
69 | @torch.no_grad()
70 | def forward(self, **kwargs):
71 | pool = Pool(processes=self.workers)
72 | arguments = self._prepare_arguments(**kwargs)
73 | outputs = pool.map(self._simulate, arguments)
74 | pool.close()
75 | pool.join()
76 | del pool
77 |
78 | return torch.cat(outputs, dim=0)
79 |
80 | @staticmethod
81 | def _simulate(arguments):
82 | simulator, kwargs = arguments
83 |
84 | return simulator(**kwargs)
85 |
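A minimal sketch of a toy forward model and its parallelized counterpart; the Gaussian simulator is an illustrative assumption and the ``__main__`` guard is only needed because ``ParallelSimulator`` spawns worker processes::

    import torch

    from hypothesis.simulation import ParallelSimulator
    from hypothesis.simulation import Simulator

    class GaussianSimulator(Simulator):

        def forward(self, inputs):
            return inputs + torch.randn(inputs.shape[0], 1)

    if __name__ == "__main__":
        simulator = GaussianSimulator()
        inputs = torch.randn(10, 1)
        outputs = simulator(inputs)                         # Sequential execution.
        parallel = ParallelSimulator(simulator, workers=2)
        outputs = parallel(inputs=inputs)                   # Chunked over 2 worker processes.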
--------------------------------------------------------------------------------
/hypothesis/simulation/util.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 |
4 |
5 | def sample_joint(simulator, prior, n=1):
6 | r""""""
7 | inputs = prior.sample(torch.Size([n])).view(n, -1)
8 | outputs = simulator(inputs)
9 |
10 | return inputs, outputs
11 |
12 |
13 |
14 | def joint_sampler(simulator, prior):
15 | r""""""
16 | yield sample_joint(simulator, prior, n=1)
17 |
18 |
19 |
20 | def sample_marginal(simulator, prior, n=1):
21 | r""""""
22 | _, outputs = sample_joint(simulator, prior, n=n)
23 |
24 | return outputs
25 |
26 |
27 |
28 | def marginal_sampler(simulator, prior):
29 | r""""""
30 | yield sample_marginal(simulator, prior, n=1)
31 |
32 |
33 |
34 | def sample_likelihood(simulator, input, n=1):
35 | r""""""
36 | inputs = input.view(1, -1).repeat(n, 1)
37 | outputs = simulator(inputs)
38 |
39 | return outputs
40 |
41 |
42 |
43 | def likelihood_sampler(simulator, input):
44 | r""""""
45 | yield sample_likelihood(simulator, input, n=1)
46 |
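A minimal sketch drawing joint and marginal samples with the utilities above; the toy Gaussian simulator and standard-normal prior are illustrative assumptions::

    import torch

    from hypothesis.simulation import Simulator
    from hypothesis.simulation.util import sample_joint
    from hypothesis.simulation.util import sample_marginal

    class GaussianSimulator(Simulator):

        def forward(self, inputs):
            return inputs + torch.randn(inputs.shape[0], 1)

    prior = torch.distributions.Normal(0.0, 1.0)
    simulator = GaussianSimulator()
    inputs, outputs = sample_joint(simulator, prior, n=100)  # Shapes: (100, 1) and (100, 1).
    outputs = sample_marginal(simulator, prior, n=100)       # Marginal samples of the outputs.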
--------------------------------------------------------------------------------
/hypothesis/stat/__init__.py:
--------------------------------------------------------------------------------
1 | from .constraint import confidence_level
2 | from .constraint import highest_density_level
3 | from .constraint import likelihood_ratio_test_statistic
4 |
--------------------------------------------------------------------------------
/hypothesis/stat/constraint.py:
--------------------------------------------------------------------------------
1 | import math
2 | import numpy as np
3 | import torch
4 |
5 | from scipy.stats import chi2
6 |
7 |
8 |
9 | @torch.no_grad()
10 | def highest_density_region(pdf, alpha, bias=0.0, min_epsilon=10e-17):
11 | _, mask = highest_density_level(pdf, alpha, bias=bias, min_epsilon=min_epsilon, region=True)
12 |
13 | return mask
14 |
15 |
16 | @torch.no_grad()
17 | def highest_density_level(pdf, alpha, bias=0.0, min_epsilon=10e-17, region=False):
18 | # Check if a proper bias has been specified.
19 | if bias >= alpha:
20 | raise ValueError("The bias cannot be larger or equal to the specified alpha level.")
21 | # Detect numpy type
22 | if type(pdf).__module__ != np.__name__:
23 | pdf = pdf.cpu().clone().numpy()
24 | else:
25 | pdf = np.array(pdf)
26 | total_pdf = pdf.sum()
27 | pdf /= total_pdf
28 | # Compute highest density level and the corresponding mask
29 | n = len(pdf)
30 | optimal_level = pdf.max().item()
31 | epsilon = 10e-02
32 | while epsilon >= min_epsilon:
33 | area = float(0)
34 | while area <= (alpha + bias):
35 | # Compute the integral
36 | m = (pdf >= optimal_level).astype(np.float32)
37 | area = np.sum(m * pdf)
38 | # Compute the error and apply gradient descent
39 | optimal_level -= epsilon
40 | optimal_level += 2 * epsilon
41 | epsilon /= 10
42 | optimal_level *= total_pdf
43 | if region:
44 | return optimal_level, torch.from_numpy(m)
45 | else:
46 | return optimal_level
47 |
48 |
49 | @torch.no_grad()
50 | def confidence_level(log_ratios, dof=None, level=0.95):
51 | if dof is None:
52 | dof = log_ratios.dim() - 1
53 | test_statistic = likelihood_ratio_test_statistic(log_ratios)
54 | level = chi2.isf(1 - level, df=dof)
55 |
56 | return test_statistic, level
57 |
58 |
59 | @torch.no_grad()
60 | def likelihood_ratio_test_statistic(log_ratios):
61 | max_ratio = log_ratios[log_ratios.argmax()]
62 | test_statistic = -2 * (log_ratios - max_ratio)
63 | test_statistic -= test_statistic.min()
64 |
65 | return test_statistic
66 |
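A minimal sketch computing a 95% highest-density level on a discretized density; the evaluation grid and the Gaussian pdf are illustrative assumptions::

    import numpy as np

    from hypothesis.stat import highest_density_level

    grid = np.linspace(-5.0, 5.0, 1000)
    pdf = np.exp(-0.5 * grid ** 2)                  # Unnormalized density evaluated on the grid.
    level = highest_density_level(pdf, alpha=0.95)  # Density value enclosing roughly 95% of the mass.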
--------------------------------------------------------------------------------
/hypothesis/summary/__init__.py:
--------------------------------------------------------------------------------
1 | from .mcmc import Chain
2 | from .train import TrainingSummary
3 |
--------------------------------------------------------------------------------
/hypothesis/summary/mcmc.py:
--------------------------------------------------------------------------------
1 | r"""Summary objects and statistics for Markov chain Monte Carlo methods."""
2 |
3 | import numpy as np
4 | import torch
5 | import warnings
6 |
7 |
8 |
9 | class Chain:
10 | r"""Summary of a Markov chain produced by an MCMC sampler."""
11 |
12 | def __init__(self, samples, acceptance_probabilities, acceptances):
13 | self.acceptance_probabilities = acceptance_probabilities
14 | self.acceptances = acceptances
15 | self.samples = samples.cpu()
16 | self.shape = samples.shape
17 |
18 | def mean(self, parameter_index=None):
19 | with torch.no_grad():
20 | mean = self.samples[:, parameter_index].mean(dim=0).squeeze()
21 |
22 | return mean
23 |
24 | def std(self, parameter_index=None):
25 | with torch.no_grad():
26 | std = self.samples[:, parameter_index].std(dim=0).squeeze()
27 |
28 | return std
29 |
30 | def variance(self, parameter_index=None):
31 | with torch.no_grad():
32 | variance = self.std(parameter_index) ** 2
33 |
34 | return variance
35 |
36 | def monte_carlo_error(self):
37 | with torch.no_grad():
38 | mc_error = (self.variance() / self.effective_size()).sqrt()
39 |
40 | return mc_error
41 |
42 | def size(self):
43 | return len(self.samples)
44 |
45 | def min(self):
46 | return self.samples.min(dim=0)
47 |
48 | def max(self):
49 | return self.samples.max(dim=0)
50 |
51 | def dimensionality(self):
52 | return self.samples.shape[1:][0]
53 |
54 | def autocorrelation(self, lag):
55 | return self.autocorrelations()[lag]
56 |
57 | def autocorrelations(self):
58 | with warnings.catch_warnings():
59 | warnings.simplefilter("ignore")
60 | samples = self.samples.numpy()
61 | samples = np.atleast_1d(samples)
62 | axis = 0
63 | m = [slice(None), ] * len(samples.shape)
64 | n = samples.shape[axis]
65 | f = np.fft.fft(samples - np.mean(samples, axis=axis), n=2 * n, axis=axis)
66 | m[axis] = slice(0, n)
67 | samples = np.fft.ifft(f * np.conjugate(f), axis=axis)[tuple(m)].real
68 | m[axis] = 0
69 | acf = samples / samples[tuple(m)]
70 |
71 | return torch.from_numpy(acf).float()
72 |
73 | def integrated_autocorrelation(self, max_lag=None):
74 | autocorrelations = self.autocorrelations()
75 | integrated_autocorrelation = 0.
76 | if max_lag is None:
77 | max_lag = self.size()
78 | a_0 = autocorrelations[0]
79 | for index in range(max_lag):
80 | integrated_autocorrelation += autocorrelations[index]
81 |
82 | return integrated_autocorrelation
83 |
84 | def integrated_autocorrelations(self, interval=1, max_lag=None):
85 | autocorrelations = self.autocorrelations()
86 | integrated_autocorrelation = 0.
87 | integrated_autocorrelations = []
88 | if max_lag is None:
89 | max_lag = self.size()
90 | a_0 = autocorrelations[0]
91 | for index in range(max_lag):
92 | integrated_autocorrelation += autocorrelations[index]
93 | if index % interval == 0:
94 | integrated_autocorrelations.append(integrated_autocorrelation)
95 |
96 | return integrated_autocorrelations
97 |
98 | def effective_size(self):
99 | acf = self.autocorrelations()
100 | M = 0
101 | size = self.size()
102 | a_0 = acf[0]
103 | for lag in range(size):
104 | a = acf[lag]
105 | p = a / a_0
106 | if p <= 0:
107 | M = lag - 1
108 | break
109 | tau = self.integrated_autocorrelation(M)
110 | if tau == 0:
111 | tau = 1
112 | effective_size = size / tau
113 |
114 | return int(abs(effective_size))
115 |
116 | def efficiency(self):
117 | return self.effective_size() / self.size()
118 |
119 | def thin(self, proportion=None, num_samples=None):
120 | if proportion is None:
121 | proportion = self.efficiency()
122 | indices = np.arange(self.size())
123 | if num_samples is not None:
124 | num_samples = num_samples
125 | else:
126 | num_samples = int(proportion * self.size())
127 | sampled_indices = np.random.choice(indices, size=num_samples)
128 | samples = self.samples[sampled_indices]
129 |
130 | return Chain(samples, None, None)
131 |
132 | def is_thinned(self):
133 | return self.acceptance_probabilities is None or self.acceptances is None
134 |
135 | def __getitem__(self, pattern):
136 | return self.samples[pattern]
137 |
138 | def __len__(self):
139 | return self.size()
140 |
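A minimal sketch summarizing a synthetic chain; in practice the samples are produced by an MCMC sampler and the shapes below are illustrative assumptions::

    import torch

    from hypothesis.summary import Chain

    samples = torch.randn(5000, 1)  # Shape: (num_samples, num_parameters).
    chain = Chain(samples, acceptance_probabilities=None, acceptances=None)
    chain.mean(), chain.std()                   # Mean and standard deviation of the samples.
    chain.effective_size(), chain.efficiency()  # Autocorrelation-corrected sample size.
    thinned = chain.thin()                      # Subsample approximately independent draws.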
--------------------------------------------------------------------------------
/hypothesis/summary/train.py:
--------------------------------------------------------------------------------
1 | r"""Summary objects at the end of training procedures."""
2 |
3 | import numpy as np
4 | import pickle
5 | import torch
6 |
7 |
8 |
9 | class TrainingSummary:
10 |
11 | def __init__(self,
12 | model_best,
13 | model_final,
14 | epochs,
15 | epoch_best,
16 | losses_train,
17 | losses_test=None,
18 | identifier=None):
19 | self.identifier = identifier
20 | self.epochs = epochs
21 | self.model_best = model_best
22 | self.model_final = model_final
23 | self.epoch_best = epoch_best
24 | self.losses_train = losses_train
25 | self.losses_test = losses_test
26 |
27 | def save(self, path):
28 | summary = {
29 | "identifier": self.identifier,
30 | "best_model": self.model_best,
31 | "final_model": self.model_final,
32 | "epochs": self.epochs,
33 | "best_epoch": self.epoch_best,
34 | "training_losses": self.losses_train,
35 | "testing_losses": self.losses_test}
36 | torch.save(summary, path)
37 |
38 | def load(self, path):
39 | summary = torch.load(path)
40 | self.identifier = summary["identifier"]
41 | self.model_best = summary["best_model"]
42 | self.model_final = summary["final_model"]
43 | self.epochs = summary["epochs"]
44 | self.epoch_best = summary["best_epoch"]
45 | self.losses_train = summary["training_losses"]
46 | self.losses_test = summary["testing_losses"]
47 |
48 | def test_losses_available(self):
49 | return self.losses_test is not None and len(self.losses_test) > 0
50 |
51 | def identifier_available(self):
52 | return self.identifier is not None
53 |
54 | def num_epochs(self):
55 | return self.epochs
56 |
57 | def best_epoch(self):
58 | return self.epoch_best
59 |
60 | def best_model(self):
61 | return self.model_best
62 |
63 | def final_model(self):
64 | return self.model_final
65 |
66 | def test_losses(self, log=False):
67 | if log:
68 | losses = np.log(self.losses_test)
69 | else:
70 | losses = self.losses_test
71 |
72 | return losses
73 |
74 | def train_losses(self, log=False):
75 | if log:
76 | losses = np.log(self.losses_train)
77 | else:
78 | losses = self.losses_train
79 |
80 | return losses
81 |
82 | def __str__(self):
83 | representation = ""
84 | if self.identifier_available():
85 | representation = "Identifier:\t\t{}\n".format(self.identifier)
86 | representation = representation + "Total epochs:\t\t{}\n".format(self.epochs) + \
87 | "Best training loss:\t{}\n".format(self.losses_train.min()) + \
88 | "Final training loss:\t{}".format(self.losses_train[-1])
89 | if self.test_losses_available():
90 | representation = representation + \
91 | "\nBest testing loss:\t{}\n".format(self.losses_test.min()) + \
92 | "Best test epoch:\t{}\n".format(self.epoch_best) + \
93 | "Final test loss:\t{}".format(self.losses_test[-1])
94 |
95 | return representation
96 |
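A minimal sketch constructing and saving a training summary; the model, loss values, and output path are illustrative assumptions::

    import numpy as np
    import torch

    from hypothesis.summary import TrainingSummary

    model = torch.nn.Linear(10, 1)
    losses = np.array([1.00, 0.80, 0.70, 0.65])
    summary = TrainingSummary(
        model_best=model.state_dict(),
        model_final=model.state_dict(),
        epochs=4,
        epoch_best=3,
        losses_train=losses)
    print(summary)              # Human-readable overview of the run.
    summary.save("summary.th")  # Persists the summary with torch.save.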
--------------------------------------------------------------------------------
/hypothesis/util/__init__.py:
--------------------------------------------------------------------------------
1 | r""""""
2 |
3 | from hypothesis.util.loss import load_and_stack_losses
4 | from hypothesis.util.general import *
5 |
--------------------------------------------------------------------------------
/hypothesis/util/data/__init__.py:
--------------------------------------------------------------------------------
1 | r""""""
2 |
3 |
4 |
5 | from hypothesis.util.data.distribution_dataset import DistributionDataset
6 | from hypothesis.util.data.simulation_tensor_dataset import SimulationTensorDataset
7 | from hypothesis.util.data.simulator_dataset import SimulatorDataset
8 |
--------------------------------------------------------------------------------
/hypothesis/util/data/distribution_dataset.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from torch.utils.data import Dataset
4 |
5 |
6 |
7 | class DistributionDataset(Dataset):
8 | r""""""
9 |
10 | def __init__(self, distribution, size=1000000):
11 | self.distribution = distribution
12 | self.size = size
13 |
14 | def __getitem__(self, index):
15 | return self.distribution.sample()
16 |
17 | def __len__(self):
18 | return self.size
19 |
--------------------------------------------------------------------------------
/hypothesis/util/data/numpy/__init__.py:
--------------------------------------------------------------------------------
1 | from .storage import InMemoryStorage
2 | from .storage import PersistentStorage
3 | from .simulation_dataset import SimulationDataset
4 | from .util import compute_final_shape
5 | from .util import merge
6 | from .dataset import Dataset
7 |
--------------------------------------------------------------------------------
/hypothesis/util/data/numpy/dataset.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import os
3 | import torch
4 |
5 | from hypothesis.util.data.numpy import InMemoryStorage
6 | from hypothesis.util.data.numpy import PersistentStorage
7 | from torch.utils.data import Dataset as BaseDataset
8 |
9 |
10 |
11 | class Dataset(BaseDataset):
12 | r""""""
13 |
14 | def __init__(self, *paths, in_memory=True):
15 | super(Dataset, self).__init__()
16 | if in_memory:
17 | storage_type = InMemoryStorage
18 | else:
19 | storage_type = PersistentStorage
20 | if len(paths) > 1:
21 | self.storages = [storage_type(path) for path in paths]
22 | self.retriever = self._retrieve_multi_storage
23 | else:
24 | self.storage = storage_type(paths[0])
25 | self.retriever = self._retrieve_single_storage
26 | self.storages = [self.storage]
27 |
28 | def _retrieve_multi_storage(self, index):
29 | return tuple(storage[index].unsqueeze(0) for storage in self.storages)
30 |
31 | def _retrieve_single_storage(self, index):
32 | return self.storage[index]
33 |
34 | def __getitem__(self, index):
35 | return self.retriever(index)
36 |
37 | def __del__(self):
38 | if hasattr(self, "storages"):
39 | for index in range(len(self.storages)):
40 | storage = self.storages[index]
41 | storage.close()
42 | del storage
43 | self.storages[index] = None
44 | self.storages = None
45 |
46 | def __len__(self):
47 | return len(self.storages[0])
48 |
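A minimal sketch wrapping two ``.npy`` files; the file names and array shapes are illustrative assumptions::

    import numpy as np

    from hypothesis.util.data.numpy import Dataset

    np.save("inputs.npy", np.random.randn(1000, 2).astype(np.float32))
    np.save("outputs.npy", np.random.randn(1000, 10).astype(np.float32))
    dataset = Dataset("inputs.npy", "outputs.npy", in_memory=True)
    inputs, outputs = dataset[0]  # A single (input, output) pair as tensors.
    assert len(dataset) == 1000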
--------------------------------------------------------------------------------
/hypothesis/util/data/numpy/simulation_dataset.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import os
3 | import torch
4 |
5 | from torch.utils.data import Dataset
6 | from hypothesis.util.data.numpy import InMemoryStorage
7 | from hypothesis.util.data.numpy import PersistentStorage
8 |
9 |
10 |
11 | class SimulationDataset(Dataset):
12 | r""""""
13 |
14 | def __init__(self, inputs, outputs, in_memory=False):
15 | super(SimulationDataset, self).__init__()
16 | if in_memory:
17 | self.storage_inputs = InMemoryStorage(inputs)
18 | self.storage_outputs = InMemoryStorage(outputs)
19 | else:
20 | self.storage_inputs = PersistentStorage(inputs)
21 | self.storage_outputs = PersistentStorage(outputs)
22 |
23 | def __len__(self):
24 | return len(self.storage_inputs)
25 |
26 | def __del__(self):
27 | r""""""
28 | if hasattr(self, "storage_inputs") and self.storage_inputs is not None:
29 | self.storage_inputs.close()
30 | self.storage_outputs.close()
31 |
32 | def __getitem__(self, index):
33 | r""""""
34 | inputs = self.storage_inputs[index]
35 | outputs = self.storage_outputs[index]
36 |
37 | return inputs, outputs
38 |
--------------------------------------------------------------------------------
/hypothesis/util/data/numpy/storage.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import os
3 | import torch
4 |
5 |
6 | class BaseStorage:
7 |
8 | def close(self):
9 | raise NotImplementedError
10 |
11 | def __len__(self):
12 | raise NotImplementedError
13 |
14 | def __getitem__(self):
15 | raise NotImplementedError
16 |
17 | def __del__(self):
18 | self.close()
19 |
20 |
21 | class InMemoryStorage(BaseStorage):
22 |
23 | def __init__(self, path):
24 | super(InMemoryStorage, self).__init__()
25 | # Check if the specified path exists.
26 | if path is None or not os.path.exists(path):
27 | raise ValueError("The path {} does not exist.".format(path))
28 | # Storage properties.
29 | self.path = path
30 | self.data = np.load(path)
31 |
32 | def close(self):
33 | if hasattr(self, "data"):
34 | del self.data
35 | self.data = None
36 |
37 | def __len__(self):
38 | return len(self.data)
39 |
40 | def __getitem__(self, index):
41 | return torch.from_numpy(self.data[index])
42 |
43 |
44 | class PersistentStorage(BaseStorage):
45 |
46 | def __init__(self, path):
47 | super(PersistentStorage, self).__init__()
48 | # Check if the specified path exists.
49 | if path is None or not os.path.exists(path):
50 | raise ValueError("The path {} does not exist.".format(path))
51 | # Storage properties.
52 | self.path = path
53 | self.fd = open(self.path, "rb")
54 | self.header, self.offset = self._parse_header(self.fd)
55 | self.fd.close()
56 | self.fd = None
57 | self.data_shape = self.header["shape"][1:]
58 | self.data_type = self.header["descr"]
59 | self.data_dimensionality = self._compute_dimensionality(self.data_shape)
60 | self.data_bytes = int(self.data_type[-1]) * self.data_dimensionality
61 | self.size = self.header["shape"][0]
62 |
63 | def _retrieve(self, index):
64 | if self.fd is None:
65 | self.fd = open(self.path, "rb")
66 | self.fd.seek(self.offset + index * self.data_bytes)
67 | data = np.fromfile(self.fd, dtype=self.data_type, count=self.data_dimensionality)
68 |
69 | return data.reshape(self.data_shape)
70 |
71 | def close(self):
72 | if hasattr(self, "fd") and self.fd is not None:
73 | self.fd.close()
74 | self.fd = None
75 |
76 | def __getitem__(self, index):
77 | data = self._retrieve(index)
78 | item = torch.from_numpy(data)
79 |
80 | return item
81 |
82 | def __len__(self):
83 | return self.size
84 |
85 | @staticmethod
86 | def _compute_dimensionality(shape):
87 | dimensionality = 1
88 | for size in shape:
89 | dimensionality *= size
90 |
91 | return dimensionality
92 |
93 | @staticmethod
94 | def _parse_header(fd):
95 | r"""
96 | Parses the ``numpy`` header of the specified file descriptor.
97 |
98 | Note:
99 | * The first 6 bytes are a magic string: exactly \x93NUMPY.
100 | * The next 1 byte is an unsigned byte: the major version number of the file format, e.g. \x01.
101 | * The next 1 byte is an unsigned byte: the minor version number of the file format, e.g. \x00. Note: the version of the file format is not tied to the version of the numpy package.
102 | * The next 2 bytes form a little-endian unsigned short int: the length of the header data HEADER_LEN.
103 | """
104 | prefix = fd.read(10) # Read fixed header.
105 | header_offset = int.from_bytes(prefix[-2:], byteorder="little")
106 | header = eval(fd.read(header_offset))  # Note: evaluating the header is unsafe for untrusted files.
107 | header_offset += 10
108 |
109 | return header, header_offset
110 |
--------------------------------------------------------------------------------
/hypothesis/util/data/numpy/util.py:
--------------------------------------------------------------------------------
1 | import glob
2 | import numpy as np
3 | import os
4 | import tempfile as temp
5 | import torch
6 |
7 |
8 |
9 | def compute_final_shape(file_names, axis=0):
10 | num_files = len(file_names)
11 | shape = list(np.load(file_names[0]).shape)
12 | shape[axis] *= num_files
13 |
14 | return tuple(shape)
15 |
16 |
17 | def merge(input_files, output_file, tempfile=None, dtype=None, in_memory=False, axis=0):
18 | # Compute the shape of the final data file.
19 | shape = compute_final_shape(input_files, axis=axis)
20 | # Check if a dtype needs to be derived.
21 | if dtype is None:
22 | dtype = np.load(input_files[0]).dtype
23 | if in_memory:
24 | merge_in_memory(input_files, output_file, shape=shape, dtype=dtype, axis=axis)
25 | else:
26 | merge_on_disk(input_files, output_file, shape=shape, dtype=dtype, axis=axis, tempfile=tempfile)
27 |
28 |
29 | def merge_in_memory(input_files, output_file, shape, dtype=None, axis=0):
30 | datamap = np.zeros(shape, dtype=dtype)
31 | insert_data(input_files, datamap, axis=axis)
32 | np.save(output_file, datamap)
33 |
34 |
35 | def merge_on_disk(input_files, output_file, shape, dtype=None, axis=0, tempfile=None):
36 | # Check if a random temporary file needs to be allocated.
37 | if tempfile is None:
38 | _, tempfile = temp.mkstemp(dir='.')
39 | datamap = np.memmap(tempfile, dtype=dtype, mode="w+", shape=shape)
40 | insert_data(input_files, datamap, axis=axis)
41 | np.save(output_file, datamap)
42 | os.remove(tempfile)
43 |
44 |
45 | def insert_data(input_files, datamap, axis=0):
46 | index = 0
47 | if axis > 0:
48 | datamap = np.rollaxis(datamap, axis)
49 | for file_name in input_files:
50 | data = np.load(file_name)
51 | num_rows = data.shape[0]
52 | datamap[index:index + num_rows, :] = data
53 | index += num_rows
54 |
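A minimal sketch merging per-worker sample files into a single array; the block files are created here purely for illustration::

    import numpy as np

    from hypothesis.util.data.numpy import merge

    for index in range(4):
        np.save("block-{}.npy".format(index), np.random.randn(250, 2))
    blocks = ["block-{}.npy".format(index) for index in range(4)]
    merge(blocks, "merged.npy", in_memory=True)
    assert np.load("merged.npy").shape == (1000, 2)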
--------------------------------------------------------------------------------
/hypothesis/util/data/pandas/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/montefiore-institute/hypothesis/29a2b7b4649db345d43a8d3bf98aa5d817b43f1b/hypothesis/util/data/pandas/__init__.py
--------------------------------------------------------------------------------
/hypothesis/util/data/pandas/simulation_dataset.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/montefiore-institute/hypothesis/29a2b7b4649db345d43a8d3bf98aa5d817b43f1b/hypothesis/util/data/pandas/simulation_dataset.py
--------------------------------------------------------------------------------
/hypothesis/util/data/simulation_tensor_dataset.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from torch.utils.data import Dataset
4 |
5 |
6 |
7 | class SimulationTensorDataset(Dataset):
8 | r""""""
9 |
10 | def __init__(self, inputs, outputs):
11 | super(SimulationTensorDataset, self).__init__()
12 | self.inputs = inputs
13 | self.outputs = outputs
14 |
15 | def __getitem__(self, index):
16 | return self.inputs[index], self.outputs[index]
17 |
18 | def __len__(self):
19 | return self.inputs.shape[0]
20 |
--------------------------------------------------------------------------------
/hypothesis/util/data/simulator_dataset.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from hypothesis.simulation import Simulator
4 | from torch.utils.data import Dataset
5 |
6 |
7 |
8 | class SimulatorDataset(Dataset):
9 | r"""
10 |
11 | Todo:
12 | Write docs.
13 | """
14 |
15 | def __init__(self, simulator, prior, size=1000000):
16 | super(SimulatorDataset, self).__init__()
17 | self.prior = prior
18 | self.simulator = simulator
19 | self.size = int(size)
20 |
21 | def __getitem__(self, index):
22 | r"""
23 | Todo:
24 | Write docs.
25 | """
26 | passed = False
27 | while not passed:
28 | try:
29 | inputs = self.prior.sample(torch.Size([1])).unsqueeze(0)
30 | outputs = self.simulator(inputs)
31 | passed = True
32 | except Exception as e:
33 | print(e)
34 |
35 | return inputs, outputs
36 |
37 | def __len__(self):
38 | r"""
39 | Todo:
40 | Write docs.
41 | """
42 | return self.size
43 |
--------------------------------------------------------------------------------
/hypothesis/util/general.py:
--------------------------------------------------------------------------------
1 | r"""General utilities for Hypothesis."""
2 |
3 | import hypothesis
4 | import numpy as np
5 | import torch
6 |
7 |
8 |
9 | def is_iterable(item):
10 | return hasattr(item, "__getitem__")
11 |
--------------------------------------------------------------------------------
/hypothesis/util/loss.py:
--------------------------------------------------------------------------------
1 | r""""""
2 |
3 | import glob
4 | import numpy as np
5 | import torch
6 |
7 |
8 |
9 | def load_losses(pattern, format="numpy"):
10 | r""""""
11 | formats = {
12 | "numpy": load_losses_numpy,
13 | "torch": load_losses_torch}
14 | # Check if the specified format is available.
15 | if format not in formats.keys():
16 | raise ValueError("The format", format, "is not supported.")
17 | paths = glob.glob(pattern)
18 | with torch.no_grad():
19 | losses = formats[format](paths)
20 |
21 | return losses
22 |
23 |
24 | def load_losses_numpy(paths):
25 | r""""""
26 | losses = []
27 |
28 | for path in paths:
29 | losses.append(torch.from_numpy(np.load(path)).view(1, -1))
30 |
31 | return losses
32 |
33 |
34 | def load_losses_torch(paths):
35 | r""""""
36 | losses = []
37 |
38 | for path in paths:
39 | losses.append(torch.load(path, map_location="cpu").view(1, -1))
40 |
41 | return losses
42 |
43 |
44 | def stack_losses(losses):
45 | r""""""
46 | return torch.cat(losses, dim=0)
47 |
48 |
49 | def load_and_stack_losses(pattern, format="numpy"):
50 | r""""""
51 | with torch.no_grad():
52 | losses = load_losses(pattern, format=format)
53 | stacked = stack_losses(losses)
54 |
55 | return stacked
56 |
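A minimal sketch aggregating loss curves saved as ``.npy`` files by several runs; the file names and curves are illustrative assumptions::

    import numpy as np

    from hypothesis.util import load_and_stack_losses

    for run in range(3):
        np.save("losses-{}.npy".format(run), np.random.rand(100))
    losses = load_and_stack_losses("losses-*.npy")  # Shape: (3, 100).
    average_curve = losses.mean(dim=0)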
--------------------------------------------------------------------------------
/hypothesis/visualization/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/montefiore-institute/hypothesis/29a2b7b4649db345d43a8d3bf98aa5d817b43f1b/hypothesis/visualization/__init__.py
--------------------------------------------------------------------------------
/hypothesis/visualization/mcmc.py:
--------------------------------------------------------------------------------
1 | r""""""
2 |
3 | import corner
4 | import matplotlib.pyplot as plt
5 | import numpy as np
6 | import torch
7 |
8 | from hypothesis.summary.mcmc import Chain
9 | from hypothesis.visualization.util import make_square
10 | from hypothesis.visualization.util import set_aspect
11 |
12 |
13 |
17 |
18 | def plot_density(chain):
19 | raise NotImplementedError
20 |
21 |
22 | def plot_autocorrelation(chain, interval=2, max_lag=100, radius=1.1):
23 | if max_lag is None:
24 | max_lag = chain.size()
25 | autocorrelations = chain.autocorrelations()[:max_lag]
26 | lags = np.arange(0, max_lag, interval)
27 | autocorrelations = autocorrelations[lags]
28 | plt.ylim([-radius, radius])
29 | center = .5
30 | for index, lag in enumerate(lags):
31 | autocorrelation = autocorrelations[index]
32 | plt.axvline(lag, center, center + autocorrelation / 2 / radius, c="black")
33 | plt.xlabel("Lag")
34 | plt.ylabel("Autocorrelation")
35 | plt.minorticks_on()
36 | plt.axhline(0, linestyle="--", c="black", alpha=.75, lw=2)
37 | make_square(plt.gca())
38 | figure = plt.gcf()
39 |
40 | return figure
41 |
42 |
43 | def plot_trace(chain, parameter_index=None):
44 | nrows = chain.dimensionality()
45 | figure, rows = plt.subplots(nrows, 2, sharey=False, sharex=False, figsize=(2 * 7, 2))
46 | num_samples = chain.size()
47 | def display(ax_trace, ax_density, theta_index=1):
48 | # Trace
49 | ax_trace.minorticks_on()
50 | ax_trace.plot(range(num_samples), chain.samples.numpy(), color="black", lw=2)
51 | ax_trace.set_xlim([0, num_samples])
52 | ax_trace.set_xticks([])
53 | ax_trace.set_ylabel(r"$\theta_" + str(theta_index) + "$")
54 | limits = ax_trace.get_ylim()
55 | # Density
56 | ax_density.minorticks_on()
57 | ax_density.hist(chain.samples.numpy(), bins=50, lw=2, color="black", histtype="step", density=True)
58 | ax_density.yaxis.tick_right()
59 | ax_density.yaxis.set_label_position("right")
60 | ax_density.set_ylabel("Probability mass function")
61 | ax_density.set_xlabel(r"$\theta_" + str(theta_index) + "$")
62 | ax_density.set_xlim(limits)
63 | # Aspects
64 | make_square(ax_density)
65 | ax_trace.set_aspect("auto")
66 | ax_trace.set_position([0, 0, .7, 1])
67 | ax_density.set_position([.28, 0, 1, 1])
68 | if nrows > 1:
69 | for index, (ax_trace, ax_density) in enumerate(rows):
70 | display(ax_trace, ax_density)
71 | else:
72 | ax_trace, ax_density = rows
73 | display(ax_trace, ax_density)
74 |
75 | return figure
76 |
--------------------------------------------------------------------------------
/hypothesis/visualization/nn.py:
--------------------------------------------------------------------------------
1 | r""""""
2 |
3 | import matplotlib.pyplot as plt
4 | import numpy as np
5 | import torch
6 |
7 | from hypothesis.visualization.util import make_square
8 | from hypothesis.visualization.util import set_aspect
9 |
10 |
11 |
12 | @torch.no_grad()
13 | def stack(paths):
14 | data = []
15 | for path in paths:
16 | data.append(np.load(path).reshape(1, -1))
17 |
18 | return torch.from_numpy(np.vstack(data))
19 |
20 |
21 |
22 | @torch.no_grad()
23 | def plot(paths, title=None):
24 | # Prepare the data
25 | data = stack(paths)
26 | figure, ax = plt.subplots(1)
27 | mean = data.mean(dim=0)
28 | std = data.std(dim=0)
29 | # Plot the data
30 | epochs = np.arange(1, len(mean) + 1)
31 | ax.set_title(title)
32 | ax.plot(epochs, mean, lw=2, color="black")
33 | ax.fill_between(epochs, mean - std, mean + std, color="black", alpha=0.1)
34 | ax.minorticks_on()
35 | ax.set_xlabel("Epochs")
36 | ax.set_ylabel("Loss")
37 | make_square(ax)
38 |
39 | return figure
40 |
--------------------------------------------------------------------------------
/hypothesis/visualization/util.py:
--------------------------------------------------------------------------------
1 | r""""""
2 |
3 | import hypothesis
4 | import matplotlib.pyplot as plt
5 |
6 |
7 |
8 | def make_square(ax):
9 | set_aspect(ax, 1)
10 |
11 |
12 | def set_aspect(ax, aspect):
13 | r""""""
14 | aspect = float(aspect)
15 | x0, x1 = ax.get_xlim()
16 | y0, y1 = ax.get_ylim()
17 | aspect = (x1 - x0) / (aspect * (y1 - y0))
18 | ax.set_aspect(aspect)
19 |
--------------------------------------------------------------------------------
/hypothesis/workflow/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/montefiore-institute/hypothesis/29a2b7b4649db345d43a8d3bf98aa5d817b43f1b/hypothesis/workflow/__init__.py
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | # ```hypothesis``` is free software; you can redistribute it and/or modify it
2 | # under the terms of the Revised BSD License; see LICENSE file for more details.
3 |
4 | """```hypothesis``` setup file."""
5 |
6 | import os
7 | import re
8 | import sys
9 |
10 | from setuptools import find_packages
11 | from setuptools import setup
12 |
13 | """Configuration"""
14 | include_extensions=True
15 | include_benchmarks=True
16 |
17 |
18 | exclusions=["doc", "examples"]
19 | if not include_extensions:
20 | exclusions.append("hypothesis/extension")
21 | if not include_benchmarks:
22 | exclusions.append("hypothesis/benchmark")
23 |
24 | packages = find_packages(exclude=exclusions)
25 |
26 | # Get the version string of hypothesis.
27 | with open(os.path.join("hypothesis", "__init__.py"), "rt") as fh:
28 | _version = re.search(
29 | '__version__\s*=\s*"(?P<version>.*)"\n',
30 | fh.read()
31 | ).group("version")
32 |
33 | # Module requirements.
34 | _install_requires = [
35 | "argparse",
36 | "corner",
37 | "numpy",
38 | "pandas",
39 | "scipy",
40 | "sklearn",
41 | "torch"
42 | ]
43 |
44 | _parameters = {
45 | "install_requires": _install_requires,
46 | "license": "BSD",
47 | "name": "hypothesis",
48 | "packages": packages,
49 | "platform": "any",
50 | "url": "https://github.com/montefiore-ai/hypothesis/",
51 | "version": _version
52 | }
53 |
54 | setup(**_parameters)
55 |
--------------------------------------------------------------------------------
/tutorials/README.md:
--------------------------------------------------------------------------------
1 | # Tutorials
2 |
--------------------------------------------------------------------------------