├── .coveragerc
├── .gitignore
├── .travis.yml
├── LICENSE
├── README.md
├── bayes_opt
│   ├── __init__.py
│   ├── bayesian_optimization.py
│   ├── domain_reduction.py
│   ├── event.py
│   ├── logger.py
│   ├── observer.py
│   ├── target_space.py
│   └── util.py
├── examples
│   ├── advanced-tour.ipynb
│   ├── async_optimization.py
│   ├── basic-tour.ipynb
│   ├── bayesian_optimization.gif
│   ├── bo_example.png
│   ├── domain_reduction.ipynb
│   ├── exploitation_vs_exploration.ipynb
│   ├── func.png
│   ├── sdr.png
│   ├── sklearn_example.py
│   └── visualization.ipynb
├── pytest.ini
├── setup.cfg
├── setup.py
└── tests
    ├── test_acceptance.py
    ├── test_bayesian_optimization.py
    ├── test_logs.json
    ├── test_observer.py
    ├── test_queue.py
    ├── test_seq_domain_red.py
    ├── test_target_space.py
    └── test_util.py
/.coveragerc:
--------------------------------------------------------------------------------
1 | [run]
2 | branch = True
3 | source = bayes_opt
4 |
5 | [report]
6 | exclude_lines =
7 | pragma: no cover
8 | .* # pragma: no cover
9 | .* # nocover
10 | def __repr__
11 | raise AssertionError
12 | raise NotImplementedError
13 | if 0:
14 | verbose = .*
15 | raise
16 | pass
17 | if __name__ == .__main__.:
18 | print(.*)
19 |
20 | omit =
21 | */setup.py
22 | examples/*
23 | tests/*
24 | bayes_opt/logger.py
25 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .ipynb_checkpoints
2 | *.pyc
3 | *.egg-info/
4 | build/
5 | dist/
6 | scratch/
7 | .idea/
8 | .DS_Store
9 | bo_eg*.png
10 | gif/
11 |
12 | # Unit test / coverage reports
13 | htmlcov/
14 | .tox/
15 | .coverage
16 | .coverage.*
17 | .cache
18 | nosetests.xml
19 | coverage.xml
20 | *,cover
21 | .hypothesis/
22 |
23 | # Environments
24 | .env
25 | .venv
26 | env/
27 | venv/
28 | ENV/
29 | env.bak/
30 | venv.bak/
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: python
2 | sudo: false
3 |
4 | cache:
5 | apt: true
6 | directories:
7 | - $HOME/.cache/pip
8 | - $HOME/download
9 | python:
10 | - "2.7"
11 | - "3.4"
12 | - "3.5"
13 | - "3.6"
14 | before_install:
15 | - pip install pip -U
16 | - pip install pytest -U
17 | - pip install pytest-cov -U
18 | - pip install codecov -U
19 | #- pip install xdoctest -U
20 | install:
21 | #- travis_retry python setup.py build develop
22 | - travis_retry pip install -e .
23 | script:
24 | - travis_wait pytest --cov-config .coveragerc --cov-report html --cov=bayes_opt
25 | #-p no:doctest --xdoctest
26 | after_success:
27 | #- coveralls || echo "Coveralls upload failed"
28 | - codecov
33 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2014 Fernando M. F. Nogueira
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
6 |
7 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
8 |
9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | # Bayesian Optimization
6 |
7 | [Travis CI](https://travis-ci.org/fmfn/BayesianOptimization)
8 | [Codecov](https://codecov.io/github/fmfn/BayesianOptimization?branch=master)
9 | [PyPI](https://pypi.python.org/pypi/bayesian-optimization)
10 |
11 | Pure Python implementation of Bayesian global optimization with Gaussian
12 | processes.
13 |
14 | * PyPI (pip):
15 |
16 | ```console
17 | $ pip install bayesian-optimization
18 | ```
19 |
20 | * Conda from conda-forge channel:
21 |
22 | ```console
23 | $ conda install -c conda-forge bayesian-optimization
24 | ```
25 |
26 | This is a constrained global optimization package built upon Bayesian inference
27 | and Gaussian processes, which attempts to find the maximum value of an unknown
28 | function in as few iterations as possible. This technique is particularly
29 | suited for the optimization of high-cost functions and for situations where the
30 | balance between exploration and exploitation is important.
31 |
32 | ## Quick Start
33 | See below for a quick tour of the basics of the Bayesian Optimization package. More detailed information, other advanced features, and tips on usage/implementation can be found in the [examples](https://github.com/fmfn/BayesianOptimization/tree/master/examples) folder. I suggest that you:
34 | - Follow the
35 | [basic tour notebook](https://github.com/fmfn/BayesianOptimization/blob/master/examples/basic-tour.ipynb)
36 | to learn how to use the package's most important features.
37 | - Take a look at the
38 | [advanced tour notebook](https://github.com/fmfn/BayesianOptimization/blob/master/examples/advanced-tour.ipynb)
39 | to learn how to make the package more flexible, how to deal with categorical parameters, how to use observers, and more.
40 | - Check out this
41 | [notebook](https://github.com/fmfn/BayesianOptimization/blob/master/examples/visualization.ipynb)
42 | with a step-by-step visualization of how this method works.
43 | - Explore this [notebook](https://github.com/fmfn/BayesianOptimization/blob/master/examples/exploitation_vs_exploration.ipynb)
44 | exemplifying the balance between exploration and exploitation and how to
45 | control it.
46 | - Go over this [script](https://github.com/fmfn/BayesianOptimization/blob/master/examples/sklearn_example.py)
47 | for examples of how to tune parameters of machine learning models using cross-validation and Bayesian optimization.
48 | - Explore the [domain reduction notebook](https://github.com/fmfn/BayesianOptimization/blob/master/examples/domain_reduction.ipynb) to learn more about how search can be sped up by dynamically changing parameters' bounds.
49 | - Finally, take a look at this [script](https://github.com/fmfn/BayesianOptimization/blob/master/examples/async_optimization.py)
50 | for ideas on how to implement Bayesian optimization in a distributed fashion using this package.
51 |
52 |
53 | ## How does it work?
54 |
55 | Bayesian optimization works by constructing a posterior distribution of functions (Gaussian process) that best describes the function you want to optimize. As the number of observations grows, the posterior distribution improves, and the algorithm becomes more certain of which regions in parameter space are worth exploring and which are not, as seen in the picture below.
56 |
57 | ![BayesianOptimization in action](./examples/bo_example.png)
58 |
59 | As you iterate over and over, the algorithm balances its need for exploration and exploitation, taking into account what it knows about the target function. At each step a Gaussian process is fitted to the known samples (points previously explored), and the posterior distribution, combined with an exploration strategy (such as UCB (Upper Confidence Bound) or EI (Expected Improvement)), is used to determine the next point that should be explored (see the gif below).
60 |
61 | ![BayesianOptimization in action](./examples/bayesian_optimization.gif)
62 |
63 | This process is designed to minimize the number of steps required to find a combination of parameters that is close to the optimal combination. To do so, this method uses a proxy optimization problem (finding the maximum of the acquisition function) that, albeit still a hard problem, is cheaper in the computational sense, and one for which common tools can be employed. Therefore Bayesian optimization is best suited for situations where sampling the function to be optimized is a very expensive endeavor. See the references for a proper discussion of this method.
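
As a minimal sketch (assuming an `optimizer` instance built as in the Quick Start below; the `kappa` and `xi` values here are purely illustrative), the exploration strategy can be chosen through the `acq`, `kappa`, and `xi` arguments of `maximize`:

```python
# Upper Confidence Bound: a larger kappa favors exploration
optimizer.maximize(init_points=2, n_iter=10, acq="ucb", kappa=10.0)

# Expected Improvement: a small xi favors exploiting promising regions
optimizer.maximize(init_points=0, n_iter=10, acq="ei", xi=0.01)
```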
64 |
65 | This project is under active development. If you find a bug, or anything that
66 | needs correction, please let me know.
67 |
68 |
69 | Basic tour of the Bayesian Optimization package
70 | ===============================================
71 |
72 | ## 1. Specifying the function to be optimized
73 |
74 | This is a function optimization package; therefore, the first and most important ingredient is, of course, the function to be optimized.
75 |
76 | **DISCLAIMER:** We know exactly how the output of the function below depends on its parameters. Obviously this is just an example, and you shouldn't expect to know it in a real scenario. However, it should be clear that you don't need to. All you need in order to use this package (and more generally, this technique) is a function `f` that takes a known set of parameters and outputs a real number.
77 |
78 |
79 | ```python
80 | def black_box_function(x, y):
81 | """Function with unknown internals we wish to maximize.
82 |
83 | This is just serving as an example, for all intents and
84 | purposes think of the internals of this function, i.e.: the process
85 | which generates its output values, as unknown.
86 | """
87 | return -x ** 2 - (y - 1) ** 2 + 1
88 | ```
89 |
90 | ## 2. Getting Started
91 |
92 | All we need to get started is to instantiate a `BayesianOptimization` object specifying a function to be optimized `f`, and its parameters with their corresponding bounds, `pbounds`. This is a constrained optimization technique, so you must specify the minimum and maximum values that can be probed for each parameter in order for it to work.
93 |
94 |
95 | ```python
96 | from bayes_opt import BayesianOptimization
97 |
98 | # Bounded region of parameter space
99 | pbounds = {'x': (2, 4), 'y': (-3, 3)}
100 |
101 | optimizer = BayesianOptimization(
102 | f=black_box_function,
103 | pbounds=pbounds,
104 | random_state=1,
105 | )
106 | ```
107 |
108 | The `BayesianOptimization` object will work out of the box without much tuning. The main method you should be aware of is `maximize`, which does exactly what you think it does.
109 |
110 | There are many parameters you can pass to `maximize`; the most important ones are:
111 | - `n_iter`: How many steps of Bayesian optimization you want to perform. The more steps, the more likely you are to find a good maximum.
112 | - `init_points`: How many steps of **random** exploration you want to perform. Random exploration can help by diversifying the exploration space.
113 |
114 |
115 | ```python
116 | optimizer.maximize(
117 | init_points=2,
118 | n_iter=3,
119 | )
120 | ```
121 |
122 | | iter | target | x | y |
123 | -------------------------------------------------
124 | | 1 | -7.135 | 2.834 | 1.322 |
125 | | 2 | -7.78 | 2.0 | -1.186 |
126 | | 3 | -19.0 | 4.0 | 3.0 |
127 | | 4 | -16.3 | 2.378 | -2.413 |
128 | | 5 | -4.441 | 2.105 | -0.005822 |
129 | =================================================
130 |
131 |
132 | The best combination of parameters and target value found can be accessed via the property `optimizer.max`.
133 |
134 |
135 | ```python
136 | print(optimizer.max)
137 | >>> {'target': -4.441293113411222, 'params': {'y': -0.005822117636089974, 'x': 2.104665051994087}}
138 | ```
139 |
140 |
141 | The list of all parameters probed and their corresponding target values is available via the property `optimizer.res`.
142 |
143 |
144 | ```python
145 | for i, res in enumerate(optimizer.res):
146 | print("Iteration {}: \n\t{}".format(i, res))
147 |
148 | >>> Iteration 0:
149 | >>> {'target': -7.135455292718879, 'params': {'y': 1.3219469606529488, 'x': 2.8340440094051482}}
150 | >>> Iteration 1:
151 | >>> {'target': -7.779531005607566, 'params': {'y': -1.1860045642089614, 'x': 2.0002287496346898}}
152 | >>> Iteration 2:
153 | >>> {'target': -19.0, 'params': {'y': 3.0, 'x': 4.0}}
154 | >>> Iteration 3:
155 | >>> {'target': -16.29839645063864, 'params': {'y': -2.412527795983739, 'x': 2.3776144540856503}}
156 | >>> Iteration 4:
157 | >>> {'target': -4.441293113411222, 'params': {'y': -0.005822117636089974, 'x': 2.104665051994087}}
158 | ```
159 |
160 |
161 | ### 2.1 Changing bounds
162 |
163 | During the optimization process you may realize the bounds chosen for some parameters are not adequate. For these situations you can invoke the method `set_bounds` to alter them. You can pass any combination of **existing** parameters and their associated new bounds.
164 |
165 |
166 | ```python
167 | optimizer.set_bounds(new_bounds={"x": (-2, 3)})
168 |
169 | optimizer.maximize(
170 | init_points=0,
171 | n_iter=5,
172 | )
173 | ```
174 |
175 | | iter | target | x | y |
176 | -------------------------------------------------
177 | | 6 | -5.145 | 2.115 | -0.2924 |
178 | | 7 | -5.379 | 2.337 | 0.04124 |
179 | | 8 | -3.581 | 1.874 | -0.03428 |
180 | | 9 | -2.624 | 1.702 | 0.1472 |
181 | | 10 | -1.762 | 1.442 | 0.1735 |
182 | =================================================
183 |
184 | ### 2.2 Sequential Domain Reduction
185 |
186 | Sometimes the initial boundaries specified for a problem are too wide, and adding points to improve the response surface in regions of the solution domain is extraneous. Other times the cost function is very expensive to compute, and minimizing the number of calls is extremely beneficial.
187 |
188 | When it's worthwhile to converge on an optimal point quickly rather than to try to find the global optimum, contracting the domain around the current optimal value as the search progresses can speed up the search considerably. Using the `SequentialDomainReductionTransformer`, the bounds of the problem can be panned and zoomed dynamically in an attempt to improve convergence.
189 |
190 | ![sequential domain reduction](./examples/sdr.png)
191 |
192 | An example of using the `SequentialDomainReductionTransformer` is shown in the [domain reduction notebook](https://github.com/fmfn/BayesianOptimization/blob/master/examples/domain_reduction.ipynb). More information about this method can be found in the paper ["On the robustness of a simple domain reduction scheme for simulation‐based optimization"](http://www.truegrid.com/srsm_revised.pdf).
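
As a minimal sketch (mirroring the notebook; the bounds and iteration counts here are purely illustrative), the transformer is passed to the optimizer via the `bounds_transformer` argument:

```python
from bayes_opt import BayesianOptimization, SequentialDomainReductionTransformer

bounds_transformer = SequentialDomainReductionTransformer()

# The transformer pans and contracts pbounds around the incumbent optimum
mutating_optimizer = BayesianOptimization(
    f=black_box_function,
    pbounds={'x': (-10, 10), 'y': (-10, 10)},
    random_state=1,
    bounds_transformer=bounds_transformer,
)
mutating_optimizer.maximize(init_points=2, n_iter=10)
```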
193 |
194 | ## 3. Guiding the optimization
195 |
196 | It is often the case that we have an idea of regions of the parameter space where the maximum of our function might lie. For these situations the `BayesianOptimization` object allows the user to specify points to be probed. By default these will be explored lazily (`lazy=True`), meaning these points will be evaluated only the next time you call `maximize`. This probing process happens before the Gaussian process takes over.
197 |
198 | Parameters can be passed as dictionaries or as an iterable.
199 |
200 | ```python
201 | optimizer.probe(
202 | params={"x": 0.5, "y": 0.7},
203 | lazy=True,
204 | )
205 |
206 | optimizer.probe(
207 | params=[-0.3, 0.1],
208 | lazy=True,
209 | )
210 |
211 | # Will probe only the two points specified above
212 | optimizer.maximize(init_points=0, n_iter=0)
213 | ```
214 |
215 | | iter | target | x | y |
216 | -------------------------------------------------
217 | | 11 | 0.66 | 0.5 | 0.7 |
218 | | 12 | 0.1 | -0.3 | 0.1 |
219 | =================================================
220 |
221 |
222 | ## 4. Saving, loading and restarting
223 |
224 | By default you can follow the progress of your optimization by setting `verbose>0` when instantiating the `BayesianOptimization` object. If you need more control over logging/alerting you will need to use an observer. For more information about observers check out the advanced tour notebook. Here we will only see how to use the native `JSONLogger` object to save progress to and load it from files.
225 |
226 | ### 4.1 Saving progress
227 |
228 |
229 | ```python
230 | from bayes_opt.logger import JSONLogger
231 | from bayes_opt.event import Events
232 | ```
233 |
234 | The observer paradigm works by:
235 | 1. Instantiating an observer object.
236 | 2. Tying the observer object to a particular event fired by an optimizer.
237 |
238 | The `BayesianOptimization` object fires a number of internal events during optimization; in particular, every time it probes the function and obtains a new parameter-target combination it will fire an `Events.OPTIMIZATION_STEP` event, which our logger will listen to.
239 |
240 | **Caveat:** The logger will not look back at previously probed points.
241 |
242 |
243 | ```python
244 | logger = JSONLogger(path="./logs.json")
245 | optimizer.subscribe(Events.OPTIMIZATION_STEP, logger)
246 |
247 | # Results will be saved in ./logs.json
248 | optimizer.maximize(
249 | init_points=2,
250 | n_iter=3,
251 | )
252 | ```
253 |
254 | By default the previous data in the JSON file is removed. If you want to keep working with the same logger, the `reset` parameter in `JSONLogger` should be set to False.
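
For instance (a minimal sketch; the path is illustrative):

```python
# Append to an existing log file instead of starting it over
logger = JSONLogger(path="./logs.json", reset=False)
optimizer.subscribe(Events.OPTIMIZATION_STEP, logger)
```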
255 |
256 | ### 4.2 Loading progress
257 |
258 | Naturally, if you stored progress you will be able to load it into a new instance of `BayesianOptimization`. The easiest way to do this is by invoking the `load_logs` function from the `util` submodule.
259 |
260 |
261 | ```python
262 | from bayes_opt.util import load_logs
263 |
264 |
265 | new_optimizer = BayesianOptimization(
266 | f=black_box_function,
267 | pbounds={"x": (-2, 2), "y": (-2, 2)},
268 | verbose=2,
269 | random_state=7,
270 | )
271 |
272 | # New optimizer is loaded with previously seen points
273 | load_logs(new_optimizer, logs=["./logs.json"]);
274 | ```
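
The loaded points are immediately visible to the new optimizer, which can then resume the search where the logs left off (a sketch; the iteration count is illustrative):

```python
print(len(new_optimizer.space))  # number of previously seen points

# Resume optimizing, seeded by the loaded observations
new_optimizer.maximize(init_points=0, n_iter=10)
```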
275 |
276 | ## Next Steps
277 |
278 | This introduction covered the most basic functionality of the package. Check out the [basic-tour](https://github.com/fmfn/BayesianOptimization/blob/master/examples/basic-tour.ipynb) and [advanced-tour](https://github.com/fmfn/BayesianOptimization/blob/master/examples/advanced-tour.ipynb) notebooks in the examples folder, where you will find detailed explanations and other more advanced functionality. Also, browse the examples folder for implementation tips and ideas.
279 |
280 | Installation
281 | ============
282 |
284 |
285 | The latest release can be obtained in two ways:
286 |
287 | * With PyPI (pip):
288 |
289 | pip install bayesian-optimization
290 |
291 | * With conda (from conda-forge channel):
292 |
293 | conda install -c conda-forge bayesian-optimization
294 |
295 | The bleeding edge version can be installed with:
296 |
297 | pip install git+https://github.com/fmfn/BayesianOptimization.git
298 |
299 | If you prefer, you can clone the repository and run the setup.py file. Use the following
300 | commands to get a copy from GitHub and install all dependencies:
301 |
302 | git clone https://github.com/fmfn/BayesianOptimization.git
303 | cd BayesianOptimization
304 | python setup.py install
305 |
306 | Citation
307 | ============
308 |
309 | If you used this package in your research and are interested in citing it, here's how:
310 |
311 | ```
312 | @Misc{,
313 | author = {Fernando Nogueira},
314 | title = {{Bayesian Optimization}: Open source constrained global optimization tool for {Python}},
315 | year = {2014--},
316 |   url = {https://github.com/fmfn/BayesianOptimization}
317 | }
318 | ```
319 |
320 | # Dependencies
321 | * NumPy
322 | * SciPy
323 | * scikit-learn
324 |
325 | # References:
326 | * http://papers.nips.cc/paper/4522-practical-bayesian-optimization-of-machine-learning-algorithms.pdf
327 | * http://arxiv.org/pdf/1012.2599v1.pdf
328 | * http://www.gaussianprocess.org/gpml/
329 | * https://www.youtube.com/watch?v=vz3D36VXefI&index=10&list=PLE6Wd9FR--EdyJ5lbFl8UuGjecvVw66F6
330 |
--------------------------------------------------------------------------------
/bayes_opt/__init__.py:
--------------------------------------------------------------------------------
1 | from .bayesian_optimization import BayesianOptimization, Events
2 | from .domain_reduction import SequentialDomainReductionTransformer
3 | from .util import UtilityFunction
4 | from .logger import ScreenLogger, JSONLogger
5 |
6 | __all__ = [
7 | "BayesianOptimization",
8 | "UtilityFunction",
9 | "Events",
10 | "ScreenLogger",
11 | "JSONLogger",
12 | "SequentialDomainReductionTransformer",
13 | ]
14 |
--------------------------------------------------------------------------------
/bayes_opt/bayesian_optimization.py:
--------------------------------------------------------------------------------
1 | import warnings
2 |
3 | from .target_space import TargetSpace
4 | from .event import Events, DEFAULT_EVENTS
5 | from .logger import _get_default_logger
6 | from .util import UtilityFunction, acq_max, ensure_rng
7 |
8 | from sklearn.gaussian_process.kernels import Matern
9 | from sklearn.gaussian_process import GaussianProcessRegressor
10 |
11 |
12 | class Queue:
13 | def __init__(self):
14 | self._queue = []
15 |
16 | @property
17 | def empty(self):
18 | return len(self) == 0
19 |
20 | def __len__(self):
21 | return len(self._queue)
22 |
23 | def __next__(self):
24 | if self.empty:
25 | raise StopIteration("Queue is empty, no more objects to retrieve.")
26 | obj = self._queue[0]
27 | self._queue = self._queue[1:]
28 | return obj
29 |
30 | def next(self):
31 | return self.__next__()
32 |
33 | def add(self, obj):
34 | """Add object to end of queue."""
35 | self._queue.append(obj)
36 |
37 |
38 | class Observable(object):
39 | """
40 |
41 | Inspired/Taken from
42 | https://www.protechtraining.com/blog/post/879#simple-observer
43 | """
44 | def __init__(self, events):
45 | # maps event names to subscribers
46 | # str -> dict
47 | self._events = {event: dict() for event in events}
48 |
49 | def get_subscribers(self, event):
50 | return self._events[event]
51 |
52 | def subscribe(self, event, subscriber, callback=None):
53 | if callback is None:
54 | callback = getattr(subscriber, 'update')
55 | self.get_subscribers(event)[subscriber] = callback
56 |
57 | def unsubscribe(self, event, subscriber):
58 | del self.get_subscribers(event)[subscriber]
59 |
60 | def dispatch(self, event):
61 | for _, callback in self.get_subscribers(event).items():
62 | callback(event, self)
63 |
64 |
65 | class BayesianOptimization(Observable):
66 | """
67 |     This class takes the function to optimize as well as the parameter bounds
68 |     in order to find which values for the parameters yield the maximum value
69 |     using Bayesian optimization.
70 |
71 | Parameters
72 | ----------
73 | f: function
74 | Function to be maximized.
75 |
76 | pbounds: dict
77 |         Dictionary with parameter names as keys and a tuple with minimum
78 | and maximum values.
79 |
80 | random_state: int or numpy.random.RandomState, optional(default=None)
81 | If the value is an integer, it is used as the seed for creating a
82 |         numpy.random.RandomState. Otherwise the provided random state is used.
83 | When set to None, an unseeded random state is generated.
84 |
85 | verbose: int, optional(default=2)
86 | The level of verbosity.
87 |
88 | bounds_transformer: DomainTransformer, optional(default=None)
89 | If provided, the transformation is applied to the bounds.
90 |
91 | Methods
92 | -------
93 | probe()
94 | Evaluates the function on the given points.
95 | Can be used to guide the optimizer.
96 |
97 | maximize()
98 | Tries to find the parameters that yield the maximum value for the
99 | given function.
100 |
101 | set_bounds()
102 | Allows changing the lower and upper searching bounds
103 | """
104 | def __init__(self, f, pbounds, random_state=None, verbose=2,
105 | bounds_transformer=None):
106 | self._random_state = ensure_rng(random_state)
107 |
108 | # Data structure containing the function to be optimized, the bounds of
109 | # its domain, and a record of the evaluations we have done so far
110 | self._space = TargetSpace(f, pbounds, random_state)
111 |
112 | self._queue = Queue()
113 |
114 | # Internal GP regressor
115 | self._gp = GaussianProcessRegressor(
116 | kernel=Matern(nu=2.5),
117 | alpha=1e-6,
118 | normalize_y=True,
119 | n_restarts_optimizer=5,
120 | random_state=self._random_state,
121 | )
122 |
123 | self._verbose = verbose
124 | self._bounds_transformer = bounds_transformer
125 | if self._bounds_transformer:
126 | try:
127 | self._bounds_transformer.initialize(self._space)
128 | except (AttributeError, TypeError):
129 | raise TypeError('The transformer must be an instance of '
130 | 'DomainTransformer')
131 |
132 | super(BayesianOptimization, self).__init__(events=DEFAULT_EVENTS)
133 |
134 | @property
135 | def space(self):
136 | return self._space
137 |
138 | @property
139 | def max(self):
140 | return self._space.max()
141 |
142 | @property
143 | def res(self):
144 | return self._space.res()
145 |
146 | def register(self, params, target):
147 | """Expect observation with known target"""
148 | self._space.register(params, target)
149 | self.dispatch(Events.OPTIMIZATION_STEP)
150 |
151 | def probe(self, params, lazy=True):
152 | """
153 | Evaluates the function on the given points. Useful to guide the optimizer.
154 |
155 | Parameters
156 | ----------
157 | params: dict or list
158 | The parameters where the optimizer will evaluate the function.
159 |
160 | lazy: bool, optional(default=True)
161 | If True, the optimizer will evaluate the points when calling
162 |             maximize(). Otherwise the point is evaluated immediately.
163 | """
164 | if lazy:
165 | self._queue.add(params)
166 | else:
167 | self._space.probe(params)
168 | self.dispatch(Events.OPTIMIZATION_STEP)
169 |
170 | def suggest(self, utility_function):
171 | """Most promising point to probe next"""
172 | if len(self._space) == 0:
173 | return self._space.array_to_params(self._space.random_sample())
174 |
175 | # Sklearn's GP throws a large number of warnings at times, but
176 | # we don't really need to see them here.
177 | with warnings.catch_warnings():
178 | warnings.simplefilter("ignore")
179 | self._gp.fit(self._space.params, self._space.target)
180 |
181 | # Finding argmax of the acquisition function.
182 | suggestion = acq_max(
183 | ac=utility_function.utility,
184 | gp=self._gp,
185 | y_max=self._space.target.max(),
186 | bounds=self._space.bounds,
187 | random_state=self._random_state
188 | )
189 |
190 | return self._space.array_to_params(suggestion)
191 |
192 | def _prime_queue(self, init_points):
193 | """Make sure there's something in the queue at the very beginning."""
194 | if self._queue.empty and self._space.empty:
195 | init_points = max(init_points, 1)
196 |
197 | for _ in range(init_points):
198 | self._queue.add(self._space.random_sample())
199 |
200 | def _prime_subscriptions(self):
201 | if not any([len(subs) for subs in self._events.values()]):
202 | _logger = _get_default_logger(self._verbose)
203 | self.subscribe(Events.OPTIMIZATION_START, _logger)
204 | self.subscribe(Events.OPTIMIZATION_STEP, _logger)
205 | self.subscribe(Events.OPTIMIZATION_END, _logger)
206 |
207 | def maximize(self,
208 | init_points=5,
209 | n_iter=25,
210 | acq='ucb',
211 | kappa=2.576,
212 | kappa_decay=1,
213 | kappa_decay_delay=0,
214 | xi=0.0,
215 | **gp_params):
216 | """
217 | Probes the target space to find the parameters that yield the maximum
218 | value for the given function.
219 |
220 | Parameters
221 | ----------
222 | init_points : int, optional(default=5)
223 |             Number of randomly chosen points to probe before the
224 |             optimization starts searching for the maximum.
225 |
226 | n_iter: int, optional(default=25)
227 | Number of iterations where the method attempts to find the maximum
228 | value.
229 |
230 | acq: {'ucb', 'ei', 'poi'}
231 | The acquisition method used.
232 | * 'ucb' stands for the Upper Confidence Bounds method
233 | * 'ei' is the Expected Improvement method
234 | * 'poi' is the Probability Of Improvement criterion.
235 |
236 | kappa: float, optional(default=2.576)
237 |             Parameter to balance exploration against exploitation.
238 | Higher value = favors spaces that are least explored.
239 | Lower value = favors spaces where the regression function is the
240 | highest.
241 |
242 | kappa_decay: float, optional(default=1)
243 | `kappa` is multiplied by this factor every iteration.
244 |
245 | kappa_decay_delay: int, optional(default=0)
246 | Number of iterations that must have passed before applying the decay
247 | to `kappa`.
248 |
249 |         xi: float, optional(default=0.0)
250 |             Improvement margin used by the 'ei' and 'poi' acquisition functions.
251 | """
252 | self._prime_subscriptions()
253 | self.dispatch(Events.OPTIMIZATION_START)
254 | self._prime_queue(init_points)
255 | self.set_gp_params(**gp_params)
256 |
257 | util = UtilityFunction(kind=acq,
258 | kappa=kappa,
259 | xi=xi,
260 | kappa_decay=kappa_decay,
261 | kappa_decay_delay=kappa_decay_delay)
262 | iteration = 0
263 | while not self._queue.empty or iteration < n_iter:
264 | try:
265 | x_probe = next(self._queue)
266 | except StopIteration:
267 | util.update_params()
268 | x_probe = self.suggest(util)
269 | iteration += 1
270 |
271 | self.probe(x_probe, lazy=False)
272 |
273 | if self._bounds_transformer:
274 | self.set_bounds(
275 | self._bounds_transformer.transform(self._space))
276 |
277 | self.dispatch(Events.OPTIMIZATION_END)
278 |
279 | def set_bounds(self, new_bounds):
280 | """
281 | A method that allows changing the lower and upper searching bounds
282 |
283 | Parameters
284 | ----------
285 | new_bounds : dict
286 | A dictionary with the parameter name and its new bounds
287 | """
288 | self._space.set_bounds(new_bounds)
289 |
290 | def set_gp_params(self, **params):
291 | """Set parameters to the internal Gaussian Process Regressor"""
292 | self._gp.set_params(**params)
293 |
--------------------------------------------------------------------------------
/bayes_opt/domain_reduction.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from .target_space import TargetSpace
3 |
4 |
5 | class DomainTransformer():
6 | '''The base transformer class'''
7 |
8 | def __init__(self, **kwargs):
9 | pass
10 |
11 | def initialize(self, target_space: TargetSpace):
12 | raise NotImplementedError
13 |
14 | def transform(self, target_space: TargetSpace):
15 | raise NotImplementedError
16 |
17 |
18 | class SequentialDomainReductionTransformer(DomainTransformer):
19 | """
20 |     A sequential domain reduction transformer based on the work by Stander, N. and Craig, K.:
21 |     "On the robustness of a simple domain reduction scheme for simulation-based optimization"
22 | """
23 |
24 | def __init__(
25 | self,
26 | gamma_osc: float = 0.7,
27 | gamma_pan: float = 1.0,
28 | eta: float = 0.9
29 | ) -> None:
30 | self.gamma_osc = gamma_osc
31 | self.gamma_pan = gamma_pan
32 | self.eta = eta
34 |
35 | def initialize(self, target_space: TargetSpace) -> None:
36 | """Initialize all of the parameters"""
37 | self.original_bounds = np.copy(target_space.bounds)
38 | self.bounds = [self.original_bounds]
39 |
40 | self.previous_optimal = np.mean(target_space.bounds, axis=1)
41 | self.current_optimal = np.mean(target_space.bounds, axis=1)
42 | self.r = target_space.bounds[:, 1] - target_space.bounds[:, 0]
43 |
44 | self.previous_d = 2.0 * \
45 | (self.current_optimal - self.previous_optimal) / self.r
46 |
47 | self.current_d = 2.0 * (self.current_optimal -
48 | self.previous_optimal) / self.r
49 |
50 | self.c = self.current_d * self.previous_d
51 | self.c_hat = np.sqrt(np.abs(self.c)) * np.sign(self.c)
52 |
53 | self.gamma = 0.5 * (self.gamma_pan * (1.0 + self.c_hat) +
54 | self.gamma_osc * (1.0 - self.c_hat))
55 |
56 | self.contraction_rate = self.eta + \
57 | np.abs(self.current_d) * (self.gamma - self.eta)
58 |
59 | self.r = self.contraction_rate * self.r
60 |
61 | def _update(self, target_space: TargetSpace) -> None:
62 |
63 | # setting the previous
64 | self.previous_optimal = self.current_optimal
65 | self.previous_d = self.current_d
66 |
67 | self.current_optimal = target_space.params[
68 | np.argmax(target_space.target)
69 | ]
70 |
71 | self.current_d = 2.0 * (self.current_optimal -
72 | self.previous_optimal) / self.r
73 |
74 | self.c = self.current_d * self.previous_d
75 |
76 | self.c_hat = np.sqrt(np.abs(self.c)) * np.sign(self.c)
77 |
78 | self.gamma = 0.5 * (self.gamma_pan * (1.0 + self.c_hat) +
79 | self.gamma_osc * (1.0 - self.c_hat))
80 |
81 | self.contraction_rate = self.eta + \
82 | np.abs(self.current_d) * (self.gamma - self.eta)
83 |
84 | self.r = self.contraction_rate * self.r
85 |
86 |     def _trim(self, new_bounds: np.ndarray, global_bounds: np.ndarray) -> np.ndarray:
87 | for i, variable in enumerate(new_bounds):
88 | if variable[0] < global_bounds[i, 0]:
89 | variable[0] = global_bounds[i, 0]
90 | if variable[1] > global_bounds[i, 1]:
91 | variable[1] = global_bounds[i, 1]
92 |
93 | return new_bounds
94 |
95 |     def _create_bounds(self, parameters: dict, bounds: np.ndarray) -> dict:
96 | return {param: bounds[i, :] for i, param in enumerate(parameters)}
97 |
98 | def transform(self, target_space: TargetSpace) -> dict:
99 |
100 | self._update(target_space)
101 |
102 | new_bounds = np.array(
103 | [
104 | self.current_optimal - 0.5 * self.r,
105 | self.current_optimal + 0.5 * self.r
106 | ]
107 | ).T
108 |
109 | self._trim(new_bounds, self.original_bounds)
110 | self.bounds.append(new_bounds)
111 | return self._create_bounds(target_space.keys, new_bounds)
112 |
--------------------------------------------------------------------------------
/bayes_opt/event.py:
--------------------------------------------------------------------------------
1 | class Events:
2 | OPTIMIZATION_START = 'optimization:start'
3 | OPTIMIZATION_STEP = 'optimization:step'
4 | OPTIMIZATION_END = 'optimization:end'
5 |
6 |
7 | DEFAULT_EVENTS = [
8 | Events.OPTIMIZATION_START,
9 | Events.OPTIMIZATION_STEP,
10 | Events.OPTIMIZATION_END,
11 | ]
12 |
--------------------------------------------------------------------------------
/bayes_opt/logger.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | import os
3 | import json
4 |
5 | from .observer import _Tracker
6 | from .event import Events
7 | from .util import Colours
8 |
9 |
10 | def _get_default_logger(verbose):
11 | return ScreenLogger(verbose=verbose)
12 |
13 |
14 | class ScreenLogger(_Tracker):
15 | _default_cell_size = 9
16 | _default_precision = 4
17 |
18 | def __init__(self, verbose=2):
19 | self._verbose = verbose
20 | self._header_length = None
21 | super(ScreenLogger, self).__init__()
22 |
23 | @property
24 | def verbose(self):
25 | return self._verbose
26 |
27 | @verbose.setter
28 | def verbose(self, v):
29 | self._verbose = v
30 |
31 | def _format_number(self, x):
32 | if isinstance(x, int):
33 | s = "{x:< {s}}".format(
34 | x=x,
35 | s=self._default_cell_size,
36 | )
37 | else:
38 | s = "{x:< {s}.{p}}".format(
39 | x=x,
40 | s=self._default_cell_size,
41 | p=self._default_precision,
42 | )
43 |
44 | if len(s) > self._default_cell_size:
45 | if "." in s:
46 | return s[:self._default_cell_size]
47 | else:
48 | return s[:self._default_cell_size - 3] + "..."
49 | return s
50 |
51 | def _format_key(self, key):
52 | s = "{key:^{s}}".format(
53 | key=key,
54 | s=self._default_cell_size
55 | )
56 | if len(s) > self._default_cell_size:
57 | return s[:self._default_cell_size - 3] + "..."
58 | return s
59 |
60 | def _step(self, instance, colour=Colours.black):
61 | res = instance.res[-1]
62 | cells = []
63 |
64 | cells.append(self._format_number(self._iterations + 1))
65 | cells.append(self._format_number(res["target"]))
66 |
67 | for key in instance.space.keys:
68 | cells.append(self._format_number(res["params"][key]))
69 |
70 | return "| " + " | ".join(map(colour, cells)) + " |"
71 |
72 | def _header(self, instance):
73 | cells = []
74 | cells.append(self._format_key("iter"))
75 | cells.append(self._format_key("target"))
76 | for key in instance.space.keys:
77 | cells.append(self._format_key(key))
78 |
79 | line = "| " + " | ".join(cells) + " |"
80 | self._header_length = len(line)
81 | return line + "\n" + ("-" * self._header_length)
82 |
83 | def _is_new_max(self, instance):
84 | if self._previous_max is None:
85 | self._previous_max = instance.max["target"]
86 | return instance.max["target"] > self._previous_max
87 |
88 | def update(self, event, instance):
89 | if event == Events.OPTIMIZATION_START:
90 | line = self._header(instance) + "\n"
91 | elif event == Events.OPTIMIZATION_STEP:
92 | is_new_max = self._is_new_max(instance)
93 | if self._verbose == 1 and not is_new_max:
94 | line = ""
95 | else:
96 | colour = Colours.purple if is_new_max else Colours.black
97 | line = self._step(instance, colour=colour) + "\n"
98 | elif event == Events.OPTIMIZATION_END:
99 | line = "=" * self._header_length + "\n"
100 |
101 | if self._verbose:
102 | print(line, end="")
103 | self._update_tracker(event, instance)
104 |
105 |
106 | class JSONLogger(_Tracker):
107 | def __init__(self, path, reset=True):
108 | self._path = path if path[-5:] == ".json" else path + ".json"
109 | if reset:
110 | try:
111 | os.remove(self._path)
112 | except OSError:
113 | pass
114 | super(JSONLogger, self).__init__()
115 |
116 | def update(self, event, instance):
117 | if event == Events.OPTIMIZATION_STEP:
118 | data = dict(instance.res[-1])
119 |
120 | now, time_elapsed, time_delta = self._time_metrics()
121 | data["datetime"] = {
122 | "datetime": now,
123 | "elapsed": time_elapsed,
124 | "delta": time_delta,
125 | }
126 |
127 | with open(self._path, "a") as f:
128 | f.write(json.dumps(data) + "\n")
129 |
130 | self._update_tracker(event, instance)
131 |
--------------------------------------------------------------------------------
/bayes_opt/observer.py:
--------------------------------------------------------------------------------
1 | """
2 | observers...
3 | """
4 | from datetime import datetime
5 | from .event import Events
6 |
7 |
8 | class Observer:
9 | def update(self, event, instance):
10 | raise NotImplementedError
11 |
12 |
13 | class _Tracker(object):
14 | def __init__(self):
15 | self._iterations = 0
16 |
17 | self._previous_max = None
18 | self._previous_max_params = None
19 |
20 | self._start_time = None
21 | self._previous_time = None
22 |
23 | def _update_tracker(self, event, instance):
24 | if event == Events.OPTIMIZATION_STEP:
25 | self._iterations += 1
26 |
27 | current_max = instance.max
28 | if (self._previous_max is None or
29 | current_max["target"] > self._previous_max):
30 | self._previous_max = current_max["target"]
31 | self._previous_max_params = current_max["params"]
32 |
33 | def _time_metrics(self):
34 | now = datetime.now()
35 | if self._start_time is None:
36 | self._start_time = now
37 | if self._previous_time is None:
38 | self._previous_time = now
39 |
40 | time_elapsed = now - self._start_time
41 | time_delta = now - self._previous_time
42 |
43 | self._previous_time = now
44 | return (
45 | now.strftime("%Y-%m-%d %H:%M:%S"),
46 | time_elapsed.total_seconds(),
47 | time_delta.total_seconds()
48 | )
49 |
--------------------------------------------------------------------------------
/bayes_opt/target_space.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from .util import ensure_rng
3 |
4 |
5 | def _hashable(x):
6 | """ ensure that an point is hashable by a python dict """
7 | return tuple(map(float, x))
8 |
9 |
10 | class TargetSpace(object):
11 | """
12 | Holds the param-space coordinates (X) and target values (Y)
13 | Allows for constant-time appends while ensuring no duplicates are added
14 |
15 | Example
16 | -------
17 |     >>> def target_func(p1, p2):
18 |     ...     return p1 + p2
19 |     >>> pbounds = {'p1': (0, 1), 'p2': (1, 100)}
20 |     >>> space = TargetSpace(target_func, pbounds, random_state=0)
21 |     >>> x = space.random_sample()
22 |     >>> y = space.probe(x)
23 |     >>> assert space.max()['target'] == y
24 | """
25 | def __init__(self, target_func, pbounds, random_state=None):
26 | """
27 | Parameters
28 | ----------
29 | target_func : function
30 | Function to be maximized.
31 |
32 | pbounds : dict
33 |             Dictionary with parameter names as keys and a tuple with minimum
34 | and maximum values.
35 |
36 | random_state : int, RandomState, or None
37 | optionally specify a seed for a random number generator
38 | """
39 | self.random_state = ensure_rng(random_state)
40 |
41 | # The function to be optimized
42 | self.target_func = target_func
43 |
44 | # Get the name of the parameters
45 | self._keys = sorted(pbounds)
46 | # Create an array with parameters bounds
47 | self._bounds = np.array(
48 | [item[1] for item in sorted(pbounds.items(), key=lambda x: x[0])],
49 |             dtype=float
50 | )
51 |
52 | # preallocated memory for X and Y points
53 | self._params = np.empty(shape=(0, self.dim))
54 | self._target = np.empty(shape=(0))
55 |
56 | # keep track of unique points we have seen so far
57 | self._cache = {}
58 |
59 | def __contains__(self, x):
60 | return _hashable(x) in self._cache
61 |
62 | def __len__(self):
63 | assert len(self._params) == len(self._target)
64 | return len(self._target)
65 |
66 | @property
67 | def empty(self):
68 | return len(self) == 0
69 |
70 | @property
71 | def params(self):
72 | return self._params
73 |
74 | @property
75 | def target(self):
76 | return self._target
77 |
78 | @property
79 | def dim(self):
80 | return len(self._keys)
81 |
82 | @property
83 | def keys(self):
84 | return self._keys
85 |
86 | @property
87 | def bounds(self):
88 | return self._bounds
89 |
90 | def params_to_array(self, params):
91 | try:
92 | assert set(params) == set(self.keys)
93 | except AssertionError:
94 | raise ValueError(
95 | "Parameters' keys ({}) do ".format(sorted(params)) +
96 | "not match the expected set of keys ({}).".format(self.keys)
97 | )
98 | return np.asarray([params[key] for key in self.keys])
99 |
100 | def array_to_params(self, x):
101 | try:
102 | assert len(x) == len(self.keys)
103 | except AssertionError:
104 | raise ValueError(
105 | "Size of array ({}) is different than the ".format(len(x)) +
106 | "expected number of parameters ({}).".format(len(self.keys))
107 | )
108 | return dict(zip(self.keys, x))
109 |
110 | def _as_array(self, x):
111 | try:
112 | x = np.asarray(x, dtype=float)
113 | except TypeError:
114 | x = self.params_to_array(x)
115 |
116 | x = x.ravel()
117 | try:
118 | assert x.size == self.dim
119 | except AssertionError:
120 | raise ValueError(
121 | "Size of array ({}) is different than the ".format(len(x)) +
122 | "expected number of parameters ({}).".format(len(self.keys))
123 | )
124 | return x
125 |
126 | def register(self, params, target):
127 | """
128 | Append a point and its target value to the known data.
129 |
130 | Parameters
131 | ----------
132 |         params : ndarray
133 |             a single point, with len(params) == self.dim
134 |
135 |         target : float
136 |             target function value
137 |
138 | Raises
139 | ------
140 | KeyError:
141 | if the point is not unique
142 |
143 | Notes
144 | -----
145 |         runs in amortized constant time
146 |
147 | Example
148 | -------
149 | >>> pbounds = {'p1': (0, 1), 'p2': (1, 100)}
150 | >>> space = TargetSpace(lambda p1, p2: p1 + p2, pbounds)
151 | >>> len(space)
152 | 0
153 | >>> x = np.array([0, 0])
154 | >>> y = 1
155 |         >>> space.register(x, y)
156 | >>> len(space)
157 | 1
158 | """
159 | x = self._as_array(params)
160 | if x in self:
161 | raise KeyError('Data point {} is not unique'.format(x))
162 |
163 | # Insert data into unique dictionary
164 | self._cache[_hashable(x.ravel())] = target
165 |
166 | self._params = np.concatenate([self._params, x.reshape(1, -1)])
167 | self._target = np.concatenate([self._target, [target]])
168 |
169 | def probe(self, params):
170 | """
171 |         Evaluates a single point x to obtain the value y, and then records them
172 | as observations.
173 |
174 | Notes
175 | -----
176 | If x has been previously seen returns a cached value of y.
177 |
178 | Parameters
179 | ----------
180 |         params : ndarray or dict
181 |             a single point, with len(params) == self.dim
182 |
183 | Returns
184 | -------
185 | y : float
186 | target function value.
187 | """
188 | x = self._as_array(params)
189 |
190 | try:
191 | target = self._cache[_hashable(x)]
192 | except KeyError:
193 | params = dict(zip(self._keys, x))
194 | target = self.target_func(**params)
195 | self.register(x, target)
196 | return target
197 |
198 | def random_sample(self):
199 | """
200 |         Creates a random point within the bounds of the space.
201 |
202 |         Returns
203 |         ----------
204 |         data: ndarray
205 |             [dim] array with entries corresponding to `self._keys`
206 |
207 |         Example
208 |         -------
209 |         >>> target_func = lambda p1, p2: p1 + p2
210 |         >>> pbounds = {'p1': (0, 1), 'p2': (1, 100)}
211 |         >>> space = TargetSpace(target_func, pbounds, random_state=0)
212 |         >>> space.random_sample()
213 |         array([ 0.5488135 , 71.80374727])
214 | """
215 | # TODO: support integer, category, and basic scipy.optimize constraints
216 | data = np.empty((1, self.dim))
217 | for col, (lower, upper) in enumerate(self._bounds):
218 | data.T[col] = self.random_state.uniform(lower, upper, size=1)
219 | return data.ravel()
220 |
221 | def max(self):
222 | """Get maximum target value found and corresponding parametes."""
223 | try:
224 | res = {
225 | 'target': self.target.max(),
226 | 'params': dict(
227 | zip(self.keys, self.params[self.target.argmax()])
228 | )
229 | }
230 | except ValueError:
231 | res = {}
232 | return res
233 |
234 | def res(self):
235 | """Get all target values found and corresponding parametes."""
236 | params = [dict(zip(self.keys, p)) for p in self.params]
237 |
238 | return [
239 | {"target": target, "params": param}
240 | for target, param in zip(self.target, params)
241 | ]
242 |
243 | def set_bounds(self, new_bounds):
244 | """
245 | A method that allows changing the lower and upper searching bounds
246 |
247 | Parameters
248 | ----------
249 | new_bounds : dict
250 | A dictionary with the parameter name and its new bounds
251 | """
252 | for row, key in enumerate(self.keys):
253 | if key in new_bounds:
254 | self._bounds[row] = new_bounds[key]
255 |
--------------------------------------------------------------------------------
/bayes_opt/util.py:
--------------------------------------------------------------------------------
1 | import warnings
2 | import numpy as np
3 | from scipy.stats import norm
4 | from scipy.optimize import minimize
5 |
6 |
7 | def acq_max(ac, gp, y_max, bounds, random_state, n_warmup=10000, n_iter=10):
8 | """
9 | A function to find the maximum of the acquisition function
10 |
11 | It uses a combination of random sampling (cheap) and the 'L-BFGS-B'
12 |     optimization method. First by sampling `n_warmup` (default 10,000) points at random,
13 |     and then running L-BFGS-B from `n_iter` (default 10) random starting points.
14 |
15 | Parameters
16 | ----------
17 | :param ac:
18 |         The acquisition function object that returns its point-wise value.
19 |
20 | :param gp:
21 | A gaussian process fitted to the relevant data.
22 |
23 | :param y_max:
24 | The current maximum known value of the target function.
25 |
26 |     :param bounds:
27 |         The variables' bounds that limit the search for the acquisition maximum.
28 |
29 |     :param random_state:
30 |         instance of numpy.random.RandomState random number generator
31 |
32 |     :param n_warmup:
33 |         number of times to randomly sample the acquisition function
34 |
35 | :param n_iter:
36 | number of times to run scipy.minimize
37 |
38 | Returns
39 | -------
40 | :return: x_max, The arg max of the acquisition function.
41 | """
42 |
43 | # Warm up with random points
44 | x_tries = random_state.uniform(bounds[:, 0], bounds[:, 1],
45 | size=(n_warmup, bounds.shape[0]))
46 | ys = ac(x_tries, gp=gp, y_max=y_max)
47 | x_max = x_tries[ys.argmax()]
48 | max_acq = ys.max()
49 |
50 |     # Explore the parameter space more thoroughly
51 | x_seeds = random_state.uniform(bounds[:, 0], bounds[:, 1],
52 | size=(n_iter, bounds.shape[0]))
53 | for x_try in x_seeds:
54 | # Find the minimum of minus the acquisition function
55 | res = minimize(lambda x: -ac(x.reshape(1, -1), gp=gp, y_max=y_max),
56 | x_try.reshape(1, -1),
57 | bounds=bounds,
58 | method="L-BFGS-B")
59 |
60 | # See if success
61 | if not res.success:
62 | continue
63 |
64 |         # Store it if better than the previous maximum.
65 | if max_acq is None or -res.fun[0] >= max_acq:
66 | x_max = res.x
67 | max_acq = -res.fun[0]
68 |
69 | # Clip output to make sure it lies within the bounds. Due to floating
70 | # point technicalities this is not always the case.
71 | return np.clip(x_max, bounds[:, 0], bounds[:, 1])
72 |
73 |
74 | class UtilityFunction(object):
75 | """
76 | An object to compute the acquisition functions.
77 | """
78 |
79 | def __init__(self, kind, kappa, xi, kappa_decay=1, kappa_decay_delay=0):
80 |
81 | self.kappa = kappa
82 | self._kappa_decay = kappa_decay
83 | self._kappa_decay_delay = kappa_decay_delay
84 |
85 | self.xi = xi
86 |
87 | self._iters_counter = 0
88 |
89 | if kind not in ['ucb', 'ei', 'poi']:
90 | err = "The utility function " \
91 | "{} has not been implemented, " \
92 | "please choose one of ucb, ei, or poi.".format(kind)
93 | raise NotImplementedError(err)
94 | else:
95 | self.kind = kind
96 |
97 | def update_params(self):
98 | self._iters_counter += 1
99 |
100 | if self._kappa_decay < 1 and self._iters_counter > self._kappa_decay_delay:
101 | self.kappa *= self._kappa_decay
102 |
103 | def utility(self, x, gp, y_max):
104 | if self.kind == 'ucb':
105 | return self._ucb(x, gp, self.kappa)
106 | if self.kind == 'ei':
107 | return self._ei(x, gp, y_max, self.xi)
108 | if self.kind == 'poi':
109 | return self._poi(x, gp, y_max, self.xi)
110 |
111 | @staticmethod
112 | def _ucb(x, gp, kappa):
113 | with warnings.catch_warnings():
114 | warnings.simplefilter("ignore")
115 | mean, std = gp.predict(x, return_std=True)
116 |
117 | return mean + kappa * std
118 |
119 | @staticmethod
120 | def _ei(x, gp, y_max, xi):
121 | with warnings.catch_warnings():
122 | warnings.simplefilter("ignore")
123 | mean, std = gp.predict(x, return_std=True)
124 |
125 | a = (mean - y_max - xi)
126 | z = a / std
127 | return a * norm.cdf(z) + std * norm.pdf(z)
128 |
129 | @staticmethod
130 | def _poi(x, gp, y_max, xi):
131 | with warnings.catch_warnings():
132 | warnings.simplefilter("ignore")
133 | mean, std = gp.predict(x, return_std=True)
134 |
135 | z = (mean - y_max - xi)/std
136 | return norm.cdf(z)
137 |
138 |
139 | def load_logs(optimizer, logs):
140 | """Load previous ...
141 |
142 | """
143 | import json
144 |
145 | if isinstance(logs, str):
146 | logs = [logs]
147 |
148 | for log in logs:
149 | with open(log, "r") as j:
150 | while True:
151 | try:
152 | iteration = next(j)
153 | except StopIteration:
154 | break
155 |
156 | iteration = json.loads(iteration)
157 | try:
158 | optimizer.register(
159 | params=iteration["params"],
160 | target=iteration["target"],
161 | )
162 | except KeyError:
163 | pass
164 |
165 | return optimizer
166 |
167 |
168 | def ensure_rng(random_state=None):
169 | """
170 | Creates a random number generator based on an optional seed. This can be
171 | an integer or another random state for a seeded rng, or None for an
172 | unseeded rng.
173 | """
174 | if random_state is None:
175 | random_state = np.random.RandomState()
176 | elif isinstance(random_state, int):
177 | random_state = np.random.RandomState(random_state)
178 | else:
179 | assert isinstance(random_state, np.random.RandomState)
180 | return random_state
181 |
182 |
183 | class Colours:
184 | """Print in nice colours."""
185 |
186 | BLUE = '\033[94m'
187 | BOLD = '\033[1m'
188 | CYAN = '\033[96m'
189 | DARKCYAN = '\033[36m'
190 | END = '\033[0m'
191 | GREEN = '\033[92m'
192 | PURPLE = '\033[95m'
193 | RED = '\033[91m'
194 | UNDERLINE = '\033[4m'
195 | YELLOW = '\033[93m'
196 |
197 | @classmethod
198 | def _wrap_colour(cls, s, colour):
199 | return colour + s + cls.END
200 |
201 | @classmethod
202 | def black(cls, s):
203 | """Wrap text in black."""
204 | return cls._wrap_colour(s, cls.END)
205 |
206 | @classmethod
207 | def blue(cls, s):
208 | """Wrap text in blue."""
209 | return cls._wrap_colour(s, cls.BLUE)
210 |
211 | @classmethod
212 | def bold(cls, s):
213 | """Wrap text in bold."""
214 | return cls._wrap_colour(s, cls.BOLD)
215 |
216 | @classmethod
217 | def cyan(cls, s):
218 | """Wrap text in cyan."""
219 | return cls._wrap_colour(s, cls.CYAN)
220 |
221 | @classmethod
222 | def darkcyan(cls, s):
223 | """Wrap text in darkcyan."""
224 | return cls._wrap_colour(s, cls.DARKCYAN)
225 |
226 | @classmethod
227 | def green(cls, s):
228 | """Wrap text in green."""
229 | return cls._wrap_colour(s, cls.GREEN)
230 |
231 | @classmethod
232 | def purple(cls, s):
233 | """Wrap text in purple."""
234 | return cls._wrap_colour(s, cls.PURPLE)
235 |
236 | @classmethod
237 | def red(cls, s):
238 | """Wrap text in red."""
239 | return cls._wrap_colour(s, cls.RED)
240 |
241 | @classmethod
242 | def underline(cls, s):
243 | """Wrap text in underline."""
244 | return cls._wrap_colour(s, cls.UNDERLINE)
245 |
246 | @classmethod
247 | def yellow(cls, s):
248 | """Wrap text in yellow."""
249 | return cls._wrap_colour(s, cls.YELLOW)
250 |
--------------------------------------------------------------------------------
/examples/advanced-tour.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Advanced tour of the Bayesian Optimization package"
8 | ]
9 | },
10 | {
11 | "cell_type": "code",
12 | "execution_count": 1,
13 | "metadata": {},
14 | "outputs": [],
15 | "source": [
16 | "from bayes_opt import BayesianOptimization"
17 | ]
18 | },
19 | {
20 | "cell_type": "markdown",
21 | "metadata": {},
22 | "source": [
23 | "## 1. Suggest-Evaluate-Register Paradigm\n",
24 | "\n",
25 | "Internally the `maximize` method is simply a wrapper around the methods `suggest`, `probe`, and `register`. If you need more control over your optimization loops the Suggest-Evaluate-Register paradigm should give you that extra flexibility.\n",
26 | "\n",
27 | "For an example of running the `BayesianOptimization` in a distributed fashion (where the function being optimized is evaluated concurrently in different cores/machines/servers), checkout the `async_optimization.py` script in the examples folder."
28 | ]
29 | },
30 | {
31 | "cell_type": "code",
32 | "execution_count": 2,
33 | "metadata": {},
34 | "outputs": [],
35 | "source": [
36 | "# Let's start by definying our function, bounds, and instanciating an optimization object.\n",
37 | "def black_box_function(x, y):\n",
38 | " return -x ** 2 - (y - 1) ** 2 + 1"
39 | ]
40 | },
41 | {
42 | "cell_type": "markdown",
43 | "metadata": {},
44 | "source": [
45 | "Notice that the evaluation of the blackbox function will NOT be carried out by the optimizer object. We are simulating a situation where this function could be being executed in a different machine, maybe it is written in another language, or it could even be the result of a chemistry experiment. Whatever the case may be, you can take charge of it and as long as you don't invoke the `probe` or `maximize` methods directly, the optimizer object will ignore the blackbox function."
46 | ]
47 | },
48 | {
49 | "cell_type": "code",
50 | "execution_count": 3,
51 | "metadata": {},
52 | "outputs": [],
53 | "source": [
54 | "optimizer = BayesianOptimization(\n",
55 | " f=None,\n",
56 | " pbounds={'x': (-2, 2), 'y': (-3, 3)},\n",
57 | " verbose=2,\n",
58 | " random_state=1,\n",
59 | ")"
60 | ]
61 | },
62 | {
63 | "cell_type": "markdown",
64 | "metadata": {},
65 | "source": [
66 | "One extra ingredient we will need is an `UtilityFunction` instance. In case it is not clear why, take a look at the literature to understand better how this method works."
67 | ]
68 | },
69 | {
70 | "cell_type": "code",
71 | "execution_count": 4,
72 | "metadata": {},
73 | "outputs": [],
74 | "source": [
75 | "from bayes_opt import UtilityFunction\n",
76 | "\n",
77 | "utility = UtilityFunction(kind=\"ucb\", kappa=2.5, xi=0.0)"
78 | ]
79 | },
80 | {
81 | "cell_type": "markdown",
82 | "metadata": {},
83 | "source": [
84 | "The `suggest` method of our optimizer can be called at any time. What you get back is a suggestion for the next parameter combination the optimizer wants to probe.\n",
85 | "\n",
86 | "Notice that while the optimizer hasn't observed any points, the suggestions will be random. However, they will stop being random and improve in quality the more points are observed."
87 | ]
88 | },
89 | {
90 | "cell_type": "code",
91 | "execution_count": 5,
92 | "metadata": {},
93 | "outputs": [
94 | {
95 | "name": "stdout",
96 | "output_type": "stream",
97 | "text": [
98 | "Next point to probe is: {'y': 1.3219469606529488, 'x': -0.331911981189704}\n"
99 | ]
100 | }
101 | ],
102 | "source": [
103 | "next_point_to_probe = optimizer.suggest(utility)\n",
104 | "print(\"Next point to probe is:\", next_point_to_probe)"
105 | ]
106 | },
107 | {
108 | "cell_type": "markdown",
109 | "metadata": {},
110 | "source": [
111 | "You are now free to evaluate your function at the suggested point however/whenever you like."
112 | ]
113 | },
114 | {
115 | "cell_type": "code",
116 | "execution_count": 6,
117 | "metadata": {},
118 | "outputs": [
119 | {
120 | "name": "stdout",
121 | "output_type": "stream",
122 | "text": [
123 | "Found the target value to be: 0.7861845912690542\n"
124 | ]
125 | }
126 | ],
127 | "source": [
128 | "target = black_box_function(**next_point_to_probe)\n",
129 | "print(\"Found the target value to be:\", target)"
130 | ]
131 | },
132 | {
133 | "cell_type": "markdown",
134 | "metadata": {},
135 | "source": [
136 | "Last thing left to do is to tell the optimizer what target value was observed."
137 | ]
138 | },
139 | {
140 | "cell_type": "code",
141 | "execution_count": 7,
142 | "metadata": {},
143 | "outputs": [],
144 | "source": [
145 | "optimizer.register(\n",
146 | " params=next_point_to_probe,\n",
147 | " target=target,\n",
148 | ")"
149 | ]
150 | },
151 | {
152 | "cell_type": "markdown",
153 | "metadata": {},
154 | "source": [
155 | "### 1.1 The maximize loop\n",
156 | "\n",
157 | "And that's it. By repeating the steps above you recreate the internals of the `maximize` method. This should give you all the flexibility you need to log progress, hault execution, perform concurrent evaluations, etc."
158 | ]
159 | },
160 | {
161 | "cell_type": "code",
162 | "execution_count": 8,
163 | "metadata": {},
164 | "outputs": [
165 | {
166 | "name": "stdout",
167 | "output_type": "stream",
168 | "text": [
169 | "-19.0 {'y': -3.0, 'x': 2.0}\n",
170 | "-12.194801029414048 {'y': -2.412527795983739, 'x': -1.2447710918286998}\n",
171 | "0.6381713808008993 {'y': 1.4965397889559267, 'x': -0.3395244574146384}\n",
172 | "0.5052897389362041 {'y': 1.2837707069731576, 'x': -0.6435716330974743}\n",
173 | "0.9493808230928116 {'y': 1.2241444765020055, 'x': -0.019453291773639306}\n",
174 | "{'target': 0.9493808230928116, 'params': {'y': 1.2241444765020055, 'x': -0.019453291773639306}}\n"
175 | ]
176 | }
177 | ],
178 | "source": [
179 | "for _ in range(5):\n",
180 | " next_point = optimizer.suggest(utility)\n",
181 | " target = black_box_function(**next_point)\n",
182 | " optimizer.register(params=next_point, target=target)\n",
183 | " \n",
184 | " print(target, next_point)\n",
185 | "print(optimizer.max)"
186 | ]
187 | },
188 | {
189 | "cell_type": "markdown",
190 | "metadata": {},
191 | "source": [
192 | "## 2. Dealing with discrete parameters\n",
193 | "\n",
194 | "**There is no principled way of dealing with discrete parameters using this package.**\n",
195 | "\n",
196 | "Ok, now that we got that out of the way, how do you do it? You're bound to be in a situation where some of your function's parameters may only take on discrete values. Unfortunately, the nature of bayesian optimization with gaussian processes doesn't allow for an easy/intuitive way of dealing with discrete parameters - but that doesn't mean it is impossible. The example below showcases a simple, yet reasonably adequate, way to dealing with discrete parameters."
197 | ]
198 | },
199 | {
200 | "cell_type": "code",
201 | "execution_count": 8,
202 | "metadata": {},
203 | "outputs": [],
204 | "source": [
205 | "def func_with_discrete_params(x, y, d):\n",
206 | " # Simulate necessity of having d being discrete.\n",
207 | " assert type(d) == int\n",
208 | " \n",
209 | " return ((x + y + d) // (1 + d)) / (1 + (x + y) ** 2)"
210 | ]
211 | },
212 | {
213 | "cell_type": "code",
214 | "execution_count": 9,
215 | "metadata": {},
216 | "outputs": [],
217 | "source": [
218 | "def function_to_be_optimized(x, y, w):\n",
219 | " d = int(w)\n",
220 | " return func_with_discrete_params(x, y, d)"
221 | ]
222 | },
223 | {
224 | "cell_type": "code",
225 | "execution_count": 10,
226 | "metadata": {},
227 | "outputs": [],
228 | "source": [
229 | "optimizer = BayesianOptimization(\n",
230 | " f=function_to_be_optimized,\n",
231 | " pbounds={'x': (-10, 10), 'y': (-10, 10), 'w': (0, 5)},\n",
232 | " verbose=2,\n",
233 | " random_state=1,\n",
234 | ")"
235 | ]
236 | },
237 | {
238 | "cell_type": "code",
239 | "execution_count": 11,
240 | "metadata": {},
241 | "outputs": [
242 | {
243 | "name": "stdout",
244 | "output_type": "stream",
245 | "text": [
246 | "| iter | target | w | x | y |\n",
247 | "-------------------------------------------------------------\n",
248 | "| \u001b[0m 1 \u001b[0m | \u001b[0m-0.06199 \u001b[0m | \u001b[0m 2.085 \u001b[0m | \u001b[0m 4.406 \u001b[0m | \u001b[0m-9.998 \u001b[0m |\n",
249 | "| \u001b[95m 2 \u001b[0m | \u001b[95m-0.0344 \u001b[0m | \u001b[95m 1.512 \u001b[0m | \u001b[95m-7.065 \u001b[0m | \u001b[95m-8.153 \u001b[0m |\n",
250 | "| \u001b[0m 3 \u001b[0m | \u001b[0m-0.2177 \u001b[0m | \u001b[0m 0.9313 \u001b[0m | \u001b[0m-3.089 \u001b[0m | \u001b[0m-2.065 \u001b[0m |\n",
251 | "| \u001b[95m 4 \u001b[0m | \u001b[95m 0.1865 \u001b[0m | \u001b[95m 2.694 \u001b[0m | \u001b[95m-1.616 \u001b[0m | \u001b[95m 3.704 \u001b[0m |\n",
252 | "| \u001b[0m 5 \u001b[0m | \u001b[0m-0.2187 \u001b[0m | \u001b[0m 1.022 \u001b[0m | \u001b[0m 7.562 \u001b[0m | \u001b[0m-9.452 \u001b[0m |\n",
253 | "| \u001b[0m 6 \u001b[0m | \u001b[0m 0.009975\u001b[0m | \u001b[0m 5.0 \u001b[0m | \u001b[0m 10.0 \u001b[0m | \u001b[0m 10.0 \u001b[0m |\n",
254 | "| \u001b[0m 7 \u001b[0m | \u001b[0m 0.0 \u001b[0m | \u001b[0m 5.0 \u001b[0m | \u001b[0m-10.0 \u001b[0m | \u001b[0m 10.0 \u001b[0m |\n",
255 | "| \u001b[0m 8 \u001b[0m | \u001b[0m 0.09003 \u001b[0m | \u001b[0m 0.0 \u001b[0m | \u001b[0m 0.4916 \u001b[0m | \u001b[0m 10.0 \u001b[0m |\n",
256 | "| \u001b[0m 9 \u001b[0m | \u001b[0m-0.007481\u001b[0m | \u001b[0m 5.0 \u001b[0m | \u001b[0m-10.0 \u001b[0m | \u001b[0m-10.0 \u001b[0m |\n",
257 | "| \u001b[0m 10 \u001b[0m | \u001b[0m 0.01989 \u001b[0m | \u001b[0m 5.0 \u001b[0m | \u001b[0m-0.02203 \u001b[0m | \u001b[0m 10.0 \u001b[0m |\n",
258 | "| \u001b[0m 11 \u001b[0m | \u001b[0m 0.0189 \u001b[0m | \u001b[0m 5.0 \u001b[0m | \u001b[0m 10.0 \u001b[0m | \u001b[0m 0.238 \u001b[0m |\n",
259 | "| \u001b[0m 12 \u001b[0m | \u001b[0m-0.2149 \u001b[0m | \u001b[0m 0.0 \u001b[0m | \u001b[0m-10.0 \u001b[0m | \u001b[0m 5.282 \u001b[0m |\n",
260 | "| \u001b[0m 13 \u001b[0m | \u001b[0m 0.05995 \u001b[0m | \u001b[0m 0.0 \u001b[0m | \u001b[0m 10.0 \u001b[0m | \u001b[0m 5.786 \u001b[0m |\n",
261 | "| \u001b[0m 14 \u001b[0m | \u001b[0m-0.01299 \u001b[0m | \u001b[0m 5.0 \u001b[0m | \u001b[0m-2.367 \u001b[0m | \u001b[0m-10.0 \u001b[0m |\n",
262 | "| \u001b[0m 15 \u001b[0m | \u001b[0m 0.03637 \u001b[0m | \u001b[0m 5.0 \u001b[0m | \u001b[0m 3.773 \u001b[0m | \u001b[0m 3.575 \u001b[0m |\n",
263 | "| \u001b[0m 16 \u001b[0m | \u001b[0m-0.01214 \u001b[0m | \u001b[0m 5.0 \u001b[0m | \u001b[0m-10.0 \u001b[0m | \u001b[0m 0.9779 \u001b[0m |\n",
264 | "| \u001b[0m 17 \u001b[0m | \u001b[0m 0.0 \u001b[0m | \u001b[0m 5.0 \u001b[0m | \u001b[0m 10.0 \u001b[0m | \u001b[0m-10.0 \u001b[0m |\n",
265 | "| \u001b[0m 18 \u001b[0m | \u001b[0m 0.0 \u001b[0m | \u001b[0m 5.0 \u001b[0m | \u001b[0m-4.58 \u001b[0m | \u001b[0m 5.518 \u001b[0m |\n",
266 | "| \u001b[0m 19 \u001b[0m | \u001b[0m-0.04988 \u001b[0m | \u001b[0m 0.0 \u001b[0m | \u001b[0m-10.0 \u001b[0m | \u001b[0m-10.0 \u001b[0m |\n",
267 | "| \u001b[0m 20 \u001b[0m | \u001b[0m 0.1246 \u001b[0m | \u001b[0m 0.0 \u001b[0m | \u001b[0m 2.311 \u001b[0m | \u001b[0m 5.116 \u001b[0m |\n",
268 | "| \u001b[0m 21 \u001b[0m | \u001b[0m 0.04988 \u001b[0m | \u001b[0m 0.0 \u001b[0m | \u001b[0m 10.0 \u001b[0m | \u001b[0m 10.0 \u001b[0m |\n",
269 | "| \u001b[0m 22 \u001b[0m | \u001b[0m 0.04567 \u001b[0m | \u001b[0m 2.029 \u001b[0m | \u001b[0m 0.1434 \u001b[0m | \u001b[0m 6.398 \u001b[0m |\n",
270 | "| \u001b[0m 23 \u001b[0m | \u001b[0m 0.0 \u001b[0m | \u001b[0m 5.0 \u001b[0m | \u001b[0m 4.685 \u001b[0m | \u001b[0m-4.937 \u001b[0m |\n",
271 | "| \u001b[0m 24 \u001b[0m | \u001b[0m 0.06466 \u001b[0m | \u001b[0m 0.0 \u001b[0m | \u001b[0m 5.198 \u001b[0m | \u001b[0m 10.0 \u001b[0m |\n",
272 | "| \u001b[95m 25 \u001b[0m | \u001b[95m 0.3751 \u001b[0m | \u001b[95m 5.0 \u001b[0m | \u001b[95m-0.6795 \u001b[0m | \u001b[95m 1.97 \u001b[0m |\n",
273 | "| \u001b[0m 26 \u001b[0m | \u001b[0m 0.0 \u001b[0m | \u001b[0m 5.0 \u001b[0m | \u001b[0m-2.001 \u001b[0m | \u001b[0m-0.5515 \u001b[0m |\n",
274 | "| \u001b[0m 27 \u001b[0m | \u001b[0m 0.1072 \u001b[0m | \u001b[0m 0.0 \u001b[0m | \u001b[0m 10.0 \u001b[0m | \u001b[0m-1.419 \u001b[0m |\n",
275 | "| \u001b[0m 28 \u001b[0m | \u001b[0m-0.08895 \u001b[0m | \u001b[0m 0.0 \u001b[0m | \u001b[0m-2.048 \u001b[0m | \u001b[0m-10.0 \u001b[0m |\n",
276 | "| \u001b[0m 29 \u001b[0m | \u001b[0m 0.1907 \u001b[0m | \u001b[0m 0.0 \u001b[0m | \u001b[0m 3.994 \u001b[0m | \u001b[0m-0.1557 \u001b[0m |\n",
277 | "| \u001b[0m 30 \u001b[0m | \u001b[0m-0.0 \u001b[0m | \u001b[0m 0.0 \u001b[0m | \u001b[0m-10.0 \u001b[0m | \u001b[0m 10.0 \u001b[0m |\n",
278 | "=============================================================\n"
279 | ]
280 | }
281 | ],
282 | "source": [
283 | "optimizer.maximize(alpha=1e-3)"
284 | ]
285 | },
286 | {
287 | "cell_type": "markdown",
288 | "metadata": {},
289 | "source": [
290 | "## 3. Tuning the underlying Gaussian Process\n",
291 | "\n",
292 | "The bayesian optimization algorithm works by performing a gaussian process regression of the observed combination of parameters and their associated target values. The predicted parameter$\\rightarrow$target hyper-surface (and its uncertainty) is then used to guide the next best point to probe."
293 | ]
294 | },
295 | {
296 | "cell_type": "markdown",
297 | "metadata": {},
298 | "source": [
299 | "### 3.1 Passing parameter to the GP\n",
300 | "\n",
301 | "Depending on the problemn it could be beneficial to change the default parameters of the underlying GP. You can simply pass GP parameters to the maximize method directly as you can see below:"
302 | ]
303 | },
304 | {
305 | "cell_type": "code",
306 | "execution_count": 12,
307 | "metadata": {},
308 | "outputs": [
309 | {
310 | "name": "stdout",
311 | "output_type": "stream",
312 | "text": [
313 | "| iter | target | x | y |\n",
314 | "-------------------------------------------------\n",
315 | "| \u001b[0m 1 \u001b[0m | \u001b[0m 0.7862 \u001b[0m | \u001b[0m-0.3319 \u001b[0m | \u001b[0m 1.322 \u001b[0m |\n",
316 | "| \u001b[0m 2 \u001b[0m | \u001b[0m-18.96 \u001b[0m | \u001b[0m 1.993 \u001b[0m | \u001b[0m-2.998 \u001b[0m |\n",
317 | "| \u001b[0m 3 \u001b[0m | \u001b[0m 0.7858 \u001b[0m | \u001b[0m-0.3333 \u001b[0m | \u001b[0m 1.321 \u001b[0m |\n",
318 | "| \u001b[0m 4 \u001b[0m | \u001b[0m 0.5787 \u001b[0m | \u001b[0m-0.429 \u001b[0m | \u001b[0m 1.487 \u001b[0m |\n",
319 | "| \u001b[0m 5 \u001b[0m | \u001b[0m 0.7798 \u001b[0m | \u001b[0m 0.02543 \u001b[0m | \u001b[0m 1.469 \u001b[0m |\n",
320 | "| \u001b[95m 6 \u001b[0m | \u001b[95m 0.9779 \u001b[0m | \u001b[95m 0.1301 \u001b[0m | \u001b[95m 0.9282 \u001b[0m |\n",
321 | "=================================================\n"
322 | ]
323 | }
324 | ],
325 | "source": [
326 | "optimizer = BayesianOptimization(\n",
327 | " f=black_box_function,\n",
328 | " pbounds={'x': (-2, 2), 'y': (-3, 3)},\n",
329 | " verbose=2,\n",
330 | " random_state=1,\n",
331 | ")\n",
332 | "optimizer.maximize(\n",
333 | " init_points=1,\n",
334 | " n_iter=5,\n",
335 | " # What follows are GP regressor parameters\n",
336 | " alpha=1e-3,\n",
337 | " n_restarts_optimizer=5\n",
338 | ")"
339 | ]
340 | },
341 | {
342 | "cell_type": "markdown",
343 | "metadata": {},
344 | "source": [
345 | "Another alternative, specially useful if you're calling `maximize` multiple times or optimizing outside the `maximize` loop, is to call the `set_gp_params` method."
346 | ]
347 | },
348 | {
349 | "cell_type": "code",
350 | "execution_count": 13,
351 | "metadata": {},
352 | "outputs": [],
353 | "source": [
354 | "optimizer.set_gp_params(normalize_y=True)"
355 | ]
356 | },
357 | {
358 | "cell_type": "markdown",
359 | "metadata": {},
360 | "source": [
361 | "### 3.2 Tuning the `alpha` parameter\n",
362 | "\n",
363 | "When dealing with functions with discrete parameters,or particularly erratic target space it might be beneficial to increase the value of the `alpha` parameter. This parameters controls how much noise the GP can handle, so increase it whenever you think that extra flexibility is needed."
364 | ]
365 | },
366 | {
367 | "cell_type": "markdown",
368 | "metadata": {},
369 | "source": [
370 | "### 3.3 Changing kernels\n",
371 | "\n",
372 | "By default this package uses the Mattern 2.5 kernel. Depending on your use case you may find that tunning the GP kernel could be beneficial. You're on your own here since these are very specific solutions to very specific problems."
373 | ]
374 | },
375 | {
376 | "cell_type": "markdown",
377 | "metadata": {},
378 | "source": [
379 | "## Observers Continued\n",
380 | "\n",
381 | "Observers are objects that subscribe and listen to particular events fired by the `BayesianOptimization` object. \n",
382 | "\n",
383 | "When an event gets fired a callback function is called with the event and the `BayesianOptimization` instance passed as parameters. The callback can be specified at the time of subscription. If none is given it will look for an `update` method from the observer."
384 | ]
385 | },
386 | {
387 | "cell_type": "code",
388 | "execution_count": 14,
389 | "metadata": {},
390 | "outputs": [],
391 | "source": [
392 | "from bayes_opt.event import DEFAULT_EVENTS, Events"
393 | ]
394 | },
395 | {
396 | "cell_type": "code",
397 | "execution_count": 15,
398 | "metadata": {},
399 | "outputs": [],
400 | "source": [
401 | "optimizer = BayesianOptimization(\n",
402 | " f=black_box_function,\n",
403 | " pbounds={'x': (-2, 2), 'y': (-3, 3)},\n",
404 | " verbose=2,\n",
405 | " random_state=1,\n",
406 | ")"
407 | ]
408 | },
409 | {
410 | "cell_type": "code",
411 | "execution_count": 16,
412 | "metadata": {},
413 | "outputs": [],
414 | "source": [
415 | "class BasicObserver:\n",
416 | " def update(self, event, instance):\n",
417 | " \"\"\"Does whatever you want with the event and `BayesianOptimization` instance.\"\"\"\n",
418 | " print(\"Event `{}` was observed\".format(event))"
419 | ]
420 | },
421 | {
422 | "cell_type": "code",
423 | "execution_count": 17,
424 | "metadata": {},
425 | "outputs": [],
426 | "source": [
427 | "my_observer = BasicObserver()\n",
428 | "\n",
429 | "optimizer.subscribe(\n",
430 | " event=Events.OPTIMIZATION_STEP,\n",
431 | " subscriber=my_observer,\n",
432 | " callback=None, # Will use the `update` method as callback\n",
433 | ")"
434 | ]
435 | },
436 | {
437 | "cell_type": "markdown",
438 | "metadata": {},
439 | "source": [
440 | "Alternatively you have the option to pass a completely different callback."
441 | ]
442 | },
443 | {
444 | "cell_type": "code",
445 | "execution_count": 18,
446 | "metadata": {},
447 | "outputs": [],
448 | "source": [
449 | "def my_callback(event, instance):\n",
450 | " print(\"Go nuts here!\")\n",
451 | "\n",
452 | "optimizer.subscribe(\n",
453 | " event=Events.OPTIMIZATION_START,\n",
454 | " subscriber=\"Any hashable object\",\n",
455 | " callback=my_callback,\n",
456 | ")"
457 | ]
458 | },
459 | {
460 | "cell_type": "code",
461 | "execution_count": 19,
462 | "metadata": {},
463 | "outputs": [
464 | {
465 | "name": "stdout",
466 | "output_type": "stream",
467 | "text": [
468 | "Go nuts here!\n",
469 | "Event `optimization:step` was observed\n",
470 | "Event `optimization:step` was observed\n",
471 | "Event `optimization:step` was observed\n"
472 | ]
473 | }
474 | ],
475 | "source": [
476 | "optimizer.maximize(init_points=1, n_iter=2)"
477 | ]
478 | },
479 | {
480 | "cell_type": "markdown",
481 | "metadata": {},
482 | "source": [
483 | "For a list of all default events you can checkout `DEFAULT_EVENTS`"
484 | ]
485 | },
486 | {
487 | "cell_type": "code",
488 | "execution_count": 20,
489 | "metadata": {},
490 | "outputs": [
491 | {
492 | "data": {
493 | "text/plain": [
494 | "['optimization:start', 'optimization:step', 'optimization:end']"
495 | ]
496 | },
497 | "execution_count": 20,
498 | "metadata": {},
499 | "output_type": "execute_result"
500 | }
501 | ],
502 | "source": [
503 | "DEFAULT_EVENTS"
504 | ]
505 | }
506 | ],
507 | "metadata": {
508 | "kernelspec": {
509 | "display_name": "Python 3",
510 | "language": "python",
511 | "name": "python3"
512 | },
513 | "language_info": {
514 | "codemirror_mode": {
515 | "name": "ipython",
516 | "version": 3
517 | },
518 | "file_extension": ".py",
519 | "mimetype": "text/x-python",
520 | "name": "python",
521 | "nbconvert_exporter": "python",
522 | "pygments_lexer": "ipython3",
523 | "version": "3.5.2"
524 | }
525 | },
526 | "nbformat": 4,
527 | "nbformat_minor": 2
528 | }
529 |
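
As referenced in section 1.1 above, here is a condensed, self-contained sketch of the Suggest-Evaluate-Register loop. It uses only calls demonstrated in this notebook (`suggest`, `register`, `UtilityFunction`) together with the notebook's own toy black-box function:

    from bayes_opt import BayesianOptimization, UtilityFunction

    def black_box_function(x, y):
        return -x ** 2 - (y - 1) ** 2 + 1

    # f=None: the optimizer never calls the black box itself.
    optimizer = BayesianOptimization(
        f=None,
        pbounds={'x': (-2, 2), 'y': (-3, 3)},
        random_state=1,
    )
    utility = UtilityFunction(kind="ucb", kappa=2.5, xi=0.0)

    for _ in range(10):
        # Ask for the next promising point...
        next_point = optimizer.suggest(utility)
        # ...evaluate it wherever and however you like...
        target = black_box_function(**next_point)
        # ...and report the observation back to the optimizer.
        optimizer.register(params=next_point, target=target)

    print(optimizer.max)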
--------------------------------------------------------------------------------
/examples/async_optimization.py:
--------------------------------------------------------------------------------
1 | import time
2 | import random
3 |
4 | from bayes_opt import BayesianOptimization
5 | from bayes_opt.util import UtilityFunction, Colours
6 |
7 | import asyncio
8 | import threading
9 |
10 | try:
11 | import json
12 | import tornado.ioloop
13 | import tornado.httpserver
14 | from tornado.web import RequestHandler
15 | import requests
16 | except ImportError:
17 | raise ImportError(
18 | "In order to run this example you must have the libraries: " +
19 | "`tornado` and `requests` installed."
20 | )
21 |
22 |
23 | def black_box_function(x, y):
24 | """Function with unknown internals we wish to maximize.
25 |
26 | This is just serving as an example, however, for all intents and
27 | purposes think of the internals of this function, i.e.: the process
28 |     which generates its output values, as unknown.
29 | """
30 | time.sleep(random.randint(1, 7))
31 | return -x ** 2 - (y - 1) ** 2 + 1
32 |
33 |
34 | class BayesianOptimizationHandler(RequestHandler):
35 | """Basic functionality for NLP handlers."""
36 | _bo = BayesianOptimization(
37 | f=black_box_function,
38 | pbounds={"x": (-4, 4), "y": (-3, 3)}
39 | )
40 | _uf = UtilityFunction(kind="ucb", kappa=3, xi=1)
41 |
42 | def post(self):
43 | """Deal with incoming requests."""
44 | body = tornado.escape.json_decode(self.request.body)
45 |
46 | try:
47 | self._bo.register(
48 | params=body["params"],
49 | target=body["target"],
50 | )
51 | print("BO has registered: {} points.".format(len(self._bo.space)), end="\n\n")
52 | except KeyError:
53 | pass
54 | finally:
55 | suggested_params = self._bo.suggest(self._uf)
56 |
57 | self.write(json.dumps(suggested_params))
58 |
59 |
60 | def run_optimization_app():
61 | asyncio.set_event_loop(asyncio.new_event_loop())
62 | handlers = [
63 | (r"/bayesian_optimization", BayesianOptimizationHandler),
64 | ]
65 | server = tornado.httpserver.HTTPServer(
66 | tornado.web.Application(handlers)
67 | )
68 | server.listen(9009)
69 | tornado.ioloop.IOLoop.instance().start()
70 |
71 |
72 | def run_optimizer():
73 | global optimizers_config
74 | config = optimizers_config.pop()
75 | name = config["name"]
76 | colour = config["colour"]
77 |
78 | register_data = {}
79 | max_target = None
80 | for _ in range(10):
81 | status = name + " wants to register: {}.\n".format(register_data)
82 |
83 | resp = requests.post(
84 | url="http://localhost:9009/bayesian_optimization",
85 | json=register_data,
86 | ).json()
87 | target = black_box_function(**resp)
88 |
89 | register_data = {
90 | "params": resp,
91 | "target": target,
92 | }
93 |
94 | if max_target is None or target > max_target:
95 | max_target = target
96 |
97 | status += name + " got {} as target.\n".format(target)
98 |         status += name + " will register next: {}.\n".format(register_data)
99 | print(colour(status), end="\n")
100 |
101 | global results
102 | results.append((name, max_target))
103 | print(colour(name + " is done!"), end="\n\n")
104 |
105 |
106 | if __name__ == "__main__":
107 | ioloop = tornado.ioloop.IOLoop.instance()
108 | optimizers_config = [
109 | {"name": "optimizer 1", "colour": Colours.red},
110 | {"name": "optimizer 2", "colour": Colours.green},
111 | {"name": "optimizer 3", "colour": Colours.blue},
112 | ]
113 |
114 | app_thread = threading.Thread(target=run_optimization_app)
115 | app_thread.daemon = True
116 | app_thread.start()
117 |
118 | targets = (
119 | run_optimizer,
120 | run_optimizer,
121 | run_optimizer
122 | )
123 | optimizer_threads = []
124 | for target in targets:
125 | optimizer_threads.append(threading.Thread(target=target))
126 | optimizer_threads[-1].daemon = True
127 | optimizer_threads[-1].start()
128 |
129 | results = []
130 | for optimizer_thread in optimizer_threads:
131 | optimizer_thread.join()
132 |
133 | for result in results:
134 | print(result[0], "found a maximum value of: {}".format(result[1]))
135 |
136 | ioloop.stop()
137 |
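
The handler above implements a small request/response contract: a client POSTs a JSON body containing `params` and `target` (or an empty body, in which case the `KeyError` is swallowed and nothing is registered), and it always receives the next suggested parameters back. A sketch of a single round trip, assuming the server above is running on localhost:9009:

    import requests

    # First call: nothing to register yet, so send an empty body and
    # just collect the initial suggestion.
    suggestion = requests.post(
        url="http://localhost:9009/bayesian_optimization",
        json={},
    ).json()

    # Evaluate the black box locally, then report the result and
    # receive the next suggestion in the same call.
    target = -suggestion["x"] ** 2 - (suggestion["y"] - 1) ** 2 + 1
    suggestion = requests.post(
        url="http://localhost:9009/bayesian_optimization",
        json={"params": suggestion, "target": target},
    ).json()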
--------------------------------------------------------------------------------
/examples/basic-tour.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Basic tour of the Bayesian Optimization package\n",
8 | "\n",
9 | "This is a constrained global optimization package built upon bayesian inference and gaussian process, that attempts to find the maximum value of an unknown function in as few iterations as possible. This technique is particularly suited for optimization of high cost functions, situations where the balance between exploration and exploitation is important.\n",
10 | "\n",
11 | "Bayesian optimization works by constructing a posterior distribution of functions (gaussian process) that best describes the function you want to optimize. As the number of observations grows, the posterior distribution improves, and the algorithm becomes more certain of which regions in parameter space are worth exploring and which are not, as seen in the picture below.\n",
12 | "\n",
13 | "As you iterate over and over, the algorithm balances its needs of exploration and exploitation taking into account what it knows about the target function. At each step a Gaussian Process is fitted to the known samples (points previously explored), and the posterior distribution, combined with a exploration strategy (such as UCB (Upper Confidence Bound), or EI (Expected Improvement)), are used to determine the next point that should be explored (see the gif below).\n",
14 | "\n",
15 | "This process is designed to minimize the number of steps required to find a combination of parameters that are close to the optimal combination. To do so, this method uses a proxy optimization problem (finding the maximum of the acquisition function) that, albeit still a hard problem, is cheaper (in the computational sense) and common tools can be employed. Therefore Bayesian Optimization is most adequate for situations where sampling the function to be optimized is a very expensive endeavor. See the references for a proper discussion of this method."
16 | ]
17 | },
18 | {
19 | "cell_type": "markdown",
20 | "metadata": {},
21 | "source": [
22 | "## 1. Specifying the function to be optimized\n",
23 | "\n",
24 | "This is a function optimization package, therefore the first and most important ingreedient is, of course, the function to be optimized.\n",
25 | "\n",
26 | "**DISCLAIMER:** We know exactly how the output of the function below depends on its parameter. Obviously this is just an example, and you shouldn't expect to know it in a real scenario. However, it should be clear that you don't need to. All you need in order to use this package (and more generally, this technique) is a function `f` that takes a known set of parameters and outputs a real number."
27 | ]
28 | },
29 | {
30 | "cell_type": "code",
31 | "execution_count": 1,
32 | "metadata": {},
33 | "outputs": [],
34 | "source": [
35 | "def black_box_function(x, y):\n",
36 | " \"\"\"Function with unknown internals we wish to maximize.\n",
37 | "\n",
38 | " This is just serving as an example, for all intents and\n",
39 | " purposes think of the internals of this function, i.e.: the process\n",
40 | " which generates its output values, as unknown.\n",
41 | " \"\"\"\n",
42 | " return -x ** 2 - (y - 1) ** 2 + 1"
43 | ]
44 | },
45 | {
46 | "cell_type": "markdown",
47 | "metadata": {},
48 | "source": [
49 | "## 2. Getting Started\n",
50 | "\n",
51 | "All we need to get started is to instanciate a `BayesianOptimization` object specifying a function to be optimized `f`, and its parameters with their corresponding bounds, `pbounds`. This is a constrained optimization technique, so you must specify the minimum and maximum values that can be probed for each parameter in order for it to work"
52 | ]
53 | },
54 | {
55 | "cell_type": "code",
56 | "execution_count": 2,
57 | "metadata": {},
58 | "outputs": [],
59 | "source": [
60 | "from bayes_opt import BayesianOptimization"
61 | ]
62 | },
63 | {
64 | "cell_type": "code",
65 | "execution_count": 3,
66 | "metadata": {},
67 | "outputs": [],
68 | "source": [
69 | "# Bounded region of parameter space\n",
70 | "pbounds = {'x': (2, 4), 'y': (-3, 3)}"
71 | ]
72 | },
73 | {
74 | "cell_type": "code",
75 | "execution_count": 4,
76 | "metadata": {},
77 | "outputs": [],
78 | "source": [
79 | "optimizer = BayesianOptimization(\n",
80 | " f=black_box_function,\n",
81 | " pbounds=pbounds,\n",
82 | " verbose=2, # verbose = 1 prints only when a maximum is observed, verbose = 0 is silent\n",
83 | " random_state=1,\n",
84 | ")"
85 | ]
86 | },
87 | {
88 | "cell_type": "markdown",
89 | "metadata": {},
90 | "source": [
91 | "The BayesianOptimization object will work out of the box without much tuning needed. The main method you should be aware of is `maximize`, which does exactly what you think it does.\n",
92 | "\n",
93 | "There are many parameters you can pass to maximize, nonetheless, the most important ones are:\n",
94 | "- `n_iter`: How many steps of bayesian optimization you want to perform. The more steps the more likely to find a good maximum you are.\n",
95 | "- `init_points`: How many steps of **random** exploration you want to perform. Random exploration can help by diversifying the exploration space."
96 | ]
97 | },
98 | {
99 | "cell_type": "code",
100 | "execution_count": 5,
101 | "metadata": {},
102 | "outputs": [
103 | {
104 | "name": "stdout",
105 | "output_type": "stream",
106 | "text": [
107 | "| iter | target | x | y |\n",
108 | "-------------------------------------------------\n",
109 | "| \u001b[0m 1 \u001b[0m | \u001b[0m-7.135 \u001b[0m | \u001b[0m 2.834 \u001b[0m | \u001b[0m 1.322 \u001b[0m |\n",
110 | "| \u001b[0m 2 \u001b[0m | \u001b[0m-7.78 \u001b[0m | \u001b[0m 2.0 \u001b[0m | \u001b[0m-1.186 \u001b[0m |\n",
111 | "| \u001b[0m 3 \u001b[0m | \u001b[0m-19.0 \u001b[0m | \u001b[0m 4.0 \u001b[0m | \u001b[0m 3.0 \u001b[0m |\n",
112 | "| \u001b[0m 4 \u001b[0m | \u001b[0m-16.3 \u001b[0m | \u001b[0m 2.378 \u001b[0m | \u001b[0m-2.413 \u001b[0m |\n",
113 | "| \u001b[95m 5 \u001b[0m | \u001b[95m-4.441 \u001b[0m | \u001b[95m 2.105 \u001b[0m | \u001b[95m-0.005822\u001b[0m |\n",
114 | "=================================================\n"
115 | ]
116 | }
117 | ],
118 | "source": [
119 | "optimizer.maximize(\n",
120 | " init_points=2,\n",
121 | " n_iter=3,\n",
122 | ")"
123 | ]
124 | },
125 | {
126 | "cell_type": "markdown",
127 | "metadata": {},
128 | "source": [
129 | "The best combination of parameters and target value found can be accessed via the property `bo.max`."
130 | ]
131 | },
132 | {
133 | "cell_type": "code",
134 | "execution_count": 6,
135 | "metadata": {},
136 | "outputs": [
137 | {
138 | "name": "stdout",
139 | "output_type": "stream",
140 | "text": [
141 | "{'target': -4.441293113411222, 'params': {'y': -0.005822117636089974, 'x': 2.104665051994087}}\n"
142 | ]
143 | }
144 | ],
145 | "source": [
146 | "print(optimizer.max)"
147 | ]
148 | },
149 | {
150 | "cell_type": "markdown",
151 | "metadata": {},
152 | "source": [
153 | "While the list of all parameters probed and their corresponding target values is available via the property `bo.res`."
154 | ]
155 | },
156 | {
157 | "cell_type": "code",
158 | "execution_count": 7,
159 | "metadata": {},
160 | "outputs": [
161 | {
162 | "name": "stdout",
163 | "output_type": "stream",
164 | "text": [
165 | "Iteration 0: \n",
166 | "\t{'target': -7.135455292718879, 'params': {'y': 1.3219469606529488, 'x': 2.8340440094051482}}\n",
167 | "Iteration 1: \n",
168 | "\t{'target': -7.779531005607566, 'params': {'y': -1.1860045642089614, 'x': 2.0002287496346898}}\n",
169 | "Iteration 2: \n",
170 | "\t{'target': -19.0, 'params': {'y': 3.0, 'x': 4.0}}\n",
171 | "Iteration 3: \n",
172 | "\t{'target': -16.29839645063864, 'params': {'y': -2.412527795983739, 'x': 2.3776144540856503}}\n",
173 | "Iteration 4: \n",
174 | "\t{'target': -4.441293113411222, 'params': {'y': -0.005822117636089974, 'x': 2.104665051994087}}\n"
175 | ]
176 | }
177 | ],
178 | "source": [
179 | "for i, res in enumerate(optimizer.res):\n",
180 | " print(\"Iteration {}: \\n\\t{}\".format(i, res))"
181 | ]
182 | },
183 | {
184 | "cell_type": "markdown",
185 | "metadata": {},
186 | "source": [
187 | "### 2.1 Changing bounds\n",
188 | "\n",
189 | "During the optimization process you may realize the bounds chosen for some parameters are not adequate. For these situations you can invoke the method `set_bounds` to alter them. You can pass any combination of **existing** parameters and their associated new bounds."
190 | ]
191 | },
192 | {
193 | "cell_type": "code",
194 | "execution_count": 8,
195 | "metadata": {},
196 | "outputs": [],
197 | "source": [
198 | "optimizer.set_bounds(new_bounds={\"x\": (-2, 3)})"
199 | ]
200 | },
201 | {
202 | "cell_type": "code",
203 | "execution_count": 9,
204 | "metadata": {},
205 | "outputs": [
206 | {
207 | "name": "stdout",
208 | "output_type": "stream",
209 | "text": [
210 | "| iter | target | x | y |\n",
211 | "-------------------------------------------------\n",
212 | "| \u001b[0m 6 \u001b[0m | \u001b[0m-5.145 \u001b[0m | \u001b[0m 2.115 \u001b[0m | \u001b[0m-0.2924 \u001b[0m |\n",
213 | "| \u001b[0m 7 \u001b[0m | \u001b[0m-5.379 \u001b[0m | \u001b[0m 2.337 \u001b[0m | \u001b[0m 0.04124 \u001b[0m |\n",
214 | "| \u001b[95m 8 \u001b[0m | \u001b[95m-3.581 \u001b[0m | \u001b[95m 1.874 \u001b[0m | \u001b[95m-0.03428 \u001b[0m |\n",
215 | "| \u001b[95m 9 \u001b[0m | \u001b[95m-2.624 \u001b[0m | \u001b[95m 1.702 \u001b[0m | \u001b[95m 0.1472 \u001b[0m |\n",
216 | "| \u001b[95m 10 \u001b[0m | \u001b[95m-1.762 \u001b[0m | \u001b[95m 1.442 \u001b[0m | \u001b[95m 0.1735 \u001b[0m |\n",
217 | "=================================================\n"
218 | ]
219 | }
220 | ],
221 | "source": [
222 | "optimizer.maximize(\n",
223 | " init_points=0,\n",
224 | " n_iter=5,\n",
225 | ")"
226 | ]
227 | },
228 | {
229 | "cell_type": "markdown",
230 | "metadata": {},
231 | "source": [
232 | "## 3. Guiding the optimization\n",
233 | "\n",
234 | "It is often the case that we have an idea of regions of the parameter space where the maximum of our function might lie. For these situations the `BayesianOptimization` object allows the user to specify specific points to be probed. By default these will be explored lazily (`lazy=True`), meaning these points will be evaluated only the next time you call `maximize`. This probing process happens before the gaussian process takes over.\n",
235 | "\n",
236 | "Parameters can be passed as dictionaries such as below:"
237 | ]
238 | },
239 | {
240 | "cell_type": "code",
241 | "execution_count": 10,
242 | "metadata": {},
243 | "outputs": [],
244 | "source": [
245 | "optimizer.probe(\n",
246 | " params={\"x\": 0.5, \"y\": 0.7},\n",
247 | " lazy=True,\n",
248 | ")"
249 | ]
250 | },
251 | {
252 | "cell_type": "markdown",
253 | "metadata": {},
254 | "source": [
255 | "Or as an iterable. Beware that the order has to be alphabetical. You can usee `optimizer.space.keys` for guidance"
256 | ]
257 | },
258 | {
259 | "cell_type": "code",
260 | "execution_count": 11,
261 | "metadata": {},
262 | "outputs": [
263 | {
264 | "name": "stdout",
265 | "output_type": "stream",
266 | "text": [
267 | "['x', 'y']\n"
268 | ]
269 | }
270 | ],
271 | "source": [
272 | "print(optimizer.space.keys)"
273 | ]
274 | },
275 | {
276 | "cell_type": "code",
277 | "execution_count": 12,
278 | "metadata": {},
279 | "outputs": [],
280 | "source": [
281 | "optimizer.probe(\n",
282 | " params=[-0.3, 0.1],\n",
283 | " lazy=True,\n",
284 | ")"
285 | ]
286 | },
287 | {
288 | "cell_type": "code",
289 | "execution_count": 13,
290 | "metadata": {},
291 | "outputs": [
292 | {
293 | "name": "stdout",
294 | "output_type": "stream",
295 | "text": [
296 | "| iter | target | x | y |\n",
297 | "-------------------------------------------------\n",
298 | "| \u001b[95m 11 \u001b[0m | \u001b[95m 0.66 \u001b[0m | \u001b[95m 0.5 \u001b[0m | \u001b[95m 0.7 \u001b[0m |\n",
299 | "| \u001b[0m 12 \u001b[0m | \u001b[0m 0.1 \u001b[0m | \u001b[0m-0.3 \u001b[0m | \u001b[0m 0.1 \u001b[0m |\n",
300 | "=================================================\n"
301 | ]
302 | }
303 | ],
304 | "source": [
305 | "optimizer.maximize(init_points=0, n_iter=0)"
306 | ]
307 | },
308 | {
309 | "cell_type": "markdown",
310 | "metadata": {},
311 | "source": [
312 | "## 4. Saving, loading and restarting\n",
313 | "\n",
314 | "By default you can follow the progress of your optimization by setting `verbose>0` when instanciating the `BayesianOptimization` object. If you need more control over logging/alerting you will need to use an observer. For more information about observers checkout the advanced tour notebook. Here we will only see how to use the native `JSONLogger` object to save to and load progress from files.\n",
315 | "\n",
316 | "### 4.1 Saving progress"
317 | ]
318 | },
319 | {
320 | "cell_type": "code",
321 | "execution_count": 14,
322 | "metadata": {},
323 | "outputs": [],
324 | "source": [
325 | "from bayes_opt.logger import JSONLogger\n",
326 | "from bayes_opt.event import Events"
327 | ]
328 | },
329 | {
330 | "cell_type": "markdown",
331 | "metadata": {},
332 | "source": [
333 | "The observer paradigm works by:\n",
334 | "1. Instantiating an observer object.\n",
335 | "2. Tying the observer object to a particular event fired by an optimizer.\n",
336 | "\n",
337 | "The `BayesianOptimization` object fires a number of internal events during optimization, in particular, everytime it probes the function and obtains a new parameter-target combination it will fire an `Events.OPTIMIZATION_STEP` event, which our logger will listen to.\n",
338 | "\n",
339 | "**Caveat:** The logger will not look back at previously probed points."
340 | ]
341 | },
342 | {
343 | "cell_type": "code",
344 | "execution_count": 15,
345 | "metadata": {},
346 | "outputs": [],
347 | "source": [
348 | "logger = JSONLogger(path=\"./logs.json\")\n",
349 | "optimizer.subscribe(Events.OPTIMIZATION_STEP, logger)"
350 | ]
351 | },
352 | {
353 | "cell_type": "code",
354 | "execution_count": 16,
355 | "metadata": {},
356 | "outputs": [
357 | {
358 | "name": "stdout",
359 | "output_type": "stream",
360 | "text": [
361 | "| iter | target | x | y |\n",
362 | "-------------------------------------------------\n",
363 | "| \u001b[0m 13 \u001b[0m | \u001b[0m-12.48 \u001b[0m | \u001b[0m-1.266 \u001b[0m | \u001b[0m-2.446 \u001b[0m |\n",
364 | "| \u001b[0m 14 \u001b[0m | \u001b[0m-3.854 \u001b[0m | \u001b[0m-1.069 \u001b[0m | \u001b[0m-0.9266 \u001b[0m |\n",
365 | "| \u001b[0m 15 \u001b[0m | \u001b[0m 0.3932 \u001b[0m | \u001b[0m 0.3099 \u001b[0m | \u001b[0m 0.2853 \u001b[0m |\n",
366 | "| \u001b[95m 16 \u001b[0m | \u001b[95m 0.8768 \u001b[0m | \u001b[95m 0.02197 \u001b[0m | \u001b[95m 0.6497 \u001b[0m |\n",
367 | "| \u001b[95m 17 \u001b[0m | \u001b[95m 0.9446 \u001b[0m | \u001b[95m 0.198 \u001b[0m | \u001b[95m 0.8727 \u001b[0m |\n",
368 | "=================================================\n"
369 | ]
370 | }
371 | ],
372 | "source": [
373 | "optimizer.maximize(\n",
374 | " init_points=2,\n",
375 | " n_iter=3,\n",
376 | ")"
377 | ]
378 | },
379 | {
380 | "cell_type": "markdown",
381 | "metadata": {},
382 | "source": [
383 | "### 4.2 Loading progress\n",
384 | "\n",
385 | "Naturally, if you stored progress you will be able to load that onto a new instance of `BayesianOptimization`. The easiest way to do it is by invoking the `load_logs` function, from the `util` submodule."
386 | ]
387 | },
388 | {
389 | "cell_type": "code",
390 | "execution_count": 17,
391 | "metadata": {},
392 | "outputs": [],
393 | "source": [
394 | "from bayes_opt.util import load_logs"
395 | ]
396 | },
397 | {
398 | "cell_type": "code",
399 | "execution_count": 18,
400 | "metadata": {},
401 | "outputs": [
402 | {
403 | "name": "stdout",
404 | "output_type": "stream",
405 | "text": [
406 | "0\n"
407 | ]
408 | }
409 | ],
410 | "source": [
411 | "new_optimizer = BayesianOptimization(\n",
412 | " f=black_box_function,\n",
413 | " pbounds={\"x\": (-2, 2), \"y\": (-2, 2)},\n",
414 | " verbose=2,\n",
415 | " random_state=7,\n",
416 | ")\n",
417 | "print(len(new_optimizer.space))"
418 | ]
419 | },
420 | {
421 | "cell_type": "code",
422 | "execution_count": 19,
423 | "metadata": {},
424 | "outputs": [],
425 | "source": [
426 | "load_logs(new_optimizer, logs=[\"./logs.json\"]);"
427 | ]
428 | },
429 | {
430 | "cell_type": "code",
431 | "execution_count": 20,
432 | "metadata": {},
433 | "outputs": [
434 | {
435 | "name": "stdout",
436 | "output_type": "stream",
437 | "text": [
438 | "New optimizer is now aware of 5 points.\n"
439 | ]
440 | }
441 | ],
442 | "source": [
443 | "print(\"New optimizer is now aware of {} points.\".format(len(new_optimizer.space)))"
444 | ]
445 | },
446 | {
447 | "cell_type": "code",
448 | "execution_count": 21,
449 | "metadata": {},
450 | "outputs": [
451 | {
452 | "name": "stdout",
453 | "output_type": "stream",
454 | "text": [
455 | "| iter | target | x | y |\n",
456 | "-------------------------------------------------\n",
457 | "| \u001b[0m 1 \u001b[0m | \u001b[0m 0.6131 \u001b[0m | \u001b[0m 0.5571 \u001b[0m | \u001b[0m 0.7233 \u001b[0m |\n",
458 | "| \u001b[0m 2 \u001b[0m | \u001b[0m 0.8609 \u001b[0m | \u001b[0m-0.3295 \u001b[0m | \u001b[0m 1.175 \u001b[0m |\n",
459 | "| \u001b[0m 3 \u001b[0m | \u001b[0m 0.3761 \u001b[0m | \u001b[0m 0.2406 \u001b[0m | \u001b[0m 1.752 \u001b[0m |\n",
460 | "| \u001b[0m 4 \u001b[0m | \u001b[0m-0.7845 \u001b[0m | \u001b[0m-0.8972 \u001b[0m | \u001b[0m 1.99 \u001b[0m |\n",
461 | "| \u001b[0m 5 \u001b[0m | \u001b[0m 0.1401 \u001b[0m | \u001b[0m-0.8733 \u001b[0m | \u001b[0m 0.6882 \u001b[0m |\n",
462 | "| \u001b[0m 6 \u001b[0m | \u001b[0m-1.798 \u001b[0m | \u001b[0m 1.545 \u001b[0m | \u001b[0m 1.642 \u001b[0m |\n",
463 | "| \u001b[0m 7 \u001b[0m | \u001b[0m 0.9331 \u001b[0m | \u001b[0m 0.05549 \u001b[0m | \u001b[0m 1.253 \u001b[0m |\n",
464 | "| \u001b[0m 8 \u001b[0m | \u001b[0m-5.369 \u001b[0m | \u001b[0m 2.0 \u001b[0m | \u001b[0m-0.5391 \u001b[0m |\n",
465 | "| \u001b[95m 9 \u001b[0m | \u001b[95m 0.97 \u001b[0m | \u001b[95m-0.1587 \u001b[0m | \u001b[95m 0.9305 \u001b[0m |\n",
466 | "| \u001b[0m 10 \u001b[0m | \u001b[0m 0.7718 \u001b[0m | \u001b[0m 0.4293 \u001b[0m | \u001b[0m 1.21 \u001b[0m |\n",
467 | "=================================================\n"
468 | ]
469 | }
470 | ],
471 | "source": [
472 | "new_optimizer.maximize(\n",
473 | " init_points=0,\n",
474 | " n_iter=10,\n",
475 | ")"
476 | ]
477 | },
478 | {
479 | "cell_type": "markdown",
480 | "metadata": {},
481 | "source": [
482 | "## Next Steps\n",
483 | "\n",
484 | "This tour should be enough to cover most usage scenarios of this package. If, however, you feel like you need to know more, please checkout the `advanced-tour` notebook. There you will be able to find other, more advanced features of this package that could be what you're looking for. Also, browse the examples folder for implementation tips and ideas."
485 | ]
486 | }
487 | ],
488 | "metadata": {
489 | "kernelspec": {
490 | "display_name": "Python 3",
491 | "language": "python",
492 | "name": "python3"
493 | },
494 | "language_info": {
495 | "codemirror_mode": {
496 | "name": "ipython",
497 | "version": 3
498 | },
499 | "file_extension": ".py",
500 | "mimetype": "text/x-python",
501 | "name": "python",
502 | "nbconvert_exporter": "python",
503 | "pygments_lexer": "ipython3",
504 | "version": "3.5.2"
505 | }
506 | },
507 | "nbformat": 4,
508 | "nbformat_minor": 2
509 | }
510 |
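
As referenced in section 4.2 above, here is a condensed save-and-reload sketch combining the pieces from this notebook (`JSONLogger`, `Events.OPTIMIZATION_STEP`, and `load_logs`); the path and seeds are illustrative:

    from bayes_opt import BayesianOptimization
    from bayes_opt.logger import JSONLogger
    from bayes_opt.event import Events
    from bayes_opt.util import load_logs

    def black_box_function(x, y):
        return -x ** 2 - (y - 1) ** 2 + 1

    pbounds = {"x": (-2, 2), "y": (-3, 3)}

    optimizer = BayesianOptimization(f=black_box_function, pbounds=pbounds, random_state=1)
    # Every OPTIMIZATION_STEP event appends the new observation to ./logs.json.
    optimizer.subscribe(Events.OPTIMIZATION_STEP, JSONLogger(path="./logs.json"))
    optimizer.maximize(init_points=2, n_iter=3)

    # A fresh optimizer can replay the log and pick up where the old one stopped.
    new_optimizer = BayesianOptimization(f=black_box_function, pbounds=pbounds, random_state=7)
    load_logs(new_optimizer, logs=["./logs.json"])
    print("Recovered {} points from the log.".format(len(new_optimizer.space)))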
--------------------------------------------------------------------------------
/examples/bayesian_optimization.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mit-biomimetics/BayesianOptimization/91441fe4002fb6ebdb4aa5e33826230d8df560d0/examples/bayesian_optimization.gif
--------------------------------------------------------------------------------
/examples/bo_example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mit-biomimetics/BayesianOptimization/91441fe4002fb6ebdb4aa5e33826230d8df560d0/examples/bo_example.png
--------------------------------------------------------------------------------
/examples/func.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mit-biomimetics/BayesianOptimization/91441fe4002fb6ebdb4aa5e33826230d8df560d0/examples/func.png
--------------------------------------------------------------------------------
/examples/sdr.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mit-biomimetics/BayesianOptimization/91441fe4002fb6ebdb4aa5e33826230d8df560d0/examples/sdr.png
--------------------------------------------------------------------------------
/examples/sklearn_example.py:
--------------------------------------------------------------------------------
1 | from sklearn.datasets import make_classification
2 | from sklearn.model_selection import cross_val_score
3 | from sklearn.ensemble import RandomForestClassifier as RFC
4 | from sklearn.svm import SVC
5 |
6 | from bayes_opt import BayesianOptimization
7 | from bayes_opt.util import Colours
8 |
9 | def get_data():
10 | """Synthetic binary classification dataset."""
11 | data, targets = make_classification(
12 | n_samples=1000,
13 | n_features=45,
14 | n_informative=12,
15 | n_redundant=7,
16 | random_state=134985745,
17 | )
18 | return data, targets
19 |
20 |
21 | def svc_cv(C, gamma, data, targets):
22 | """SVC cross validation.
23 |
24 |     This function will instantiate an SVC classifier with parameters C and
25 | gamma. Combined with data and targets this will in turn be used to perform
26 | cross validation. The result of cross validation is returned.
27 |
28 |     Our goal is to find combinations of C and gamma that maximize the roc_auc
29 | metric.
30 | """
31 | estimator = SVC(C=C, gamma=gamma, random_state=2)
32 | cval = cross_val_score(estimator, data, targets, scoring='roc_auc', cv=4)
33 | return cval.mean()
34 |
35 |
36 | def rfc_cv(n_estimators, min_samples_split, max_features, data, targets):
37 | """Random Forest cross validation.
38 |
39 | This function will instantiate a random forest classifier with parameters
40 | n_estimators, min_samples_split, and max_features. Combined with data and
41 | targets this will in turn be used to perform cross validation. The result
42 | of cross validation is returned.
43 |
44 | Our goal is to find combinations of n_estimators, min_samples_split, and
45 |     max_features that minimize the log loss.
46 | """
47 | estimator = RFC(
48 | n_estimators=n_estimators,
49 | min_samples_split=min_samples_split,
50 | max_features=max_features,
51 | random_state=2
52 | )
53 | cval = cross_val_score(estimator, data, targets,
54 | scoring='neg_log_loss', cv=4)
55 | return cval.mean()
56 |
57 |
58 | def optimize_svc(data, targets):
59 | """Apply Bayesian Optimization to SVC parameters."""
60 | def svc_crossval(expC, expGamma):
61 | """Wrapper of SVC cross validation.
62 |
63 | Notice how we transform between regular and log scale. While this
64 | is not technically necessary, it greatly improves the performance
65 | of the optimizer.
66 | """
67 | C = 10 ** expC
68 | gamma = 10 ** expGamma
69 | return svc_cv(C=C, gamma=gamma, data=data, targets=targets)
70 |
71 | optimizer = BayesianOptimization(
72 | f=svc_crossval,
73 | pbounds={"expC": (-3, 2), "expGamma": (-4, -1)},
74 | random_state=1234,
75 | verbose=2
76 | )
77 | optimizer.maximize(n_iter=10)
78 |
79 | print("Final result:", optimizer.max)
80 |
81 |
82 | def optimize_rfc(data, targets):
83 | """Apply Bayesian Optimization to Random Forest parameters."""
84 | def rfc_crossval(n_estimators, min_samples_split, max_features):
85 | """Wrapper of RandomForest cross validation.
86 |
87 |         Notice how we ensure n_estimators and min_samples_split are cast
88 |         to integers before we pass them along. Moreover, to avoid max_features
89 | taking values outside the (0, 1) range, we also ensure it is capped
90 | accordingly.
91 | """
92 | return rfc_cv(
93 | n_estimators=int(n_estimators),
94 | min_samples_split=int(min_samples_split),
95 | max_features=max(min(max_features, 0.999), 1e-3),
96 | data=data,
97 | targets=targets,
98 | )
99 |
100 | optimizer = BayesianOptimization(
101 | f=rfc_crossval,
102 | pbounds={
103 | "n_estimators": (10, 250),
104 | "min_samples_split": (2, 25),
105 | "max_features": (0.1, 0.999),
106 | },
107 | random_state=1234,
108 | verbose=2
109 | )
110 | optimizer.maximize(n_iter=10)
111 |
112 | print("Final result:", optimizer.max)
113 |
114 | if __name__ == "__main__":
115 | data, targets = get_data()
116 |
117 | print(Colours.yellow("--- Optimizing SVM ---"))
118 | optimize_svc(data, targets)
119 |
120 | print(Colours.green("--- Optimizing Random Forest ---"))
121 | optimize_rfc(data, targets)
122 |
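
Both wrappers above follow the same pattern: the optimizer only ever proposes continuous values, and a thin wrapper maps them onto the scales and types the estimator actually expects (log scale for C/gamma, integer casts for the forest parameters). A minimal sketch of that pattern with a stand-in objective (the quadratic below is purely illustrative):

    from bayes_opt import BayesianOptimization

    def evaluate(C, gamma):
        # Stand-in for an expensive cross-validation run.
        return -(C - 1.0) ** 2 - (gamma - 0.01) ** 2

    def wrapped(expC, expGamma):
        # The optimizer searches over log-scaled, continuous parameters;
        # the wrapper converts them back before evaluation.
        return evaluate(C=10 ** expC, gamma=10 ** expGamma)

    optimizer = BayesianOptimization(
        f=wrapped,
        pbounds={"expC": (-3, 2), "expGamma": (-4, -1)},
        random_state=1234,
        verbose=0,
    )
    optimizer.maximize(n_iter=10)
    print(optimizer.max)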
--------------------------------------------------------------------------------
/pytest.ini:
--------------------------------------------------------------------------------
1 | [pytest]
2 | ;addopts = -p no:doctest --xdoctest --xdoctest-style=google
3 | norecursedirs = .git ignore build __pycache__
4 |
5 | filterwarnings= default
6 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [metadata]
2 | description-file = README.md
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, find_packages
2 |
3 | setup(
4 | name='bayesian-optimization',
5 | version='1.2.0',
6 | url='https://github.com/fmfn/BayesianOptimization',
7 | packages=find_packages(),
8 | author='Fernando Nogueira',
9 | author_email="fmfnogueira@gmail.com",
10 | description='Bayesian Optimization package',
11 | long_description='A Python implementation of global optimization with gaussian processes.',
12 | download_url='https://github.com/fmfn/BayesianOptimization/tarball/0.6',
13 | install_requires=[
14 | "numpy >= 1.9.0",
15 | "scipy >= 0.14.0",
16 | "scikit-learn >= 0.18.0",
17 | ],
18 | classifiers=[
19 | 'License :: OSI Approved :: MIT License',
20 | ]
21 | )
22 |
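
Note that the distribution name differs from the import name: the package installs as `bayesian-optimization` (e.g. via `pip install bayesian-optimization`) but is imported as `bayes_opt`. A quick post-install sanity check:

    import bayes_opt

    # The top-level package re-exports the main entry point.
    print(bayes_opt.BayesianOptimization)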
--------------------------------------------------------------------------------
/tests/test_acceptance.py:
--------------------------------------------------------------------------------
1 | # import numpy as np
2 |
3 | # from bayes_opt import BayesianOptimization
4 | # from bayes_opt.util import ensure_rng
5 |
6 |
7 | # def test_simple_optimization():
8 | # """
9 | # ...
10 | # """
11 | # def f(x, y):
12 | # return -x ** 2 - (y - 1) ** 2 + 1
13 |
14 |
15 | # optimizer = BayesianOptimization(
16 | # f=f,
17 | # pbounds={"x": (-3, 3), "y": (-3, 3)},
18 | # random_state=12356,
19 | # verbose=0,
20 | # )
21 |
22 | # optimizer.maximize(init_points=0, n_iter=25)
23 |
24 | # max_target = optimizer.max["target"]
25 | # max_x = optimizer.max["params"]["x"]
26 | # max_y = optimizer.max["params"]["y"]
27 |
28 | # assert (1 - max_target) < 1e-3
29 | # assert np.abs(max_x - 0) < 1e-1
30 | # assert np.abs(max_y - 1) < 1e-1
31 |
32 |
33 | # def test_intermediate_optimization():
34 | # """
35 | # ...
36 | # """
37 | # def f(x, y, z):
38 | # x_factor = np.exp(-(x - 2) ** 2) + (1 / (x ** 2 + 1))
39 | # y_factor = np.exp(-(y - 6) ** 2 / 10)
40 | # z_factor = (1 + 0.2 * np.cos(z)) / (1 + z ** 2)
41 | # return (x_factor + y_factor) * z_factor
42 |
43 | # optimizer = BayesianOptimization(
44 | # f=f,
45 | # pbounds={"x": (-7, 7), "y": (-7, 7), "z": (-7, 7)},
46 | # random_state=56,
47 | # verbose=0,
48 | # )
49 |
50 | # optimizer.maximize(init_points=0, n_iter=150)
51 |
52 | # max_target = optimizer.max["target"]
53 | # max_x = optimizer.max["params"]["x"]
54 | # max_y = optimizer.max["params"]["y"]
55 | # max_z = optimizer.max["params"]["z"]
56 |
57 | # assert (2.640 - max_target) < 0
58 | # assert np.abs(2 - max_x) < 1e-1
59 | # assert np.abs(6 - max_y) < 1e-1
60 | # assert np.abs(0 - max_z) < 1e-1
61 |
62 |
63 | # if __name__ == '__main__':
64 | # r"""
65 | # CommandLine:
66 | # python tests/test_bayesian_optimization.py
67 | # """
68 | # import pytest
69 | # pytest.main([__file__])
70 |
--------------------------------------------------------------------------------
/tests/test_bayesian_optimization.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import numpy as np
3 | from bayes_opt import UtilityFunction
4 | from bayes_opt import BayesianOptimization
5 | from bayes_opt.logger import ScreenLogger
6 | from bayes_opt.event import Events, DEFAULT_EVENTS
7 |
8 |
9 | def target_func(**kwargs):
10 | # arbitrary target func
11 | return sum(kwargs.values())
12 |
13 |
14 | PBOUNDS = {'p1': (0, 10), 'p2': (0, 10)}
15 |
16 |
17 | def test_register():
18 | optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1)
19 | assert len(optimizer.space) == 0
20 |
21 | optimizer.register(params={"p1": 1, "p2": 2}, target=3)
22 | assert len(optimizer.res) == 1
23 | assert len(optimizer.space) == 1
24 |
25 | optimizer.space.register(params={"p1": 5, "p2": 4}, target=9)
26 | assert len(optimizer.res) == 2
27 | assert len(optimizer.space) == 2
28 |
29 | with pytest.raises(KeyError):
30 | optimizer.register(params={"p1": 1, "p2": 2}, target=3)
31 | with pytest.raises(KeyError):
32 | optimizer.register(params={"p1": 5, "p2": 4}, target=9)
33 |
34 |
35 | def test_probe_lazy():
36 | optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1)
37 |
38 | optimizer.probe(params={"p1": 1, "p2": 2}, lazy=True)
39 | assert len(optimizer.space) == 0
40 | assert len(optimizer._queue) == 1
41 |
42 | optimizer.probe(params={"p1": 6, "p2": 2}, lazy=True)
43 | assert len(optimizer.space) == 0
44 | assert len(optimizer._queue) == 2
45 |
46 | optimizer.probe(params={"p1": 6, "p2": 2}, lazy=True)
47 | assert len(optimizer.space) == 0
48 | assert len(optimizer._queue) == 3
49 |
50 |
51 | def test_probe_eager():
52 | optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1)
53 |
54 | optimizer.probe(params={"p1": 1, "p2": 2}, lazy=False)
55 | assert len(optimizer.space) == 1
56 | assert len(optimizer._queue) == 0
57 | assert optimizer.max["target"] == 3
58 | assert optimizer.max["params"] == {"p1": 1, "p2": 2}
59 |
60 | optimizer.probe(params={"p1": 3, "p2": 3}, lazy=False)
61 | assert len(optimizer.space) == 2
62 | assert len(optimizer._queue) == 0
63 | assert optimizer.max["target"] == 6
64 | assert optimizer.max["params"] == {"p1": 3, "p2": 3}
65 |
66 | optimizer.probe(params={"p1": 3, "p2": 3}, lazy=False)
67 | assert len(optimizer.space) == 2
68 | assert len(optimizer._queue) == 0
69 | assert optimizer.max["target"] == 6
70 | assert optimizer.max["params"] == {"p1": 3, "p2": 3}
71 |
72 |
73 | def test_suggest_at_random():
74 | util = UtilityFunction(kind="poi", kappa=5, xi=0)
75 | optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1)
76 |
77 | for _ in range(50):
78 | sample = optimizer.space.params_to_array(optimizer.suggest(util))
79 | assert len(sample) == optimizer.space.dim
80 | assert all(sample >= optimizer.space.bounds[:, 0])
81 | assert all(sample <= optimizer.space.bounds[:, 1])
82 |
83 |
84 | def test_suggest_with_one_observation():
85 | util = UtilityFunction(kind="ucb", kappa=5, xi=0)
86 | optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1)
87 |
88 | optimizer.register(params={"p1": 1, "p2": 2}, target=3)
89 |
90 | for _ in range(5):
91 | sample = optimizer.space.params_to_array(optimizer.suggest(util))
92 | assert len(sample) == optimizer.space.dim
93 | assert all(sample >= optimizer.space.bounds[:, 0])
94 | assert all(sample <= optimizer.space.bounds[:, 1])
95 |
96 | # suggestion = optimizer.suggest(util)
97 | # for _ in range(5):
98 | # new_suggestion = optimizer.suggest(util)
99 | # assert suggestion == new_suggestion
100 |
101 |
102 | def test_prime_queue_all_empty():
103 | optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1)
104 | assert len(optimizer._queue) == 0
105 | assert len(optimizer.space) == 0
106 |
107 | optimizer._prime_queue(init_points=0)
108 | assert len(optimizer._queue) == 1
109 | assert len(optimizer.space) == 0
110 |
111 |
112 | def test_prime_queue_empty_with_init():
113 | optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1)
114 | assert len(optimizer._queue) == 0
115 | assert len(optimizer.space) == 0
116 |
117 | optimizer._prime_queue(init_points=5)
118 | assert len(optimizer._queue) == 5
119 | assert len(optimizer.space) == 0
120 |
121 |
122 | def test_prime_queue_with_register():
123 | optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1)
124 | assert len(optimizer._queue) == 0
125 | assert len(optimizer.space) == 0
126 |
127 | optimizer.register(params={"p1": 1, "p2": 2}, target=3)
128 | optimizer._prime_queue(init_points=0)
129 | assert len(optimizer._queue) == 0
130 | assert len(optimizer.space) == 1
131 |
132 |
133 | def test_prime_queue_with_register_and_init():
134 | optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1)
135 | assert len(optimizer._queue) == 0
136 | assert len(optimizer.space) == 0
137 |
138 | optimizer.register(params={"p1": 1, "p2": 2}, target=3)
139 | optimizer._prime_queue(init_points=3)
140 | assert len(optimizer._queue) == 3
141 | assert len(optimizer.space) == 1
142 |
143 |
144 | def test_prime_subscriptions():
145 | optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1)
146 | optimizer._prime_subscriptions()
147 |
148 | # Test that the default observer is correctly subscribed
149 | for event in DEFAULT_EVENTS:
150 | assert all([
151 | isinstance(k, ScreenLogger) for k in
152 | optimizer._events[event].keys()
153 | ])
154 | assert all([
155 | hasattr(k, "update") for k in
156 | optimizer._events[event].keys()
157 | ])
158 |
159 | test_subscriber = "test_subscriber"
160 |
161 | def test_callback(event, instance):
162 | pass
163 |
164 | optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1)
165 | optimizer.subscribe(
166 | event=Events.OPTIMIZATION_START,
167 | subscriber=test_subscriber,
168 | callback=test_callback,
169 | )
170 | # Test that the desired observer is subscribed
171 | assert all([
172 | k == test_subscriber for k in
173 | optimizer._events[Events.OPTIMIZATION_START].keys()
174 | ])
175 | assert all([
176 | v == test_callback for v in
177 | optimizer._events[Events.OPTIMIZATION_START].values()
178 | ])
179 |
180 |     # Check that prime subscriptions won't overwrite manual subscriptions
181 | optimizer._prime_subscriptions()
182 | assert all([
183 | k == test_subscriber for k in
184 | optimizer._events[Events.OPTIMIZATION_START].keys()
185 | ])
186 | assert all([
187 | v == test_callback for v in
188 | optimizer._events[Events.OPTIMIZATION_START].values()
189 | ])
190 |
191 | assert optimizer._events[Events.OPTIMIZATION_STEP] == {}
192 | assert optimizer._events[Events.OPTIMIZATION_END] == {}
193 |
194 | with pytest.raises(KeyError):
195 | optimizer._events["other"]
196 |
197 |
198 | def test_set_bounds():
199 | pbounds = {
200 | 'p1': (0, 1),
201 | 'p3': (0, 3),
202 | 'p2': (0, 2),
203 | 'p4': (0, 4),
204 | }
205 | optimizer = BayesianOptimization(target_func, pbounds, random_state=1)
206 |
207 | # Ignore unknown keys
208 | optimizer.set_bounds({"other": (7, 8)})
209 | assert all(optimizer.space.bounds[:, 0] == np.array([0, 0, 0, 0]))
210 | assert all(optimizer.space.bounds[:, 1] == np.array([1, 2, 3, 4]))
211 |
212 | # Update bounds accordingly
213 | optimizer.set_bounds({"p2": (1, 8)})
214 | assert all(optimizer.space.bounds[:, 0] == np.array([0, 1, 0, 0]))
215 | assert all(optimizer.space.bounds[:, 1] == np.array([1, 8, 3, 4]))
216 |
217 |
218 | def test_set_gp_params():
219 | optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1)
220 | assert optimizer._gp.alpha == 1e-6
221 | assert optimizer._gp.n_restarts_optimizer == 5
222 |
223 | optimizer.set_gp_params(alpha=1e-2)
224 | assert optimizer._gp.alpha == 1e-2
225 | assert optimizer._gp.n_restarts_optimizer == 5
226 |
227 | optimizer.set_gp_params(n_restarts_optimizer=7)
228 | assert optimizer._gp.alpha == 1e-2
229 | assert optimizer._gp.n_restarts_optimizer == 7
230 |
231 |
232 | def test_maximize():
233 |
234 | class Tracker:
235 | def __init__(self):
236 | self.start_count = 0
237 | self.step_count = 0
238 | self.end_count = 0
239 |
240 | def update_start(self, event, instance):
241 | self.start_count += 1
242 |
243 | def update_step(self, event, instance):
244 | self.step_count += 1
245 |
246 | def update_end(self, event, instance):
247 | self.end_count += 1
248 |
249 | def reset(self):
250 | self.__init__()
251 |
252 | optimizer = BayesianOptimization(target_func, PBOUNDS,
253 | random_state=np.random.RandomState(1))
254 |
255 | tracker = Tracker()
256 | optimizer.subscribe(
257 | event=Events.OPTIMIZATION_START,
258 | subscriber=tracker,
259 | callback=tracker.update_start,
260 | )
261 | optimizer.subscribe(
262 | event=Events.OPTIMIZATION_STEP,
263 | subscriber=tracker,
264 | callback=tracker.update_step,
265 | )
266 | optimizer.subscribe(
267 | event=Events.OPTIMIZATION_END,
268 | subscriber=tracker,
269 | callback=tracker.update_end,
270 | )
271 |
272 | optimizer.maximize(init_points=0, n_iter=0)
273 | assert optimizer._queue.empty
274 | assert len(optimizer.space) == 1
275 | assert tracker.start_count == 1
276 | assert tracker.step_count == 1
277 | assert tracker.end_count == 1
278 |
279 | optimizer.maximize(init_points=2, n_iter=0, alpha=1e-2)
280 | assert optimizer._queue.empty
281 | assert len(optimizer.space) == 3
282 | assert optimizer._gp.alpha == 1e-2
283 | assert tracker.start_count == 2
284 | assert tracker.step_count == 3
285 | assert tracker.end_count == 2
286 |
287 | optimizer.maximize(init_points=0, n_iter=2)
288 | assert optimizer._queue.empty
289 | assert len(optimizer.space) == 5
290 | assert tracker.start_count == 3
291 | assert tracker.step_count == 5
292 | assert tracker.end_count == 3
293 |
294 |
295 | def test_define_wrong_transformer():
296 | with pytest.raises(TypeError):
297 |         BayesianOptimization(target_func, PBOUNDS,
298 |                              random_state=np.random.RandomState(1),
299 |                              bounds_transformer=3)
300 |
301 |
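302 | def test_max_reflects_registered_points():
303 |     # Added example (a sketch, not part of the original suite): optimizer.max
304 |     # is expected to mirror the best target seen so far, including points
305 |     # added via register(); the parameter values here are illustrative only.
306 |     optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1)
307 |     optimizer.register(params={"p1": 1, "p2": 2}, target=3)
308 |     optimizer.register(params={"p1": 0, "p2": 1}, target=7)
309 |     assert optimizer.max["target"] == 7
310 |     assert optimizer.max["params"] == {"p1": 0, "p2": 1}
311 |
312 |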
313 | if __name__ == '__main__':
314 |     r"""
315 |     CommandLine:
316 |         python tests/test_bayesian_optimization.py
317 |     """
318 |     pytest.main([__file__])
319 |
--------------------------------------------------------------------------------
/tests/test_logs.json:
--------------------------------------------------------------------------------
1 | {"datetime": {"delta": 0.0, "datetime": "2018-11-25 08:29:25", "elapsed": 0.0}, "params": {"y": 1.3219469606529488, "x": 2.8340440094051482}, "target": -7.135455292718879}
2 | {"datetime": {"delta": 0.001301, "datetime": "2018-11-25 08:29:25", "elapsed": 0.001301}, "params": {"y": -1.1860045642089614, "x": 2.0002287496346898}, "target": -7.779531005607566}
3 | {"datetime": {"delta": 1.075242, "datetime": "2018-11-25 08:29:26", "elapsed": 1.076543}, "params": {"y": 3.0, "x": 4.0}, "target": -19.0}
4 | {"datetime": {"delta": 0.239797, "datetime": "2018-11-25 08:29:26", "elapsed": 1.31634}, "params": {"y": -2.412527795983739, "x": 2.3776144540856503}, "target": -16.29839645063864}
5 | {"datetime": {"delta": 0.247293, "datetime": "2018-11-25 08:29:26", "elapsed": 1.563633}, "params": {"y": -0.005822117636089974, "x": 2.104665051994087}, "target": -4.441293113411222}
6 |
--------------------------------------------------------------------------------
/tests/test_observer.py:
--------------------------------------------------------------------------------
1 | from bayes_opt.bayesian_optimization import Observable
2 | from bayes_opt.observer import _Tracker
3 | from bayes_opt.event import Events
4 |
5 |
6 | EVENTS = ["a", "b", "c"]
7 |
8 |
9 | class SimpleObserver:
10 | def __init__(self):
11 | self.counter = 0
12 |
13 | def update(self, event, instance):
14 | self.counter += 1
15 |
16 |
17 | def test_get_subscribers():
18 | observer = SimpleObserver()
19 | observable = Observable(events=EVENTS)
20 | observable.subscribe("a", observer)
21 |
22 | assert observer in observable.get_subscribers('a')
23 | assert observer not in observable.get_subscribers('b')
24 | assert observer not in observable.get_subscribers('c')
25 |
26 | assert len(observable.get_subscribers('a')) == 1
27 | assert len(observable.get_subscribers('b')) == 0
28 | assert len(observable.get_subscribers('c')) == 0
29 |
30 |
31 | def test_unsubscribe():
32 | observer = SimpleObserver()
33 | observable = Observable(events=EVENTS)
34 |
35 | observable.subscribe("a", observer)
36 | observable.unsubscribe("a", observer)
37 |
38 | assert observer not in observable.get_subscribers('a')
39 | assert len(observable.get_subscribers('a')) == 0
40 |
41 |
42 | def test_dispatch():
43 | observer_a = SimpleObserver()
44 | observer_b = SimpleObserver()
45 | observable = Observable(events=EVENTS)
46 |
47 | observable.subscribe("a", observer_a)
48 | observable.subscribe("b", observer_b)
49 |
50 | assert observer_a.counter == 0
51 | assert observer_b.counter == 0
52 |
53 | observable.dispatch('b')
54 | assert observer_a.counter == 0
55 | assert observer_b.counter == 1
56 |
57 | observable.dispatch('a')
58 | observable.dispatch('b')
59 | assert observer_a.counter == 1
60 | assert observer_b.counter == 2
61 |
62 | observable.dispatch('a')
63 | observable.dispatch('c')
64 | assert observer_a.counter == 2
65 |     assert observer_b.counter == 2
66 |
67 |
68 | def test_tracker():
69 | class MockInstance:
70 | def __init__(self, max_target=1, max_params=[1, 1]):
71 | self._max_target = max_target
72 | self._max_params = max_params
73 |
74 | @property
75 | def max(self):
76 | return {"target": self._max_target, "params": self._max_params}
77 |
78 | tracker = _Tracker()
79 | assert tracker._iterations == 0
80 | assert tracker._previous_max is None
81 | assert tracker._previous_max_params is None
82 |
83 | test_instance = MockInstance()
84 | tracker._update_tracker("other_event", test_instance)
85 | assert tracker._iterations == 0
86 | assert tracker._previous_max is None
87 | assert tracker._previous_max_params is None
88 |
89 | tracker._update_tracker(Events.OPTIMIZATION_STEP, test_instance)
90 | assert tracker._iterations == 1
91 | assert tracker._previous_max == 1
92 | assert tracker._previous_max_params == [1, 1]
93 |
94 | new_instance = MockInstance(max_target=7, max_params=[7, 7])
95 | tracker._update_tracker(Events.OPTIMIZATION_STEP, new_instance)
96 | assert tracker._iterations == 2
97 | assert tracker._previous_max == 7
98 | assert tracker._previous_max_params == [7, 7]
99 |
100 | other_instance = MockInstance(max_target=2, max_params=[2, 2])
101 | tracker._update_tracker(Events.OPTIMIZATION_STEP, other_instance)
102 | assert tracker._iterations == 3
103 | assert tracker._previous_max == 7
104 | assert tracker._previous_max_params == [7, 7]
105 |
106 | tracker._time_metrics()
107 | start_time = tracker._start_time
108 | previous_time = tracker._previous_time
109 |
110 | tracker._time_metrics()
111 | assert start_time == tracker._start_time
112 | assert previous_time < tracker._previous_time
113 |
114 |
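115 | def test_multi_event_subscription():
116 |     # Added example (a sketch, not part of the original suite): one observer
117 |     # can be subscribed to several events; its update() should fire once per
118 |     # dispatch of any event it follows and ignore events it does not.
119 |     observer = SimpleObserver()
120 |     observable = Observable(events=EVENTS)
121 |
122 |     observable.subscribe("a", observer)
123 |     observable.subscribe("b", observer)
124 |
125 |     observable.dispatch("a")
126 |     observable.dispatch("b")
127 |     observable.dispatch("c")
128 |     assert observer.counter == 2
129 |
130 |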
131 | if __name__ == '__main__':
132 |     r"""
133 |     CommandLine:
134 |         python tests/test_observer.py
135 |     """
136 |     import pytest
137 |     pytest.main([__file__])
138 |
--------------------------------------------------------------------------------
/tests/test_queue.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from bayes_opt.bayesian_optimization import Queue
3 |
4 |
5 | def test_add():
6 | queue = Queue()
7 |
8 | assert len(queue) == 0
9 | assert queue.empty
10 |
11 | queue.add(1)
12 | assert len(queue) == 1
13 |
14 | queue.add(1)
15 | assert len(queue) == 2
16 |
17 | queue.add(2)
18 | assert len(queue) == 3
19 |
20 |
21 | def test_queue():
22 |
23 | queue = Queue()
24 |
25 | with pytest.raises(StopIteration):
26 | next(queue)
27 |
28 | queue.add(1)
29 | queue.add(2)
30 | queue.add(3)
31 |
32 | assert len(queue) == 3
33 | assert not queue.empty
34 |
35 | assert next(queue) == 1
36 | assert len(queue) == 2
37 |
38 | assert next(queue) == 2
39 | assert next(queue) == 3
40 | assert len(queue) == 0
41 |
42 |
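43 | def test_queue_fifo_drain():
44 |     # Added example (a sketch, not part of the original suite): the queue is
45 |     # consumed front-to-back, so draining it should reproduce insertion order.
46 |     queue = Queue()
47 |     for item in ("a", "b", "c"):
48 |         queue.add(item)
49 |
50 |     drained = []
51 |     while not queue.empty:
52 |         drained.append(next(queue))
53 |     assert drained == ["a", "b", "c"]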
54 |
55 |
56 | if __name__ == '__main__':
57 |     r"""
58 |     CommandLine:
59 |         python tests/test_queue.py
60 |     """
61 |     pytest.main([__file__])
62 |
--------------------------------------------------------------------------------
/tests/test_seq_domain_red.py:
--------------------------------------------------------------------------------
1 | from bayes_opt import SequentialDomainReductionTransformer
2 | from bayes_opt import BayesianOptimization
3 |
4 |
5 | def black_box_function(x, y):
6 | """Function with unknown internals we wish to maximize.
7 |
8 | This is just serving as an example, for all intents and
9 | purposes think of the internals of this function, i.e.: the process
10 | which generates its output values, as unknown.
11 | """
12 | return -x ** 2 - (y - 1) ** 2 + 1
13 |
14 |
15 | def test_bound_x_maximize():
16 |
17 | class Tracker:
18 | def __init__(self):
19 | self.start_count = 0
20 | self.step_count = 0
21 | self.end_count = 0
22 |
23 | def update_start(self, event, instance):
24 | self.start_count += 1
25 |
26 | def update_step(self, event, instance):
27 | self.step_count += 1
28 |
29 | def update_end(self, event, instance):
30 | self.end_count += 1
31 |
32 | def reset(self):
33 | self.__init__()
34 |
35 | bounds_transformer = SequentialDomainReductionTransformer()
36 | pbounds = {'x': (-10, 10), 'y': (-10, 10)}
37 | n_iter = 10
38 |
39 | standard_optimizer = BayesianOptimization(
40 | f=black_box_function,
41 | pbounds=pbounds,
42 | verbose=2,
43 | random_state=1,
44 | )
45 |
46 | standard_optimizer.maximize(
47 | init_points=2,
48 | n_iter=n_iter,
49 | )
50 |
51 | mutated_optimizer = BayesianOptimization(
52 | f=black_box_function,
53 | pbounds=pbounds,
54 | verbose=2,
55 | random_state=1,
56 | bounds_transformer=bounds_transformer
57 | )
58 |
59 | mutated_optimizer.maximize(
60 | init_points=2,
61 | n_iter=n_iter,
62 | )
63 |
64 | assert len(standard_optimizer.space) == len(mutated_optimizer.space)
65 | assert not (standard_optimizer._space.bounds ==
66 | mutated_optimizer._space.bounds).any()
67 |
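68 |
69 | def test_mutated_optimizer_bounds_shrink():
70 |     # Added example (a sketch, not part of the original suite): assuming the
71 |     # transformer only pans/contracts the window around the incumbent, the
72 |     # transformed bound widths should not exceed the original width of 20.
73 |     bounds_transformer = SequentialDomainReductionTransformer()
74 |     pbounds = {'x': (-10, 10), 'y': (-10, 10)}
75 |
76 |     mutated_optimizer = BayesianOptimization(
77 |         f=black_box_function,
78 |         pbounds=pbounds,
79 |         verbose=0,
80 |         random_state=1,
81 |         bounds_transformer=bounds_transformer
82 |     )
83 |     mutated_optimizer.maximize(init_points=2, n_iter=10)
84 |
85 |     widths = (mutated_optimizer._space.bounds[:, 1]
86 |               - mutated_optimizer._space.bounds[:, 0])
87 |     assert all(widths <= 20.0)
88 |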
--------------------------------------------------------------------------------
/tests/test_target_space.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import numpy as np
3 | from bayes_opt.target_space import TargetSpace
4 |
5 |
6 | def target_func(**kwargs):
7 | # arbitrary target func
8 | return sum(kwargs.values())
9 |
10 |
11 | PBOUNDS = {'p1': (0, 1), 'p2': (1, 100)}
12 |
13 |
14 | def test_keys_and_bounds_in_same_order():
15 | pbounds = {
16 | 'p1': (0, 1),
17 | 'p3': (0, 3),
18 | 'p2': (0, 2),
19 | 'p4': (0, 4),
20 | }
21 | space = TargetSpace(target_func, pbounds)
22 |
23 | assert space.dim == len(pbounds)
24 | assert space.empty
25 | assert space.keys == ["p1", "p2", "p3", "p4"]
26 | assert all(space.bounds[:, 0] == np.array([0, 0, 0, 0]))
27 | assert all(space.bounds[:, 1] == np.array([1, 2, 3, 4]))
28 |
29 |
30 | def test_params_to_array():
31 | space = TargetSpace(target_func, PBOUNDS)
32 |
33 | assert all(space.params_to_array({"p1": 2, "p2": 3}) == np.array([2, 3]))
34 | assert all(space.params_to_array({"p2": 2, "p1": 9}) == np.array([9, 2]))
35 | with pytest.raises(ValueError):
36 | space.params_to_array({"p2": 1})
37 | with pytest.raises(ValueError):
38 | space.params_to_array({"p2": 1, "p1": 7, "other": 4})
39 | with pytest.raises(ValueError):
40 | space.params_to_array({"other": 1})
41 |
42 |
43 | def test_array_to_params():
44 | space = TargetSpace(target_func, PBOUNDS)
45 |
46 | assert space.array_to_params(np.array([2, 3])) == {"p1": 2, "p2": 3}
47 | with pytest.raises(ValueError):
48 | space.array_to_params(np.array([2]))
49 | with pytest.raises(ValueError):
50 | space.array_to_params(np.array([2, 3, 5]))
51 |
52 |
53 | def test_as_array():
54 | space = TargetSpace(target_func, PBOUNDS)
55 |
56 | x = space._as_array([0, 1])
57 | assert x.shape == (2,)
58 | assert all(x == np.array([0, 1]))
59 |
60 | x = space._as_array({"p2": 1, "p1": 2})
61 | assert x.shape == (2,)
62 | assert all(x == np.array([2, 1]))
63 |
64 | with pytest.raises(ValueError):
65 | x = space._as_array([2, 1, 7])
66 | with pytest.raises(ValueError):
67 | x = space._as_array({"p2": 1, "p1": 2, "other": 7})
68 | with pytest.raises(ValueError):
69 | x = space._as_array({"p2": 1})
70 | with pytest.raises(ValueError):
71 | x = space._as_array({"other": 7})
72 |
73 |
74 | def test_register():
75 | space = TargetSpace(target_func, PBOUNDS)
76 |
77 | assert len(space) == 0
78 | # registering with dict
79 | space.register(params={"p1": 1, "p2": 2}, target=3)
80 | assert len(space) == 1
81 | assert all(space.params[0] == np.array([1, 2]))
82 | assert all(space.target == np.array([3]))
83 |
84 |     # registering a second point (still with a dict)
85 | space.register(params={"p1": 5, "p2": 4}, target=9)
86 | assert len(space) == 2
87 | assert all(space.params[1] == np.array([5, 4]))
88 | assert all(space.target == np.array([3, 9]))
89 |
90 | with pytest.raises(KeyError):
91 | space.register(params={"p1": 1, "p2": 2}, target=3)
92 | with pytest.raises(KeyError):
93 | space.register(params={"p1": 5, "p2": 4}, target=9)
94 |
95 |
96 | def test_probe():
97 | space = TargetSpace(target_func, PBOUNDS)
98 |
99 | assert len(space) == 0
100 | # probing with dict
101 | space.probe(params={"p1": 1, "p2": 2})
102 | assert len(space) == 1
103 | assert all(space.params[0] == np.array([1, 2]))
104 | assert all(space.target == np.array([3]))
105 |
106 | # probing with array
107 | space.probe(np.array([5, 4]))
108 | assert len(space) == 2
109 | assert all(space.params[1] == np.array([5, 4]))
110 | assert all(space.target == np.array([3, 9]))
111 |
112 | # probing same point with dict
113 | space.probe(params={"p1": 1, "p2": 2})
114 | assert len(space) == 2
115 | assert all(space.params[1] == np.array([5, 4]))
116 | assert all(space.target == np.array([3, 9]))
117 |
118 | # probing same point with array
119 | space.probe(np.array([5, 4]))
120 | assert len(space) == 2
121 | assert all(space.params[1] == np.array([5, 4]))
122 | assert all(space.target == np.array([3, 9]))
123 |
124 |
125 | def test_random_sample():
126 | pbounds = {
127 | 'p1': (0, 1),
128 | 'p3': (0, 3),
129 | 'p2': (0, 2),
130 | 'p4': (0, 4),
131 | }
132 | space = TargetSpace(target_func, pbounds, random_state=8)
133 |
134 | for _ in range(50):
135 | random_sample = space.random_sample()
136 | assert len(random_sample) == space.dim
137 | assert all(random_sample >= space.bounds[:, 0])
138 | assert all(random_sample <= space.bounds[:, 1])
139 |
140 |
141 | def test_max():
142 | space = TargetSpace(target_func, PBOUNDS)
143 |
144 | assert space.max() == {}
145 | space.probe(params={"p1": 1, "p2": 2})
146 | space.probe(params={"p1": 5, "p2": 4})
147 | space.probe(params={"p1": 2, "p2": 3})
148 | space.probe(params={"p1": 1, "p2": 6})
149 | assert space.max() == {"params": {"p1": 5, "p2": 4}, "target": 9}
150 |
151 |
152 | def test_res():
153 | space = TargetSpace(target_func, PBOUNDS)
154 |
155 | assert space.res() == []
156 | space.probe(params={"p1": 1, "p2": 2})
157 | space.probe(params={"p1": 5, "p2": 4})
158 | space.probe(params={"p1": 2, "p2": 3})
159 | space.probe(params={"p1": 1, "p2": 6})
160 |
161 | expected_res = [
162 | {"params": {"p1": 1, "p2": 2}, "target": 3},
163 | {"params": {"p1": 5, "p2": 4}, "target": 9},
164 | {"params": {"p1": 2, "p2": 3}, "target": 5},
165 | {"params": {"p1": 1, "p2": 6}, "target": 7},
166 | ]
167 | assert len(space.res()) == 4
168 | assert space.res() == expected_res
169 |
170 |
171 | def test_set_bounds():
172 | pbounds = {
173 | 'p1': (0, 1),
174 | 'p3': (0, 3),
175 | 'p2': (0, 2),
176 | 'p4': (0, 4),
177 | }
178 | space = TargetSpace(target_func, pbounds)
179 |
180 | # Ignore unknown keys
181 | space.set_bounds({"other": (7, 8)})
182 | assert all(space.bounds[:, 0] == np.array([0, 0, 0, 0]))
183 | assert all(space.bounds[:, 1] == np.array([1, 2, 3, 4]))
184 |
185 | # Update bounds accordingly
186 | space.set_bounds({"p2": (1, 8)})
187 | assert all(space.bounds[:, 0] == np.array([0, 1, 0, 0]))
188 | assert all(space.bounds[:, 1] == np.array([1, 8, 3, 4]))
189 |
190 |
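191 | def test_params_array_round_trip():
192 |     # Added example (a sketch, not part of the original suite): params_to_array
193 |     # and array_to_params should act as inverses for keys matching the space.
194 |     space = TargetSpace(target_func, PBOUNDS)
195 |
196 |     params = {"p1": 0.5, "p2": 42}
197 |     arr = space.params_to_array(params)
198 |     assert space.array_to_params(arr) == {"p1": 0.5, "p2": 42}
199 |
200 |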
201 | if __name__ == '__main__':
202 |     r"""
203 |     CommandLine:
204 |         python tests/test_target_space.py
205 |     """
206 |     pytest.main([__file__])
207 |
--------------------------------------------------------------------------------
/tests/test_util.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import numpy as np
3 |
4 | from bayes_opt import BayesianOptimization
5 | from bayes_opt.util import UtilityFunction, Colours
6 | from bayes_opt.util import acq_max, load_logs, ensure_rng
7 |
8 | from sklearn.gaussian_process.kernels import Matern
9 | from sklearn.gaussian_process import GaussianProcessRegressor
10 |
11 |
12 | def get_globals():
13 | X = np.array([
14 | [0.00, 0.00],
15 | [0.99, 0.99],
16 | [0.00, 0.99],
17 | [0.99, 0.00],
18 | [0.50, 0.50],
19 | [0.25, 0.50],
20 | [0.50, 0.25],
21 | [0.75, 0.50],
22 | [0.50, 0.75],
23 | ])
24 |
25 | def get_y(X):
26 | return -(X[:, 0] - 0.3) ** 2 - 0.5 * (X[:, 1] - 0.6)**2 + 2
27 | y = get_y(X)
28 |
29 | mesh = np.dstack(
30 | np.meshgrid(np.arange(0, 1, 0.005), np.arange(0, 1, 0.005))
31 | ).reshape(-1, 2)
32 |
33 | GP = GaussianProcessRegressor(
34 | kernel=Matern(),
35 | n_restarts_optimizer=25,
36 | )
37 | GP.fit(X, y)
38 |
39 | return {'x': X, 'y': y, 'gp': GP, 'mesh': mesh}
40 |
41 |
42 | def brute_force_maximum(MESH, GP, kind='ucb', kappa=1.0, xi=1.0):
43 | uf = UtilityFunction(kind=kind, kappa=kappa, xi=xi)
44 |
45 | mesh_vals = uf.utility(MESH, GP, 2)
46 | max_val = mesh_vals.max()
47 | max_arg_val = MESH[np.argmax(mesh_vals)]
48 |
49 | return max_val, max_arg_val
50 |
51 |
52 | GLOB = get_globals()
53 | X, Y, GP, MESH = GLOB['x'], GLOB['y'], GLOB['gp'], GLOB['mesh']
54 |
55 |
56 | def test_utility_function():
57 | util = UtilityFunction(kind="ucb", kappa=1.0, xi=1.0)
58 | assert util.kind == "ucb"
59 |
60 | util = UtilityFunction(kind="ei", kappa=1.0, xi=1.0)
61 | assert util.kind == "ei"
62 |
63 | util = UtilityFunction(kind="poi", kappa=1.0, xi=1.0)
64 | assert util.kind == "poi"
65 |
66 | with pytest.raises(NotImplementedError):
67 | util = UtilityFunction(kind="other", kappa=1.0, xi=1.0)
68 |
69 |
70 | def test_acq_with_ucb():
71 | util = UtilityFunction(kind="ucb", kappa=1.0, xi=1.0)
72 |     epsilon = 1e-2
73 | y_max = 2.0
74 |
75 | max_arg = acq_max(
76 | util.utility,
77 | GP,
78 | y_max,
79 | bounds=np.array([[0, 1], [0, 1]]),
80 | random_state=ensure_rng(0),
81 | n_iter=20
82 | )
83 | _, brute_max_arg = brute_force_maximum(MESH, GP, kind='ucb', kappa=1.0, xi=1.0)
84 |
85 |     assert all(abs(brute_max_arg - max_arg) < epsilon)
86 |
87 |
88 | def test_acq_with_ei():
89 | util = UtilityFunction(kind="ei", kappa=1.0, xi=1e-6)
90 |     epsilon = 1e-2
91 | y_max = 2.0
92 |
93 | max_arg = acq_max(
94 | util.utility,
95 | GP,
96 | y_max,
97 | bounds=np.array([[0, 1], [0, 1]]),
98 | random_state=ensure_rng(0),
99 | n_iter=200,
100 | )
101 | _, brute_max_arg = brute_force_maximum(MESH, GP, kind='ei', kappa=1.0, xi=1e-6)
102 |
103 |     assert all(abs(brute_max_arg - max_arg) < epsilon)
104 |
105 |
106 | def test_acq_with_poi():
107 | util = UtilityFunction(kind="poi", kappa=1.0, xi=1e-4)
108 |     epsilon = 1e-2
109 | y_max = 2.0
110 |
111 | max_arg = acq_max(
112 | util.utility,
113 | GP,
114 | y_max,
115 | bounds=np.array([[0, 1], [0, 1]]),
116 | random_state=ensure_rng(0),
117 | n_iter=200,
118 | )
119 | _, brute_max_arg = brute_force_maximum(MESH, GP, kind='poi', kappa=1.0, xi=1e-4)
120 |
121 |     assert all(abs(brute_max_arg - max_arg) < epsilon)
122 |
123 |
124 | def test_logs():
125 |
126 | def f(x, y):
127 | return -x ** 2 - (y - 1) ** 2 + 1
128 |
129 | optimizer = BayesianOptimization(
130 | f=f,
131 | pbounds={"x": (-2, 2), "y": (-2, 2)}
132 | )
133 | assert len(optimizer.space) == 0
134 |
135 | load_logs(optimizer, "./tests/test_logs.json")
136 | assert len(optimizer.space) == 5
137 |
138 | load_logs(optimizer, ["./tests/test_logs.json"])
139 | assert len(optimizer.space) == 5
140 |
141 | other_optimizer = BayesianOptimization(
142 | f=lambda x: -x ** 2,
143 | pbounds={"x": (-2, 2)}
144 | )
145 | with pytest.raises(ValueError):
146 | load_logs(other_optimizer, ["./tests/test_logs.json"])
147 |
148 |
149 | def test_colours():
150 | colour_wrappers = [
151 | (Colours.END, Colours.black),
152 | (Colours.BLUE, Colours.blue),
153 | (Colours.BOLD, Colours.bold),
154 | (Colours.CYAN, Colours.cyan),
155 | (Colours.DARKCYAN, Colours.darkcyan),
156 | (Colours.GREEN, Colours.green),
157 | (Colours.PURPLE, Colours.purple),
158 | (Colours.RED, Colours.red),
159 | (Colours.UNDERLINE, Colours.underline),
160 | (Colours.YELLOW, Colours.yellow),
161 | ]
162 |
163 | for colour, wrapper in colour_wrappers:
164 | text1 = Colours._wrap_colour("test", colour)
165 | text2 = wrapper("test")
166 |
167 | assert text1.split("test") == [colour, Colours.END]
168 | assert text2.split("test") == [colour, Colours.END]
169 |
170 |
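171 | def test_ensure_rng():
172 |     # Added example (a sketch, not part of the original suite): ensure_rng
173 |     # should accept None, an int seed, or an existing RandomState, always
174 |     # handing back a RandomState; equal seeds should reproduce equal draws.
175 |     assert isinstance(ensure_rng(None), np.random.RandomState)
176 |
177 |     seeded_a = ensure_rng(42)
178 |     seeded_b = ensure_rng(42)
179 |     assert seeded_a.rand() == seeded_b.rand()
180 |
181 |     passthrough = np.random.RandomState(0)
182 |     assert ensure_rng(passthrough) is passthrough
183 |
184 |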
185 | if __name__ == '__main__':
186 |     r"""
187 |     CommandLine:
188 |         python tests/test_util.py
189 |     """
190 |     pytest.main([__file__])
191 |
--------------------------------------------------------------------------------