├── docs ├── modules.rst ├── GPyOpt.testing.rst ├── GPyOpt.plotting.rst ├── GPyOpt.rst ├── Makefile ├── GPyOpt.core.rst ├── GPyOpt.methods.rst ├── index.rst ├── make.bat ├── GPyOpt.optimization.rst ├── GPyOpt.objective_examples.rst ├── GPyOpt.core.task.rst ├── GPyOpt.interface.rst ├── GPyOpt.util.rst ├── GPyOpt.models.rst ├── GPyOpt.core.evaluators.rst ├── GPyOpt.experiment_design.rst └── GPyOpt.acquisitions.rst ├── manual ├── figures │ ├── gpyopt.jpeg │ ├── iteration000.png │ ├── iteration001.png │ ├── iteration002.png │ ├── iteration003.png │ ├── iteration004.png │ ├── iteration005.png │ ├── iteration006.png │ ├── iteration007.png │ ├── iteration008.png │ ├── iteration009.png │ ├── iteration010.png │ ├── iteration011.png │ ├── iteration012.png │ ├── iteration013.png │ ├── iteration014.png │ └── iteration015.png ├── data │ ├── branin.py │ └── config.json ├── notebooks_check.py ├── GPyOpt_models.ipynb ├── GPyOpt_initial_design.ipynb └── GPyOpt_creating_new_models.ipynb ├── GPyOpt ├── util │ ├── __init__.py │ ├── stats.py │ ├── io.py │ ├── duplicate_manager.py │ └── mcmc_sampler.py ├── testing │ ├── __init__.py │ ├── functional_tests │ │ ├── test_files │ │ │ ├── ts_parallel_context_with_duplication_acquisition_gradient_testfile.txt │ │ │ ├── ts_parallel_context_without_duplication_acquisition_gradient_testfile.txt │ │ │ ├── constraints_acquisition_gradient_testfile.txt │ │ │ ├── update_intervals_acquisition_gradient_testfile.txt │ │ │ ├── random_parallel_context_with_duplication_acquisition_gradient_testfile.txt │ │ │ ├── random_parallel_context_without_duplication_acquisition_gradient_testfile.txt │ │ │ ├── EI_acquisition_gradient_testfile.txt │ │ │ ├── LCB_acquisition_gradient_testfile.txt │ │ │ ├── MPI_acquisition_gradient_testfile.txt │ │ │ ├── lbfgs_acquisition_gradient_testfile.txt │ │ │ ├── noiseless_evauations_acquisition_gradient_testfile.txt │ │ │ ├── output_normalization_acquisition_gradient_testfile.txt │ │ │ ├── Random_acquisition_gradient_testfile.txt 
│ │ │ ├── Local_penalization_acquisition_gradient_testfile.txt │ │ │ ├── Thompson_sampling_acquisition_gradient_testfile.txt │ │ │ ├── Random_with_duplicate_check_acquisition_gradient_testfile.txt │ │ │ ├── Thompson_sampling_with_duplicate_check_acquisition_gradient_testfile.txt │ │ │ ├── fully_discrete_domain_acquisition_gradient_testfile.txt │ │ │ ├── context_with_duplication_acquisition_gradient_testfile.txt │ │ │ ├── context_without_duplication_acquisition_gradient_testfile.txt │ │ │ ├── input_warped_GP_acquisition_gradient_testfile.txt │ │ │ └── mixed_domain_acquisition_gradient_testfile.txt │ │ ├── test_context_manager.py │ │ ├── mocks.py │ │ ├── test_duplicate_manager.py │ │ ├── test_acquisitions_gradient.py │ │ ├── base_test_case.py │ │ ├── test_input_warped_gp.py │ │ ├── test_constraints.py │ │ └── test_mixed_domain.py │ ├── optimization_tests │ │ ├── test_optimizer_creation.py │ │ └── test_problem_with_context.py │ ├── acquisitions_tests │ │ ├── test_mpi_mcmc_acquisition.py │ │ ├── test_entropy_search_acquisition.py │ │ ├── test_lcb_acquisition.py │ │ ├── test_mpi_acquisition.py │ │ ├── test_lcb_mcmc_acquisition.py │ │ └── test_ei_mcmc_acquisition.py │ ├── evaluators_tests │ │ └── test_batch_random.py │ └── core_tests │ │ ├── test_save.py │ │ ├── test_cost.py │ │ └── test_model.py ├── plotting │ └── __init__.py ├── core │ ├── errors.py │ ├── __init__.py │ ├── task │ │ ├── __init__.py │ │ ├── cost.py │ │ └── objective.py │ └── evaluators │ │ ├── __init__.py │ │ ├── sequential.py │ │ ├── batch_random.py │ │ ├── batch_thompson.py │ │ ├── batch_local_penalization.py │ │ └── base.py ├── __version__.py ├── optimization │ └── __init__.py ├── objective_examples │ ├── __init__.py │ └── experiments1d.py ├── methods │ ├── __init__.py │ └── modular_bayesian_optimization.py ├── interface │ ├── __init__.py │ ├── func_loader.py │ └── config_parser.py ├── experiment_design │ ├── base.py │ ├── __init__.py │ ├── sobol_design.py │ ├── latin_design.py │ ├── grid_design.py │ 
└── random_design.py ├── models │ ├── __init__.py │ ├── base.py │ ├── warpedgpmodel.py │ ├── input_warped_gpmodel.py │ └── rfmodel.py ├── __init__.py └── acquisitions │ ├── __init__.py │ ├── LCB.py │ ├── EI.py │ ├── MPI.py │ ├── LCB_mcmc.py │ ├── MPI_mcmc.py │ ├── EI_mcmc.py │ └── base.py ├── setup.cfg ├── travis_tests.py ├── MANIFEST.in ├── examples ├── branin │ ├── branin.py │ └── config.json ├── six_hump_camel │ ├── README.md │ ├── camel.py │ └── config.json └── README.md ├── AUTHORS.txt ├── requirements.txt ├── .gitignore ├── gpyopt.py ├── .travis.yml ├── LICENSE.txt ├── setup.py └── README.md /docs/modules.rst: -------------------------------------------------------------------------------- 1 | GPyOpt 2 | ====== 3 | 4 | .. toctree:: 5 | :maxdepth: 4 6 | 7 | GPyOpt 8 | -------------------------------------------------------------------------------- /manual/figures/gpyopt.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SheffieldML/GPyOpt/HEAD/manual/figures/gpyopt.jpeg -------------------------------------------------------------------------------- /manual/figures/iteration000.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SheffieldML/GPyOpt/HEAD/manual/figures/iteration000.png -------------------------------------------------------------------------------- /manual/figures/iteration001.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SheffieldML/GPyOpt/HEAD/manual/figures/iteration001.png -------------------------------------------------------------------------------- /manual/figures/iteration002.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SheffieldML/GPyOpt/HEAD/manual/figures/iteration002.png -------------------------------------------------------------------------------- 
/manual/figures/iteration003.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SheffieldML/GPyOpt/HEAD/manual/figures/iteration003.png -------------------------------------------------------------------------------- /manual/figures/iteration004.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SheffieldML/GPyOpt/HEAD/manual/figures/iteration004.png -------------------------------------------------------------------------------- /manual/figures/iteration005.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SheffieldML/GPyOpt/HEAD/manual/figures/iteration005.png -------------------------------------------------------------------------------- /manual/figures/iteration006.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SheffieldML/GPyOpt/HEAD/manual/figures/iteration006.png -------------------------------------------------------------------------------- /manual/figures/iteration007.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SheffieldML/GPyOpt/HEAD/manual/figures/iteration007.png -------------------------------------------------------------------------------- /manual/figures/iteration008.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SheffieldML/GPyOpt/HEAD/manual/figures/iteration008.png -------------------------------------------------------------------------------- /manual/figures/iteration009.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SheffieldML/GPyOpt/HEAD/manual/figures/iteration009.png 
-------------------------------------------------------------------------------- /manual/figures/iteration010.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SheffieldML/GPyOpt/HEAD/manual/figures/iteration010.png -------------------------------------------------------------------------------- /manual/figures/iteration011.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SheffieldML/GPyOpt/HEAD/manual/figures/iteration011.png -------------------------------------------------------------------------------- /manual/figures/iteration012.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SheffieldML/GPyOpt/HEAD/manual/figures/iteration012.png -------------------------------------------------------------------------------- /manual/figures/iteration013.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SheffieldML/GPyOpt/HEAD/manual/figures/iteration013.png -------------------------------------------------------------------------------- /manual/figures/iteration014.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SheffieldML/GPyOpt/HEAD/manual/figures/iteration014.png -------------------------------------------------------------------------------- /manual/figures/iteration015.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SheffieldML/GPyOpt/HEAD/manual/figures/iteration015.png -------------------------------------------------------------------------------- /GPyOpt/util/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2016, the GPyOpt Authors 2 | # Licensed under the BSD 3-clause license (see 
LICENSE.txt) 3 | -------------------------------------------------------------------------------- /GPyOpt/testing/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2016, the GPyOpt Authors 2 | # Licensed under the BSD 3-clause license (see LICENSE.txt) 3 | 4 | -------------------------------------------------------------------------------- /GPyOpt/plotting/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012, GPyOpt authors (see AUTHORS.txt). 2 | # Licensed under the BSD 3-clause license (see LICENSE.txt) 3 | 4 | from . import plots_bo 5 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [build_sphinx] 2 | source-dir = docs 3 | build-dir = docs/_build 4 | all_files = 1 5 | 6 | [upload_sphinx] 7 | upload-dir = docs/_build/html 8 | 9 | [easy_install] 10 | 11 | -------------------------------------------------------------------------------- /manual/data/branin.py: -------------------------------------------------------------------------------- 1 | 2 | from GPyOpt.fmodels.experiments2d import branin as branin_creator 3 | import numpy as np 4 | 5 | f = branin_creator() 6 | 7 | def branin(x,y): 8 | return f.f(np.hstack([x,y])) 9 | -------------------------------------------------------------------------------- /GPyOpt/core/errors.py: -------------------------------------------------------------------------------- 1 | class InvalidConfigError(Exception): 2 | pass 3 | 4 | class FullyExploredOptimizationDomainError(Exception): 5 | pass 6 | 7 | class InvalidVariableNameError(Exception): 8 | pass 9 | -------------------------------------------------------------------------------- /GPyOpt/__version__.py: -------------------------------------------------------------------------------- 1 | from pkg_resources import 
get_distribution, DistributionNotFound 2 | 3 | try: 4 | __version__ = get_distribution('GPyOpt').version 5 | except DistributionNotFound: 6 | __version__ = 'dev' 7 | -------------------------------------------------------------------------------- /GPyOpt/optimization/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2016, the GPyOpt Authors 2 | # Licensed under the BSD 3-clause license (see LICENSE.txt) 3 | 4 | from .acquisition_optimizer import AcquisitionOptimizer, AcquisitionOptimizer -------------------------------------------------------------------------------- /GPyOpt/util/stats.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2016, the GPyOpt Authors 2 | # Licensed under the BSD 3-clause license (see LICENSE.txt) 3 | 4 | #from ..util.general import samples_multidimensional_uniform, multigrid, iroot 5 | import numpy as np 6 | -------------------------------------------------------------------------------- /docs/GPyOpt.testing.rst: -------------------------------------------------------------------------------- 1 | GPyOpt.testing package 2 | ====================== 3 | 4 | Module contents 5 | --------------- 6 | 7 | .. automodule:: GPyOpt.testing 8 | :members: 9 | :undoc-members: 10 | :show-inheritance: 11 | -------------------------------------------------------------------------------- /GPyOpt/core/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2016, GPyOpt authors (see AUTHORS.txt). 2 | # Licensed under the BSD 3-clause license (see LICENSE.txt) 3 | 4 | from .bo import BO 5 | from . import task 6 | from .. 
import acquisitions as acquisition 7 | -------------------------------------------------------------------------------- /GPyOpt/objective_examples/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2016, the GPyOpt Authors 2 | # Licensed under the BSD 3-clause license (see LICENSE.txt) 3 | 4 | from . import experiments1d 5 | from . import experiments2d 6 | from . import experimentsNd 7 | -------------------------------------------------------------------------------- /travis_tests.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import matplotlib 3 | matplotlib.use('agg') 4 | 5 | import nose, warnings 6 | with warnings.catch_warnings(): 7 | warnings.simplefilter("ignore") 8 | nose.main('GPyOpt', defaultTest='GPyOpt/testing/', argv=['']) -------------------------------------------------------------------------------- /GPyOpt/core/task/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2016, the GPyOpt Authors 2 | # Licensed under the BSD 3-clause license (see LICENSE.txt) 3 | 4 | from .objective import SingleObjective 5 | from . import space 6 | from . 
import cost 7 | from .cost import CostModel 8 | -------------------------------------------------------------------------------- /GPyOpt/methods/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2016, the GPyOpt Authors 2 | # Licensed under the BSD 3-clause license (see LICENSE.txt) 3 | 4 | from .bayesian_optimization import BayesianOptimization 5 | from .modular_bayesian_optimization import ModularBayesianOptimization 6 | -------------------------------------------------------------------------------- /GPyOpt/interface/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2014, GPyOpt authors (see AUTHORS.txt). 2 | # Licensed under the BSD 3-clause license (see LICENSE.txt) 3 | 4 | from .config_parser import parser 5 | from .driver import BODriver 6 | from .func_loader import load_objective 7 | from .output import OutputEng 8 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include *.txt 2 | recursive-include doc *.txt 3 | include *.md 4 | recursive-include doc *.md 5 | include *.cfg 6 | recursive-include doc *.cfg 7 | include *.json 8 | recursive-include doc *.json 9 | recursive-include GPyOpt *.c 10 | recursive-include GPyOpt *.so 11 | recursive-include GPyOpt *.pyx 12 | -------------------------------------------------------------------------------- /GPyOpt/experiment_design/base.py: -------------------------------------------------------------------------------- 1 | class ExperimentDesign(object): 2 | """ 3 | Base class for all experiment designs 4 | """ 5 | def __init__(self, space): 6 | self.space = space 7 | 8 | def get_samples(self, init_points_count): 9 | raise NotImplementedError("Subclasses should implement this method.") 10 | 
-------------------------------------------------------------------------------- /examples/branin/branin.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2016, the GPyOpt Authors 2 | # Licensed under the BSD 3-clause license (see LICENSE.txt) 3 | 4 | 5 | from GPyOpt.objective_examples.experiments2d import branin as branin_creator 6 | import numpy as np 7 | 8 | f = branin_creator() 9 | 10 | def branin(x,y): 11 | return f.f(np.hstack([x,y])) 12 | -------------------------------------------------------------------------------- /AUTHORS.txt: -------------------------------------------------------------------------------- 1 | -Aki Vehtari 2 | -Alan Saul 3 | -Andreas Damianou 4 | -Andrei Paleyes 5 | -Fela Winkelmolen 6 | -Huibin Shen 7 | -James Hensman 8 | -Javier Gonzalez 9 | -Jordan Massiah 10 | -Josh Fass 11 | -Neil Lawrence 12 | -Rasmus Berg Palm 13 | -Rodolphe Jenatton 14 | -Simon Kamronn 15 | -Zhenwen Dai 16 | -see also GPy and GPyOpt contributors in GitHub -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | cycler>=0.10.0 2 | decorator>=4.0.10 3 | numpy>=1.11.2 4 | six>=1.10.0 5 | python-dateutil>=2.6.0 6 | paramz>=0.7.0 7 | GPy>=1.8 8 | matplotlib>=1.5.3 9 | #pyparsing==2.1.10 10 | #pytz==2016.7 11 | scipy>=0.18.1 12 | mock>=2.0.0 13 | PyDOE >= 0.3.0 14 | sobol_seq >=0.1 15 | emcee==2.2.1 16 | 17 | # For tests 18 | nose 19 | codecov 20 | -------------------------------------------------------------------------------- /examples/six_hump_camel/README.md: -------------------------------------------------------------------------------- 1 | ## The Six Hump Camel Back 2 | 3 | A more difficult global optimization test function, this function has 6 local minima, two of which are global. 
4 | 5 | f(x,y) = (4 - 2.1x^2 + x^4 / 3)x^2 + xy + (-4 + 4y^2)y^2 6 | 7 | ### Global optimium 8 | 9 | f(x,y) = −1.0316 10 | 11 | X = -0.898 (0.898) 12 | Y = 0.7126 (-0.7126) 13 | -------------------------------------------------------------------------------- /GPyOpt/util/io.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2016, the GPyOpt Authors 2 | # Licensed under the BSD 3-clause license (see LICENSE.txt) 3 | 4 | def gen_datestr(): 5 | """ 6 | Returns a string with the yy/mm/dd and hh/mm/ss 7 | """ 8 | from datetime import datetime 9 | dt = datetime.now() 10 | return str(dt.year)+'.'+str(dt.month)+'.'+str(dt.day)+'_'+str(dt.hour)+'.'+str(dt.minute)+'.'+str(dt.second) -------------------------------------------------------------------------------- /docs/GPyOpt.plotting.rst: -------------------------------------------------------------------------------- 1 | GPyOpt.plotting package 2 | ======================= 3 | 4 | Submodules 5 | ---------- 6 | 7 | GPyOpt.plotting.plots\_bo module 8 | -------------------------------- 9 | 10 | .. automodule:: GPyOpt.plotting.plots_bo 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | 16 | Module contents 17 | --------------- 18 | 19 | .. automodule:: GPyOpt.plotting 20 | :members: 21 | :undoc-members: 22 | :show-inheritance: 23 | -------------------------------------------------------------------------------- /docs/GPyOpt.rst: -------------------------------------------------------------------------------- 1 | GPyOpt package 2 | ============== 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | 9 | GPyOpt.acquisitions 10 | GPyOpt.core 11 | GPyOpt.experiment_design 12 | GPyOpt.interface 13 | GPyOpt.methods 14 | GPyOpt.models 15 | GPyOpt.objective_examples 16 | GPyOpt.optimization 17 | GPyOpt.plotting 18 | GPyOpt.testing 19 | GPyOpt.util 20 | 21 | Module contents 22 | --------------- 23 | 24 | .. 
automodule:: GPyOpt 25 | :members: 26 | :undoc-members: 27 | :show-inheritance: 28 | -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | GPyOpt examples 2 | ====== 3 | 4 | The goal of this set of examples is to show how to GPyOpt can be used in a similar way to Spearmint (https://github.com/JasperSnoek/spearmint). The default options that GPyOpt uses in the config.json files are identical to those used by Spearmint. 5 | 6 | Just some small differences that aim to address in the future: 7 | 8 | * So far only objective functions written in Python can be used. 9 | * The parallelization of the evaluations is carried out in a synchronous way rather than asynchronous as it is done is Spearmint. 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | -------------------------------------------------------------------------------- /examples/six_hump_camel/camel.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2016, the GPyOpt Authors 2 | # Licensed under the BSD 3-clause license (see LICENSE.txt) 3 | 4 | import math 5 | 6 | def camel(x,y): 7 | x2 = math.pow(x,2) 8 | x4 = math.pow(x,4) 9 | y2 = math.pow(y,2) 10 | 11 | return (4.0 - 2.1 * x2 + (x4 / 3.0)) * x2 + x*y + (-4.0 + 4.0 * y2) * y2 12 | 13 | 14 | def main(job_id, params): 15 | x = params['X'][0] 16 | y = params['Y'][0] 17 | res = camel(x, y) 18 | print('The Six hump camel back function:') 19 | print('\tf(%.4f, %0.4f) = %f' % (x, y, res)) 20 | return camel(x, y) 21 | 22 | 23 | if __name__ == "__main__": 24 | main(23, {'X': [0.0898], 'Y': [-0.7126]}) 25 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 
5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SPHINXPROJ = GPyOpt 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /manual/data/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "language" : "PYTHON", 3 | "main-file" : "branin.py", 4 | "experiment-name" : "simple-branin", 5 | "likelihood" : "gaussian", 6 | "resources": { 7 | "maximum-iterations" : 1, 8 | "max-run-time": "NA" 9 | }, 10 | "variables" : { 11 | "y" : { 12 | "type" : "FLOAT", 13 | "size" : 1, 14 | "min" : -2, 15 | "max" : 5 16 | }, 17 | "x" : { 18 | "type" : "FLOAT", 19 | "size" : 1, 20 | "min" : 0, 21 | "max" : 5 22 | } 23 | }, 24 | "output":{ 25 | "verbosity": true 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /GPyOpt/models/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2016, the GPyOpt Authors 2 | # Licensed under the BSD 3-clause license (see LICENSE.txt) 3 | 4 | from .base import BOModel 5 | from .gpmodel import GPModel, GPModel_MCMC 6 | from .rfmodel import RFModel 7 | from .warpedgpmodel import WarpedGPModel 8 | from .input_warped_gpmodel import InputWarpedGPModel 9 | #from . 
import gpykernels 10 | 11 | def select_model(name): 12 | if name == 'GP': 13 | return GPModel 14 | elif name == 'GP_MCMC': 15 | return GPModel_MCMC 16 | elif name == 'RF': 17 | return RFModel 18 | elif name == 'warpGP': 19 | return WarpedGPModel 20 | else: 21 | raise Exception('Invalid model selected.') 22 | -------------------------------------------------------------------------------- /docs/GPyOpt.core.rst: -------------------------------------------------------------------------------- 1 | GPyOpt.core package 2 | =================== 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | 9 | GPyOpt.core.evaluators 10 | GPyOpt.core.task 11 | 12 | Submodules 13 | ---------- 14 | 15 | GPyOpt.core.bo module 16 | --------------------- 17 | 18 | .. automodule:: GPyOpt.core.bo 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | GPyOpt.core.errors module 24 | ------------------------- 25 | 26 | .. automodule:: GPyOpt.core.errors 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | 32 | Module contents 33 | --------------- 34 | 35 | .. 
automodule:: GPyOpt.core 36 | :members: 37 | :undoc-members: 38 | :show-inheritance: 39 | -------------------------------------------------------------------------------- /examples/branin/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "language" : "PYTHON", 3 | "main-file" : "branin.py", 4 | "experiment-name" : "simple-branin", 5 | "likelihood" : "gaussian", 6 | "resources": { 7 | "maximum-iterations" : 1, 8 | "max-run-time": "NA" 9 | }, 10 | "space" : [ 11 | { "name" : "y", 12 | "type" : "continuous", 13 | "domain": "(-2,5)", 14 | "dimensionality": "1" 15 | }, 16 | { 17 | "name" : "x", 18 | "type" : "continuous", 19 | "domain": "(0,5)", 20 | "dimensionality": "1" 21 | } 22 | ], 23 | "constraints": [], 24 | "output":{ 25 | "verbosity": true 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /examples/six_hump_camel/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "language" : "PYTHON", 3 | "main-file" : "camel.py", 4 | "experiment-name" : "simple-camel", 5 | "likelihood" : "gaussian", 6 | "resources": { 7 | "maximum-iterations" : 50, 8 | "max-run-time": "NA" 9 | }, 10 | "space" : [ 11 | { "name" : "X", 12 | "type" : "continuous", 13 | "domain": "(-3,3)", 14 | "dimensionality": "1" 15 | }, 16 | { 17 | "name" : "Y", 18 | "type" : "continuous", 19 | "domain": "(-2,2)", 20 | "dimensionality": "1" 21 | } 22 | ], 23 | "constraints": [], 24 | "output":{ 25 | "verbosity": true 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /GPyOpt/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2016, the GPyOpt Authors 2 | # Licensed under the BSD 3-clause license (see LICENSE.txt) 3 | 4 | import warnings 5 | warnings.filterwarnings("ignore", category=DeprecationWarning) 6 | import logging 7 | 8 | logger = 
logging.getLogger(__name__) 9 | 10 | from GPyOpt.core.task.space import Design_space 11 | from . import core 12 | from . import methods 13 | from . import util 14 | from . import interface 15 | from . import models 16 | from . import acquisitions 17 | from . import optimization 18 | try: 19 | from . import objective_examples 20 | from . import objective_examples as fmodels 21 | except ImportError as e: 22 | logger.warning("Could not import examples: {}".format(e)) 23 | 24 | from .__version__ import __version__ 25 | -------------------------------------------------------------------------------- /GPyOpt/core/evaluators/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2016, the GPyOpt Authors 2 | # Licensed under the BSD 3-clause license (see LICENSE.txt) 3 | 4 | from .base import EvaluatorBase 5 | from .sequential import Sequential 6 | from .batch_random import RandomBatch 7 | from .batch_local_penalization import LocalPenalization 8 | from .batch_thompson import ThompsonBatch 9 | 10 | def select_evaluator(name): 11 | if name == 'sequential': 12 | return Sequential 13 | elif name == 'random': 14 | return RandomBatch 15 | elif name == 'local_penalization': 16 | return LocalPenalization 17 | elif name == 'thompson_sampling': 18 | return ThompsonBatch 19 | else: 20 | raise Exception('Invalid acquisition evaluator selected.') 21 | -------------------------------------------------------------------------------- /GPyOpt/experiment_design/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import ExperimentDesign 2 | from .grid_design import GridDesign 3 | from .latin_design import LatinDesign 4 | from .random_design import RandomDesign 5 | from .sobol_design import SobolDesign 6 | 7 | def initial_design(design_name, space, init_points_count): 8 | design = None 9 | if design_name == 'random': 10 | design = RandomDesign(space) 11 | elif design_name == 
'sobol': 12 | design = SobolDesign(space) 13 | elif design_name == 'grid': 14 | design = GridDesign(space) 15 | elif design_name == 'latin': 16 | design = LatinDesign(space) 17 | else: 18 | raise ValueError('Unknown design type: ' + design_name) 19 | 20 | return design.get_samples(init_points_count) -------------------------------------------------------------------------------- /docs/GPyOpt.methods.rst: -------------------------------------------------------------------------------- 1 | GPyOpt.methods package 2 | ====================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | GPyOpt.methods.bayesian\_optimization module 8 | -------------------------------------------- 9 | 10 | .. automodule:: GPyOpt.methods.bayesian_optimization 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | GPyOpt.methods.modular\_bayesian\_optimization module 16 | ----------------------------------------------------- 17 | 18 | .. automodule:: GPyOpt.methods.modular_bayesian_optimization 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | 24 | Module contents 25 | --------------- 26 | 27 | .. automodule:: GPyOpt.methods 28 | :members: 29 | :undoc-members: 30 | :show-inheritance: 31 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. GPyOpt documentation master file, created by 2 | sphinx-quickstart on Mon Mar 26 11:17:54 2018. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to GPyOpt's documentation! 7 | ================================== 8 | 9 | .. toctree:: 10 | :maxdepth: 2 11 | :caption: Contents: 12 | 13 | .. 
toctree:: 14 | :maxdepth: 1 15 | 16 | GPyOpt.acquisitions 17 | GPyOpt.core 18 | GPyOpt.experiment_design 19 | GPyOpt.interface 20 | GPyOpt.methods 21 | GPyOpt.models 22 | GPyOpt.objective_examples 23 | GPyOpt.optimization 24 | GPyOpt.plotting 25 | GPyOpt.util 26 | 27 | Indices and tables 28 | ================== 29 | 30 | * :ref:`genindex` 31 | * :ref:`modindex` 32 | * :ref:`search` 33 | -------------------------------------------------------------------------------- /GPyOpt/interface/func_loader.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2014, GPyOpt authors (see AUTHORS.txt). 2 | # Licensed under the BSD 3-clause license (see LICENSE.txt) 3 | 4 | import os 5 | import numpy as np 6 | 7 | def load_objective(config): 8 | """ 9 | Loads the objective function from a .json file. 10 | """ 11 | 12 | assert 'prjpath' in config 13 | assert 'main-file' in config, "The problem file ('main-file') is missing!" 14 | 15 | os.chdir(config['prjpath']) 16 | if config['language'].lower()=='python': 17 | assert config['main-file'].endswith('.py'), 'The python problem file has to end with .py!' 
18 | import imp 19 | m = imp.load_source(config['main-file'][:-3], os.path.join(config['prjpath'],config['main-file'])) 20 | func = m.__dict__[config['main-file'][:-3]] 21 | return func 22 | 23 | -------------------------------------------------------------------------------- /GPyOpt/testing/functional_tests/test_files/ts_parallel_context_with_duplication_acquisition_gradient_testfile.txt: -------------------------------------------------------------------------------- 1 | -2.241785176759851339e+00 4.909631914574283940e-01 -2.970747112143128810e+00 1.000000000000000000e+00 5.000000000000000000e+00 2 | 3.394920736069600764e+00 -1.130942124408866434e+00 -3.262315760968206746e+00 0.000000000000000000e+00 -1.000000000000000000e+00 3 | 8.710781416120635612e+00 -5.408455725403489289e+00 2.330405683659883209e+00 0.000000000000000000e+00 -1.000000000000000000e+00 4 | 6.926218333720342457e+00 6.882781789403402684e-01 5.622815997018673073e-01 1.000000000000000000e+00 5.000000000000000000e+00 5 | -3.734529661354497598e+00 8.279240491584658912e+00 -1.243243243479934534e-01 1.000000000000000000e+00 -1.000000000000000000e+00 6 | 2.999999999999999889e-01 -9.998554324256922143e+00 -5.770342105356025542e+00 1.000000000000000000e+00 -1.000000000000000000e+00 7 | -------------------------------------------------------------------------------- /GPyOpt/testing/functional_tests/test_files/ts_parallel_context_without_duplication_acquisition_gradient_testfile.txt: -------------------------------------------------------------------------------- 1 | -2.241785176759851339e+00 4.909631914574283940e-01 -2.970747112143128810e+00 1.000000000000000000e+00 5.000000000000000000e+00 2 | 3.394920736069600764e+00 -1.130942124408866434e+00 -3.262315760968206746e+00 0.000000000000000000e+00 -1.000000000000000000e+00 3 | 8.710781416120635612e+00 -5.408455725403489289e+00 2.330405683659883209e+00 0.000000000000000000e+00 -1.000000000000000000e+00 4 | 6.926218333720342457e+00 6.882781789403402684e-01 
5.622815997018673073e-01 1.000000000000000000e+00 5.000000000000000000e+00 5 | -3.734529661354497598e+00 8.279240491584658912e+00 -1.243243243479934534e-01 1.000000000000000000e+00 -1.000000000000000000e+00 6 | 2.999999999999999889e-01 1.570910157073559077e+00 -1.547774392084244610e+00 1.000000000000000000e+00 5.000000000000000000e+00 7 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.py[cod~] 2 | # C extensions 3 | *.so 4 | # Packages 5 | *.egg 6 | *.egg-info 7 | dist 8 | build 9 | eggs 10 | bin 11 | var 12 | sdist 13 | develop-eggs 14 | .installed.cfg 15 | lib 16 | lib64 17 | 18 | # Installer logs 19 | pip-log.txt 20 | 21 | # Results from optimization in demos 22 | GPyOpt-results.txt 23 | 24 | # figures in notebooks 25 | .png 26 | 27 | # Unit test / coverage reports 28 | .coverage 29 | .tox 30 | nosetests.xml 31 | # Translations 32 | *.mo 33 | # Mr Developer 34 | .mr.developer.cfg 35 | .project 36 | .pydevproject 37 | #vim 38 | *.swp 39 | #bfgs optimiser leaves this lying around 40 | iterate.dat 41 | # Nosetests # 42 | ############# 43 | *.noseids 44 | # git merge files # 45 | ################### 46 | *.orig 47 | 48 | .DS_Store 49 | 50 | .ipynb_checkpoints 51 | 52 | docs/_build 53 | .pytest_cache 54 | 55 | # Virtual environments 56 | venv/ 57 | -------------------------------------------------------------------------------- /GPyOpt/core/evaluators/sequential.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2016, the GPyOpt Authors 2 | # Licensed under the BSD 3-clause license (see LICENSE.txt) 3 | 4 | from .base import EvaluatorBase 5 | 6 | 7 | class Sequential(EvaluatorBase): 8 | """ 9 | Class for standard Sequential Bayesian optimization methods. 10 | 11 | :param acquisition: acquisition function to be used to compute the batch. 
12 | :param batch size: it is 1 by default since this class is only used for sequential methods. 13 | """ 14 | 15 | def __init__(self, acquisition, batch_size=1): 16 | super(Sequential, self).__init__(acquisition, batch_size) 17 | 18 | def compute_batch(self, duplicate_manager=None,context_manager=None): 19 | """ 20 | Selects the new location to evaluate the objective. 21 | """ 22 | x, _ = self.acquisition.optimize(duplicate_manager=duplicate_manager) 23 | return x 24 | -------------------------------------------------------------------------------- /gpyopt.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # Copyright (c) 2014, GPyOpt authors (see AUTHORS.txt). 3 | # Licensed under the BSD 3-clause license (see LICENSE.txt) 4 | 5 | from GPyOpt.interface import parser, BODriver, load_objective, OutputEng 6 | 7 | if __name__ == '__main__': 8 | import sys,os 9 | if len(sys.argv)<1: 10 | print('Need the config file!') 11 | exit() 12 | 13 | configfile = sys.argv[1] 14 | curpath = os.path.dirname(os.path.abspath(configfile)) 15 | 16 | config = parser(configfile) 17 | config['prjpath'] = curpath 18 | obj_func = load_objective(config) 19 | driver = BODriver(config, obj_func) 20 | bo = driver.run() 21 | # bo.save_report(os.path.join(curpath,config['experiment-name']+'_report.txt')) 22 | bo.save_evaluations(os.path.join(curpath,config['experiment-name']+'_evaluations.txt')) 23 | bo.save_models(os.path.join(curpath,config['experiment-name']+'_model.txt')) -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=. 
11 | set BUILDDIR=_build 12 | set SPHINXPROJ=GPyOpt 13 | 14 | if "%1" == "" goto help 15 | 16 | %SPHINXBUILD% >NUL 2>NUL 17 | if errorlevel 9009 ( 18 | echo. 19 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 20 | echo.installed, then set the SPHINXBUILD environment variable to point 21 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 22 | echo.may add the Sphinx directory to PATH. 23 | echo. 24 | echo.If you don't have Sphinx installed, grab it from 25 | echo.http://sphinx-doc.org/ 26 | exit /b 1 27 | ) 28 | 29 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 30 | goto end 31 | 32 | :help 33 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 34 | 35 | :end 36 | popd 37 | -------------------------------------------------------------------------------- /GPyOpt/testing/functional_tests/test_files/constraints_acquisition_gradient_testfile.txt: -------------------------------------------------------------------------------- 1 | -0.165955990594852 -1.2229842156936066 2 | 0.4406489868843162 -0.9412193658669873 3 | -0.39533485473632046 -0.3096975773079902 4 | -0.7064882183657739 0.11645020201007084 5 | -0.1616109711934104 0.5114025305352068 6 | 0.8460060815073307 -0.09654585655388481 7 | 0.42129453106421 0.5411026606436793 8 | -0.27067263037622546 -0.7773176139507966 9 | 0.7143475474889778 0.6757693186032174 10 | 0.546758394769407 0.6732334569585419 11 | 0.6116040726004173 0.2179539190121953 12 | -0.6979401495314701 0.6853292029271287 13 | 0.9725136602999132 0.6613336224664068 14 | -0.3947307012607917 -0.7710994363337204 15 | -0.8867648541511022 0.506327874465911 16 | 0.7574514994920194 0.2744491479066373 17 | 0.3814658794809189 -0.5935115492873151 18 | -0.11047944658786246 -0.5867788112296773 19 | -0.4518774611614127 -0.5603720990073233 20 | 0.1846090170171062 -0.6958669931894023 21 | -------------------------------------------------------------------------------- 
/GPyOpt/testing/functional_tests/test_files/update_intervals_acquisition_gradient_testfile.txt: -------------------------------------------------------------------------------- 1 | -8.297799529742597713e-01 -4.076614052312022451e+00 -8.080548559670521769e-01 1.704675101784022040e+00 3.007445686755366054e+00 2 | 2.203244934421580759e+00 -3.137397886223291188e+00 1.852195003967595177e+00 -8.269519763287300762e-01 4.682615757193975270e+00 3 | -4.998856251826550690e+00 -1.544392729569522338e+00 -2.955477502684825453e+00 5.868982844575167945e-01 -1.865758218407571611e+00 4 | -1.976674273681602312e+00 -1.032325257693300635e+00 3.781174363909453717e+00 -3.596130614047662100e+00 1.923226156693140787e+00 5 | -3.532441091828869340e+00 3.881673400335694524e-01 -4.726124068020737923e+00 -3.018985109151212320e+00 3.763891522960383540e+00 6 | 5.000000000000000000e+00 -5.000000000000000000e+00 -5.000000000000000000e+00 5.000000000000000000e+00 -5.000000000000000000e+00 7 | 5.000000000000000000e+00 -5.000000000000000000e+00 -5.000000000000000000e+00 5.000000000000000000e+00 -5.000000000000000000e+00 8 | -------------------------------------------------------------------------------- /docs/GPyOpt.optimization.rst: -------------------------------------------------------------------------------- 1 | GPyOpt.optimization package 2 | =========================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | GPyOpt.optimization.acquisition\_optimizer module 8 | ------------------------------------------------- 9 | 10 | .. automodule:: GPyOpt.optimization.acquisition_optimizer 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | GPyOpt.optimization.anchor\_points\_generator module 16 | ---------------------------------------------------- 17 | 18 | .. automodule:: GPyOpt.optimization.anchor_points_generator 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | GPyOpt.optimization.optimizer module 24 | ------------------------------------ 25 | 26 | .. 
automodule:: GPyOpt.optimization.optimizer 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | 32 | Module contents 33 | --------------- 34 | 35 | .. automodule:: GPyOpt.optimization 36 | :members: 37 | :undoc-members: 38 | :show-inheritance: 39 | -------------------------------------------------------------------------------- /GPyOpt/models/base.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2016, the GPyOpt Authors 2 | # Licensed under the BSD 3-clause license (see LICENSE.txt) 3 | 4 | import abc 5 | from six import with_metaclass 6 | 7 | class BOModel(with_metaclass(abc.ABCMeta, object)): 8 | """ 9 | The abstract Model for Bayesian Optimization 10 | """ 11 | 12 | MCMC_sampler = False 13 | analytical_gradient_prediction = False 14 | 15 | @abc.abstractmethod 16 | def updateModel(self, X_all, Y_all, X_new, Y_new): 17 | "Augment the dataset of the model" 18 | return 19 | 20 | @abc.abstractmethod 21 | def predict(self, X): 22 | "Get the predicted mean and std at X." 23 | return 24 | 25 | # We keep this one optional 26 | def predict_withGradients(self, X): 27 | "Get the gradients of the predicted mean and variance at X." 28 | return 29 | 30 | @abc.abstractmethod 31 | def get_fmin(self): 32 | "Get the minimum of the current model." 33 | return 34 | -------------------------------------------------------------------------------- /docs/GPyOpt.objective_examples.rst: -------------------------------------------------------------------------------- 1 | GPyOpt.objective\_examples package 2 | ================================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | GPyOpt.objective\_examples.experiments1d module 8 | ----------------------------------------------- 9 | 10 | .. 
automodule:: GPyOpt.objective_examples.experiments1d 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | GPyOpt.objective\_examples.experiments2d module 16 | ----------------------------------------------- 17 | 18 | .. automodule:: GPyOpt.objective_examples.experiments2d 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | GPyOpt.objective\_examples.experimentsNd module 24 | ----------------------------------------------- 25 | 26 | .. automodule:: GPyOpt.objective_examples.experimentsNd 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | 32 | Module contents 33 | --------------- 34 | 35 | .. automodule:: GPyOpt.objective_examples 36 | :members: 37 | :undoc-members: 38 | :show-inheritance: 39 | -------------------------------------------------------------------------------- /docs/GPyOpt.core.task.rst: -------------------------------------------------------------------------------- 1 | GPyOpt.core.task package 2 | ======================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | GPyOpt.core.task.cost module 8 | ---------------------------- 9 | 10 | .. automodule:: GPyOpt.core.task.cost 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | GPyOpt.core.task.objective module 16 | --------------------------------- 17 | 18 | .. automodule:: GPyOpt.core.task.objective 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | GPyOpt.core.task.space module 24 | ----------------------------- 25 | 26 | .. automodule:: GPyOpt.core.task.space 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | GPyOpt.core.task.variables module 32 | --------------------------------- 33 | 34 | .. automodule:: GPyOpt.core.task.variables 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | 40 | Module contents 41 | --------------- 42 | 43 | .. 
automodule:: GPyOpt.core.task 44 | :members: 45 | :undoc-members: 46 | :show-inheritance: 47 | -------------------------------------------------------------------------------- /docs/GPyOpt.interface.rst: -------------------------------------------------------------------------------- 1 | GPyOpt.interface package 2 | ======================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | GPyOpt.interface.config\_parser module 8 | -------------------------------------- 9 | 10 | .. automodule:: GPyOpt.interface.config_parser 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | GPyOpt.interface.driver module 16 | ------------------------------ 17 | 18 | .. automodule:: GPyOpt.interface.driver 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | GPyOpt.interface.func\_loader module 24 | ------------------------------------ 25 | 26 | .. automodule:: GPyOpt.interface.func_loader 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | GPyOpt.interface.output module 32 | ------------------------------ 33 | 34 | .. automodule:: GPyOpt.interface.output 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | 40 | Module contents 41 | --------------- 42 | 43 | .. 
automodule:: GPyOpt.interface 44 | :members: 45 | :undoc-members: 46 | :show-inheritance: 47 | -------------------------------------------------------------------------------- /GPyOpt/testing/functional_tests/test_files/random_parallel_context_with_duplication_acquisition_gradient_testfile.txt: -------------------------------------------------------------------------------- 1 | -2.241785176759851339e+00 4.909631914574283940e-01 -2.970747112143128810e+00 1.000000000000000000e+00 5.000000000000000000e+00 2 | 3.394920736069600764e+00 -1.130942124408866434e+00 -3.262315760968206746e+00 0.000000000000000000e+00 -1.000000000000000000e+00 3 | 8.710781416120635612e+00 -5.408455725403489289e+00 2.330405683659883209e+00 0.000000000000000000e+00 -1.000000000000000000e+00 4 | 6.926218333720342457e+00 6.882781789403402684e-01 5.622815997018673073e-01 1.000000000000000000e+00 5.000000000000000000e+00 5 | -3.734529661354497598e+00 8.279240491584658912e+00 -1.243243243479934534e-01 1.000000000000000000e+00 -1.000000000000000000e+00 6 | 2.999999999999999889e-01 1.000000000000000000e+01 -8.000000000000000000e+00 1.000000000000000000e+00 -1.000000000000000000e+00 7 | 2.999999999999999889e-01 -9.999057416311769586e+00 2.417903827007494399e+00 1.000000000000000000e+00 5.000000000000000000e+00 8 | 2.999999999999999889e-01 -9.998421285764067079e+00 -6.609848208261302815e+00 1.000000000000000000e+00 -1.000000000000000000e+00 9 | -------------------------------------------------------------------------------- /GPyOpt/testing/functional_tests/test_files/random_parallel_context_without_duplication_acquisition_gradient_testfile.txt: -------------------------------------------------------------------------------- 1 | -2.241785176759851339e+00 4.909631914574283940e-01 -2.970747112143128810e+00 1.000000000000000000e+00 5.000000000000000000e+00 2 | 3.394920736069600764e+00 -1.130942124408866434e+00 -3.262315760968206746e+00 0.000000000000000000e+00 -1.000000000000000000e+00 3 | 
8.710781416120635612e+00 -5.408455725403489289e+00 2.330405683659883209e+00 0.000000000000000000e+00 -1.000000000000000000e+00 4 | 6.926218333720342457e+00 6.882781789403402684e-01 5.622815997018673073e-01 1.000000000000000000e+00 5.000000000000000000e+00 5 | -3.734529661354497598e+00 8.279240491584658912e+00 -1.243243243479934534e-01 1.000000000000000000e+00 -1.000000000000000000e+00 6 | 2.999999999999999889e-01 1.000000000000000000e+01 -8.000000000000000000e+00 1.000000000000000000e+00 -1.000000000000000000e+00 7 | 2.999999999999999889e-01 6.741184473640831953e+00 2.341822562832572174e+00 1.000000000000000000e+00 -1.000000000000000000e+00 8 | 2.999999999999999889e-01 -5.144574323862696197e+00 7.390690684178835568e-02 1.000000000000000000e+00 -1.000000000000000000e+00 9 | -------------------------------------------------------------------------------- /GPyOpt/acquisitions/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2016, the GPyOpt Authors 2 | # Licensed under the BSD 3-clause license (see LICENSE.txt) 3 | 4 | from .base import AcquisitionBase 5 | from .EI import AcquisitionEI 6 | from GPyOpt.acquisitions.EI_mcmc import AcquisitionEI_MCMC 7 | from .MPI import AcquisitionMPI 8 | from .MPI_mcmc import AcquisitionMPI_MCMC 9 | from .LCB import AcquisitionLCB 10 | from .LCB_mcmc import AcquisitionLCB_MCMC 11 | from .LP import AcquisitionLP 12 | from .ES import AcquisitionEntropySearch 13 | 14 | def select_acquisition(name): 15 | ''' 16 | Acquisition selector 17 | ''' 18 | if name == 'EI': 19 | return AcquisitionEI 20 | elif name == 'EI_MCMC': 21 | return AcquisitionEI_MCMC 22 | elif name == 'LCB': 23 | return AcquisitionLCB 24 | elif name == 'LCB_MCMC': 25 | return AcquisitionLCB_MCMC 26 | elif name == 'MPI': 27 | return AcquisitionMPI 28 | elif name == 'MPI_MCMC': 29 | return AcquisitionMPI_MCMC 30 | elif name == 'LP': 31 | return AcquisitionLP 32 | elif name == 'ES': 33 | return 
AcquisitionEntropySearch 34 | else: 35 | raise Exception('Invalid acquisition selected.') 36 | -------------------------------------------------------------------------------- /docs/GPyOpt.util.rst: -------------------------------------------------------------------------------- 1 | GPyOpt.util package 2 | =================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | GPyOpt.util.arguments\_manager module 8 | ------------------------------------- 9 | 10 | .. automodule:: GPyOpt.util.arguments_manager 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | GPyOpt.util.duplicate\_manager module 16 | ------------------------------------- 17 | 18 | .. automodule:: GPyOpt.util.duplicate_manager 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | GPyOpt.util.general module 24 | -------------------------- 25 | 26 | .. automodule:: GPyOpt.util.general 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | GPyOpt.util.io module 32 | --------------------- 33 | 34 | .. automodule:: GPyOpt.util.io 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | GPyOpt.util.stats module 40 | ------------------------ 41 | 42 | .. automodule:: GPyOpt.util.stats 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | 48 | Module contents 49 | --------------- 50 | 51 | .. automodule:: GPyOpt.util 52 | :members: 53 | :undoc-members: 54 | :show-inheritance: 55 | -------------------------------------------------------------------------------- /docs/GPyOpt.models.rst: -------------------------------------------------------------------------------- 1 | GPyOpt.models package 2 | ===================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | GPyOpt.models.base module 8 | ------------------------- 9 | 10 | .. automodule:: GPyOpt.models.base 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | GPyOpt.models.gpmodel module 16 | ---------------------------- 17 | 18 | .. 
automodule:: GPyOpt.models.gpmodel 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | GPyOpt.models.input\_warped\_gpmodel module 24 | ------------------------------------------- 25 | 26 | .. automodule:: GPyOpt.models.input_warped_gpmodel 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | GPyOpt.models.rfmodel module 32 | ---------------------------- 33 | 34 | .. automodule:: GPyOpt.models.rfmodel 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | GPyOpt.models.warpedgpmodel module 40 | ---------------------------------- 41 | 42 | .. automodule:: GPyOpt.models.warpedgpmodel 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | 48 | Module contents 49 | --------------- 50 | 51 | .. automodule:: GPyOpt.models 52 | :members: 53 | :undoc-members: 54 | :show-inheritance: 55 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | 3 | notifications: 4 | email: 5 | on_success: change 6 | on_failure: always 7 | 8 | os: 9 | - linux 10 | 11 | python: 12 | - "2.7" 13 | - "3.5" 14 | - "3.6" 15 | - "3.7" 16 | # command to install dependencies 17 | 18 | install: 19 | - pip install -r requirements.txt 20 | - pip install . 
21 | # command to run tests 22 | script: 23 | - coverage run travis_tests.py 24 | 25 | 26 | after_success: 27 | - codecov 28 | 29 | deploy: 30 | provider: pypi 31 | user: __token__ 32 | skip_existing: true 33 | password: 34 | secure: phHZX7dkqarH2i86eMNwV4CB0Of6AG9a81Kn/svIn5VMadZ0vHCzeoTmGXc6sJetuD2LAjZeiSwD0kIixesoQ1STV94cxTmx3eIv85fZXW7aeQJ7vEr2cMch0Fcx9FSw1u31KI0iY6V2iqy2gKR0M3UFZgjFPIjhVcfPTeWyGmzVjBNnskLwlozojlKHP6upBwXwzTAlNf8eY/IvQORRW8V0+ld5PsOPUxiUY7v8GHDfZwDZ7KVibh2YoXD3351JLvAs9sukWadDRT3euDLBdgg5JSZBpuuvnqxzf5t2TfYXgS3fGre/YCT8flYbAJ2v6fSk88tUVgQUdWMV2Ee8rAIdID6LzGKUoi1TMxg47ZnSvh5y+3Pa7i7uqvthSm7lYMTCAoa01/HfG8LNHcpW3NjySzJ7/c7cFhk422VDUzk4N2n9buGFGcYEAvAvqPje/KGPAPMeCme8aGxHZFC52AdKrxDdL+8WJeHHAvJxRekvPJGxYzbf6LQrWAXQgzhROW8t+5H2y7/K0pJTppJ30tl7hHcl8vJ4Szau1QWwwuZvQMhrWsiM7NDL0vH07I//OsALQETsNEyWmMRoKe+kOnL5j2dkUHdfisjIZvHNSUqinhzxZLps6dTHiepM3o8TOpisdX03voNiORM3GsPRwhgxa1HE9uPoMW0UbMcAYoU= 35 | on: 36 | branch: master 37 | -------------------------------------------------------------------------------- /GPyOpt/testing/functional_tests/test_files/EI_acquisition_gradient_testfile.txt: -------------------------------------------------------------------------------- 1 | -8.297799529742597713e-01 -4.076614052312022451e+00 -8.080548559670521769e-01 1.704675101784022040e+00 3.007445686755366054e+00 2 | 2.203244934421580759e+00 -3.137397886223291188e+00 1.852195003967595177e+00 -8.269519763287300762e-01 4.682615757193975270e+00 3 | -4.998856251826550690e+00 -1.544392729569522338e+00 -2.955477502684825453e+00 5.868982844575167945e-01 -1.865758218407571611e+00 4 | -1.976674273681602312e+00 -1.032325257693300635e+00 3.781174363909453717e+00 -3.596130614047662100e+00 1.923226156693140787e+00 5 | -3.532441091828869340e+00 3.881673400335694524e-01 -4.726124068020737923e+00 -3.018985109151212320e+00 3.763891522960383540e+00 6 | 5.000000000000000000e+00 -5.000000000000000000e+00 -5.000000000000000000e+00 5.000000000000000000e+00 -5.000000000000000000e+00 7 | 
-5.000000000000000000e+00 5.000000000000000000e+00 5.000000000000000000e+00 5.000000000000000000e+00 5.000000000000000000e+00 8 | 2.654969604694120910e+00 -2.498851398450873340e+00 -4.570167867661299788e+00 -2.269571263382637039e+00 -1.983740517721036767e+00 9 | 4.468567087769349300e+00 2.733791973847035095e+00 2.244111523195138957e+00 -4.952546326811056332e+00 -1.231285322289288331e-02 10 | 3.058020363002086128e+00 7.265130633739849131e-01 -2.349330368421926885e+00 -2.514272015535059523e+00 -1.235536761429303354e+00 11 | -------------------------------------------------------------------------------- /GPyOpt/testing/functional_tests/test_files/LCB_acquisition_gradient_testfile.txt: -------------------------------------------------------------------------------- 1 | -8.297799529742597713e-01 -4.076614052312022451e+00 -8.080548559670521769e-01 1.704675101784022040e+00 3.007445686755366054e+00 2 | 2.203244934421580759e+00 -3.137397886223291188e+00 1.852195003967595177e+00 -8.269519763287300762e-01 4.682615757193975270e+00 3 | -4.998856251826550690e+00 -1.544392729569522338e+00 -2.955477502684825453e+00 5.868982844575167945e-01 -1.865758218407571611e+00 4 | -1.976674273681602312e+00 -1.032325257693300635e+00 3.781174363909453717e+00 -3.596130614047662100e+00 1.923226156693140787e+00 5 | -3.532441091828869340e+00 3.881673400335694524e-01 -4.726124068020737923e+00 -3.018985109151212320e+00 3.763891522960383540e+00 6 | 5.000000000000000000e+00 -5.000000000000000000e+00 -5.000000000000000000e+00 5.000000000000000000e+00 -5.000000000000000000e+00 7 | -5.000000000000000000e+00 5.000000000000000000e+00 5.000000000000000000e+00 5.000000000000000000e+00 5.000000000000000000e+00 8 | -5.000000000000000000e+00 -5.000000000000000000e+00 5.000000000000000000e+00 -5.000000000000000000e+00 5.000000000000000000e+00 9 | 5.000000000000000000e+00 5.000000000000000000e+00 -5.000000000000000000e+00 -5.000000000000000000e+00 5.000000000000000000e+00 10 | -5.000000000000000888e+00 
-5.000000000000000000e+00 -5.000000000000000000e+00 5.000000000000000000e+00 5.000000000000000000e+00 11 | -------------------------------------------------------------------------------- /GPyOpt/testing/functional_tests/test_files/MPI_acquisition_gradient_testfile.txt: -------------------------------------------------------------------------------- 1 | -8.297799529742597713e-01 -4.076614052312022451e+00 -8.080548559670521769e-01 1.704675101784022040e+00 3.007445686755366054e+00 2 | 2.203244934421580759e+00 -3.137397886223291188e+00 1.852195003967595177e+00 -8.269519763287300762e-01 4.682615757193975270e+00 3 | -4.998856251826550690e+00 -1.544392729569522338e+00 -2.955477502684825453e+00 5.868982844575167945e-01 -1.865758218407571611e+00 4 | -1.976674273681602312e+00 -1.032325257693300635e+00 3.781174363909453717e+00 -3.596130614047662100e+00 1.923226156693140787e+00 5 | -3.532441091828869340e+00 3.881673400335694524e-01 -4.726124068020737923e+00 -3.018985109151212320e+00 3.763891522960383540e+00 6 | -8.297799529742597713e-01 -1.741900333867951733e+00 7.697784647750163245e-01 2.934846741670334325e+00 -1.411539663464591587e+00 7 | -1.694218276461010309e+00 -1.887009241946460136e+00 -2.968816834799267390e+00 3.427835002023702060e+00 2.329537887129031759e-01 8 | 2.654969604694120910e+00 -2.498851398450873340e+00 -4.570167867661299788e+00 -2.269571263382637039e+00 -1.983740517721036767e+00 9 | -1.074088406373792992e+00 -1.795591958252819254e+00 2.062109186958414497e+00 4.977116849773786100e+00 5.410479386267130764e-01 10 | 3.058020363002086128e+00 7.265130633739849131e-01 -2.349330368421926885e+00 -2.514272015535059523e+00 -1.235536761429303354e+00 11 | -------------------------------------------------------------------------------- /GPyOpt/testing/functional_tests/test_files/lbfgs_acquisition_gradient_testfile.txt: -------------------------------------------------------------------------------- 1 | -8.297799529742597713e-01 -4.076614052312022451e+00 
-8.080548559670521769e-01 1.704675101784022040e+00 3.007445686755366054e+00 2 | 2.203244934421580759e+00 -3.137397886223291188e+00 1.852195003967595177e+00 -8.269519763287300762e-01 4.682615757193975270e+00 3 | -4.998856251826550690e+00 -1.544392729569522338e+00 -2.955477502684825453e+00 5.868982844575167945e-01 -1.865758218407571611e+00 4 | -1.976674273681602312e+00 -1.032325257693300635e+00 3.781174363909453717e+00 -3.596130614047662100e+00 1.923226156693140787e+00 5 | -3.532441091828869340e+00 3.881673400335694524e-01 -4.726124068020737923e+00 -3.018985109151212320e+00 3.763891522960383540e+00 6 | 5.000000000000000000e+00 -5.000000000000000000e+00 -5.000000000000000000e+00 5.000000000000000000e+00 -5.000000000000000000e+00 7 | -5.000000000000000000e+00 5.000000000000000000e+00 5.000000000000000000e+00 5.000000000000000000e+00 5.000000000000000000e+00 8 | 2.654969604694120910e+00 -2.498851398450873340e+00 -4.570167867661299788e+00 -2.269571263382637039e+00 -1.983740517721036767e+00 9 | 4.468567087769349300e+00 2.733791973847035095e+00 2.244111523195138957e+00 -4.952546326811056332e+00 -1.231285322289288331e-02 10 | 3.058020363002086128e+00 7.265130633739849131e-01 -2.349330368421926885e+00 -2.514272015535059523e+00 -1.235536761429303354e+00 11 | -------------------------------------------------------------------------------- /GPyOpt/objective_examples/experiments1d.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2016, the GPyOpt Authors 2 | # Licensed under the BSD 3-clause license (see LICENSE.txt) 3 | 4 | import matplotlib.pyplot as plt 5 | import numpy as np 6 | 7 | 8 | class function1d: 9 | ''' 10 | This is a benchmark of unidimensional functions interesting to optimize. 11 | :param bounds: the box constraints to define the domain in which the function is optimized. 
12 | ''' 13 | def plot(self,bounds=None): 14 | if bounds is None: bounds = self.bounds 15 | X = np.arange(bounds[0][0], bounds[0][1], 0.01) 16 | Y = self.f(X) 17 | plt.plot(X, Y, lw=2) 18 | plt.xlabel('x') 19 | plt.ylabel('f(x)') 20 | plt.show() 21 | 22 | class forrester(function1d): 23 | ''' 24 | Forrester function. 25 | 26 | :param sd: standard deviation, to generate noisy evaluations of the function. 27 | ''' 28 | def __init__(self,sd=None): 29 | self.input_dim = 1 30 | if sd==None: self.sd = 0 31 | else: self.sd=sd 32 | self.min = 0.78 ## approx 33 | self.fmin = -6 ## approx 34 | self.bounds = [(0,1)] 35 | 36 | def f(self,X): 37 | X = X.reshape((len(X),1)) 38 | n = X.shape[0] 39 | fval = ((6*X -2)**2)*np.sin(12*X-4) 40 | if self.sd ==0: 41 | noise = np.zeros(n).reshape(n,1) 42 | else: 43 | noise = np.random.normal(0,self.sd,n).reshape(n,1) 44 | return fval.reshape(n,1) + noise 45 | 46 | 47 | -------------------------------------------------------------------------------- /GPyOpt/testing/functional_tests/test_files/noiseless_evauations_acquisition_gradient_testfile.txt: -------------------------------------------------------------------------------- 1 | -8.297799529742597713e-01 -4.076614052312022451e+00 -8.080548559670521769e-01 1.704675101784022040e+00 3.007445686755366054e+00 2 | 2.203244934421580759e+00 -3.137397886223291188e+00 1.852195003967595177e+00 -8.269519763287300762e-01 4.682615757193975270e+00 3 | -4.998856251826550690e+00 -1.544392729569522338e+00 -2.955477502684825453e+00 5.868982844575167945e-01 -1.865758218407571611e+00 4 | -1.976674273681602312e+00 -1.032325257693300635e+00 3.781174363909453717e+00 -3.596130614047662100e+00 1.923226156693140787e+00 5 | -3.532441091828869340e+00 3.881673400335694524e-01 -4.726124068020737923e+00 -3.018985109151212320e+00 3.763891522960383540e+00 6 | 5.000000000000000000e+00 -5.000000000000000000e+00 -5.000000000000000000e+00 5.000000000000000000e+00 -5.000000000000000000e+00 7 | -5.000000000000000000e+00 
5.000000000000000000e+00 5.000000000000000000e+00 5.000000000000000000e+00 5.000000000000000000e+00 8 | 2.654969604694120910e+00 -2.498851398450873340e+00 -4.570167867661299788e+00 -2.269571263382637039e+00 -1.983740517721036767e+00 9 | 4.468567087769349300e+00 2.733791973847035095e+00 2.244111523195138957e+00 -4.952546326811056332e+00 -1.231285322289288331e-02 10 | 3.058020363002086128e+00 7.265130633739849131e-01 -2.349330368421926885e+00 -2.514272015535059523e+00 -1.235536761429303354e+00 11 | -------------------------------------------------------------------------------- /GPyOpt/testing/functional_tests/test_files/output_normalization_acquisition_gradient_testfile.txt: -------------------------------------------------------------------------------- 1 | -8.297799529742597713e-01 -4.076614052312022451e+00 -8.080548559670521769e-01 1.704675101784022040e+00 3.007445686755366054e+00 2 | 2.203244934421580759e+00 -3.137397886223291188e+00 1.852195003967595177e+00 -8.269519763287300762e-01 4.682615757193975270e+00 3 | -4.998856251826550690e+00 -1.544392729569522338e+00 -2.955477502684825453e+00 5.868982844575167945e-01 -1.865758218407571611e+00 4 | -1.976674273681602312e+00 -1.032325257693300635e+00 3.781174363909453717e+00 -3.596130614047662100e+00 1.923226156693140787e+00 5 | -3.532441091828869340e+00 3.881673400335694524e-01 -4.726124068020737923e+00 -3.018985109151212320e+00 3.763891522960383540e+00 6 | 5.000000000000000000e+00 5.000000000000000000e+00 5.000000000000000000e+00 5.000000000000000000e+00 -5.000000000000000000e+00 7 | 5.000000000000000000e+00 -5.000000000000000000e+00 -5.000000000000000000e+00 -5.000000000000000000e+00 -5.000000000000000000e+00 8 | -5.000000000000000000e+00 -5.000000000000000000e+00 5.000000000000000000e+00 5.000000000000000000e+00 5.000000000000000000e+00 9 | 4.468567087769349300e+00 2.733791973847035095e+00 2.244111523195138957e+00 -4.952546326811056332e+00 -1.231285322289288331e-02 10 | 3.058020363002086128e+00 
7.265130633739849131e-01 -2.349330368421926885e+00 -2.514272015535059523e+00 -1.235536761429303354e+00 11 | -------------------------------------------------------------------------------- /GPyOpt/testing/functional_tests/test_context_manager.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import numpy as np 3 | 4 | from GPyOpt.core.task.space import Design_space 5 | from GPyOpt.experiment_design import initial_design 6 | from GPyOpt.optimization.acquisition_optimizer import ContextManager 7 | 8 | class TestContextManager(unittest.TestCase): 9 | def test_context_hadler(self): 10 | space = [ 11 | {'name': 'var1', 'type': 'continuous', 'domain':(-3,1), 'dimensionality': 3}, 12 | {'name': 'var2', 'type': 'discrete', 'domain': (0,1,2,3)}, 13 | {'name': 'var3', 'type': 'continuous', 'domain':(-5,5)}, 14 | {'name': 'var4', 'type': 'categorical', 'domain': (0, 1)} 15 | ] 16 | 17 | context = {'var1_1':0.45,'var3':0.52} 18 | 19 | design_space = Design_space(space) 20 | np.random.seed(666) 21 | 22 | self.context_manager = ContextManager(space = design_space, context = context) 23 | 24 | noncontext_bounds = [(-3, 1), (-3, 1), (0, 3), (0, 1), (0, 1)] 25 | noncontext_index = [1, 2, 3, 5, 6] 26 | expanded_vector = np.array([[ 0.45, 0. , 0. , 0. , 0.52, 0. , 0. ]]) 27 | 28 | assert np.all(noncontext_bounds == self.context_manager.noncontext_bounds ) 29 | assert np.all(noncontext_index == self.context_manager.noncontext_index) 30 | assert np.all(expanded_vector == self.context_manager._expand_vector(np.array([[0,0,0,0,0]]))) 31 | -------------------------------------------------------------------------------- /docs/GPyOpt.core.evaluators.rst: -------------------------------------------------------------------------------- 1 | GPyOpt.core.evaluators package 2 | ============================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | GPyOpt.core.evaluators.base module 8 | ---------------------------------- 9 | 10 | .. 
automodule:: GPyOpt.core.evaluators.base 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | GPyOpt.core.evaluators.batch\_local\_penalization module 16 | -------------------------------------------------------- 17 | 18 | .. automodule:: GPyOpt.core.evaluators.batch_local_penalization 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | GPyOpt.core.evaluators.batch\_random module 24 | ------------------------------------------- 25 | 26 | .. automodule:: GPyOpt.core.evaluators.batch_random 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | GPyOpt.core.evaluators.batch\_thompson module 32 | --------------------------------------------- 33 | 34 | .. automodule:: GPyOpt.core.evaluators.batch_thompson 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | GPyOpt.core.evaluators.sequential module 40 | ---------------------------------------- 41 | 42 | .. automodule:: GPyOpt.core.evaluators.sequential 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | 48 | Module contents 49 | --------------- 50 | 51 | .. automodule:: GPyOpt.core.evaluators 52 | :members: 53 | :undoc-members: 54 | :show-inheritance: 55 | -------------------------------------------------------------------------------- /docs/GPyOpt.experiment_design.rst: -------------------------------------------------------------------------------- 1 | GPyOpt.experiment\_design package 2 | ================================= 3 | 4 | Submodules 5 | ---------- 6 | 7 | GPyOpt.experiment\_design.base module 8 | ------------------------------------- 9 | 10 | .. automodule:: GPyOpt.experiment_design.base 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | GPyOpt.experiment\_design.grid\_design module 16 | --------------------------------------------- 17 | 18 | .. 
automodule:: GPyOpt.experiment_design.grid_design 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | GPyOpt.experiment\_design.latin\_design module 24 | ---------------------------------------------- 25 | 26 | .. automodule:: GPyOpt.experiment_design.latin_design 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | GPyOpt.experiment\_design.random\_design module 32 | ----------------------------------------------- 33 | 34 | .. automodule:: GPyOpt.experiment_design.random_design 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | GPyOpt.experiment\_design.sobol\_design module 40 | ---------------------------------------------- 41 | 42 | .. automodule:: GPyOpt.experiment_design.sobol_design 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | 48 | Module contents 49 | --------------- 50 | 51 | .. automodule:: GPyOpt.experiment_design 52 | :members: 53 | :undoc-members: 54 | :show-inheritance: 55 | -------------------------------------------------------------------------------- /GPyOpt/testing/functional_tests/test_files/Random_acquisition_gradient_testfile.txt: -------------------------------------------------------------------------------- 1 | -8.297799529742597713e-01 -4.076614052312022451e+00 -8.080548559670521769e-01 1.704675101784022040e+00 3.007445686755366054e+00 2 | 2.203244934421580759e+00 -3.137397886223291188e+00 1.852195003967595177e+00 -8.269519763287300762e-01 4.682615757193975270e+00 3 | -4.998856251826550690e+00 -1.544392729569522338e+00 -2.955477502684825453e+00 5.868982844575167945e-01 -1.865758218407571611e+00 4 | -1.976674273681602312e+00 -1.032325257693300635e+00 3.781174363909453717e+00 -3.596130614047662100e+00 1.923226156693140787e+00 5 | -3.532441091828869340e+00 3.881673400335694524e-01 -4.726124068020737923e+00 -3.018985109151212320e+00 3.763891522960383540e+00 6 | 5.000000000000000000e+00 -5.000000000000000000e+00 -5.000000000000000000e+00 5.000000000000000000e+00 
-5.000000000000000000e+00 7 | 1.787554875114909514e+00 4.468567087769349300e+00 4.154743683488572259e+00 4.190256658653185085e+00 -2.676564161404360043e+00 8 | 2.642020789997054919e+00 7.435633689559235648e-01 3.661052359286266267e+00 2.134308575158240018e+00 -4.136059345593357861e+00 9 | -5.000000000000000000e+00 5.000000000000000000e+00 5.000000000000000000e+00 5.000000000000000000e+00 5.000000000000000000e+00 10 | -2.120748931317948394e+00 -1.794374516427107658e+00 -3.114114739670693410e+00 -4.818201686634876246e+00 -1.815387794585582526e+00 11 | 1.626818768373873070e+00 -4.236498125024136030e+00 2.203162592132953179e+00 -2.126598473699166103e+00 1.002861014441851495e+00 12 | -------------------------------------------------------------------------------- /GPyOpt/testing/functional_tests/test_files/Local_penalization_acquisition_gradient_testfile.txt: -------------------------------------------------------------------------------- 1 | -8.297799529742597713e-01 -4.076614052312022451e+00 -8.080548559670521769e-01 1.704675101784022040e+00 3.007445686755366054e+00 2 | 2.203244934421580759e+00 -3.137397886223291188e+00 1.852195003967595177e+00 -8.269519763287300762e-01 4.682615757193975270e+00 3 | -4.998856251826550690e+00 -1.544392729569522338e+00 -2.955477502684825453e+00 5.868982844575167945e-01 -1.865758218407571611e+00 4 | -1.976674273681602312e+00 -1.032325257693300635e+00 3.781174363909453717e+00 -3.596130614047662100e+00 1.923226156693140787e+00 5 | -3.532441091828869340e+00 3.881673400335694524e-01 -4.726124068020737923e+00 -3.018985109151212320e+00 3.763891522960383540e+00 6 | 5.000000000000000000e+00 -5.000000000000000000e+00 -5.000000000000000000e+00 5.000000000000000000e+00 -5.000000000000000000e+00 7 | 5.000000000000000000e+00 -5.000000000000000000e+00 -5.000000000000000000e+00 5.000000000000000000e+00 -5.000000000000000000e+00 8 | 7.854550785367795385e-01 3.044361509227082152e+00 3.977177712467156567e+00 1.380670605012348062e+00 
7.228748803695062009e-03 9 | -5.000000000000000000e+00 5.000000000000000000e+00 5.000000000000000000e+00 5.000000000000000000e+00 5.000000000000000000e+00 10 | -5.000000000000000000e+00 5.000000000000000000e+00 5.000000000000000000e+00 5.000000000000000000e+00 5.000000000000000000e+00 11 | -5.000000000000000000e+00 5.000000000000000000e+00 5.000000000000000000e+00 5.000000000000000000e+00 5.000000000000000000e+00 12 | -------------------------------------------------------------------------------- /GPyOpt/testing/functional_tests/test_files/Thompson_sampling_acquisition_gradient_testfile.txt: -------------------------------------------------------------------------------- 1 | -8.297799529742597713e-01 -4.076614052312022451e+00 -8.080548559670521769e-01 1.704675101784022040e+00 3.007445686755366054e+00 2 | 2.203244934421580759e+00 -3.137397886223291188e+00 1.852195003967595177e+00 -8.269519763287300762e-01 4.682615757193975270e+00 3 | -4.998856251826550690e+00 -1.544392729569522338e+00 -2.955477502684825453e+00 5.868982844575167945e-01 -1.865758218407571611e+00 4 | -1.976674273681602312e+00 -1.032325257693300635e+00 3.781174363909453717e+00 -3.596130614047662100e+00 1.923226156693140787e+00 5 | -3.532441091828869340e+00 3.881673400335694524e-01 -4.726124068020737923e+00 -3.018985109151212320e+00 3.763891522960383540e+00 6 | 2.741477056899114473e+00 3.051765658246123891e+00 -3.332471098345672011e+00 4.183395892061327004e+00 1.007429144290281187e+00 7 | 3.119061365520590101e-01 8.912668700536832134e-01 3.966241628449107992e+00 2.063994725477250824e+00 1.671808993227479334e+00 8 | 4.486003479382683068e+00 3.091085677034939749e+00 4.474447711206018141e+00 4.679266501086916108e+00 -3.245538459000502307e+00 9 | -1.708448364889072479e+00 -4.671421000712320470e+00 3.168956674653049532e-01 -4.429702848274565063e+00 -3.597630050389653000e+00 10 | -3.239325749426169576e+00 1.873631355934328724e-01 1.624028656583811348e+00 1.858204594288571876e+00 -2.460796120865174075e+00 11 | 
4.138870642296877733e+00 -2.607614977910333032e+00 -4.071462286268428876e+00 -2.549142965219435286e+00 2.906152460562731576e+00 12 | -------------------------------------------------------------------------------- /GPyOpt/testing/functional_tests/test_files/Random_with_duplicate_check_acquisition_gradient_testfile.txt: -------------------------------------------------------------------------------- 1 | -8.297799529742597713e-01 -4.076614052312022451e+00 -8.080548559670521769e-01 1.704675101784022040e+00 3.007445686755366054e+00 2 | 2.203244934421580759e+00 -3.137397886223291188e+00 1.852195003967595177e+00 -8.269519763287300762e-01 4.682615757193975270e+00 3 | -4.998856251826550690e+00 -1.544392729569522338e+00 -2.955477502684825453e+00 5.868982844575167945e-01 -1.865758218407571611e+00 4 | -1.976674273681602312e+00 -1.032325257693300635e+00 3.781174363909453717e+00 -3.596130614047662100e+00 1.923226156693140787e+00 5 | -3.532441091828869340e+00 3.881673400335694524e-01 -4.726124068020737923e+00 -3.018985109151212320e+00 3.763891522960383540e+00 6 | 5.000000000000000000e+00 -5.000000000000000000e+00 -5.000000000000000000e+00 5.000000000000000000e+00 -5.000000000000000000e+00 7 | -4.999030493123251873e+00 1.635365121127891896e+00 4.260719033274007472e-01 2.671321601689705005e+00 2.006715852212042961e+00 8 | -4.998856251826550690e+00 2.353778827811242813e+00 3.788867564056872084e+00 3.353402361204299353e+00 1.818752429206016430e+00 9 | -5.000000000000000000e+00 -5.000000000000000000e+00 5.000000000000000000e+00 -5.000000000000000000e+00 5.000000000000000000e+00 10 | -4.999535322604655718e+00 -8.562428460578654921e-01 2.868128145926287509e+00 -8.130189386972697108e-01 -3.948875182387190375e+00 11 | -4.999342145969858819e+00 1.897269488886196065e+00 -1.320022702660018865e+00 8.213374167035247808e-01 -1.003601954774317129e+00 12 | -------------------------------------------------------------------------------- 
/GPyOpt/testing/functional_tests/test_files/Thompson_sampling_with_duplicate_check_acquisition_gradient_testfile.txt: -------------------------------------------------------------------------------- 1 | -8.297799529742597713e-01 -4.076614052312022451e+00 -8.080548559670521769e-01 1.704675101784022040e+00 3.007445686755366054e+00 2 | 2.203244934421580759e+00 -3.137397886223291188e+00 1.852195003967595177e+00 -8.269519763287300762e-01 4.682615757193975270e+00 3 | -4.998856251826550690e+00 -1.544392729569522338e+00 -2.955477502684825453e+00 5.868982844575167945e-01 -1.865758218407571611e+00 4 | -1.976674273681602312e+00 -1.032325257693300635e+00 3.781174363909453717e+00 -3.596130614047662100e+00 1.923226156693140787e+00 5 | -3.532441091828869340e+00 3.881673400335694524e-01 -4.726124068020737923e+00 -3.018985109151212320e+00 3.763891522960383540e+00 6 | 2.737279834022937841e+00 -2.388169785719291838e-01 -3.076444399226651250e+00 3.043488421783562714e+00 3.062894069657145835e+00 7 | 1.234871736928093711e+00 -5.591770734931493791e-01 4.266947476046739851e+00 3.953452211007727257e+00 1.784406028708920289e-01 8 | 3.602261249251670350e-01 1.765222322396274635e+00 5.992956848266057790e-01 -1.240295974693576220e+00 2.190744241966040562e-01 9 | -3.022024761715417007e+00 1.825634039243217543e+00 -4.704861922168817046e+00 -5.230258744433005091e-01 1.785365285083919673e-01 10 | -5.723913185073499221e-01 4.303887987307703611e+00 -4.704509581470478707e+00 3.967575501782729219e+00 3.195316738514227595e+00 11 | 3.260370977261004555e-01 2.891196654677543165e+00 1.846190599239175612e+00 8.922588314544199761e-01 -1.043415259312858723e+00 12 | -------------------------------------------------------------------------------- /GPyOpt/testing/optimization_tests/test_optimizer_creation.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import unittest 3 | 4 | from GPyOpt.core.errors import InvalidVariableNameError 5 | from 
GPyOpt.core.task.space import Design_space 6 | from GPyOpt.optimization.optimizer import choose_optimizer 7 | 8 | 9 | class TestOptimizerCreation(unittest.TestCase): 10 | def __init__(self, *args, **kwargs): 11 | super(TestOptimizerCreation, self).__init__(*args, **kwargs) 12 | 13 | self.space = [ 14 | {'name': 'var_1', 'type': 'continuous', 'domain': (-1, 1), 'dimensionality': 1}, 15 | {'name': 'var_2', 'type': 'continuous', 'domain': (-1, 1), 'dimensionality': 1} 16 | ] 17 | self.design_space = Design_space(self.space) 18 | self.f = lambda x: np.sum(np.sin(x)) 19 | 20 | def test_invalid_optimizer_name_raises_error(self): 21 | self.assertRaises(InvalidVariableNameError, 22 | choose_optimizer, 'asd', None) 23 | 24 | def test_create_lbfgs_optimizer(self): 25 | optimizer = choose_optimizer('lbfgs', self.design_space.get_bounds()) 26 | 27 | self.assertIsNotNone(optimizer) 28 | 29 | def test_create_direct_optimizer(self): 30 | optimizer = choose_optimizer('DIRECT', self.design_space.get_bounds()) 31 | 32 | self.assertIsNotNone(optimizer) 33 | 34 | def test_create_cma_optimizer(self): 35 | optimizer = choose_optimizer('CMA', self.design_space.get_bounds()) 36 | 37 | self.assertIsNotNone(optimizer) 38 | -------------------------------------------------------------------------------- /GPyOpt/experiment_design/sobol_design.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from ..core.errors import InvalidConfigError 4 | 5 | from .base import ExperimentDesign 6 | from .random_design import RandomDesign 7 | 8 | class SobolDesign(ExperimentDesign): 9 | """ 10 | Sobol experiment design. 
11 | Uses random design for non-continuous variables, and Sobol sequence for continuous ones 12 | """ 13 | def __init__(self, space): 14 | if space.has_constraints(): 15 | raise InvalidConfigError('Sampling with constraints is not allowed by Sobol design') 16 | super(SobolDesign, self).__init__(space) 17 | 18 | def get_samples(self, init_points_count): 19 | samples = np.empty((init_points_count, self.space.dimensionality)) 20 | 21 | # Use random design to fill non-continuous variables 22 | random_design = RandomDesign(self.space) 23 | random_design.fill_noncontinous_variables(samples) 24 | 25 | if self.space.has_continuous(): 26 | bounds = self.space.get_continuous_bounds() 27 | lower_bound = np.asarray(bounds)[:,0].reshape(1,len(bounds)) 28 | upper_bound = np.asarray(bounds)[:,1].reshape(1,len(bounds)) 29 | diff = upper_bound-lower_bound 30 | 31 | from sobol_seq import i4_sobol_generate 32 | X_design = np.dot(i4_sobol_generate(len(self.space.get_continuous_bounds()),init_points_count),np.diag(diff.flatten()))[None,:] + lower_bound 33 | samples[:, self.space.get_continuous_dims()] = X_design 34 | 35 | return samples -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Copyright (c) 2015, the GPyOpt authors 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | * Redistributions of source code must retain the above copyright 7 | notice, this list of conditions and the following disclaimer. 8 | * Redistributions in binary form must reproduce the above copyright 9 | notice, this list of conditions and the following disclaimer in the 10 | documentation and/or other materials provided with the distribution. 
11 | * Neither the name of the nor the 12 | names of its contributors may be used to endorse or promote products 13 | derived from this software without specific prior written permission. 14 | 15 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 16 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 17 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 18 | DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY 19 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 21 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 22 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 24 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 | -------------------------------------------------------------------------------- /GPyOpt/util/duplicate_manager.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2016, the GPyOpt Authors 2 | # Licensed under the BSD 3-clause license (see LICENSE.txt) 3 | 4 | import numpy as np 5 | from ..core.task.space import Design_space 6 | 7 | class DuplicateManager(object): 8 | """ 9 | Class to manage potential duplicates in the suggested candidates. 
10 | 11 | :param space: object managing all the logic related the domain of the optimization 12 | :param zipped_X: matrix of evaluated configurations 13 | :param pending_zipped_X: matrix of configurations in the pending state 14 | :param ignored_zipped_X: matrix of configurations that the user desires to ignore (e.g., because they may have led to failures) 15 | """ 16 | 17 | def __init__(self, space, zipped_X, pending_zipped_X=None, ignored_zipped_X=None): 18 | 19 | self.space = space 20 | 21 | self.unique_points = set() 22 | self.unique_points.update(tuple(x.flatten()) for x in zipped_X) 23 | 24 | if np.any(pending_zipped_X): 25 | self.unique_points.update(tuple(x.flatten()) for x in pending_zipped_X) 26 | 27 | if np.any(ignored_zipped_X): 28 | self.unique_points.update(tuple(x.flatten()) for x in ignored_zipped_X) 29 | 30 | 31 | def is_zipped_x_duplicate(self, zipped_x): 32 | """ 33 | param: zipped_x: configuration assumed to be zipped 34 | """ 35 | return tuple(zipped_x.flatten()) in self.unique_points 36 | 37 | def is_unzipped_x_duplicate(self, unzipped_x): 38 | """ 39 | param: unzipped_x: configuration assumed to be unzipped 40 | """ 41 | return self.is_zipped_x_duplicate(self.space.zip_inputs(np.atleast_2d(unzipped_x))) 42 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | import os 5 | from setuptools import setup, find_packages 6 | 7 | def read(fname): 8 | return open(os.path.join(os.path.dirname(__file__), fname)).read() 9 | 10 | __version__ = "1.2.6" 11 | 12 | packages = find_packages(exclude=("GPyOpt.testing",)) 13 | setup(name = 'GPyOpt', 14 | version = __version__, 15 | author = read('AUTHORS.txt').replace('\n', ', ').replace('-', ''), 16 | author_email = "j.h.gonzalez@sheffield.ac.uk", 17 | description = "The Bayesian Optimization Toolbox", 18 | long_description = 
read('README.md'), 19 | long_description_content_type = 'text/markdown', 20 | license = "BSD 3-clause", 21 | keywords = "machine-learning gaussian-processes kernels optimization", 22 | url = "http://sheffieldml.github.io/GPyOpt/", 23 | packages = packages, 24 | package_dir = {'GPyOpt': 'GPyOpt'}, 25 | include_package_data = True, 26 | py_modules = ['GPyOpt.__init__'], 27 | install_requires = ['numpy>=1.7', 'scipy>=0.16', 'GPy>=1.8'], 28 | extras_require = {'optimizer':['DIRECT','cma','pyDOE','sobol_seq','emcee'],'docs':['matplotlib >=1.3','Sphinx','IPython']}, 29 | classifiers=['License :: OSI Approved :: BSD License', 30 | 'Natural Language :: English', 31 | 'Operating System :: MacOS :: MacOS X', 32 | 'Operating System :: Microsoft :: Windows', 33 | 'Operating System :: POSIX :: Linux', 34 | 'Programming Language :: Python :: 2.7', 35 | 'Topic :: Scientific/Engineering :: Artificial Intelligence'], 36 | scripts=['gpyopt.py'], 37 | ) 38 | -------------------------------------------------------------------------------- /GPyOpt/testing/functional_tests/mocks.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from GPyOpt.models.base import BOModel 4 | 5 | class MockModel(BOModel): 6 | def __init__(self): 7 | self.params = [] 8 | self.X = [] 9 | self.Y = [] 10 | 11 | def f(self, x): 12 | return np.dot(np.insert(x, 0, 1.0), self.params) 13 | 14 | def updateModel(self, X_all, Y_all, X_new, Y_new): 15 | self.X = X_all 16 | self.Y = Y_all 17 | ones = np.ones(X_all.shape[0]).reshape(-1, 1) 18 | X = np.hstack([ones, X_all]) 19 | self.params = np.linalg.lstsq(X, Y_all.flatten())[0] 20 | 21 | def predict(self, X): 22 | preds = [self.f(x) for x in X] 23 | return np.atleast_2d(np.mean(preds)), np.atleast_2d(np.std(preds)) 24 | 25 | def predict_withGradients(self, X): 26 | preds = [self.f(x) for x in X] 27 | return np.atleast_2d(np.mean(preds)), np.atleast_2d(np.std(preds)), X, X 28 | 29 | def get_fmin(self): 30 | 
preds = [self.f(x) for x in self.X] 31 | return min(preds) 32 | 33 | def get_model_parameters(self): 34 | return np.atleast_2d(self.params) 35 | 36 | def get_model_parameter_namess(self): 37 | return np.atleast_2d(['b' + i for i, _ in enumerate(self.params)]) 38 | 39 | class MockModelVectorValuedPredict(MockModel): 40 | 41 | def predict(self, X): 42 | preds = [self.f(x) for x in X] 43 | return np.atleast_2d(np.mean(preds)*np.ones(len(preds))), np.atleast_2d(np.std(preds)*np.ones(len(preds))) 44 | 45 | def predict_withGradients(self, X): 46 | preds = [self.f(x) for x in X] 47 | return np.atleast_2d(np.mean(preds)*np.ones(len(preds))), np.atleast_2d(np.std(preds)*np.ones(len(preds))), X, X 48 | -------------------------------------------------------------------------------- /docs/GPyOpt.acquisitions.rst: -------------------------------------------------------------------------------- 1 | GPyOpt.acquisitions package 2 | =========================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | GPyOpt.acquisitions.EI module 8 | ----------------------------- 9 | 10 | .. automodule:: GPyOpt.acquisitions.EI 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | GPyOpt.acquisitions.EI\_mcmc module 16 | ----------------------------------- 17 | 18 | .. automodule:: GPyOpt.acquisitions.EI_mcmc 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | GPyOpt.acquisitions.LCB module 24 | ------------------------------ 25 | 26 | .. automodule:: GPyOpt.acquisitions.LCB 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | GPyOpt.acquisitions.LCB\_mcmc module 32 | ------------------------------------ 33 | 34 | .. automodule:: GPyOpt.acquisitions.LCB_mcmc 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | GPyOpt.acquisitions.LP module 40 | ----------------------------- 41 | 42 | .. 
automodule:: GPyOpt.acquisitions.LP 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | GPyOpt.acquisitions.MPI module 48 | ------------------------------ 49 | 50 | .. automodule:: GPyOpt.acquisitions.MPI 51 | :members: 52 | :undoc-members: 53 | :show-inheritance: 54 | 55 | GPyOpt.acquisitions.MPI\_mcmc module 56 | ------------------------------------ 57 | 58 | .. automodule:: GPyOpt.acquisitions.MPI_mcmc 59 | :members: 60 | :undoc-members: 61 | :show-inheritance: 62 | 63 | GPyOpt.acquisitions.base module 64 | ------------------------------- 65 | 66 | .. automodule:: GPyOpt.acquisitions.base 67 | :members: 68 | :undoc-members: 69 | :show-inheritance: 70 | 71 | 72 | Module contents 73 | --------------- 74 | 75 | .. automodule:: GPyOpt.acquisitions 76 | :members: 77 | :undoc-members: 78 | :show-inheritance: 79 | -------------------------------------------------------------------------------- /GPyOpt/core/evaluators/batch_random.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2016, the GPyOpt Authors 2 | # Licensed under the BSD 3-clause license (see LICENSE.txt) 3 | 4 | from .base import SamplingBasedBatchEvaluator 5 | 6 | from ...optimization.anchor_points_generator import RandomAnchorPointsGenerator 7 | import numpy as np 8 | 9 | class RandomBatch(SamplingBasedBatchEvaluator): 10 | """ 11 | Class for a random batch method. The first element of the batch is selected by optimizing the acquisition in a standard way. The remaining elements are 12 | selected uniformly random in the domain of the objective. 13 | 14 | :param acquisition: acquisition function to be used to compute the batch. 15 | :param batch size: the number of elements in the batch. 
16 | 17 | """ 18 | def __init__(self, acquisition, batch_size): 19 | 20 | super(RandomBatch, self).__init__(acquisition, batch_size) 21 | 22 | def initialize_batch(self, duplicate_manager=None,context_manager=None): 23 | 24 | x, _ = self.acquisition.optimize(duplicate_manager=duplicate_manager) 25 | 26 | return x 27 | 28 | def get_anchor_points(self, duplicate_manager=None,context_manager=None): 29 | 30 | design_type, unique = "random", False 31 | if duplicate_manager: 32 | unique = True 33 | 34 | anchor_points_generator = RandomAnchorPointsGenerator(self.space, design_type) 35 | return anchor_points_generator.get(num_anchor=self.num_anchor, duplicate_manager=duplicate_manager, unique=unique, context_manager = self.acquisition.optimizer.context_manager) 36 | 37 | def optimize_anchor_point(self, a, duplicate_manager=None,context_manager=None): 38 | 39 | return a 40 | 41 | def compute_batch_without_duplicate_logic(self, context_manager=None): 42 | 43 | x, anchor_points = self.initialize_batch(), self.get_anchor_points() 44 | 45 | return np.vstack((x, anchor_points[:(self.batch_size - 1), :])) 46 | -------------------------------------------------------------------------------- /GPyOpt/experiment_design/latin_design.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from ..core.errors import InvalidConfigError 4 | 5 | from .base import ExperimentDesign 6 | from .random_design import RandomDesign 7 | 8 | class LatinDesign(ExperimentDesign): 9 | """ 10 | Latin experiment design. 
11 | Uses random design for non-continuous variables, and latin hypercube for continuous ones 12 | """ 13 | def __init__(self, space): 14 | if space.has_constraints(): 15 | raise InvalidConfigError('Sampling with constraints is not allowed by latin design') 16 | super(LatinDesign, self).__init__(space) 17 | 18 | def get_samples(self, init_points_count, criterion='center'): 19 | """ 20 | Generates required amount of sample points 21 | 22 | :param init_points_count: Number of samples to generate 23 | :param criterion: For details of the effect of this parameter, please refer to pyDOE.lhs documentation 24 | Default: 'center' 25 | :returns: Generated samples 26 | """ 27 | samples = np.empty((init_points_count, self.space.dimensionality)) 28 | 29 | # Use random design to fill non-continuous variables 30 | random_design = RandomDesign(self.space) 31 | random_design.fill_noncontinous_variables(samples) 32 | 33 | if self.space.has_continuous(): 34 | bounds = self.space.get_continuous_bounds() 35 | lower_bound = np.asarray(bounds)[:,0].reshape(1, len(bounds)) 36 | upper_bound = np.asarray(bounds)[:,1].reshape(1, len(bounds)) 37 | diff = upper_bound - lower_bound 38 | 39 | from pyDOE import lhs 40 | X_design_aux = lhs(len(self.space.get_continuous_bounds()), init_points_count, criterion=criterion) 41 | I = np.ones((X_design_aux.shape[0], 1)) 42 | X_design = np.dot(I, lower_bound) + X_design_aux * np.dot(I, diff) 43 | 44 | samples[:, self.space.get_continuous_dims()] = X_design 45 | 46 | return samples 47 | -------------------------------------------------------------------------------- /GPyOpt/acquisitions/LCB.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2016, the GPyOpt Authors 2 | # Licensed under the BSD 3-clause license (see LICENSE.txt) 3 | 4 | from .base import AcquisitionBase 5 | from ..util.general import get_quantiles 6 | 7 | class AcquisitionLCB(AcquisitionBase): 8 | """ 9 | GP-Lower Confidence Bound 
acquisition function with constant exploration weight. 10 | See: 11 | 12 | Gaussian Process Optimization in the Bandit Setting: No Regret and Experimental Design 13 | Srinivas et al., Proc. International Conference on Machine Learning (ICML), 2010 14 | 15 | :param model: GPyOpt class of model 16 | :param space: GPyOpt class of domain 17 | :param optimizer: optimizer of the acquisition. Should be a GPyOpt optimizer 18 | :param cost_withGradients: function 19 | :param jitter: positive value to make the acquisition more explorative 20 | 21 | .. Note:: does not allow to be used with cost 22 | 23 | """ 24 | 25 | analytical_gradient_prediction = True 26 | 27 | def __init__(self, model, space, optimizer=None, cost_withGradients=None, exploration_weight=2): 28 | self.optimizer = optimizer 29 | super(AcquisitionLCB, self).__init__(model, space, optimizer) 30 | self.exploration_weight = exploration_weight 31 | 32 | if cost_withGradients is not None: 33 | print('The set cost function is ignored! LCB acquisition does not make sense with cost.') 34 | 35 | def _compute_acq(self, x): 36 | """ 37 | Computes the GP-Lower Confidence Bound 38 | """ 39 | m, s = self.model.predict(x) 40 | f_acqu = -m + self.exploration_weight * s 41 | return f_acqu 42 | 43 | def _compute_acq_withGradients(self, x): 44 | """ 45 | Computes the GP-Lower Confidence Bound and its derivative 46 | """ 47 | m, s, dmdx, dsdx = self.model.predict_withGradients(x) 48 | f_acqu = -m + self.exploration_weight * s 49 | df_acqu = -dmdx + self.exploration_weight * dsdx 50 | return f_acqu, df_acqu 51 | 52 | -------------------------------------------------------------------------------- /GPyOpt/acquisitions/EI.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2016, the GPyOpt Authors 2 | # Licensed under the BSD 3-clause license (see LICENSE.txt) 3 | 4 | from .base import AcquisitionBase 5 | from ..util.general import get_quantiles 6 | 7 | class 
AcquisitionEI(AcquisitionBase): 8 | """ 9 | Expected improvement acquisition function 10 | 11 | :param model: GPyOpt class of model 12 | :param space: GPyOpt class of domain 13 | :param optimizer: optimizer of the acquisition. Should be a GPyOpt optimizer 14 | :param cost_withGradients: function 15 | :param jitter: positive value to make the acquisition more explorative. 16 | 17 | .. Note:: allows to compute the Improvement per unit of cost 18 | 19 | """ 20 | 21 | analytical_gradient_prediction = True 22 | 23 | def __init__(self, model, space, optimizer=None, cost_withGradients=None, jitter=0.01): 24 | self.optimizer = optimizer 25 | super(AcquisitionEI, self).__init__(model, space, optimizer, cost_withGradients=cost_withGradients) 26 | self.jitter = jitter 27 | 28 | @staticmethod 29 | def fromConfig(model, space, optimizer, cost_withGradients, config): 30 | return AcquisitionEI(model, space, optimizer, cost_withGradients, jitter=config['jitter']) 31 | 32 | def _compute_acq(self, x): 33 | """ 34 | Computes the Expected Improvement per unit of cost 35 | """ 36 | m, s = self.model.predict(x) 37 | fmin = self.model.get_fmin() 38 | phi, Phi, u = get_quantiles(self.jitter, fmin, m, s) 39 | f_acqu = s * (u * Phi + phi) 40 | return f_acqu 41 | 42 | def _compute_acq_withGradients(self, x): 43 | """ 44 | Computes the Expected Improvement and its derivative (has a very easy derivative!) 
class AcquisitionMPI(AcquisitionBase):
    """
    Maximum probability of improvement acquisition function

    :param model: GPyOpt class of model
    :param space: GPyOpt class of domain
    :param optimizer: optimizer of the acquisition. Should be a GPyOpt optimizer
    :param cost_withGradients: function
    :param jitter: positive value to make the acquisition more explorative

    .. Note:: allows to compute the Improvement per unit of cost

    """

    analytical_gradient_prediction = True

    def __init__(self, model, space, optimizer=None, cost_withGradients=None, jitter=0.01):
        self.optimizer = optimizer
        super(AcquisitionMPI, self).__init__(model, space, optimizer, cost_withGradients=cost_withGradients)
        self.jitter = jitter

    @staticmethod
    def fromConfig(model, space, optimizer, cost_withGradients, config):
        """Alternate constructor: builds the acquisition from a config dict (reads config['jitter'])."""
        return AcquisitionMPI(model, space, optimizer, cost_withGradients, jitter=config['jitter'])

    def _compute_acq(self, x):
        """
        Computes the Maximum probability of improvement per unit of cost
        """
        mean, std = self.model.predict(x)
        incumbent = self.model.get_fmin()
        # MPI is simply the Gaussian CDF term of the improvement quantile.
        _, Phi, _ = get_quantiles(self.jitter, incumbent, mean, std)
        return Phi

    def _compute_acq_withGradients(self, x):
        """
        Computes the Maximum probability of improvement and its derivative (has a very easy derivative!)
        """
        incumbent = self.model.get_fmin()
        mean, std, dmean_dx, dstd_dx = self.model.predict_withGradients(x)
        phi, Phi, u = get_quantiles(self.jitter, incumbent, mean, std)
        # Chain rule through u = (fmin - m - jitter)/s gives this gradient of Phi(u).
        dPhi_dx = -(phi / std) * (dmean_dx + dstd_dx * u)
        return Phi, dPhi_dx
class TestMPImcmcAcquisition(unittest.TestCase):
    """Unit tests for the integrated (MCMC) MPI acquisition against a mocked model."""

    def setUp(self):
        self.mock_model = Mock()
        self.mock_optimizer = Mock()
        design = [{'name': 'var_1', 'type': 'continuous', 'domain': (-5,5), 'dimensionality': 2}]
        self.space = Design_space(design, None)

        self.mpi_mcmc_acquisition = AcquisitionMPI_MCMC(self.mock_model, self.space, self.mock_optimizer)

    def test_acquisition_function(self):
        """Test that acquisition function returns correct weighted acquisition
        """
        self.mock_model.predict.return_value = np.array([[1,2,3,4], [3,3,3,3]])
        self.mock_model.get_fmin.return_value = np.array([[0.1,0.2,0.3,0.4]])

        result = self.mpi_mcmc_acquisition.acquisition_function(np.array([2,2]))

        reference = np.array([[-0.09520448, -0.09839503, -0.10161442, -0.10485931],
                              [-0.09520448, -0.09839503, -0.10161442, -0.10485931]])
        self.assertTrue(np.isclose(result, reference).all())

    def test_acquisition_function_withGradients(self):
        """Test that acquisition function with gradients returns correct weight acquisition and gradient
        """
        self.mock_model.predict_withGradients.return_value = np.array([[1,2,3,4],[3,2,3,5],[0.1,0.4,0.1,0.2],[0.2,0.4,0.2,0.1]])
        self.mock_model.get_fmin.return_value = np.array([[1,3]])

        acq, grad = self.mpi_mcmc_acquisition.acquisition_function_withGradients(np.array([2,2]))

        self.assertTrue(np.isclose(acq, np.array([[-0.12466755, -0.18661036],[-0.12466755, -0.18661036]])).all())
        self.assertTrue(np.isclose(grad, np.array([[0.00330234, 0.00620749],[0.00330234, 0.00620749]])).all())
class AcquisitionLCB_MCMC(AcquisitionLCB):
    """
    Integrated GP-Lower Confidence Bound acquisition function

    :param model: GPyOpt class of model; must expose hyper-parameter samples (model.MCMC_sampler)
    :param space: GPyOpt class of domain
    :param optimizer: optimizer of the acquisition. Should be a GPyOpt optimizer
    :param cost_withGradients: function
    :param exploration_weight: positive parameter to control exploration / exploitation

    .. Note:: does not allow to be used with cost (inherited from AcquisitionLCB,
              which ignores any supplied cost function)

    """

    analytical_gradient_prediction = True

    def __init__(self, model, space, optimizer=None, cost_withGradients=None, exploration_weight=2):
        super(AcquisitionLCB_MCMC, self).__init__(model, space, optimizer, cost_withGradients, exploration_weight)
        assert self.model.MCMC_sampler, 'Samples from the hyper-parameters are needed to compute the integrated GP-LCB'

    def _compute_acq(self, x):
        """
        Integrated GP-Lower Confidence Bound: the LCB averaged over the
        per-hyper-parameter-sample predictions returned by the model.
        """
        means, stds = self.model.predict(x)
        f_acqu = 0
        for m, s in zip(means, stds):
            f_acqu += -m + self.exploration_weight * s
        return f_acqu / len(means)

    def _compute_acq_withGradients(self, x):
        """
        Integrated GP-Lower Confidence Bound and its derivative, averaged over
        the hyper-parameter samples.
        """
        means, stds, dmdxs, dsdxs = self.model.predict_withGradients(x)
        # Accumulators start as None so the first sample fixes the array shapes.
        f_acqu = None
        df_acqu = None
        for m, s, dmdx, dsdx in zip(means, stds, dmdxs, dsdxs):
            f = -m + self.exploration_weight * s
            df = -dmdx + self.exploration_weight * dsdx
            if f_acqu is None:
                f_acqu = f
                df_acqu = df
            else:
                f_acqu += f
                df_acqu += df
        return f_acqu / (len(means)), df_acqu / (len(means))
class AffineInvariantEnsembleSampler(McmcSampler):
    def __init__(self, space):
        """
        Creates an instance of the affine invariant ensemble sampler.

        Parameters:
            space - variable space
        """
        super(AffineInvariantEnsembleSampler, self).__init__(space)

    def get_samples(self, n_samples, log_p_function, burn_in_steps=50):
        """
        Generates samples.

        Parameters:
            n_samples - number of samples to generate
            log_p_function - a function that returns log density for a specific sample
            burn_in_steps - number of burn-in steps for sampling

        Returns a tuple of two array: (samples, log_p_function values for samples)
        """
        # One random starting point per walker (one walker per requested sample).
        walkers = initial_design('random', self.space, n_samples)
        ensemble = emcee.EnsembleSampler(n_samples, self.space.input_dim(), log_p_function)
        chain, chain_log, _ = ensemble.run_mcmc(walkers, burn_in_steps)

        # make sure we have an array of shape (n samples, space input dim)
        if len(chain.shape) == 1:
            chain = chain.reshape(-1, 1)
            chain_log = chain_log.reshape(-1, 1)

        return chain, chain_log
class MockSampler(McmcSampler):
    """Deterministic stand-in sampler: latin design points with fixed integer log-densities."""

    def __init__(self, space):
        self.space = space

    def get_samples(self, n_samples, log_p_function, burn_in_steps=50):
        points = initial_design('latin', self.space, n_samples)
        log_values = np.array([[i] for i in range(n_samples)])

        return points, log_values


class TestEntropySearchAcquisition(unittest.TestCase):
    """Tests for the Entropy Search acquisition using a real GP model and a mock sampler."""

    def setUp(self):
        np.random.seed(1)

        X = np.array([[-1.5, -1], [1, 1.5], [3, 3]])
        y = 2 * -np.array([[-0.1], [.3], [.9]])
        bounds = [(-5, 5)]
        input_dim = X.shape[1]
        kern = GPy.kern.RBF(input_dim, variance=1., lengthscale=1.)
        self.model = GPModel(kern, noise_var=0.0, max_iters=0, optimize_restarts=0)
        self.model.updateModel(X, y, None, None)
        design = [{'name': 'var_1', 'type': 'continuous', 'domain': bounds[0], 'dimensionality': 2}]
        self.space = Design_space(design)

        self.mock_optimizer = Mock()

    def test_acquisition_function(self):
        es = AcquisitionEntropySearch(self.model, self.space, MockSampler(self.space))

        value = es.acquisition_function(np.array([[1, 1]]))

        assert_allclose(value, np.array([[-20.587977]]), 1e-5)

    def test_optimize(self):
        expected_optimum_position = [[0, 0]]
        self.mock_optimizer.optimize.return_value = expected_optimum_position
        es = AcquisitionEntropySearch(self.model, self.space, MockSampler(self.space), optimizer=self.mock_optimizer)

        assert es.optimize() == expected_optimum_position
class AcquisitionMPI_MCMC(AcquisitionMPI):
    """
    Integrated Maximum Probability of Improvement acquisition function

    :param model: GPyOpt class of model; must expose hyper-parameter samples (model.MCMC_sampler)
    :param space: GPyOpt class of domain
    :param optimizer: optimizer of the acquisition. Should be a GPyOpt optimizer
    :param cost_withGradients: function
    :param jitter: positive value to make the acquisition more explorative

    .. Note:: allows to compute the Improvement per unit of cost

    """

    analytical_gradient_prediction = True

    def __init__(self, model, space, optimizer=None, cost_withGradients=None, jitter=0.01):
        super(AcquisitionMPI_MCMC, self).__init__(model, space, optimizer, cost_withGradients, jitter)

        # Message corrected from a copy-paste of EI_MCMC: this class computes MPI.
        assert self.model.MCMC_sampler, 'Samples from the hyper-parameters are needed to compute the integrated MPI'

    def _compute_acq(self, x):
        """
        Integrated Maximum Probability of Improvement: MPI averaged over the
        hyper-parameter samples.  (Docstring corrected; it previously said
        "Integrated Expected Improvement", copy-pasted from EI_MCMC.)
        """
        means, stds = self.model.predict(x)
        fmins = self.model.get_fmin()
        f_acqu = 0
        for m, s, fmin in zip(means, stds, fmins):
            _, Phi, _ = get_quantiles(self.jitter, fmin, m, s)
            f_acqu += Phi
        return f_acqu / len(means)

    def _compute_acq_withGradients(self, x):
        """
        Integrated Maximum Probability of Improvement and its derivative,
        averaged over the hyper-parameter samples.
        """
        means, stds, dmdxs, dsdxs = self.model.predict_withGradients(x)
        fmins = self.model.get_fmin()
        # Accumulators start as None so the first sample fixes the array shapes.
        f_acqu = None
        df_acqu = None
        for m, s, fmin, dmdx, dsdx in zip(means, stds, fmins, dmdxs, dsdxs):
            phi, Phi, u = get_quantiles(self.jitter, fmin, m, s)
            f = Phi
            # Gradient of Phi(u) through u = (fmin - m - jitter)/s.
            df = -(phi / s) * (dmdx + dsdx * u)
            if f_acqu is None:
                f_acqu = f
                df_acqu = df
            else:
                f_acqu += f
                df_acqu += df
        return f_acqu / (len(means)), df_acqu / (len(means))
class AcquisitionEI_MCMC(AcquisitionEI):
    """
    Integrated Expected improvement acquisition function

    :param model: GPyOpt class of model; must expose hyper-parameter samples (model.MCMC_sampler)
    :param space: GPyOpt class of domain
    :param optimizer: optimizer of the acquisition. Should be a GPyOpt optimizer
    :param cost_withGradients: function
    :param jitter: positive value to make the acquisition more explorative

    .. Note:: allows to compute the Improvement per unit of cost

    """

    analytical_gradient_prediction = True

    def __init__(self, model, space, optimizer=None, cost_withGradients=None, jitter=0.01):
        super(AcquisitionEI_MCMC, self).__init__(model, space, optimizer, cost_withGradients, jitter)

        assert self.model.MCMC_sampler, 'Samples from the hyper-parameters are needed to compute the integrated EI'

    def _compute_acq(self, x):
        """
        Integrated Expected Improvement: EI averaged over the hyper-parameter samples.
        """
        means, stds = self.model.predict(x)
        fmins = self.model.get_fmin()
        f_acqu = 0
        for m, s, fmin in zip(means, stds, fmins):
            phi, Phi, u = get_quantiles(self.jitter, fmin, m, s)
            # s*(u*Phi + phi) == (fmin - m - jitter)*Phi + s*phi, matching the base
            # AcquisitionEI._compute_acq and the analytical gradient below.
            # (Previously written as (fmin - m + self.jitter)*Phi + s*phi, whose
            # jitter sign disagreed with both.)
            f_acqu += s * (u * Phi + phi)
        return f_acqu / (len(means))

    def _compute_acq_withGradients(self, x):
        """
        Integrated Expected Improvement and its derivative, averaged over the
        hyper-parameter samples.
        """
        means, stds, dmdxs, dsdxs = self.model.predict_withGradients(x)
        fmins = self.model.get_fmin()
        # Accumulators start as None so the first sample fixes the array shapes.
        f_acqu = None
        df_acqu = None
        for m, s, fmin, dmdx, dsdx in zip(means, stds, fmins, dmdxs, dsdxs):
            phi, Phi, u = get_quantiles(self.jitter, fmin, m, s)
            f = s * (u * Phi + phi)
            # The Phi-derivative terms cancel, leaving this compact gradient.
            df = dsdx * phi - Phi * dmdx
            if f_acqu is None:
                f_acqu = f
                df_acqu = df
            else:
                f_acqu += f
                df_acqu += df
        return f_acqu / (len(means)), df_acqu / (len(means))
class TestRandomBatch(unittest.TestCase):
    """Tests for the RandomBatch evaluator built on an EI acquisition with mocked model/optimizer."""

    def setUp(self):
        self.mock_model = Mock()
        self.mock_optimizer = Mock()
        self.expected_optimum_position = [[0, 0]]
        self.mock_optimizer.optimize.return_value = self.expected_optimum_position, self.expected_optimum_position
        design = [{'name': 'var_1', 'type': 'continuous', 'domain': (-5,5), 'dimensionality': 2}]
        self.space = Design_space(design, None)
        self.mock_optimizer.context_manager = ContextManager(self.space)
        self.ei_acquisition = AcquisitionEI(self.mock_model, self.space, self.mock_optimizer)

        self.random_batch = RandomBatch(self.ei_acquisition, 10)

    def test_initialize_batch(self):
        self.assertEqual(self.random_batch.initialize_batch(), self.expected_optimum_position)

    def test_get_anchor_points(self):
        anchors = self.random_batch.get_anchor_points()

        self.assertEqual((50, 2), anchors.shape)
        # Anchors are drawn uniformly over [-5, 5]^2; loose statistical sanity bounds.
        self.assertTrue(np.absolute(np.mean(anchors)) < 1.5)
        self.assertTrue(np.std(anchors) < 4)

    def test_optimize_anchor_points(self):
        self.assertEqual(self.random_batch.optimize_anchor_point(5), 5)

    def test_compute_batch_without_duplicate_logic(self):
        batch_points = self.random_batch.compute_batch_without_duplicate_logic()

        self.assertEqual((10, 2), batch_points.shape)
        self.assertTrue(np.absolute(np.mean(batch_points)) < 2)
        self.assertTrue(np.std(batch_points) < 4)

    def test_compute_batch(self):
        full_batch = self.random_batch.compute_batch()

        self.assertEqual((10, 2), full_batch.shape)
        self.assertTrue(np.absolute(np.mean(full_batch)) < 2)
        self.assertTrue(np.std(full_batch) < 3.5)

    def test_zip_and_tuple(self):
        flattened = self.random_batch.zip_and_tuple(np.array([[1,2],[1,3],[2,2]]))

        self.assertEqual(flattened, (1,2,1,3,2,2))
def run_notebook(notebook_path):
    """Execute a notebook via nbconvert and collect output.
    :returns (parsed nb object, execution errors)
    """
    # Resolve to an absolute path BEFORE chdir: the original passed a possibly
    # relative notebook_path to nbconvert after changing directory, which broke
    # when the script was invoked with a relative path from another directory.
    notebook_path = os.path.abspath(notebook_path)
    dirname, __ = os.path.split(notebook_path)
    os.chdir(dirname)
    with tempfile.NamedTemporaryFile(suffix=".ipynb") as fout:
        args = ["jupyter-nbconvert", "--to", "notebook", "--execute", "--allow-errors",
                "--ExecutePreprocessor.timeout=300",
                "--output", fout.name, notebook_path]
        try:
            subprocess.check_call(args)
        except subprocess.CalledProcessError as e:
            if e.returncode == 1:
                # print the message and ignore error with code 1 as this indicates there were errors in the notebook
                print(e.output)
            else:
                # all other codes indicate some other problem, rethrow
                raise

        fout.seek(0)
        nb = nbformat.read(fout, nbformat.current_nbformat)

        # Collect every error output from every executed cell.
        errors = [output for cell in nb.cells if "outputs" in cell
                  for output in cell["outputs"]
                  if output.output_type == "error"]

    return nb, errors


if __name__ == "__main__":
    # Default to the current directory when no path argument is given.
    if len(sys.argv) <= 1:
        notebooks_directory = os.getcwd()
    else:
        notebooks_directory = sys.argv[1]

    check_notebooks_for_errors(notebooks_directory)
class WarpedGPModel(BOModel):
    """
    Bayesian-optimization model backed by GPy's warped Gaussian process
    (GPy.models.WarpedGP).  Marked upstream as not fully tested.

    :param kernel: GPy kernel; defaults to Matern32 when None
    :param noise_var: NOTE(review): stored but never passed to WarpedGP below — confirm intended
    :param exact_feval: if True, fixes the Gaussian noise to 1e-6 (noiseless observations)
    :param optimizer: GPy optimizer name used in updateModel
    :param max_iters: maximum optimizer iterations
    :param optimize_restarts: stored; not used by this class's own methods
    :param warping_function: warping function forwarded to GPy.models.WarpedGP
    :param warping_terms: number of warping terms forwarded to GPy.models.WarpedGP
    :param verbose: if True, the GPy optimizer prints progress messages
    """

    analytical_gradient_prediction = False

    def __init__(self, kernel=None, noise_var=None, exact_feval=False, optimizer='bfgs', max_iters=1000,
                 optimize_restarts=5, warping_function=None, warping_terms=3, verbose=False):

        self.kernel = kernel
        self.noise_var = noise_var
        self.exact_feval = exact_feval
        self.optimize_restarts = optimize_restarts
        self.optimizer = optimizer
        self.max_iters = max_iters
        self.verbose = verbose
        self.warping_function = warping_function
        self.warping_terms = warping_terms
        self.model = None

    def _create_model(self, X, Y):
        """Instantiate the underlying GPy WarpedGP from the first batch of data."""
        # --- define kernel (default Matern32 if none supplied)
        self.input_dim = X.shape[1]
        if self.kernel is None:
            self.kernel = GPy.kern.Matern32(self.input_dim, variance=1.) #+ GPy.kern.Bias(self.input_dim)
        # (removed a dead `noise_var = Y.var()*0.01 ...` local that was computed but
        # never used, and a no-op `else: self.kernel = self.kernel` branch)

        # --- define model
        self.model = GPy.models.WarpedGP(X, Y, kernel=self.kernel, warping_function=self.warping_function, warping_terms=self.warping_terms )

        # --- restrict variance if exact evaluations of the objective
        if self.exact_feval:
            self.model.Gaussian_noise.constrain_fixed(1e-6, warning=False)
        else:
            self.model.Gaussian_noise.constrain_positive(warning=False)

    def updateModel(self, X_all, Y_all, X_new, Y_new):
        """Refit the GP on the full data set; X_new/Y_new are accepted for interface
        compatibility but not used separately."""
        if self.model is None:
            self._create_model(X_all, Y_all)
        else:
            self.model.set_XY(X_all, Y_all)

        self.model.optimize(optimizer = self.optimizer, messages=self.verbose, max_iters=self.max_iters)

    def predict(self, X):
        """Return predictive mean and standard deviation at X (2d, one point per row)."""
        if X.ndim==1: X = X[None,:]
        m, v = self.model.predict(X)
        # Clip variance away from zero before taking the square root.
        v = np.clip(v, 1e-10, np.inf)
        return m, np.sqrt(v)

    def get_fmin(self):
        """Minimum of the model's predictive mean over the observed inputs."""
        return self.model.predict(self.model.X)[0].min()
class TestLCBAcquisition(unittest.TestCase):
    """Tests for AcquisitionLCB against a mocked model and optimizer."""

    def setUp(self):
        self.mock_model = Mock()
        self.mock_optimizer = Mock()
        design = [{'name': 'var_1', 'type': 'continuous', 'domain': (-5,5), 'dimensionality': 2}]
        self.space = Design_space(design, None)

        self.lcb_acquisition = AcquisitionLCB(self.mock_model, self.space, self.mock_optimizer)

    def test_acquisition_function(self):
        self.mock_model.predict.return_value = (1, 3)

        result = self.lcb_acquisition.acquisition_function(np.array([2,2]))

        # -m + 2*s = -1 + 6 = 5, negated by the wrapper for minimization.
        self.assertTrue(np.array_equal(np.array([[-5.0],[-5.0]]), result))

    def test_acquisition_function_withGradients(self):
        self.mock_model.predict_withGradients.return_value = (1, 1, 0.1, 0.1)

        acq, grad = self.lcb_acquisition.acquisition_function_withGradients(np.array([2,2]))

        self.assertTrue(np.array_equal(np.array([[-1.0],[-1.0]]), acq))
        self.assertTrue(np.array_equal(np.array([[-0.1,-0.1],[-0.1,-0.1]]), grad))

    def test_optimize_with_analytical_gradient_prediction(self):
        expected = [[0,0]]
        self.mock_optimizer.optimize.return_value = expected
        self.mock_model.analytical_gradient_prediction = True
        self.lcb_acquisition = AcquisitionLCB(self.mock_model, self.space, self.mock_optimizer)

        self.assertEqual(expected, self.lcb_acquisition.optimize())

    def test_optimize_without_analytical_gradient_prediction(self):
        expected = [[0,0]]
        self.mock_optimizer.optimize.return_value = expected
        self.mock_model.analytical_gradient_prediction = False
        self.lcb_acquisition = AcquisitionLCB(self.mock_model, self.space, self.mock_optimizer)

        self.assertEqual(expected, self.lcb_acquisition.optimize())
class ModularBayesianOptimization(BO):

    """
    Modular Bayesian optimization. This class wraps the optimization loop around the different handlers.

    :param model: GPyOpt model class.
    :param space: GPyOpt space class.
    :param objective: GPyOpt objective class.
    :param acquisition: GPyOpt acquisition class.
    :param evaluator: GPyOpt evaluator class.
    :param X_init: 2d numpy array containing the initial inputs (one per row) of the model.
    :param Y_init: 2d numpy array containing the initial outputs (one per row) of the model.
    :param cost: GPyOpt cost class (default, none).
    :param normalize_Y: whether to normalize the outputs before performing any optimization (default, True).
    :param model_update_interval: interval of collected observations after which the model is updated (default, 1).
    :param de_duplication: instantiated de_duplication GPyOpt class.
    """

    def __init__(self, model, space, objective, acquisition, evaluator, X_init, Y_init=None, cost = None, normalize_Y = True, model_update_interval = 1, de_duplication=False):

        # Flags consumed by the surrounding BO loop machinery.
        self.initial_iter = True
        self.modular_optimization = True

        # --- Create optimization space
        super(ModularBayesianOptimization, self).__init__(
            model=model,
            space=space,
            objective=objective,
            acquisition=acquisition,
            evaluator=evaluator,
            X_init=X_init,
            Y_init=Y_init,
            cost=cost,
            normalize_Y=normalize_Y,
            model_update_interval=model_update_interval,
            de_duplication=de_duplication)
2.999999999999999889e-01 -1.000000000000000000e+01 -8.000000000000000000e+00 1.000000000000000000e+00 5.000000000000000000e+00 10 | 2.999999999999999889e-01 5.932794576452007362e+00 -6.649001026098096290e+00 1.000000000000000000e+00 -1.000000000000000000e+00 11 | 2.999999999999999889e-01 -4.539142526765274077e+00 -4.682114569493140088e+00 1.000000000000000000e+00 -1.000000000000000000e+00 12 | 2.999999999999999889e-01 2.761341210024696124e+00 -2.492048376315935343e+00 1.000000000000000000e+00 -1.000000000000000000e+00 13 | 2.999999999999999889e-01 -9.905092653622112664e+00 -2.513544138545182882e+00 1.000000000000000000e+00 -1.000000000000000000e+00 14 | 2.999999999999999889e-01 3.763232146829638580e+00 1.389934618929913768e+00 1.000000000000000000e+00 5.000000000000000000e+00 15 | 2.999999999999999889e-01 -5.028544031070119047e+00 -3.859090437572233689e+00 1.000000000000000000e+00 5.000000000000000000e+00 16 | 2.999999999999999889e-01 5.465311806480251278e+00 -7.170401627554437773e+00 1.000000000000000000e+00 -1.000000000000000000e+00 17 | 2.999999999999999889e-01 9.725136602999132407e+00 -7.511005095650880037e-02 1.000000000000000000e+00 5.000000000000000000e+00 18 | 2.999999999999999889e-01 6.111413870761911227e+00 -3.919034312855641744e+00 1.000000000000000000e+00 -1.000000000000000000e+00 19 | 2.999999999999999889e-01 9.874663459857025316e+00 2.572808450987601248e+00 1.000000000000000000e+00 -1.000000000000000000e+00 20 | 2.999999999999999889e-01 -2.064049075678362044e+00 -7.221749424479821933e+00 1.000000000000000000e+00 5.000000000000000000e+00 21 | -------------------------------------------------------------------------------- /GPyOpt/testing/functional_tests/test_files/context_without_duplication_acquisition_gradient_testfile.txt: -------------------------------------------------------------------------------- 1 | -2.241785176759851339e+00 4.909631914574283940e-01 -2.970747112143128810e+00 1.000000000000000000e+00 5.000000000000000000e+00 2 | 
3.394920736069600764e+00 -1.130942124408866434e+00 -3.262315760968206746e+00 0.000000000000000000e+00 -1.000000000000000000e+00 3 | 8.710781416120635612e+00 -5.408455725403489289e+00 2.330405683659883209e+00 0.000000000000000000e+00 -1.000000000000000000e+00 4 | 6.926218333720342457e+00 6.882781789403402684e-01 5.622815997018673073e-01 1.000000000000000000e+00 5.000000000000000000e+00 5 | -3.734529661354497598e+00 8.279240491584658912e+00 -1.243243243479934534e-01 1.000000000000000000e+00 -1.000000000000000000e+00 6 | 2.999999999999999889e-01 1.000000000000000000e+01 -8.000000000000000000e+00 1.000000000000000000e+00 -1.000000000000000000e+00 7 | 2.999999999999999889e-01 1.000000000000000000e+01 3.000000000000000000e+00 1.000000000000000000e+00 5.000000000000000000e+00 8 | 2.999999999999999889e-01 -8.522264781926143229e-01 -2.567303979086433152e+00 1.000000000000000000e+00 -1.000000000000000000e+00 9 | 2.999999999999999889e-01 -1.000000000000000000e+01 -8.000000000000000000e+00 1.000000000000000000e+00 5.000000000000000000e+00 10 | 2.999999999999999889e-01 5.932794576452007362e+00 -6.649001026098096290e+00 1.000000000000000000e+00 -1.000000000000000000e+00 11 | 2.999999999999999889e-01 -4.539142526765274077e+00 -4.682114569493140088e+00 1.000000000000000000e+00 -1.000000000000000000e+00 12 | 2.999999999999999889e-01 2.761341210024696124e+00 -2.492048376315935343e+00 1.000000000000000000e+00 -1.000000000000000000e+00 13 | 2.999999999999999889e-01 -9.905092653622112664e+00 -2.513544138545182882e+00 1.000000000000000000e+00 -1.000000000000000000e+00 14 | 2.999999999999999889e-01 3.763232146829638580e+00 1.389934618929913768e+00 1.000000000000000000e+00 5.000000000000000000e+00 15 | 2.999999999999999889e-01 -5.028544031070119047e+00 -3.859090437572233689e+00 1.000000000000000000e+00 5.000000000000000000e+00 16 | 2.999999999999999889e-01 5.465311806480251278e+00 -7.170401627554437773e+00 1.000000000000000000e+00 -1.000000000000000000e+00 17 | 2.999999999999999889e-01 
9.725136602999132407e+00 -7.511005095650880037e-02 1.000000000000000000e+00 5.000000000000000000e+00 18 | 2.999999999999999889e-01 6.111413870761911227e+00 -3.919034312855641744e+00 1.000000000000000000e+00 -1.000000000000000000e+00 19 | 2.999999999999999889e-01 9.874663459857025316e+00 2.572808450987601248e+00 1.000000000000000000e+00 -1.000000000000000000e+00 20 | 2.999999999999999889e-01 -2.064049075678362044e+00 -7.221749424479821933e+00 1.000000000000000000e+00 5.000000000000000000e+00 21 | -------------------------------------------------------------------------------- /GPyOpt/testing/functional_tests/test_files/input_warped_GP_acquisition_gradient_testfile.txt: -------------------------------------------------------------------------------- 1 | -2.241785176759851339e+00 4.909631914574283940e-01 -2.970747112143128810e+00 0.000000000000000000e+00 5.000000000000000000e+00 2 | 3.394920736069600764e+00 -1.130942124408866434e+00 -3.262315760968206746e+00 -2.000000000000000000e+00 -1.000000000000000000e+00 3 | 8.710781416120635612e+00 -5.408455725403489289e+00 2.330405683659883209e+00 -2.000000000000000000e+00 -1.000000000000000000e+00 4 | 6.926218333720342457e+00 6.882781789403402684e-01 5.622815997018673073e-01 0.000000000000000000e+00 5.000000000000000000e+00 5 | -3.734529661354497598e+00 8.279240491584658912e+00 -1.243243243479934534e-01 0.000000000000000000e+00 -1.000000000000000000e+00 6 | 1.000000000000000000e+01 1.000000000000000000e+01 -8.000000000000000000e+00 2.000000000000000000e+00 -1.000000000000000000e+00 7 | -9.035025467440078728e-01 -4.805178143957880010e+00 -1.992789764722139090e+00 0.000000000000000000e+00 5.000000000000000000e+00 8 | -1.000000000000000000e+01 1.000000000000000000e+01 -8.000000000000000000e+00 -2.000000000000000000e+00 5.000000000000000000e+00 9 | -1.000000000000000000e+01 -1.000000000000000000e+01 3.000000000000000000e+00 2.000000000000000000e+00 -1.000000000000000000e+00 10 | -1.631040836482997136e+00 -5.938454197868670015e+00 
2.726584063724128981e+00 2.000000000000000000e+00 5.000000000000000000e+00 11 | 9.440817768373960917e+00 -4.537449756833764170e+00 1.296875715014571995e+00 0.000000000000000000e+00 -1.000000000000000000e+00 12 | -1.939938183054970011e+00 -8.822461573787592215e-01 -1.425264735633291480e+00 -2.000000000000000000e+00 -1.000000000000000000e+00 13 | -1.334923006015635849e+00 -7.339939895126638802e+00 2.098239699430399341e+00 2.000000000000000000e+00 -1.000000000000000000e+00 14 | 7.723755213610186132e+00 -4.362764206984572901e+00 -1.540714535795496332e+00 2.000000000000000000e+00 -1.000000000000000000e+00 15 | 2.831388263129525740e+00 -7.266051849178804645e+00 -6.742150019661360183e+00 2.000000000000000000e+00 -1.000000000000000000e+00 16 | 3.052052060376087894e+00 6.283158990870955307e+00 -1.914748213843416202e+00 2.000000000000000000e+00 5.000000000000000000e+00 17 | 5.058290676088457616e+00 7.019523343208049937e+00 -6.764391213140498849e+00 -2.000000000000000000e+00 -1.000000000000000000e+00 18 | -3.469579907514042105e+00 5.249276197761616913e+00 -5.107079972482531538e+00 -2.000000000000000000e+00 -1.000000000000000000e+00 19 | -9.512560804588684249e+00 8.972434297417585469e+00 -4.826337410927559191e+00 -2.000000000000000000e+00 5.000000000000000000e+00 20 | 3.653609486837861908e+00 -9.171883322960699303e+00 -3.485368053143004907e+00 -2.000000000000000000e+00 5.000000000000000000e+00 21 | -------------------------------------------------------------------------------- /GPyOpt/testing/functional_tests/test_files/mixed_domain_acquisition_gradient_testfile.txt: -------------------------------------------------------------------------------- 1 | -2.241785176759851339e+00 4.909631914574283940e-01 -2.970747112143128810e+00 0.000000000000000000e+00 5.000000000000000000e+00 2 | 3.394920736069600764e+00 -1.130942124408866434e+00 -3.262315760968206746e+00 -2.000000000000000000e+00 -1.000000000000000000e+00 3 | 8.710781416120635612e+00 -5.408455725403489289e+00 
2.330405683659883209e+00 -2.000000000000000000e+00 -1.000000000000000000e+00 4 | 6.926218333720342457e+00 6.882781789403402684e-01 5.622815997018673073e-01 0.000000000000000000e+00 5.000000000000000000e+00 5 | -3.734529661354497598e+00 8.279240491584658912e+00 -1.243243243479934534e-01 0.000000000000000000e+00 -1.000000000000000000e+00 6 | 1.000000000000000000e+01 1.000000000000000000e+01 -8.000000000000000000e+00 2.000000000000000000e+00 -1.000000000000000000e+00 7 | -9.035025467440078728e-01 -4.805178143957880010e+00 -1.992789764722139090e+00 0.000000000000000000e+00 5.000000000000000000e+00 8 | -1.000000000000000000e+01 1.000000000000000000e+01 -8.000000000000000000e+00 -2.000000000000000000e+00 5.000000000000000000e+00 9 | -1.000000000000000000e+01 -1.000000000000000000e+01 3.000000000000000000e+00 2.000000000000000000e+00 -1.000000000000000000e+00 10 | -1.631040836482997136e+00 -5.938454197868670015e+00 2.726584063724128981e+00 2.000000000000000000e+00 5.000000000000000000e+00 11 | 9.440817768373960917e+00 -4.537449756833764170e+00 1.296875715014571995e+00 0.000000000000000000e+00 -1.000000000000000000e+00 12 | -1.939938183054970011e+00 -8.822461573787592215e-01 -1.425264735633291480e+00 -2.000000000000000000e+00 -1.000000000000000000e+00 13 | -1.334923006015635849e+00 -7.339939895126638802e+00 2.098239699430399341e+00 2.000000000000000000e+00 -1.000000000000000000e+00 14 | 7.723755213610186132e+00 -4.362764206984572901e+00 -1.540714535795496332e+00 2.000000000000000000e+00 -1.000000000000000000e+00 15 | 2.831388263129525740e+00 -7.266051849178804645e+00 -6.742150019661360183e+00 2.000000000000000000e+00 -1.000000000000000000e+00 16 | 3.052052060376087894e+00 6.283158990870955307e+00 -1.914748213843416202e+00 2.000000000000000000e+00 5.000000000000000000e+00 17 | 5.058290676088457616e+00 7.019523343208049937e+00 -6.764391213140498849e+00 -2.000000000000000000e+00 -1.000000000000000000e+00 18 | -3.469579907514042105e+00 5.249276197761616913e+00 
-5.107079972482531538e+00 -2.000000000000000000e+00 -1.000000000000000000e+00 19 | -9.512560804588684249e+00 8.972434297417585469e+00 -4.826337410927559191e+00 -2.000000000000000000e+00 5.000000000000000000e+00 20 | 3.653609486837861908e+00 -9.171883322960699303e+00 -3.485368053143004907e+00 -2.000000000000000000e+00 5.000000000000000000e+00 21 | -------------------------------------------------------------------------------- /GPyOpt/testing/acquisitions_tests/test_mpi_acquisition.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from mock import Mock 3 | 4 | import numpy as np 5 | 6 | from GPyOpt.acquisitions import AcquisitionMPI 7 | from GPyOpt.core.task.space import Design_space 8 | 9 | class TestMPIAcquisition(unittest.TestCase): 10 | def setUp(self): 11 | self.mock_model = Mock() 12 | self.mock_optimizer = Mock() 13 | domain = [{'name': 'var_1', 'type': 'continuous', 'domain': (-5,5), 'dimensionality': 2}] 14 | self.space = Design_space(domain, None) 15 | 16 | self.mpi_acquisition = AcquisitionMPI(self.mock_model, self.space, self.mock_optimizer) 17 | 18 | def test_acquisition_function(self): 19 | self.mock_model.predict.return_value = (1,3) 20 | self.mock_model.get_fmin.return_value = (0.1) 21 | 22 | weighted_acquisition = self.mpi_acquisition.acquisition_function(np.array([2,2])) 23 | 24 | expected_acquisition = np.array([[-0.38081792], [-0.38081792]]) 25 | self.assertTrue(np.isclose(weighted_acquisition, expected_acquisition).all()) 26 | 27 | def test_acquisition_function_withGradients(self): 28 | self.mock_model.predict_withGradients.return_value = (1,3,0.1,0.2) 29 | self.mock_model.get_fmin.return_value = 0.1 30 | 31 | weighted_acquisition, weighted_gradient = self.mpi_acquisition.acquisition_function_withGradients(np.array([2,2])) 32 | 33 | self.assertTrue(np.isclose(weighted_acquisition, np.array([[-0.38081792],[-0.38081792]])).all()) 34 | self.assertTrue(np.isclose(weighted_gradient, 
np.array([[0.00499539, 0.00499539],[0.00499539,0.00499539]])).all()) 35 | 36 | def test_optimize_with_analytical_gradient_prediction(self): 37 | expected_optimum_position = [[0,0]] 38 | self.mock_optimizer.optimize.return_value = expected_optimum_position 39 | self.mock_model.analytical_gradient_prediction = True 40 | self.mpi_acquisition = AcquisitionMPI(self.mock_model, self.space, self.mock_optimizer) 41 | 42 | optimum_position = self.mpi_acquisition.optimize() 43 | 44 | self.assertEqual(optimum_position, expected_optimum_position) 45 | 46 | def test_optimize_without_analytical_gradient_prediction(self): 47 | expected_optimum_position = [[0,0]] 48 | self.mock_optimizer.optimize.return_value = expected_optimum_position 49 | self.mock_model.analytical_gradient_prediction = False 50 | self.mpi_acquisition = AcquisitionMPI(self.mock_model, self.space, self.mock_optimizer) 51 | 52 | optimum_position = self.mpi_acquisition.optimize() 53 | 54 | self.assertEqual(optimum_position, expected_optimum_position) 55 | -------------------------------------------------------------------------------- /GPyOpt/testing/optimization_tests/test_problem_with_context.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import unittest 3 | 4 | from GPyOpt.core.task.space import Design_space 5 | from GPyOpt.optimization.acquisition_optimizer import ContextManager 6 | from GPyOpt.optimization.optimizer import OptimizationWithContext 7 | from GPyOpt.methods import BayesianOptimization 8 | from GPyOpt.objective_examples.experimentsNd import alpine1 9 | 10 | class TestOptimizationWithContext(unittest.TestCase): 11 | """ 12 | Class to test the mapping of the objective function through the context 13 | """ 14 | def setUp(self): 15 | np.random.seed(123) 16 | domain = [{'name': 'var1', 'type': 'continuous', 'domain': (-5, 5), 'dimensionality': 5}] 17 | space = Design_space(domain) 18 | func = alpine1(input_dim=5, 
bounds=space.get_bounds()) 19 | bo = BayesianOptimization(f=func.f, domain=domain) 20 | context = {'var1_1': 0.3, 'var1_2': 0.4} 21 | context_manager = ContextManager(space, context) 22 | x0 = np.array([[0, 0, 0, 0, 0]]) 23 | 24 | # initialize the model in a least intrusive way possible 25 | bo.suggest_next_locations() 26 | 27 | f = bo.acquisition.acquisition_function 28 | f_df = bo.acquisition.acquisition_function_withGradients 29 | self.problem_with_context = OptimizationWithContext(x0=x0, f=f, df=None, f_df=f_df, context_manager=context_manager) 30 | self.x = np.array([[3, -3, 3]]) 31 | 32 | 33 | def test_objective_mapping_objective(self): 34 | """ 35 | Test for the mapping through the context variables 36 | """ 37 | f_nc_x = np.array([-0.10986074]) 38 | self.assertTrue(np.isclose(self.problem_with_context.f_nc(self.x), f_nc_x).all()) 39 | 40 | 41 | def test_gradient_mapping_objective(self): 42 | """ 43 | Test for the gradient of the mapping through the context variables 44 | """ 45 | df_nc_x = np.array([[0, 0, 0]]) 46 | self.assertTrue(np.isclose(self.problem_with_context.df_nc(self.x), df_nc_x).all()) 47 | 48 | 49 | def test_objective_and_mapping_objective(self): 50 | """ 51 | Test for the mapping and the gradient through the context variables 52 | """ 53 | f_nc_x = np.array([-0.10986074]) 54 | df_nc_x = np.array([[0, 0, 0]]) 55 | tested_mapping, tested_gradient = self.problem_with_context.f_df_nc(self.x) 56 | print(tested_mapping) 57 | print(tested_gradient) 58 | 59 | self.assertTrue(np.isclose(tested_mapping, f_nc_x).all()) 60 | self.assertTrue(np.isclose(tested_gradient, df_nc_x).all()) 61 | -------------------------------------------------------------------------------- /GPyOpt/interface/config_parser.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2016, the GPyOpt Authors 2 | # Licensed under the BSD 3-clause license (see LICENSE.txt) 3 | 4 | import sys 5 | import optparse 6 | import os 7 | import 
numpy as np 8 | import json 9 | 10 | default_config = { 11 | "language" : "PYTHON", 12 | "experiment-name" : "no-named-experiment", 13 | "likelihood" : "NOISELESS", 14 | 'initialization': { 15 | 'type':'random', 16 | 'num-eval':5, 17 | }, 18 | 19 | "model": { 20 | "type" : "GP", 21 | "num_inducing": 10, 22 | }, 23 | 24 | "constraints": [], 25 | 26 | "resources": { 27 | "maximum-iterations" : 20, 28 | "max-run-time": 'NA', #minutes 29 | "cores": 1, 30 | "tolerance": 1e-8, 31 | }, 32 | 33 | "acquisition": { 34 | "type" : 'EI', 35 | "jitter" : 0.01, 36 | "optimizer" : { 37 | "name": "lbfgs" 38 | }, 39 | "evaluator" : { 40 | "type" : "sequential" 41 | } 42 | }, 43 | 44 | "output":{ 45 | "verbosity": False, 46 | "file-report": { 47 | 'type': 'report', 48 | 'filename': None, 49 | 'interval': -1, 50 | }, 51 | "Ybest": { 52 | 'type': 'logger', 53 | 'content': 'ybest', 54 | 'format': 'csv', 55 | 'filename': None, 56 | 'interval': 1, 57 | }, 58 | }, 59 | } 60 | 61 | 62 | def update_config(config_new, config_default): 63 | 64 | ''' 65 | Updates the loaded method configuration with default values. 66 | ''' 67 | if any([isinstance(v, dict) for v in list(config_new.values())]): 68 | for k,v in list(config_new.items()): 69 | if isinstance(v,dict) and k in config_default: 70 | update_config(config_new[k],config_default[k]) 71 | else: 72 | config_default[k] = v 73 | else: 74 | config_default.update(config_new) 75 | return config_default 76 | 77 | 78 | def parser(input_file_path='config.json'): 79 | ''' 80 | Parser for the .json file containing the configuration of the method. 81 | ''' 82 | 83 | # --- Read .json file 84 | try: 85 | with open(input_file_path, 'r') as config_file: 86 | config_new = json.load(config_file) 87 | config_file.close() 88 | except: 89 | raise Exception('Config file "'+input_file_path+'" not loaded properly. 
Please check it an try again.') 90 | 91 | import copy 92 | options = update_config(config_new, copy.deepcopy(default_config)) 93 | 94 | return options 95 | 96 | -------------------------------------------------------------------------------- /GPyOpt/testing/acquisitions_tests/test_lcb_mcmc_acquisition.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from mock import Mock 3 | 4 | import numpy as np 5 | 6 | from GPyOpt.acquisitions import AcquisitionLCB_MCMC 7 | from GPyOpt.core.task.space import Design_space 8 | 9 | class TestLCBmcmcAcquisition(unittest.TestCase): 10 | def setUp(self): 11 | self.mock_model = Mock() 12 | self.mock_optimizer = Mock() 13 | domain = [{'name': 'var_1', 'type': 'continuous', 'domain': (-5,5), 'dimensionality': 2}] 14 | self.space = Design_space(domain, None) 15 | 16 | self.lcb_mcmc_acquisition = AcquisitionLCB_MCMC(self.mock_model, self.space, self.mock_optimizer) 17 | 18 | def test_acquisition_function(self): 19 | self.mock_model.predict.return_value = ([1,2,3,4],[1,.5,0,2]) 20 | 21 | weighted_acquisition = self.lcb_mcmc_acquisition.acquisition_function(np.array([2,2])) 22 | 23 | self.assertTrue(np.array_equal(np.array([[0.75],[0.75]]), weighted_acquisition)) 24 | 25 | def test_acquisition_function_withGradients(self): 26 | self.mock_model.predict_withGradients.return_value = ([1.,2.,3.,4.],[3.,2.,3.,2.],[0.1,0.1,0.1,0.1],[0.2,0.2,0.2,0.2]) 27 | 28 | weighted_acquisition, weighted_gradient = self.lcb_mcmc_acquisition.acquisition_function_withGradients(np.array([2.,2.])) 29 | 30 | expected_acquisition = np.array([[-2.5],[-2.5]]) 31 | expected_gradient = np.array([[-0.3,-0.3],[-0.3,-0.3]]) 32 | 33 | self.assertTrue(np.array_equal(expected_acquisition, weighted_acquisition)) 34 | self.assertTrue(np.isclose(weighted_gradient,expected_gradient).all()) 35 | 36 | def test_optimize_with_analytical_gradient_prediction(self): 37 | expected_optimum_position = [[0,0]] 38 | 
self.mock_optimizer.optimize.return_value = expected_optimum_position 39 | self.mock_model.analytical_gradient_prediction = True 40 | self.lcb_mcmc_acquisition = AcquisitionLCB_MCMC(self.mock_model, self.space, self.mock_optimizer) 41 | 42 | optimum_position = self.lcb_mcmc_acquisition.optimize() 43 | 44 | self.assertEqual(expected_optimum_position, optimum_position) 45 | 46 | def test_optimize_without_analytical_gradient_prediction(self): 47 | expected_optimum_position = [[0,0]] 48 | self.mock_optimizer.optimize.return_value = expected_optimum_position 49 | self.mock_model.analytical_gradient_prediction = False 50 | self.lcb_mcmc_acquisition = AcquisitionLCB_MCMC(self.mock_model, self.space, self.mock_optimizer) 51 | 52 | optimum_position = self.lcb_mcmc_acquisition.optimize() 53 | 54 | self.assertEqual(expected_optimum_position, optimum_position) -------------------------------------------------------------------------------- /GPyOpt/testing/functional_tests/test_duplicate_manager.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import numpy as np 3 | 4 | from GPyOpt.core.task.space import Design_space 5 | from GPyOpt.experiment_design import initial_design 6 | from GPyOpt.util.duplicate_manager import DuplicateManager 7 | 8 | class TestDuplicateManager(unittest.TestCase): 9 | def test_duplicate(self): 10 | space = [ 11 | {'name': 'var_1', 'type': 'continuous', 'domain':(-3,1), 'dimensionality': 1}, 12 | {'name': 'var_2', 'type': 'discrete', 'domain': (0,1,2,3)}, 13 | {'name': 'var_3', 'type': 'categorical', 'domain': (0, 1)} 14 | ] 15 | design_space = Design_space(space) 16 | 17 | np.random.seed(666) 18 | 19 | number_points = 5 20 | 21 | zipped_X = initial_design("random",design_space,number_points) 22 | 23 | d = DuplicateManager(design_space, zipped_X) 24 | 25 | duplicate = np.atleast_2d(zipped_X[0,:].copy()) 26 | 27 | assert d.is_zipped_x_duplicate(duplicate) 28 | 29 | assert 
d.is_unzipped_x_duplicate(design_space.unzip_inputs(duplicate)) 30 | 31 | non_duplicate = np.array([[-2.5, 2., 0.]]) 32 | 33 | for x in zipped_X: 34 | assert not np.all(non_duplicate==x) 35 | 36 | assert not d.is_zipped_x_duplicate(non_duplicate) 37 | 38 | assert not d.is_unzipped_x_duplicate(design_space.unzip_inputs(non_duplicate)) 39 | 40 | def test_duplicate_with_ignored_and_pending(self): 41 | space = [ 42 | {'name': 'var_1', 'type': 'continuous', 'domain':(-3,1), 'dimensionality': 1}, 43 | {'name': 'var_2', 'type': 'discrete', 'domain': (0,1,2,3)}, 44 | {'name': 'var_3', 'type': 'categorical', 'domain': (0, 1)} 45 | ] 46 | design_space = Design_space(space) 47 | 48 | np.random.seed(666) 49 | 50 | number_points = 5 51 | 52 | zipped_X = initial_design("random",design_space,number_points) 53 | pending_zipped_X = initial_design("random", design_space, number_points) 54 | ignored_zipped_X = initial_design("random", design_space, number_points) 55 | 56 | d = DuplicateManager(design_space, zipped_X, pending_zipped_X, ignored_zipped_X) 57 | 58 | duplicate_in_pending_state = np.atleast_2d(pending_zipped_X[0,:].copy()) 59 | 60 | assert d.is_zipped_x_duplicate(duplicate_in_pending_state) 61 | 62 | assert d.is_unzipped_x_duplicate(design_space.unzip_inputs(duplicate_in_pending_state)) 63 | 64 | duplicate_in_ignored_state = np.atleast_2d(ignored_zipped_X[0,:].copy()) 65 | 66 | assert d.is_zipped_x_duplicate(duplicate_in_ignored_state) 67 | 68 | assert d.is_unzipped_x_duplicate(design_space.unzip_inputs(duplicate_in_ignored_state)) 69 | 70 | -------------------------------------------------------------------------------- /GPyOpt/core/evaluators/batch_thompson.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2016, the GPyOpt Authors 2 | # Licensed under the BSD 3-clause license (see LICENSE.txt) 3 | 4 | from .base import SamplingBasedBatchEvaluator 5 | from ...optimization.anchor_points_generator import 
ThompsonSamplingAnchorPointsGenerator 6 | from ...optimization.optimizer import OptLbfgs, apply_optimizer, choose_optimizer 7 | import numpy as np 8 | 9 | class ThompsonBatch(SamplingBasedBatchEvaluator): 10 | """ 11 | Class for a Thompson batch method. Elements are selected iteratively using the current acquistion function but exploring the models 12 | by using Thompson sampling 13 | 14 | :param acquisition: acquisition function to be used to compute the batch. 15 | :param batch size: the number of elements in the batch. 16 | 17 | """ 18 | def __init__(self, acquisition, batch_size): 19 | 20 | super(ThompsonBatch, self).__init__(acquisition, batch_size) 21 | self.model = self.acquisition.model 22 | self.optimizer_name = 'lbfgs' 23 | self.f = self.acquisition.acquisition_function 24 | self.f_df = self.acquisition.acquisition_function_withGradients 25 | self.space = self.acquisition.space 26 | 27 | def initialize_batch(self, duplicate_manager=None, context_manager=None): 28 | 29 | return None 30 | 31 | def get_anchor_points(self, duplicate_manager=None, context_manager=None): 32 | design_type, unique = "random", False 33 | if duplicate_manager: 34 | unique = True 35 | 36 | anchor_points_generator = ThompsonSamplingAnchorPointsGenerator(self.space, design_type, model=self.model) 37 | return anchor_points_generator.get(num_anchor=self.num_anchor, duplicate_manager=duplicate_manager, unique=unique, context_manager = self.context_manager) 38 | 39 | def optimize_anchor_point(self, a, duplicate_manager=None, context_manager=None): 40 | ### --- Update the bounds of the default optimizer according to the context_manager 41 | if context_manager: 42 | bounds = self.context_manager.noncontext_bounds 43 | else: 44 | bounds = self.space.get_bounds() 45 | 46 | self.local_optimizer = choose_optimizer(self.optimizer_name, bounds) 47 | 48 | ### --- Run the local optimizer 49 | x, _ = apply_optimizer(self.local_optimizer, a, f=self.f, df=None, f_df=self.f_df, 
duplicate_manager=duplicate_manager, context_manager = self.context_manager, space=self.space) 50 | return self.space.round_optimum(x) 51 | 52 | def compute_batch_without_duplicate_logic(self, context_manager=None): 53 | anchor_points = self.get_anchor_points(context_manager=context_manager) 54 | return np.vstack([self.optimize_anchor_point(a, context_manager=context_manager) for a, _ in zip(anchor_points, range(self.batch_size))]) 55 | -------------------------------------------------------------------------------- /GPyOpt/core/task/cost.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2016, the GPyOpt Authors 2 | # Licensed under the BSD 3-clause license (see LICENSE.txt) 3 | 4 | from ...models import GPModel 5 | import numpy as np 6 | 7 | 8 | class CostModel(object): 9 | """ 10 | Class to handle the cost of evaluating the function. 11 | 12 | param cost_withGradients: function that returns the cost of evaluating the function and its gradient. By default 13 | no cost is used. Options are: 14 | - cost_withGradients is some pre-defined cost function. Should return numpy array as outputs. 15 | - cost_withGradients = 'evaluation_time'. 16 | 17 | .. Note:: if cost_withGradients = 'evaluation time' the evaluation time of the function is used to model a GP whose 18 | mean is used as cost. 
19 | 20 | """ 21 | 22 | def __init__(self, cost_withGradients): 23 | super(CostModel, self).__init__() 24 | 25 | self.cost_type = cost_withGradients 26 | 27 | # --- Set-up evaluation cost 28 | if self.cost_type is None: 29 | self.cost_withGradients = constant_cost_withGradients 30 | self.cost_type = 'Constant cost' 31 | 32 | elif self.cost_type == 'evaluation_time': 33 | self.cost_model = GPModel() 34 | self.cost_withGradients = self._cost_gp_withGradients 35 | self.num_updates = 0 36 | else: 37 | self.cost_withGradients = cost_withGradients 38 | self.cost_type = 'User defined cost' 39 | 40 | 41 | def _cost_gp(self,x): 42 | """ 43 | Predicts the time cost of evaluating the function at x. 44 | """ 45 | m, _, _, _ = self.cost_model.predict_withGradients(x) 46 | return np.exp(m) 47 | 48 | def _cost_gp_withGradients(self,x): 49 | """ 50 | Predicts the time cost and its gradient of evaluating the function at x. 51 | """ 52 | m, _, dmdx, _= self.cost_model.predict_withGradients(x) 53 | return np.exp(m), np.exp(m)*dmdx 54 | 55 | def update_cost_model(self, x, cost_x): 56 | """ 57 | Updates the GP used to handle the cost. 58 | 59 | param x: input of the GP for the cost model. 60 | param x_cost: values of the time cost at the input locations. 61 | """ 62 | 63 | if self.cost_type == 'evaluation_time': 64 | cost_evals = np.log(np.atleast_2d(np.asarray(cost_x)).T) 65 | 66 | if self.num_updates == 0: 67 | X_all = x 68 | costs_all = cost_evals 69 | else: 70 | X_all = np.vstack((self.cost_model.model.X,x)) 71 | costs_all = np.vstack((self.cost_model.model.Y,cost_evals)) 72 | 73 | self.num_updates += 1 74 | self.cost_model.updateModel(X_all, costs_all, None, None) 75 | 76 | def constant_cost_withGradients(x): 77 | """ 78 | Constant cost function used by default: cost = 1, d_cost = 0. 
79 | """ 80 | return np.ones(x.shape[0])[:,None], np.zeros(x.shape) 81 | -------------------------------------------------------------------------------- /GPyOpt/experiment_design/grid_design.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from ..core.errors import InvalidConfigError 4 | 5 | from .base import ExperimentDesign 6 | from .random_design import RandomDesign 7 | 8 | class GridDesign(ExperimentDesign): 9 | """ 10 | Grid experiment design. 11 | Uses random design for non-continuous variables, and square grid for continuous ones 12 | """ 13 | 14 | def __init__(self, space): 15 | if space.has_constraints(): 16 | raise InvalidConfigError('Sampling with constraints is not allowed by grid design') 17 | super(GridDesign, self).__init__(space) 18 | 19 | def _adjust_init_points_count(self, init_points_count): 20 | # TODO: log this 21 | print('Note: in grid designs the total number of generated points is the smallest closest integer of n^d to the selected amount of points') 22 | continuous_dims = len(self.space.get_continuous_dims()) 23 | self.data_per_dimension = iroot(continuous_dims, init_points_count) 24 | return self.data_per_dimension**continuous_dims 25 | 26 | def get_samples(self, init_points_count): 27 | """ 28 | This method may return less points than requested. 29 | The total number of generated points is the smallest closest integer of n^d to the selected amount of points. 
30 | """ 31 | 32 | init_points_count = self._adjust_init_points_count(init_points_count) 33 | samples = np.empty((init_points_count, self.space.dimensionality)) 34 | 35 | # Use random design to fill non-continuous variables 36 | random_design = RandomDesign(self.space) 37 | random_design.fill_noncontinous_variables(samples) 38 | 39 | if self.space.has_continuous(): 40 | X_design = multigrid(self.space.get_continuous_bounds(), self.data_per_dimension) 41 | samples[:,self.space.get_continuous_dims()] = X_design 42 | 43 | return samples 44 | 45 | # Computes integer root 46 | # The greatest integer whose k-th power is less than or equal to n 47 | # That is the greatest x such that x^k <= n 48 | def iroot(k, n): 49 | # Implements Newton Iroot algorithm 50 | # Details can be found here: https://www.akalin.com/computing-iroot 51 | # In a nutshell, it constructs a decreasing number series 52 | # that is guaranteed to terminate at the required integer root 53 | u, s = n, n+1 54 | while u < s: 55 | s = u 56 | t = (k-1) * s + n // pow(s, k-1) 57 | u = t // k 58 | return s 59 | 60 | def multigrid(bounds, points_count): 61 | """ 62 | Generates a multidimensional lattice 63 | :param bounds: box constraints 64 | :param points_count: number of points per dimension. 
65 | """ 66 | if len(bounds)==1: 67 | return np.linspace(bounds[0][0], bounds[0][1], points_count).reshape(points_count, 1) 68 | x_grid_rows = np.meshgrid(*[np.linspace(b[0], b[1], points_count) for b in bounds]) 69 | x_grid_columns = np.vstack([x.flatten(order='F') for x in x_grid_rows]).T 70 | return x_grid_columns -------------------------------------------------------------------------------- /GPyOpt/core/evaluators/batch_local_penalization.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2016, the GPyOpt Authors 2 | # Licensed under the BSD 3-clause license (see LICENSE.txt) 3 | 4 | from .base import EvaluatorBase 5 | import scipy 6 | from ...util.general import samples_multidimensional_uniform 7 | import numpy as np 8 | 9 | class LocalPenalization(EvaluatorBase): 10 | """ 11 | Class for the batch method on 'Batch Bayesian optimization via local penalization' (Gonzalez et al., 2016). 12 | 13 | :param acquisition: acquisition function to be used to compute the batch. 14 | :param batch size: the number of elements in the batch. 15 | 16 | """ 17 | def __init__(self, acquisition, batch_size): 18 | super(LocalPenalization, self).__init__(acquisition, batch_size) 19 | self.acquisition = acquisition 20 | self.batch_size = batch_size 21 | 22 | def compute_batch(self, duplicate_manager=None, context_manager=None): 23 | """ 24 | Computes the elements of the batch sequentially by penalizing the acquisition. 
25 | """ 26 | from ...acquisitions import AcquisitionLP 27 | assert isinstance(self.acquisition, AcquisitionLP) 28 | 29 | self.acquisition.update_batches(None,None,None) 30 | 31 | # --- GET first element in the batch 32 | X_batch = self.acquisition.optimize()[0] 33 | k=1 34 | 35 | if self.batch_size >1: 36 | # ---------- Approximate the constants of the the method 37 | L = estimate_L(self.acquisition.model.model,self.acquisition.space.get_bounds()) 38 | Min = self.acquisition.model.model.Y.min() 39 | 40 | # --- GET the remaining elements 41 | while k2: 60 | dmdx = dmdx.reshape(dmdx.shape[:2]) 61 | res = np.sqrt((dmdx*dmdx).sum(1)) # simply take the norm of the expectation of the gradient 62 | return -res 63 | 64 | samples = samples_multidimensional_uniform(bounds,500) 65 | samples = np.vstack([samples,model.X]) 66 | pred_samples = df(samples,model,0) 67 | x0 = samples[np.argmin(pred_samples)] 68 | res = scipy.optimize.minimize(df,x0, method='L-BFGS-B',bounds=bounds, args = (model,x0), options = {'maxiter': 200}) 69 | minusL = float(res.fun) 70 | L = -minusL 71 | if L<1e-7: L=10 ## to avoid problems in cases in which the model is flat. 
72 | return L 73 | -------------------------------------------------------------------------------- /GPyOpt/testing/functional_tests/test_acquisitions_gradient.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2016, the GPyOpt Authors 2 | # Licensed under the BSD 3-clause license (see LICENSE.txt) 3 | 4 | import numpy as np 5 | 6 | import GPy 7 | from GPy.models.gradient_checker import GradientChecker 8 | 9 | import GPyOpt 10 | from GPyOpt.util.general import samples_multidimensional_uniform 11 | from GPyOpt.acquisitions import AcquisitionEI, AcquisitionMPI, AcquisitionLCB 12 | 13 | from mock import Mock 14 | import unittest 15 | 16 | 17 | class acquisition_for_test(): 18 | ''' 19 | Class to run the unit test for the gradients of the acquisitions 20 | ''' 21 | def __init__(self,gpyopt_acuq): 22 | self.gpyopt_acuq = gpyopt_acuq 23 | 24 | def acquisition_function(self,x): 25 | return self.gpyopt_acuq.acquisition_function_withGradients(x)[0] 26 | 27 | def d_acquisition_function(self,x): 28 | return self.gpyopt_acuq.acquisition_function_withGradients(x)[1] 29 | 30 | 31 | class TestAcquisitionsGradients(unittest.TestCase): 32 | ''' 33 | Unittest for the gradients of the available acquisition functions 34 | ''' 35 | 36 | def setUp(self): 37 | np.random.seed(10) 38 | self.tolerance = 0.05 # Tolerance for difference between true and approximated gradients 39 | objective = GPyOpt.objective_examples.experiments1d.forrester() 40 | self.feasible_region = GPyOpt.Design_space(space = [{'name': 'var_1', 'type': 'continuous', 'domain': objective.bounds[0]}]) 41 | n_inital_design = 10 42 | X = samples_multidimensional_uniform(objective.bounds,n_inital_design) 43 | Y = objective.f(X) 44 | self.X_test = samples_multidimensional_uniform(objective.bounds,n_inital_design) 45 | 46 | self.model = Mock() 47 | self.model.get_fmin.return_value = 0.0 48 | self.model.predict_withGradients.return_value = np.zeros(X.shape), 
np.zeros(Y.shape), np.zeros(X.shape), np.zeros(X.shape) 49 | 50 | def test_ChecKGrads_EI(self): 51 | acquisition_ei = acquisition_for_test(AcquisitionEI(self.model, self.feasible_region)) 52 | grad_ei = GradientChecker(acquisition_ei.acquisition_function, acquisition_ei.d_acquisition_function, self.X_test) 53 | self.assertTrue(grad_ei.checkgrad(tolerance=self.tolerance)) 54 | 55 | 56 | def test_ChecKGrads_MPI(self): 57 | acquisition_mpi = acquisition_for_test(AcquisitionMPI(self.model, self.feasible_region)) 58 | grad_mpi = GradientChecker(acquisition_mpi.acquisition_function, acquisition_mpi.d_acquisition_function, self.X_test) 59 | self.assertTrue(grad_mpi.checkgrad(tolerance=self.tolerance)) 60 | 61 | 62 | def test_ChecKGrads_LCB(self): 63 | acquisition_lcb = acquisition_for_test(AcquisitionLCB(self.model, self.feasible_region)) 64 | grad_lcb = GradientChecker(acquisition_lcb.acquisition_function, acquisition_lcb.d_acquisition_function, self.X_test) 65 | self.assertTrue(grad_lcb.checkgrad(tolerance=self.tolerance)) 66 | 67 | 68 | if __name__=='main': 69 | unittest.main() 70 | -------------------------------------------------------------------------------- /manual/GPyOpt_models.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# GPyOpt: available surrogate models and acquisitions\n", 8 | "\n", 9 | "### Written by Javier Gonzalez, University of Sheffield.\n", 10 | "\n", 11 | "## Reference Manual index\n", 12 | "\n", 13 | "*Last updated Friday, 11 March 2016.*" 14 | ] 15 | }, 16 | { 17 | "cell_type": "markdown", 18 | "metadata": {}, 19 | "source": [ 20 | "### 1. 
Supported models\n", 21 | "\n", 22 | "The surrogate models supported in GPyOpt are:\n", 23 | "\n", 24 | "* **Standard Gaussian Processes** with standard MLE over the model hyperparameters: select ``model_type = GP`` in the GPyOpt wrapper.\n", 25 | "\n", 26 | "\n", 27 | "* **Standard Gaussian Processes with MCMC** sampling over the model hyperparameters: select ``model_type = GP_MCMC`` in the GPyOpt wrapper.\n", 28 | "\n", 29 | "\n", 30 | "* **Sparse Gaussian processes**: select ``model_type = sparseGP`` in the GPyOpt wrapper. \n", 31 | "\n", 32 | "\n", 33 | "* **Random Forrest**: select ``model_type = RF``. To illustrate GPyOpt modularity, we have also wrapped the random forrest method implemetented in Scikit-learn." 34 | ] 35 | }, 36 | { 37 | "cell_type": "markdown", 38 | "metadata": {}, 39 | "source": [ 40 | "### 2. Supported acquisiitions\n", 41 | "\n", 42 | "The supported acquisition functions in GPyOpt are:\n", 43 | "\n", 44 | "* **Expected Improvement**: select ``acquisition_type = EI`` in the GPyOpt wrapper.\n", 45 | "\n", 46 | "\n", 47 | "* **Expected Improvement integrated over the model hyperparameters**: select ``acquisition_type = EI_MCMC`` in the GPyOpt wrapper. Only works if ``model_type`` is set to ``GP_MCMC``.\n", 48 | "\n", 49 | "\n", 50 | "* **Maximum Probability of Improvement**: select ``acquisition_type = MPI`` in the GPyOpt wrapper.\n", 51 | "\n", 52 | "\n", 53 | "* **Maximum Probability of Improvement integrated over the model hyperparameters**: select ``acquisition_type = MPI_MCMC`` in the GPyOpt wrapper. Only works if ``model_type`` is set to ``GP_MCMC``.\n", 54 | "\n", 55 | "\n", 56 | "* **GP-Lower confidence bound**: select ``acquisition_type = LCB`` in the GPyOpt wrapper.\n", 57 | "\n", 58 | "\n", 59 | "* **GP-Lower confidence bound integrated over the model hyperparameters**: select ``acquisition_type = LCB_MCMC`` in the GPyOpt wrapper. 
Only works if ``model_type`` is set to ``GP_MCMC``.\n", 60 | "\n", 61 | "\n" 62 | ] 63 | } 64 | ], 65 | "metadata": { 66 | "kernelspec": { 67 | "display_name": "Python 3", 68 | "language": "python", 69 | "name": "python3" 70 | }, 71 | "language_info": { 72 | "codemirror_mode": { 73 | "name": "ipython", 74 | "version": 3 75 | }, 76 | "file_extension": ".py", 77 | "mimetype": "text/x-python", 78 | "name": "python", 79 | "nbconvert_exporter": "python", 80 | "pygments_lexer": "ipython3", 81 | "version": "3.6.1" 82 | } 83 | }, 84 | "nbformat": 4, 85 | "nbformat_minor": 1 86 | } 87 | -------------------------------------------------------------------------------- /GPyOpt/acquisitions/base.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2016, the GPyOpt Authors 2 | # Licensed under the BSD 3-clause license (see LICENSE.txt) 3 | 4 | from ..core.task.cost import constant_cost_withGradients 5 | 6 | class AcquisitionBase(object): 7 | """ 8 | Base class for acquisition functions in Bayesian Optimization 9 | 10 | :param model: GPyOpt class of model 11 | :param space: GPyOpt class of domain 12 | :param optimizer: optimizer of the acquisition. 
Should be a GPyOpt optimizer 13 | 14 | """ 15 | 16 | analytical_gradient_prediction = False 17 | 18 | def __init__(self, model, space, optimizer, cost_withGradients=None): 19 | self.model = model 20 | self.space = space 21 | self.optimizer = optimizer 22 | self.analytical_gradient_acq = self.analytical_gradient_prediction and self.model.analytical_gradient_prediction # flag from the model to test if gradients are available 23 | 24 | if cost_withGradients is None: 25 | self.cost_withGradients = constant_cost_withGradients 26 | else: 27 | self.cost_withGradients = cost_withGradients 28 | 29 | @staticmethod 30 | def fromDict(model, space, optimizer, cost_withGradients, config): 31 | raise NotImplementedError() 32 | 33 | def acquisition_function(self,x): 34 | """ 35 | Takes an acquisition and weights it so the domain and cost are taken into account. 36 | """ 37 | f_acqu = self._compute_acq(x) 38 | cost_x, _ = self.cost_withGradients(x) 39 | x_z = x if self.space.model_dimensionality == self.space.objective_dimensionality else self.space.zip_inputs(x) 40 | return -(f_acqu*self.space.indicator_constraints(x_z))/cost_x 41 | 42 | 43 | def acquisition_function_withGradients(self, x): 44 | """ 45 | Takes an acquisition and it gradient and weights it so the domain and cost are taken into account. 46 | """ 47 | f_acqu,df_acqu = self._compute_acq_withGradients(x) 48 | cost_x, cost_grad_x = self.cost_withGradients(x) 49 | f_acq_cost = f_acqu/cost_x 50 | df_acq_cost = (df_acqu*cost_x - f_acqu*cost_grad_x)/(cost_x**2) 51 | x_z = x if self.space.model_dimensionality == self.space.objective_dimensionality else self.space.zip_inputs(x) 52 | return -f_acq_cost*self.space.indicator_constraints(x_z), -df_acq_cost*self.space.indicator_constraints(x_z) 53 | 54 | def optimize(self, duplicate_manager=None): 55 | """ 56 | Optimizes the acquisition function (uses a flag from the model to use gradients or not). 
57 | """ 58 | if not self.analytical_gradient_acq: 59 | out = self.optimizer.optimize(f=self.acquisition_function, duplicate_manager=duplicate_manager) 60 | else: 61 | out = self.optimizer.optimize(f=self.acquisition_function, f_df=self.acquisition_function_withGradients, duplicate_manager=duplicate_manager) 62 | return out 63 | 64 | def _compute_acq(self,x): 65 | 66 | raise NotImplementedError('') 67 | 68 | def _compute_acq_withGradients(self, x): 69 | 70 | raise NotImplementedError('') 71 | -------------------------------------------------------------------------------- /GPyOpt/testing/core_tests/test_save.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import numpy as np 3 | from GPyOpt.methods import BayesianOptimization 4 | from GPyOpt.models import GPModel 5 | import GPy 6 | import tempfile 7 | import os 8 | 9 | 10 | class TestSaveModel(unittest.TestCase): 11 | """ 12 | Tests to check primarily that saving a BO model results in a file without errors. 
13 | """ 14 | 15 | def setUp(self): 16 | self.f_1d = lambda x: (6*x-2)**2*np.sin(12*x-4) 17 | self.f_2d = lambda x: (6*x[:,0]-2)**2*np.sin(12*x[:,1]-4) 18 | self.domain_1d = [{'name': 'var_1', 'type': 'continuous', 'domain': (0,1), 'dimensionality': 1}] 19 | self.domain_2d = [{'name': 'var_1', 'type': 'continuous', 'domain': (0,1)}, {'name': 'var_2', 'type': 'continuous', 'domain': (0,1)}] 20 | 21 | self.outfile_path = tempfile.mkstemp()[1] 22 | # Need to delete the file afterwards - no matter what 23 | self.addCleanup(os.remove, self.outfile_path) 24 | 25 | def check_output_model_file(self, contained_strings): 26 | """ Rudimentary test to check the file contents contain something sensible - can add to this but don't want to make it too implementation specific """ 27 | contents = open(self.outfile_path).read() 28 | for substring in contained_strings: 29 | self.assertTrue(substring in contents) 30 | 31 | def test_save_gp_default_no_iters(self): 32 | myBopt = BayesianOptimization(f=self.f_2d, domain=self.domain_2d) 33 | # Exception should be raised as no iterations have been carried out yet 34 | self.assertRaises(ValueError, lambda: myBopt.save_models(self.outfile_path)) 35 | 36 | def test_save_gp_no_filename(self): 37 | myBopt = BayesianOptimization(f=self.f_2d, domain=self.domain_2d) 38 | myBopt.run_optimization(max_iter=1, verbosity=False) 39 | # Need to at least pass in filename or buffer 40 | self.assertRaises(TypeError, lambda: myBopt.save_models()) 41 | 42 | def test_save_gp_default(self): 43 | myBopt = BayesianOptimization(f=self.f_2d, domain=self.domain_2d) 44 | myBopt.run_optimization(max_iter=1, verbosity=False) 45 | myBopt.save_models(self.outfile_path) 46 | self.check_output_model_file(['Iteration']) 47 | 48 | def test_save_gp_2d(self): 49 | k = GPy.kern.Matern52(input_dim=2) 50 | m = GPModel(kernel=k) 51 | myBopt = BayesianOptimization(f=self.f_2d, domain=self.domain_2d, model=m) 52 | myBopt.run_optimization(max_iter=1, verbosity=False) 53 | 
myBopt.save_models(self.outfile_path) 54 | self.check_output_model_file(['Iteration']) 55 | 56 | def test_save_gp_2d_ard(self): 57 | """ 58 | This was previously an edge-case, when some parameters were vectors, the naming of the columns was incorrect 59 | """ 60 | k = GPy.kern.Matern52(input_dim=2, ARD=True) 61 | m = GPModel(kernel=k) 62 | myBopt = BayesianOptimization(f=self.f_2d, domain=self.domain_2d, model=m) 63 | myBopt.run_optimization(max_iter=1, verbosity=False) 64 | myBopt.save_models(self.outfile_path) 65 | self.check_output_model_file(['Iteration']) 66 | -------------------------------------------------------------------------------- /GPyOpt/testing/acquisitions_tests/test_ei_mcmc_acquisition.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from mock import Mock 3 | 4 | import numpy as np 5 | 6 | from GPyOpt.acquisitions import AcquisitionEI_MCMC 7 | from GPyOpt.core.task.space import Design_space 8 | 9 | class TestEImcmcAcquisition(unittest.TestCase): 10 | def setUp(self): 11 | self.mock_model = Mock() 12 | self.mock_optimizer = Mock() 13 | domain = [{'name': 'var_1', 'type': 'continuous', 'domain': (-5,5), 'dimensionality': 2}] 14 | self.space = Design_space(domain, None) 15 | 16 | self.ei_mcmc_acquisition = AcquisitionEI_MCMC(self.mock_model, self.space, self.mock_optimizer) 17 | 18 | def test_acquisition_function(self): 19 | """Test that acquisition function returns correct weighted acquisition 20 | """ 21 | self.mock_model.predict.return_value = ([1,2,3,4], [3,3,3,3]) 22 | self.mock_model.get_fmin.return_value = ([0.1,0.2,0.3,0.4]) 23 | 24 | weighted_acquisition = self.ei_mcmc_acquisition.acquisition_function(np.array([2,2])) 25 | 26 | self.assertTrue(np.isclose(weighted_acquisition, np.array([[-0.44634968], [-0.44634968]])).all()) 27 | 28 | def test_acquisition_function_withGradients(self): 29 | """Test that acquisition function with gradients returns correct weight acquisition and 
gradient 30 | """ 31 | self.mock_model.predict_withGradients.return_value = ([1,2,3,4],[3,2,3,2],[0.1,0.1,0.1,0.1],[0.2,0.2,0.2,0.2]) 32 | self.mock_model.get_fmin.return_value = ([1,1,2,3]) 33 | 34 | weighted_acquisition, weighted_gradient = self.ei_mcmc_acquisition.acquisition_function_withGradients(np.array([2,2])) 35 | 36 | self.assertTrue(np.isclose(weighted_acquisition, np.array([[-0.69137376],[-0.69137376]])).all()) 37 | self.assertTrue(np.isclose(weighted_gradient, np.array([[-0.03690296, -0.03690296],[-0.03690296,-0.03690296]])).all()) 38 | 39 | def test_optimize_with_analytical_gradient_prediction(self): 40 | """Test that acquisition function optimize method returns expected optimum with analytical gradient prediction 41 | """ 42 | expected_optimum_position = [[0,0]] 43 | self.mock_optimizer.optimize.return_value = expected_optimum_position 44 | self.mock_model.analytical_gradient_prediction = True 45 | self.ei_mcmc_acquisition = AcquisitionEI_MCMC(self.mock_model, self.space, self.mock_optimizer) 46 | 47 | optimum_position = self.ei_mcmc_acquisition.optimize() 48 | 49 | self.assertEqual(optimum_position, expected_optimum_position) 50 | 51 | def test_optimize_without_analytical_gradient_prediction(self): 52 | """Test that acquisition function optimize method returns expected optimum without analytical gradient prediction 53 | """ 54 | expected_optimum_position = [[0,0]] 55 | self.mock_optimizer.optimize.return_value = expected_optimum_position 56 | self.mock_model.analytical_gradient_prediction = False 57 | self.ei_mcmc_acquisition = AcquisitionEI_MCMC(self.mock_model, self.space, self.mock_optimizer) 58 | 59 | optimum_position = self.ei_mcmc_acquisition.optimize() 60 | 61 | self.assertEqual(optimum_position, expected_optimum_position) 62 | -------------------------------------------------------------------------------- /GPyOpt/testing/functional_tests/base_test_case.py: -------------------------------------------------------------------------------- 1 | 
import os 2 | import numpy as np 3 | 4 | import unittest 5 | from mock import patch 6 | 7 | from driver import run_eval, run_evaluation_in_steps 8 | from mocks import MockModel 9 | 10 | class BaseTestCase(unittest.TestCase): 11 | 12 | def __init__(self, *args, **kwargs): 13 | super(BaseTestCase, self).__init__(*args, **kwargs) 14 | 15 | # This file was used to generate the test files 16 | self.outpath = os.path.join(os.path.dirname(__file__), 'test_files') 17 | 18 | # Change this False to generate test files 19 | self.is_unittest = True 20 | 21 | # Allowed margin of error for test outputs 22 | self.precision = 1e-6 23 | 24 | def get_result_filename(self, test_name): 25 | return '{}_{}'.format(test_name, 'acquisition_gradient_testfile') 26 | 27 | def load_result_file(self, test_name): 28 | filename = self.get_result_filename(test_name) 29 | file_path = '{}/{}.txt'.format(self.outpath, filename) 30 | original_result = np.loadtxt(file_path) 31 | return original_result 32 | 33 | @patch('GPyOpt.methods.BayesianOptimization._model_chooser') 34 | def check_configs(self, mock_model_chooser, mock_gpy_model = None, mock_model = MockModel()): 35 | if mock_gpy_model is not None: 36 | mock_model.model = mock_gpy_model 37 | mock_model_chooser.return_value = mock_model 38 | 39 | for m_c in self.methods_configs: 40 | np.random.seed(1) 41 | 42 | if mock_gpy_model is not None: 43 | mock_model.model = mock_gpy_model 44 | mock_model_chooser.return_value = mock_model 45 | 46 | print('Testing acquisition ' + m_c['name']) 47 | name = self.get_result_filename(m_c['name']) 48 | unittest_result = run_eval(problem_config= self.problem_config, f_inits= self.f_inits, method_config=m_c, name=name, outpath=self.outpath, time_limit=None, unittest = self.is_unittest) 49 | original_result = self.load_result_file(m_c['name']) 50 | 51 | self.assertTrue((abs(original_result - unittest_result) < self.precision).all(), msg=m_c['name'] + ' failed') 52 | 53 | 
@patch('GPyOpt.methods.BayesianOptimization._model_chooser') 54 | def check_configs_in_steps(self, mock_model_chooser, mock_gpy_model=None, init_num_steps=None): 55 | for m_c in self.methods_configs: 56 | np.random.seed(1) 57 | mock_model = MockModel() 58 | if mock_gpy_model is not None: 59 | mock_model.model = mock_gpy_model 60 | mock_model_chooser.return_value = mock_model 61 | 62 | print('Testing acquisition ' + m_c['name'] + ' in steps') 63 | original_result = self.load_result_file(m_c['name']) 64 | 65 | if init_num_steps is None: 66 | num_steps = original_result.shape[0] - self.f_inits.shape[0] 67 | else: 68 | num_steps = init_num_steps 69 | 70 | unittest_result = run_evaluation_in_steps(problem_config= self.problem_config, f_inits= self.f_inits, method_config=m_c, num_steps=num_steps) 71 | 72 | self.assertTrue((abs(original_result - unittest_result) < self.precision).all(), msg=m_c['name'] + ' failed step-by-step check') 73 | -------------------------------------------------------------------------------- /GPyOpt/testing/core_tests/test_cost.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import numpy as np 3 | 4 | from GPyOpt.core.task import CostModel 5 | from GPyOpt.core.task.cost import constant_cost_withGradients 6 | 7 | class TestCostModel(unittest.TestCase): 8 | def test_cost_gp(self): 9 | cost_model = CostModel(None) 10 | 11 | with self.assertRaises(AttributeError): 12 | time_cost = cost_model._cost_gp(np.array([[3,0],[4,1],[5,1]])) 13 | 14 | def test_cost_gp_evaluation_time(self): 15 | cost_model = CostModel('evaluation_time') 16 | cost_model.update_cost_model(np.array([[3,0],[4,1],[5,1]]),np.array([4,5,6])) 17 | time_cost = cost_model._cost_gp(np.array([[3,0],[4,1],[5,1]])) 18 | 19 | self.assertTrue(np.isclose(time_cost,np.array([[4.00000008],[ 5.00000038],[ 5.99999939]])).all()) 20 | 21 | def test_cost_gp_defined_cost(self): 22 | cost_model = CostModel(5) 23 | 24 | with 
self.assertRaises(AttributeError): 25 | time_cost = cost_model._cost_gp(np.array([[3,0],[4,1],[5,1]])) 26 | 27 | def test_cost_withGradients_constant_cost(self): 28 | cost_model = CostModel(None) 29 | 30 | cost, d_cost = cost_model.cost_withGradients(np.array([2,2])) 31 | 32 | self.assertTrue(np.isclose(cost,np.array([[1.0],[1.0]])).all()) 33 | self.assertTrue(np.isclose(d_cost,np.array([0.0, 0.0])).all()) 34 | 35 | def test_cost_withGradients_evaluation_time(self): 36 | cost_model = CostModel('evaluation_time') 37 | cost_model.update_cost_model(np.array([[3,0],[4,1],[5,1]]),np.array([4,5,6])) 38 | 39 | cost, d_cost = cost_model.cost_withGradients(np.array([2,2])) 40 | 41 | self.assertTrue(np.isclose(cost,np.array([3.52110177]))) 42 | self.assertTrue(np.isclose(d_cost,np.array([0.65617088, 0.08849139]),1e-04).all()) 43 | 44 | def test_cost_withGradients_user_defined_cost(self): 45 | def f(x): 46 | return x*x 47 | 48 | cost_model = CostModel(f) 49 | cost, d_cost = cost_model.cost_withGradients(np.array([2,2])) 50 | 51 | self.assertEqual(cost,4) 52 | self.assertEqual(d_cost,4) 53 | 54 | def test_update_cost_model(self): 55 | cost_model = CostModel('evaluation_time') 56 | 57 | x = np.array([[3,0],[4,1],[5,1]]) 58 | cost_model.update_cost_model(x,np.array([4,5,6])) 59 | 60 | self.assertTrue(cost_model.num_updates == 1) 61 | self.assertEqual(x.all(), cost_model.cost_model.model.X.all()) 62 | self.assertTrue(np.isclose(cost_model.cost_model.model.Y, np.array([[1.38629436], [1.60943791], [1.79175947]])).all()) 63 | 64 | def test_update_cost_model_repeat(self): 65 | cost_model = CostModel('evaluation_time') 66 | 67 | x1 = np.array([[3,0],[4,1],[5,1]]) 68 | cost_model.update_cost_model(x1,np.array([4,5,6])) 69 | x2 = np.array([[1,1],[3,2],[4,3]]) 70 | cost_model.update_cost_model(x2,np.array([4,2,1])) 71 | 72 | self.assertTrue(cost_model.num_updates == 2) 73 | self.assertEqual(np.concatenate((x1,x2)).all(), cost_model.cost_model.model.X.all()) 74 | 
self.assertTrue(np.isclose(cost_model.cost_model.model.Y, np.array([[1.38629436], [1.60943791], [1.79175947],[1.38629436],[0.69314718],[0.0]])).all()) 75 | -------------------------------------------------------------------------------- /GPyOpt/experiment_design/random_design.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from .base import ExperimentDesign 4 | from ..core.task.variables import BanditVariable, DiscreteVariable, CategoricalVariable 5 | 6 | 7 | class RandomDesign(ExperimentDesign): 8 | """ 9 | Random experiment design. 10 | Random values for all variables within the given bounds. 11 | """ 12 | def __init__(self, space): 13 | super(RandomDesign, self).__init__(space) 14 | 15 | def get_samples(self, init_points_count): 16 | if self.space.has_constraints(): 17 | return self.get_samples_with_constraints(init_points_count) 18 | else: 19 | return self.get_samples_without_constraints(init_points_count) 20 | 21 | def get_samples_with_constraints(self, init_points_count): 22 | """ 23 | Draw random samples and only save those that satisfy constraints 24 | Finish when required number of samples is generated 25 | """ 26 | samples = np.empty((0, self.space.dimensionality)) 27 | 28 | while samples.shape[0] < init_points_count: 29 | domain_samples = self.get_samples_without_constraints(init_points_count) 30 | valid_indices = (self.space.indicator_constraints(domain_samples) == 1).flatten() 31 | if sum(valid_indices) > 0: 32 | valid_samples = domain_samples[valid_indices,:] 33 | samples = np.vstack((samples,valid_samples)) 34 | 35 | return samples[0:init_points_count,:] 36 | 37 | def fill_noncontinous_variables(self, samples): 38 | """ 39 | Fill sample values to non-continuous variables in place 40 | """ 41 | init_points_count = samples.shape[0] 42 | 43 | for (idx, var) in enumerate(self.space.space_expanded): 44 | if isinstance(var, DiscreteVariable) or isinstance(var, CategoricalVariable) : 45 | 
sample_var = np.atleast_2d(np.random.choice(var.domain, init_points_count)) 46 | samples[:,idx] = sample_var.flatten() 47 | 48 | # sample in the case of bandit variables 49 | elif isinstance(var, BanditVariable): 50 | # Bandit variable is represented by a several adjacent columns in the samples array 51 | idx_samples = np.random.randint(var.domain.shape[0], size=init_points_count) 52 | bandit_idx = np.arange(idx, idx + var.domain.shape[1]) 53 | samples[:, bandit_idx] = var.domain[idx_samples,:] 54 | 55 | 56 | def get_samples_without_constraints(self, init_points_count): 57 | samples = np.empty((init_points_count, self.space.dimensionality)) 58 | 59 | self.fill_noncontinous_variables(samples) 60 | 61 | if self.space.has_continuous(): 62 | X_design = samples_multidimensional_uniform(self.space.get_continuous_bounds(), init_points_count) 63 | samples[:, self.space.get_continuous_dims()] = X_design 64 | 65 | return samples 66 | 67 | def samples_multidimensional_uniform(bounds, points_count): 68 | """ 69 | Generates a multidimensional grid uniformly distributed. 70 | :param bounds: tuple defining the box constraints. 71 | :points_count: number of data points to generate. 72 | """ 73 | dim = len(bounds) 74 | Z_rand = np.zeros(shape=(points_count, dim)) 75 | for k in range(0,dim): 76 | Z_rand[:,k] = np.random.uniform(low=bounds[k][0], high=bounds[k][1], size=points_count) 77 | return Z_rand -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # End of maintenance for GPyOpt 2 | 3 | Dear GPyOpt community! 4 | 5 | We would like to acknowledge the obvious. The core team of GPyOpt has moved on, and over the past months we weren't giving the package nearly as much attention as it deserves. 
Instead of dragging our feet and giving people only occasional replies and no new features, we feel the time has come to officially declare the end of GPyOpt maintenance and archive this repository. 6 | 7 | We would like to thank the community that has formed around GPyOpt. Without your interest, discussions, bug fixes and pull requests the package would never be as successful as it is. We hope we were able to provide you with a useful tool to aid your research and work. 8 | 9 | If you feel really enthusiastic and would like to take over the package, feel free to drop us an email, and who knows, maybe you'll be the one(s) carrying the GPyOpt to new heights! 10 | 11 | Sincerely yours, 12 | [Andrei Paleyes](https://paleyes.info/) and [Javier Gonzalez](https://javiergonzalezh.github.io/) 13 | 14 | 15 | # GPyOpt 16 | 17 | Gaussian process optimization using [GPy](http://sheffieldml.github.io/GPy/). Performs global optimization with different acquisition functions. Among other functionalities, it is possible to use GPyOpt to optimize physical experiments (sequentially or in batches) and tune the parameters of Machine Learning algorithms. It is able to handle large data sets via sparse Gaussian process models. 
18 | 19 | * [GPyOpt homepage](http://sheffieldml.github.io/GPyOpt/) 20 | * [Tutorial Notebooks](http://nbviewer.ipython.org/github/SheffieldML/GPyOpt/blob/master/manual/index.ipynb) 21 | * [Online documentation](http://gpyopt.readthedocs.io/) 22 | 23 | [![licence](https://img.shields.io/badge/licence-BSD-blue.svg)](http://opensource.org/licenses/BSD-3-Clause) [![develstat](https://travis-ci.org/SheffieldML/GPyOpt.svg?branch=master)](https://travis-ci.org/SheffieldML/GPyOpt) [![covdevel](http://codecov.io/github/SheffieldML/GPyOpt/coverage.svg?branch=master)](http://codecov.io/github/SheffieldML/GPyOpt?branch=master) [![Research software impact](http://depsy.org/api/package/pypi/GPyOpt/badge.svg)](http://depsy.org/package/python/GPyOpt) 24 | 25 | ### Citation 26 | 27 | ``` 28 | @Misc{gpyopt2016, 29 | author = {The GPyOpt authors}, 30 | title = {{GPyOpt}: A Bayesian Optimization framework in python}, 31 | howpublished = {\url{http://github.com/SheffieldML/GPyOpt}}, 32 | year = {2016} 33 | } 34 | ``` 35 | 36 | ## Getting started 37 | 38 | ### Installing with pip 39 | 40 | The simplest way to install GPyOpt is using pip. ubuntu users can do: 41 | 42 | ```bash 43 | sudo apt-get install python-pip 44 | pip install gpyopt 45 | ``` 46 | 47 | If you'd like to install from source, or want to contribute to the project (e.g. by sending pull requests via github), read on. Clone the repository in GitHub and add it to your $PYTHONPATH. 
48 | 49 | ```bash 50 | git clone https://github.com/SheffieldML/GPyOpt.git 51 | cd GPyOpt 52 | python setup.py develop 53 | ``` 54 | 55 | ## Dependencies: 56 | 57 | - GPy 58 | - paramz 59 | - numpy 60 | - scipy 61 | - matplotlib 62 | - DIRECT (optional) 63 | - cma (optional) 64 | - pyDOE (optional) 65 | - sobol_seq (optional) 66 | 67 | You can install dependencies by running: 68 | ``` 69 | pip install -r requirements.txt 70 | ``` 71 | 72 | 73 | ## Funding Acknowledgements 74 | 75 | * [BBSRC Project No BB/K011197/1](http://staffwww.dcs.shef.ac.uk/people/N.Lawrence/projects/recombinant/) "Linking recombinant gene sequence to protein product manufacturability using CHO cell genomic resources" 76 | 77 | * See GPy funding Acknowledgements 78 | -------------------------------------------------------------------------------- /GPyOpt/models/input_warped_gpmodel.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2016, the GPyOpt Authors 2 | # Licensed under the BSD 3-clause license (see LICENSE.txt) 3 | 4 | from .gpmodel import GPModel 5 | import numpy as np 6 | import GPy 7 | 8 | 9 | class InputWarpedGPModel(GPModel): 10 | """Bayesian Optimization with Input Warped GP using Kumar Warping 11 | 12 | The Kumar warping only applies to the numerical variables: continuous and discrete 13 | 14 | Parameters 15 | ---------- 16 | space : object 17 | Instance of Design_space defined in GPyOpt.core.task.space 18 | 19 | warping_function : object, optional 20 | Warping function defined in GPy.util.input_warping_functions.py. Default is Kumar warping 21 | 22 | kernel : object, optional 23 | An instance of kernel function defined in GPy.kern. Default is Matern 52 24 | 25 | noise_var : float, optional 26 | Value of the noise variance if known 27 | 28 | exact_feval : bool, optional 29 | Whether noiseless evaluations are available. 
30 | IMPORTANT to make the optimization work well in noiseless scenarios, Default is False 31 | 32 | optimizer : string, optional 33 | Optimizer of the model. Check GPy for details. Default to bfgs 34 | 35 | max_iter : int, optional 36 | Maximum number of iterations used to optimize the parameters of the model. Default is 1000 37 | 38 | optimize_restarts : int, optional 39 | Number of restarts in the optimization. Default is 5 40 | 41 | verbose : bool, optional 42 | Whether to print out the model messages. Default is False 43 | """ 44 | 45 | analytical_gradient_prediction = False 46 | 47 | def __init__(self, space, warping_function=None, kernel=None, noise_var=None, exact_feval=False, optimizer='bfgs', 48 | max_iters=1000, optimize_restarts=5, verbose=False, ARD=False): 49 | self.space = space 50 | # set the warping indices 51 | self.warping_indices = [] 52 | i = 0 53 | for var in self.space.space: 54 | for _ in range(var.dimensionality): 55 | if var.type == 'continuous' or var.type == 'discrete': 56 | self.warping_indices.append(i) 57 | i += 1 58 | self.warping_function = warping_function 59 | 60 | self.kernel = kernel 61 | self.noise_var = noise_var 62 | self.exact_feval = exact_feval 63 | self.optimize_restarts = optimize_restarts 64 | self.optimizer = optimizer 65 | self.max_iters = max_iters 66 | self.verbose = verbose 67 | self.model = None 68 | self.ARD = ARD 69 | 70 | def _create_model(self, X, Y): 71 | # --- define kernel 72 | self.input_dim = X.shape[1] 73 | if self.kernel is None: 74 | self.kernel = GPy.kern.Matern52(self.input_dim, variance=1., ARD=self.ARD) #+ GPy.kern.Bias(self.input_dim) 75 | else: 76 | self.kernel = self.kernel 77 | 78 | # --- define model 79 | noise_var = Y.var()*0.01 if self.noise_var is None else self.noise_var 80 | 81 | self.model = GPy.models.InputWarpedGP(X, Y, kernel=self.kernel, warping_function=self.warping_function, 82 | warping_indices=self.warping_indices, Xmin=X.min(axis=0), Xmax=X.max(axis=0)) 83 | 84 | # --- restrict 
variance if exact evaluations of the objective 85 | if self.exact_feval: 86 | self.model.Gaussian_noise.constrain_fixed(1e-6, warning=False) 87 | else: 88 | self.model.Gaussian_noise.constrain_bounded(1e-9, 1e6, warning=False) -------------------------------------------------------------------------------- /manual/GPyOpt_initial_design.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# GPyOpt: Initial designs\n", 8 | "\n", 9 | "### Written by Javier Gonzalez, Amazon Reseach Cambridge\n", 10 | "\n", 11 | "\n", 12 | "*Last updated, July 2017.*" 13 | ] 14 | }, 15 | { 16 | "cell_type": "markdown", 17 | "metadata": {}, 18 | "source": [ 19 | "Before starting the optimization, it is important to initialize the model. This notebook quickly takes you over the available initial designs available in GPyOpt." 20 | ] 21 | }, 22 | { 23 | "cell_type": "code", 24 | "execution_count": null, 25 | "metadata": { 26 | "collapsed": false 27 | }, 28 | "outputs": [], 29 | "source": [ 30 | "%pylab inline \n", 31 | "import numpy as np\n", 32 | "import GPyOpt\n", 33 | "import GPy\n", 34 | "from GPyOpt.experiment_design import initial_design\n", 35 | "import matplotlib.pyplot as plt" 36 | ] 37 | }, 38 | { 39 | "cell_type": "code", 40 | "execution_count": null, 41 | "metadata": { 42 | "collapsed": false 43 | }, 44 | "outputs": [], 45 | "source": [ 46 | "func = GPyOpt.objective_examples.experimentsNd.alpine1(input_dim=2) \n", 47 | "\n", 48 | "mixed_domain =[{'name': 'var1_2', 'type': 'continuous', 'domain': (-10,10),'dimensionality': 1},\n", 49 | " {'name': 'var5', 'type': 'continuous', 'domain': (-1,5)}]\n", 50 | "\n", 51 | "space = GPyOpt.Design_space(mixed_domain)\n", 52 | "data_init = 500" 53 | ] 54 | }, 55 | { 56 | "cell_type": "code", 57 | "execution_count": null, 58 | "metadata": { 59 | "collapsed": false 60 | }, 61 | "outputs": [], 62 | "source": [ 63 | 
"### --- Grid design\n", 64 | "X = initial_design('grid',space,data_init)\n", 65 | "plt.plot(X[:,0],X[:,1],'b.')\n", 66 | "plt.title('Grid design')" 67 | ] 68 | }, 69 | { 70 | "cell_type": "code", 71 | "execution_count": null, 72 | "metadata": { 73 | "collapsed": false 74 | }, 75 | "outputs": [], 76 | "source": [ 77 | "### --- Random initial design\n", 78 | "X = initial_design('random',space,data_init)\n", 79 | "plt.plot(X[:,0],X[:,1],'b.')\n", 80 | "plt.title('Random design')" 81 | ] 82 | }, 83 | { 84 | "cell_type": "code", 85 | "execution_count": null, 86 | "metadata": { 87 | "collapsed": false 88 | }, 89 | "outputs": [], 90 | "source": [ 91 | "### --- Latin design\n", 92 | "X = initial_design('latin',space,data_init)\n", 93 | "plt.plot(X[:,0],X[:,1],'b.')\n", 94 | "plt.title('Latin design')" 95 | ] 96 | }, 97 | { 98 | "cell_type": "code", 99 | "execution_count": null, 100 | "metadata": { 101 | "collapsed": false 102 | }, 103 | "outputs": [], 104 | "source": [ 105 | "### --- Sobol design\n", 106 | "X = initial_design('sobol',space,data_init)\n", 107 | "plt.plot(X[:,0],X[:,1],'b.')\n", 108 | "plt.title('Sobol design')" 109 | ] 110 | } 111 | ], 112 | "metadata": { 113 | "anaconda-cloud": {}, 114 | "kernelspec": { 115 | "display_name": "Python 3", 116 | "language": "python", 117 | "name": "python3" 118 | }, 119 | "language_info": { 120 | "codemirror_mode": { 121 | "name": "ipython", 122 | "version": 3 123 | }, 124 | "file_extension": ".py", 125 | "mimetype": "text/x-python", 126 | "name": "python", 127 | "nbconvert_exporter": "python", 128 | "pygments_lexer": "ipython3", 129 | "version": "3.5.2" 130 | } 131 | }, 132 | "nbformat": 4, 133 | "nbformat_minor": 1 134 | } 135 | -------------------------------------------------------------------------------- /GPyOpt/testing/functional_tests/test_input_warped_gp.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2016, the GPyOpt Authors 2 | # Licensed under the BSD 
3-clause license (see LICENSE.txt) 3 | 4 | import numpy as np 5 | import unittest 6 | 7 | import GPyOpt 8 | from GPyOpt.util.general import samples_multidimensional_uniform 9 | 10 | from base_test_case import BaseTestCase 11 | 12 | 13 | class TestInputWarpedGP(BaseTestCase): 14 | ''' 15 | Unittest for the InputWarpedGP functions 16 | ''' 17 | 18 | def setUp(self): 19 | 20 | ## 21 | # -- methods configuration 22 | ## 23 | model_type = 'input_warped_GP' 24 | initial_design_numdata = None 25 | initial_design_type = 'random' 26 | acquisition_type = 'EI' 27 | normalize_Y = True 28 | exact_feval = True 29 | acquisition_optimizer_type = 'lbfgs' 30 | model_update_interval = 1 31 | evaluator_type = 'sequential' 32 | batch_size = 1 33 | num_cores = 1 34 | verbosity = False 35 | 36 | # stop conditions 37 | max_iter = 15 38 | max_time = 999 39 | eps = 1e-8 40 | 41 | 42 | self.methods_configs = [ 43 | { 'name': 'input_warped_GP', 44 | 'model_type' : model_type, 45 | 'initial_design_numdata' : initial_design_numdata, 46 | 'initial_design_type' : initial_design_type, 47 | 'acquisition_type' : acquisition_type, 48 | 'normalize_Y' : normalize_Y, 49 | 'exact_feval' : exact_feval, 50 | 'acquisition_optimizer_type' : acquisition_optimizer_type, 51 | 'model_update_interval' : model_update_interval, 52 | 'verbosity' : verbosity, 53 | 'evaluator_type' : evaluator_type, 54 | 'batch_size' : batch_size, 55 | 'num_cores' : num_cores, 56 | 'max_iter' : max_iter, 57 | 'max_time' : max_time, 58 | 'eps' : eps 59 | } 60 | ] 61 | 62 | # -- Problem setup 63 | np.random.seed(1) 64 | n_inital_design = 5 65 | input_dim = 5 66 | 67 | self.problem_config = { 68 | 'objective': GPyOpt.objective_examples.experimentsNd.alpine1(input_dim = input_dim).f, 69 | 'domain': [{'name': 'var1_2', 'type': 'continuous', 'domain': (-10,10),'dimensionality': 2}, 70 | {'name': 'var3', 'type': 'continuous', 'domain': (-8,3)}, 71 | {'name': 'var4', 'type': 'discrete', 'domain': (-2,0,2)}, 72 | {'name': 'var5', 'type': 
'discrete', 'domain': (-1,5)}], 73 | 'constraints': None, 74 | 'cost_withGradients': None} 75 | 76 | 77 | feasible_region = GPyOpt.Design_space(space = self.problem_config['domain'], constraints = self.problem_config['constraints']) 78 | self.f_inits = GPyOpt.experiment_design.initial_design('random', feasible_region, 5) 79 | self.f_inits = self.f_inits.reshape(n_inital_design, input_dim) 80 | 81 | def test_run(self): 82 | self.check_configs() 83 | 84 | 85 | if __name__ == '__main__': 86 | unittest.main() -------------------------------------------------------------------------------- /GPyOpt/testing/functional_tests/test_constraints.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2016, the GPyOpt Authors 2 | # Licensed under the BSD 3-clause license (see LICENSE.txt) 3 | 4 | import numpy as np 5 | 6 | import GPyOpt 7 | from GPyOpt.util.general import samples_multidimensional_uniform 8 | 9 | from base_test_case import BaseTestCase 10 | 11 | 12 | class TestAcquisitions(BaseTestCase): 13 | ''' 14 | Unittest for the constraints 15 | ''' 16 | 17 | def setUp(self): 18 | ## 19 | # -- methods configuration 20 | ## 21 | 22 | model_type = 'GP' 23 | initial_design_numdata = None 24 | initial_design_type = 'random' 25 | acquisition_type = 'EI' 26 | normalize_Y = True 27 | exact_feval = True 28 | acquisition_optimizer_type = 'lbfgs' 29 | model_update_interval = 1 30 | evaluator_type = 'sequential' 31 | batch_size = 1 32 | num_cores = 1 33 | verbosity = False 34 | 35 | # stop conditions 36 | max_iter = 15 37 | max_time = 999 38 | eps = 1e-8 39 | 40 | 41 | self.methods_configs = [ 42 | { 'name' : 'constraints', 43 | 'model_type' : model_type, 44 | 'initial_design_numdata' : initial_design_numdata, 45 | 'initial_design_type' : initial_design_type, 46 | 'acquisition_type' : acquisition_type, 47 | 'normalize_Y' : normalize_Y, 48 | 'exact_feval' : exact_feval, 49 | 'acquisition_optimizer_type' : acquisition_optimizer_type, 50 
| 'model_update_interval' : model_update_interval, 51 | 'verbosity' : verbosity, 52 | 'evaluator_type' : evaluator_type, 53 | 'batch_size' : batch_size, 54 | 'num_cores' : num_cores, 55 | 'max_iter' : max_iter, 56 | 'max_time' : max_time, 57 | 'eps' : eps 58 | } 59 | ] 60 | 61 | # -- Problem setup 62 | np.random.seed(1) 63 | n_inital_design = 5 64 | input_dim = 2 65 | 66 | self.problem_config = { 67 | 'objective': GPyOpt.objective_examples.experiments2d.sixhumpcamel().f, 68 | 'domain': [{'name': 'var_1', 'type': 'continuous', 'domain': (-1,1)}, 69 | {'name': 'var_2', 'type': 'continuous', 'domain': (-1.5,1.5)}], 70 | 'constraints': [{'name': 'constr_1', 'constraint': '-x[:,1] -.5 + abs(x[:,0]) - np.sqrt(1-x[:,0]**2)'}, 71 | {'name': 'constr_2', 'constraint': 'x[:,1] +.5 - abs(x[:,0]) - np.sqrt(1-x[:,0]**2)'}], 72 | 'cost_withGradients': None} 73 | 74 | 75 | feasible_region = GPyOpt.Design_space(space = self.problem_config['domain'], constraints = self.problem_config['constraints']) 76 | self.f_inits = GPyOpt.experiment_design.initial_design('random', feasible_region, 5) 77 | self.f_inits = self.f_inits.reshape(n_inital_design, input_dim) 78 | 79 | def test_run(self): 80 | self.check_configs() 81 | 82 | def test_run_in_steps(self): 83 | self.check_configs_in_steps() 84 | 85 | 86 | if __name__=='main': 87 | unittest.main() 88 | -------------------------------------------------------------------------------- /GPyOpt/testing/functional_tests/test_mixed_domain.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2016, the GPyOpt Authors 2 | # Licensed under the BSD 3-clause license (see LICENSE.txt) 3 | 4 | import numpy as np 5 | 6 | import GPyOpt 7 | from GPyOpt.util.general import samples_multidimensional_uniform 8 | 9 | from base_test_case import BaseTestCase 10 | 11 | 12 | class TestAcquisitions(BaseTestCase): 13 | ''' 14 | Unittest for the available acquisition functions 15 | ''' 16 | 17 | def setUp(self): 18 | 19 
| ## 20 | # -- methods configuration 21 | ## 22 | 23 | model_type = 'GP' 24 | initial_design_numdata = None 25 | initial_design_type = 'random' 26 | acquisition_type = 'EI' 27 | normalize_Y = True 28 | exact_feval = True 29 | acquisition_optimizer_type = 'lbfgs' 30 | model_update_interval = 1 31 | evaluator_type = 'sequential' 32 | batch_size = 1 33 | num_cores = 1 34 | verbosity = False 35 | 36 | # stop conditions 37 | max_iter = 15 38 | max_time = 999 39 | eps = 1e-8 40 | 41 | 42 | self.methods_configs = [ 43 | { 'name': 'mixed_domain', 44 | 'model_type' : model_type, 45 | 'initial_design_numdata' : initial_design_numdata, 46 | 'initial_design_type' : initial_design_type, 47 | 'acquisition_type' : acquisition_type, 48 | 'normalize_Y' : normalize_Y, 49 | 'exact_feval' : exact_feval, 50 | 'acquisition_optimizer_type' : acquisition_optimizer_type, 51 | 'model_update_interval' : model_update_interval, 52 | 'verbosity' : verbosity, 53 | 'evaluator_type' : evaluator_type, 54 | 'batch_size' : batch_size, 55 | 'num_cores' : num_cores, 56 | 'max_iter' : max_iter, 57 | 'max_time' : max_time, 58 | 'eps' : eps 59 | } 60 | ] 61 | 62 | # -- Problem setup 63 | np.random.seed(1) 64 | n_inital_design = 5 65 | input_dim = 5 66 | 67 | self.problem_config = { 68 | 'objective': GPyOpt.objective_examples.experimentsNd.alpine1(input_dim = input_dim).f, 69 | 'domain': [{'name': 'var1_2', 'type': 'continuous', 'domain': (-10,10),'dimensionality': 2}, 70 | {'name': 'var3', 'type': 'continuous', 'domain': (-8,3)}, 71 | {'name': 'var4', 'type': 'discrete', 'domain': (-2,0,2)}, 72 | {'name': 'var5', 'type': 'discrete', 'domain': (-1,5)}], 73 | 'constraints': None, 74 | 'cost_withGradients': None} 75 | 76 | 77 | feasible_region = GPyOpt.Design_space(space = self.problem_config['domain'], constraints = self.problem_config['constraints']) 78 | self.f_inits = GPyOpt.experiment_design.initial_design('random', feasible_region, 5) 79 | self.f_inits = self.f_inits.reshape(n_inital_design, input_dim) 
80 | 81 | def test_run(self): 82 | self.check_configs() 83 | 84 | def test_run_in_steps(self): 85 | self.check_configs_in_steps() 86 | 87 | 88 | if __name__=='main': 89 | unittest.main() 90 | -------------------------------------------------------------------------------- /GPyOpt/models/rfmodel.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2016, the GPyOpt Authors 2 | # Licensed under the BSD 3-clause license (see LICENSE.txt) 3 | 4 | from .base import BOModel 5 | import numpy as np 6 | 7 | 8 | class RFModel(BOModel): 9 | """ 10 | General class for handling a Random Forest in GPyOpt. 11 | 12 | .. Note:: The model has beed wrapper 'as it is' from Scikit-learn. Check 13 | http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html 14 | for further details. 15 | """ 16 | 17 | analytical_gradient_prediction = False 18 | 19 | def __init__(self, bootstrap=True, criterion='mse', max_depth=None, 20 | max_features='auto', max_leaf_nodes=None, min_samples_leaf=1, 21 | min_samples_split=2, min_weight_fraction_leaf=0.0, 22 | n_estimators=500, n_jobs=1, oob_score=False, random_state=None, 23 | verbose=0, warm_start=False): 24 | 25 | self.bootstrap = bootstrap 26 | self.criterion = criterion 27 | self.max_depth = max_depth 28 | self.max_features = max_features 29 | self.max_leaf_nodes = max_leaf_nodes 30 | self.min_samples_leaf = min_samples_leaf 31 | self.min_samples_split = min_samples_split 32 | self.min_weight_fraction_leaf = min_weight_fraction_leaf 33 | self.n_estimators = n_estimators 34 | self.n_jobs = n_jobs 35 | self.oob_score = oob_score 36 | self.random_state = random_state 37 | self.verbose = verbose 38 | self.warm_start = warm_start 39 | 40 | self.model = None 41 | 42 | def _create_model(self, X, Y): 43 | """ 44 | Creates the model given some input data X and Y. 
45 | """ 46 | from sklearn.ensemble import RandomForestRegressor 47 | self.X = X 48 | self.Y = Y 49 | self.model = RandomForestRegressor(bootstrap = self.bootstrap, 50 | criterion = self.criterion, 51 | max_depth = self.max_depth, 52 | max_features = self.max_features, 53 | max_leaf_nodes = self.max_leaf_nodes, 54 | min_samples_leaf = self.min_samples_leaf, 55 | min_samples_split = self.min_samples_split, 56 | min_weight_fraction_leaf = self.min_weight_fraction_leaf, 57 | n_estimators = self.n_estimators, 58 | n_jobs = self.n_jobs, 59 | oob_score = self.oob_score, 60 | random_state = self.random_state, 61 | verbose = self.verbose, 62 | warm_start = self.warm_start) 63 | 64 | #self.model = RandomForestRegressor() 65 | self.model.fit(X,Y.flatten()) 66 | 67 | 68 | def updateModel(self, X_all, Y_all, X_new, Y_new): 69 | """ 70 | Updates the model with new observations. 71 | """ 72 | self.X = X_all 73 | self.Y = Y_all 74 | if self.model is None: 75 | self._create_model(X_all, Y_all) 76 | else: 77 | self.model.fit(X_all, Y_all.flatten()) 78 | 79 | def predict(self, X): 80 | """ 81 | Predictions with the model. Returns posterior means and standard deviations at X. 
82 | """ 83 | X = np.atleast_2d(X) 84 | m = np.empty(shape=(0,1)) 85 | s = np.empty(shape=(0,1)) 86 | 87 | for k in range(X.shape[0]): 88 | preds = [] 89 | for pred in self.model.estimators_: 90 | preds.append(pred.predict(X[k,:])[0]) 91 | m = np.vstack((m ,np.array(preds).mean())) 92 | s = np.vstack((s ,np.array(preds).std())) 93 | return m, s 94 | 95 | def get_fmin(self): 96 | return self.model.predict(self.X).min() 97 | -------------------------------------------------------------------------------- /manual/GPyOpt_creating_new_models.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Creating new surrogate models for GPyOpt\n", 8 | "\n", 9 | "### Written by Javier Gonzalez and Andrei Paleyes.\n" 10 | ] 11 | }, 12 | { 13 | "cell_type": "markdown", 14 | "metadata": {}, 15 | "source": [ 16 | "You can create and use your own surrogate models functions in GPyOpt. To do it just complete the following template." 17 | ] 18 | }, 19 | { 20 | "cell_type": "code", 21 | "execution_count": null, 22 | "metadata": {}, 23 | "outputs": [], 24 | "source": [ 25 | "# Copyright (c) 2016, the GPyOpt Authors\n", 26 | "# Licensed under the BSD 3-clause license (see LICENSE.txt)\n", 27 | "\n", 28 | "from GPyOpt.models import BOModel\n", 29 | "import numpy as np\n", 30 | "\n", 31 | "class NewModel(BOModel):\n", 32 | " \n", 33 | " \"\"\"\n", 34 | " General template to create a new GPyOPt surrogate model\n", 35 | "\n", 36 | " :param normalize Y: wheter the outputs are normalized (default, false)\n", 37 | "\n", 38 | " \"\"\"\n", 39 | "\n", 40 | " # SET THIS LINE TO True of False DEPENDING IN THE ANALYTICAL GRADIENTS OF THE PREDICTIONS ARE AVAILABLE OR NOT\n", 41 | " analytical_gradient_prediction = False\n", 42 | "\n", 43 | " def __init__(self, normalize_Y=True, **kwargs ):\n", 44 | "\n", 45 | " # ---\n", 46 | " # ADD TO self... 
THE REST OF THE PARAMETERS OF YOUR MODEL\n", 47 | " # ---\n", 48 | " \n", 49 | " self.normalize_Y = normalize_Y\n", 50 | " self.model = None\n", 51 | "\n", 52 | " def _create_model(self, X, Y):\n", 53 | " \"\"\"\n", 54 | " Creates the model given some input data X and Y.\n", 55 | " \"\"\"\n", 56 | " self.X = X\n", 57 | " self.Y = Y\n", 58 | " \n", 59 | " # ---\n", 60 | " # ADD TO self.model THE MODEL CREATED USING X AND Y.\n", 61 | " # ---\n", 62 | " \n", 63 | " \n", 64 | " def updateModel(self, X_all, Y_all, X_new, Y_new):\n", 65 | " \"\"\"\n", 66 | " Updates the model with new observations.\n", 67 | " \"\"\"\n", 68 | " self.X = X_all\n", 69 | " self.Y = Y_all\n", 70 | " \n", 71 | " if self.normalize_Y:\n", 72 | " Y_all = (Y_all - Y_all.mean())/(Y_all.std())\n", 73 | " \n", 74 | " if self.model is None: \n", 75 | " self._create_model(X_all, Y_all)\n", 76 | " else:\n", 77 | " pass\n", 78 | " # ---\n", 79 | " # AUGMENT THE MODEL HERE AND REUPDATE THE HIPER-PARAMETERS\n", 80 | " # ---\n", 81 | " \n", 82 | " def predict(self, X):\n", 83 | " \"\"\"\n", 84 | " Preditions with the model. Returns posterior means m and standard deviations s at X. 
\n", 85 | " \"\"\"\n", 86 | "\n", 87 | " # ---\n", 88 | " # IMPLEMENT THE MODEL PREDICTIONS HERE (outputs are numpy arrays with a point per row)\n", 89 | " # ---\n", 90 | " \n", 91 | " return m, s\n", 92 | " \n", 93 | " def get_fmin(self):\n", 94 | " return self.model.predict(self.X).min()" 95 | ] 96 | } 97 | ], 98 | "metadata": { 99 | "kernelspec": { 100 | "display_name": "Python 3", 101 | "language": "python", 102 | "name": "python3" 103 | }, 104 | "language_info": { 105 | "codemirror_mode": { 106 | "name": "ipython", 107 | "version": 3 108 | }, 109 | "file_extension": ".py", 110 | "mimetype": "text/x-python", 111 | "name": "python", 112 | "nbconvert_exporter": "python", 113 | "pygments_lexer": "ipython3", 114 | "version": "3.6.5" 115 | } 116 | }, 117 | "nbformat": 4, 118 | "nbformat_minor": 1 119 | } 120 | -------------------------------------------------------------------------------- /GPyOpt/core/evaluators/base.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2016, the GPyOpt Authors 2 | # Licensed under the BSD 3-clause license (see LICENSE.txt) 3 | 4 | import numpy as np 5 | 6 | 7 | class EvaluatorBase(object): 8 | """ 9 | Base class for the evaluator of the function. This class handles both sequential and batch evaluators. 10 | 11 | """ 12 | 13 | def __init__(self, acquisition, batch_size, **kwargs): 14 | self.acquisition = acquisition 15 | self.batch_size = batch_size 16 | 17 | def compute_batch(self, duplicate_manager=None, context_manager=None): 18 | raise NotImplementedError("Need to implement compute_batch.") 19 | 20 | 21 | class SamplingBasedBatchEvaluator(EvaluatorBase): 22 | """ 23 | This class handles specific types of batch evaluators, based on the sampling of anchor points (examples are random and Thompson sampling). 
24 | 25 | """ 26 | 27 | def __init__(self, acquisition, batch_size, **kwargs): 28 | self.acquisition = acquisition 29 | self.batch_size = batch_size 30 | self.space = acquisition.space 31 | # The following number of anchor points is heuristically picked, to obtain good and various batches 32 | self.num_anchor = 5*batch_size 33 | 34 | def initialize_batch(self, duplicate_manager=None, context_manager=None): 35 | raise NotImplementedError("Need to implement initialize_batch.") 36 | 37 | def get_anchor_points(self, duplicate_manager=None, context_manager=None): 38 | raise NotImplementedError("Need to implement get_anchor_points.") 39 | 40 | def optimize_anchor_point(self, a, duplicate_manager=None, context_manager=None): 41 | raise NotImplementedError("Need to implement optimize_anchor_point.") 42 | 43 | def compute_batch_without_duplicate_logic(self,context_manager=None): 44 | raise NotImplementedError("Need to implement compute_batch_without_duplicate_logic.") 45 | 46 | def compute_batch(self, duplicate_manager=None, context_manager=None): 47 | 48 | self.context_manager = context_manager 49 | 50 | # Easy case where we do not care about having duplicates suggested 51 | if not duplicate_manager: 52 | return self.compute_batch_without_duplicate_logic(context_manager=self.context_manager) 53 | 54 | batch, already_suggested_points = [], duplicate_manager.unique_points.copy() 55 | 56 | anchor_points = self.get_anchor_points(duplicate_manager=duplicate_manager, context_manager=self.context_manager) 57 | 58 | x0 = self.initialize_batch(duplicate_manager=duplicate_manager, context_manager = self.context_manager) 59 | 60 | if np.any(x0): 61 | batch.append(x0) 62 | already_suggested_points.add(self.zip_and_tuple(x0)) 63 | 64 | for a in anchor_points: 65 | x = self.optimize_anchor_point(a, duplicate_manager=duplicate_manager, context_manager = self.context_manager) 66 | 67 | # We first try to add the optimized anchor point; if we cannot, we then try the initial anchor point. 
68 | zipped_x = self.zip_and_tuple(x) 69 | 70 | if zipped_x not in already_suggested_points: 71 | batch.append(x) 72 | already_suggested_points.add(zipped_x) 73 | else: 74 | zipped_a = self.zip_and_tuple(a) 75 | 76 | if zipped_a not in already_suggested_points: 77 | batch.append(a) 78 | already_suggested_points.add(zipped_a) 79 | 80 | if len(batch) == self.batch_size: 81 | break 82 | 83 | if len(batch) < self.batch_size: 84 | # Note that the case where anchor_points is empty is handled in self.get_anchor_points that would throw a FullyExploredOptimizationDomainError 85 | print("Warning: the batch of requested size {} could not be entirely filled in (only {} points)".format(self.batch_size, len(batch))) 86 | 87 | return np.vstack(batch) 88 | 89 | def zip_and_tuple(self, x): 90 | """ 91 | convenient helper 92 | :param x: input configuration in the model space 93 | :return: zipped x as a tuple 94 | """ 95 | return tuple(self.space.zip_inputs(np.atleast_2d(x)).flatten()) 96 | -------------------------------------------------------------------------------- /GPyOpt/testing/core_tests/test_model.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | import unittest 4 | from mock import Mock 5 | from numpy.testing import assert_allclose 6 | 7 | from GPyOpt.models.gpmodel import GPModel 8 | from GPyOpt.models.input_warped_gpmodel import InputWarpedGPModel 9 | from GPyOpt.core.task.space import Design_space 10 | 11 | 12 | class TestModels(unittest.TestCase): 13 | 14 | def test_gpmodel_predict(self): 15 | config = [{'name': 'var_1', 'type': 'continuous', 'domain':(-1,1), 'dimensionality': 1}, 16 | {'name': 'var_2', 'type': 'continuous', 'domain':(-1,1), 'dimensionality': 1}] 17 | space = Design_space(config) 18 | model = GPModel(space) 19 | mock_mean = np.array([[2.0], [-2.0]]) 20 | mock_variance = np.array([[4.0, 0.0], [0.0, 9.0]]) 21 | model.model = Mock() 22 | model.model.predict.return_value = (mock_mean, 
mock_variance) 23 | 24 | m, d = model.predict(np.ones((2, 2))) 25 | 26 | assert_allclose(m, mock_mean, atol=1e-5) 27 | assert_allclose(np.square(d), mock_variance, atol=1e-5) 28 | 29 | def test_gpmodel_predict_covariance(self): 30 | config = [{'name': 'var_1', 'type': 'continuous', 'domain':(-1,1), 'dimensionality': 1}, 31 | {'name': 'var_2', 'type': 'continuous', 'domain':(-1,1), 'dimensionality': 1}] 32 | space = Design_space(config) 33 | model = GPModel(space) 34 | mock_mean = np.array([[2.0], [-2.0]]) 35 | mock_variance = np.array([[4.0, 3.0], [4.5, 9.0]]) 36 | model.model = Mock() 37 | model.model.predict.return_value = (mock_mean, mock_variance) 38 | 39 | v = model.predict_covariance(np.ones((2, 2))) 40 | 41 | assert_allclose(v, mock_variance, atol=1e-5) 42 | 43 | def test_input_warping_indices(self): 44 | config1 = [{'name': 'var_1', 'type': 'continuous', 'domain':(-3,1), 'dimensionality': 2}, 45 | {'name': 'var_2', 'type': 'continuous', 'domain':(-3,1), 'dimensionality': 1}] 46 | warp_ind1 = [0, 1, 2] 47 | space1 = Design_space(config1) 48 | m1 = InputWarpedGPModel(space1) 49 | self.assertEqual(m1.warping_indices, warp_ind1) 50 | 51 | config2 = [{'name': 'var_1', 'type': 'categorical', 'domain': (0,1,2,3)}, 52 | {'name': 'var_2', 'type': 'continuous', 'domain':(-3,1), 'dimensionality': 1}] 53 | warp_ind2 = [1] 54 | space2 = Design_space(config2) 55 | m2 = InputWarpedGPModel(space2) 56 | self.assertEqual(m2.warping_indices, warp_ind2) 57 | 58 | config3 = [{'name': 'var_1', 'type': 'categorical', 'domain': (0,1,2,3)}, 59 | {'name': 'var_2', 'type': 'continuous', 'domain':(-3,1), 'dimensionality': 1}] 60 | warp_ind3 = [1] 61 | space3 = Design_space(config3) 62 | m3 = InputWarpedGPModel(space3) 63 | self.assertEqual(m3.warping_indices, warp_ind3) 64 | 65 | config4 = [ 66 | {'name': 'var_3', 'type': 'discrete', 'domain': (0,1,2,3)}, 67 | {'name': 'var_3', 'type': 'continuous', 'domain': (2, 4), 'dimensionality': 2}, 68 | {'name': 'var_1', 'type': 'continuous', 
'domain':(-3,1), 'dimensionality': 1} 69 | ] 70 | warp_ind4 = [0, 1, 2, 3] 71 | space4 = Design_space(config4) 72 | m4 = InputWarpedGPModel(space4) 73 | self.assertEqual(m4.warping_indices, warp_ind4) 74 | 75 | config5 = [ 76 | {'name': 'var_4', 'type': 'bandit', 'domain': np.array([[-2, -1],[0, 1]])} 77 | ] 78 | warp_ind5 = [] 79 | space5 = Design_space(config5) 80 | m5 = InputWarpedGPModel(space5) 81 | self.assertEqual(m5.warping_indices, warp_ind5) 82 | 83 | def test_input_warping_model(self): 84 | config1 = [{'name': 'var_1', 'type': 'continuous', 'domain':(-3,1), 'dimensionality': 2}, 85 | {'name': 'var_2', 'type': 'discrete', 'domain':(-3,1), 'dimensionality': 1}] 86 | space1 = Design_space(config1) 87 | m = InputWarpedGPModel(space1) 88 | np.random.seed(0) 89 | X = np.random.randn(50, 3) 90 | Y = np.sum(np.sin(X), 1).reshape(50, 1) 91 | m._create_model(X, Y) 92 | 93 | if __name__ == "__main__": 94 | print("Running unit tests for GPyOpt Models ...") 95 | unittest.main() -------------------------------------------------------------------------------- /GPyOpt/core/task/objective.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2016, the GPyOpt Authors 2 | # Licensed under the BSD 3-clause license (see LICENSE.txt) 3 | 4 | import time 5 | import numpy as np 6 | from ...util.general import spawn 7 | from ...util.general import get_d_moments 8 | import GPy 9 | import GPyOpt 10 | 11 | class Objective(object): 12 | """ 13 | General class to handle the objective function internally. 14 | """ 15 | 16 | def evaluate(self, x): 17 | raise NotImplementedError() 18 | 19 | 20 | class SingleObjective(Objective): 21 | """ 22 | Class to handle problems with one single objective function. 23 | 24 | param func: objective function. 25 | param batch_size: size of the batches (default, 1) 26 | param num_cores: number of cores to use in the process of evaluating the objective (default, 1). 
27 | param objective_name: name of the objective function. 28 | param batch_type: Type of batch used. Only 'synchronous' evaluations are possible at the moment. 29 | param space: Not in use. 30 | 31 | .. Note:: the objective function should take 2-dimensional numpy arrays as input and outputs. Each row should 32 | contain a location (in the case of the inputs) or a function evaluation (in the case of the outputs). 33 | """ 34 | 35 | 36 | def __init__(self, func, num_cores = 1, objective_name = 'no_name', batch_type = 'synchronous', space = None): 37 | self.func = func 38 | self.n_procs = num_cores 39 | self.num_evaluations = 0 40 | self.space = space 41 | self.objective_name = objective_name 42 | 43 | 44 | def evaluate(self, x): 45 | """ 46 | Performs the evaluation of the objective at x. 47 | """ 48 | 49 | if self.n_procs == 1: 50 | f_evals, cost_evals = self._eval_func(x) 51 | else: 52 | try: 53 | f_evals, cost_evals = self._syncronous_batch_evaluation(x) 54 | except: 55 | if not hasattr(self, 'parallel_error'): 56 | print('Error in parallel computation. Fall back to single process!') 57 | else: 58 | self.parallel_error = True 59 | f_evals, cost_evals = self._eval_func(x) 60 | 61 | return f_evals, cost_evals 62 | 63 | 64 | def _eval_func(self, x): 65 | """ 66 | Performs sequential evaluations of the function at x (single location or batch). The computing time of each 67 | evaluation is also provided. 68 | """ 69 | cost_evals = [] 70 | f_evals = np.empty(shape=[0, 1]) 71 | 72 | for i in range(x.shape[0]): 73 | st_time = time.time() 74 | rlt = self.func(np.atleast_2d(x[i])) 75 | f_evals = np.vstack([f_evals,rlt]) 76 | cost_evals += [time.time()-st_time] 77 | return f_evals, cost_evals 78 | 79 | 80 | def _syncronous_batch_evaluation(self,x): 81 | """ 82 | Evaluates the function a x, where x can be a single location or a batch. The evaluation is performed in parallel 83 | according to the number of accessible cores. 
84 | """ 85 | from multiprocessing import Process, Pipe 86 | 87 | # --- parallel evaluation of the function 88 | divided_samples = [x[i::self.n_procs] for i in range(self.n_procs)] 89 | pipe = [Pipe() for i in range(self.n_procs)] 90 | proc = [Process(target=spawn(self._eval_func),args=(c,k)) for k,(p,c) in zip(divided_samples,pipe)] 91 | [p.start() for p in proc] 92 | [p.join() for p in proc] 93 | 94 | # --- time of evaluation is set to constant (=1). This is one of the hypothesis of synchronous batch methods. 95 | f_evals = np.zeros((x.shape[0],1)) 96 | cost_evals = np.ones((x.shape[0],1)) 97 | i = 0 98 | for (p,c) in pipe: 99 | f_evals[i::self.n_procs] = p.recv()[0] # throw away costs 100 | i += 1 101 | return f_evals, cost_evals 102 | 103 | def _asyncronous_batch_evaluation(self,x): 104 | 105 | """ 106 | Performs the evaluation of the function at x while other evaluations are pending. 107 | """ 108 | ### --- TODO 109 | pass 110 | --------------------------------------------------------------------------------