├── .gitignore ├── .travis.yml ├── Dockerfile ├── LICENSE ├── README.rst ├── docs ├── Makefile ├── api.rst ├── conf.py ├── index.rst ├── make.bat ├── requirements.txt └── template.rst ├── examples ├── 1d_example.ipynb ├── 1d_multiple_constraints_example.ipynb ├── 2d_example.ipynb ├── README.md └── context_example.ipynb ├── requirements.dev.txt ├── requirements.txt ├── safeopt ├── __init__.py ├── gp_opt.py ├── swarm.py ├── tests │ ├── test_gps.py │ └── test_swarm.py └── utilities.py ├── setup.cfg ├── setup.py └── test_code.sh /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | _build 3 | .ipynb_checkpoints 4 | *.pyc 5 | *.swp 6 | docs/safeopt.*.rst 7 | .coverage 8 | covhtml 9 | safeopt.egg-info 10 | htmlcov 11 | build 12 | dist 13 | .eggs 14 | .cache 15 | .pytest_cache 16 | 17 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | 3 | sudo: required 4 | services: 5 | - docker 6 | 7 | env: 8 | - PYTHON=2.7 9 | - PYTHON=3.5 10 | 11 | # Setup anaconda 12 | install: 13 | # Disabled since docker pull does not affect cache 14 | # Fixed in Docker 1.13 with --cache-from 15 | # - docker pull befelix/lyapunov-learning-private:${PYTHON} || true 16 | - docker build -f Dockerfile --build-arg PYTHON=${PYTHON} -t test-image . 17 | - docker ps -a 18 | 19 | # Run tests 20 | script: 21 | - docker run test-image flake8 safeopt --exclude "test*.py,__init__.py" --ignore=E402,E731,W503,E741 --show-source 22 | - docker run test-image flake8 safeopt --filename="__init__.py,test*.py" --ignore=F,E402,W503 --show-source 23 | - docker run test-image pydocstyle safeopt --convention=numpy 24 | - docker run test-image pytest --doctest-modules --cov --cov-fail-under=50 safeopt 25 | 26 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM continuumio/miniconda 2 | 3 | # Install build essentials and clean up 4 | RUN apt-get update --quiet \ 5 | && apt-get install -y --no-install-recommends --quiet build-essential \ 6 | && apt-get clean \ 7 | && rm -rf /var/lib/apt/lists/* 8 | 9 | ARG PYTHON="3.5" 10 | 11 | # Update conda, install packages, and clean up 12 | RUN conda update conda --yes --quiet \ 13 | && conda create -n safeopt python=$PYTHON pip numpy scipy --yes --quiet \ 14 | && conda clean --yes --all \ 15 | && hash -r 16 | 17 | # Source the anaconda environment 18 | ENV PATH /opt/conda/envs/safeopt/bin:$PATH 19 | 20 | # The following are useful for development, but not testing 21 | # Get the requirements files (separate from the main body) 22 | #COPY requirements.txt requirements.dev.txt /code/ 23 | 24 | # Install requirements and clean up 25 | #RUN pip --no-cache-dir install -r /code/requirements.txt \ 26 | # && pip --no-cache-dir install -r /code/requirements.dev.txt \ 27 | # && rm -rf /root/.cache 28 | 29 | # Copy the main code 30 | COPY . /code 31 | RUN cd /code \ 32 | && pip install --no-cache-dir -e . 
\ 33 | && pip --no-cache-dir install -r /code/requirements.dev.txt \ 34 | && rm -rf /root/.cache 35 | 36 | WORKDIR /code 37 | 38 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015 Felix Berkenkamp 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | 23 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | ==================================== 2 | SafeOpt - Safe Bayesian Optimization 3 | ==================================== 4 | 5 | .. image:: https://travis-ci.org/befelix/SafeOpt.svg?branch=master 6 | :target: https://travis-ci.org/befelix/SafeOpt 7 | :alt: Build Status 8 | .. image:: https://readthedocs.org/projects/safeopt/badge/?version=latest 9 | :target: http://safeopt.readthedocs.io/en/latest/?badge=latest 10 | :alt: Documentation Status 11 | 12 | This code implements an adapted version of the safe Bayesian optimization algorithm SafeOpt [1]_, [2]_. It also provides a more scalable implementation based on [3]_ as well as an implementation of the original algorithm in [4]_. 13 | The code can be used to automatically optimize a performance measure subject to a safety constraint by adapting parameters. 14 | The preferred way of citing this code is by referring to [1] or [2]. 15 | 16 | .. image:: http://img.youtube.com/vi/GiqNQdzc5TI/0.jpg 17 | :target: http://www.youtube.com/watch?feature=player_embedded&v=GiqNQdzc5TI 18 | :alt: Youtube video 19 | 20 | .. [1] F. Berkenkamp, A. P. Schoellig, A. Krause, 21 | `Safe Controller Optimization for Quadrotors with Gaussian Processes `_ 22 | in Proc. of the IEEE International Conference on Robotics and Automation (ICRA), 2016, pp. 491-496. 23 | 24 | .. [2] F. Berkenkamp, A. Krause, A. P. Schoellig, 25 | `Bayesian Optimization with Safety Constraints: Safe and Automatic Parameter Tuning in Robotics `_, 26 | ArXiv, 2016, arXiv:1602.04450 [cs.RO]. 27 | 28 | .. [3] Rikky R.P.R. Duivenvoorden, Felix Berkenkamp, Nicolas Carion, Andreas Krause, Angela P. Schoellig, 29 | `Constrained Bayesian optimization with Particle Swarms for Safe Adaptive Controller Tuning `_, 30 | in Proc. of the IFAC (International Federation of Automatic Control) World Congress, 2017. 31 | 32 | .. [4] Y. Sui, A. Gotovos, J. W. Burdick, and A. 
Krause, 33 | `Safe exploration for optimization with Gaussian processes `_ 34 | in Proc. of the International Conference on Machine Learning (ICML), 2015, pp. 997–1005. 35 | 36 | Warning: Maintenance mode 37 | ------------------------- 38 | This package is no longer actively maintained. That being said, pull requests to add functionality or fix bugs are always welcome. 39 | 40 | Installation 41 | ------------ 42 | The easiest way to install the necessary Python libraries is by installing pip (e.g. ``apt-get install python-pip`` on Ubuntu) and running 43 | 44 | ``pip install safeopt`` 45 | 46 | Alternatively, you can clone the repository and install it using 47 | 48 | ``python setup.py install`` 49 | 50 | Usage 51 | ----- 52 | 53 | *The easiest way to get familiar with the library is to run the interactive example IPython notebooks!* 54 | 55 | Make sure that the ``ipywidgets`` module is installed. All functions and classes are documented on `Read The Docs `_. 56 | 57 | 58 | License 59 | ------- 60 | 61 | The code is licensed under the MIT license and free to use by anyone without any restrictions. 62 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # User-friendly check for sphinx-build 11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) 12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) 13 | endif 14 | 15 | # Internal variables. 16 | PAPEROPT_a4 = -D latex_paper_size=a4 17 | PAPEROPT_letter = -D latex_paper_size=letter 18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 19 | # the i18n builder cannot share the environment and doctrees with the others 20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
21 | 22 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext 23 | 24 | help: 25 | @echo "Please use \`make ' where is one of" 26 | @echo " html to make standalone HTML files" 27 | @echo " dirhtml to make HTML files named index.html in directories" 28 | @echo " singlehtml to make a single large HTML file" 29 | @echo " pickle to make pickle files" 30 | @echo " json to make JSON files" 31 | @echo " htmlhelp to make HTML files and a HTML help project" 32 | @echo " qthelp to make HTML files and a qthelp project" 33 | @echo " applehelp to make an Apple Help Book" 34 | @echo " devhelp to make HTML files and a Devhelp project" 35 | @echo " epub to make an epub" 36 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 37 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 38 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 39 | @echo " text to make text files" 40 | @echo " man to make manual pages" 41 | @echo " texinfo to make Texinfo files" 42 | @echo " info to make Texinfo files and run them through makeinfo" 43 | @echo " gettext to make PO message catalogs" 44 | @echo " changes to make an overview of all changed/added/deprecated items" 45 | @echo " xml to make Docutils-native XML files" 46 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 47 | @echo " linkcheck to check all external links for integrity" 48 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 49 | @echo " coverage to run coverage check of the documentation (if enabled)" 50 | 51 | clean: 52 | rm -rf $(BUILDDIR)/* 53 | 54 | html: 55 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 56 | @echo 57 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 58 | 59 | dirhtml: 60 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 61 | @echo 62 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 63 | 64 | singlehtml: 65 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 66 | @echo 67 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 68 | 69 | pickle: 70 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 71 | @echo 72 | @echo "Build finished; now you can process the pickle files." 73 | 74 | json: 75 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 76 | @echo 77 | @echo "Build finished; now you can process the JSON files." 78 | 79 | htmlhelp: 80 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 81 | @echo 82 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 83 | ".hhp project file in $(BUILDDIR)/htmlhelp." 84 | 85 | qthelp: 86 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 87 | @echo 88 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 89 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 90 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/SafeOpt.qhcp" 91 | @echo "To view the help file:" 92 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/SafeOpt.qhc" 93 | 94 | applehelp: 95 | $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp 96 | @echo 97 | @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." 98 | @echo "N.B. You won't be able to view it unless you put it in" \ 99 | "~/Library/Documentation/Help or install it in your application" \ 100 | "bundle." 
101 | 102 | devhelp: 103 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 104 | @echo 105 | @echo "Build finished." 106 | @echo "To view the help file:" 107 | @echo "# mkdir -p $$HOME/.local/share/devhelp/SafeOpt" 108 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/SafeOpt" 109 | @echo "# devhelp" 110 | 111 | epub: 112 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 113 | @echo 114 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 115 | 116 | latex: 117 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 118 | @echo 119 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 120 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 121 | "(use \`make latexpdf' here to do that automatically)." 122 | 123 | latexpdf: 124 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 125 | @echo "Running LaTeX files through pdflatex..." 126 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 127 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 128 | 129 | latexpdfja: 130 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 131 | @echo "Running LaTeX files through platex and dvipdfmx..." 132 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 133 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 134 | 135 | text: 136 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 137 | @echo 138 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 139 | 140 | man: 141 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 142 | @echo 143 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 144 | 145 | texinfo: 146 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 147 | @echo 148 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 149 | @echo "Run \`make' in that directory to run these through makeinfo" \ 150 | "(use \`make info' here to do that automatically)." 151 | 152 | info: 153 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 154 | @echo "Running Texinfo files through makeinfo..." 155 | make -C $(BUILDDIR)/texinfo info 156 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 157 | 158 | gettext: 159 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 160 | @echo 161 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 162 | 163 | changes: 164 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 165 | @echo 166 | @echo "The overview file is in $(BUILDDIR)/changes." 167 | 168 | linkcheck: 169 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 170 | @echo 171 | @echo "Link check complete; look for any errors in the above output " \ 172 | "or in $(BUILDDIR)/linkcheck/output.txt." 173 | 174 | doctest: 175 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 176 | @echo "Testing of doctests in the sources finished, look at the " \ 177 | "results in $(BUILDDIR)/doctest/output.txt." 178 | 179 | coverage: 180 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage 181 | @echo "Testing of coverage in the sources finished, look at the " \ 182 | "results in $(BUILDDIR)/coverage/python.txt." 183 | 184 | xml: 185 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 186 | @echo 187 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 188 | 189 | pseudoxml: 190 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 191 | @echo 192 | @echo "Build finished. 
The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 193 | -------------------------------------------------------------------------------- /docs/api.rst: -------------------------------------------------------------------------------- 1 | API Documentation 2 | ***************** 3 | 4 | .. automodule:: safeopt 5 | 6 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # SafeOpt documentation build configuration file, created by 4 | # sphinx-quickstart on Tue Sep 1 10:17:29 2015. 5 | # 6 | # This file is execfile()d with the current directory set to its 7 | # containing dir. 8 | # 9 | # Note that not all possible configuration values are present in this 10 | # autogenerated file. 11 | # 12 | # All configuration values have a default; values that are commented out 13 | # serve to show the default. 14 | 15 | import sys 16 | import os 17 | import shlex 18 | import mock 19 | 20 | MOCK_MODULES = ['GPy', 21 | 'GPy.util', 22 | 'GPy.util.linalg', 23 | 'GPy.inference', 24 | 'GPy.inference.latent_function_inference', 25 | 'GPy.inference.latent_function_inference.posterior', 26 | 'mpl_toolkits', 27 | 'mpl_toolkits.mplot3d', 28 | 'matplotlib', 29 | 'matplotlib.pyplot', 30 | 'numpy', 31 | 'scipy', 32 | 'scipy.interpolate', 33 | 'scipy.spatial', 34 | 'scipy.spatial.distance', 35 | 'scipy.special', 36 | 'scipy.stats', 37 | ] 38 | 39 | for mod_name in MOCK_MODULES: 40 | sys.modules[mod_name] = mock.Mock() 41 | 42 | # If extensions (or modules to document with autodoc) are in another directory, 43 | # add these directories to sys.path here. If the directory is relative to the 44 | # documentation root, use os.path.abspath to make it absolute, like shown here. 45 | sys.path.insert(0, os.path.abspath('../')) 46 | 47 | # -- General configuration ------------------------------------------------ 48 | 49 | # If your documentation needs a minimal Sphinx version, state it here. 50 | #needs_sphinx = '1.0' 51 | 52 | # Add any Sphinx extension module names here, as strings. They can be 53 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 54 | # ones. 55 | extensions = [ 56 | 'sphinx.ext.autodoc', 57 | 'numpydoc', 58 | 'sphinx.ext.autosummary', 59 | ] 60 | 61 | # Add any paths that contain templates here, relative to this directory. 62 | templates_path = [''] 63 | 64 | # Generate an autosummary with one file per function. 65 | autosummary_generate = True 66 | 67 | autodoc_default_flags = [] 68 | 69 | # The suffix(es) of source filenames. 70 | # You can specify multiple suffix as a list of string: 71 | # source_suffix = ['.rst', '.md'] 72 | source_suffix = '.rst' 73 | 74 | # The encoding of source files. 75 | #source_encoding = 'utf-8-sig' 76 | 77 | # The master toctree document. 78 | master_doc = 'index' 79 | 80 | # General information about the project. 81 | project = u'SafeOpt' 82 | copyright = u'2015, Felix Berkenkamp, Angela P. Schoellig, Andreas Krause' 83 | author = u'Felix Berkenkamp, Angela P. Schoellig, Andreas Krause' 84 | 85 | # The version info for the project you're documenting, acts as replacement for 86 | # |version| and |release|, also used in various other places throughout the 87 | # built documents. 88 | # 89 | # The short X.Y version. 90 | version = '0.15' 91 | # The full version, including alpha/beta/rc tags. 92 | release = '0.15' 93 | 94 | # The language for content autogenerated by Sphinx. 
Refer to documentation 95 | # for a list of supported languages. 96 | # 97 | # This is also used if you do content translation via gettext catalogs. 98 | # Usually you set "language" from the command line for these cases. 99 | language = None 100 | 101 | # There are two options for replacing |today|: either, you set today to some 102 | # non-false value, then it is used: 103 | #today = '' 104 | # Else, today_fmt is used as the format for a strftime call. 105 | #today_fmt = '%B %d, %Y' 106 | 107 | # List of patterns, relative to source directory, that match files and 108 | # directories to ignore when looking for source files. 109 | exclude_patterns = ['_build'] 110 | 111 | # The reST default role (used for this markup: `text`) to use for all 112 | # documents. 113 | #default_role = None 114 | 115 | # If true, '()' will be appended to :func: etc. cross-reference text. 116 | #add_function_parentheses = True 117 | 118 | # If true, the current module name will be prepended to all description 119 | # unit titles (such as .. function::). 120 | #add_module_names = True 121 | 122 | # If true, sectionauthor and moduleauthor directives will be shown in the 123 | # output. They are ignored by default. 124 | #show_authors = False 125 | 126 | # The name of the Pygments (syntax highlighting) style to use. 127 | pygments_style = 'sphinx' 128 | 129 | # A list of ignored prefixes for module index sorting. 130 | #modindex_common_prefix = [] 131 | 132 | # If true, keep warnings as "system message" paragraphs in the built documents. 133 | #keep_warnings = False 134 | 135 | # If true, `todo` and `todoList` produce output, else they produce nothing. 136 | todo_include_todos = False 137 | 138 | 139 | # -- Options for HTML output ---------------------------------------------- 140 | 141 | # The theme to use for HTML and HTML Help pages. See the documentation for 142 | # a list of builtin themes. 143 | html_theme = 'sphinx_rtd_theme' 144 | 145 | # Theme options are theme-specific and customize the look and feel of a theme 146 | # further. For a list of options available for each theme, see the 147 | # documentation. 148 | #html_theme_options = {} 149 | 150 | # Add any paths that contain custom themes here, relative to this directory. 151 | #html_theme_path = [] 152 | 153 | # The name for this set of Sphinx documents. If None, it defaults to 154 | # " v documentation". 155 | #html_title = None 156 | 157 | # A shorter title for the navigation bar. Default is the same as html_title. 158 | #html_short_title = None 159 | 160 | # The name of an image file (relative to this directory) to place at the top 161 | # of the sidebar. 162 | #html_logo = None 163 | 164 | # The name of an image file (within the static path) to use as favicon of the 165 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 166 | # pixels large. 167 | #html_favicon = None 168 | 169 | # Add any paths that contain custom static files (such as style sheets) here, 170 | # relative to this directory. They are copied after the builtin static files, 171 | # so a file named "default.css" will overwrite the builtin "default.css". 172 | #html_static_path = ['_static'] 173 | 174 | # Add any extra paths that contain custom files (such as robots.txt or 175 | # .htaccess) here, relative to this directory. These files are copied 176 | # directly to the root of the documentation. 177 | #html_extra_path = [] 178 | 179 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 180 | # using the given strftime format. 
181 | #html_last_updated_fmt = '%b %d, %Y' 182 | 183 | # If true, SmartyPants will be used to convert quotes and dashes to 184 | # typographically correct entities. 185 | #html_use_smartypants = True 186 | 187 | # Custom sidebar templates, maps document names to template names. 188 | #html_sidebars = {} 189 | 190 | # Additional templates that should be rendered to pages, maps page names to 191 | # template names. 192 | #html_additional_pages = {} 193 | 194 | # If false, no module index is generated. 195 | #html_domain_indices = True 196 | 197 | # If false, no index is generated. 198 | #html_use_index = True 199 | 200 | # If true, the index is split into individual pages for each letter. 201 | #html_split_index = False 202 | 203 | # If true, links to the reST sources are added to the pages. 204 | #html_show_sourcelink = True 205 | 206 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 207 | #html_show_sphinx = True 208 | 209 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 210 | #html_show_copyright = True 211 | 212 | # If true, an OpenSearch description file will be output, and all pages will 213 | # contain a tag referring to it. The value of this option must be the 214 | # base URL from which the finished HTML is served. 215 | #html_use_opensearch = '' 216 | 217 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 218 | #html_file_suffix = None 219 | 220 | # Language to be used for generating the HTML full-text search index. 221 | # Sphinx supports the following languages: 222 | # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' 223 | # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' 224 | html_search_language = 'en' 225 | 226 | # A dictionary with options for the search language support, empty by default. 227 | # Now only 'ja' uses this config value 228 | #html_search_options = {'type': 'default'} 229 | 230 | # The name of a javascript file (relative to the configuration directory) that 231 | # implements a search results scorer. If empty, the default will be used. 232 | #html_search_scorer = 'scorer.js' 233 | 234 | # Output file base name for HTML help builder. 235 | htmlhelp_basename = 'SafeOptdoc' 236 | 237 | # -- Options for LaTeX output --------------------------------------------- 238 | 239 | latex_elements = { 240 | # The paper size ('letterpaper' or 'a4paper'). 241 | #'papersize': 'letterpaper', 242 | 243 | # The font size ('10pt', '11pt' or '12pt'). 244 | #'pointsize': '10pt', 245 | 246 | # Additional stuff for the LaTeX preamble. 247 | #'preamble': '', 248 | 249 | # Latex figure (float) alignment 250 | #'figure_align': 'htbp', 251 | } 252 | 253 | # Grouping the document tree into LaTeX files. List of tuples 254 | # (source start file, target name, title, 255 | # author, documentclass [howto, manual, or own class]). 256 | latex_documents = [ 257 | (master_doc, 'SafeOpt.tex', u'SafeOpt Documentation', 258 | u'Felix Berkenkamp, Angela P. Schoellig, Andreas Krause', 'manual'), 259 | ] 260 | 261 | # The name of an image file (relative to this directory) to place at the top of 262 | # the title page. 263 | #latex_logo = None 264 | 265 | # For "manual" documents, if this is true, then toplevel headings are parts, 266 | # not chapters. 267 | #latex_use_parts = False 268 | 269 | # If true, show page references after internal links. 270 | #latex_show_pagerefs = False 271 | 272 | # If true, show URL addresses after external links. 
273 | #latex_show_urls = False 274 | 275 | # Documents to append as an appendix to all manuals. 276 | #latex_appendices = [] 277 | 278 | # If false, no module index is generated. 279 | #latex_domain_indices = True 280 | 281 | 282 | # -- Options for manual page output --------------------------------------- 283 | 284 | # One entry per manual page. List of tuples 285 | # (source start file, name, description, authors, manual section). 286 | man_pages = [ 287 | (master_doc, 'safeopt', u'SafeOpt Documentation', 288 | [author], 1) 289 | ] 290 | 291 | # If true, show URL addresses after external links. 292 | #man_show_urls = False 293 | 294 | 295 | # -- Options for Texinfo output ------------------------------------------- 296 | 297 | # Grouping the document tree into Texinfo files. List of tuples 298 | # (source start file, target name, title, author, 299 | # dir menu entry, description, category) 300 | texinfo_documents = [ 301 | (master_doc, 'SafeOpt', u'SafeOpt Documentation', 302 | author, 'SafeOpt', 'One line description of project.', 303 | 'Miscellaneous'), 304 | ] 305 | 306 | # Documents to append as an appendix to all manuals. 307 | #texinfo_appendices = [] 308 | 309 | # If false, no module index is generated. 310 | #texinfo_domain_indices = True 311 | 312 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 313 | #texinfo_show_urls = 'footnote' 314 | 315 | # If true, do not generate a @detailmenu in the "Top" node's menu. 316 | #texinfo_no_detailmenu = False 317 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. SafeOpt documentation master file, created by 2 | sphinx-quickstart on Tue Sep 1 10:17:29 2015. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | .. include:: ../README.rst 7 | 8 | .. toctree:: 9 | :caption: Contents 10 | :maxdepth: 3 11 | 12 | api 13 | 14 | Indices and tables 15 | ================== 16 | 17 | * :ref:`genindex` 18 | * :ref:`modindex` 19 | * :ref:`search` 20 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | REM Command file for Sphinx documentation 4 | 5 | if "%SPHINXBUILD%" == "" ( 6 | set SPHINXBUILD=sphinx-build 7 | ) 8 | set BUILDDIR=_build 9 | set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . 10 | set I18NSPHINXOPTS=%SPHINXOPTS% . 11 | if NOT "%PAPER%" == "" ( 12 | set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% 13 | set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% 14 | ) 15 | 16 | if "%1" == "" goto help 17 | 18 | if "%1" == "help" ( 19 | :help 20 | echo.Please use `make ^` where ^ is one of 21 | echo. html to make standalone HTML files 22 | echo. dirhtml to make HTML files named index.html in directories 23 | echo. singlehtml to make a single large HTML file 24 | echo. pickle to make pickle files 25 | echo. json to make JSON files 26 | echo. htmlhelp to make HTML files and a HTML help project 27 | echo. qthelp to make HTML files and a qthelp project 28 | echo. devhelp to make HTML files and a Devhelp project 29 | echo. epub to make an epub 30 | echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter 31 | echo. text to make text files 32 | echo. man to make manual pages 33 | echo. texinfo to make Texinfo files 34 | echo. 
gettext to make PO message catalogs 35 | echo. changes to make an overview of all changed/added/deprecated items 36 | echo. xml to make Docutils-native XML files 37 | echo. pseudoxml to make pseudoxml-XML files for display purposes 38 | echo. linkcheck to check all external links for integrity 39 | echo. doctest to run all doctests embedded in the documentation if enabled 40 | echo. coverage to run coverage check of the documentation if enabled 41 | goto end 42 | ) 43 | 44 | if "%1" == "clean" ( 45 | for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i 46 | del /q /s %BUILDDIR%\* 47 | goto end 48 | ) 49 | 50 | 51 | REM Check if sphinx-build is available and fallback to Python version if any 52 | %SPHINXBUILD% 2> nul 53 | if errorlevel 9009 goto sphinx_python 54 | goto sphinx_ok 55 | 56 | :sphinx_python 57 | 58 | set SPHINXBUILD=python -m sphinx.__init__ 59 | %SPHINXBUILD% 2> nul 60 | if errorlevel 9009 ( 61 | echo. 62 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 63 | echo.installed, then set the SPHINXBUILD environment variable to point 64 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 65 | echo.may add the Sphinx directory to PATH. 66 | echo. 67 | echo.If you don't have Sphinx installed, grab it from 68 | echo.http://sphinx-doc.org/ 69 | exit /b 1 70 | ) 71 | 72 | :sphinx_ok 73 | 74 | 75 | if "%1" == "html" ( 76 | %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html 77 | if errorlevel 1 exit /b 1 78 | echo. 79 | echo.Build finished. The HTML pages are in %BUILDDIR%/html. 80 | goto end 81 | ) 82 | 83 | if "%1" == "dirhtml" ( 84 | %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml 85 | if errorlevel 1 exit /b 1 86 | echo. 87 | echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. 88 | goto end 89 | ) 90 | 91 | if "%1" == "singlehtml" ( 92 | %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml 93 | if errorlevel 1 exit /b 1 94 | echo. 95 | echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. 96 | goto end 97 | ) 98 | 99 | if "%1" == "pickle" ( 100 | %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle 101 | if errorlevel 1 exit /b 1 102 | echo. 103 | echo.Build finished; now you can process the pickle files. 104 | goto end 105 | ) 106 | 107 | if "%1" == "json" ( 108 | %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json 109 | if errorlevel 1 exit /b 1 110 | echo. 111 | echo.Build finished; now you can process the JSON files. 112 | goto end 113 | ) 114 | 115 | if "%1" == "htmlhelp" ( 116 | %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp 117 | if errorlevel 1 exit /b 1 118 | echo. 119 | echo.Build finished; now you can run HTML Help Workshop with the ^ 120 | .hhp project file in %BUILDDIR%/htmlhelp. 121 | goto end 122 | ) 123 | 124 | if "%1" == "qthelp" ( 125 | %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp 126 | if errorlevel 1 exit /b 1 127 | echo. 128 | echo.Build finished; now you can run "qcollectiongenerator" with the ^ 129 | .qhcp project file in %BUILDDIR%/qthelp, like this: 130 | echo.^> qcollectiongenerator %BUILDDIR%\qthelp\SafeOpt.qhcp 131 | echo.To view the help file: 132 | echo.^> assistant -collectionFile %BUILDDIR%\qthelp\SafeOpt.qhc 133 | goto end 134 | ) 135 | 136 | if "%1" == "devhelp" ( 137 | %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp 138 | if errorlevel 1 exit /b 1 139 | echo. 140 | echo.Build finished. 
141 | goto end 142 | ) 143 | 144 | if "%1" == "epub" ( 145 | %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub 146 | if errorlevel 1 exit /b 1 147 | echo. 148 | echo.Build finished. The epub file is in %BUILDDIR%/epub. 149 | goto end 150 | ) 151 | 152 | if "%1" == "latex" ( 153 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 154 | if errorlevel 1 exit /b 1 155 | echo. 156 | echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. 157 | goto end 158 | ) 159 | 160 | if "%1" == "latexpdf" ( 161 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 162 | cd %BUILDDIR%/latex 163 | make all-pdf 164 | cd %~dp0 165 | echo. 166 | echo.Build finished; the PDF files are in %BUILDDIR%/latex. 167 | goto end 168 | ) 169 | 170 | if "%1" == "latexpdfja" ( 171 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 172 | cd %BUILDDIR%/latex 173 | make all-pdf-ja 174 | cd %~dp0 175 | echo. 176 | echo.Build finished; the PDF files are in %BUILDDIR%/latex. 177 | goto end 178 | ) 179 | 180 | if "%1" == "text" ( 181 | %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text 182 | if errorlevel 1 exit /b 1 183 | echo. 184 | echo.Build finished. The text files are in %BUILDDIR%/text. 185 | goto end 186 | ) 187 | 188 | if "%1" == "man" ( 189 | %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man 190 | if errorlevel 1 exit /b 1 191 | echo. 192 | echo.Build finished. The manual pages are in %BUILDDIR%/man. 193 | goto end 194 | ) 195 | 196 | if "%1" == "texinfo" ( 197 | %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo 198 | if errorlevel 1 exit /b 1 199 | echo. 200 | echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. 201 | goto end 202 | ) 203 | 204 | if "%1" == "gettext" ( 205 | %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale 206 | if errorlevel 1 exit /b 1 207 | echo. 208 | echo.Build finished. The message catalogs are in %BUILDDIR%/locale. 209 | goto end 210 | ) 211 | 212 | if "%1" == "changes" ( 213 | %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes 214 | if errorlevel 1 exit /b 1 215 | echo. 216 | echo.The overview file is in %BUILDDIR%/changes. 217 | goto end 218 | ) 219 | 220 | if "%1" == "linkcheck" ( 221 | %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck 222 | if errorlevel 1 exit /b 1 223 | echo. 224 | echo.Link check complete; look for any errors in the above output ^ 225 | or in %BUILDDIR%/linkcheck/output.txt. 226 | goto end 227 | ) 228 | 229 | if "%1" == "doctest" ( 230 | %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest 231 | if errorlevel 1 exit /b 1 232 | echo. 233 | echo.Testing of doctests in the sources finished, look at the ^ 234 | results in %BUILDDIR%/doctest/output.txt. 235 | goto end 236 | ) 237 | 238 | if "%1" == "coverage" ( 239 | %SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage 240 | if errorlevel 1 exit /b 1 241 | echo. 242 | echo.Testing of coverage in the sources finished, look at the ^ 243 | results in %BUILDDIR%/coverage/python.txt. 244 | goto end 245 | ) 246 | 247 | if "%1" == "xml" ( 248 | %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml 249 | if errorlevel 1 exit /b 1 250 | echo. 251 | echo.Build finished. The XML files are in %BUILDDIR%/xml. 252 | goto end 253 | ) 254 | 255 | if "%1" == "pseudoxml" ( 256 | %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml 257 | if errorlevel 1 exit /b 1 258 | echo. 259 | echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. 
260 | goto end 261 | ) 262 | 263 | :end 264 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | numpydoc >= 0.5 2 | sphinx_rtd_theme >= 0.1.8 3 | mock >= 2 4 | -------------------------------------------------------------------------------- /docs/template.rst: -------------------------------------------------------------------------------- 1 | {{ name }} 2 | {{ underline }} 3 | 4 | .. currentmodule:: {{ module }} 5 | .. auto{{ objtype }}:: {{ objname }} {% if objtype == "class" %} 6 | :members: 7 | :inherited-members: 8 | {% endif %} 9 | -------------------------------------------------------------------------------- /examples/1d_example.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": { 7 | "collapsed": true 8 | }, 9 | "outputs": [], 10 | "source": [ 11 | "from __future__ import print_function, division, absolute_import\n", 12 | "\n", 13 | "import GPy\n", 14 | "import numpy as np\n", 15 | "import matplotlib as mpl\n", 16 | "import matplotlib.pyplot as plt\n", 17 | "%matplotlib inline\n", 18 | "\n", 19 | "import safeopt\n", 20 | "\n", 21 | "mpl.rcParams['figure.figsize'] = (20.0, 10.0)\n", 22 | "mpl.rcParams['font.size'] = 20\n", 23 | "mpl.rcParams['lines.markersize'] = 20" 24 | ] 25 | }, 26 | { 27 | "cell_type": "markdown", 28 | "metadata": {}, 29 | "source": [ 30 | "## Define a kernel and function\n", 31 | "\n", 32 | "Here we define a kernel. The function is drawn at random from the GP and is corrupted by Gaussian noise" 33 | ] 34 | }, 35 | { 36 | "cell_type": "code", 37 | "execution_count": null, 38 | "metadata": { 39 | "collapsed": true 40 | }, 41 | "outputs": [], 42 | "source": [ 43 | "# Measurement noise\n", 44 | "noise_var = 0.05 ** 2\n", 45 | "\n", 46 | "# Bounds on the input variable\n", 47 | "bounds = [(-10., 10.)]\n", 48 | "parameter_set = safeopt.linearly_spaced_combinations(bounds, 1000)\n", 49 | "\n", 50 | "# Define Kernel\n", 51 | "kernel = GPy.kern.RBF(input_dim=len(bounds), variance=2., lengthscale=1.0, ARD=True)\n", 52 | "\n", 53 | "# Initial safe point\n", 54 | "x0 = np.zeros((1, len(bounds)))\n", 55 | "\n", 56 | "# Generate function with safe initial point at x=0\n", 57 | "def sample_safe_fun():\n", 58 | "    while True:\n", 59 | "        fun = safeopt.sample_gp_function(kernel, bounds, noise_var, 100)\n", 60 | "        if fun(0, noise=False) > 0.5:\n", 61 | "            break\n", 62 | "    return fun" 63 | ] 64 | }, 65 | { 66 | "cell_type": "markdown", 67 | "metadata": {}, 68 | "source": [ 69 | "## Interactive run of the algorithm" 70 | ] 71 | }, 72 | { 73 | "cell_type": "code", 74 | "execution_count": null, 75 | "metadata": { 76 | "collapsed": true 77 | }, 78 | "outputs": [], 79 | "source": [ 80 | "# Define the objective function\n", 81 | "fun = sample_safe_fun()\n", 82 | "\n", 83 | "# The statistical model of our objective function\n", 84 | "gp = GPy.models.GPRegression(x0, fun(x0), kernel, noise_var=noise_var)\n", 85 | "\n", 86 | "# The optimization routine\n", 87 | "opt = safeopt.SafeOptSwarm(gp, 0., bounds=bounds, threshold=0.2)\n", 88 | "# opt = safeopt.SafeOpt(gp, parameter_set, 0., lipschitz=None, threshold=0.2)" 89 | ] 90 | }, 91 | { 92 | "cell_type": "code", 93 | "execution_count": null, 94 | "metadata": {}, 95 | "outputs": [], 96 | "source": [ 97 | "def plot_gp():\n", 98 | "    # Plot the GP\n", 99 | "    
opt.plot(1000)\n", 100 | " # Plot the true function\n", 101 | " plt.plot(parameter_set, fun(parameter_set, noise=False), color='C2', alpha=0.3)\n", 102 | " \n", 103 | "plot_gp()" 104 | ] 105 | }, 106 | { 107 | "cell_type": "code", 108 | "execution_count": null, 109 | "metadata": {}, 110 | "outputs": [], 111 | "source": [ 112 | "# Obtain next query point\n", 113 | "x_next = opt.optimize()\n", 114 | "# Get a measurement from the real system\n", 115 | "y_meas = fun(x_next)\n", 116 | "# Add this to the GP model\n", 117 | "opt.add_new_data_point(x_next, y_meas)\n", 118 | "\n", 119 | "plot_gp()" 120 | ] 121 | }, 122 | { 123 | "cell_type": "code", 124 | "execution_count": null, 125 | "metadata": { 126 | "collapsed": true 127 | }, 128 | "outputs": [], 129 | "source": [] 130 | }, 131 | { 132 | "cell_type": "code", 133 | "execution_count": null, 134 | "metadata": { 135 | "collapsed": true 136 | }, 137 | "outputs": [], 138 | "source": [] 139 | }, 140 | { 141 | "cell_type": "code", 142 | "execution_count": null, 143 | "metadata": { 144 | "collapsed": true 145 | }, 146 | "outputs": [], 147 | "source": [] 148 | }, 149 | { 150 | "cell_type": "code", 151 | "execution_count": null, 152 | "metadata": { 153 | "collapsed": true 154 | }, 155 | "outputs": [], 156 | "source": [] 157 | }, 158 | { 159 | "cell_type": "code", 160 | "execution_count": null, 161 | "metadata": { 162 | "collapsed": true 163 | }, 164 | "outputs": [], 165 | "source": [] 166 | } 167 | ], 168 | "metadata": { 169 | "anaconda-cloud": {}, 170 | "kernelspec": { 171 | "display_name": "Python 3", 172 | "language": "python", 173 | "name": "python3" 174 | }, 175 | "language_info": { 176 | "codemirror_mode": { 177 | "name": "ipython", 178 | "version": 3 179 | }, 180 | "file_extension": ".py", 181 | "mimetype": "text/x-python", 182 | "name": "python", 183 | "nbconvert_exporter": "python", 184 | "pygments_lexer": "ipython3", 185 | "version": "3.5.3" 186 | }, 187 | "widgets": { 188 | "state": { 189 | "5df21601a9be49cb816a6ae251a4f52d": { 190 | "views": [ 191 | { 192 | "cell_index": 5 193 | } 194 | ] 195 | }, 196 | "9bc9833ac1af4a64b4b160ad65ca8e0e": { 197 | "views": [ 198 | { 199 | "cell_index": 5 200 | } 201 | ] 202 | } 203 | }, 204 | "version": "1.2.0" 205 | } 206 | }, 207 | "nbformat": 4, 208 | "nbformat_minor": 2 209 | } 210 | -------------------------------------------------------------------------------- /examples/1d_multiple_constraints_example.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": { 7 | "collapsed": true 8 | }, 9 | "outputs": [], 10 | "source": [ 11 | "from __future__ import print_function, division, absolute_import\n", 12 | "\n", 13 | "import GPy\n", 14 | "import numpy as np\n", 15 | "import matplotlib as mpl\n", 16 | "import matplotlib.pyplot as plt\n", 17 | "%matplotlib inline\n", 18 | "\n", 19 | "import safeopt\n", 20 | "\n", 21 | "mpl.rcParams['figure.figsize'] = (20.0, 10.0)\n", 22 | "mpl.rcParams['font.size'] = 20\n", 23 | "mpl.rcParams['lines.markersize'] = 20" 24 | ] 25 | }, 26 | { 27 | "cell_type": "markdown", 28 | "metadata": {}, 29 | "source": [ 30 | "## Define a kernel and function\n", 31 | "\n", 32 | "Here we define a kernel. 
The function is drawn at random from the GP and is corrupted by Gaussian noise" 33 | ] 34 | }, 35 | { 36 | "cell_type": "code", 37 | "execution_count": null, 38 | "metadata": { 39 | "collapsed": true 40 | }, 41 | "outputs": [], 42 | "source": [ 43 | "# Measurement noise\n", 44 | "noise_var = 0.05 ** 2\n", 45 | "noise_var2 = 1e-5\n", 46 | "\n", 47 | "# Bounds on the input variable\n", 48 | "bounds = [(-10., 10.)]\n", 49 | "\n", 50 | "# Define Kernel\n", 51 | "kernel = GPy.kern.RBF(input_dim=len(bounds), variance=2., lengthscale=1.0, ARD=True)\n", 52 | "kernel2 = kernel.copy()\n", 53 | "\n", 54 | "# set of parameters\n", 55 | "parameter_set = safeopt.linearly_spaced_combinations(bounds, 1000)\n", 56 | "\n", 57 | "# Initial safe point\n", 58 | "x0 = np.zeros((1, len(bounds)))\n", 59 | "\n", 60 | "# Generate function with safe initial point at x=0\n", 61 | "def sample_safe_fun():\n", 62 | "    fun = safeopt.sample_gp_function(kernel, bounds, noise_var, 100)\n", 63 | "    while True:\n", 64 | "        fun2 = safeopt.sample_gp_function(kernel2, bounds, noise_var2, 100)\n", 65 | "        if fun2(0, noise=False) > 1:\n", 66 | "            break\n", 67 | "    \n", 68 | "    def combined_fun(x, noise=True):\n", 69 | "        return np.hstack([fun(x, noise), fun2(x, noise)])\n", 70 | "    return combined_fun" 71 | ] 72 | }, 73 | { 74 | "cell_type": "markdown", 75 | "metadata": {}, 76 | "source": [ 77 | "## Interactive run of the algorithm" 78 | ] 79 | }, 80 | { 81 | "cell_type": "code", 82 | "execution_count": null, 83 | "metadata": { 84 | "collapsed": true 85 | }, 86 | "outputs": [], 87 | "source": [ 88 | "# Define the objective function\n", 89 | "fun = sample_safe_fun()\n", 90 | "\n", 91 | "# The statistical model of our objective function and safety constraint\n", 92 | "y0 = fun(x0)\n", 93 | "gp = GPy.models.GPRegression(x0, y0[:, 0, None], kernel, noise_var=noise_var)\n", 94 | "gp2 = GPy.models.GPRegression(x0, y0[:, 1, None], kernel2, noise_var=noise_var2)\n", 95 | "\n", 96 | "# The optimization routine\n", 97 | "# opt = safeopt.SafeOptSwarm([gp, gp2], [-np.inf, 0.], bounds=bounds, threshold=0.2)\n", 98 | "opt = safeopt.SafeOpt([gp, gp2], parameter_set, [-np.inf, 0.], lipschitz=None, threshold=0.1)\n" 99 | ] 100 | }, 101 | { 102 | "cell_type": "code", 103 | "execution_count": null, 104 | "metadata": {}, 105 | "outputs": [], 106 | "source": [ 107 | "def plot():\n", 108 | "    # Plot the GP\n", 109 | "    opt.plot(100)\n", 110 | "    # Plot the true function\n", 111 | "    y = fun(parameter_set, noise=False)\n", 112 | "    for manager, true_y in zip(mpl._pylab_helpers.Gcf.get_all_fig_managers(), y.T):\n", 113 | "        figure = manager.canvas.figure\n", 114 | "        figure.gca().plot(parameter_set, true_y, color='C2', alpha=0.3)\n", 115 | "    \n", 116 | "plot()" 117 | ] 118 | }, 119 | { 120 | "cell_type": "code", 121 | "execution_count": null, 122 | "metadata": {}, 123 | "outputs": [], 124 | "source": [ 125 | "# Obtain next query point\n", 126 | "x_next = opt.optimize()\n", 127 | "# Get a measurement from the real system\n", 128 | "y_meas = fun(x_next)\n", 129 | "# Add this to the GP model\n", 130 | "opt.add_new_data_point(x_next, y_meas)\n", 131 | "\n", 132 | "plot()" 133 | ] 134 | }, 135 | { 136 | "cell_type": "code", 137 | "execution_count": null, 138 | "metadata": { 139 | "collapsed": true 140 | }, 141 | "outputs": [], 142 | "source": [] 143 | }, 144 | { 145 | "cell_type": "code", 146 | "execution_count": null, 147 | "metadata": { 148 | "collapsed": true 149 | }, 150 | "outputs": [], 151 | "source": [] 152 | }, 153 | { 154 | "cell_type": "code", 155 | 
"execution_count": null, 156 | "metadata": { 157 | "collapsed": true 158 | }, 159 | "outputs": [], 160 | "source": [] 161 | }, 162 | { 163 | "cell_type": "code", 164 | "execution_count": null, 165 | "metadata": { 166 | "collapsed": true 167 | }, 168 | "outputs": [], 169 | "source": [] 170 | }, 171 | { 172 | "cell_type": "code", 173 | "execution_count": null, 174 | "metadata": { 175 | "collapsed": true 176 | }, 177 | "outputs": [], 178 | "source": [] 179 | }, 180 | { 181 | "cell_type": "code", 182 | "execution_count": null, 183 | "metadata": { 184 | "collapsed": true 185 | }, 186 | "outputs": [], 187 | "source": [] 188 | }, 189 | { 190 | "cell_type": "code", 191 | "execution_count": null, 192 | "metadata": { 193 | "collapsed": true 194 | }, 195 | "outputs": [], 196 | "source": [] 197 | }, 198 | { 199 | "cell_type": "code", 200 | "execution_count": null, 201 | "metadata": { 202 | "collapsed": true 203 | }, 204 | "outputs": [], 205 | "source": [] 206 | }, 207 | { 208 | "cell_type": "code", 209 | "execution_count": null, 210 | "metadata": { 211 | "collapsed": true 212 | }, 213 | "outputs": [], 214 | "source": [] 215 | } 216 | ], 217 | "metadata": { 218 | "anaconda-cloud": {}, 219 | "kernelspec": { 220 | "display_name": "Python 3", 221 | "language": "python", 222 | "name": "python3" 223 | }, 224 | "language_info": { 225 | "codemirror_mode": { 226 | "name": "ipython", 227 | "version": 3 228 | }, 229 | "file_extension": ".py", 230 | "mimetype": "text/x-python", 231 | "name": "python", 232 | "nbconvert_exporter": "python", 233 | "pygments_lexer": "ipython3", 234 | "version": "3.5.3" 235 | }, 236 | "widgets": { 237 | "state": { 238 | "495396d2cac242a1ac2143b7a4f119ad": { 239 | "views": [ 240 | { 241 | "cell_index": 5 242 | } 243 | ] 244 | }, 245 | "f6d454f3913f46898b83c02667c35060": { 246 | "views": [ 247 | { 248 | "cell_index": 5 249 | } 250 | ] 251 | }, 252 | "ff12471c8cae47a4aafc50325042c218": { 253 | "views": [ 254 | { 255 | "cell_index": 5 256 | } 257 | ] 258 | } 259 | }, 260 | "version": "1.2.0" 261 | } 262 | }, 263 | "nbformat": 4, 264 | "nbformat_minor": 2 265 | } 266 | -------------------------------------------------------------------------------- /examples/2d_example.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": { 7 | "collapsed": true 8 | }, 9 | "outputs": [], 10 | "source": [ 11 | "from __future__ import print_function, division, absolute_import\n", 12 | "\n", 13 | "import GPy\n", 14 | "import numpy as np\n", 15 | "import matplotlib as mpl\n", 16 | "import matplotlib.pyplot as plt\n", 17 | "%matplotlib inline\n", 18 | "\n", 19 | "import safeopt\n", 20 | "\n", 21 | "mpl.rcParams['figure.figsize'] = (20.0, 10.0)\n", 22 | "mpl.rcParams['font.size'] = 20\n", 23 | "mpl.rcParams['lines.markersize'] = 20" 24 | ] 25 | }, 26 | { 27 | "cell_type": "markdown", 28 | "metadata": {}, 29 | "source": [ 30 | "## Define a kernel and function\n", 31 | "\n", 32 | "Here we define a kernel. 
The function is drawn at random from the GP and is corrupted by Gaussian noise" 33 | ] 34 | }, 35 | { 36 | "cell_type": "code", 37 | "execution_count": null, 38 | "metadata": { 39 | "collapsed": true 40 | }, 41 | "outputs": [], 42 | "source": [ 43 | "# Measurement noise\n", 44 | "noise_var = 0.05 ** 2\n", 45 | "\n", 46 | "# Bounds on the input variable\n", 47 | "bounds = [(-5., 5.), (-5., 5.)]\n", 48 | "\n", 49 | "# Define Kernel\n", 50 | "kernel = GPy.kern.RBF(input_dim=len(bounds), variance=2., lengthscale=1.0,\n", 51 | "                      ARD=True)\n", 52 | "\n", 53 | "# Initial safe point\n", 54 | "x0 = np.zeros((1, len(bounds)))\n", 55 | "\n", 56 | "# Generate function with safe initial point at x=0\n", 57 | "def sample_safe_fun():\n", 58 | "    while True:\n", 59 | "        fun = safeopt.sample_gp_function(kernel, bounds, noise_var, 10)\n", 60 | "        if fun([0,0], noise=False) > 0.5:\n", 61 | "            break\n", 62 | "    return fun" 63 | ] 64 | }, 65 | { 66 | "cell_type": "markdown", 67 | "metadata": {}, 68 | "source": [ 69 | "## Interactive run of the algorithm\n", 70 | "\n", 71 | "The slow part of running this is the plotting with matplotlib. Consider switching to the 2D level sets." 72 | ] 73 | }, 74 | { 75 | "cell_type": "code", 76 | "execution_count": null, 77 | "metadata": { 78 | "collapsed": true 79 | }, 80 | "outputs": [], 81 | "source": [ 82 | "# Define the objective function\n", 83 | "fun = sample_safe_fun()\n", 84 | "\n", 85 | "# The statistical model of our objective function\n", 86 | "gp = GPy.models.GPRegression(x0, fun(x0), kernel, noise_var=noise_var)\n", 87 | "\n", 88 | "# The optimization routine\n", 89 | "opt = safeopt.SafeOptSwarm(gp, 0., bounds=bounds, threshold=0.2)\n", 90 | "# parameter_set = safeopt.linearly_spaced_combinations(bounds, 100)\n", 91 | "# opt = safeopt.SafeOpt(gp, parameter_set, 0., lipschitz=None, threshold=0.2)" 92 | ] 93 | }, 94 | { 95 | "cell_type": "code", 96 | "execution_count": null, 97 | "metadata": {}, 98 | "outputs": [], 99 | "source": [ 100 | "opt.plot(100, plot_3d=False)" 101 | ] 102 | }, 103 | { 104 | "cell_type": "code", 105 | "execution_count": null, 106 | "metadata": {}, 107 | "outputs": [], 108 | "source": [ 109 | "# Obtain next query point\n", 110 | "x_next = opt.optimize()\n", 111 | "# Get a measurement from the real system\n", 112 | "y_meas = fun(x_next)\n", 113 | "# Add this to the GP model\n", 114 | "opt.add_new_data_point(x_next, y_meas)\n", 115 | "\n", 116 | "opt.plot(100)" 117 | ] 118 | }, 119 | { 120 | "cell_type": "code", 121 | "execution_count": null, 122 | "metadata": { 123 | "collapsed": true 124 | }, 125 | "outputs": [], 126 | "source": [] 127 | }, 128 | { 129 | "cell_type": "code", 130 | "execution_count": null, 131 | "metadata": { 132 | "collapsed": true 133 | }, 134 | "outputs": [], 135 | "source": [] 136 | }, 137 | { 138 | "cell_type": "code", 139 | "execution_count": null, 140 | "metadata": { 141 | "collapsed": true 142 | }, 143 | "outputs": [], 144 | "source": [] 145 | }, 146 | { 147 | "cell_type": "code", 148 | "execution_count": null, 149 | "metadata": { 150 | "collapsed": true 151 | }, 152 | "outputs": [], 153 | "source": [] 154 | }, 155 | { 156 | "cell_type": "code", 157 | "execution_count": null, 158 | "metadata": { 159 | "collapsed": true 160 | }, 161 | "outputs": [], 162 | "source": [] 163 | } 164 | ], 165 | "metadata": { 166 | "anaconda-cloud": {}, 167 | "kernelspec": { 168 | "display_name": "Python 3", 169 | "language": "python", 170 | "name": "python3" 171 | }, 172 | "language_info": { 173 | "codemirror_mode": { 174 | "name": "ipython", 
175 | "version": 3 176 | }, 177 | "file_extension": ".py", 178 | "mimetype": "text/x-python", 179 | "name": "python", 180 | "nbconvert_exporter": "python", 181 | "pygments_lexer": "ipython3", 182 | "version": "3.5.4" 183 | }, 184 | "widgets": { 185 | "state": { 186 | "05cb035d2eea420386e6f6c140aea4f9": { 187 | "views": [ 188 | { 189 | "cell_index": 5 190 | } 191 | ] 192 | }, 193 | "3094a61f798d48c78c1f1479c686b955": { 194 | "views": [ 195 | { 196 | "cell_index": 5 197 | } 198 | ] 199 | }, 200 | "9347db74f059466faa1b6e74f13183ad": { 201 | "views": [ 202 | { 203 | "cell_index": 5 204 | } 205 | ] 206 | } 207 | }, 208 | "version": "1.2.0" 209 | } 210 | }, 211 | "nbformat": 4, 212 | "nbformat_minor": 1 213 | } 214 | -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | ## Examples 2 | 3 | These Jupyter notebooks demonstrate how the library works. 4 | 5 | * `1d_example` is a simple one-dimensional example of how to use the library. 6 | * `2d_example` extends this to two-dimensional parameters, which is not very different except for the plotting. 7 | * `1d_multiple_constraints_example` shows how multiple constraints can be incorporated into the library. 8 | * `context_example` shows how contexts, variables that impact the resulting performance, but are fixed by the environment (e.g., temperature, wind) rather than the user, can be included with the library. 9 | 10 | ## Usage 11 | 12 | Clone the repository and move to the examples folder. Launch a `jupyter-notebook` instance within this folder. In the browser, select the notebook that you want to run and evaluate all cells. 13 | 14 | ## Structure 15 | 16 | These notebooks all follow the same structure. In the first cell, the library is imported, along with other relevant libraries. 17 | The second cell defines the Gaussian process model over functions and specifies the optimization problem (by sampling a function from the Gaussian process distribution). The last step is to plot the GP and to run the safeopt optimization routine to obtain a new data point, as sketched below. 18 | 19 | 
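The following snippet condenses that structure into one loop. It is a sketch assembled from the cells of `1d_example`, not an additional file in the repository; the fixed iteration count and the omission of the notebook's re-sampling of a safe initial function are simplifications.

```python
import GPy
import numpy as np
import safeopt

# GP prior and a random test function with a safe initial point at x = 0
noise_var = 0.05 ** 2
bounds = [(-10., 10.)]
kernel = GPy.kern.RBF(input_dim=len(bounds), variance=2., lengthscale=1.0, ARD=True)
fun = safeopt.sample_gp_function(kernel, bounds, noise_var, 100)

# Statistical model of the objective, initialized at the safe point
x0 = np.zeros((1, len(bounds)))
gp = GPy.models.GPRegression(x0, fun(x0), kernel, noise_var=noise_var)

# Safe optimization routine with safety threshold 0 (swarm-based variant)
opt = safeopt.SafeOptSwarm(gp, 0., bounds=bounds, threshold=0.2)

for _ in range(10):
    x_next = opt.optimize()                 # obtain next safe query point
    y_meas = fun(x_next)                    # measurement from the (simulated) system
    opt.add_new_data_point(x_next, y_meas)  # update the GP model
```
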
The function is drawn at random from the GP and is corrupted by Gaussian noise." 33 | ] 34 | }, 35 | { 36 | "cell_type": "code", 37 | "execution_count": null, 38 | "metadata": { 39 | "collapsed": true 40 | }, 41 | "outputs": [], 42 | "source": [ 43 | "# Measurement noise\n", 44 | "noise_var = 0.05 ** 2\n", 45 | "\n", 46 | "# Bounds on the input variables\n", 47 | "bounds = [(-5., 5.), (-5., 5.)]\n", 48 | "parameter_set = safeopt.linearly_spaced_combinations([bounds[0]], 1000)\n", 49 | "\n", 50 | "# Define Kernel\n", 51 | "# works on the first column of X, index=0\n", 52 | "k_parameters = GPy.kern.RBF(input_dim=1, variance=2., lengthscale=1.0, active_dims=[0])\n", 53 | "# works on the second column of X, index=1\n", 54 | "k_context = GPy.kern.RBF(input_dim=1, variance=2., lengthscale=1.0, active_dims=[1], name='context')\n", 55 | "kernel = k_parameters * k_context\n", 56 | "\n", 57 | "# number of context variables\n", 58 | "num_contexts = 1\n", 59 | "\n", 60 | "# Initial safe point\n", 61 | "x0 = np.array([[0]])\n", 62 | "\n", 63 | "# Generate function with safe initial point at x=0\n", 64 | "def sample_safe_fun(context=0):\n", 65 | " context = np.atleast_2d(context)\n", 66 | " while True:\n", 67 | " # Joint function over parameters and contexts\n", 68 | " sampled_fun = safeopt.sample_gp_function(kernel.copy(), bounds, noise_var, 10)\n", 69 | " \n", 70 | " if sampled_fun(np.hstack([x0, context]), noise=False) > 0.5:\n", 71 | " break\n", 72 | " \n", 73 | " return sampled_fun" 74 | ] 75 | }, 76 | { 77 | "cell_type": "code", 78 | "execution_count": null, 79 | "metadata": { 80 | "collapsed": true 81 | }, 82 | "outputs": [], 83 | "source": [ 84 | "def plot(context):\n", 85 | " # Make points transparent when they belong to a different context\n", 86 | " context = np.atleast_2d(context)\n", 87 | " opt.context = context\n", 88 | " \n", 89 | " relevance = opt.gp.kern.context.K(np.hstack([[[0]], context]), opt.gp.X)\n", 90 | " relevance /= opt.gp.kern.context.variance\n", 91 | " relevance = np.exp(100 * relevance) / np.exp(100)\n", 92 | " relevance[relevance < 0.25] = 0.25\n", 93 | " point_color = np.zeros((opt.gp.X.shape[0], 4))\n", 94 | " point_color[:, 3] = relevance\n", 95 | " \n", 96 | " # Plot GP\n", 97 | " opt.plot(n_samples=1000, point_color=point_color)\n", 98 | " \n", 99 | " # Plot the true function\n", 100 | " data = np.concatenate((parameter_set, np.broadcast_to(context, (parameter_set.shape[0], context.shape[1]))), axis=1)\n", 101 | " plt.plot(parameter_set, fun(data, noise=False), color='C2', alpha=0.3)" 102 | ] 103 | }, 104 | { 105 | "cell_type": "code", 106 | "execution_count": null, 107 | "metadata": {}, 108 | "outputs": [], 109 | "source": [ 110 | "context = np.array([[0]])\n", 111 | "\n", 112 | "# Define the objective function\n", 113 | "fun = sample_safe_fun(context)\n", 114 | "\n", 115 | "# The statistical model of our objective function\n", 116 | "x = np.hstack([x0, context])\n", 117 | "gp = GPy.models.GPRegression(x, fun(x), kernel, noise_var=noise_var)\n", 118 | "\n", 119 | "# The optimization routine\n", 120 | "opt = safeopt.SafeOpt(gp, parameter_set, 0., num_contexts=1, threshold=0.5)\n", 121 | "opt.context = context\n", 122 | "\n", 123 | "plot(context)" 124 | ] 125 | }, 126 | { 127 | "cell_type": "code", 128 | "execution_count": null, 129 | "metadata": {}, 130 | "outputs": [], 131 | "source": [ 132 | "# Obtain next query point\n", 133 | "x_next = opt.optimize(context)\n", 134 | "\n", 135 | "# Get a measurement from the real system\n", 136 | "y_meas = fun(np.hstack((x_next[None],
context)))\n", 137 | "\n", 138 | "# Add this to the GP model\n", 139 | "opt.add_new_data_point(x_next, y_meas, context=context)\n", 140 | "\n", 141 | "plot(context=context)" 142 | ] 143 | }, 144 | { 145 | "cell_type": "code", 146 | "execution_count": null, 147 | "metadata": {}, 148 | "outputs": [], 149 | "source": [ 150 | "context = np.array([[0.1]])\n", 151 | "plot(context)" 152 | ] 153 | }, 154 | { 155 | "cell_type": "code", 156 | "execution_count": null, 157 | "metadata": {}, 158 | "outputs": [], 159 | "source": [ 160 | "# Obtain next query point\n", 161 | "x_next = opt.optimize(context)\n", 162 | "\n", 163 | "# Get a measurement from the real system\n", 164 | "y_meas = fun(np.hstack((x_next[None], context)))\n", 165 | "\n", 166 | "# Add this to the GP model\n", 167 | "opt.add_new_data_point(x_next, y_meas, context=context)\n", 168 | "\n", 169 | "plot(context=context)" 170 | ] 171 | }, 172 | { 173 | "cell_type": "code", 174 | "execution_count": null, 175 | "metadata": { 176 | "collapsed": true 177 | }, 178 | "outputs": [], 179 | "source": [] 180 | } 181 | ], 182 | "metadata": { 183 | "kernelspec": { 184 | "display_name": "Python 3", 185 | "language": "python", 186 | "name": "python3" 187 | }, 188 | "language_info": { 189 | "codemirror_mode": { 190 | "name": "ipython", 191 | "version": 3 192 | }, 193 | "file_extension": ".py", 194 | "mimetype": "text/x-python", 195 | "name": "python", 196 | "nbconvert_exporter": "python", 197 | "pygments_lexer": "ipython3", 198 | "version": "3.5.3" 199 | } 200 | }, 201 | "nbformat": 4, 202 | "nbformat_minor": 2 203 | } 204 | -------------------------------------------------------------------------------- /requirements.dev.txt: -------------------------------------------------------------------------------- 1 | pytest 2 | pytest-cov 3 | flake8>=3.0,<=3.5.0 4 | pydocstyle<=2.0,<=3.0 5 | mock 6 | 7 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | GPy>=0.8 2 | numpy>=1.7,<2.0 3 | scipy>=0.12 4 | matplotlib>=1.3 5 | future 6 | 7 | -------------------------------------------------------------------------------- /safeopt/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | The `safeopt` package implements tools for Safe Bayesian optimization. 3 | 4 | Main classes 5 | ------------ 6 | 7 | These classes provide the main functionality for Safe Bayesian optimization. 8 | :class:`SafeOpt` implements the exact algorithm, which is very inefficient 9 | for large problems. :class:`SafeOptSwarm` scales to higher-dimensional 10 | problems by relying on heuristics and adaptive swarm discretization. 11 | 12 | .. autosummary:: 13 | :template: template.rst 14 | :toctree: 15 | 16 | SafeOpt 17 | SafeOptSwarm 18 | 19 | Utilities 20 | --------- 21 | 22 | The following are utilities to make testing and working with the library more 23 | pleasant. 24 | 25 | .. autosummary:: 26 | :template: template.rst 27 | :toctree: 28 | 29 | sample_gp_function 30 | linearly_spaced_combinations 31 | plot_2d_gp 32 | plot_3d_gp 33 | plot_contour_gp 34 | """ 35 | 36 | from __future__ import absolute_import 37 | 38 | from .utilities import * 39 | from .gp_opt import * 40 | -------------------------------------------------------------------------------- /safeopt/gp_opt.py: -------------------------------------------------------------------------------- 1 | """ 2 | Classes that implement SafeOpt. 
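It contains the exact, grid-based `SafeOpt` algorithm as well as the more scalable `SafeOptSwarm` variant that relies on adaptive particle swarms.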
3 | 4 | Authors: - Felix Berkenkamp (befelix at inf dot ethz dot ch) 5 | - Nicolas Carion (carion dot nicolas at gmail dot com) 6 | """ 7 | 8 | from __future__ import print_function, absolute_import, division 9 | 10 | from collections import Sequence 11 | from functools import partial 12 | 13 | import numpy as np 14 | from scipy.spatial.distance import cdist 15 | from scipy.special import expit 16 | from scipy.stats import norm 17 | from builtins import range 18 | 19 | from .utilities import (plot_2d_gp, plot_3d_gp, plot_contour_gp, 20 | linearly_spaced_combinations) 21 | from .swarm import SwarmOptimization 22 | 23 | 24 | import logging 25 | 26 | 27 | __all__ = ['SafeOpt', 'SafeOptSwarm'] 28 | 29 | 30 | class GaussianProcessOptimization(object): 31 | """ 32 | Base class for GP optimization. 33 | 34 | Handles common functionality. 35 | 36 | Parameters 37 | ---------- 38 | gp: GPy Gaussian process 39 | fmin : float or list of floats 40 | Safety threshold for the function value. If multiple safety constraints 41 | are used this can also be a list of floats (the first one is always 42 | the one for the values, can be set to None if not wanted). 43 | beta: float or callable 44 | A constant or a function of the time step that scales the confidence 45 | interval of the acquisition function. 46 | threshold: float or list of floats 47 | The algorithm will not try to expand any points that are below this 48 | threshold. This makes the algorithm stop expanding points eventually. 49 | If a list, this represents the stopping criterion for all the gps. 50 | This ignores the scaling factor. 51 | scaling: list of floats or "auto" 52 | A list used to scale the GP uncertainties to compensate for 53 | different input sizes. This should be set to the maximal variance of 54 | each kernel. You should probably leave this to "auto" unless your 55 | kernel is non-stationary. 
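A minimal construction sketch (illustrative only; in practice the class is used through its subclasses `SafeOpt` and `SafeOptSwarm`)::

    import GPy
    import numpy as np

    # GP prior with one safe initial observation
    x0 = np.array([[0.]])
    y0 = np.array([[1.]])
    gp = GPy.models.GPRegression(x0, y0, noise_var=0.01 ** 2)

    # beta can be a constant or a callable of the time step
    opt = GaussianProcessOptimization(gp, fmin=0., beta=2.)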
56 | """ 57 | 58 | def __init__(self, gp, fmin, beta=2, num_contexts=0, threshold=0, 59 | scaling='auto'): 60 | """Initialization, see `GaussianProcessOptimization`.""" 61 | super(GaussianProcessOptimization, self).__init__() 62 | 63 | if isinstance(gp, list): 64 | self.gps = gp 65 | else: 66 | self.gps = [gp] 67 | self.gp = self.gps[0] 68 | 69 | self.fmin = fmin 70 | if not isinstance(self.fmin, list): 71 | self.fmin = [self.fmin] * len(self.gps) 72 | self.fmin = np.atleast_1d(np.asarray(self.fmin).squeeze()) 73 | 74 | if hasattr(beta, '__call__'): 75 | # Beta is a function of t 76 | self.beta = beta 77 | else: 78 | # Assume that beta is a constant 79 | self.beta = lambda t: beta 80 | 81 | if scaling == 'auto': 82 | dummy_point = np.zeros((1, self.gps[0].input_dim)) 83 | self.scaling = [gpm.kern.Kdiag(dummy_point)[0] for gpm in self.gps] 84 | self.scaling = np.sqrt(np.asarray(self.scaling)) 85 | else: 86 | self.scaling = np.asarray(scaling) 87 | if self.scaling.shape[0] != len(self.gps): 88 | raise ValueError("The number of scaling values should be " 89 | "equal to the number of GPs") 90 | 91 | self.threshold = threshold 92 | self._parameter_set = None 93 | self.bounds = None 94 | self.num_samples = 0 95 | self.num_contexts = num_contexts 96 | 97 | self._x = None 98 | self._y = None 99 | self._get_initial_xy() 100 | 101 | @property 102 | def x(self): 103 | return self._x 104 | 105 | @property 106 | def y(self): 107 | return self._y 108 | 109 | @property 110 | def data(self): 111 | """Return the data within the GP models.""" 112 | return self._x, self._y 113 | 114 | @property 115 | def t(self): 116 | """Return the time step (number of measurements).""" 117 | return self._x.shape[0] 118 | 119 | def _get_initial_xy(self): 120 | """Get the initial x/y data from the GPs.""" 121 | self._x = self.gp.X 122 | y = [self.gp.Y] 123 | 124 | for gp in self.gps[1:]: 125 | if np.allclose(self._x, gp.X): 126 | y.append(gp.Y) 127 | else: 128 | raise NotImplemented('The GPs have different measurements.') 129 | 130 | self._y = np.concatenate(y, axis=1) 131 | 132 | def plot(self, n_samples, axis=None, figure=None, plot_3d=False, 133 | **kwargs): 134 | """ 135 | Plot the current state of the optimization. 
136 | 137 | Parameters 138 | ---------- 139 | n_samples: int 140 | How many samples to use for plotting 141 | axis: matplotlib axis 142 | The axis on which to draw (does not get cleared first) 143 | figure: matplotlib figure 144 | Ignored if axis is already defined 145 | plot_3d: boolean 146 | If set to True, shows a 3D plot for 2-dimensional data 147 | """ 148 | # Fix contexts to their current values 149 | if self.num_contexts > 0 and 'fixed_inputs' not in kwargs: 150 | kwargs.update(fixed_inputs=self.context_fixed_inputs) 151 | 152 | true_input_dim = self.gp.kern.input_dim - self.num_contexts 153 | if true_input_dim == 1 or plot_3d: 154 | inputs = np.zeros((n_samples ** true_input_dim, self.gp.input_dim)) 155 | inputs[:, :true_input_dim] = linearly_spaced_combinations( 156 | self.bounds[:true_input_dim], 157 | n_samples) 158 | 159 | if not isinstance(n_samples, Sequence): 160 | n_samples = [n_samples] * len(self.bounds) 161 | 162 | axes = [] 163 | if self.gp.input_dim - self.num_contexts == 1: 164 | # 2D plots with uncertainty 165 | for gp, fmin in zip(self.gps, self.fmin): 166 | if fmin == -np.inf: 167 | fmin = None 168 | ax = plot_2d_gp(gp, inputs, figure=figure, axis=axis, 169 | fmin=fmin, **kwargs) 170 | axes.append(ax) 171 | else: 172 | if plot_3d: 173 | for gp in self.gps: 174 | plot_3d_gp(gp, inputs, figure=figure, axis=axis, **kwargs) 175 | else: 176 | for gp in self.gps: 177 | plot_contour_gp(gp, 178 | [np.linspace(self.bounds[0][0], 179 | self.bounds[0][1], 180 | n_samples[0]), 181 | np.linspace(self.bounds[1][0], 182 | self.bounds[1][1], 183 | n_samples[1])], 184 | figure=figure, 185 | axis=axis) 186 | 187 | def _add_context(self, x, context): 188 | """Add the context to a vector. 189 | 190 | Parameters 191 | ---------- 192 | x : ndarray 193 | context : ndarray 194 | 195 | Returns 196 | ------- 197 | x_extended : ndarray 198 | """ 199 | context = np.atleast_2d(context) 200 | num_contexts = context.shape[1] 201 | 202 | x2 = np.empty((x.shape[0], x.shape[1] + num_contexts), dtype=float) 203 | x2[:, :x.shape[1]] = x 204 | x2[:, x.shape[1]:] = context 205 | return x2 206 | 207 | def _add_data_point(self, gp, x, y, context=None): 208 | """Add a data point to a particular GP. 209 | 210 | This should only be called on its own if you know what you're doing. 211 | This does not update the global data stores self.x and self.y. 212 | 213 | Parameters 214 | ---------- 215 | x: 2d-array 216 | y: 2d-array 217 | context: array_like 218 | The context(s) used for the data points 219 | gp: instance of GPy.models.GPRegression 220 | If specified, determines the GP to which we add the data point. 221 | Note that this should only be used if that data point is going 222 | to be removed again. 223 | """ 224 | if context is not None: 225 | x = self._add_context(x, context) 226 | 227 | gp.set_XY(np.vstack([gp.X, x]), 228 | np.vstack([gp.Y, y])) 229 | 230 | def add_new_data_point(self, x, y, context=None): 231 | """ 232 | Add a new function observation to the GPs. 233 | 234 | Parameters 235 | ---------- 236 | x: 2d-array 237 | y: 2d-array 238 | context: array_like 239 | The context(s) used for the data points.
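A short sketch (illustrative; assumes an existing optimizer `opt` with two GPs and numpy imported as `np`)::

    x_next = opt.optimize()
    # one column per GP; use np.nan to skip the measurement of a GP
    y_meas = np.array([[0.8, 0.1]])
    opt.add_new_data_point(x_next, y_meas)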
240 | """ 241 | x = np.atleast_2d(x) 242 | y = np.atleast_2d(y) 243 | 244 | if self.num_contexts: 245 | x = self._add_context(x, context) 246 | 247 | for i, gp in enumerate(self.gps): 248 | not_nan = ~np.isnan(y[:, i]) 249 | if np.any(not_nan): 250 | # Add data to GP (context already included in x) 251 | self._add_data_point(gp, x[not_nan, :], y[not_nan, [i]]) 252 | 253 | # Update global data stores 254 | self._x = np.concatenate((self._x, x), axis=0) 255 | self._y = np.concatenate((self._y, y), axis=0) 256 | 257 | def _remove_last_data_point(self, gp): 258 | """Remove the last data point of a specific GP. 259 | 260 | This does not update global data stores, self.x and self.y. 261 | 262 | Parameters 263 | ---------- 264 | gp: Instance of GPy.models.GPRegression 265 | The gp that the last data point should be removed from 266 | """ 267 | gp.set_XY(gp.X[:-1, :], gp.Y[:-1, :]) 268 | 269 | def remove_last_data_point(self): 270 | """Remove the data point that was last added to the GP.""" 271 | last_y = self._y[-1] 272 | 273 | for gp, yi in zip(self.gps, last_y): 274 | if not np.isnan(yi): 275 | gp.set_XY(gp.X[:-1, :], gp.Y[:-1, :]) 276 | 277 | self._x = self._x[:-1, :] 278 | self._y = self._y[:-1, :] 279 | 280 | 281 | class SafeOpt(GaussianProcessOptimization): 282 | """A class for Safe Bayesian Optimization. 283 | 284 | This class implements the `SafeOpt` algorithm. It uses a Gaussian 285 | process model in order to determine parameter combinations that are safe 286 | with high probability. Based on these, it aims to both expand the set of 287 | safe parameters and to find the optimal parameters within the safe set. 288 | 289 | Parameters 290 | ---------- 291 | gp: GPy Gaussian process 292 | A Gaussian process which is initialized with safe, initial data points. 293 | If a list of GPs then the first one is the value, while all the 294 | other ones are safety constraints. 295 | parameter_set: 2d-array 296 | List of parameters 297 | fmin: list of floats 298 | Safety threshold for the function value. If multiple safety constraints 299 | are used this can also be a list of floats (the first one is always 300 | the one for the values, can be set to None if not wanted) 301 | lipschitz: list of floats 302 | The Lipschitz constant of the system, if None the GP confidence 303 | intervals are used directly. 304 | beta: float or callable 305 | A constant or a function of the time step that scales the confidence 306 | interval of the acquisition function. 307 | threshold: float or list of floats 308 | The algorithm will not try to expand any points that are below this 309 | threshold. This makes the algorithm stop expanding points eventually. 310 | If a list, this represents the stopping criterion for all the gps. 311 | This ignores the scaling factor. 312 | scaling: list of floats or "auto" 313 | A list used to scale the GP uncertainties to compensate for 314 | different input sizes. This should be set to the maximal variance of 315 | each kernel. You should probably leave this to "auto" unless your 316 | kernel is non-stationary. 
317 | 318 | Examples 319 | -------- 320 | >>> from safeopt import SafeOpt 321 | >>> from safeopt import linearly_spaced_combinations 322 | >>> import GPy 323 | >>> import numpy as np 324 | 325 | Define a Gaussian process prior over the performance 326 | 327 | >>> x = np.array([[0.]]) 328 | >>> y = np.array([[1.]]) 329 | >>> gp = GPy.models.GPRegression(x, y, noise_var=0.01**2) 330 | 331 | >>> bounds = [[-1., 1.]] 332 | >>> parameter_set = linearly_spaced_combinations([[-1., 1.]], 333 | ... num_samples=100) 334 | 335 | Initialize the Bayesian optimization and get new parameters to evaluate 336 | 337 | >>> opt = SafeOpt(gp, parameter_set, fmin=[0.]) 338 | >>> next_parameters = opt.optimize() 339 | 340 | Add a new data point with the parameters and the performance to the GP. The 341 | performance has normally been determined through an external function call. 342 | 343 | >>> performance = np.array([[1.]]) 344 | >>> opt.add_new_data_point(next_parameters, performance) 345 | """ 346 | 347 | def __init__(self, gp, parameter_set, fmin, lipschitz=None, beta=2, 348 | num_contexts=0, threshold=0, scaling='auto'): 349 | """Initialization, see `SafeOpt`.""" 350 | super(SafeOpt, self).__init__(gp, 351 | fmin=fmin, 352 | beta=beta, 353 | num_contexts=num_contexts, 354 | threshold=threshold, 355 | scaling=scaling) 356 | 357 | if self.num_contexts > 0: 358 | context_shape = (parameter_set.shape[0], self.num_contexts) 359 | self.inputs = np.hstack((parameter_set, 360 | np.zeros(context_shape, 361 | dtype=parameter_set.dtype))) 362 | self.parameter_set = self.inputs[:, :-self.num_contexts] 363 | else: 364 | self.inputs = self.parameter_set = parameter_set 365 | 366 | self.liptschitz = lipschitz 367 | 368 | if self.liptschitz is not None: 369 | if not isinstance(self.liptschitz, list): 370 | self.liptschitz = [self.liptschitz] * len(self.gps) 371 | self.liptschitz = np.atleast_1d( 372 | np.asarray(self.liptschitz).squeeze()) 373 | 374 | # Value intervals 375 | self.Q = np.empty((self.inputs.shape[0], 2 * len(self.gps)), 376 | dtype=np.float) 377 | 378 | # Safe set 379 | self.S = np.zeros(self.inputs.shape[0], dtype=np.bool) 380 | 381 | # Switch to use confidence intervals for safety 382 | if lipschitz is None: 383 | self._use_lipschitz = False 384 | else: 385 | self._use_lipschitz = True 386 | 387 | # Set of expanders and maximizers 388 | self.G = self.S.copy() 389 | self.M = self.S.copy() 390 | 391 | @property 392 | def use_lipschitz(self): 393 | """ 394 | Boolean that determines whether to use the Lipschitz constant. 395 | 396 | By default this is set to False, which means the adapted SafeOpt 397 | algorithm is used, which relies on the GP confidence intervals directly. 398 | If set to True, the `self.lipschitz` parameter is used to compute 399 | the safe and expander sets.
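For example (illustrative; requires `lipschitz` to have been passed at construction)::

    opt = SafeOpt(gp, parameter_set, fmin=[0.], lipschitz=1.)
    opt.use_lipschitz = False  # switch to the confidence-interval variant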
400 | """ 401 | return self._use_lipschitz 402 | 403 | @use_lipschitz.setter 404 | def use_lipschitz(self, value): 405 | if value and self.liptschitz is None: 406 | raise ValueError('Lipschitz constant not defined') 407 | self._use_lipschitz = value 408 | 409 | @property 410 | def parameter_set(self): 411 | """Discrete parameter samples for Bayesian optimization.""" 412 | return self._parameter_set 413 | 414 | @parameter_set.setter 415 | def parameter_set(self, parameter_set): 416 | self._parameter_set = parameter_set 417 | 418 | # Plotting bounds (min, max value 419 | self.bounds = list(zip(np.min(self._parameter_set, axis=0), 420 | np.max(self._parameter_set, axis=0))) 421 | self.num_samples = [len(np.unique(self._parameter_set[:, i])) 422 | for i in range(self._parameter_set.shape[1])] 423 | 424 | @property 425 | def context_fixed_inputs(self): 426 | """Return the fixed inputs for the current context.""" 427 | n = self.gp.input_dim - 1 428 | nc = self.num_contexts 429 | if nc > 0: 430 | contexts = self.inputs[0, -self.num_contexts:] 431 | return list(zip(range(n, n - nc, -1), contexts)) 432 | 433 | @property 434 | def context(self): 435 | """Return the current context variables.""" 436 | if self.num_contexts: 437 | return self.inputs[0, -self.num_contexts:] 438 | 439 | @context.setter 440 | def context(self, context): 441 | """Set the current context and update confidence intervals. 442 | 443 | Parameters 444 | ---------- 445 | context: ndarray 446 | New context that should be applied to the input parameters 447 | """ 448 | if self.num_contexts: 449 | if context is None: 450 | raise ValueError('Need to provide value for context.') 451 | self.inputs[:, -self.num_contexts:] = context 452 | 453 | def update_confidence_intervals(self, context=None): 454 | """Recompute the confidence intervals form the GP. 455 | 456 | Parameters 457 | ---------- 458 | context: ndarray 459 | Array that contains the context used to compute the sets 460 | """ 461 | beta = self.beta(self.t) 462 | 463 | # Update context to current setting 464 | self.context = context 465 | 466 | # Iterate over all functions 467 | for i in range(len(self.gps)): 468 | # Evaluate acquisition function 469 | mean, var = self.gps[i].predict_noiseless(self.inputs) 470 | 471 | mean = mean.squeeze() 472 | std_dev = np.sqrt(var.squeeze()) 473 | 474 | # Update confidence intervals 475 | self.Q[:, 2 * i] = mean - beta * std_dev 476 | self.Q[:, 2 * i + 1] = mean + beta * std_dev 477 | 478 | def compute_safe_set(self): 479 | """Compute only the safe set based on the current confidence bounds.""" 480 | # Update safe set 481 | self.S[:] = np.all(self.Q[:, ::2] > self.fmin, axis=1) 482 | 483 | def compute_sets(self, full_sets=False): 484 | """ 485 | Compute the safe set of points, based on current confidence bounds. 
486 | 487 | Parameters 488 | ---------- 489 | full_sets: boolean 490 | Whether to compute the full set of expanders or whether to omit 491 | computations that are not relevant for running SafeOpt. 492 | The algorithm itself only needs the most uncertain expander, 493 | so computing the full sets is only useful for plotting 494 | purposes. 495 | """ 496 | beta = self.beta(self.t) 497 | 498 | # Update safe set 499 | self.compute_safe_set() 500 | 501 | # Reference to confidence intervals 502 | l, u = self.Q[:, :2].T 503 | 504 | if not np.any(self.S): 505 | self.M[:] = False 506 | self.G[:] = False 507 | return 508 | 509 | # Set of possible maximizers 510 | # Maximizers: safe upper bound above best, safe lower bound 511 | self.M[:] = False 512 | self.M[self.S] = u[self.S] >= np.max(l[self.S]) 513 | max_var = np.max(u[self.M] - l[self.M]) / self.scaling[0] 514 | 515 | # Optimistic set of possible expanders 516 | l = self.Q[:, ::2] 517 | u = self.Q[:, 1::2] 518 | 519 | self.G[:] = False 520 | 521 | # For the run of the algorithm we do not need to calculate the 522 | # full set of potential expanders: 523 | # We can skip the ones already in M and ones that have lower 524 | # variance than the maximum variance in M, max_var or the threshold. 525 | # Amongst the remaining ones we only need to find the 526 | # potential expander with maximum variance 527 | if full_sets: 528 | s = self.S 529 | else: 530 | # skip points in M, they will already be evaluated 531 | s = np.logical_and(self.S, ~self.M) 532 | 533 | # Remove points with a variance that is too small 534 | s[s] = (np.max((u[s, :] - l[s, :]) / self.scaling, axis=1) > 535 | max_var) 536 | s[s] = np.any(u[s, :] - l[s, :] > self.threshold * beta, axis=1) 537 | 538 | if not np.any(s): 539 | # no need to evaluate any points as expanders in G, exit 540 | return 541 | 542 | def sort_generator(array): 543 | """Return the sorted array, largest element first.""" 544 | return array.argsort()[::-1] 545 | 546 | # set of safe expanders 547 | G_safe = np.zeros(np.count_nonzero(s), dtype=np.bool) 548 | 549 | if not full_sets: 550 | # Sort, element with largest variance first 551 | sort_index = sort_generator(np.max(u[s, :] - l[s, :], 552 | axis=1)) 553 | else: 554 | # Sort index is just an enumeration of all safe states 555 | sort_index = range(len(G_safe)) 556 | 557 | for index in sort_index: 558 | if self.use_lipschitz: 559 | # Distance between current index point and all other unsafe 560 | # points 561 | d = cdist(self.inputs[s, :][[index], :], 562 | self.inputs[~self.S, :]) 563 | 564 | # Check if expander for all GPs 565 | for i in range(len(self.gps)): 566 | # Skip evaluation if 'no' safety constraint 567 | if self.fmin[i] == -np.inf: 568 | continue 569 | # Safety: u - L * d >= fmin 570 | G_safe[index] =\ 571 | np.any(u[s, i][index] - self.liptschitz[i] * d >= 572 | self.fmin[i]) 573 | # Stop evaluating if not expander according to one 574 | # safety constraint 575 | if not G_safe[index]: 576 | break 577 | else: 578 | # Check if expander for all GPs 579 | for i, gp in enumerate(self.gps): 580 | # Skip evaluation if 'no' safety constraint 581 | if self.fmin[i] == -np.inf: 582 | continue 583 | 584 | # Add safe point with its max possible value to the gp 585 | self._add_data_point(gp=gp, 586 | x=self.parameter_set[s, :][index, :], 587 | y=u[s, i][index], 588 | context=self.context) 589 | 590 | # Prediction of previously unsafe points based on that 591 | mean2, var2 = gp.predict_noiseless(self.inputs[~self.S]) 592 | 593 | # Remove the fake data point from the
GP again 594 | self._remove_last_data_point(gp=gp) 595 | 596 | mean2 = mean2.squeeze() 597 | var2 = var2.squeeze() 598 | l2 = mean2 - beta * np.sqrt(var2) 599 | 600 | # If any unsafe lower bound is suddenly above fmin then 601 | # the point is an expander 602 | G_safe[index] = np.any(l2 >= self.fmin[i]) 603 | 604 | # Break if one safety GP is not an expander 605 | if not G_safe[index]: 606 | break 607 | 608 | # Since we sorted by uncertainty and only the most 609 | # uncertain element gets picked by SafeOpt anyways, we can 610 | # stop after we found the first one 611 | if G_safe[index] and not full_sets: 612 | break 613 | 614 | # Update safe set (if full_sets is False this is at most one point) 615 | self.G[s] = G_safe 616 | 617 | def get_new_query_point(self, ucb=False): 618 | """ 619 | Compute a new point at which to evaluate the function. 620 | 621 | Parameters 622 | ---------- 623 | ucb: bool 624 | If True the safe-ucb criterion is used instead. 625 | 626 | Returns 627 | ------- 628 | x: np.array 629 | The next parameters that should be evaluated. 630 | """ 631 | if not np.any(self.S): 632 | raise EnvironmentError('There are no safe points to evaluate.') 633 | 634 | if ucb: 635 | max_id = np.argmax(self.Q[self.S, 1]) 636 | x = self.inputs[self.S, :][max_id, :] 637 | else: 638 | # Get lower and upper bounds 639 | l = self.Q[:, ::2] 640 | u = self.Q[:, 1::2] 641 | 642 | MG = np.logical_or(self.M, self.G) 643 | value = np.max((u[MG] - l[MG]) / self.scaling, axis=1) 644 | x = self.inputs[MG, :][np.argmax(value), :] 645 | 646 | if self.num_contexts: 647 | return x[:-self.num_contexts] 648 | else: 649 | return x 650 | 651 | def optimize(self, context=None, ucb=False): 652 | """Run Safe Bayesian optimization and get the next parameters. 653 | 654 | Parameters 655 | ---------- 656 | context: ndarray 657 | A vector containing the current context 658 | ucb: bool 659 | If True the safe-ucb criterion is used instead. 660 | 661 | Returns 662 | ------- 663 | x: np.array 664 | The next parameters that should be evaluated. 665 | """ 666 | # Update confidence intervals based on current estimate 667 | self.update_confidence_intervals(context=context) 668 | 669 | # Update the sets 670 | if ucb: 671 | self.compute_safe_set() 672 | else: 673 | self.compute_sets() 674 | 675 | return self.get_new_query_point(ucb=ucb) 676 | 677 | def get_maximum(self, context=None): 678 | """ 679 | Return the current estimate for the maximum. 680 | 681 | Parameters 682 | ---------- 683 | context: ndarray 684 | A vector containing the current context 685 | 686 | Returns 687 | ------- 688 | x : ndarray 689 | Location of the maximum 690 | y : 0darray 691 | Maximum value 692 | 693 | Notes 694 | ----- 695 | Uses the current context and confidence intervals! 696 | Run update_confidence_intervals first if you recently added a new data 697 | point. 698 | """ 699 | self.update_confidence_intervals(context=context) 700 | 701 | # Compute the safe set (that's cheap anyways) 702 | self.compute_safe_set() 703 | 704 | # Return nothing if there are no safe points 705 | if not np.any(self.S): 706 | return None 707 | 708 | l = self.Q[self.S, 0] 709 | 710 | max_id = np.argmax(l) 711 | return (self.inputs[self.S, :][max_id, :-self.num_contexts or None], 712 | l[max_id]) 713 | 714 | 715 | class SafeOptSwarm(GaussianProcessOptimization): 716 | """SafeOpt for larger dimensions using a Swarm Optimization heuristic. 717 | 718 | Note that it doesn't support the use of a Lipschitz constant or contextual 719 | optimization.
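Instead of evaluating a fixed discretization of the parameter space, candidate points are generated adaptively by separate particle swarms that estimate the greedy lower bound, the maximizers, and the expanders.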
720 | 721 | You can set your logging level to INFO to get more insights on the 722 | optimization process. 723 | 724 | Parameters 725 | ---------- 726 | gp: GPy Gaussian process 727 | A Gaussian process which is initialized with safe, initial data points. 728 | If a list of GPs then the first one is the value, while all the 729 | other ones are safety constraints. 730 | fmin: list of floats 731 | Safety threshold for the function value. If multiple safety constraints 732 | are used this can also be a list of floats (the first one is always 733 | the one for the values, can be set to None if not wanted) 734 | bounds: pair of floats or list of pairs of floats 735 | If a list is given, then each pair represents the lower/upper bound in 736 | each dimension. Otherwise, we assume the same bounds for all 737 | dimensions. This is mostly important for plotting or to restrict 738 | particles to a certain domain. 739 | beta: float or callable 740 | A constant or a function of the time step that scales the confidence 741 | interval of the acquisition function. 742 | threshold: float or list of floats 743 | The algorithm will not try to expand any points that are below this 744 | threshold. This makes the algorithm stop expanding points eventually. 745 | If a list, this represents the stopping criterion for all the gps. 746 | This ignores the scaling factor. 747 | scaling: list of floats or "auto" 748 | A list used to scale the GP uncertainties to compensate for 749 | different input sizes. This should be set to the maximal variance of 750 | each kernel. You should probably set this to "auto" unless your kernel 751 | is non-stationary. 752 | swarm_size: int 753 | The number of particles in each of the optimization swarms 754 | 755 | Examples 756 | -------- 757 | >>> from safeopt import SafeOptSwarm 758 | >>> import GPy 759 | >>> import numpy as np 760 | 761 | Define a Gaussian process prior over the performance 762 | 763 | >>> x = np.array([[0.]]) 764 | >>> y = np.array([[1.]]) 765 | >>> gp = GPy.models.GPRegression(x, y, noise_var=0.01**2) 766 | 767 | Initialize the Bayesian optimization and get new parameters to evaluate 768 | 769 | >>> opt = SafeOptSwarm(gp, fmin=[0.], bounds=[[-1., 1.]]) 770 | >>> next_parameters = opt.optimize() 771 | 772 | Add a new data point with the parameters and the performance to the GP. The 773 | performance has normally been determined through an external function call.
774 | 775 | >>> performance = np.array([[1.]]) 776 | >>> opt.add_new_data_point(next_parameters, performance) 777 | 778 | """ 779 | 780 | def __init__(self, gp, fmin, bounds, beta=2, scaling='auto', threshold=0, 781 | swarm_size=20): 782 | """Initialization, see `SafeOptSwarm`.""" 783 | super(SafeOptSwarm, self).__init__(gp, 784 | fmin=fmin, 785 | beta=beta, 786 | num_contexts=0, 787 | threshold=threshold, 788 | scaling=scaling) 789 | 790 | # Safe set 791 | self.S = np.asarray(self.gps[0].X) 792 | 793 | self.swarm_size = swarm_size 794 | self.max_iters = 100 # number of swarm iterations 795 | 796 | if not isinstance(bounds, list): 797 | self.bounds = [bounds] * self.S.shape[1] 798 | else: 799 | self.bounds = bounds 800 | 801 | # These are estimates of the best lower bound, and its location 802 | self.best_lower_bound = -np.inf 803 | self.greedy_point = self.S[0, :] 804 | 805 | self.optimal_velocities = self.optimize_particle_velocity() 806 | 807 | swarm_types = ['greedy', 'maximizers', 'expanders'] 808 | 809 | self.swarms = {swarm_type: 810 | SwarmOptimization( 811 | swarm_size, 812 | self.optimal_velocities, 813 | partial(self._compute_particle_fitness, 814 | swarm_type), 815 | bounds=self.bounds) 816 | for swarm_type in swarm_types} 817 | 818 | def optimize_particle_velocity(self): 819 | """Optimize the velocities of the particles. 820 | 821 | Note that this only works well for stationary kernels and constant mean 822 | functions. Otherwise the velocity depends on the position! 823 | 824 | Returns 825 | ------- 826 | velocities: ndarray 827 | The estimated optimal velocities in each direction. 828 | """ 829 | parameters = np.zeros((1, self.gp.input_dim), dtype=np.float) 830 | velocities = np.empty((len(self.gps), self.gp.input_dim), 831 | dtype=np.float) 832 | 833 | for i, gp in enumerate(self.gps): 834 | for j in range(self.gp.input_dim): 835 | tmp_velocities = np.zeros((1, self.gp.input_dim), 836 | dtype=np.float) 837 | 838 | # lower and upper bounds on velocities 839 | upper_velocity = 1000. 840 | lower_velocity = 0. 841 | 842 | # Binary search over optimal velocities 843 | while True: 844 | mid = (upper_velocity + lower_velocity) / 2 845 | tmp_velocities[0, j] = mid 846 | 847 | kernel_matrix = gp.kern.K(parameters, tmp_velocities) 848 | covariance = kernel_matrix.squeeze() / self.scaling[i] ** 2 849 | 850 | # Make sure the correlation is in the sweet spot 851 | velocity_enough = covariance > 0.94 852 | not_too_fast = covariance < 0.95 853 | 854 | if not_too_fast: 855 | upper_velocity = mid 856 | elif velocity_enough: 857 | lower_velocity = mid 858 | 859 | if ((not_too_fast and velocity_enough) or 860 | upper_velocity - lower_velocity < 1e-5): 861 | break 862 | 863 | # Store optimal velocity 864 | velocities[i, j] = mid 865 | 866 | # Select the minimal velocity (for the toughest safety constraint) 867 | velocities = np.min(velocities, axis=0) 868 | 869 | # Scale for number of parameters (this might not be so clever if they 870 | # are all independent, additive kernels). 871 | velocities /= np.sqrt(self.gp.input_dim) 872 | return velocities 873 | 874 | def _compute_penalty(self, slack): 875 | """Return the penalty associated to a constraint violation. 876 | 877 | The penalty is a piecewise linear function that is nonzero only if the 878 | safety constraints are violated. This penalty encourages particles to 879 | stay within the safe set. 880 | 881 | Parameters 882 | ---------- 883 | slack: ndarray 884 | A vector corresponding to how much the constraint was violated. 
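In `_compute_particle_fitness` this is computed as ``slack = (lower_bound - fmin) / scaling``, so negative entries correspond to violated constraints.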
885 | 886 | Returns 887 | ------- 888 | penalties : ndarray 889 | The value of the penalties 890 | """ 891 | penalties = np.atleast_1d(np.clip(slack, None, 0)) 892 | 893 | penalties[(slack < 0) & (slack > -0.001)] *= 2 894 | penalties[(slack <= -0.001) & (slack > -0.1)] *= 5 895 | penalties[(slack <= -0.1) & (slack > -1)] *= 10 896 | 897 | slack_id = slack < -1 898 | penalties[slack_id] = -300 * penalties[slack_id] ** 2 899 | return penalties 900 | 901 | def _compute_particle_fitness(self, swarm_type, particles): 902 | """ 903 | Return the value of the particles and the safety information. 904 | 905 | Parameters 906 | ---------- 907 | particles : ndarray 908 | A vector containing the coordinates of the particles 909 | swarm_type : string 910 | A string corresponding to the swarm type. It can be any of the 911 | following strings: 912 | 913 | * 'greedy' : Optimal value (best lower bound). 914 | * 'expanders' : Expanders (lower bound close to constraint) 915 | * 'maximizers' : Maximizers (upper bound better than the best lower bound) 916 | * 'safe_set' : Only check the safety of the particles 917 | Returns 918 | ------- 919 | values : ndarray 920 | The values of the particles 921 | global_safe : ndarray 922 | A boolean mask indicating safety status of all particles 923 | (note that in the case of a greedy swarm, this is not computed and 924 | we return a True mask) 925 | """ 926 | beta = self.beta(self.t) 927 | 928 | # classify the particle's function values 929 | mean, var = self.gps[0].predict_noiseless(particles) 930 | mean = mean.squeeze() 931 | std_dev = np.sqrt(var.squeeze()) 932 | 933 | # compute the confidence interval 934 | lower_bound = np.atleast_1d(mean - beta * std_dev) 935 | upper_bound = np.atleast_1d(mean + beta * std_dev) 936 | 937 | # the greedy swarm optimizes for the lower bound 938 | if swarm_type == 'greedy': 939 | return lower_bound, np.broadcast_to(True, len(lower_bound)) 940 | 941 | # value we are optimizing for.
Expanders and maximizers seek high 942 | # variance points 943 | values = std_dev / self.scaling[0] 944 | 945 | # determine which swarm type we are computing the fitness for 946 | is_safe = swarm_type == 'safe_set' 947 | is_expander = swarm_type == 'expanders' 948 | is_maximizer = swarm_type == 'maximizers' 949 | 950 | if is_safe: 951 | interest_function = None 952 | else: 953 | if is_expander: 954 | # For expanders, the interest function is updated depending on 955 | # the lower bounds 956 | interest_function = (len(self.gps) * 957 | np.ones(np.shape(values), dtype=np.float)) 958 | elif is_maximizer: 959 | improvement = upper_bound - self.best_lower_bound 960 | interest_function = expit(10 * improvement / self.scaling[0]) 961 | else: 962 | # unknown particle type (shouldn't happen) 963 | raise AssertionError("Invalid swarm type") 964 | 965 | # boolean mask that tells if the particles are safe according to all gps 966 | global_safe = np.ones(particles.shape[0], dtype=np.bool) 967 | total_penalty = np.zeros(particles.shape[0], dtype=np.float) 968 | 969 | for i, (gp, scaling) in enumerate(zip(self.gps, self.scaling)): 970 | # Only recompute confidence intervals for constraints 971 | if i > 0: 972 | # classify using the current GP 973 | mean, var = gp.predict_noiseless(particles) 974 | mean = mean.squeeze() 975 | std_dev = np.sqrt(var.squeeze()) 976 | lower_bound = mean - beta * std_dev 977 | 978 | values = np.maximum(values, std_dev / scaling) 979 | 980 | # if the current GP has no safety constraint, we skip it 981 | if self.fmin[i] == -np.inf: 982 | continue 983 | 984 | slack = np.atleast_1d(lower_bound - self.fmin[i]) 985 | 986 | # computing penalties 987 | global_safe &= slack >= 0 988 | 989 | # Skip cost update for safety evaluation 990 | if is_safe: 991 | continue 992 | 993 | # Normalize the slack somewhat 994 | slack /= scaling 995 | 996 | total_penalty += self._compute_penalty(slack) 997 | 998 | if is_expander: 999 | # check if the particles are expanders for the current gp 1000 | interest_function *= norm.pdf(slack, scale=0.2) 1001 | 1002 | # this swarm type is only interested in knowing whether the particles 1003 | # are safe. 1004 | if is_safe: 1005 | return lower_bound, global_safe 1006 | 1007 | # add penalty 1008 | values += total_penalty 1009 | 1010 | # apply the mask for current interest function 1011 | values *= interest_function 1012 | 1013 | return values, global_safe 1014 | 1015 | def get_new_query_point(self, swarm_type): 1016 | """ 1017 | Compute a new point at which to evaluate the function. 1018 | 1019 | This function relies on a Particle Swarm Optimization (PSO) to find the 1020 | optimum of the objective function (which depends on the swarm type). 1021 | 1022 | Parameters 1023 | ---------- 1024 | swarm_type: string 1025 | This parameter controls the type of point that should be found. It 1026 | can take one of the following values: 1027 | 1028 | * 'expanders' : find a point that increases the safe set 1029 | * 'maximizers' : find a point that maximizes the objective 1030 | function within the safe set. 1031 | * 'greedy' : retrieve an estimate of the best currently known 1032 | parameters (best lower bound). 1033 | 1034 | Returns 1035 | ------- 1036 | global_best: np.array 1037 | The next parameters that should be evaluated. 1038 | max_std_dev: float 1039 | The current standard deviation in the point to be evaluated.
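Notes
-----
Each swarm is initialized with particles drawn from the current safe set and run for `max_iters` iterations; for the non-greedy swarms, best positions that are sufficiently uncorrelated with the existing safe points are afterwards appended to the safe set.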
1040 | """ 1041 | beta = self.beta(self.t) 1042 | safe_size, input_dim = self.S.shape 1043 | 1044 | # Make sure the safe set is still safe 1045 | _, safe = self._compute_particle_fitness('safe_set', self.S) 1046 | 1047 | num_safe = safe.sum() 1048 | if num_safe == 0: 1049 | raise RuntimeError('The safe set is empty.') 1050 | 1051 | # Prune safe set if points in the discrete approximation of the safe 1052 | # ended up being unsafe, but never prune below swarm size to avoid 1053 | # empty safe set. 1054 | if num_safe >= self.swarm_size and num_safe != len(safe): 1055 | # Warn that the safe set has decreased 1056 | logging.warning("Warning: {} unsafe points removed. " 1057 | "Model might be violated" 1058 | .format(np.count_nonzero(~safe))) 1059 | 1060 | # Remove unsafe points 1061 | self.S = self.S[safe] 1062 | safe_size = self.S.shape[0] 1063 | 1064 | # initialize particles 1065 | if swarm_type == 'greedy': 1066 | # we pick particles u.a.r in the safe set 1067 | random_id = np.random.randint(safe_size, size=self.swarm_size - 3) 1068 | best_sampled_point = np.argmax(self.gp.Y) 1069 | 1070 | # Particles are drawn at random from the safe set, but include the 1071 | # - Previous greedy estimate 1072 | # - last point 1073 | # - best sampled point 1074 | particles = np.vstack((self.S[random_id, :], 1075 | self.greedy_point, 1076 | self.gp.X[-1, :], 1077 | self.gp.X[best_sampled_point])) 1078 | else: 1079 | # we pick particles u.a.r in the safe set 1080 | random_id = np.random.randint(safe_size, size=self.swarm_size) 1081 | particles = self.S[random_id, :] 1082 | 1083 | # Run the swarm optimization 1084 | swarm = self.swarms[swarm_type] 1085 | swarm.init_swarm(particles) 1086 | swarm.run_swarm(self.max_iters) 1087 | 1088 | # expand safe set 1089 | if swarm_type != 'greedy': 1090 | num_added = 0 1091 | 1092 | # compute correlation between new candidates and current safe set 1093 | covariance = self.gp.kern.K(swarm.best_positions, 1094 | np.vstack((self.S, 1095 | swarm.best_positions))) 1096 | covariance /= self.scaling[0] ** 2 1097 | 1098 | initial_safe = len(self.S) 1099 | n, m = np.shape(covariance) 1100 | 1101 | # this mask keeps track of the points that we have added in the 1102 | # safe set to account for them when adding a new point 1103 | mask = np.zeros(m, dtype=np.bool) 1104 | mask[:initial_safe] = True 1105 | 1106 | for j in range(n): 1107 | # make sure correlation with old points is relatively low 1108 | if np.all(covariance[j, mask] <= 0.95): 1109 | self.S = np.vstack((self.S, swarm.best_positions[[j], :])) 1110 | num_added += 1 1111 | mask[initial_safe + j] = True 1112 | 1113 | logging.debug("At the end of swarm {}, {} points were appended to" 1114 | " the safeset".format(swarm_type, num_added)) 1115 | else: 1116 | # check whether we found a better estimate of the lower bound 1117 | mean, var = self.gp.predict_noiseless(self.greedy_point[None, :]) 1118 | mean = mean.squeeze() 1119 | std_dev = np.sqrt(var.squeeze()) 1120 | 1121 | lower_bound = mean - beta * std_dev 1122 | if lower_bound < np.max(swarm.best_values): 1123 | self.greedy_point = swarm.global_best.copy() 1124 | 1125 | if swarm_type == 'greedy': 1126 | return swarm.global_best.copy(), np.max(swarm.best_values) 1127 | 1128 | # compute the variance of the point picked 1129 | var = np.empty(len(self.gps), dtype=np.float) 1130 | # max_std_dev = 0. 
1131 | for i, (gp, scaling) in enumerate(zip(self.gps, self.scaling)): 1132 | var[i] = gp.predict_noiseless(swarm.global_best[None, :])[1] 1133 | 1134 | return swarm.global_best, np.sqrt(var) 1135 | 1136 | def optimize(self, ucb=False): 1137 | """Run Safe Bayesian optimization and get the next parameters. 1138 | 1139 | Parameters 1140 | ---------- 1141 | ucb: bool 1142 | Whether to only compute maximizers (best upper bound). 1143 | 1144 | Returns 1145 | ------- 1146 | x: np.array 1147 | The next parameters that should be evaluated. 1148 | """ 1149 | # compute estimate of the lower bound 1150 | self.greedy, self.best_lower_bound = self.get_new_query_point('greedy') 1151 | 1152 | # Run both swarms: 1153 | x_maxi, std_maxi = self.get_new_query_point('maximizers') 1154 | if ucb: 1155 | logging.info('Using ucb criterion.') 1156 | return x_maxi 1157 | 1158 | x_exp, std_exp = self.get_new_query_point('expanders') 1159 | 1160 | # Remove expanders below threshold or without safety constraint. 1161 | std_exp[(std_exp < self.threshold) | (self.fmin == -np.inf)] = 0 1162 | 1163 | # Apply scaling 1164 | std_exp /= self.scaling 1165 | std_exp = np.max(std_exp) 1166 | 1167 | std_maxi = std_maxi[0] / self.scaling[0] 1168 | 1169 | logging.info("The best maximizer has std. dev. %f" % std_maxi) 1170 | logging.info("The best expander has std. dev. %f" % std_exp) 1171 | logging.info("The greedy estimate of lower bound has value %f" % 1172 | self.best_lower_bound) 1173 | 1174 | if std_maxi > std_exp: 1175 | return x_maxi 1176 | else: 1177 | return x_exp 1178 | 1179 | def get_maximum(self): 1180 | """ 1181 | Return the current estimate for the maximum. 1182 | 1183 | Returns 1184 | ------- 1185 | x : ndarray 1186 | Location of the maximum 1187 | y : 0darray 1188 | Maximum value 1189 | 1190 | """ 1191 | maxi = np.argmax(self.gp.Y) 1192 | return self.gp.X[maxi, :], self.gp.Y[maxi] 1193 | -------------------------------------------------------------------------------- /safeopt/swarm.py: -------------------------------------------------------------------------------- 1 | """ 2 | General class for constrained swarm optimization. 3 | 4 | Authors: - Felix Berkenkamp (befelix at inf dot ethz dot ch) 5 | - Nicolas Carion (nicolas dot carion at gmail dot com) 6 | """ 7 | 8 | from __future__ import print_function, absolute_import, division 9 | 10 | import numpy as np 11 | from builtins import range 12 | 13 | 14 | __all__ = ['SwarmOptimization'] 15 | 16 | 17 | class SwarmOptimization(object): 18 | """Constrained swarm optimization. 19 | 20 | Parameters 21 | ---------- 22 | swarm_size: int 23 | The number of particles 24 | velocity: ndarray 25 | The base velocities of particles for each dimension. 26 | fitness: callable 27 | A function that takes particles positions and returns two values. The 28 | first one corresponds to the fitness of the particle, while the second 29 | one is an array of booleans indicating whether the particle fulfills 30 | the constraints. 31 | bounds: list, optional 32 | A list of constraints to which particle exploration is limited. Of the 33 | form [(x1_min, x1_max), (x2_min, x2_max)...]. 
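A minimal usage sketch (the quadratic fitness and the bounds are purely illustrative)::

    import numpy as np

    def fitness(positions):
        values = -np.sum(positions ** 2, axis=1)  # maximum at the origin
        safe = np.ones(len(positions), dtype=bool)  # no constraints
        return values, safe

    swarm = SwarmOptimization(swarm_size=20, velocity=np.array([0.1]),
                              fitness=fitness, bounds=[(-1., 1.)])
    swarm.init_swarm(np.random.uniform(-1., 1., size=(20, 1)))
    swarm.run_swarm(max_iter=50)
    best = swarm.global_best  # best safe position found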
34 | """ 35 | 36 | def __init__(self, swarm_size, velocity, fitness, bounds=None): 37 | """Initialization, see `SwarmOptimization`.""" 38 | super(SwarmOptimization, self).__init__() 39 | 40 | self.c1 = self.c2 = 1 41 | self.fitness = fitness 42 | 43 | self.bounds = bounds 44 | if self.bounds is not None: 45 | self.bounds = np.asarray(self.bounds) 46 | 47 | self.initial_inertia = 1.0 48 | self.final_inertia = 0.1 49 | self.velocity_scale = velocity 50 | 51 | self.ndim = len(velocity) 52 | self.swarm_size = swarm_size 53 | 54 | self.positions = np.empty((swarm_size, len(velocity)), dtype=np.float) 55 | self.velocities = np.empty_like(self.positions) 56 | 57 | self.best_positions = np.empty_like(self.positions) 58 | self.best_values = np.empty(len(self.best_positions), dtype=np.float) 59 | self.global_best = None 60 | 61 | @property 62 | def max_velocity(self): 63 | """Return the maximum allowed velocity of particles.""" 64 | return 10 * self.velocity_scale 65 | 66 | def init_swarm(self, positions): 67 | """Initialize the swarm. 68 | 69 | Parameters 70 | ---------- 71 | positions: ndarray 72 | The initial positions of the particles. 73 | """ 74 | self.positions = positions 75 | self.velocities = (np.random.rand(*self.velocities.shape) * 76 | self.velocity_scale) 77 | 78 | values, safe = self.fitness(self.positions) 79 | 80 | # Initialize best estimates 81 | self.best_positions[:] = self.positions 82 | self.best_values = values 83 | 84 | self.global_best = self.best_positions[np.argmax(values), :] 85 | 86 | def run_swarm(self, max_iter): 87 | """Let the swarm explore the parameter space. 88 | 89 | Parameters 90 | ---------- 91 | max_iter : int 92 | The number of iterations for which to run the swarm. 93 | """ 94 | # run the core swarm optimization 95 | inertia = self.initial_inertia 96 | inertia_step = (self.final_inertia - self.initial_inertia) / max_iter 97 | 98 | for _ in range(max_iter): 99 | # update velocities 100 | delta_global_best = self.global_best - self.positions 101 | delta_self_best = self.best_positions - self.positions 102 | 103 | # Random update vectors 104 | r = np.random.rand(2 * self.swarm_size, self.ndim) 105 | r1 = r[:self.swarm_size] 106 | r2 = r[self.swarm_size:] 107 | 108 | # Update the velocities 109 | self.velocities *= inertia 110 | self.velocities += ((self.c1 * r1 * delta_self_best + 111 | self.c2 * r2 * delta_global_best) / 112 | self.velocity_scale) 113 | 114 | inertia += inertia_step 115 | 116 | # clip 117 | # np.clip(velocities, -4, 4, out=velocities) 118 | np.clip(self.velocities, 119 | -self.max_velocity, 120 | self.max_velocity, 121 | out=self.velocities) 122 | 123 | # update position 124 | self.positions += self.velocities 125 | 126 | # Clip particles to domain 127 | if self.bounds is not None: 128 | np.clip(self.positions, 129 | self.bounds[:, 0], 130 | self.bounds[:, 1], 131 | out=self.positions) 132 | 133 | # compute fitness 134 | values, safe = self.fitness(self.positions) 135 | 136 | # find out which particles are improving 137 | update_set = values > self.best_values 138 | 139 | # update whenever safety and improvement are guaranteed 140 | update_set &= safe 141 | 142 | self.best_values[update_set] = values[update_set] 143 | self.best_positions[update_set] = self.positions[update_set] 144 | 145 | best_value_id = np.argmax(self.best_values) 146 | self.global_best = self.best_positions[best_value_id, :] 147 | -------------------------------------------------------------------------------- /safeopt/tests/test_gps.py: 
-------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | import mock 4 | import pytest 5 | 6 | import numpy as np 7 | import GPy 8 | from numpy.testing import assert_allclose 9 | 10 | from safeopt.gp_opt import GaussianProcessOptimization 11 | 12 | 13 | class TestGPOptimization(object): 14 | 15 | @pytest.fixture 16 | def gps(self): 17 | 18 | kernel1 = GPy.kern.RBF(1, variance=2) 19 | kernel2 = GPy.kern.Matern32(1, variance=4) 20 | 21 | gp1 = GPy.models.GPRegression(np.array([[0]]), np.array([[0]]), 22 | kernel=kernel1) 23 | gp2 = GPy.models.GPRegression(np.array([[0]]), np.array([[0]]), 24 | kernel=kernel2) 25 | return gp1, gp2 26 | 27 | def test_init(self, gps): 28 | """Test the initialization and beta functions.""" 29 | gp1, gp2 = gps 30 | 31 | opt = GaussianProcessOptimization(gp1, 32 | fmin=0, 33 | beta=2, 34 | num_contexts=1, 35 | threshold=0, 36 | scaling='auto') 37 | assert opt.beta(0) == 2 38 | 39 | opt = GaussianProcessOptimization(gp1, 40 | fmin=[0], 41 | beta=lambda x: 5, 42 | num_contexts=1, 43 | threshold=0, 44 | scaling='auto') 45 | 46 | assert opt.beta(10) == 5 47 | 48 | def test_multi_init(self, gps): 49 | """Test initialization with multiple GPs""" 50 | gp1, gp2 = gps 51 | 52 | opt = GaussianProcessOptimization([gp1, gp2], 53 | fmin=0, 54 | beta=2, 55 | num_contexts=1, 56 | threshold=0, 57 | scaling='auto') 58 | 59 | # Check scaling 60 | assert_allclose(opt.scaling, np.array([np.sqrt(2), np.sqrt(4)])) 61 | 62 | def test_scaling(self, gps): 63 | """Test the scaling argument.""" 64 | gp1, gp2 = gps 65 | 66 | pytest.raises(ValueError, GaussianProcessOptimization, [gp1, gp2], 2, 67 | scaling=[5]) 68 | 69 | opt = GaussianProcessOptimization([gp1, gp2], 70 | fmin=[1, 0], 71 | beta=2, 72 | num_contexts=1, 73 | threshold=0, 74 | scaling=[1, 2]) 75 | assert_allclose(opt.scaling, np.array([1, 2])) 76 | 77 | def test_data_adding(self, gps): 78 | """Test adding data points.""" 79 | gp1, gp2 = gps 80 | 81 | # Test simple 1D case 82 | gp1.set_XY(np.array([[0.]]), np.array([[1.]])) 83 | opt = GaussianProcessOptimization(gp1, 0) 84 | opt.add_new_data_point(2, 3) 85 | 86 | x, y = opt.data 87 | assert_allclose(x, np.array([[0], [2]])) 88 | assert_allclose(y, np.array([[1], [3]])) 89 | 90 | # Test 2D case 91 | gp1.set_XY(np.array([[0.]]), np.array([[1.]])) 92 | gp2.set_XY(np.array([[0.]]), np.array([[11.]])) 93 | 94 | opt = GaussianProcessOptimization([gp1, gp2], [0, 1]) 95 | opt.add_new_data_point(2, [2, 3]) 96 | x, y = opt.data 97 | assert_allclose(x, np.array([[0], [2]])) 98 | assert_allclose(y, np.array([[1, 11], [2, 3]])) 99 | 100 | # Test adding NAN data 101 | opt.add_new_data_point(3, [2, np.nan]) 102 | 103 | assert_allclose(opt.x, np.array([[0], [2], [3]])) 104 | assert_allclose(opt.y, np.array([[1, 11], [2, 3], [2, np.nan]])) 105 | 106 | for i, gp in enumerate(opt.gps): 107 | not_nan = ~np.isnan(opt.y[:, i]) 108 | assert_allclose(gp.X, opt.x[not_nan, :]) 109 | assert_allclose(gp.Y[:, 0], opt.y[not_nan, i]) 110 | 111 | # Test removing data 112 | opt.remove_last_data_point() 113 | 114 | assert_allclose(opt.x, np.array([[0], [2]])) 115 | assert_allclose(opt.y, np.array([[1, 11], [2, 3]])) 116 | 117 | for i, gp in enumerate(opt.gps): 118 | not_nan = ~np.isnan(opt.y[:, i]) 119 | assert_allclose(gp.X, opt.x[not_nan, :]) 120 | assert_allclose(gp.Y[:, 0], opt.y[not_nan, i]) 121 | 122 | def test_contexts(self): 123 | """Test contexts and adding data.""" 124 | kernel1 = GPy.kern.RBF(2, variance=2) 125 | kernel2 = 
GPy.kern.Matern32(2, variance=4) 126 | 127 | gp1 = GPy.models.GPRegression(np.array([[0, 0]]), np.array([[5]]), 128 | kernel=kernel1) 129 | gp2 = GPy.models.GPRegression(np.array([[0, 0]]), np.array([[6]]), 130 | kernel=kernel2) 131 | 132 | opt = GaussianProcessOptimization([gp1, gp2], 133 | fmin=[0, 0], 134 | num_contexts=1) 135 | opt.add_new_data_point(1, [3, 4], context=2) 136 | 137 | assert_allclose(opt.x, np.array([[0, 0], [1, 2]])) 138 | assert_allclose(opt.y, np.array([[5, 6], [3, 4]])) 139 | 140 | for i, gp in enumerate(opt.gps): 141 | assert_allclose(gp.X, opt.x) 142 | assert_allclose(gp.Y[:, 0], opt.y[:, i]) 143 | -------------------------------------------------------------------------------- /safeopt/tests/test_swarm.py: -------------------------------------------------------------------------------- 1 | """Test the SafeOptSwarm method.""" 2 | 3 | from __future__ import division 4 | 5 | 6 | import GPy 7 | import numpy as np 8 | import pytest 9 | 10 | from safeopt import SafeOptSwarm 11 | 12 | 13 | def test_empty_safe_set(): 14 | """Make sure an error is raised with an empty safe set.""" 15 | 16 | x = np.array([[0.]]) 17 | y = np.array([[-1.]]) 18 | gp = GPy.models.GPRegression(x, y, noise_var=0.01 ** 2) 19 | 20 | opt = SafeOptSwarm(gp, fmin=[0.], bounds=[[-1., 1.]]) 21 | with pytest.raises(RuntimeError): 22 | opt.optimize() 23 | -------------------------------------------------------------------------------- /safeopt/utilities.py: -------------------------------------------------------------------------------- 1 | """ 2 | Utilities for the safeopt library (e.g., sampling). 3 | 4 | Author: Felix Berkenkamp (befelix at inf dot ethz dot ch) 5 | """ 6 | 7 | from __future__ import print_function, absolute_import, division 8 | 9 | from collections import Sequence # isinstance(...,Sequence) 10 | import numpy as np 11 | import scipy as sp 12 | import matplotlib.pyplot as plt 13 | from mpl_toolkits.mplot3d import Axes3D # Create 3D axes 14 | from matplotlib import cm # 3D plot colors 15 | 16 | 17 | __all__ = ['linearly_spaced_combinations', 'sample_gp_function', 'plot_2d_gp', 18 | 'plot_3d_gp', 'plot_contour_gp'] 19 | 20 | 21 | def linearly_spaced_combinations(bounds, num_samples): 22 | """ 23 | Return 2-D array with all linearly spaced combinations with the bounds. 24 | 25 | Parameters 26 | ---------- 27 | bounds: sequence of tuples 28 | The bounds for the variables, [(x1_min, x1_max), (x2_min, x2_max), ...] 29 | num_samples: integer or array_like 30 | Number of samples to use for every dimension. Can be a constant if 31 | the same number should be used for all, or an array to fine-tune 32 | precision. Total number of data points is num_samples ** len(bounds). 33 | 34 | Returns 35 | ------- 36 | combinations: 2-d array 37 | A 2-d array. If d = len(bounds) and l = prod(num_samples) then it 38 | is of size l x d, that is, every row contains one combination of 39 | inputs.
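For example::

    linearly_spaced_combinations([(0., 1.), (0., 1.)], 2)
    # -> array([[0., 0.],
    #           [1., 0.],
    #           [0., 1.],
    #           [1., 1.]])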
40 | """ 41 | num_vars = len(bounds) 42 | 43 | if not isinstance(num_samples, Sequence): 44 | num_samples = [num_samples] * num_vars 45 | 46 | if len(bounds) == 1: 47 | return np.linspace(bounds[0][0], bounds[0][1], num_samples[0])[:, None] 48 | 49 | # Create linearly spaced test inputs 50 | inputs = [np.linspace(b[0], b[1], n) for b, n in zip(bounds, 51 | num_samples)] 52 | 53 | # Convert to 2-D array 54 | return np.array([x.ravel() for x in np.meshgrid(*inputs)]).T 55 | 56 | 57 | def sample_gp_function(kernel, bounds, noise_var, num_samples, 58 | interpolation='kernel', mean_function=None): 59 | """ 60 | Sample a function from a gp with corresponding kernel within its bounds. 61 | 62 | Parameters 63 | ---------- 64 | kernel: instance of GPy.kern.* 65 | bounds: list of tuples 66 | [(x1_min, x1_max), (x2_min, x2_max), ...] 67 | noise_var: float 68 | Variance of the observation noise of the GP function 69 | num_samples: int or list 70 | If integer draws the corresponding number of samples in all 71 | dimensions and test all possible input combinations. If a list then 72 | the list entries correspond to the number of linearly spaced samples of 73 | the corresponding input 74 | interpolation: string 75 | If 'linear' interpolate linearly between samples, if 'kernel' use the 76 | corresponding mean RKHS-function of the GP. 77 | mean_function: callable 78 | Mean of the sample function 79 | 80 | Returns 81 | ------- 82 | function: object 83 | function(x, noise=True) 84 | A function that takes as inputs new locations x to be evaluated and 85 | returns the corresponding noisy function values. If noise=False is 86 | set the true function values are returned (useful for plotting). 87 | """ 88 | inputs = linearly_spaced_combinations(bounds, num_samples) 89 | cov = kernel.K(inputs) + np.eye(inputs.shape[0]) * 1e-6 90 | output = np.random.multivariate_normal(np.zeros(inputs.shape[0]), 91 | cov) 92 | 93 | if interpolation == 'linear': 94 | 95 | def evaluate_gp_function_linear(x, noise=True): 96 | """ 97 | Evaluate the GP sample function with linear interpolation. 98 | 99 | Parameters 100 | ---------- 101 | x: np.array 102 | 2D array with inputs 103 | noise: bool 104 | Whether to include prediction noise 105 | """ 106 | x = np.atleast_2d(x) 107 | y = sp.interpolate.griddata(inputs, output, x, method='linear') 108 | 109 | # Work around weird dimension squishing in griddata 110 | y = np.atleast_2d(y.squeeze()).T 111 | 112 | if mean_function is not None: 113 | y += mean_function(x) 114 | if noise: 115 | y += np.sqrt(noise_var) * np.random.randn(x.shape[0], 1) 116 | return y 117 | return evaluate_gp_function_linear 118 | 119 | elif interpolation == 'kernel': 120 | cho_factor = sp.linalg.cho_factor(cov) 121 | alpha = sp.linalg.cho_solve(cho_factor, output) 122 | 123 | def evaluate_gp_function_kernel(x, noise=True): 124 | """ 125 | Evaluate the GP sample function with kernel interpolation. 
126 | 
127 |             Parameters
128 |             ----------
129 |             x: np.array
130 |                 2D array with inputs
131 |             noise: bool
132 |                 Whether to include prediction noise
133 |             """
134 |             x = np.atleast_2d(x)
135 |             y = kernel.K(x, inputs).dot(alpha)
136 |             y = y[:, None]
137 |             if mean_function is not None:
138 |                 y += mean_function(x)
139 |             if noise:
140 |                 y += np.sqrt(noise_var) * np.random.randn(x.shape[0], 1)
141 |             return y
142 | 
143 |         return evaluate_gp_function_kernel
144 | 
145 | 
146 | def plot_2d_gp(gp, inputs, predictions=None, figure=None, axis=None,
147 |                fixed_inputs=None, beta=3, fmin=None, **kwargs):
148 |     """
149 |     Plot a 2D GP with uncertainty.
150 | 
151 |     Parameters
152 |     ----------
153 |     gp: Instance of GPy.models.GPRegression
154 |     inputs: 2darray
155 |         The input parameters at which the GP is to be evaluated
156 |     predictions: ndarray
157 |         Can be used to manually pass the GP predictions, set to None to
158 |         use the gp directly. Is of the form (mean, variance)
159 |     figure: matplotlib figure
160 |         The figure on which to draw (ignored if axis is provided)
161 |     axis: matplotlib axis
162 |         The axis on which to draw
163 |     fixed_inputs: list
164 |         A list containing the fixed inputs and their corresponding
165 |         values, e.g., [(0, 3.2), (4, -2.43)]. Set the value to None if
166 |         it's not fixed, but should not be a plotted axis either
167 |     beta: float
168 |         The confidence interval used
169 |     fmin : float
170 |         The safety threshold value.
171 | 
172 |     Returns
173 |     -------
174 |     axis
175 |     """
176 |     if fixed_inputs is None:
177 |         if gp.kern.input_dim > 1:
178 |             raise NotImplementedError('This only works for 1D inputs')
179 |         fixed_inputs = []
180 |     elif gp.kern.input_dim - len(fixed_inputs) != 1:
181 |         raise NotImplementedError('This only works for 1D inputs')
182 | 
183 |     ms = kwargs.pop('ms', 10)
184 |     mew = kwargs.pop('mew', 3)
185 |     point_color = kwargs.pop('point_color', 'k')
186 | 
187 |     if axis is None:
188 |         if figure is None:
189 |             figure = plt.figure()
190 |             axis = figure.gca()
191 |         else:
192 |             axis = figure.gca()
193 | 
194 |     # Get a list of unfixed inputs to plot
195 |     unfixed = list(range(gp.kern.input_dim))
196 |     for dim, val in fixed_inputs:
197 |         if val is not None:
198 |             inputs[:, dim] = val
199 |         unfixed.remove(dim)
200 | 
201 |     # Compute GP predictions if not provided
202 |     if predictions is None:
203 |         mean, var = gp._raw_predict(inputs)
204 |     else:
205 |         mean, var = predictions
206 | 
207 |     output = mean.squeeze()
208 |     std_dev = beta * np.sqrt(var.squeeze())
209 | 
210 |     axis.fill_between(inputs[:, unfixed[0]],
211 |                       output - std_dev,
212 |                       output + std_dev,
213 |                       facecolor='blue',
214 |                       alpha=0.3)
215 | 
216 |     axis.plot(inputs[:, unfixed[0]], output, **kwargs)
217 |     axis.scatter(gp.X[:-1, unfixed[0]], gp.Y[:-1, 0], s=20 * ms,
218 |                  marker='x', linewidths=mew, color=point_color)
219 |     axis.scatter(gp.X[-1, unfixed[0]], gp.Y[-1, 0], s=20 * ms,
220 |                  marker='x', linewidths=mew, color='r')
221 |     axis.set_xlim([np.min(inputs[:, unfixed[0]]),
222 |                    np.max(inputs[:, unfixed[0]])])
223 | 
224 |     if fmin is not None:
225 |         axis.plot(inputs[[0, -1], unfixed[0]], [fmin, fmin], 'k--')
226 | 
227 |     return axis
228 | 
229 | 
230 | def plot_3d_gp(gp, inputs, predictions=None, figure=None, axis=None,
231 |                fixed_inputs=None, beta=3, **kwargs):
232 |     """
233 |     Plot a 3D GP with uncertainty.
234 | 
235 |     Parameters
236 |     ----------
237 |     gp: Instance of GPy.models.GPRegression
238 |     inputs: 2darray
239 |         The input parameters at which the GP is to be evaluated
240 |     predictions: ndarray
241 |         Can be used to manually pass the GP predictions, set to None to
242 |         use the gp directly. Is of the form [mean, variance]
243 |     figure: matplotlib figure
244 |         The figure on which to draw (ignored if axis is provided)
245 |     axis: matplotlib axis
246 |         The axis on which to draw
247 |     fixed_inputs: list
248 |         A list containing the fixed inputs and their corresponding
249 |         values, e.g., [(0, 3.2), (4, -2.43)]. Set the value to None if
250 |         it's not fixed, but should not be a plotted axis either
251 |     beta: float
252 |         The confidence interval used
253 | 
254 |     Returns
255 |     -------
256 |     surface: matplotlib trisurf plot
257 |     data: matplotlib plot for data points
258 |     """
259 |     if fixed_inputs is None:
260 |         if gp.kern.input_dim > 2:
261 |             raise NotImplementedError('This only works for 2D inputs')
262 |         fixed_inputs = []
263 |     elif gp.kern.input_dim - len(fixed_inputs) != 2:
264 |         raise NotImplementedError('Only two inputs can be unfixed')
265 | 
266 |     if axis is None:
267 |         if figure is None:
268 |             figure = plt.figure()
269 |             axis = Axes3D(figure)
270 |         else:
271 |             axis = Axes3D(figure)
272 | 
273 |     # Get a list of unfixed inputs to plot
274 |     unfixed = list(range(gp.kern.input_dim))
275 |     for dim, val in fixed_inputs:
276 |         if val is not None:
277 |             inputs[:, dim] = val
278 |         unfixed.remove(dim)
279 | 
280 |     # Compute GP predictions if not provided
281 |     if predictions is None:
282 |         mean, var = gp._raw_predict(inputs)
283 |     else:
284 |         mean, var = predictions
285 | 
286 |     surf = axis.plot_trisurf(inputs[:, unfixed[0]],
287 |                              inputs[:, unfixed[1]],
288 |                              mean[:, 0],
289 |                              cmap=cm.jet, linewidth=0.2, alpha=0.5)
290 | 
291 |     data = axis.plot(gp.X[:-1, unfixed[0]],
292 |                      gp.X[:-1, unfixed[1]],
293 |                      gp.Y[:-1, 0],
294 |                      'o')
295 |     axis.plot(gp.X[-1, unfixed[0]],
296 |               gp.X[-1, unfixed[1]],
297 |               gp.Y[-1, 0],
298 |               'ro')
299 | 
300 |     axis.set_xlim([np.min(inputs[:, unfixed[0]]),
301 |                    np.max(inputs[:, unfixed[0]])])
302 | 
303 |     axis.set_ylim([np.min(inputs[:, unfixed[1]]),
304 |                    np.max(inputs[:, unfixed[1]])])
305 | 
306 |     return surf, data
307 | 
308 | 
309 | def plot_contour_gp(gp, inputs, predictions=None, figure=None, axis=None,
310 |                     colorbar=True, **kwargs):
311 |     """
312 |     Plot the mean of a 2D GP as a contour plot.
313 | 
314 |     Parameters
315 |     ----------
316 |     gp: Instance of GPy.models.GPRegression
317 |     inputs: list of arrays/floats
318 |         The input parameters at which the GP is to be evaluated. Here,
319 |         instead of all combinations of inputs, the individual inputs
320 |         that are spread in a grid are given. Only two of the arrays
321 |         should have more than one value (not fixed).
322 |     predictions: ndarray
323 |         Can be used to manually pass the GP predictions, set to None to
324 |         use the gp directly.
325 |     figure: matplotlib figure
326 |         The figure on which to draw (ignored if axis is provided)
327 |     axis: matplotlib axis
328 |         The axis on which to draw
329 | 
330 |     Returns
331 |     -------
332 |     contour: matplotlib contour plot
333 |     colorbar: matplotlib colorbar
334 |     points: matplotlib plot
335 |     """
336 |     if axis is None:
337 |         if figure is None:
338 |             figure = plt.figure()
339 |             axis = figure.gca()
340 |         else:
341 |             axis = figure.gca()
342 | 
343 |     # Find which inputs are not fixed to constant values
344 |     slices = []
345 |     lengths = []
346 |     for i, inp in enumerate(inputs):
347 |         if isinstance(inp, np.ndarray):
348 |             slices.append(i)
349 |             lengths.append(inp.shape[0])
350 | 
351 |     mesh = np.meshgrid(*inputs, indexing='ij')
352 |     if predictions is None:
353 |         # Convert to array with combinations of inputs
354 |         gp_inputs = np.array([x.ravel() for x in mesh]).T
355 |         mean = gp._raw_predict(gp_inputs)[0]
356 |     else:
357 |         mean = predictions[0]
358 | 
359 |     c_bar = None
360 |     if not np.all(mean == mean[0]):
361 |         # Need to squeeze the added dimensions caused by fixed inputs
362 |         c = axis.contour(mesh[slices[0]].squeeze(),
363 |                          mesh[slices[1]].squeeze(),
364 |                          mean.squeeze().reshape(*lengths),
365 |                          20,
366 |                          **kwargs)
367 |         if colorbar:
368 |             c_bar = plt.colorbar(c)
369 |     else:
370 |         c = None
371 | 
372 |     data = axis.plot(gp.X[:-1, slices[0]], gp.X[:-1, slices[1]], 'ob')
373 |     axis.plot(gp.X[-1, slices[0]], gp.X[-1, slices[1]], 'or')
374 | 
375 |     axis.set_xlim([np.min(inputs[slices[0]]),
376 |                    np.max(inputs[slices[0]])])
377 | 
378 |     axis.set_ylim([np.min(inputs[slices[1]]),
379 |                    np.max(inputs[slices[1]])])
380 | 
381 |     return c, c_bar, data
382 | 
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [bdist_wheel]
2 | universal=1
3 | 
4 | 
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 | from os import path
3 | 
4 | 
5 | current_dir = path.abspath(path.dirname(__file__))
6 | 
7 | with open(path.join(current_dir, 'README.rst'), 'r') as f:
8 |     long_description = f.read()
9 | 
10 | with open(path.join(current_dir, 'requirements.txt'), 'r') as f:
11 |     install_requires = f.read().split('\n')
12 | 
13 | setup(
14 |     name='safeopt',
15 |     version='0.16',
16 |     author='Felix Berkenkamp',
17 |     author_email='befelix@inf.ethz.ch',
18 |     packages=['safeopt'],
19 |     url='https://github.com/befelix/SafeOpt',
20 |     license='MIT',
21 |     description='Safe Bayesian optimization',
22 |     long_description=long_description,
23 |     setup_requires='numpy',
24 |     install_requires=install_requires,
25 |     keywords='Bayesian optimization, Safety',
26 |     classifiers=[
27 |         'Development Status :: 4 - Beta',
28 |         'License :: OSI Approved :: MIT License',
29 |         'Programming Language :: Python :: 2',
30 |         'Programming Language :: Python :: 2.7',
31 |         'Programming Language :: Python :: 3',
32 |         'Programming Language :: Python :: 3.5'],
33 | )
34 | 
--------------------------------------------------------------------------------
/test_code.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | 
4 | module="safeopt"
5 | 
6 | get_script_dir () {
7 |     SOURCE="${BASH_SOURCE[0]}"
8 |     # While $SOURCE is a symlink, resolve it
9 |     while [ -h "$SOURCE" ]; do
10 |         DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
11 |         SOURCE="$( readlink "$SOURCE" )"
SOURCE="$( readlink "$SOURCE" )" 12 | # If $SOURCE was a relative symlink (so no "/" as prefix, need to resolve it relative to the symlink base directory 13 | [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" 14 | done 15 | DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" 16 | echo "$DIR" 17 | } 18 | 19 | # Change to script root 20 | cd $(get_script_dir) 21 | GREEN='\033[0;32m' 22 | NC='\033[0m' 23 | 24 | # Run style tests 25 | echo -e "${GREEN}Running style tests.${NC}" 26 | flake8 $module --exclude test*.py,__init__.py --ignore=E402,E731,E741,W503 --show-source 27 | 28 | # Ignore import errors for __init__ and tests 29 | flake8 $module --filename=__init__.py,test*.py --ignore=F,E402,W503 --show-source 30 | 31 | echo -e "${GREEN}Testing docstring conventions.${NC}" 32 | # Test docstring conventions 33 | pydocstyle $module --convention=numpy 34 | 35 | # Run unit tests 36 | echo -e "${GREEN}Running unit tests.${NC}" 37 | pytest --doctest-modules --cov --cov-fail-under=50 $module 38 | 39 | # Export html 40 | coverage html 41 | 42 | --------------------------------------------------------------------------------