├── .coveragerc ├── .gitignore ├── .pre-commit-config.yaml ├── .readthedocs.yml ├── .travis.yml ├── CHANGES.rst ├── CONTRIBUTORS.rst ├── LICENSE.rst ├── MANIFEST.in ├── README.rst ├── docs ├── Makefile ├── changes.rst ├── conf.py ├── contributors.rst ├── index.rst ├── license.rst ├── logging.rst ├── options.rst ├── pics │ └── communication.png ├── poap.rst ├── quickstart.rst ├── requirements.txt ├── source_code.rst └── surrogate_optimization.rst ├── examples ├── ackley.m ├── example_checkpointing_serial.py ├── example_checkpointing_threaded.py ├── example_ei.py ├── example_extra_vals.py ├── example_gp_regression.py ├── example_lcb.py ├── example_mars.py ├── example_matlab_engine.py ├── example_simple.py ├── example_sop.py ├── example_subprocess.py ├── example_subprocess_files.py ├── example_subprocess_partial_info.py ├── makefile ├── mpiexample_simple.py ├── mpiexample_subprocess.py ├── sphere_ext ├── sphere_ext.cpp ├── sphere_ext_files ├── sphere_ext_files.cpp ├── sumfun_ext └── sumfun_ext.cpp ├── notebooks ├── example_optimization_dycors.ipynb ├── example_srbf_sampling.ipynb └── example_surrogates.ipynb ├── pySOT ├── __init__.py ├── auxiliary_problems │ ├── __init__.py │ ├── candidate_dycors.py │ ├── candidate_srbf.py │ ├── candidate_uniform.py │ ├── ei_ga.py │ ├── ei_merit.py │ ├── lcb_ga.py │ └── lcb_merit.py ├── controller │ ├── __init__.py │ └── controller.py ├── experimental_design │ ├── __init__.py │ ├── experimental_design.py │ ├── lhd.py │ ├── slhd.py │ └── two_factorial.py ├── optimization_problems │ ├── __init__.py │ ├── ackley.py │ ├── branin.py │ ├── exponential.py │ ├── goldstein_price.py │ ├── griewank.py │ ├── hartmann3.py │ ├── hartmann6.py │ ├── himmelblau.py │ ├── levy.py │ ├── michaelewicz.py │ ├── optimization_problem.py │ ├── perm.py │ ├── rastrigin.py │ ├── rosenbrock.py │ ├── schwefel.py │ ├── six_hump_camel.py │ ├── sphere.py │ ├── sum_of_squares.py │ ├── weierstrass.py │ └── zakharov.py ├── strategy │ ├── __init__.py │ ├── 
dycors_strategy.py │ ├── ei_strategy.py │ ├── lcb_strategy.py │ ├── random_strategy.py │ ├── sop_strategy.py │ ├── srbf_strategy.py │ └── surrogate_strategy.py ├── surrogate │ ├── __init__.py │ ├── gp.py │ ├── kernels │ │ ├── __init__.py │ │ ├── cubic_kernel.py │ │ ├── kernel.py │ │ ├── linear_kernel.py │ │ └── tps_kernel.py │ ├── mars.py │ ├── output_transformations │ │ ├── __init__.py │ │ ├── identity.py │ │ └── median_capping.py │ ├── poly.py │ ├── rbf.py │ ├── surrogate.py │ └── tails │ │ ├── __init__.py │ │ ├── constant_tail.py │ │ ├── linear_tail.py │ │ └── tail.py └── utils.py ├── setup.cfg ├── setup.py └── tests ├── test_auxiliary_problems.py ├── test_controller.py ├── test_experimental_designs.py ├── test_optimization_problems.py ├── test_strategies.py ├── test_surrogates.py └── test_utils.py /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | omit = 3 | pySOT/docs/* 4 | pySOT/examples/* 5 | pySOT/notebooks/* 6 | pySOT/tests/* 7 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *.log 3 | build/ 4 | dist/ 5 | docs/_build 6 | pySOT.egg-info 7 | *.pysot 8 | .pytest_cache/* 9 | *checkpoint.ipynb 10 | .DS_store 11 | .coverage 12 | .vscode/ 13 | .coverage* 14 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pre-commit/pre-commit-hooks 3 | rev: v2.4.0 4 | hooks: 5 | - id: flake8 6 | args: [--config=setup.cfg] 7 | exclude: ^(examples/*)|(docs/*) 8 | - id: check-byte-order-marker 9 | - id: check-case-conflict 10 | - id: check-merge-conflict 11 | - id: end-of-file-fixer 12 | - id: forbid-new-submodules 13 | - id: mixed-line-ending 14 | args: [--fix=lf] 15 | - id: trailing-whitespace 16 | - 
id: debug-statements 17 | - repo: https://github.com/ambv/black 18 | rev: 19.10b0 19 | hooks: 20 | - id: black 21 | exclude: ^(build/*)|(docs/*)|(notebooks/*) 22 | args: [-l 120, --target-version=py36] 23 | - repo: https://github.com/pre-commit/mirrors-isort 24 | rev: v4.3.21 25 | hooks: 26 | - id: isort 27 | language_version: python3 28 | exclude: ^(build/*)|(docs/*)|(notebooks/*) 29 | args: [-w 120, -m 3, -tc, --project=pySOT] 30 | - repo: https://github.com/jumanjihouse/pre-commit-hooks 31 | rev: 1.11.0 32 | hooks: 33 | - id: require-ascii 34 | exclude: ^(notebooks/*) 35 | - id: script-must-have-extension 36 | - id: forbid-binary 37 | exclude: ^(examples/*) 38 | - repo: https://github.com/Lucas-C/pre-commit-hooks 39 | rev: v1.1.7 40 | hooks: 41 | - id: forbid-crlf 42 | - id: forbid-tabs 43 | exclude: ^(examples/makefile) 44 | -------------------------------------------------------------------------------- /.readthedocs.yml: -------------------------------------------------------------------------------- 1 | 2 | # Required 3 | version: 2 4 | 5 | # Build documentation in the docs/ directory with Sphinx 6 | sphinx: 7 | configuration: docs/conf.py 8 | 9 | # Optionally build your docs in additional formats such as PDF 10 | formats: 11 | - pdf 12 | 13 | # Optionally set the version of Python and requirements required to build your docs 14 | python: 15 | version: 3.6 16 | install: 17 | - requirements: docs/requirements.txt 18 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | sudo: true 2 | 3 | language: python 4 | python: 5 | - "3.6" 6 | 7 | install: 8 | # Install Miniconda 9 | - wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh; 10 | - bash miniconda.sh -b -p $HOME/miniconda 11 | - export PATH="$HOME/miniconda/bin:$PATH" 12 | - hash -r 13 | - conda config --set always_yes yes --set changeps1 no 14 
| - conda update -q conda 15 | - conda info -a 16 | 17 | # Set up an environment for Python 18 | - conda create -q -n test-environment python=$TRAVIS_PYTHON_VERSION 19 | - source activate test-environment 20 | 21 | # Install mpi4py with mpich 22 | - conda install -n test-environment -q numpy scipy pip 23 | - conda install -n test-environment -q --channel conda-forge mpich mpi4py 24 | 25 | # Install py-earth and code coverage 26 | - pip install --upgrade pip 27 | - pip install six http://github.com/scikit-learn-contrib/py-earth/tarball/master 28 | - pip install codecov 29 | - pip install pytest-cov 30 | 31 | # Install pySOT 32 | - python setup.py install 33 | 34 | script: 35 | - cd ./examples/ 36 | - make 37 | - make example 38 | - make mpiexample 39 | - cd - 40 | - python -m pytest --cov-config .coveragerc --cov=pySOT --disable-warnings 41 | 42 | after_success: 43 | - codecov 44 | -------------------------------------------------------------------------------- /CONTRIBUTORS.rst: -------------------------------------------------------------------------------- 1 | Developed and maintained by: 2 | 3 | - David Bindel 4 | - David Eriksson 5 | - Christine Shoemaker 6 | 7 | with contributions by: 8 | 9 | - Yi Shen 10 | - Taimoor Akhtar 11 | -------------------------------------------------------------------------------- /LICENSE.rst: -------------------------------------------------------------------------------- 1 | Copyright (c) 2015 by David Bindel, David Eriksson, and contributors. 2 | See Contributors for more details. 3 | 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are 8 | met: 9 | 10 | 1. Redistributions of source code must retain the above copyright 11 | notice, this list of conditions and the following disclaimer. 12 | 13 | 2. 
Redistributions in binary form must reproduce the above copyright 14 | notice, this list of conditions and the following disclaimer in the 15 | documentation and/or other materials provided with the distribution. 16 | 17 | 3. Neither the name of the copyright holder nor the names of its 18 | contributors may be used to endorse or promote products derived from 19 | this software without specific prior written permission. 20 | 21 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 | HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
32 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include *.md 2 | include *.rst 3 | include *.txt 4 | recursive-include docs * 5 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | |Travis| |codecov| |ReadTheDocs| |Downloads| |DOI| 2 | 3 | pySOT: Python Surrogate Optimization Toolbox 4 | -------------------------------------------- 5 | 6 | The Python Surrogate Optimization Toolbox (pySOT) is an asynchronous parallel 7 | optimization toolbox for computationally expensive global optimization problems. 8 | pySOT is built on top of the Plumbing for Optimization with Asynchronous Parallelism (POAP), 9 | which is an event-driven framework for building and combining asynchronous optimization 10 | strategies. POAP has support for both threads and MPI. 11 | 12 | pySOT implements many popular surrogate optimization algorithms such as the 13 | Stochastic RBF (SRBF) and DYCORS methods by Regis and Shoemaker, and the SOP 14 | method by Krityakierne et. al. We also support Expected Improvement (EI) and 15 | Lower Confidence Bounds (LCB), which are popular in Bayesian optimization. All 16 | optimization algorithms can be used in serial, synchronous parallel, and 17 | asynchronous parallel and we support both continuous and integer variables. 
18 | 19 | The toolbox is hosted on GitHub: https://github.com/dme65/pySOT 20 | 21 | Documentation: http://pysot.readthedocs.io/ 22 | 23 | Installation 24 | ------------ 25 | 26 | Installation instructions are available at: http://pysot.readthedocs.io/en/latest/quickstart.html 27 | 28 | Examples 29 | -------- 30 | 31 | Several pySOT examples and notebooks can be found at: 32 | 33 | https://github.com/dme65/pySOT/tree/master/examples 34 | 35 | https://github.com/dme65/pySOT/tree/master/notebooks 36 | 37 | 38 | Citing Us 39 | --------- 40 | 41 | If you use pySOT, please cite the following paper: 42 | `David Eriksson, David Bindel, Christine A. Shoemaker. pySOT and POAP: An event-driven asynchronous framework for surrogate optimization. arXiv preprint arXiv:1908.00420, 2019`_ 43 | 44 | .. code-block:: html 45 | 46 | @article{eriksson2019pysot, 47 | title={pySOT and POAP: An event-driven asynchronous framework for surrogate optimization}, 48 | author={Eriksson, David and Bindel, David and Shoemaker, Christine A}, 49 | journal={arXiv preprint arXiv:1908.00420}, 50 | year={2019} 51 | } 52 | 53 | FAQ 54 | --- 55 | 56 | | Q: Can I use pySOT with MPI? 57 | | A: Yes. You need to install mpi4py in order to use the MPIController in POAP. 58 | | 59 | | Q: I used pySOT for my research and want to cite it 60 | | A: Please cite our preprint_ which is currently under review! 61 | | 62 | | Q: Is there support for Python 2? 63 | | A: Python 2 support was removed in version 0.2.0 64 | | 65 | | Q: I can't find the MARS interpolant 66 | | A: You need to install py-earth in order to use MARS. More information is 67 | available here: https://github.com/scikit-learn-contrib/py-earth 68 | | 69 | 70 | .. _`David Eriksson, David Bindel, Christine A. Shoemaker. pySOT and POAP: An event-driven asynchronous framework for surrogate optimization. arXiv preprint arXiv:1908.00420, 2019`: https://arxiv.org/abs/1908.00420 71 | .. _preprint: https://arxiv.org/abs/1908.00420 72 | .. 
|Travis| image:: https://travis-ci.org/dme65/pySOT.svg?branch=master 73 | :target: https://travis-ci.org/dme65/pySOT 74 | .. |ReadTheDocs| image:: https://readthedocs.org/projects/pysot/badge/?version=latest 75 | :target: http://pysot.readthedocs.io/en/latest/?badge=latest 76 | :alt: Documentation Status 77 | .. |DOI| image:: https://zenodo.org/badge/36836292.svg 78 | :target: https://zenodo.org/badge/latestdoi/36836292 79 | .. |codecov| image:: https://codecov.io/gh/dme65/pySOT/branch/dme/graph/badge.svg 80 | :target: https://codecov.io/gh/dme65/pySOT 81 | .. |Downloads| image:: https://pepy.tech/badge/pysot 82 | :target: https://pepy.tech/project/pySOT 83 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # User-friendly check for sphinx-build 11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) 12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) 13 | endif 14 | 15 | # Internal variables. 16 | PAPEROPT_a4 = -D latex_paper_size=a4 17 | PAPEROPT_letter = -D latex_paper_size=letter 18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 19 | # the i18n builder cannot share the environment and doctrees with the others 20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
21 | 22 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext 23 | 24 | help: 25 | @echo "Please use \`make ' where is one of" 26 | @echo " html to make standalone HTML files" 27 | @echo " dirhtml to make HTML files named index.html in directories" 28 | @echo " singlehtml to make a single large HTML file" 29 | @echo " pickle to make pickle files" 30 | @echo " json to make JSON files" 31 | @echo " htmlhelp to make HTML files and a HTML help project" 32 | @echo " qthelp to make HTML files and a qthelp project" 33 | @echo " applehelp to make an Apple Help Book" 34 | @echo " devhelp to make HTML files and a Devhelp project" 35 | @echo " epub to make an epub" 36 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 37 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 38 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 39 | @echo " text to make text files" 40 | @echo " man to make manual pages" 41 | @echo " texinfo to make Texinfo files" 42 | @echo " info to make Texinfo files and run them through makeinfo" 43 | @echo " gettext to make PO message catalogs" 44 | @echo " changes to make an overview of all changed/added/deprecated items" 45 | @echo " xml to make Docutils-native XML files" 46 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 47 | @echo " linkcheck to check all external links for integrity" 48 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 49 | @echo " coverage to run coverage check of the documentation (if enabled)" 50 | 51 | clean: 52 | rm -rf $(BUILDDIR)/* 53 | 54 | html: 55 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 56 | @echo 57 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 58 | 59 | dirhtml: 60 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 61 | @echo 62 | @echo "Build finished. 
The HTML pages are in $(BUILDDIR)/dirhtml." 63 | 64 | singlehtml: 65 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 66 | @echo 67 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 68 | 69 | pickle: 70 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 71 | @echo 72 | @echo "Build finished; now you can process the pickle files." 73 | 74 | json: 75 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 76 | @echo 77 | @echo "Build finished; now you can process the JSON files." 78 | 79 | htmlhelp: 80 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 81 | @echo 82 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 83 | ".hhp project file in $(BUILDDIR)/htmlhelp." 84 | 85 | qthelp: 86 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 87 | @echo 88 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 89 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 90 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/pySOT.qhcp" 91 | @echo "To view the help file:" 92 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/pySOT.qhc" 93 | 94 | applehelp: 95 | $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp 96 | @echo 97 | @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." 98 | @echo "N.B. You won't be able to view it unless you put it in" \ 99 | "~/Library/Documentation/Help or install it in your application" \ 100 | "bundle." 101 | 102 | devhelp: 103 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 104 | @echo 105 | @echo "Build finished." 106 | @echo "To view the help file:" 107 | @echo "# mkdir -p $$HOME/.local/share/devhelp/pySOT" 108 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/pySOT" 109 | @echo "# devhelp" 110 | 111 | epub: 112 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 113 | @echo 114 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 
115 | 116 | latex: 117 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 118 | @echo 119 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 120 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 121 | "(use \`make latexpdf' here to do that automatically)." 122 | 123 | latexpdf: 124 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 125 | @echo "Running LaTeX files through pdflatex..." 126 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 127 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 128 | 129 | latexpdfja: 130 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 131 | @echo "Running LaTeX files through platex and dvipdfmx..." 132 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 133 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 134 | 135 | text: 136 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 137 | @echo 138 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 139 | 140 | man: 141 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 142 | @echo 143 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 144 | 145 | texinfo: 146 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 147 | @echo 148 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 149 | @echo "Run \`make' in that directory to run these through makeinfo" \ 150 | "(use \`make info' here to do that automatically)." 151 | 152 | info: 153 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 154 | @echo "Running Texinfo files through makeinfo..." 155 | make -C $(BUILDDIR)/texinfo info 156 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 157 | 158 | gettext: 159 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 160 | @echo 161 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 
162 | 163 | changes: 164 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 165 | @echo 166 | @echo "The overview file is in $(BUILDDIR)/changes." 167 | 168 | linkcheck: 169 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 170 | @echo 171 | @echo "Link check complete; look for any errors in the above output " \ 172 | "or in $(BUILDDIR)/linkcheck/output.txt." 173 | 174 | doctest: 175 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 176 | @echo "Testing of doctests in the sources finished, look at the " \ 177 | "results in $(BUILDDIR)/doctest/output.txt." 178 | 179 | coverage: 180 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage 181 | @echo "Testing of coverage in the sources finished, look at the " \ 182 | "results in $(BUILDDIR)/coverage/python.txt." 183 | 184 | xml: 185 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 186 | @echo 187 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 188 | 189 | pseudoxml: 190 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 191 | @echo 192 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 193 | -------------------------------------------------------------------------------- /docs/changes.rst: -------------------------------------------------------------------------------- 1 | Changes 2 | ======= 3 | 4 | .. include:: ../CHANGES.rst 5 | -------------------------------------------------------------------------------- /docs/contributors.rst: -------------------------------------------------------------------------------- 1 | Contributors 2 | ============ 3 | 4 | .. include:: ../CONTRIBUTORS.rst 5 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | Welcome to the pySOT documentation! 
2 | =================================== 3 | 4 | This is the documentation for the Surrogate Optimization Toolbox (pySOT) for 5 | global deterministic optimization problems. pySOT is hosted on GitHub: 6 | `https://github.com/dme65/pySOT `_. 7 | 8 | The main purpose of the toolbox is for optimization of computationally 9 | expensive black-box objective functions with continuous and/or integer 10 | variables. 11 | All variables are assumed to have bound constraints in some form where none 12 | of the bounds are infinity. The tighter the bounds, the more efficient are 13 | the algorithms since it reduces the search region and increases the quality 14 | of the constructed surrogate. 15 | This toolbox may not be very efficient for problems with computationally cheap 16 | function evaluations. Surrogate models are intended to be used when function 17 | evaluations take from several minutes to several hours or more. 18 | 19 | 20 | For easier understanding of the algorithms in this toolbox, it is recommended 21 | and helpful to read these papers. If you have any questions, or you encounter 22 | any bugs, please feel free to either submit a bug report on GitHub (recommended) 23 | or to contact me at the email address: dme65@cornell.edu. Keep an eye on the 24 | GitHub repository for updates and changes to both the toolbox and the documentation. 25 | 26 | The toolbox is based on the following published papers: [1_], [2_], [3_] [4_] 27 | 28 | .. toctree:: 29 | :maxdepth: 4 30 | :caption: User Documentation 31 | 32 | quickstart 33 | surrogate_optimization 34 | options 35 | poap 36 | logging 37 | source_code 38 | changes 39 | license 40 | contributors 41 | 42 | 43 | .. [1] Rommel G Regis and Christine A Shoemaker. 44 | A stochastic radial basis function method for the global optimization of expensive functions. 45 | INFORMS Journal on Computing, 19(4): 497–509, 2007. 46 | 47 | .. [2] Rommel G Regis and Christine A Shoemaker. 
48 | Parallel stochastic global optimization using radial basis functions. 49 | INFORMS Journal on Computing, 21(3):411–426, 2009. 50 | 51 | .. [3] Rommel G Regis and Christine A Shoemaker. 52 | Combining radial basis function surrogates and dynamic coordinate search in high-dimensional expensive black-box optimization. 53 | Engineering Optimization, 45(5): 529–555, 2013. 54 | 55 | .. [4] Tipaluck Krityakierne, Taimoor Akhtar and Christine A. Shoemaker. 56 | SOP: parallel surrogate global optimization with Pareto center selection for computationally expensive single objective problems. 57 | Journal of Global Optimization, 66(3): 417–437, 2016. 58 | -------------------------------------------------------------------------------- /docs/license.rst: -------------------------------------------------------------------------------- 1 | License 2 | ======= 3 | 4 | .. include:: ../LICENSE.rst 5 | -------------------------------------------------------------------------------- /docs/logging.rst: -------------------------------------------------------------------------------- 1 | Logging 2 | ======= 3 | 4 | pySOT logs all important events that occur during the optimization process. The user can 5 | specify what level of logging he wants to do. The five levels are: 6 | 7 | - critical 8 | - error 9 | - warning 10 | - info 11 | - debug 12 | 13 | Function evaluations are recorded on the info level, so this is the recommended level for pySOT. 14 | There is currently nothing that is being logged on the debug level, but better logging for 15 | debugging will likely be added in the future. Crashed evaluations are recorded on the warning 16 | level. 17 | 18 | More information about logging in Python 2.7 is available at: 19 | `https://docs.python.org/2/library/logging.html `_. 
20 | -------------------------------------------------------------------------------- /docs/pics/communication.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dme65/pySOT/c8f04fd4ed30d49bb61adb008134741319b512a4/docs/pics/communication.png -------------------------------------------------------------------------------- /docs/poap.rst: -------------------------------------------------------------------------------- 1 | POAP 2 | ==== 3 | 4 | pySOT uses POAP, which an event-driven framework for building and combining 5 | asynchronous optimization strategies. There are two main components in POAP, 6 | namely controllers and strategies. The controller is capable of asking workers to 7 | run function evaluations and the strategy decides where to evaluate next. POAP 8 | works with external black-box objective functions and handles potential crashes 9 | in the objective function evaluation. There is also a logfile from which all function 10 | evaluations can be accessed after the run finished. In its simplest form, an 11 | optimization code with POAP that evaluates a function predetermined set of 12 | points using NUM_WORKERS threads may look the following way: 13 | 14 | .. code-block:: python 15 | 16 | from poap.strategy import FixedSampleStrategy 17 | from poap.strategy import CheckWorkStrategy 18 | from poap.controller import ThreadController 19 | from poap.controller import BasicWorkerThread 20 | 21 | # samples = list of sample points ... 
22 | 23 | controller = ThreadController() 24 | sampler = FixedSampleStrategy(samples) 25 | controller.strategy = CheckWorkerStrategy(controller, sampler) 26 | 27 | for i in range(NUM_WORKERS): 28 | t = BasicWorkerThread(controller, objective) 29 | controller.launch_worker(t) 30 | 31 | result = controller.run() 32 | print 'Best result: {0} at {1}'.format(result.value, result.params) 33 | 34 | Controller 35 | ---------- 36 | 37 | The controller is responsible for accepting or rejecting proposals by the strategy object, 38 | controlling and monitoring the workers, and informing the strategy object of relevant events. 39 | Examples of relevant events are the processing of a proposal, or status updates on a function 40 | evaluation. Interactions between controller and the strategies are organized around proposals 41 | and evaluation records. At the beginning of the optimization and on any later change to the 42 | system state, the controller requests a proposal from the strategy. The proposal consists of an 43 | action (evaluate a function, kill a function, or terminate the optimization), a list of parameters, 44 | and a list of callback functions to be executed once the proposal is processed. The controller 45 | then either accepts the proposal (and sends a command to the worker), or rejects the proposal. 46 | 47 | When the controller accepts a proposal to start a function evaluation, it creates an evaluation 48 | record to share information about the status of the evaluation with the strategy. The evaluation 49 | record includes the evaluation point, the status of the evaluation, the value (if completed), 50 | and a list of callback functions to be executed on any update. Once a proposal has been accepted 51 | or rejected, the controller processes any pending system events (e.g. completed or canceled 52 | function evaluations), notifies the strategy about updates, and requests the next proposed action. 
53 | 54 | POAP comes with a serial controller which is the controller of choice when objective function 55 | evaluations are carried out in serial. There is also a threaded controller that dispatches work 56 | to a queue of workers where each worker is able to handle evaluation and kill requests. The 57 | requests are asynchronous in the sense that the workers are not required to complete the 58 | evaluation or termination requests. The worker is forced to respond to evaluation requests, but 59 | may ignore kill requests. When receiving an evaluation request, the worker should either attempt 60 | the evaluation or mark the record as killed. The worker sends status updates back to the controller 61 | by updating the relevant record. There is also a third controller that uses simulated time, which 62 | is very useful for testing asynchronous optimization strategies. 63 | 64 | Strategy 65 | -------- 66 | 67 | The strategy is the heart of the optimization algorithm, since it is responsible for choosing 68 | new evaluations, killing evaluations, and terminating the optimization run when a stopping 69 | criteria is reached. POAP provides some basic default strategies based on non-adaptive sampling 70 | and serial optimization routines and also some strategies that adapt or combine other strategies. 71 | 72 | Different strategies can be composed by combining their control actions, which can be used to 73 | let a strategy cycle through a list of optimization strategies and select the most promising 74 | of their proposals. Strategies can also subscribe to be informed of all new function evaluations 75 | so they incorporate any new function information, even though the evaluation was proposed by another 76 | strategy. This makes it possible to start several independent strategies while still allowing each 77 | strategy to look at the function information that comes from function evaluations proposed by other 78 | strategies. 
As an example we can have a local optimizer strategy running a gradient based method 79 | where the starting point can be selected based on the best point found by any other strategy. The 80 | flexibility of the POAP framework makes combined strategies like these very straightforward. 81 | 82 | Workers 83 | ------- 84 | 85 | The multi-threaded controller employs a set of workers that are capable of managing concurrent 86 | function evaluations. Each worker does not provide parallelism on its own, but the worker itself 87 | is allowed to exploit parallelism by separate external processes. 88 | 89 | There are workers that are capable of calling Python objective function when asked to do an 90 | evaluation, which only results in parallelism if the objective function implementation itself 91 | allows parallelism. There are workers that use subprocesses in order to carry out external 92 | objective function evaluations that are not necessarily in Python. The user is responsible 93 | for specifying how to evaluate the objective function and how to parse partial information 94 | if available. 95 | 96 | POAP is also capable of having workers connect to a specified TCP/IP port in order to communicate 97 | with the controller. This functionality is useful in a cluster setting, for example, where the workers 98 | should run on compute nodes distinct from the node where the controller is running. It is also very 99 | useful in a setting where the workers run on a supercomputer that has a restriction on the number 100 | of hours per job submission. Having the controller run on a separate machine will allow the 101 | controller to keep running and the workers to reconnect and continue carrying out evaluations. 102 | 103 | Communication between POAP and pySOT 104 | ------------------------------------ 105 | 106 | .. 
image:: ./pics/communication.png 107 | -------------------------------------------------------------------------------- /docs/quickstart.rst: -------------------------------------------------------------------------------- 1 | Quickstart 2 | ========== 3 | 4 | .. _quickstart-label: 5 | 6 | 7 | Dependencies 8 | ------------ 9 | 10 | Before starting you will need Python 3.4 or newer. You need to have numpy, scipy, and pip 11 | installed and we recommend installing Anaconda/Miniconda for your desired Python version. 12 | 13 | There are a couple of optional components of pySOT that needs to be installed manually: 14 | 15 | 1. **py-earth**: Implementation of MARS. Can be installed using: 16 | 17 | .. code-block:: bash 18 | 19 | pip install six http://github.com/scikit-learn-contrib/py-earth/tarball/master 20 | 21 | or 22 | 23 | .. code-block:: bash 24 | 25 | git clone git://github.com/scikit-learn-contrib/py-earth.git 26 | cd py-earth 27 | pip install six 28 | python setup.py install 29 | 30 | 2. **mpi4py**: This module is necessary in order to use pySOT with MPI. Can be installed through pip: 31 | 32 | .. code-block:: bash 33 | 34 | pip install mpi4py 35 | 36 | or through conda (Anaconda/Miniconda) where it can be channeled with your favorite MPI implementation 37 | such as mpich: 38 | 39 | .. code-block:: bash 40 | 41 | conda install --channel mpi4py mpich mpi4py 42 | 43 | Installation 44 | ------------ 45 | 46 | There are currently two ways to install pySOT: 47 | 48 | 1. **(Recommended)** The easiest way to install pySOT is through pip in which case 49 | the following command should suffice: 50 | 51 | .. code-block:: bash 52 | 53 | pip install pySOT 54 | 55 | 2. The other option is cloning the repository and installing. 56 | 57 | | 2.1. Clone the repository: 58 | 59 | .. code-block:: bash 60 | 61 | git clone https://github.com/dme65/pySOT 62 | 63 | | 2.2. Navigate to the repository using: 64 | 65 | .. code-block:: bash 66 | 67 | cd pySOT 68 | 69 | | 2.3. 
Install pySOT (you may need to use sudo for UNIX): 70 | 71 | .. code-block:: bash 72 | 73 | python setup.py install 74 | 75 | Several examples are available in ./pySOT/examples and ./pySOT/notebooks -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | sphinx >= 1.4.7 2 | six 3 | scikit-learn 4 | http://github.com/scikit-learn-contrib/py-earth/tarball/master#egg=py-earth-0.1.0 5 | -------------------------------------------------------------------------------- /docs/source_code.rst: -------------------------------------------------------------------------------- 1 | Source code 2 | =========== 3 | 4 | .. toctree:: 5 | 6 | pySOT.auxiliary_problems module 7 | ------------------------------- 8 | 9 | .. automodule:: pySOT.auxiliary_problems 10 | :members: 11 | :undoc-members: 12 | 13 | pySOT.controller module 14 | ----------------------- 15 | 16 | .. automodule:: pySOT.controller 17 | :members: 18 | :undoc-members: 19 | 20 | pySOT.experimental_design module 21 | -------------------------------- 22 | 23 | .. automodule:: pySOT.experimental_design 24 | :members: 25 | :undoc-members: 26 | 27 | pySOT.optimization_problems module 28 | ---------------------------------- 29 | 30 | .. automodule:: pySOT.optimization_problems 31 | :members: 32 | :undoc-members: 33 | 34 | pySOT.strategy module 35 | --------------------- 36 | 37 | .. automodule:: pySOT.strategy 38 | :members: 39 | :undoc-members: 40 | 41 | pySOT.surrogate module 42 | ---------------------- 43 | 44 | .. automodule:: pySOT.surrogate 45 | :members: 46 | :undoc-members: 47 | 48 | pySOT.utils module 49 | ------------------ 50 | 51 | .. 
automodule:: pySOT.utils 52 | :members: 53 | :undoc-members: 54 | -------------------------------------------------------------------------------- /docs/surrogate_optimization.rst: -------------------------------------------------------------------------------- 1 | Surrogate optimization 2 | ====================== 3 | 4 | Surrogate optimization algorithms generally consist of four components: 5 | 6 | 1. **Strategy:** Algorithm for choosing new evaluations after the experimental 7 | design has been evaluated. 8 | 2. **Experimental design:** Generates an initial set of points for building 9 | the initial surrogate model 10 | 3. **Surrogate model:** Approximates the underlying objective function. 11 | Common choices are RBFs, GPs, MARS, etc. 12 | 4. **Optimization problem:** All of the available information about the 13 | optimization problem, e.g., dimensionality, variable types, objective 14 | function, etc. 15 | 16 | 17 | The surrogate model (or response surfaces) is used to approximate an underlying 18 | function that has been evaluated for a set of points. During the optimization 19 | phase information from the surrogate model is used in order to guide the search 20 | for improved solutions, which has the advantage of not needing as many function 21 | evaluations to find a good solution. 22 | 23 | The general framework for a Surrogate Optimization algorithm is illustrated 24 | in the algorithm below: 25 | 26 | **Inputs:** Optimization problem, Experimental design, Optimization strategy, 27 | Surrogate model, Stopping criterion 28 | 29 | .. 
function val = ackley(x)
% ACKLEY  Evaluate the Ackley benchmark function.
%   val = ACKLEY(x) evaluates the Ackley test function for each row of x,
%   where each row is a point in R^dim. Global minimum is 0 at the origin.
dim = length(x);
sq_term  = sum(x.^2, 2) / dim;
cos_term = sum(cos(2*pi*x), 2) / dim;
val = -20*exp(-0.2*sqrt(sq_term)) - exp(cos_term) + 20 + exp(1);
moduleauthor:: David Eriksson 5 | """ 6 | 7 | import multiprocessing 8 | import os 9 | import time 10 | 11 | import numpy as np 12 | from poap.controller import SerialController 13 | 14 | from pySOT.controller import CheckpointController 15 | from pySOT.experimental_design import SymmetricLatinHypercube 16 | from pySOT.optimization_problems import Ackley 17 | from pySOT.strategy import SRBFStrategy 18 | from pySOT.surrogate import CubicKernel, LinearTail, RBFInterpolant 19 | 20 | max_evals = 200 21 | ackley = Ackley(dim=10) 22 | print(ackley.info) 23 | 24 | fname = "checkpoint.pysot" 25 | 26 | 27 | def example_checkpoint_serial(): 28 | if os.path.isfile(fname): 29 | os.remove(fname) 30 | 31 | # Run for 3 seconds and kill the controller 32 | p = multiprocessing.Process(target=init, args=()) 33 | p.start() 34 | time.sleep(3) 35 | p.terminate() 36 | p.join() 37 | 38 | print("Die controller, die!") 39 | 40 | # Resume the run 41 | resume() 42 | 43 | 44 | def init(): 45 | print("\nInitializing run...") 46 | rbf = RBFInterpolant(dim=ackley.dim, lb=ackley.lb, ub=ackley.ub, kernel=CubicKernel(), tail=LinearTail(ackley.dim)) 47 | slhd = SymmetricLatinHypercube(dim=ackley.dim, num_pts=2 * (ackley.dim + 1)) 48 | 49 | # Create a strategy and a controller 50 | controller = SerialController(ackley.eval) 51 | controller.strategy = SRBFStrategy( 52 | max_evals=max_evals, opt_prob=ackley, exp_design=slhd, surrogate=rbf, asynchronous=True 53 | ) 54 | 55 | print("Number of workers: 1") 56 | print("Maximum number of evaluations: {}".format(max_evals)) 57 | print("Strategy: {}".format(controller.strategy.__class__.__name__)) 58 | print("Experimental design: {}".format(slhd.__class__.__name__)) 59 | print("Surrogate: {}".format(rbf.__class__.__name__)) 60 | 61 | # Wrap controller in checkpoint object 62 | controller = CheckpointController(controller, fname=fname) 63 | result = controller.run() 64 | print("Best value found: {0}".format(result.value)) 65 | print( 66 | "Best solution found: 
{0}\n".format( 67 | np.array_str(result.params[0], max_line_width=np.inf, precision=5, suppress_small=True) 68 | ) 69 | ) 70 | 71 | 72 | def resume(): 73 | print("Resuming run...\n") 74 | controller = SerialController(ackley.eval) 75 | 76 | # Wrap controller in checkpoint object 77 | controller = CheckpointController(controller, fname=fname) 78 | result = controller.resume() 79 | print("Best value found: {0}".format(result.value)) 80 | print( 81 | "Best solution found: {0}\n".format( 82 | np.array_str(result.params[0], max_line_width=np.inf, precision=5, suppress_small=True) 83 | ) 84 | ) 85 | 86 | 87 | if __name__ == "__main__": 88 | example_checkpoint_serial() 89 | -------------------------------------------------------------------------------- /examples/example_checkpointing_threaded.py: -------------------------------------------------------------------------------- 1 | """ 2 | .. module:: example_checkpointing_threaded 3 | :synopsis: Example Checkpointing Threaded 4 | .. moduleauthor:: David Eriksson 5 | """ 6 | 7 | import multiprocessing 8 | import os 9 | import time 10 | 11 | import numpy as np 12 | from poap.controller import BasicWorkerThread, ThreadController 13 | 14 | from pySOT.controller import CheckpointController 15 | from pySOT.experimental_design import SymmetricLatinHypercube 16 | from pySOT.optimization_problems import Ackley 17 | from pySOT.strategy import SRBFStrategy 18 | from pySOT.surrogate import CubicKernel, LinearTail, RBFInterpolant 19 | 20 | num_threads = 4 21 | max_evals = 200 22 | ackley = Ackley(dim=10) 23 | print(ackley.info) 24 | 25 | fname = "checkpoint.pysot" 26 | 27 | 28 | def example_checkpoint_threaded(): 29 | if os.path.isfile(fname): 30 | os.remove(fname) 31 | 32 | # Run for 3 seconds and kill the controller 33 | p = multiprocessing.Process(target=init, args=()) 34 | p.start() 35 | time.sleep(3) 36 | p.terminate() 37 | p.join() 38 | 39 | print("Die controller, die!") 40 | 41 | # Resume the run 42 | resume() 43 | 44 | 45 | 
def init(): 46 | print("\nInitializing run...") 47 | 48 | rbf = RBFInterpolant(dim=ackley.dim, lb=ackley.lb, ub=ackley.ub, kernel=CubicKernel(), tail=LinearTail(ackley.dim)) 49 | slhd = SymmetricLatinHypercube(dim=ackley.dim, num_pts=2 * (ackley.dim + 1)) 50 | 51 | # Create a strategy and a controller 52 | controller = ThreadController() 53 | controller.strategy = SRBFStrategy( 54 | max_evals=max_evals, opt_prob=ackley, exp_design=slhd, surrogate=rbf, asynchronous=True, batch_size=num_threads 55 | ) 56 | 57 | print("Number of threads: {}".format(num_threads)) 58 | print("Maximum number of evaluations: {}".format(max_evals)) 59 | print("Strategy: {}".format(controller.strategy.__class__.__name__)) 60 | print("Experimental design: {}".format(slhd.__class__.__name__)) 61 | print("Surrogate: {}".format(rbf.__class__.__name__)) 62 | 63 | # Launch the threads and give them access to the objective function 64 | for _ in range(num_threads): 65 | worker = BasicWorkerThread(controller, ackley.eval) 66 | controller.launch_worker(worker) 67 | 68 | # Wrap controller in checkpoint object 69 | controller = CheckpointController(controller, fname=fname) 70 | result = controller.run() 71 | print("Best value found: {0}".format(result.value)) 72 | print( 73 | "Best solution found: {0}\n".format( 74 | np.array_str(result.params[0], max_line_width=np.inf, precision=5, suppress_small=True) 75 | ) 76 | ) 77 | 78 | 79 | def resume(): 80 | print("Resuming run...\n") 81 | controller = ThreadController() 82 | 83 | # Launch the threads and give them access to the objective function 84 | for _ in range(num_threads): 85 | worker = BasicWorkerThread(controller, ackley.eval) 86 | controller.launch_worker(worker) 87 | 88 | # Wrap controller in checkpoint object 89 | controller = CheckpointController(controller, fname=fname) 90 | result = controller.resume() 91 | print("Best value found: {0}".format(result.value)) 92 | print( 93 | "Best solution found: {0}\n".format( 94 | 
np.array_str(result.params[0], max_line_width=np.inf, precision=5, suppress_small=True) 95 | ) 96 | ) 97 | 98 | 99 | if __name__ == "__main__": 100 | example_checkpoint_threaded() 101 | -------------------------------------------------------------------------------- /examples/example_ei.py: -------------------------------------------------------------------------------- 1 | """ 2 | .. module:: example_expected_improvement 3 | :synopsis: Example Expected Improvement 4 | .. moduleauthor:: David Eriksson 5 | """ 6 | 7 | import logging 8 | import os.path 9 | 10 | import numpy as np 11 | from poap.controller import BasicWorkerThread, ThreadController 12 | 13 | from pySOT.experimental_design import SymmetricLatinHypercube 14 | from pySOT.optimization_problems import Hartmann6 15 | from pySOT.strategy import EIStrategy 16 | from pySOT.surrogate import GPRegressor 17 | 18 | 19 | def example_expected_improvement(): 20 | if not os.path.exists("./logfiles"): 21 | os.makedirs("logfiles") 22 | if os.path.exists("./logfiles/example_simple.log"): 23 | os.remove("./logfiles/example_simple.log") 24 | logging.basicConfig(filename="./logfiles/example_simple.log", level=logging.INFO) 25 | 26 | num_threads = 4 27 | max_evals = 100 28 | 29 | hart6 = Hartmann6() 30 | gp = GPRegressor(dim=hart6.dim, lb=hart6.lb, ub=hart6.ub) 31 | slhd = SymmetricLatinHypercube(dim=hart6.dim, num_pts=2 * (hart6.dim + 1)) 32 | 33 | # Create a strategy and a controller 34 | controller = ThreadController() 35 | controller.strategy = EIStrategy( 36 | max_evals=max_evals, opt_prob=hart6, exp_design=slhd, surrogate=gp, asynchronous=True 37 | ) 38 | 39 | print("Number of threads: {}".format(num_threads)) 40 | print("Maximum number of evaluations: {}".format(max_evals)) 41 | print("Strategy: {}".format(controller.strategy.__class__.__name__)) 42 | print("Experimental design: {}".format(slhd.__class__.__name__)) 43 | print("Surrogate: {}".format(gp.__class__.__name__)) 44 | 45 | # Launch the threads and give them 
access to the objective function 46 | for _ in range(num_threads): 47 | worker = BasicWorkerThread(controller, hart6.eval) 48 | controller.launch_worker(worker) 49 | 50 | # Run the optimization strategy 51 | result = controller.run() 52 | 53 | print("Best value found: {0}".format(result.value)) 54 | print( 55 | "Best solution found: {0}\n".format( 56 | np.array_str(result.params[0], max_line_width=np.inf, precision=5, suppress_small=True) 57 | ) 58 | ) 59 | 60 | 61 | if __name__ == "__main__": 62 | example_expected_improvement() 63 | -------------------------------------------------------------------------------- /examples/example_extra_vals.py: -------------------------------------------------------------------------------- 1 | """ 2 | .. module:: example_extra_vals 3 | :synopsis: Example extra vals 4 | .. moduleauthor:: David Eriksson 5 | """ 6 | 7 | import logging 8 | import os.path 9 | 10 | import numpy as np 11 | from poap.controller import BasicWorkerThread, EvalRecord, ThreadController 12 | 13 | from pySOT.experimental_design import SymmetricLatinHypercube 14 | from pySOT.optimization_problems import Ackley 15 | from pySOT.strategy import SRBFStrategy 16 | from pySOT.surrogate import CubicKernel, LinearTail, RBFInterpolant 17 | 18 | 19 | def example_extra_vals(): 20 | if not os.path.exists("./logfiles"): 21 | os.makedirs("logfiles") 22 | if os.path.exists("./logfiles/example_extra_vals.log"): 23 | os.remove("./logfiles/example_extra_vals.log") 24 | logging.basicConfig(filename="./logfiles/example_extra_vals.log", level=logging.INFO) 25 | 26 | num_threads = 4 27 | max_evals = 500 28 | 29 | ackley = Ackley(dim=10) 30 | num_extra = 10 31 | extra = np.random.uniform(ackley.lb, ackley.ub, (num_extra, ackley.dim)) 32 | extra_vals = np.nan * np.ones((num_extra, 1)) 33 | for i in range(num_extra): # Evaluate every second point 34 | if i % 2 == 0: 35 | extra_vals[i] = ackley.eval(extra[i, :]) 36 | 37 | rbf = RBFInterpolant(dim=ackley.dim, lb=ackley.lb, ub=ackley.ub, 
kernel=CubicKernel(), tail=LinearTail(ackley.dim)) 38 | slhd = SymmetricLatinHypercube(dim=ackley.dim, num_pts=2 * (ackley.dim + 1)) 39 | 40 | # Create a strategy and a controller 41 | controller = ThreadController() 42 | controller.strategy = SRBFStrategy( 43 | max_evals=max_evals, 44 | opt_prob=ackley, 45 | exp_design=slhd, 46 | surrogate=rbf, 47 | asynchronous=True, 48 | batch_size=num_threads, 49 | extra_points=extra, 50 | extra_vals=extra_vals, 51 | ) 52 | 53 | print("Number of threads: {}".format(num_threads)) 54 | print("Maximum number of evaluations: {}".format(max_evals)) 55 | print("Strategy: {}".format(controller.strategy.__class__.__name__)) 56 | print("Experimental design: {}".format(slhd.__class__.__name__)) 57 | print("Surrogate: {}".format(rbf.__class__.__name__)) 58 | 59 | # Append the known function values to the POAP database since 60 | # POAP won't evaluate these points 61 | for i in range(len(extra_vals)): 62 | if not np.isnan(extra_vals[i]): 63 | record = EvalRecord(params=(np.ravel(extra[i, :]),), status="completed") 64 | record.value = extra_vals[i] 65 | record.feasible = True 66 | controller.fevals.append(record) 67 | 68 | # Launch the threads and give them access to the objective function 69 | for _ in range(num_threads): 70 | worker = BasicWorkerThread(controller, ackley.eval) 71 | controller.launch_worker(worker) 72 | 73 | # Run the optimization strategy 74 | result = controller.run() 75 | 76 | print("Best value found: {0}".format(result.value)) 77 | print( 78 | "Best solution found: {0}\n".format( 79 | np.array_str(result.params[0], max_line_width=np.inf, precision=5, suppress_small=True) 80 | ) 81 | ) 82 | 83 | 84 | if __name__ == "__main__": 85 | example_extra_vals() 86 | -------------------------------------------------------------------------------- /examples/example_gp_regression.py: -------------------------------------------------------------------------------- 1 | """ 2 | .. 
module:: example_gp_regression 3 | :synopsis: Example GP Regression 4 | .. moduleauthor:: David Eriksson 5 | """ 6 | 7 | import logging 8 | import os.path 9 | 10 | import numpy as np 11 | from poap.controller import BasicWorkerThread, ThreadController 12 | 13 | from pySOT.experimental_design import SymmetricLatinHypercube 14 | from pySOT.optimization_problems import Ackley 15 | from pySOT.strategy import SRBFStrategy 16 | from pySOT.surrogate import GPRegressor 17 | 18 | 19 | def example_gp_regression(): 20 | if not os.path.exists("./logfiles"): 21 | os.makedirs("logfiles") 22 | if os.path.exists("./logfiles/example_gp.log"): 23 | os.remove("./logfiles/example_gp.log") 24 | logging.basicConfig(filename="./logfiles/example_gp.log", level=logging.INFO) 25 | 26 | num_threads = 4 27 | max_evals = 50 28 | 29 | ackley = Ackley(dim=4) 30 | gp = GPRegressor(dim=ackley.dim, lb=ackley.lb, ub=ackley.ub) 31 | slhd = SymmetricLatinHypercube(dim=ackley.dim, num_pts=2 * (ackley.dim + 1)) 32 | 33 | # Create a strategy and a controller 34 | controller = ThreadController() 35 | controller.strategy = SRBFStrategy( 36 | max_evals=max_evals, opt_prob=ackley, exp_design=slhd, surrogate=gp, asynchronous=True, batch_size=num_threads 37 | ) 38 | 39 | print("Number of threads: {}".format(num_threads)) 40 | print("Maximum number of evaluations: {}".format(max_evals)) 41 | print("Strategy: {}".format(controller.strategy.__class__.__name__)) 42 | print("Experimental design: {}".format(slhd.__class__.__name__)) 43 | print("Surrogate: {}".format(gp.__class__.__name__)) 44 | 45 | # Launch the threads and give them access to the objective function 46 | for _ in range(num_threads): 47 | worker = BasicWorkerThread(controller, ackley.eval) 48 | controller.launch_worker(worker) 49 | 50 | # Run the optimization strategy 51 | result = controller.run() 52 | 53 | print("Best value found: {0}".format(result.value)) 54 | print( 55 | "Best solution found: {0}\n".format( 56 | np.array_str(result.params[0], 
max_line_width=np.inf, precision=5, suppress_small=True) 57 | ) 58 | ) 59 | 60 | 61 | if __name__ == "__main__": 62 | example_gp_regression() 63 | -------------------------------------------------------------------------------- /examples/example_lcb.py: -------------------------------------------------------------------------------- 1 | """ 2 | .. module:: example_lower_confidence_bounds 3 | :synopsis: Example Lower Confidence Bounds 4 | .. moduleauthor:: David Eriksson 5 | """ 6 | 7 | import logging 8 | import os.path 9 | 10 | import numpy as np 11 | from poap.controller import BasicWorkerThread, ThreadController 12 | 13 | from pySOT.experimental_design import SymmetricLatinHypercube 14 | from pySOT.optimization_problems import Hartmann6 15 | from pySOT.strategy import LCBStrategy 16 | from pySOT.surrogate import GPRegressor 17 | 18 | 19 | def example_lower_confidence_bounds(): 20 | if not os.path.exists("./logfiles"): 21 | os.makedirs("logfiles") 22 | if os.path.exists("./logfiles/example_lower_confidence_bounds.log"): 23 | os.remove("./logfiles/example_lower_confidence_bounds.log") 24 | logging.basicConfig(filename="./logfiles/example_lower_confidence_bounds.log", level=logging.INFO) 25 | 26 | num_threads = 4 27 | max_evals = 100 28 | 29 | hart6 = Hartmann6() 30 | gp = GPRegressor(dim=hart6.dim, lb=hart6.lb, ub=hart6.ub) 31 | slhd = SymmetricLatinHypercube(dim=hart6.dim, num_pts=2 * (hart6.dim + 1)) 32 | 33 | # Create a strategy and a controller 34 | controller = ThreadController() 35 | controller.strategy = LCBStrategy( 36 | max_evals=max_evals, opt_prob=hart6, exp_design=slhd, surrogate=gp, asynchronous=True 37 | ) 38 | 39 | print("Number of threads: {}".format(num_threads)) 40 | print("Maximum number of evaluations: {}".format(max_evals)) 41 | print("Strategy: {}".format(controller.strategy.__class__.__name__)) 42 | print("Experimental design: {}".format(slhd.__class__.__name__)) 43 | print("Surrogate: {}".format(gp.__class__.__name__)) 44 | 45 | # Launch the 
threads and give them access to the objective function 46 | for _ in range(num_threads): 47 | worker = BasicWorkerThread(controller, hart6.eval) 48 | controller.launch_worker(worker) 49 | 50 | # Run the optimization strategy 51 | result = controller.run() 52 | 53 | print("Best value found: {0}".format(result.value)) 54 | print( 55 | "Best solution found: {0}\n".format( 56 | np.array_str(result.params[0], max_line_width=np.inf, precision=5, suppress_small=True) 57 | ) 58 | ) 59 | 60 | 61 | if __name__ == "__main__": 62 | example_lower_confidence_bounds() 63 | -------------------------------------------------------------------------------- /examples/example_mars.py: -------------------------------------------------------------------------------- 1 | """ 2 | .. module:: example_mars 3 | :synopsis: Example MARS 4 | .. moduleauthor:: David Eriksson 5 | """ 6 | 7 | import logging 8 | import os.path 9 | 10 | import numpy as np 11 | from poap.controller import BasicWorkerThread, ThreadController 12 | 13 | from pySOT.experimental_design import SymmetricLatinHypercube 14 | from pySOT.optimization_problems import Ackley 15 | from pySOT.strategy import SRBFStrategy 16 | 17 | # Try to import MARS 18 | try: 19 | from pySOT.surrogate import MARSInterpolant 20 | except Exception as err: 21 | print("\nERROR: Failed to import MARS. This is likely " "because py-earth is not installed. 
Aborting.....\n") 22 | exit() 23 | 24 | 25 | def example_mars(): 26 | if not os.path.exists("./logfiles"): 27 | os.makedirs("logfiles") 28 | if os.path.exists("./logfiles/example_mars.log"): 29 | os.remove("./logfiles/example_mars.log") 30 | logging.basicConfig(filename="./logfiles/example_mars.log", level=logging.INFO) 31 | 32 | num_threads = 4 33 | max_evals = 200 34 | 35 | ackley = Ackley(dim=5) 36 | try: 37 | mars = MARSInterpolant(dim=ackley.dim, lb=ackley.lb, ub=ackley.ub) 38 | except Exception as e: 39 | print(str(e)) 40 | return 41 | slhd = SymmetricLatinHypercube(dim=ackley.dim, num_pts=2 * (ackley.dim + 1)) 42 | 43 | # Create a strategy and a controller 44 | controller = ThreadController() 45 | controller.strategy = SRBFStrategy( 46 | max_evals=max_evals, opt_prob=ackley, exp_design=slhd, surrogate=mars, asynchronous=True, batch_size=num_threads 47 | ) 48 | 49 | print("Number of threads: {}".format(num_threads)) 50 | print("Maximum number of evaluations: {}".format(max_evals)) 51 | print("Strategy: {}".format(controller.strategy.__class__.__name__)) 52 | print("Experimental design: {}".format(slhd.__class__.__name__)) 53 | print("Surrogate: {}".format(mars.__class__.__name__)) 54 | 55 | # Launch the threads and give them access to the objective function 56 | for _ in range(num_threads): 57 | worker = BasicWorkerThread(controller, ackley.eval) 58 | controller.launch_worker(worker) 59 | 60 | # Run the optimization strategy 61 | result = controller.run() 62 | 63 | print("Best value found: {0}".format(result.value)) 64 | print( 65 | "Best solution found: {0}\n".format( 66 | np.array_str(result.params[0], max_line_width=np.inf, precision=5, suppress_small=True) 67 | ) 68 | ) 69 | 70 | 71 | if __name__ == "__main__": 72 | example_mars() 73 | -------------------------------------------------------------------------------- /examples/example_matlab_engine.py: -------------------------------------------------------------------------------- 1 | """ 2 | .. 
class MatlabWorker(ProcessWorkerThread):
    """POAP worker that evaluates the MATLAB ``ackley`` function through a
    per-worker MATLAB engine session (assigned to ``self.matlab`` by the
    launching code)."""

    def handle_eval(self, record):
        """Evaluate ``record.params[0]`` in MATLAB and report the result.

        On success the record is finished with the objective value; on any
        failure (engine crash, non-numeric output, NaN) the record is
        cancelled and a warning is logged.

        BUG FIX: the original used ``finally`` instead of ``except``, which
        unconditionally logged the warning and called ``finish_cancelled``
        on *every* record -- even after ``finish_success``. ``except`` runs
        only on failure, matching the ``CppSim`` worker in
        example_subprocess.py.
        """
        try:
            x = matlab.double(record.params[0].tolist())
            val = self.matlab.ackley(x)
            if np.isnan(val):
                raise ValueError("NaN returned from MATLAB evaluation")
            self.finish_success(record, val)
        except Exception:
            logging.info("WARNING: Incorrect output or crashed evaluation")
            self.finish_cancelled(record)
{}".format(num_threads)) 60 | print("Maximum number of evaluations: {}".format(max_evals)) 61 | print("Strategy: {}".format(controller.strategy.__class__.__name__)) 62 | print("Experimental design: {}".format(slhd.__class__.__name__)) 63 | print("Surrogate: {}".format(rbf.__class__.__name__)) 64 | 65 | # Launch the threads 66 | for _ in range(num_threads): 67 | try: 68 | worker = MatlabWorker(controller) 69 | worker.matlab = matlab.engine.start_matlab() 70 | controller.launch_worker(worker) 71 | except Exception as e: 72 | print("\nERROR: Failed to initialize a MATLAB session.\n") 73 | print(str(e)) 74 | return 75 | 76 | # Run the optimization strategy 77 | result = controller.run() 78 | 79 | # Print the final result 80 | print("Best value found: {0}".format(result.value)) 81 | print( 82 | "Best solution found: {0}\n".format( 83 | np.array_str(result.params[0], max_line_width=np.inf, precision=5, suppress_small=True) 84 | ) 85 | ) 86 | 87 | 88 | if __name__ == "__main__": 89 | example_matlab_engine() 90 | -------------------------------------------------------------------------------- /examples/example_simple.py: -------------------------------------------------------------------------------- 1 | """ 2 | .. module:: example_simple 3 | :synopsis: Example Simple 4 | .. 
moduleauthor:: David Eriksson 5 | """ 6 | 7 | import logging 8 | import os.path 9 | 10 | import numpy as np 11 | from poap.controller import BasicWorkerThread, ThreadController 12 | 13 | from pySOT.experimental_design import SymmetricLatinHypercube 14 | from pySOT.optimization_problems import Ackley 15 | from pySOT.strategy import SRBFStrategy 16 | from pySOT.surrogate import CubicKernel, LinearTail, RBFInterpolant 17 | 18 | 19 | def example_simple(): 20 | if not os.path.exists("./logfiles"): 21 | os.makedirs("logfiles") 22 | if os.path.exists("./logfiles/example_simple.log"): 23 | os.remove("./logfiles/example_simple.log") 24 | logging.basicConfig(filename="./logfiles/example_simple.log", level=logging.INFO) 25 | 26 | num_threads = 4 27 | max_evals = 500 28 | 29 | ackley = Ackley(dim=10) 30 | rbf = RBFInterpolant(dim=ackley.dim, lb=ackley.lb, ub=ackley.ub, kernel=CubicKernel(), tail=LinearTail(ackley.dim)) 31 | slhd = SymmetricLatinHypercube(dim=ackley.dim, num_pts=2 * (ackley.dim + 1)) 32 | 33 | # Create a strategy and a controller 34 | controller = ThreadController() 35 | controller.strategy = SRBFStrategy( 36 | max_evals=max_evals, opt_prob=ackley, exp_design=slhd, surrogate=rbf, asynchronous=True 37 | ) 38 | 39 | print("Number of threads: {}".format(num_threads)) 40 | print("Maximum number of evaluations: {}".format(max_evals)) 41 | print("Strategy: {}".format(controller.strategy.__class__.__name__)) 42 | print("Experimental design: {}".format(slhd.__class__.__name__)) 43 | print("Surrogate: {}".format(rbf.__class__.__name__)) 44 | 45 | # Launch the threads and give them access to the objective function 46 | for _ in range(num_threads): 47 | worker = BasicWorkerThread(controller, ackley.eval) 48 | controller.launch_worker(worker) 49 | 50 | # Run the optimization strategy 51 | result = controller.run() 52 | 53 | print("Best value found: {0}".format(result.value)) 54 | print( 55 | "Best solution found: {0}\n".format( 56 | np.array_str(result.params[0], 
def example_sop():
    """Run the SOP strategy on the 10-dimensional Ackley function.

    Uses 8 worker threads, a symmetric Latin hypercube design and a cubic
    RBF surrogate.  Progress is logged to ./logfiles/example_sop.log.
    """
    if not os.path.exists("./logfiles"):
        os.makedirs("logfiles")
    # BUG FIX: this example previously logged to example_simple.log, which
    # clobbered the log of the example_simple script; use a file named
    # after this example instead.
    if os.path.exists("./logfiles/example_sop.log"):
        os.remove("./logfiles/example_sop.log")
    logging.basicConfig(filename="./logfiles/example_sop.log", level=logging.INFO)

    num_threads = 8
    max_evals = 500

    ackley = Ackley(dim=10)
    rbf = RBFInterpolant(dim=ackley.dim, lb=ackley.lb, ub=ackley.ub, kernel=CubicKernel(), tail=LinearTail(ackley.dim))
    slhd = SymmetricLatinHypercube(dim=ackley.dim, num_pts=2 * (ackley.dim + 1))

    # Create a strategy and a controller
    controller = ThreadController()
    controller.strategy = SOPStrategy(
        max_evals=max_evals,
        opt_prob=ackley,
        exp_design=slhd,
        surrogate=rbf,
        asynchronous=False,  # synchronous batches of size num_threads
        ncenters=num_threads,
        batch_size=num_threads,
    )

    # Print the configuration derived from the actual objects (the old
    # hard-coded header duplicated these lines and disagreed with them)
    print("\nNumber of threads: {}".format(num_threads))
    print("Maximum number of evaluations: {}".format(max_evals))
    print("Strategy: {}".format(controller.strategy.__class__.__name__))
    print("Experimental design: {}".format(slhd.__class__.__name__))
    print("Surrogate: {}".format(rbf.__class__.__name__))

    # Launch the threads and give them access to the objective function
    for _ in range(num_threads):
        worker = BasicWorkerThread(controller, ackley.eval)
        controller.launch_worker(worker)

    # Run the optimization strategy
    result = controller.run()

    print("Best value found: {0}".format(result.value))
    print(
        "Best solution found: {0}\n".format(
            np.array_str(result.params[0], max_line_width=np.inf, precision=5, suppress_small=True)
        )
    )
moduleauthor:: David Eriksson 5 | """ 6 | 7 | import logging 8 | import os.path 9 | from subprocess import PIPE, Popen 10 | 11 | import numpy as np 12 | from poap.controller import ProcessWorkerThread, ThreadController 13 | 14 | from pySOT.experimental_design import SymmetricLatinHypercube 15 | from pySOT.optimization_problems import Sphere 16 | from pySOT.strategy import SRBFStrategy 17 | from pySOT.surrogate import CubicKernel, LinearTail, RBFInterpolant 18 | 19 | 20 | def array2str(x): 21 | return ",".join(np.char.mod("%f", x.ravel())) 22 | 23 | 24 | # Find path of the executable 25 | path = os.path.dirname(os.path.abspath(__file__)) + "/sphere_ext" 26 | 27 | 28 | class CppSim(ProcessWorkerThread): 29 | def handle_eval(self, record): 30 | try: 31 | self.process = Popen([path, array2str(record.params[0])], stdout=PIPE, bufsize=1, universal_newlines=True) 32 | val = self.process.communicate()[0] 33 | self.finish_success(record, float(val)) 34 | except ValueError: 35 | self.finish_cancelled(record) 36 | logging.info("WARNING: Incorrect output or crashed evaluation") 37 | 38 | 39 | def example_subprocess(): 40 | if not os.path.exists("./logfiles"): 41 | os.makedirs("logfiles") 42 | if os.path.exists("./logfiles/example_subprocess.log"): 43 | os.remove("./logfiles/example_subprocess.log") 44 | logging.basicConfig(filename="./logfiles/example_subprocess.log", level=logging.INFO) 45 | 46 | print("\nNumber of threads: 1") 47 | print("Maximum number of evaluations: 200") 48 | print("Search strategy: Candidate DYCORS") 49 | print("Experimental design: Symmetric Latin Hypercube") 50 | print("Surrogate: Cubic RBF") 51 | 52 | assert os.path.isfile(path), "You need to build sphere_ext" 53 | 54 | num_threads = 1 55 | max_evals = 200 56 | 57 | sphere = Sphere(dim=10) 58 | rbf = RBFInterpolant(dim=sphere.dim, lb=sphere.lb, ub=sphere.ub, kernel=CubicKernel(), tail=LinearTail(sphere.dim)) 59 | slhd = SymmetricLatinHypercube(dim=sphere.dim, num_pts=2 * (sphere.dim + 1)) 60 | 61 | # 
class CppSim(ProcessWorkerThread):
    """Worker that evaluates a point by writing it to this worker's private
    input file and running the external sphere_ext_files binary on it."""

    def handle_eval(self, record):
        """Evaluate ``record`` through the external simulator.

        The input file is removed in a ``finally`` block so it is cleaned up
        even when the subprocess or the float conversion fails (the original
        only removed it on the two anticipated code paths, leaking the file
        on any other exception).
        """
        try:
            # Write the point to this worker's private input file
            with open(self.my_filename, "w") as f:
                f.write(array2str(record.params[0]))

            self.process = Popen([path, self.my_filename], stdout=PIPE, bufsize=1, universal_newlines=True)
            val = self.process.communicate()[0]

            self.finish_success(record, float(val))
        except ValueError:
            # The binary crashed or printed something that is not a float
            logging.info("WARNING: Incorrect output or crashed evaluation")
            self.finish_cancelled(record)
        finally:
            if os.path.exists(self.my_filename):  # Always remove the input file
                os.remove(self.my_filename)
class CppSim(ProcessWorkerThread):
    """Worker that streams partial output from the sumfun_ext binary."""

    def handle_eval(self, record):
        """Watch the simulator's stdout line by line.

        Terminates early with a capped value of 250.0 as soon as the running
        output exceeds 250, cancels the record on unparsable output, and
        otherwise reports the last value printed.  A crash (negative return
        code) or no output at all also cancels the record.
        """
        val = np.nan
        # Launch the simulation and continuously read its stdout
        self.process = Popen([path, array2str(record.params[0])], stdout=PIPE, bufsize=1, universal_newlines=True)

        for line in self.process.stdout:
            try:
                val = float(line.strip())  # Try to parse output
            except ValueError:
                # Nonsense output -> kill the simulation and cancel
                logging.warning("Incorrect output")
                self.process.terminate()
                self.finish_cancelled(record)
                return
            if val > 250.0:
                # Value already too large; stop early and report the cap
                self.process.terminate()
                self.finish_success(record, 250.0)
                return
        self.process.wait()

        rc = self.process.poll()  # Check the return code
        if rc < 0 or np.isnan(val):
            logging.info("WARNING: Incorrect output or crashed evaluation")
            self.finish_cancelled(record)
        else:
            self.finish_success(record, val)
def main_master(opt_prob, num_workers):
    """Master-rank entry point: build the SRBF strategy and run the MPI
    controller on ``opt_prob`` with ``num_workers`` workers."""
    log_file = "./logfiles/mpiexample_mpi.log"
    if not os.path.exists("./logfiles"):
        os.makedirs("logfiles")
    if os.path.exists(log_file):
        os.remove(log_file)
    logging.basicConfig(filename=log_file, level=logging.INFO)

    max_evals = 500

    # Cubic RBF surrogate with a linear tail, symmetric Latin hypercube design
    rbf = RBFInterpolant(
        dim=opt_prob.dim, lb=opt_prob.lb, ub=opt_prob.ub, kernel=CubicKernel(), tail=LinearTail(opt_prob.dim)
    )
    slhd = SymmetricLatinHypercube(dim=opt_prob.dim, num_pts=2 * (opt_prob.dim + 1))

    # Asynchronous SRBF strategy served through the MPI controller
    strategy = SRBFStrategy(
        max_evals=max_evals,
        opt_prob=opt_prob,
        exp_design=slhd,
        surrogate=rbf,
        asynchronous=True,
        batch_size=num_workers,
    )
    controller = MPIController(strategy)

    print("Number of workers: {}".format(num_workers))
    print("Maximum number of evaluations: {}".format(max_evals))
    print("Strategy: {}".format(controller.strategy.__class__.__name__))
    print("Experimental design: {}".format(slhd.__class__.__name__))
    print("Surrogate: {}".format(rbf.__class__.__name__))

    result = controller.run()
    print("Best value found: {0}".format(result.value))
    print(
        "Best solution found: {0}\n".format(
            np.array_str(result.params[0], max_line_width=np.inf, precision=5, suppress_small=True)
        )
    )
class CppSim(MPIProcessWorker):
    """MPI worker that runs the external sphere binary on each point."""

    def eval(self, record_id, params, extra_args=None):
        """Launch the simulator, parse its single float output, and report
        success or cancellation back to the master rank."""
        try:
            cmd = [path, array2str(params[0])]
            self.process = Popen(cmd, stdout=PIPE, bufsize=1, universal_newlines=True)
            out, _err = self.process.communicate()  # stderr is not piped
            self.finish_success(record_id, float(out))
        except ValueError:
            # Empty or non-numeric output means the simulation failed
            logging.info("WARNING: Incorrect output or crashed evaluation")
            self.finish_cancel(record_id)
-------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | /* 9 | Simple routine that takes an input of the form 'x1,x2,...,xn' and converts this 10 | input to a standard vector of floats. With probability 0.9 this routine computes 11 | the sphere function (sum of square) of the input and prints it to the screen and 12 | with probability 0.1 the routine does nothing. This is meant to simulate an 13 | external objective function that randomly crashes and hence gives no meaningful 14 | output. 15 | */ 16 | 17 | int main(int argc, char** argv) { 18 | // Random number generator 19 | std::random_device rand_dev; 20 | std::mt19937 generator(rand_dev()); 21 | std::uniform_real_distribution distr(0.0, 1.0); 22 | 23 | // Pretend the simulation crashes with probability 0.1 24 | if(distr(generator) > 0.1) { 25 | 26 | // Convert input to a standard vector 27 | std::vector vect; 28 | std::stringstream ss(argv[1]); 29 | float f; 30 | 31 | while (ss >> f) { 32 | vect.push_back(f); 33 | if (ss.peek() == ',') 34 | ss.ignore(); 35 | } 36 | printf("%g\n", std::inner_product(vect.begin(), vect.end(), 37 | vect.begin(), 0.0 )); 38 | } 39 | return 0; 40 | } 41 | -------------------------------------------------------------------------------- /examples/sphere_ext_files: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dme65/pySOT/c8f04fd4ed30d49bb61adb008134741319b512a4/examples/sphere_ext_files -------------------------------------------------------------------------------- /examples/sphere_ext_files.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | /* 10 | Simple routine that reads an input of the form 'x1,x2,...,xn' from an external file 11 | and converts this input to a standard 
vector of floats. With probability 0.9 this 12 | routine computes the sphere function (sum of square) of the input and prints it to 13 | the screen and with probability 0.1 the routine does nothing. This is meant to simulate 14 | an external objective function that randomly crashes and hence gives no meaningful output. 15 | */ 16 | 17 | int main(int argc, char** argv) { 18 | // Random number generator 19 | std::random_device rand_dev; 20 | std::mt19937 generator(rand_dev()); 21 | std::uniform_real_distribution distr(0.0, 1.0); 22 | 23 | // Pretend the simulation crashes with probability 0.1 24 | if(distr(generator) > 0.1) { 25 | 26 | // Parse the filename and read the input file 27 | std::ifstream infile(argv[1]); 28 | std::string line; 29 | std::getline(infile, line); 30 | 31 | // Convert input to a standard vector 32 | std::vector vect; 33 | std::stringstream ss(line); 34 | float f; 35 | 36 | while (ss >> f) { 37 | vect.push_back(f); 38 | if (ss.peek() == ',') 39 | ss.ignore(); 40 | } 41 | 42 | printf("%g\n", std::inner_product(vect.begin(), vect.end(), 43 | vect.begin(), 0.0 )); 44 | } 45 | return 0; 46 | } 47 | -------------------------------------------------------------------------------- /examples/sumfun_ext: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dme65/pySOT/c8f04fd4ed30d49bb61adb008134741319b512a4/examples/sumfun_ext -------------------------------------------------------------------------------- /examples/sumfun_ext.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | /* 11 | Simple routine that takes an input of the form 'x1,x2,...,xn' and converts this 12 | input to a standard vector of floats. The range of objective function values is 13 | from 0 to 436.6. 
def weighted_distance_merit(num_pts, surrogate, X, fX, cand, weights, Xpend=None, dtol=1e-3):
    """Select points from ``cand`` with a weighted surrogate/distance merit.

    A candidate's merit is ``w * s + (1 - w) * (1 - d)``, where ``s`` is the
    surrogate prediction rescaled to [0, 1] and ``d`` is the rescaled distance
    to the closest evaluated, pending, or already-selected point.  Candidates
    closer than ``dtol`` to an existing point are excluded.

    :param num_pts: Number of points to generate
    :type num_pts: int
    :param surrogate: Surrogate model object
    :type surrogate: object
    :param X: Previously evaluated points, of size n x dim
    :type X: numpy.array
    :param fX: Values at previously evaluated points, of size n x 1
    :type fX: numpy.array
    :param cand: Candidate points to select from, of size m x dim
    :type cand: numpy.array
    :param weights: num_pts weights in [0, 1] for merit function
    :type weights: list or numpy.array
    :param Xpend: Pending evaluation, of size k x dim
    :type Xpend: numpy.array
    :param dtol: Minimum distance between evaluated and pending points
    :type dtol: float

    :return: The num_pts new points chosen from the candidate points
    :rtype: numpy.array of size num_pts x dim
    """
    dim = X.shape[1]
    if Xpend is None:  # cdist can't handle None arguments
        Xpend = np.empty([0, dim])

    # Distance from each candidate to its closest evaluated/pending point
    known = np.vstack((X, Xpend))
    dmerit = np.amin(scpspatial.distance.cdist(cand, known), axis=1, keepdims=True)

    # Surrogate predictions rescaled to [0, 1]
    fvals = unit_rescale(surrogate.predict(cand))

    selected = np.ones((num_pts, dim))
    for k in range(num_pts):
        weight = weights[k]
        merit = weight * fvals + (1.0 - weight) * (1.0 - unit_rescale(np.copy(dmerit)))
        merit[dmerit < dtol] = np.inf  # Exclude points too close to existing ones

        winner = np.argmin(merit)
        fvals[winner] = np.inf  # Never pick the same candidate twice
        selected[k, :] = cand[winner, :].copy()

        # Fold the freshly selected point into the distance merit
        ds = scpspatial.distance.cdist(cand, np.atleast_2d(selected[k, :]))
        dmerit = np.minimum(dmerit, ds)

    return selected
def candidate_srbf(
    num_pts, opt_prob, surrogate, X, fX, weights, Xpend=None, sampling_radius=0.2, subset=None, dtol=1e-3, num_cand=None
):
    """Select new evaluations using Stochastic RBF (SRBF).

    Candidates are drawn from truncated normals centered at the best point
    found so far, with integer variables rounded, and are then scored with
    the weighted distance merit.

    :param num_pts: Number of points to generate
    :type num_pts: int
    :param opt_prob: Optimization problem
    :type opt_prob: object
    :param surrogate: Surrogate model object
    :type surrogate: object
    :param X: Previously evaluated points, of size n x dim
    :type X: numpy.array
    :param fX: Values at previously evaluated points, of size n x 1
    :type fX: numpy.array
    :param weights: num_pts weights in [0, 1] for merit function
    :type weights: list or numpy.array
    :param Xpend: Pending evaluation, of size k x dim
    :type Xpend: numpy.array
    :param sampling_radius: Perturbation radius
    :type sampling_radius: float
    :param subset: Coordinates that should be perturbed, use None for all
    :type subset: list or numpy.array
    :param dtol: Minimum distance between evaluated and pending points
    :type dtol: float
    :param num_cand: Number of candidate points
    :type num_cand: int

    :return: The num_pts new points to evaluate
    :rtype: numpy.array of size num_pts x dim
    """
    # Center the perturbations on the best point found so far
    xbest = np.copy(X[np.argmin(fX), :]).ravel()

    # Fill in defaults
    if num_cand is None:
        num_cand = 100 * opt_prob.dim
    if subset is None:
        subset = np.arange(0, opt_prob.dim)

    # Per-dimension perturbation scales; integer variables need a scale of
    # at least 1 so that rounding can actually move them
    scalefactors = sampling_radius * (opt_prob.ub - opt_prob.lb)
    int_in_subset = np.intersect1d(opt_prob.int_var, subset)
    if len(int_in_subset) > 0:
        scalefactors[int_in_subset] = np.maximum(scalefactors[int_in_subset], 1.0)

    # Start every candidate at xbest, then perturb the chosen coordinates
    cand = np.multiply(np.ones((num_cand, opt_prob.dim)), xbest)
    for i in subset:
        lower, upper, sigma = opt_prob.lb[i], opt_prob.ub[i], scalefactors[i]
        cand[:, i] = stats.truncnorm.rvs(
            a=(lower - xbest[i]) / sigma, b=(upper - xbest[i]) / sigma, loc=xbest[i], scale=sigma, size=num_cand
        )

    # Round integer variables
    cand = round_vars(cand, opt_prob.int_var, opt_prob.lb, opt_prob.ub)

    # Score and select
    return weighted_distance_merit(
        num_pts=num_pts, surrogate=surrogate, X=X, fX=fX, Xpend=Xpend, cand=cand, dtol=dtol, weights=weights
    )
num_cand is None: 39 | num_cand = 100 * opt_prob.dim 40 | if subset is None: 41 | subset = np.arange(0, opt_prob.dim) 42 | 43 | # Generate uniformly random candidate points 44 | cand = np.multiply(np.ones((num_cand, opt_prob.dim)), xbest) 45 | cand[:, subset] = np.random.uniform(opt_prob.lb[subset], opt_prob.ub[subset], (num_cand, len(subset))) 46 | 47 | # Round integer variables 48 | cand = round_vars(cand, opt_prob.int_var, opt_prob.lb, opt_prob.ub) 49 | 50 | # Make selections 51 | return weighted_distance_merit( 52 | num_pts=num_pts, surrogate=surrogate, X=X, fX=fX, Xpend=Xpend, cand=cand, dtol=dtol, weights=weights 53 | ) 54 | -------------------------------------------------------------------------------- /pySOT/auxiliary_problems/ei_ga.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from ..utils import GeneticAlgorithm as GA 4 | from ..utils import round_vars 5 | from .ei_merit import ei_merit 6 | 7 | 8 | def ei_ga(num_pts, opt_prob, surrogate, X, fX, Xpend=None, dtol=1e-3, ei_tol=1e-6): 9 | """Maximize EI using a genetic algorithm. 
10 | 11 | :param num_pts: Number of points to generate 12 | :type num_pts: int 13 | :param opt_prob: Optimization problem 14 | :type opt_prob: object 15 | :param surrogate: Surrogate model object 16 | :type surrogate: object 17 | :param X: Previously evaluated points, of size n x dim 18 | :type X: numpy.array 19 | :param fX: Values at previously evaluated points, of size n x 1 20 | :type fX: numpy.array 21 | :param Xpend: Pending evaluations 22 | :type Xpend: numpy.array 23 | :param dtol: Minimum distance between evaluated and pending points 24 | :type dtol: float 25 | :param ei_tol: Return None if we don't find an EI of at least this value 26 | :type ei_tol: float 27 | 28 | :return: num_pts new points to evaluate 29 | :rtype: numpy.array of size num_pts x dim 30 | """ 31 | if Xpend is None: # cdist can't handle None arguments 32 | Xpend = np.empty([0, opt_prob.dim]) 33 | XX = np.vstack((X, Xpend)) 34 | 35 | new_points = np.zeros((num_pts, opt_prob.dim)) 36 | for i in range(num_pts): 37 | 38 | def obj(Y): 39 | """Round integer variables and compute negative EI.""" 40 | Y = round_vars(Y.copy(), opt_prob.int_var, opt_prob.lb, opt_prob.ub) 41 | ei = ei_merit(X=Y, surrogate=surrogate, fX=fX, XX=XX, dtol=dtol) 42 | return -ei # Remember that we are minimizing!!! 
43 | 44 | ga = GA( 45 | function=obj, 46 | dim=opt_prob.dim, 47 | lb=opt_prob.lb, 48 | ub=opt_prob.ub, 49 | int_var=opt_prob.int_var, 50 | pop_size=max([2 * opt_prob.dim, 100]), 51 | num_gen=100, 52 | ) 53 | x_best, f_min = ga.optimize() 54 | 55 | ei_max = -f_min 56 | if ei_max < ei_tol: 57 | return None # Give up 58 | 59 | new_points[i, :] = x_best 60 | XX = np.vstack((XX, x_best)) 61 | return new_points 62 | -------------------------------------------------------------------------------- /pySOT/auxiliary_problems/ei_merit.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import scipy.spatial as scpspatial 3 | from scipy.stats import norm 4 | 5 | 6 | def ei_merit(X, surrogate, fX, XX=None, dtol=0): 7 | """Compute the expected improvement merit function. 8 | 9 | :param X: Points where to compute EI, of size n x dim 10 | :type X: numpy.array 11 | :param surrogate: Surrogate model object, must implement predict_std 12 | :type surrogate: object 13 | :param fX: Values at previously evaluated points, of size m x 1 14 | :type fX: numpy.array 15 | :param XX: Previously evaluated points, of size m x 1 16 | :type XX: numpy.array 17 | :param dtol: Minimum distance between evaluated and pending points 18 | :type dtol: float 19 | 20 | :return: Evaluate the expected improvement for points X 21 | :rtype: numpy.array of length X.shape[0] 22 | """ 23 | mu, sig = surrogate.predict(X), surrogate.predict_std(X) 24 | gamma = (np.min(fX) - mu) / sig 25 | beta = gamma * norm.cdf(gamma) + norm.pdf(gamma) 26 | ei = sig * beta 27 | 28 | if dtol > 0: 29 | dists = scpspatial.distance.cdist(X, XX) 30 | dmerit = np.amin(dists, axis=1, keepdims=True) 31 | ei[dmerit < dtol] = 0.0 32 | return ei 33 | -------------------------------------------------------------------------------- /pySOT/auxiliary_problems/lcb_ga.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from 
..utils import GeneticAlgorithm as GA 4 | from ..utils import round_vars 5 | from .lcb_merit import lcb_merit 6 | 7 | 8 | def lcb_ga(num_pts, opt_prob, surrogate, X, fX, Xpend=None, kappa=2.0, dtol=1e-3, lcb_target=None): 9 | """Minimize the LCB using a genetic algorithm. 10 | 11 | :param num_pts: Number of points to generate 12 | :type num_pts: int 13 | :param opt_prob: Optimization problem 14 | :type opt_prob: object 15 | :param surrogate: Surrogate model object 16 | :type surrogate: object 17 | :param X: Previously evaluated points, of size n x dim 18 | :type X: numpy.array 19 | :param fX: Values at previously evaluated points, of size n x 1 20 | :type fX: numpy.array 21 | :param Xpend: Pending evaluations 22 | :type Xpend: numpy.array 23 | :param dtol: Minimum distance between evaluated and pending points 24 | :type dtol: float 25 | :param lcb_target: Return None if we don't find an LCB value <= lcb_target 26 | :type lcb_target: float 27 | 28 | :return: num_pts new points to evaluate 29 | :rtype: numpy.array of size num_pts x dim 30 | """ 31 | 32 | if Xpend is None: # cdist can't handle None arguments 33 | Xpend = np.empty([0, opt_prob.dim]) 34 | XX = np.vstack((X, Xpend)) 35 | 36 | new_points = np.zeros((num_pts, opt_prob.dim)) 37 | for i in range(num_pts): 38 | 39 | def obj(Y): 40 | """Round integer variables and compute LCB.""" 41 | Y = round_vars(Y.copy(), opt_prob.int_var, opt_prob.lb, opt_prob.ub) 42 | return lcb_merit(X=Y, surrogate=surrogate, fX=fX, XX=XX, dtol=dtol, kappa=kappa) 43 | 44 | ga = GA( 45 | function=obj, 46 | dim=opt_prob.dim, 47 | lb=opt_prob.lb, 48 | ub=opt_prob.ub, 49 | int_var=opt_prob.int_var, 50 | pop_size=max([2 * opt_prob.dim, 100]), 51 | num_gen=100, 52 | ) 53 | x_best, f_min = ga.optimize() 54 | 55 | if f_min > lcb_target: 56 | return None # Give up 57 | 58 | new_points[i, :] = x_best 59 | XX = np.vstack((XX, x_best)) 60 | return new_points 61 | -------------------------------------------------------------------------------- 
/pySOT/auxiliary_problems/lcb_merit.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import scipy.spatial as scpspatial 3 | 4 | 5 | def lcb_merit(X, surrogate, fX, XX=None, dtol=0.0, kappa=2.0): 6 | """Compute the lcb merit function. 7 | 8 | :param X: Points where to compute LCB, of size n x dim 9 | :type X: numpy.array 10 | :param surrogate: Surrogate model object, must implement predict_std 11 | :type surrogate: object 12 | :param fX: Values at previously evaluated points, of size m x 1 13 | :type fX: numpy.array 14 | :param XX: Previously evaluated points, of size m x 1 15 | :type XX: numpy.array 16 | :param dtol: Minimum distance between evaluated and pending points 17 | :type dtol: float 18 | :param kappa: Constant in front of standard deviation 19 | Default: 2.0 20 | :type kappa: float 21 | 22 | :return: Evaluate the lower confidence bound for points X 23 | :rtype: numpy.array of length X.shape[0] 24 | """ 25 | mu, sig = surrogate.predict(X), surrogate.predict_std(X) 26 | lcb = mu - kappa * sig 27 | 28 | if dtol > 0: 29 | dists = scpspatial.distance.cdist(X, XX) 30 | dmerit = np.amin(dists, axis=1, keepdims=True) 31 | lcb[dmerit < dtol] = np.inf 32 | return lcb 33 | -------------------------------------------------------------------------------- /pySOT/controller/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | from .controller import CheckpointController 3 | 4 | __all__ = ["CheckpointController"] 5 | -------------------------------------------------------------------------------- /pySOT/controller/controller.py: -------------------------------------------------------------------------------- 1 | """ 2 | .. module:: controller 3 | :synopsis: pySOT controllers 4 | 5 | .. 
class CheckpointController(object):
    """Checkpoint controller

    Controller that uses dill to take snapshots of the strategy each time
    an evaluation is completed, killed, or the run is terminated. We assume
    that the strategy can be pickled, or this won't work. We currently do not
    respect potential termination callbacks and failed evaluation callbacks.
    The strategy needs to implement a resume method that is called when a run
    is resumed. The strategy object can assume that all pending evaluations
    have been killed and that their respective callbacks won't be executed

    :param controller: POAP controller
    :type controller: Controller
    :param fname: Filename for checkpoint file (file cannot exist for new run)
    :type fname: string

    :ivar controller: POAP controller
    :ivar fname: Filename for snapshot
    """

    def __init__(self, controller, fname="checkpoint.pysot"):
        # Hook into the controller: every new feval gets our on_update
        # callback (via _add_on_update), and termination takes a snapshot.
        controller.add_feval_callback(self._add_on_update)
        controller.add_feval_callback(self.on_new_feval)
        controller.add_term_callback(self.on_terminate)
        self.controller = controller
        self.fname = fname

    def _add_on_update(self, record):
        """Internal handler -- add on_update callback to all new fevals.

        :param record: Evaluation record
        :type record: EvalRecord
        """
        record.add_callback(self.on_update)

    def on_new_feval(self, record):
        """Handle new function evaluation request (no-op by default).

        :param record: Evaluation record
        :type record: EvalRecord
        """
        pass

    def _save(self):
        """Save the strategy by calling the save method."""
        self.controller.strategy.save(self.fname)

    def resume(self):
        """Resume an optimization run.

        Loads the pickled strategy, re-points the controller at the
        restored feval history, and notifies the strategy before running.

        :return: The record corresponding to the best solution
        :rtype: EvalRecord
        """
        if not os.path.isfile(self.fname):
            raise IOError("Checkpoint file does not exist")
        # NOTE(review): `input` shadows the builtin of the same name here.
        with open(self.fname, "rb") as input:
            self.controller.strategy = dill.load(input)
        # Keep controller and restored strategy in sync on the history
        fevals = copy.copy(self.controller.strategy.fevals)
        self.controller.fevals = fevals
        self.controller.strategy.resume()
        return self.controller.run()

    def on_update(self, record):
        """Handle feval update by dispatching on the record's state.

        :param record: Evaluation record
        :type record: EvalRecord
        """
        if record.is_completed:
            self.on_complete(record)
        elif record.is_killed:
            self.on_kill(record)
        elif record.is_cancelled:
            self.on_cancel(record)

    def on_complete(self, record):
        """Handle feval completion by taking a snapshot.

        :param record: Evaluation record
        :type record: EvalRecord
        """
        self._save()

    def on_kill(self, record):
        """Handle record killed by taking a snapshot.

        :param record: Evaluation record
        :type record: EvalRecord
        """
        self._save()

    def on_cancel(self, record):
        """Handle record cancelled by taking a snapshot.

        :param record: Evaluation record
        :type record: EvalRecord
        """
        self._save()

    def on_terminate(self):
        """Handle termination by taking a final snapshot."""
        self._save()

    def run(self):
        """Start the optimization run.

        Make sure we do not overwrite any existing checkpointing files

        :return: The record corresponding to the best solution
        :rtype: EvalRecord
        """
        if os.path.isfile(self.fname):
            raise IOError("Checkpoint file already exists, aborting...")
        return self.controller.run()
26 | 27 | :ivar dim: Number of dimensions 28 | :ivar num_pts: Number of points in the experimental design 29 | """ 30 | 31 | __metaclass__ = abc.ABCMeta 32 | 33 | def __init__(self): # pragma: no cover 34 | self.dim = None 35 | self.num_pts = None 36 | 37 | @abc.abstractmethod 38 | def generate_points(self, lb=None, ub=None, int_var=None): # pragma: no cover 39 | pass 40 | 41 | 42 | def _expdes_dist(gen, iterations, lb, ub, int_var): 43 | """Helper method for picking the best experimental design. 44 | 45 | We generate iterations designs and picks the one the maximizes the 46 | minimum distance between points. This isn't a perfect criterion, but 47 | it will help avoid rank-defficient designs such as y=x. 48 | 49 | :param lb: Lower bounds 50 | :type lb: numpy.array 51 | :param ub: Upper bounds 52 | :type ub: numpy.array 53 | :param int_var: Indices of integer variables. 54 | :type int_var: numpy.array 55 | 56 | :return: Experimental design of size num_pts x dim 57 | :rtype: numpy.ndarray 58 | """ 59 | 60 | X = None 61 | best_score = 0 62 | for _ in range(iterations): 63 | cand = gen() # Generate a new design 64 | if all([x is not None for x in [lb, ub]]): # Map and round 65 | cand = round_vars(from_unit_box(cand, lb, ub), int_var, lb, ub) 66 | 67 | dists = cdist(cand, cand) 68 | np.fill_diagonal(dists, np.inf) # Since these are zero 69 | score = dists.min().min() 70 | 71 | if score > best_score and rank(cand) == cand.shape[1]: 72 | best_score = score 73 | X = cand.copy() 74 | 75 | if X is None: 76 | raise ValueError("No valid design found, increase num_pts?") 77 | return X 78 | -------------------------------------------------------------------------------- /pySOT/experimental_design/lhd.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | 3 | import numpy as np 4 | import pyDOE2 as pydoe 5 | 6 | from .experimental_design import ExperimentalDesign, _expdes_dist 7 | 8 | 9 | class 
class LatinHypercube(ExperimentalDesign):
    """Latin hypercube experimental design.

    :param dim: Number of dimensions
    :type dim: int
    :param num_pts: Number of desired sampling points
    :type num_pts: int
    :param criterion: Previously passed to pyDOE, now deprecated
    :type criterion: string
    :param iterations: Number of designs to choose from
    :type iterations: int

    :ivar dim: Number of dimensions
    :ivar num_pts: Number of points in the experimental design
    :ivar iterations: Number of candidate designs generated and compared
    """

    def __init__(self, dim, num_pts, criterion=None, iterations=1000):
        # criterion is kept only for backward compatibility
        if criterion is not None:
            warnings.warn("Criterion is deprecated and will be removed.")
        self.dim = dim
        self.num_pts = num_pts
        self.iterations = iterations

    def generate_points(self, lb=None, ub=None, int_var=None):
        """Generate a new experimental design.

        You can specify lb, ub, int_var to have the design mapped to a
        specific domain. These inputs are ignored if one of lb
        or ub is None. The design is generated in [0, 1]^d in this case.

        :param lb: Lower bounds
        :type lb: numpy.array
        :param ub: Upper bounds
        :type ub: numpy.array
        :param int_var: Indices of integer variables. If None, [], or
                        np.array([]) we assume all variables are continuous.
        :type int_var: numpy.array

        :return: Experimental design of size num_pts x dim
        :rtype: numpy.ndarray
        """
        if int_var is None or len(int_var) == 0:
            int_var = np.array([])

        # Draw single LHDs and let the helper pick the best one
        return _expdes_dist(
            lambda: pydoe.lhs(self.dim, self.num_pts, iterations=1), self.iterations, lb, ub, int_var
        )
39 | :type int_var: numpy.array 40 | 41 | :return: Experimental design of size num_pts x dim 42 | :rtype: numpy.ndarray 43 | """ 44 | if int_var is None or len(int_var) == 0: 45 | int_var = np.array([]) 46 | 47 | def wrapper(): 48 | return self._slhd() 49 | 50 | return _expdes_dist(wrapper, self.iterations, lb, ub, int_var) 51 | 52 | def _slhd(self): 53 | """Generate a symmetric Latin hypercube design in the unit hypercube. 54 | 55 | :return: Symmetric Latin hypercube design in the unit hypercube 56 | of size num_pts x dim 57 | :rtype: numpy.ndarray 58 | """ 59 | # Generate a one-dimensional array based on sample number 60 | points = np.zeros([self.num_pts, self.dim]) 61 | points[:, 0] = np.arange(1, self.num_pts + 1) 62 | 63 | # Get the last index of the row in the top half of the hypercube 64 | middleind = self.num_pts // 2 65 | 66 | # special manipulation if odd number of rows 67 | if self.num_pts % 2 == 1: 68 | points[middleind, :] = middleind + 1 69 | 70 | # Generate the top half of the hypercube matrix 71 | for j in range(1, self.dim): 72 | for i in range(middleind): 73 | if np.random.random() < 0.5: 74 | points[i, j] = self.num_pts - i 75 | else: 76 | points[i, j] = i + 1 77 | np.random.shuffle(points[:middleind, j]) 78 | 79 | # Generate the bottom half of the hypercube matrix 80 | for i in range(middleind, self.num_pts): 81 | points[i, :] = self.num_pts + 1 - points[self.num_pts - 1 - i, :] 82 | 83 | return (points - 1) / (self.num_pts - 1) # Map to [0, 1]^d 84 | -------------------------------------------------------------------------------- /pySOT/experimental_design/two_factorial.py: -------------------------------------------------------------------------------- 1 | import itertools 2 | 3 | import numpy as np 4 | 5 | from ..utils import from_unit_box, round_vars 6 | from .experimental_design import ExperimentalDesign 7 | 8 | 9 | class TwoFactorial(ExperimentalDesign): 10 | """Two-factorial experimental design. 
class TwoFactorial(ExperimentalDesign):
    """Two-factorial experimental design.

    The two-factorial experimental design consists of the corners
    of the unit hypercube, and hence :math:`2^{dim}` points.

    :param dim: Number of dimensions
    :type dim: int

    :ivar dim: Number of dimensions
    :ivar num_pts: Number of points in the experimental design

    :raises ValueError: If dim >= 15
    """

    def __init__(self, dim):
        # 2^15 corners is already unreasonably many evaluations
        if dim >= 15:
            raise ValueError("Refusing to use >= 2^15 points.")
        self.dim = dim
        self.num_pts = 2 ** dim

    def generate_points(self, lb=None, ub=None, int_var=None):
        """Generate a two factorial design in the unit hypercube.

        You can specify lb, ub, int_var to have the design mapped to a
        specific domain. These inputs are ignored if one of lb
        or ub is None. The design is generated in [0, 1]^d in this case.

        :param lb: Lower bounds
        :type lb: numpy.array
        :param ub: Upper bounds
        :type ub: numpy.array
        :param int_var: Indices of integer variables. If None, [], or
                        np.array([]) we assume all variables are continuous.
        :type int_var: numpy.array

        :return: Two factorial design in unit hypercube of size num_pts x dim
        :rtype: numpy.array
        """
        if int_var is None or len(int_var) == 0:
            int_var = np.array([])

        # All {0, 1}^dim corner combinations
        corners = np.array(list(itertools.product([0, 1], repeat=self.dim)))
        if lb is not None and ub is not None:  # Map and round
            corners = round_vars(from_unit_box(corners, lb, ub), int_var, lb, ub)
        return corners
-------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from .optimization_problem import OptimizationProblem 4 | 5 | 6 | class Ackley(OptimizationProblem): 7 | """Ackley function 8 | 9 | .. math:: 10 | f(x_1,\\ldots,x_n) = -20\\exp\\left( -0.2 \\sqrt{\\frac{1}{n} \ 11 | \\sum_{j=1}^n x_j^2} \\right) -\\exp \\left( \\frac{1}{n} \ 12 | \\sum{j=1}^n \\cos(2 \\pi x_j) \\right) + 20 - e 13 | 14 | subject to 15 | 16 | .. math:: 17 | -15 \\leq x_i \\leq 20 18 | 19 | Global optimum: :math:`f(0,0,...,0)=0` 20 | 21 | :ivar dim: Number of dimensions 22 | :ivar lb: Lower variable bounds 23 | :ivar ub: Upper variable bounds 24 | :ivar int_var: Integer variables 25 | :ivar cont_var: Continuous variables 26 | :ivar min: Global minimum value 27 | :ivar minimum: Global minimizer 28 | :ivar info: String with problem info 29 | """ 30 | 31 | def __init__(self, dim=10): 32 | self.dim = dim 33 | self.min = 0 34 | self.minimum = np.zeros(dim) 35 | self.lb = -15 * np.ones(dim) 36 | self.ub = 20 * np.ones(dim) 37 | self.int_var = np.array([]) 38 | self.cont_var = np.arange(0, dim) 39 | self.info = str(dim) + "-dimensional Ackley function \n" + "Global optimum: f(0,0,...,0) = 0" 40 | 41 | def eval(self, x): 42 | """Evaluate the Ackley function at x 43 | 44 | :param x: Data point 45 | :type x: numpy.array 46 | :return: Value at x 47 | :rtype: float 48 | """ 49 | self.__check_input__(x) 50 | d = float(self.dim) 51 | return ( 52 | -20.0 * np.exp(-0.2 * np.sqrt(np.sum(x ** 2) / d)) 53 | - np.exp(np.sum(np.cos(2.0 * np.pi * x)) / d) 54 | + 20 55 | + np.exp(1) 56 | ) 57 | -------------------------------------------------------------------------------- /pySOT/optimization_problems/branin.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from .optimization_problem import OptimizationProblem 4 | 5 | 6 | class Branin(OptimizationProblem): 7 | """Branin function 8 | 9 | Details: 
http://www.sfu.ca/~ssurjano/branin.html 10 | 11 | Global optimum: :math:`f(-\\pi,12.275)=0.397887` 12 | 13 | :ivar dim: Number of dimensions 14 | :ivar lb: Lower variable bounds 15 | :ivar ub: Upper variable bounds 16 | :ivar int_var: Integer variables 17 | :ivar cont_var: Continuous variables 18 | :ivar min: Global minimum value 19 | :ivar minimum: Global minimizer 20 | :ivar info: String with problem info 21 | """ 22 | 23 | def __init__(self): 24 | self.min = 0.397887 25 | self.minimum = np.array([-np.pi, 12.275]) 26 | self.dim = 2 27 | self.lb = -3.0 * np.ones(2) 28 | self.ub = 3.0 * np.ones(2) 29 | self.int_var = np.array([]) 30 | self.cont_var = np.arange(0, 2) 31 | self.info = "2-dimensional Branin function \nGlobal optimum: " + "f(-pi, 12.275) = 0.397887" 32 | 33 | def eval(self, x): 34 | """Evaluate the Branin function at x 35 | 36 | :param x: Data point 37 | :type x: numpy.array 38 | :return: Value at x 39 | :rtype: float 40 | """ 41 | self.__check_input__(x) 42 | x1 = x[0] 43 | x2 = x[1] 44 | 45 | t = 1 / (8 * np.pi) 46 | s = 10 47 | r = 6 48 | c = 5 / np.pi 49 | b = 5.1 / (4 * np.pi ** 2) 50 | a = 1 51 | 52 | term1 = a * (x2 - b * x1 ** 2 + c * x1 - r) ** 2 53 | term2 = s * (1 - t) * np.cos(x1) 54 | 55 | return term1 + term2 + s 56 | -------------------------------------------------------------------------------- /pySOT/optimization_problems/exponential.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from .optimization_problem import OptimizationProblem 4 | 5 | 6 | class Exponential(OptimizationProblem): 7 | """Exponential function 8 | 9 | .. math:: 10 | f(x_1,\\ldots,x_n) = \\sum_{j=1}^n e^{jx_j} - \\sum_{j=1} e^{-5.12 j} 11 | 12 | subject to 13 | 14 | .. 
math:: 15 | -5.12 \\leq x_i \\leq 5.12 16 | 17 | Global optimum: :math:`f(0,0,...,0)=0` 18 | 19 | :ivar dim: Number of dimensions 20 | :ivar lb: Lower variable bounds 21 | :ivar ub: Upper variable bounds 22 | :ivar int_var: Integer variables 23 | :ivar cont_var: Continuous variables 24 | :ivar min: Global minimum value 25 | :ivar minimum: Global minimizer 26 | :ivar info: String with problem info 27 | """ 28 | 29 | def __init__(self, dim=10): 30 | self.dim = dim 31 | self.min = 0 32 | self.minimum = -5.12 * np.ones(dim) 33 | self.lb = -5.12 * np.ones(dim) 34 | self.ub = 5.12 * np.ones(dim) 35 | self.int_var = np.array([]) 36 | self.cont_var = np.arange(0, dim) 37 | self.info = str(dim) + "-dimensional Exponential function \n" + "Global optimum: f(-5.12,-5.12,...,-5.12) = 0" 38 | 39 | def eval(self, x): 40 | """Evaluate the Exponential function at x. 41 | 42 | :param x: Data point 43 | :type x: numpy.array 44 | :return: Value at x 45 | :rtype: float 46 | """ 47 | self.__check_input__(x) 48 | total = 0.0 49 | for i in range(len(x)): 50 | total += np.exp((i + 1) * x[i - 1]) - np.exp(-5.12 * (i + 1)) 51 | return total 52 | -------------------------------------------------------------------------------- /pySOT/optimization_problems/goldstein_price.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from .optimization_problem import OptimizationProblem 4 | 5 | 6 | class GoldsteinPrice(OptimizationProblem): 7 | def __init__(self): 8 | self.info = "2-dimensional Goldstein-Price function" 9 | self.min = 3.0 10 | self.minimum = np.array([0, -1]) 11 | self.dim = 2 12 | self.lb = -2.0 * np.ones(2) 13 | self.ub = 2.0 * np.ones(2) 14 | self.int_var = np.array([]) 15 | self.cont_var = np.arange(0, 2) 16 | 17 | def eval(self, x): 18 | """Evaluate the GoldStein Price function at x 19 | 20 | :param x: Data point 21 | :type x: numpy.array 22 | :return: Value at x 23 | :rtype: float 24 | """ 25 | self.__check_input__(x) 
26 | 27 | x1 = x[0] 28 | x2 = x[1] 29 | 30 | fact1a = (x1 + x2 + 1) ** 2 31 | fact1b = 19 - 14 * x1 + 3 * x1 ** 2 - 14 * x2 + 6 * x1 * x2 + 3 * x2 ** 2 32 | fact1 = 1 + fact1a * fact1b 33 | 34 | fact2a = (2 * x1 - 3 * x2) ** 2 35 | fact2b = 18 - 32 * x1 + 12 * x1 ** 2 + 48 * x2 - 36 * x1 * x2 + 27 * x2 ** 2 36 | fact2 = 30 + fact2a * fact2b 37 | 38 | return fact1 * fact2 39 | -------------------------------------------------------------------------------- /pySOT/optimization_problems/griewank.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from .optimization_problem import OptimizationProblem 4 | 5 | 6 | class Griewank(OptimizationProblem): 7 | """Griewank function 8 | 9 | .. math:: 10 | f(x_1,\\ldots,x_n) = 1 + \\frac{1}{4000} \\sum_{j=1}^n x_j^2 - \ 11 | \\prod_{j=1}^n \\cos \\left( \\frac{x_i}{\\sqrt{i}} \\right) 12 | 13 | subject to 14 | 15 | .. math:: 16 | -512 \\leq x_i \\leq 512 17 | 18 | Global optimum: :math:`f(0,0,...,0)=0` 19 | 20 | :ivar dim: Number of dimensions 21 | :ivar lb: Lower variable bounds 22 | :ivar ub: Upper variable bounds 23 | :ivar int_var: Integer variables 24 | :ivar cont_var: Continuous variables 25 | :ivar min: Global minimum value 26 | :ivar minimum: Global minimizer 27 | :ivar info: String with problem info 28 | """ 29 | 30 | def __init__(self, dim=10): 31 | self.dim = dim 32 | self.min = 0 33 | self.minimum = np.zeros(dim) 34 | self.lb = -512 * np.ones(dim) 35 | self.ub = 512 * np.ones(dim) 36 | self.int_var = np.array([]) 37 | self.cont_var = np.arange(0, dim) 38 | self.info = str(dim) + "-dimensional Griewank function \n" + "Global optimum: f(0,0,...,0) = 0" 39 | 40 | def eval(self, x): 41 | """Evaluate the Griewank function at x. 
# File: pySOT/optimization_problems/hartmann3.py
import numpy as np

from .optimization_problem import OptimizationProblem


class Hartmann3(OptimizationProblem):
    """Hartmann 3 test function.

    Details: http://www.sfu.ca/~ssurjano/hart3.html

    Global optimum: :math:`f(0.114614,0.555649,0.852547)=-3.86278`

    :ivar dim: Number of dimensions
    :ivar lb: Lower variable bounds
    :ivar ub: Upper variable bounds
    :ivar int_var: Integer variables
    :ivar cont_var: Continuous variables
    :ivar min: Global minimum value
    :ivar minimum: Global minimizer
    :ivar info: String with problem info
    """

    def __init__(self):
        self.min = -3.86278
        self.minimum = np.array([0.114614, 0.555649, 0.852547])
        self.dim = 3
        self.lb = np.zeros(3)
        self.ub = np.ones(3)
        self.int_var = np.array([])
        self.cont_var = np.arange(0, 3)
        self.info = "3-dimensional Hartmann function \nGlobal optimum: " + "f(0.114614,0.555649,0.852547) = -3.86278"

    def eval(self, x):
        """Evaluate the Hartmann 3 function at x.

        :param x: Data point
        :type x: numpy.array
        :return: Value at x
        :rtype: float
        """
        self.__check_input__(x)
        # Coefficients of the standard Hartmann-3 definition.
        alpha = np.array([1, 1.2, 3, 3.2])
        A = np.array([[3.0, 10.0, 30.0], [0.1, 10.0, 35.0], [3.0, 10.0, 30.0], [0.1, 10.0, 35.0]])
        P = np.array(
            [[0.3689, 0.1170, 0.2673], [0.4699, 0.4387, 0.747], [0.1091, 0.8732, 0.5547], [0.0381, 0.5743, 0.8828]]
        )
        # Weighted sum of four Gaussian-shaped bumps.
        total = 0
        for weight, a_row, p_row in zip(alpha, A, P):
            exponent = 0
            for j in range(3):
                exponent += a_row[j] * (x[j] - p_row[j]) ** 2
            total += weight * np.exp(-exponent)
        return -total
# File: pySOT/optimization_problems/himmelblau.py
import numpy as np

from .optimization_problem import OptimizationProblem


class Himmelblau(OptimizationProblem):
    """Himmelblau function (dimension-normalized Styblinski-Tang form).

    .. math::
        f(x_1,\\ldots,x_n) =
        \\frac{1}{2n} \\sum_{i=1}^n (x_i^4 - 16x_i^2 + 5x_i)

    Global optimum: :math:`f(-2.903,...,-2.903)=-39.166`

    The 1/(2n) scaling makes the optimum value independent of the dimension.

    :ivar dim: Number of dimensions
    :ivar lb: Lower variable bounds
    :ivar ub: Upper variable bounds
    :ivar int_var: Integer variables
    :ivar cont_var: Continuous variables
    :ivar min: Global minimum value
    :ivar minimum: Global minimizer
    :ivar info: String with problem info
    """

    def __init__(self, dim=10):
        self.dim = dim
        self.min = -39.166165703771412
        self.minimum = -2.903534027771178 * np.ones(dim)
        self.lb = -5.12 * np.ones(dim)
        self.ub = 5.12 * np.ones(dim)
        self.int_var = np.array([])
        self.cont_var = np.arange(0, dim)
        self.info = str(dim) + "-dimensional Himmelblau function \n" + "Global optimum: f(-2.903,...,-2.903) = -39.166"

    def eval(self, x):
        """Evaluate the Himmelblau function at x.

        :param x: Data point
        :type x: numpy.array
        :return: Value at x
        :rtype: float
        """
        self.__check_input__(x)
        # 0.5 * sum / dim == (1 / (2n)) * sum of the Styblinski-Tang terms.
        return 0.5 * np.sum(x ** 4 - 16 * x ** 2 + 5 * x) / float(self.dim)
# File: pySOT/optimization_problems/michaelewicz.py
import numpy as np

from .optimization_problem import OptimizationProblem


class Michalewicz(OptimizationProblem):
    """Michalewicz function.

    .. math::
        f(x_1,\\ldots,x_n) = -\\sum_{i=1}^n \\sin(x_i) \\sin^{20}
        \\left( \\frac{ix_i^2}{\\pi} \\right)

    subject to

    .. math::
        0 \\leq x_i \\leq \\pi

    :ivar dim: Number of dimensions
    :ivar lb: Lower variable bounds
    :ivar ub: Upper variable bounds
    :ivar int_var: Integer variables
    :ivar cont_var: Continuous variables
    :ivar info: String with problem info
    """

    def __init__(self, dim=10):
        self.dim = dim
        self.lb = np.zeros(dim)
        self.ub = np.pi * np.ones(dim)
        self.int_var = np.array([])
        self.cont_var = np.arange(0, dim)
        self.info = str(dim) + "-dimensional Michalewicz function \n" + "Global optimum: ??"

    def eval(self, x):
        """Evaluate the Michalewicz function at x.

        :param x: Data point
        :type x: numpy.array
        :return: Value at x
        :rtype: float
        """
        self.__check_input__(x)
        # The exponent 20 makes the valleys extremely steep.
        indices = np.arange(1, self.dim + 1)
        steepness = np.sin(indices * x ** 2 / np.pi) ** 20
        return -np.sum(np.sin(x) * steepness)
# File: pySOT/optimization_problems/optimization_problem.py
import abc
from abc import abstractmethod


class OptimizationProblem(abc.ABC):
    """Base class for optimization problems.

    Subclasses must implement :meth:`eval` and are expected to set the
    attributes below (see the concrete problems in this package).

    NOTE: the original code used the Python-2 idiom
    ``__metaclass__ = abc.ABCMeta``, which has no effect in Python 3, so
    ``@abstractmethod`` was never enforced. Inheriting from ``abc.ABC``
    restores the intended contract: instantiating this class directly (or a
    subclass without ``eval``) now raises ``TypeError``.

    :ivar dim: Number of dimensions
    :ivar lb: Lower variable bounds
    :ivar ub: Upper variable bounds
    :ivar int_var: Integer variables
    :ivar cont_var: Continuous variables
    """

    def __init__(self):
        self.dim = None
        self.lb = None
        self.ub = None
        self.int_var = None
        self.cont_var = None

    def __check_input__(self, x):
        """Raise ValueError if x does not have exactly self.dim components."""
        if len(x) != self.dim:
            raise ValueError("Dimension mismatch")

    @abstractmethod
    def eval(self, record):  # pragma: no cover
        """Evaluate the objective at a point; implemented by subclasses."""
        pass
# File: pySOT/optimization_problems/rastrigin.py
import numpy as np

from .optimization_problem import OptimizationProblem


class Rastrigin(OptimizationProblem):
    """Rastrigin function

    .. math::
        f(x_1,\\ldots,x_n)=10n+\\sum_{i=1}^n (x_i^2 - 10 \\cos(2 \\pi x_i))

    subject to

    .. math::
        -5.12 \\leq x_i \\leq 5.12

    Global optimum: :math:`f(0,0,...,0)=0`

    :ivar dim: Number of dimensions
    :ivar lb: Lower variable bounds
    :ivar ub: Upper variable bounds
    :ivar int_var: Integer variables
    :ivar cont_var: Continuous variables
    :ivar min: Global minimum value
    :ivar minimum: Global minimizer
    :ivar info: String with problem info
    """

    def __init__(self, dim=10):
        self.dim = dim
        self.min = 0
        self.minimum = np.zeros(dim)
        self.lb = -5.12 * np.ones(dim)
        self.ub = 5.12 * np.ones(dim)
        self.int_var = np.array([])
        self.cont_var = np.arange(0, dim)
        self.info = str(dim) + "-dimensional Rastrigin function \n" + "Global optimum: f(0,0,...,0) = 0"

    def eval(self, x):
        """Evaluate the Rastrigin function at x

        :param x: Data point
        :type x: numpy.array
        :return: Value at x
        :rtype: float
        """
        self.__check_input__(x)
        # 10n + sum(x_i^2 - 10 cos(2 pi x_i)); equals 0 at the origin.
        return 10 * self.dim + sum(x ** 2 - 10 * np.cos(2 * np.pi * x))
# File: pySOT/optimization_problems/schwefel.py
import numpy as np

from .optimization_problem import OptimizationProblem


class Schwefel(OptimizationProblem):
    """Schwefel function.

    .. math::
        f(x_1,\\ldots,x_n) = 418.9829 n -
        \\sum_{j=1}^{n} x_j \\sin(\\sqrt{|x_j|})

    subject to

    .. math::
        -512 \\leq x_i \\leq 512

    Global optimum: :math:`f(420.968746,420.968746,...,420.968746)=0`

    :ivar dim: Number of dimensions
    :ivar lb: Lower variable bounds
    :ivar ub: Upper variable bounds
    :ivar int_var: Integer variables
    :ivar cont_var: Continuous variables
    :ivar min: Global minimum value
    :ivar minimum: Global minimizer
    :ivar info: String with problem info
    """

    def __init__(self, dim=10):
        self.dim = dim
        self.min = 0
        self.minimum = 420.968746 * np.ones(dim)
        self.lb = -512 * np.ones(dim)
        self.ub = 512 * np.ones(dim)
        self.int_var = np.array([])
        self.cont_var = np.arange(0, dim)
        self.info = str(dim) + "-dimensional Schwefel function \n" + "Global optimum: f(420.9687,...,420.9687) = 0"

    def eval(self, x):
        """Evaluate the Schwefel function at x.

        :param x: Data point
        :type x: numpy.array
        :return: Value at x
        :rtype: float
        """
        self.__check_input__(x)
        # Offset keeps the optimum value near zero.
        offset = 418.9829 * self.dim
        total = 0
        for component in x:
            total += component * np.sin(np.sqrt(abs(component)))
        return offset - total
# File: pySOT/optimization_problems/sphere.py
import numpy as np

from .optimization_problem import OptimizationProblem


class Sphere(OptimizationProblem):
    """Sphere function.

    .. math::
        f(x_1,\\ldots,x_n) = \\sum_{j=1}^n x_j^2

    subject to

    .. math::
        -5.12 \\leq x_i \\leq 5.12

    Global optimum: :math:`f(0,0,...,0)=0`

    :ivar dim: Number of dimensions
    :ivar lb: Lower variable bounds
    :ivar ub: Upper variable bounds
    :ivar int_var: Integer variables
    :ivar cont_var: Continuous variables
    :ivar min: Global minimum value
    :ivar minimum: Global minimizer
    :ivar info: String with problem info
    """

    def __init__(self, dim=10):
        self.dim = dim
        self.min = 0
        self.minimum = np.zeros(dim)
        self.lb = -5.12 * np.ones(dim)
        self.ub = 5.12 * np.ones(dim)
        self.int_var = np.array([])
        self.cont_var = np.arange(0, dim)
        self.info = str(dim) + "-dimensional Sphere function \n" + "Global optimum: f(0,0,...,0) = 0"

    def eval(self, x):
        """Evaluate the Sphere function at x.

        :param x: Data point
        :type x: numpy.array
        :return: Value at x
        :rtype: float
        """
        self.__check_input__(x)
        # Sum of squared coordinates.
        return np.sum(np.square(x))
# File: pySOT/optimization_problems/weierstrass.py
import numpy as np

from .optimization_problem import OptimizationProblem


class Weierstrass(OptimizationProblem):
    """Weierstrass-type function (pySOT variant with a=1/2, b=3, 12 terms).

    At x = 0 the accumulated sum equals d * f0, so the returned value is 0,
    matching ``self.min``.

    :ivar dim: Number of dimensions
    :ivar lb: Lower variable bounds
    :ivar ub: Upper variable bounds
    :ivar int_var: Integer variables
    :ivar cont_var: Continuous variables
    :ivar min: Global minimum value
    :ivar minimum: Global minimizer
    :ivar info: String with problem info
    """

    def __init__(self, dim=10):
        self.dim = dim
        self.min = 0
        self.minimum = np.zeros(dim)
        self.lb = -5 * np.ones(dim)
        self.ub = 5 * np.ones(dim)
        self.int_var = np.array([])
        self.cont_var = np.arange(0, dim)
        self.info = str(dim) + "-dimensional Weierstrass function"

    def eval(self, x):
        """Evaluate the Weierstrass function at x.

        :param x: Data point
        :type x: numpy.array
        :return: Value at x
        :rtype: float
        """
        self.__check_input__(x)
        d = len(x)
        # f0 is the per-term value at x = 0 (cos(2*pi*3^k*0.5) = cos(pi*3^k)).
        f0, val = 0.0, 0.0
        for k in range(12):
            f0 += 1.0 / (2 ** k) * np.cos(np.pi * (3 ** k))
            for i in range(d):
                val += 1.0 / (2 ** k) * np.cos(2 * np.pi * (3 ** k) * (x[i] + 0.5))
        # NOTE(review): the cubic scaling 10*(.)^3 is unusual for Weierstrass
        # variants — presumably intentional (keeps sign, stretches the range);
        # confirm against the original pySOT benchmark definition.
        return 10 * ((1.0 / float(d) * val - f0) ** 3)


# File: pySOT/optimization_problems/zakharov.py
class Zakharov(OptimizationProblem):
    """Zakharov function

    Global optimum: :math:`f(0,0,...,0)=0`

    (All three terms vanish at the origin, consistent with ``self.min = 0.0``;
    the previous docstring incorrectly claimed the optimum value was 1.)

    :ivar dim: Number of dimensions
    :ivar lb: Lower variable bounds
    :ivar ub: Upper variable bounds
    :ivar int_var: Integer variables
    :ivar cont_var: Continuous variables
    :ivar min: Global minimum value
    :ivar minimum: Global minimizer
    :ivar info: String with problem info
    """

    def __init__(self, dim=10):
        self.dim = dim
        self.min = 0.0
        self.minimum = np.zeros(dim)
        self.lb = -5 * np.ones(dim)
        self.ub = 10 * np.ones(dim)
        self.int_var = np.array([])
        self.cont_var = np.arange(0, dim)
        self.info = str(dim) + "-dimensional Zakharov function \n" + "Global optimum: f(0,0,...,0) = 0"

    def eval(self, x):
        """Evaluate the Zakharov function at x.

        :param x: Data point
        :type x: numpy.array
        :return: Value at x
        :rtype: float
        """
        self.__check_input__(x)
        # Hoist the weighted sum 0.5 * sum(i * x_i); it is used squared and
        # to the fourth power.
        wsum = np.sum(0.5 * (1 + np.arange(self.dim)) * x)
        return np.sum(x ** 2) + wsum ** 2 + wsum ** 4


# File: pySOT/strategy/__init__.py
#!/usr/bin/env python3
from .dycors_strategy import DYCORSStrategy
from .ei_strategy import EIStrategy
from .lcb_strategy import LCBStrategy
from .random_strategy import RandomStrategy
from .sop_strategy import SOPStrategy
from .srbf_strategy import SRBFStrategy
from .surrogate_strategy import SurrogateBaseStrategy

__all__ = [
    "SurrogateBaseStrategy",
    "DYCORSStrategy",
    "EIStrategy",
    "LCBStrategy",
    "RandomStrategy",
    "SOPStrategy",
    "SRBFStrategy",
]
class DYCORSStrategy(SRBFStrategy):
    """DYCORS optimization strategy.

    Implementation of the DYCORS strategy by Regis and Shoemaker:

    Rommel G Regis and Christine A Shoemaker.
    Combining radial basis function surrogates and dynamic coordinate \
    search in high-dimensional expensive black-box optimization.
    Engineering Optimization, 45(5): 529-555, 2013.

    DYCORS extends SRBF by changing how candidate points are generated.
    Since many objective functions depend only on a few directions, each
    coordinate is perturbed with some probability, and this probability
    decays as the evaluation budget is consumed, so fewer coordinates are
    perturbed late in the optimization.

    :param max_evals: Evaluation budget
    :type max_evals: int
    :param opt_prob: Optimization problem object
    :type opt_prob: OptimizationProblem
    :param exp_design: Experimental design object
    :type exp_design: ExperimentalDesign
    :param surrogate: Surrogate object
    :type surrogate: Surrogate
    :param asynchronous: Whether or not to use asynchrony (True/False)
    :type asynchronous: bool
    :param batch_size: Size of the batch (use 1 for serial, ignored if async)
    :type batch_size: int
    :param extra_points: Extra points to add to the experimental design
    :type extra_points: numpy.array of size n x dim
    :param extra_vals: Values for extra_points (np.nan/np.inf if unknown)
    :type extra_vals: numpy.array of size n x 1
    :param use_restarts: Whether or not to restart after convergence
    :type use_restarts: bool
    :param weights: Weights for merit function, default = [0.3, 0.5, 0.8, 0.95]
    :type weights: list of np.array
    :param num_cand: Number of candidate points, default = 100*dim
    :type num_cand: int
    """

    def __init__(
        self,
        max_evals,
        opt_prob,
        exp_design,
        surrogate,
        asynchronous=True,
        batch_size=None,
        extra_points=None,
        extra_vals=None,
        use_restarts=True,
        weights=None,
        num_cand=None,
    ):
        # Remember the size of the initial design; the perturbation
        # probability in generate_evals counts only adaptive evaluations.
        self.num_exp = exp_design.num_pts

        super().__init__(
            max_evals=max_evals,
            opt_prob=opt_prob,
            exp_design=exp_design,
            surrogate=surrogate,
            asynchronous=asynchronous,
            batch_size=batch_size,
            extra_points=extra_points,
            extra_vals=extra_vals,
            use_restarts=use_restarts,
            weights=weights,
            num_cand=num_cand,
        )

    def generate_evals(self, num_pts):
        """Generate the next adaptive sample points."""
        dim = self.opt_prob.dim
        # Adaptive evaluations so far (finished + pending), 1-based.
        evals_done = len(self.X) + len(self.Xpend) - self.num_exp + 1.0
        floor_prob = np.min([1.0, 1.0 / dim])
        adaptive_budget = self.max_evals - self.num_exp
        # Perturbation probability decays logarithmically with progress,
        # but never below 1/dim.
        decay = 1.0 - (np.log(evals_done) / np.log(adaptive_budget))
        perturb_prob = min([20.0 / dim, 1.0]) * decay
        perturb_prob = max(perturb_prob, floor_prob)

        merit_weights = self.get_weights(num_pts=num_pts)
        selected = candidate_dycors(
            opt_prob=self.opt_prob,
            num_pts=num_pts,
            surrogate=self.surrogate,
            X=self._X,
            fX=self._fX,
            Xpend=self.Xpend,
            weights=merit_weights,
            num_cand=self.num_cand,
            sampling_radius=self.sampling_radius,
            prob_perturb=perturb_prob,
        )

        for k in range(num_pts):
            self.batch_queue.append(np.copy(np.ravel(selected[k, :])))
# File: pySOT/strategy/ei_strategy.py
import logging

import numpy as np

from ..auxiliary_problems import ei_ga
from ..surrogate import GPRegressor
from .surrogate_strategy import SurrogateBaseStrategy

# Get module-level logger
logger = logging.getLogger(__name__)


class EIStrategy(SurrogateBaseStrategy):
    """Expected Improvement strategy.

    This is an implementation of Expected Improvement (EI), arguably the most
    popular acquisition function in Bayesian optimization. Under a Gaussian
    process (GP) prior, the expected value of the improvement:

        I(x) := max(f_best - f(x), 0)
        EI[x] := E[I(x)]

    can be computed analytically, where f_best is the best observed function
    value. EI is one-step optimal in the sense that selecting the maximizer of
    EI is the optimal action if we have exactly one function value remaining
    and must return a solution with a known function value.

    When using parallelism, we constrain each new evaluation to be a distance
    dtol away from previous and pending evaluations to avoid that the same
    point is being evaluated multiple times. We use a default value of
    dtol = 1e-3 * norm(ub - lb), but note that this value has not been
    tuned carefully and may be far from optimal.

    The optimization strategy terminates when the evaluation budget has been
    exceeded or when the EI of the next point falls below some threshold,
    where the default threshold is 1e-6 * (max(fX) - min(fX)).

    :param max_evals: Evaluation budget
    :type max_evals: int
    :param opt_prob: Optimization problem object
    :type opt_prob: OptimizationProblem
    :param exp_design: Experimental design object
    :type exp_design: ExperimentalDesign
    :param surrogate: Surrogate object
    :type surrogate: Surrogate
    :param asynchronous: Whether or not to use asynchrony (True/False)
    :type asynchronous: bool
    :param batch_size: Size of the batch (use 1 for serial, ignored if async)
    :type batch_size: int
    :param extra_points: Extra points to add to the experimental design
    :type extra_points: numpy.array of size n x dim
    :param extra_vals: Values for extra_points (np.nan/np.inf if unknown)
    :type extra_vals: numpy.array of size n x 1
    :param use_restarts: Whether or not to restart after convergence
    :type use_restarts: bool
    :param ei_tol: Terminate if the largest EI falls below this threshold
        Default: 1e-6 * (max(fX) - min(fX))
    :type ei_tol: float
    :param dtol: Minimum distance between new and pending/finished evaluations
        Default: 1e-3 * norm(ub - lb)
    :type dtol: float
    """

    def __init__(
        self,
        max_evals,
        opt_prob,
        exp_design,
        surrogate,
        asynchronous=True,
        batch_size=None,
        extra_points=None,
        extra_vals=None,
        use_restarts=True,
        ei_tol=None,
        dtol=None,
    ):
        # Default minimum separation scales with the size of the domain.
        if dtol is None:
            dtol = 1e-3 * np.linalg.norm(opt_prob.ub - opt_prob.lb)
        self.dtol = dtol
        self.ei_tol = ei_tol

        super().__init__(
            max_evals=max_evals,
            opt_prob=opt_prob,
            exp_design=exp_design,
            surrogate=surrogate,
            asynchronous=asynchronous,
            batch_size=batch_size,
            extra_points=extra_points,
            extra_vals=extra_vals,
            use_restarts=use_restarts,
        )

    def check_input(self):
        super().check_input()
        # EI needs a predictive distribution, so a GP surrogate is required.
        assert isinstance(self.surrogate, GPRegressor)

    def generate_evals(self, num_pts):
        """Generate the next adaptive sample points."""
        ei_tol = self.ei_tol
        if ei_tol is None:
            # Relative default threshold based on the observed value range.
            ei_tol = 1e-6 * (self.fX.max() - self.fX.min())

        new_points = ei_ga(
            num_pts=num_pts,
            opt_prob=self.opt_prob,
            surrogate=self.surrogate,
            X=self._X,
            fX=self._fX,
            Xpend=self.Xpend,
            dtol=self.dtol,
            ei_tol=ei_tol,
        )

        if new_points is None:  # Not enough improvement
            self.converged = True
        else:
            for i in range(num_pts):
                self.batch_queue.append(np.copy(np.ravel(new_points[i, :])))
# File: pySOT/strategy/lcb_strategy.py
import logging

import numpy as np

from ..auxiliary_problems import lcb_ga
from ..surrogate import GPRegressor
from .surrogate_strategy import SurrogateBaseStrategy

# Get module-level logger
logger = logging.getLogger(__name__)


class LCBStrategy(SurrogateBaseStrategy):
    """Lower confidence bound strategy.

    This is an implementation of Lower Confidence Bound (LCB), a
    popular acquisition function in Bayesian optimization. The main idea
    is to minimize:

        LCB(x) := E[x] - kappa * sqrt(V[x])

    where E[x] is the predicted function value, V[x] is the predicted
    variance, and kappa is a constant that balances exploration and
    exploitation. We use a default value of kappa = 2.

    When using parallelism, we constrain each new evaluation to be a distance
    dtol away from previous and pending evaluations to avoid that the same
    point is being evaluated multiple times. We use a default value of
    dtol = 1e-3 * norm(ub - lb), but note that this value has not been
    tuned carefully and may be far from optimal.

    The optimization strategy terminates when the evaluation budget has been
    exceeded or when min(fX) - min(LCB(x)) falls below the threshold lcb_tol,
    where the default threshold is 1e-6 * (max(fX) - min(fX)).

    :param max_evals: Evaluation budget
    :type max_evals: int
    :param opt_prob: Optimization problem object
    :type opt_prob: OptimizationProblem
    :param exp_design: Experimental design object
    :type exp_design: ExperimentalDesign
    :param surrogate: Surrogate object
    :type surrogate: Surrogate
    :param asynchronous: Whether or not to use asynchrony (True/False)
    :type asynchronous: bool
    :param batch_size: Size of the batch (use 1 for serial, ignored if async)
    :type batch_size: int
    :param extra_points: Extra points to add to the experimental design
    :type extra_points: numpy.array of size n x dim
    :param extra_vals: Values for extra_points (np.nan/np.inf if unknown)
    :type extra_vals: numpy.array of size n x 1
    :param use_restarts: Whether or not to restart after convergence
    :type use_restarts: bool
    :param kappa: Constant in the LCB merit function
    :type kappa: float
    :param dtol: Minimum distance between new and pending evaluations
        Default: 1e-3 * norm(ub - lb)
    :type dtol: float
    :param lcb_tol: Terminate if min(fX) - min(LCB(x)) < lcb_tol
        Default: 1e-6 * (max(fX) - min(fX))
    :type lcb_tol: float
    """

    def __init__(
        self,
        max_evals,
        opt_prob,
        exp_design,
        surrogate,
        asynchronous=True,
        batch_size=None,
        extra_points=None,
        extra_vals=None,
        use_restarts=True,
        kappa=2.0,
        dtol=None,
        lcb_tol=None,
    ):
        # Default minimum separation scales with the size of the domain.
        if dtol is None:
            dtol = 1e-3 * np.linalg.norm(opt_prob.ub - opt_prob.lb)
        self.dtol = dtol
        self.lcb_tol = lcb_tol
        self.kappa = kappa

        super().__init__(
            max_evals=max_evals,
            opt_prob=opt_prob,
            exp_design=exp_design,
            surrogate=surrogate,
            asynchronous=asynchronous,
            batch_size=batch_size,
            extra_points=extra_points,
            extra_vals=extra_vals,
            use_restarts=use_restarts,
        )

    def check_input(self):
        super().check_input()
        # LCB needs a predictive variance, so a GP surrogate is required.
        assert isinstance(self.surrogate, GPRegressor)

    def generate_evals(self, num_pts):
        """Generate the next adaptive sample points."""
        lcb_tol = self.lcb_tol
        if lcb_tol is None:
            # Relative default threshold based on the observed value range.
            lcb_tol = 1e-6 * (self.fX.max() - self.fX.min())
        lcb_target = self.fX.min() - lcb_tol

        new_points = lcb_ga(
            num_pts=num_pts,
            opt_prob=self.opt_prob,
            surrogate=self.surrogate,
            X=self._X,
            fX=self._fX,
            Xpend=self.Xpend,
            kappa=self.kappa,
            dtol=self.dtol,
            lcb_target=lcb_target,
        )

        if new_points is None:  # Not enough improvement
            self.converged = True
        else:
            for i in range(num_pts):
                self.batch_queue.append(np.copy(np.ravel(new_points[i, :])))
18 | 19 | :param max_evals: Evaluation budget 20 | :type max_evals: int 21 | :param opt_prob: Optimization problem 22 | :type opt_prob: OptimizationProblem 23 | """ 24 | 25 | def __init__(self, max_evals, opt_prob): 26 | check_opt_prob(opt_prob) 27 | if not isinstance(max_evals, int) and max_evals > 0: 28 | raise ValueError("max_evals must be an integer >= exp_des.num_pts") 29 | 30 | self.opt_prob = opt_prob 31 | self.max_evals = max_evals 32 | self.retry = RetryStrategy() 33 | for _ in range(max_evals): # Generate the random points 34 | x = np.random.uniform(low=opt_prob.lb, high=opt_prob.ub) 35 | proposal = self.propose_eval(x) 36 | self.retry.rput(proposal) 37 | 38 | def propose_action(self): 39 | """Propose an action based on outstanding points.""" 40 | if not self.retry.empty(): # Propose next point 41 | return self.retry.get() 42 | elif self.retry.num_eval_outstanding == 0: # Budget exhausted 43 | return self.propose_terminate() 44 | -------------------------------------------------------------------------------- /pySOT/surrogate/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | from .gp import GPRegressor 3 | from .kernels import CubicKernel, Kernel, LinearKernel, TPSKernel 4 | from .mars import MARSInterpolant 5 | from .output_transformations import identity, median_capping 6 | from .poly import PolyRegressor 7 | from .rbf import RBFInterpolant 8 | from .surrogate import Surrogate 9 | from .tails import ConstantTail, LinearTail, Tail 10 | 11 | __all__ = [ 12 | "Surrogate", 13 | "GPRegressor", 14 | "MARSInterpolant", 15 | "PolyRegressor", 16 | "RBFInterpolant", 17 | # 18 | "Kernel", 19 | "CubicKernel", 20 | "LinearKernel", 21 | "TPSKernel", 22 | # 23 | "Tail", 24 | "ConstantTail", 25 | "LinearTail", 26 | # 27 | "identity", 28 | "median_capping", 29 | ] 30 | -------------------------------------------------------------------------------- /pySOT/surrogate/gp.py: 
class GPRegressor(Surrogate):
    """Gaussian process (GP) regressor.

    Wrapper around the GaussianProcessRegressor in scikit-learn. Function
    values are standardized (zero mean, unit-ish variance) before fitting
    and the transformation is undone at prediction time.

    :param dim: Number of dimensions
    :type dim: int
    :param lb: Lower variable bounds
    :type lb: numpy.array
    :param ub: Upper variable bounds
    :type ub: numpy.array
    :param output_transformation: Transformation applied to values before fitting
    :type output_transformation: Callable
    :param gp: GPRegressor model
    :type gp: object
    :param n_restarts_optimizer: Number of restarts in hyperparam fitting
    :type n_restarts_optimizer: int

    :ivar dim: Number of dimensions
    :ivar lb: Lower variable bounds
    :ivar ub: Upper variable bounds
    :ivar output_transformation: Transformation to apply to function values before fitting
    :ivar num_pts: Number of points in surrogate model
    :ivar X: Point incorporated in surrogate model (num_pts x dim)
    :ivar fX: Function values in surrogate model (num_pts x 1)
    :ivar updated: True if model is up-to-date (no refit needed)
    :ivar model: GPRegressor object
    """

    def __init__(self, dim, lb, ub, output_transformation=None, gp=None, n_restarts_optimizer=5):
        super().__init__(dim=dim, lb=lb, ub=ub, output_transformation=output_transformation)

        if gp is None:  # Use the SE kernel
            kernel = ConstantKernel(1, (0.01, 100)) * RBF(
                length_scale=0.5 * np.ones(self.dim,), length_scale_bounds=(0.05, 2.0)
            ) + WhiteKernel(1e-4, (1e-6, 1e-2))
            self.model = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=n_restarts_optimizer)
        else:
            # Bug fix: validate before assigning so an invalid model is
            # never stored on the instance when TypeError is raised.
            if not isinstance(gp, GaussianProcessRegressor):
                raise TypeError("gp is not of type GaussianProcessRegressor")
            self.model = gp

    def _fit(self):
        """Compute new coefficients if the GP is not updated."""
        if not self.updated:
            fX = self.output_transformation(self.fX.copy())
            # Standardize; the std is floored at 1e-6 to avoid division by zero
            self._mu, self._sigma = np.mean(fX), max([np.std(fX), 1e-6])
            fX = (fX - self._mu) / self._sigma
            self.model.fit(self._X, fX)
            self.updated = True

    def predict(self, xx):
        """Evaluate the GP regressor at the points xx.

        :param xx: Prediction points, must be of size num_pts x dim or (dim, )
        :type xx: numpy.ndarray

        :return: Prediction of size num_pts x 1
        :rtype: numpy.ndarray
        """
        self._fit()
        xx = to_unit_box(np.atleast_2d(xx), self.lb, self.ub)
        return self._mu + self._sigma * self.model.predict(xx)  # Undo standardization

    def predict_std(self, xx):
        """Predict standard deviation at points xx.

        :param xx: Prediction points, must be of size num_pts x dim or (dim, )
        :type xx: numpy.ndarray

        :return: Predicted standard deviation, of size num_pts x 1
        :rtype: numpy.ndarray
        """
        self._fit()
        xx = to_unit_box(np.atleast_2d(xx), self.lb, self.ub)
        _, std = self.model.predict(xx, return_std=True)
        return self._sigma * np.expand_dims(std, axis=1)  # Undo standardization

    def predict_deriv(self, xx):
        """TODO: Not implemented"""
        raise NotImplementedError
class CubicKernel(Kernel):
    """Cubic RBF kernel: :math:`\\varphi(r) = r^3`.

    Conditionally positive definite of order 2.
    """

    def __init__(self):
        super().__init__()
        self.order = 2

    def eval(self, dists):
        """Apply the cubic kernel elementwise to a distance matrix.

        :param dists: Distance input matrix
        :type dists: numpy.array

        :returns: a matrix where element :math:`(i,j)` is
            :math:`\\|x_i - x_j \\|^3`
        :rtype: numpy.array
        """
        return dists * dists * dists

    def deriv(self, dists):
        """Apply the cubic kernel derivative elementwise.

        :param dists: Distance input matrix
        :type dists: numpy.array

        :returns: a matrix where element :math:`(i,j)` is
            :math:`3 \\| x_i - x_j \\|^2`
        :rtype: numpy.array
        """
        return 3 * (dists * dists)
class LinearKernel(Kernel):
    """Linear RBF kernel: :math:`\\varphi(r) = r`.

    Conditionally positive definite of order 1.
    """

    def __init__(self):
        super().__init__()
        self.order = 1

    def eval(self, dists):
        """Apply the linear kernel (identity) to a distance matrix.

        :param dists: Distance input matrix
        :type dists: numpy.array

        :returns: a matrix where element :math:`(i,j)` is
            :math:`\\|x_i - x_j \\|`
        :rtype: numpy.array
        """
        return dists

    def deriv(self, dists):
        """Apply the linear kernel derivative (identically one).

        :param dists: Distance input matrix
        :type dists: numpy.array

        :returns: a matrix of ones with the same shape as dists
        :rtype: numpy.array
        """
        return np.ones(dists.shape)
class TPSKernel(Kernel):
    """Thin-plate spline RBF kernel.

    This is a basic class for the TPS RBF kernel:
    :math:`\\varphi(r) = r^2 \\log(r)` which is
    conditionally positive definite of order 2.
    """

    def __init__(self):
        super().__init__()
        self.order = 2

    def eval(self, dists):
        """Evaluate the TPS kernel.

        Distances below machine epsilon are clamped to epsilon to avoid
        evaluating log(0). Bug fix: the original clamped `dists` in place,
        silently mutating the caller's array; we now operate on a clamped
        copy and leave the input untouched.

        :param dists: Distance input matrix
        :type dists: numpy.array

        :returns: a matrix where element :math:`(i,j)` is
            :math:`\\|x_i - x_j \\|^2 \\log (\\|x_i - x_j \\|)`
        :rtype: numpy.array
        """
        d = np.maximum(dists, np.finfo(float).eps)
        return (d ** 2) * np.log(d)

    def deriv(self, dists):
        """Evaluate the derivative of the TPS kernel.

        Same epsilon clamping as eval, also without mutating the input.

        :param dists: Distance input matrix
        :type dists: numpy.array

        :returns: a matrix where element :math:`(i,j)` is
            :math:`\\|x_i - x_j \\|(1 + 2 \\log (\\|x_i - x_j \\|) )`
        :rtype: numpy.array
        """
        d = np.maximum(dists, np.finfo(float).eps)
        return d * (1 + 2 * np.log(d))
27 | 28 | :param dim: Number of dimensions 29 | :type dim: int 30 | :param lb: Lower variable bounds 31 | :type lb: numpy.array 32 | :param ub: Upper variable bounds 33 | :type ub: numpy.array 34 | :param output_transformation: Transformation applied to values before fitting 35 | :type output_transformation: Callable 36 | 37 | :ivar dim: Number of dimensions 38 | :ivar lb: Lower variable bounds 39 | :ivar ub: Upper variable bounds 40 | :ivar output_transformation: Transformation to apply to function values before fitting 41 | :ivar num_pts: Number of points in surrogate model 42 | :ivar X: Point incorporated in surrogate model (num_pts x dim) 43 | :ivar fX: Function values in surrogate model (num_pts x 1) 44 | :ivar updated: True if model is up-to-date (no refit needed) 45 | :ivar model: Earth object 46 | """ 47 | 48 | def __init__(self, dim, lb, ub, output_transformation=None): 49 | super().__init__(dim=dim, lb=lb, ub=ub, output_transformation=output_transformation) 50 | 51 | try: 52 | from pyearth import Earth 53 | 54 | self.model = Earth() 55 | except ImportError as err: 56 | print("Failed to import pyearth") 57 | raise err 58 | 59 | def _fit(self): 60 | """Compute new coefficients if the MARS interpolant is not updated.""" 61 | with warnings.catch_warnings(): 62 | warnings.simplefilter("ignore") # Surpress deprecation warnings 63 | if self.updated is False: 64 | fX = self.output_transformation(self.fX.copy()) 65 | self.model.fit(self._X, fX) 66 | self.updated = True 67 | 68 | def predict(self, xx): 69 | """Evaluate the MARS interpolant at the points xx 70 | 71 | :param xx: Prediction points, must be of size num_pts x dim or (dim, ) 72 | :type xx: numpy.ndarray 73 | 74 | :return: Prediction of size num_pts x 1 75 | :rtype: numpy.ndarray 76 | """ 77 | self._fit() 78 | xx = to_unit_box(np.atleast_2d(xx), self.lb, self.ub) 79 | return np.expand_dims(self.model.predict(xx), axis=1) 80 | 81 | def predict_deriv(self, xx): 82 | """Evaluate the derivative of the MARS 
def identity(x):
    """Identity transformation: return the input unchanged.

    :param x: Array to be transformed
    :type x: numpy.array

    :return: x, untouched
    :rtype: numpy.array
    """
    return x


def median_capping(x):
    """Cap every value above the median at the median.

    The input is copied first, so the caller's array is never modified.

    :param x: Array to be transformed
    :type x: numpy.array

    :return: Copy of x with values above the median replaced by the median
    :rtype: numpy.array
    """
    capped = x.copy()
    med = np.median(capped)
    capped[capped > med] = med
    return capped
self.updated: 42 | fX = self.output_transformation(self.fX.copy()) 43 | self.model.fit(self._X, fX) 44 | self.updated = True 45 | 46 | def predict(self, xx): 47 | """Evaluate the polynomial regressor at the points xx 48 | 49 | :param xx: Prediction points, must be of size num_pts x dim or (dim, ) 50 | :type xx: numpy.ndarray 51 | 52 | :return: Prediction of size num_pts x 1 53 | :rtype: numpy.ndarray 54 | """ 55 | self._fit() 56 | xx = to_unit_box(np.atleast_2d(xx), self.lb, self.ub) 57 | return self.model.predict(xx) 58 | 59 | def predict_deriv(self, xx): 60 | """TODO: Not implemented""" 61 | raise NotImplementedError 62 | -------------------------------------------------------------------------------- /pySOT/surrogate/rbf.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import scipy.linalg as scplinalg 3 | import scipy.spatial as scpspatial 4 | 5 | from ..utils import to_unit_box 6 | from .kernels import CubicKernel, Kernel 7 | from .surrogate import Surrogate 8 | from .tails import LinearTail, Tail 9 | 10 | 11 | class RBFInterpolant(Surrogate): 12 | """Compute and evaluate RBF interpolant. 13 | 14 | Manages an expansion of the form 15 | 16 | .. math:: 17 | 18 | s(x) = \\sum_j c_j \\phi(\\|x-x_j\\|) + \\sum_j \\lambda_j p_j(x) 19 | 20 | where the functions :math:`p_j(x)` are low-degree polynomials. 21 | The fitting equations are 22 | 23 | .. math:: 24 | \\begin{bmatrix} \\eta I & P^T \\\\ P & \\Phi+\\eta I \\end{bmatrix} 25 | \\begin{bmatrix} \\lambda \\\\ c \\end{bmatrix} = 26 | \\begin{bmatrix} 0 \\\\ f \\end{bmatrix} 27 | 28 | where :math:`P_{ij} = p_j(x_i)` and :math:`\\Phi_{ij}=\\phi(\\|x_i-x_j\\|)` 29 | The regularization parameter :math:`\\eta` allows us to avoid problems 30 | with potential poor conditioning of the system. Consider using the 31 | SurrogateUnitBox wrapper or manually scaling the domain to the unit 32 | hypercube to avoid issues with the domain scaling. 
    def __init__(self, dim, lb, ub, output_transformation=None, kernel=None, tail=None, eta=1e-6):
        """Set up the RBF interpolant; defaults to cubic kernel + linear tail."""
        super().__init__(dim=dim, lb=lb, ub=ub, output_transformation=output_transformation)

        # Fall back to the standard cubic-kernel / linear-tail pairing if
        # either component is missing (they must be chosen together).
        if kernel is None or tail is None:
            kernel = CubicKernel()
            tail = LinearTail(dim)
        assert isinstance(kernel, Kernel) and isinstance(tail, Tail)

        self.kernel = kernel
        self.tail = tail
        self.ntail = tail.dim_tail  # Number of polynomial basis functions
        # Factorization state; populated lazily by _fit
        self.A = None
        self.L = None
        self.U = None
        self.piv = None
        self.c = None  # Expansion coefficients; None means "no fit yet"
        self.eta = eta  # Regularization parameter

        # A kernel of order m requires a polynomial tail of degree >= m - 1
        if kernel.order - 1 > tail.degree:
            raise ValueError("Kernel and tail mismatch")
        assert self.dim == self.tail.dim
self.dim == self.tail.dim 88 | 89 | def reset(self): 90 | """Reset the RBF interpolant.""" 91 | super().reset() 92 | self.L = None 93 | self.U = None 94 | self.piv = None 95 | self.c = None 96 | 97 | def _fit(self): 98 | """Compute new coefficients if the RBF is not updated. 99 | 100 | We try to update an existing LU factorization by computing a Cholesky 101 | factorization of the Schur complemented system. This may fail if the 102 | system is ill-conditioned, in which case we compute a new LU 103 | factorization. 104 | """ 105 | if not self.updated: 106 | n = self.num_pts 107 | ntail = self.ntail 108 | nact = ntail + n 109 | 110 | if self.c is None: # Initial fit 111 | assert self.num_pts >= ntail 112 | 113 | X = self._X[0:n, :] 114 | D = scpspatial.distance.cdist(X, X) 115 | Phi = self.kernel.eval(D) + self.eta * np.eye(n) 116 | P = self.tail.eval(X) 117 | 118 | # Set up the systems matrix 119 | A1 = np.hstack((np.zeros((ntail, ntail)), P.T)) 120 | A2 = np.hstack((P, Phi)) 121 | A = np.vstack((A1, A2)) 122 | 123 | [LU, piv] = scplinalg.lu_factor(A) 124 | self.L = np.tril(LU, -1) + np.eye(nact) 125 | self.U = np.triu(LU) 126 | 127 | # Construct the usual pivoting vector so that we can increment 128 | self.piv = np.arange(0, nact) 129 | for i in range(nact): 130 | self.piv[i], self.piv[piv[i]] = self.piv[piv[i]], self.piv[i] 131 | 132 | else: # Extend LU factorization 133 | k = self.c.shape[0] - ntail 134 | numnew = n - k 135 | kact = ntail + k 136 | 137 | X = self._X[:n, :] 138 | XX = self._X[k:n, :] 139 | D = scpspatial.distance.cdist(X, XX) 140 | Pnew = np.vstack((self.tail.eval(XX).T, self.kernel.eval(D[:k, :]))) 141 | Phinew = self.kernel.eval(D[k:, :]) + self.eta * np.eye(numnew) 142 | 143 | L21 = np.zeros((kact, numnew)) 144 | U12 = np.zeros((kact, numnew)) 145 | for i in range(numnew): # TODO: Can we use level-3 BLAS? 
146 | L21[:, i] = scplinalg.solve_triangular(a=self.U, b=Pnew[:kact, i], lower=False, trans="T") 147 | U12[:, i] = scplinalg.solve_triangular(a=self.L, b=Pnew[self.piv[:kact], i], lower=True, trans="N") 148 | L21 = L21.T 149 | try: # Compute Cholesky factorization of the Schur complement 150 | C = scplinalg.cholesky(a=Phinew - np.dot(L21, U12), lower=True) 151 | except: # Compute a new LU factorization if Cholesky fails 152 | self.c = None 153 | return self._fit() 154 | 155 | self.piv = np.hstack((self.piv, np.arange(kact, nact))) 156 | self.L = np.vstack((self.L, L21)) 157 | L2 = np.vstack((np.zeros((kact, numnew)), C)) 158 | self.L = np.hstack((self.L, L2)) 159 | self.U = np.hstack((self.U, U12)) 160 | U2 = np.hstack((np.zeros((numnew, kact)), C.T)) 161 | self.U = np.vstack((self.U, U2)) 162 | 163 | # Update coefficients 164 | fX = self.output_transformation(self.fX.copy()) 165 | rhs = np.vstack((np.zeros((ntail, 1)), fX)) 166 | self.c = scplinalg.solve_triangular(a=self.L, b=rhs[self.piv], lower=True) 167 | self.c = scplinalg.solve_triangular(a=self.U, b=self.c, lower=False) 168 | self.updated = True 169 | 170 | def predict(self, xx): 171 | """Evaluate the RBF interpolant at the points xx 172 | 173 | :param xx: Prediction points, must be of size num_pts x dim or (dim, ) 174 | :type xx: numpy.ndarray 175 | 176 | :return: Prediction of size num_pts x 1 177 | :rtype: numpy.ndarray 178 | """ 179 | self._fit() 180 | xx = to_unit_box(np.atleast_2d(xx), self.lb, self.ub) 181 | ds = scpspatial.distance.cdist(xx, self._X) 182 | ntail = self.ntail 183 | return np.dot(self.kernel.eval(ds), self.c[ntail : ntail + self.num_pts]) + np.dot( 184 | self.tail.eval(xx), self.c[:ntail] 185 | ) 186 | 187 | def predict_deriv(self, xx): 188 | """Evaluate the derivative of the RBF interpolant at a point xx 189 | 190 | :param xx: Prediction points, must be of size num_pts x dim or (dim, ) 191 | :type xx: numpy.array 192 | 193 | :return: Derivative of the RBF interpolant at xx 194 | 
    def predict_deriv(self, xx):
        """Evaluate the derivative of the RBF interpolant at a point xx

        :param xx: Prediction points, must be of size num_pts x dim or (dim, )
        :type xx: numpy.array

        :return: Derivative of the RBF interpolant at xx
        :rtype: numpy.array
        """
        self._fit()
        xx = to_unit_box(np.atleast_2d(xx), self.lb, self.ub)
        if xx.shape[1] != self.dim:
            raise ValueError("Input has incorrect number of dimensions")
        # Distances from all centers to all prediction points
        ds = scpspatial.distance.cdist(self._X, xx)
        ds[ds < np.finfo(float).eps] = np.finfo(float).eps  # Avoid 0*inf

        dfxx = np.zeros((xx.shape[0], self.dim))
        for i in range(xx.shape[0]):
            x = np.atleast_2d(xx[i, :])
            ntail = self.ntail
            # Gradient contribution from the polynomial tail
            dpx = self.tail.deriv(x)
            dfx = np.dot(dpx, self.c[:ntail]).transpose()
            # Gradient contribution from the kernel part:
            # sum_j c_j * phi'(||x - x_j||) * (x - x_j) / ||x - x_j||
            dsx = -(self._X.copy())
            dsx += x  # dsx = x - x_j, row per center (in-place to avoid a copy)
            dss = np.atleast_2d(ds[:, i]).T
            dsx *= np.multiply(self.kernel.deriv(dss), self.c[ntail:]) / dss
            dfx += np.sum(dsx, 0)
            dfxx[i, :] = dfx
        # Chain rule: the model lives on the unit box, rescale to the domain
        return dfxx / (self.ub - self.lb)
22 | 23 | :ivar dim: Number of dimensions 24 | :ivar lb: Lower variable bounds 25 | :ivar ub: Upper variable bounds 26 | :ivar output_transformation: Transformation to apply to function values before fitting 27 | :ivar num_pts: Number of points in surrogate model 28 | :ivar X: Point incorporated in surrogate model (num_pts x dim) 29 | :ivar fX: Function values in surrogate model (num_pts x 1) 30 | :ivar updated: True if model is up-to-date (no refit needed) 31 | """ 32 | 33 | def __init__(self, dim, lb, ub, output_transformation=None): # pragma: no cover 34 | self.dim = dim 35 | self.lb = lb 36 | self.ub = ub 37 | if output_transformation is None: 38 | output_transformation = identity 39 | self.output_transformation = output_transformation 40 | self.reset() 41 | 42 | def reset(self): 43 | """Reset the surrogate.""" 44 | self.num_pts = 0 45 | self.X = np.empty([0, self.dim]) 46 | self._X = np.empty([0, self.dim]) 47 | self.fX = np.empty([0, 1]) 48 | self.updated = False 49 | 50 | def add_points(self, xx, fx): 51 | """Add new function evaluations. 52 | 53 | This method SHOULD NOT trigger a new fit, it just updates X 54 | and fX but leaves the original surrogate object intact 55 | 56 | :param xx: Points to add 57 | :type xx: numpy.ndarray 58 | :param fx: The function values of the point to add 59 | :type fx: numpy.array or float 60 | """ 61 | xx = np.atleast_2d(xx) 62 | if isinstance(fx, float): 63 | fx = np.array([fx]) 64 | if fx.ndim == 0: 65 | fx = np.expand_dims(fx, axis=0) 66 | if fx.ndim == 1: 67 | fx = np.expand_dims(fx, axis=1) 68 | assert xx.shape[0] == fx.shape[0] and xx.shape[1] == self.dim 69 | newpts = xx.shape[0] 70 | self.X = np.vstack((self.X, xx)) 71 | self._X = to_unit_box(self.X, self.lb, self.ub) 72 | self.fX = np.vstack((self.fX, fx)) 73 | self.num_pts += newpts 74 | self.updated = False 75 | 76 | @abstractmethod 77 | def predict(self, xx): # pragma: no cover 78 | """Evaluate surroagte at points xx. 
class ConstantTail(Tail):
    """Constant polynomial tail.

    Constant polynomial in d-dimension, built from the basis :math:`\\{ 1 \\}`.
    """

    def __init__(self, dim):
        super().__init__()
        self.degree = 0  # Polynomial degree of the tail
        self.dim = dim  # Dimensionality of the original space
        self.dim_tail = 1  # Number of basis functions

    def eval(self, X):
        """Evaluate the constant polynomial tail.

        :param X: Points to evaluate, of size num_pts x dim
        :type X: numpy.array

        :returns: A numpy.array of size num_pts x dim_tail (all ones)
        :rtype: numpy.array
        """
        X = np.atleast_2d(X)
        if X.shape[1] != self.dim:
            raise ValueError("Input has the wrong number of dimensions")
        return np.ones((X.shape[0], 1))

    def deriv(self, x):
        """Evaluate the derivative of the constant polynomial tail.

        :param x: Point to evaluate, of size (1, dim) or (dim,)
        :type x: numpy.array

        :returns: A numpy.array of size dim x dim_tail, i.e. (dim, 1); all
            zeros, since the constant basis has zero gradient. (The original
            docstring claimed dim_tail x dim, but the code returns the
            transpose of that, consistent with how RBFInterpolant uses it.)
        :rtype: numpy.array
        """
        x = np.atleast_2d(x)
        if x.shape[1] != self.dim:
            raise ValueError("Input has the wrong number of dimensions")
        return np.zeros((x.shape[1], 1))
from abc import ABC, abstractmethod


class Tail(ABC):
    """Base class for a polynomial tail.

    :ivar degree: Degree of the polynomial tail
    :ivar dim: Dimensionality of the original space
    :ivar dim_tail: Dimensionality of the polynomial space \
        (number of basis functions)
    """

    def __init__(self):  # pragma: no cover
        self.degree = None
        self.dim = None
        self.dim_tail = None

    @abstractmethod
    def eval(self, X):  # pragma: no cover
        """Evaluate the polynomial tail.

        :param X: Array of size num_pts x dim
        :type X: numpy.ndarray

        :return: Array of size num_pts x dim_tail
        :rtype: numpy.ndarray
        """
        pass

    @abstractmethod
    def deriv(self, x):  # pragma: no cover
        """Evaluate derivative of the polynomial tail.

        :param x: Array of size 1 x dim or (dim,)
        :type x: numpy.ndarray

        :return: Array of size dim x dim_tail -- the orientation the
            concrete tails in this package (ConstantTail, LinearTail)
            actually return
        :rtype: numpy.ndarray
        """
        pass
def test_srbf():
    """Check candidate_uniform/candidate_srbf proposals on a 1-D Ackley GP."""
    np.random.seed(0)
    problem = Ackley(dim=1)
    X = np.expand_dims([-15, -10, 0, 1, 20], axis=1)
    fX = np.array([problem.eval(x) for x in X])

    surrogate = GPRegressor(dim=problem.dim, lb=problem.lb, ub=problem.ub)
    surrogate.add_points(X, fX)

    # Shared arguments for all candidate generators below
    common = dict(num_pts=1, X=X, Xpend=None, fX=fX, num_cand=10000, surrogate=surrogate, opt_prob=problem)

    # For each exploration weight, both generators should propose the
    # same known point (w = 0.25 exploits, w = 0.75 explores).
    for weight, expected in [(0.25, 10.50), (0.75, -1.5050)]:
        proposal = candidate_uniform(weights=[weight], **common)
        assert np.isclose(proposal, expected, atol=1e-2)

        proposal = candidate_srbf(weights=[weight], sampling_radius=0.5, **common)
        assert np.isclose(proposal, expected, atol=1e-2)
def check_strategy(controller):
    """Sanity-check a finished strategy against its controller."""
    strategy = controller.strategy

    # The strategy's own bookkeeping must be internally consistent
    assert strategy.num_evals == strategy.max_evals
    assert strategy.pending_evals == 0
    assert strategy.X.shape == (strategy.num_evals, ackley.dim)
    assert strategy.fX.shape == (strategy.num_evals, 1)
    assert strategy.Xpend.shape == (0, ackley.dim)
    assert len(strategy.fevals) == strategy.num_evals

    # The controller's record must agree with the strategy's
    assert len(controller.fevals) == strategy.num_evals
    for feval in controller.fevals:
        if feval.status != "completed":
            continue
        x = feval.params[0]
        idx = np.where((strategy.X == x).all(axis=1))[0]

        assert len(idx) == 1
        assert np.all(x == strategy.X[idx, :])
        assert feval.value == strategy.fX[idx]
        assert np.all(x <= ackley.ub)
        assert np.all(x >= ackley.lb)
def test_lhd_round():
    """LHD points with an integer variable are rounded and stay in bounds."""
    num_pts = 10
    dim = 3
    lb = np.array([1, 2, 3])
    ub = np.array([3, 4, 5])
    int_var = np.array([1])

    np.random.seed(0)
    lhd = LatinHypercube(dim=dim, num_pts=num_pts)
    X = lhd.generate_points(lb=lb, ub=ub, int_var=int_var)

    # BUG FIX: the old assert was np.all(np.round(X[:, 1] == X[:, 1])),
    # which rounds a boolean array (always all-True) and so never checked
    # integrality.  Compare the values against their rounded versions.
    assert np.all(np.round(X[:, 1]) == X[:, 1])  # Should be integers
    assert np.all(np.max(X, axis=0) <= ub)
    assert np.all(np.min(X, axis=0) >= lb)


def test_slhd():
    """SLHD generates designs of the requested size (even and odd)."""
    for i in range(10, 12):  # To test even and odd
        slhd = SymmetricLatinHypercube(dim=3, num_pts=i)
        X = slhd.generate_points()
        assert isinstance(slhd, ExperimentalDesign)
        assert np.all(X.shape == (i, 3))
        assert slhd.num_pts == i
        assert slhd.dim == 3


def test_slhd_round():
    """SLHD points with an integer variable are rounded and hit the bounds."""
    num_pts = 10
    dim = 3
    lb = np.array([1, 2, 3])
    ub = np.array([3, 4, 5])
    int_var = np.array([1])

    np.random.seed(0)
    slhd = SymmetricLatinHypercube(dim=dim, num_pts=num_pts)
    X = slhd.generate_points(lb=lb, ub=ub, int_var=int_var)
    # Same fix as in test_lhd_round: round the values, then compare.
    assert np.all(np.round(X[:, 1]) == X[:, 1])  # Should be integers
    assert np.all(np.max(X, axis=0) == ub)
    assert np.all(np.min(X, axis=0) == lb)
def test_all():
    """Instantiate every optimization problem and sanity-check it."""
    module = importlib.import_module("pySOT.optimization_problems")
    for name, obj in inspect.getmembers(pySOT.optimization_problems):
        # Only concrete problem classes, not the abstract base
        if not inspect.isclass(obj) or name == "OptimizationProblem":
            continue
        opt = getattr(module, name)()
        assert isinstance(opt, OptimizationProblem)

        # Problems that expose a known minimizer must evaluate to their minimum
        if hasattr(opt, "minimum"):
            val = opt.eval(opt.minimum)
            assert abs(val - opt.min) < 1e-3
        else:
            val = opt.eval(np.zeros(opt.dim))
        with pytest.raises(ValueError):  # This should raise an exception
            opt.eval(np.zeros(opt.dim + 1))

        # Sanity check all methods
        check_opt_prob(opt)
def test_unit_box_map():
    """to_unit_box/from_unit_box are inverses and map into/out of [0, 1]."""
    X = np.random.rand(5, 3)
    lb = -1 * np.ones((3,))
    ub = 2 * np.ones((3,))

    X1 = to_unit_box(X, lb, ub)
    np.testing.assert_equal(X.shape, X1.shape)
    np.testing.assert_almost_equal(X1, to_unit_box(X, np.atleast_2d(lb), np.atleast_2d(ub)))
    # BUG FIX: this assert used to check X, which np.random.rand makes
    # trivially true; the *mapped* points X1 are what must lie in [0, 1].
    assert X1.max() <= 1.0 and X1.min() >= 0

    # Try to map back to what we started with
    X2 = from_unit_box(X1, lb, ub)
    np.testing.assert_equal(X.shape, X2.shape)
    np.testing.assert_almost_equal(X2, from_unit_box(X1, np.atleast_2d(lb), np.atleast_2d(ub)))
    np.testing.assert_almost_equal(X2, X)
def test_nd_front():
    """Check the non-dominated front computation on random bi-objective data."""
    npts, nobj = 100, 2
    F = np.random.rand(nobj, npts)
    nd_idx, dom_idx = nd_front(F)

    # Non-dominated and dominated indices must partition all points ...
    assert len(nd_idx) + len(dom_idx) == npts

    # ... and every index reference must be unique
    assert len(set(nd_idx)) == len(nd_idx)
    assert len(set(dom_idx)) == len(dom_idx)

    # A point that is better in both objectives dominates everything else
    F_aug = np.vstack((F.T, np.asarray([-0.1, -0.5]))).T
    nd_idx, dom_idx = nd_front(F_aug)
    assert len(nd_idx) == 1 and nd_idx[0] == npts

    # A point that is worse in both objectives is itself dominated
    nd_idx, dom_idx = nd_front(F)
    n_front = len(nd_idx)
    F_aug = np.vstack((F[:, nd_idx].T, np.asarray([1.1, 1.4]))).T
    nd_idx, dom_idx = nd_front(F_aug)
    assert len(dom_idx) == 1 and dom_idx[0] == n_front
def test_radius_rules():
    """Exercise check_radius_rule on generic, zero-threshold, and duplicate points."""
    dim, nc, sigma = 2, 32, 0.2
    centers = np.zeros((nc, dim + 5))
    centers[:, 0:dim] = np.random.rand(nc, dim)
    point = np.random.rand(1, dim)

    # The rule must always return a 0/1 flag
    flag = check_radius_rule(point, centers, sigma, dim, nc, 0.7)
    assert flag == 0 or flag == 1

    # With a zero distance threshold the point is always accepted
    assert check_radius_rule(point, centers, sigma, dim, nc, 0.0) == 1

    # A point that coincides with a center is rejected by the radius rule
    duplicate = centers[15, 0:dim]
    assert check_radius_rule(duplicate, centers, sigma, dim, nc, 0.7) == 0