├── .codecov.yml ├── .coveragerc ├── .flake8 ├── .gitattributes ├── .github └── workflows │ ├── docs.yml │ ├── pre-commit.yml │ ├── publish-pypi.yml │ └── testing.yml ├── .gitignore ├── .isort.cfg ├── .pre-commit-config.yaml ├── LICENSE ├── README.rst ├── docs ├── Makefile ├── make.bat ├── source │ ├── _static │ │ ├── .placeholder │ │ └── xrt_blop_layout_w.jpg │ ├── agent.rst │ ├── conf.py │ ├── dofs.rst │ ├── index.rst │ ├── installation.rst │ ├── min_versions.rst │ ├── objectives.rst │ ├── release-history.rst │ ├── tutorials.rst │ ├── tutorials │ │ ├── hyperparameters.ipynb │ │ ├── introduction.ipynb │ │ ├── kb-mirrors.ipynb │ │ ├── pareto-fronts.ipynb │ │ ├── passive-dofs.ipynb │ │ └── xrt-blop-demo.ipynb │ └── usage.rst └── wip │ ├── constrained-himmelblau copy.ipynb │ ├── custom-acquisition.ipynb │ ├── introduction.ipynb │ ├── latent-toroid-dimensions.ipynb │ └── multi-task-sirepo.ipynb ├── examples ├── benchmark.py ├── bluesky_adaptive_agent.py ├── prepare_bluesky.py ├── prepare_chx_shadow.py ├── prepare_tes_shadow.py └── prepare_tes_srw.py ├── pyproject.toml ├── pytest.ini ├── scripts └── gui.py └── src └── blop ├── __init__.py ├── agent.py ├── bayesian ├── __init__.py ├── acquisition │ ├── __init__.py │ ├── analytic.py │ ├── config.yml │ └── monte_carlo.py ├── kernels.py └── models.py ├── de ├── __init__.py ├── de_opt_utils.py ├── de_optimization.py └── hardware_flyer.py ├── digestion ├── __init__.py └── tests.py ├── dofs.py ├── objectives.py ├── plans.py ├── plotting.py ├── sim ├── __init__.py ├── beamline.py ├── handlers.py ├── xrt_beamline.py └── xrt_kb_model.py ├── tests ├── __init__.py ├── conftest.py ├── test_acqfs.py ├── test_agents.py ├── test_dofs.py ├── test_pareto.py └── test_sims.py └── utils ├── __init__.py ├── functions.py ├── prepare_re_env.py └── sets.py /.codecov.yml: -------------------------------------------------------------------------------- 1 | # show coverage in CI status, not as a comment. 
2 | comment: off 3 | coverage: 4 | status: 5 | project: 6 | default: 7 | target: auto 8 | patch: 9 | default: 10 | target: auto 11 | -------------------------------------------------------------------------------- /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | source = 3 | blop 4 | [report] 5 | omit = 6 | */python?.?/* 7 | */site-packages/nose/* 8 | # ignore _version.py and versioneer.py 9 | .*version.* 10 | *_version.py 11 | 12 | exclude_lines = 13 | if __name__ == '__main__': 14 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | exclude = 3 | .git, 4 | __pycache__, 5 | blop/_version.py, 6 | build, 7 | dist, 8 | docs/source/conf.py 9 | examples/*.py, 10 | blop/tests/test_bayesian_shadow.py, 11 | versioneer.py, 12 | max-line-length = 125 13 | # Ignore some style 'errors' produced while formatting by 'black' 14 | ignore = E203, W503, E704 15 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | blop/_version.py export-subst 2 | -------------------------------------------------------------------------------- /.github/workflows/docs.yml: -------------------------------------------------------------------------------- 1 | name: Build Documentation 2 | 3 | on: 4 | push: 5 | pull_request: 6 | workflow_dispatch: 7 | release: 8 | types: [published] 9 | 10 | jobs: 11 | build_docs: 12 | # pull requests are a duplicate of a branch push if within the same repo. 13 | if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.repository 14 | 15 | runs-on: ubuntu-latest 16 | strategy: 17 | matrix: 18 | python-version: ["3.10"] 19 | fail-fast: false 20 | 21 | defaults: 22 | run: 23 | shell: bash -l {0} 24 | 25 | steps: 26 | - name: Set env vars 27 | run: | 28 | export REPOSITORY_NAME=${GITHUB_REPOSITORY#*/} # just the repo, as opposed to org/repo 29 | echo "REPOSITORY_NAME=${REPOSITORY_NAME}" >> $GITHUB_ENV 30 | 31 | - name: Checkout the code 32 | uses: actions/checkout@v3 33 | with: 34 | fetch-depth: 0 35 | 36 | # - name: Start MongoDB 37 | # uses: supercharge/mongodb-github-action@1.6.0 38 | 39 | # - name: Start Sirepo Docker container 40 | # uses: NSLS-II/start-sirepo-action@v2 41 | # with: 42 | # docker-binary: docker 43 | 44 | # - name: Copy databroker config file 45 | # run: | 46 | # set -vxeuo pipefail 47 | # mkdir -v -p ~/.config/databroker/ 48 | # wget https://raw.githubusercontent.com/NSLS-II/sirepo-bluesky/main/examples/local.yml -O ~/.config/databroker/local.yml 49 | 50 | # - name: Set up Python ${{ matrix.python-version }} with conda 51 | # uses: conda-incubator/setup-miniconda@v2 52 | # with: 53 | # activate-environment: ${{ env.REPOSITORY_NAME }}-py${{ matrix.python-version }} 54 | # auto-update-conda: true 55 | # miniconda-version: "latest" 56 | # python-version: ${{ matrix.python-version }} 57 | # mamba-version: "*" 58 | # channels: conda-forge 59 | 60 | - name: Set up Python ${{ matrix.python-version }} 61 | uses: actions/setup-python@v4 62 | with: 63 | python-version: ${{ matrix.python-version }} 64 | 65 | - name: Install documentation-building requirements with apt/dpkg 66 | run: | 67 | set -vxeuo pipefail 68 | wget --progress=dot:giga "https://github.com/jgm/pandoc/releases/download/3.1.6.1/pandoc-3.1.6.1-1-amd64.deb" 
-O /tmp/pandoc.deb 69 | sudo dpkg -i /tmp/pandoc.deb 70 | # conda install -c conda-forge -y pandoc 71 | which pandoc 72 | pandoc --version 73 | 74 | - name: Install documentation-building requirements with pip 75 | run: | 76 | # For reference: https://www.gnu.org/software/bash/manual/html_node/The-Set-Builtin.html. 77 | set -vxeo pipefail 78 | 79 | pip install --upgrade pip wheel 80 | pip install -v .[dev,xrt] 81 | pip list 82 | 83 | - name: Build Docs 84 | run: make -C docs/ html 85 | 86 | - uses: actions/upload-artifact@v4 87 | with: 88 | name: ${{ env.REPOSITORY_NAME }}-docs 89 | path: docs/build/html/ 90 | 91 | - name: Deploy documentation to nsls-ii.github.io 92 | # if: github.repository_owner == 'NSLS-II' && github.ref_name == 'main' 93 | if: github.event_name == 'release' 94 | # We pin to the SHA, not the tag, for security reasons. 95 | # https://docs.github.com/en/free-pro-team@latest/actions/learn-github-actions/security-hardening-for-github-actions#using-third-party-actions 96 | uses: peaceiris/actions-gh-pages@bbdfb200618d235585ad98e965f4aafc39b4c501 # v3.7.3 97 | with: 98 | deploy_key: ${{ secrets.ACTIONS_DOCUMENTATION_DEPLOY_KEY }} 99 | publish_branch: master 100 | publish_dir: ./docs/build/html 101 | external_repository: NSLS-II/NSLS-II.github.io 102 | destination_dir: ${{ env.REPOSITORY_NAME }} 103 | keep_files: true # Keep old files. 104 | force_orphan: false # Keep git history. 105 | -------------------------------------------------------------------------------- /.github/workflows/pre-commit.yml: -------------------------------------------------------------------------------- 1 | name: pre-commit 2 | 3 | on: 4 | push: 5 | pull_request: 6 | workflow_dispatch: 7 | 8 | jobs: 9 | pre-commit: 10 | # pull requests are a duplicate of a branch push if within the same repo. 11 | if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.repository 12 | 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: actions/checkout@v3 16 | - uses: actions/setup-python@v4 17 | - name: Install pre-commit dependencies 18 | run: pip install .[pre-commit] 19 | - uses: pre-commit/action@v3.0.0 20 | with: 21 | extra_args: --all-files 22 | -------------------------------------------------------------------------------- /.github/workflows/publish-pypi.yml: -------------------------------------------------------------------------------- 1 | # This workflows will upload a Python Package when a release is created. 
2 | # For more information see: 3 | # - https://docs.pypi.org/trusted-publishers/adding-a-publisher/ 4 | # - https://github.com/pypa/gh-action-pypi-publish 5 | 6 | name: PyPI upload 7 | 8 | on: 9 | release: 10 | types: [published] 11 | workflow_dispatch: 12 | 13 | jobs: 14 | publish_pypi: 15 | name: Publish package to PyPI 16 | runs-on: ubuntu-latest 17 | permissions: 18 | id-token: write 19 | 20 | steps: 21 | - uses: actions/checkout@v3 22 | with: 23 | fetch-depth: 0 24 | 25 | - name: Set up Python 26 | uses: actions/setup-python@v4 27 | with: 28 | python-version: '3.x' 29 | 30 | - name: Build package 31 | run: | 32 | set -vxeuo pipefail 33 | python -m pip install --upgrade pip 34 | pip install build 35 | python -m build 36 | 37 | - name: Publish wheels to PyPI 38 | uses: pypa/gh-action-pypi-publish@release/v1 39 | with: 40 | packages-dir: ./dist/ 41 | -------------------------------------------------------------------------------- /.github/workflows/testing.yml: -------------------------------------------------------------------------------- 1 | name: Unit Tests 2 | 3 | on: 4 | push: 5 | pull_request: 6 | workflow_dispatch: 7 | schedule: 8 | - cron: '00 6 * * *' # daily at 6AM UTC 9 | 10 | jobs: 11 | run_tests: 12 | # pull requests are a duplicate of a branch push if within the same repo. 13 | if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.repository 14 | 15 | runs-on: ${{ matrix.host-os }} 16 | strategy: 17 | matrix: 18 | host-os: ["ubuntu-latest"] 19 | python-version: ["3.10", "3.11", "3.12"] 20 | fail-fast: false 21 | 22 | defaults: 23 | run: 24 | shell: bash -l {0} 25 | 26 | steps: 27 | - name: Set env vars 28 | run: | 29 | export REPOSITORY_NAME=${GITHUB_REPOSITORY#*/} # just the repo, as opposed to org/repo 30 | echo "REPOSITORY_NAME=${REPOSITORY_NAME}" >> $GITHUB_ENV 31 | 32 | - name: Checkout the code 33 | uses: actions/checkout@v3 34 | 35 | # - name: Start MongoDB 36 | # uses: supercharge/mongodb-github-action@1.6.0 37 | 38 | # - name: Start Sirepo Docker container 39 | # uses: NSLS-II/start-sirepo-action@v2 40 | # with: 41 | # docker-binary: docker 42 | 43 | # - name: Copy databroker config file 44 | # run: | 45 | # set -vxeuo pipefail 46 | # mkdir -v -p ~/.config/databroker/ 47 | # wget https://raw.githubusercontent.com/NSLS-II/sirepo-bluesky/main/examples/local.yml -O ~/.config/databroker/local.yml 48 | 49 | # - name: Set up Python ${{ matrix.python-version }} with conda 50 | # uses: conda-incubator/setup-miniconda@v2 51 | # with: 52 | # activate-environment: ${{ env.REPOSITORY_NAME }}-py${{ matrix.python-version }} 53 | # auto-update-conda: true 54 | # miniconda-version: "latest" 55 | # python-version: ${{ matrix.python-version }} 56 | # mamba-version: "*" 57 | # channels: conda-forge 58 | 59 | - name: Set up Python ${{ matrix.python-version }} 60 | uses: actions/setup-python@v4 61 | with: 62 | python-version: ${{ matrix.python-version }} 63 | 64 | - name: Install the package and its dependencies 65 | run: | 66 | # For reference: https://www.gnu.org/software/bash/manual/html_node/The-Set-Builtin.html. 
67 | set -vxeo pipefail 68 | 69 | pip install --upgrade pip wheel 70 | pip install -v .[dev] 71 | pip list 72 | 73 | - name: Test with pytest 74 | run: | 75 | set -vxeuo pipefail 76 | coverage run -m pytest -vv -s 77 | coverage report -m 78 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # setuptools_scm 2 | src/*/_version.py 3 | 4 | # Byte-compiled / optimized / DLL files 5 | __pycache__/ 6 | *.py[cod] 7 | *$py.class 8 | 9 | # C extensions 10 | *.so 11 | 12 | # Distribution / packaging 13 | .Python 14 | env/ 15 | build/ 16 | develop-eggs/ 17 | dist/ 18 | downloads/ 19 | eggs/ 20 | .eggs/ 21 | lib/ 22 | lib64/ 23 | parts/ 24 | sdist/ 25 | var/ 26 | venv/ 27 | *.egg-info/ 28 | .installed.cfg 29 | *.egg 30 | 31 | # PyInstaller 32 | # Usually these files are written by a python script from a template 33 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 34 | *.manifest 35 | *.spec 36 | 37 | # Installer logs 38 | pip-log.txt 39 | pip-delete-this-directory.txt 40 | 41 | # Unit test / coverage reports 42 | htmlcov/ 43 | .tox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *,cover 50 | .hypothesis/ 51 | 52 | # Translations 53 | *.mo 54 | *.pot 55 | 56 | # Django stuff: 57 | *.log 58 | 59 | # Sphinx documentation 60 | docs/build/ 61 | docs/source/generated/ 62 | 63 | # pytest 64 | .pytest_cache/ 65 | 66 | # PyBuilder 67 | target/ 68 | 69 | # Editor files 70 | # mac 71 | .DS_Store 72 | *~ 73 | 74 | # vim 75 | *.swp 76 | *.swo 77 | 78 | # pycharm 79 | .idea/ 80 | 81 | # VSCode 82 | .vscode/ 83 | 84 | # Ipython Notebook 85 | .ipynb_checkpoints 86 | -------------------------------------------------------------------------------- /.isort.cfg: -------------------------------------------------------------------------------- 1 | [settings] 2 | line_length = 125 3 | multi_line_output = 3 4 | include_trailing_comma = True 5 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pre-commit/pre-commit-hooks 3 | rev: v5.0.0 4 | hooks: 5 | - id: check-added-large-files 6 | - id: check-yaml 7 | - id: check-merge-conflict 8 | - id: end-of-file-fixer 9 | 10 | - repo: local 11 | hooks: 12 | - id: ruff 13 | name: lint with ruff 14 | language: system 15 | entry: ruff check --force-exclude 16 | types: [python] 17 | require_serial: true 18 | 19 | - id: ruff-format 20 | name: format with ruff 21 | language: system 22 | entry: ruff format --force-exclude 23 | types: [python] 24 | require_serial: true 25 | 26 | - id: import-linter 27 | name: ensure import directionality 28 | pass_filenames: false 29 | language: system 30 | entry: lint-imports 31 | types: [python] 32 | require_serial: false 33 | 34 | - repo: https://github.com/kynan/nbstripout 35 | rev: 0.8.1 36 | hooks: 37 | - id: nbstripout 38 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2020, Brookhaven National Laboratory 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | 1. 
Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | 2. Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | 3. Neither the name of the copyright holder nor the names of its contributors 17 | may be used to endorse or promote products derived from this software 18 | without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | ========= 2 | blop 3 | ========= 4 | 5 | .. image:: https://github.com/NSLS-II/blop/actions/workflows/testing.yml/badge.svg 6 | :target: https://github.com/NSLS-II/blop/actions/workflows/testing.yml 7 | 8 | 9 | .. image:: https://img.shields.io/pypi/v/blop.svg 10 | :target: https://pypi.python.org/pypi/blop 11 | 12 | .. image:: https://img.shields.io/conda/vn/conda-forge/blop.svg 13 | :target: https://anaconda.org/conda-forge/blop 14 | 15 | Beamline Optimization Tools 16 | 17 | * Free software: 3-clause BSD license 18 | * Documentation: https://NSLS-II.github.io/blop. 19 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = "-W" # This flag turns warnings into errors. 6 | SPHINXBUILD = sphinx-build 7 | SPHINXPROJ = PackagingScientificPython 8 | SOURCEDIR = source 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -j auto 21 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=source 11 | set BUILDDIR=build 12 | set SPHINXPROJ=PackagingScientificPython 13 | 14 | if "%1" == "" goto help 15 | 16 | %SPHINXBUILD% >NUL 2>NUL 17 | if errorlevel 9009 ( 18 | echo. 19 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 20 | echo.installed, then set the SPHINXBUILD environment variable to point 21 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 22 | echo.may add the Sphinx directory to PATH. 23 | echo. 24 | echo.If you don't have Sphinx installed, grab it from 25 | echo.http://sphinx-doc.org/ 26 | exit /b 1 27 | ) 28 | 29 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 30 | goto end 31 | 32 | :help 33 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 34 | 35 | :end 36 | popd 37 | -------------------------------------------------------------------------------- /docs/source/_static/.placeholder: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NSLS-II/blop/46ba5cbfaf557871c58508063e6dcbf7afd0af6d/docs/source/_static/.placeholder -------------------------------------------------------------------------------- /docs/source/_static/xrt_blop_layout_w.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NSLS-II/blop/46ba5cbfaf557871c58508063e6dcbf7afd0af6d/docs/source/_static/xrt_blop_layout_w.jpg -------------------------------------------------------------------------------- /docs/source/agent.rst: -------------------------------------------------------------------------------- 1 | Agent 2 | +++++ 3 | 4 | The blop ``Agent`` takes care of the entire optimization loop, from data acquisition to model fitting. 5 | 6 | .. code-block:: python 7 | 8 | from blop import DOF, Objective, Agent 9 | 10 | dofs = [ 11 | DOF(name="x1", description="the first DOF", search_domain=(-10, 10)), 12 | DOF(name="x2", description="another DOF", search_domain=(-5, 5)), 13 | DOF(name="x3", description="yet another DOF", search_domain=(0, 1)) 14 | ] 15 | 16 | objectives = [ 17 | Objective(name="y1", description="something to minimize", target="min"), 18 | Objective(name="y2", description="something to maximize", target="max") 19 | ] 20 | 21 | dets = [ 22 | my_detector, # an ophyd device with a .trigger() method that determines "y1" 23 | my_other_detector # a detector that measures "y2" 24 | ] 25 | 26 | agent = Agent(dofs=dofs, objectives=objectives, dets=dets) 27 | 28 | 29 | This creates an ``Agent`` with no data about the world, and thus no way to model it. 30 | We have to start by asking the ``Agent`` to learn by randomly sampling the parameter space. 31 | The ``Agent`` learns with Bluesky plans emitted by the ``agent.learn()`` method, which can be passed to a ``RunEngine``: 32 | 33 | ..
code-block:: python 34 | 35 | RE(agent.learn("qr", n=16)) # the agent chooses 16 quasi-random points, samples them, and fits models to them 36 | -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # blop documentation build configuration file, created by 4 | # sphinx-quickstart on Thu Jun 28 12:35:56 2018. 5 | # 6 | # This file is execfile()d with the current directory set to its 7 | # containing dir. 8 | # 9 | # Note that not all possible configuration values are present in this 10 | # autogenerated file. 11 | # 12 | # All configuration values have a default; values that are commented out 13 | # serve to show the default. 14 | 15 | # If extensions (or modules to document with autodoc) are in another directory, 16 | # add these directories to sys.path here. If the directory is relative to the 17 | # documentation root, use os.path.abspath to make it absolute, like shown here. 18 | # 19 | # import os 20 | # import sys 21 | # sys.path.insert(0, os.path.abspath('.')) 22 | 23 | # The version info for the project you're documenting, acts as replacement for 24 | # |version| and |release|, also used in various other places throughout the 25 | # built documents. 26 | # 27 | import blop 28 | 29 | # -- General configuration ------------------------------------------------ 30 | 31 | # If your documentation needs a minimal Sphinx version, state it here. 32 | # 33 | # needs_sphinx = '1.0' 34 | 35 | # Add any Sphinx extension module names here, as strings. They can be 36 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 37 | # ones. 38 | extensions = [ 39 | "sphinx.ext.autodoc", 40 | "sphinx.ext.autosummary", 41 | "sphinx.ext.githubpages", 42 | "sphinx.ext.intersphinx", 43 | "sphinx.ext.mathjax", 44 | "sphinx.ext.viewcode", 45 | "IPython.sphinxext.ipython_directive", 46 | "IPython.sphinxext.ipython_console_highlighting", 47 | "matplotlib.sphinxext.plot_directive", 48 | "numpydoc", 49 | "sphinx_copybutton", 50 | "nbsphinx", 51 | ] 52 | 53 | # Configuration options for plot_directive. See: 54 | # https://github.com/matplotlib/matplotlib/blob/f3ed922d935751e08494e5fb5311d3050a3b637b/lib/matplotlib/sphinxext/plot_directive.py#L81 55 | plot_html_show_source_link = False 56 | plot_html_show_formats = False 57 | 58 | # Generate the API documentation when building 59 | autosummary_generate = True 60 | numpydoc_show_class_members = False 61 | 62 | # Add any paths that contain templates here, relative to this directory. 63 | templates_path = ["_templates"] 64 | 65 | # The suffix(es) of source filenames. 66 | # You can specify multiple suffix as a list of string: 67 | # 68 | # source_suffix = ['.rst', '.md'] 69 | source_suffix = ".rst" 70 | 71 | # The master toctree document. 72 | master_doc = "index" 73 | 74 | # General information about the project. 75 | project = "blop" 76 | copyright = "2020, Brookhaven National Laboratory" 77 | author = "Brookhaven National Laboratory" 78 | 79 | # The short X.Y version. 80 | version = blop.__version__ 81 | # The full version, including alpha/beta/rc tags. 82 | release = blop.__version__ 83 | 84 | # The language for content autogenerated by Sphinx. Refer to documentation 85 | # for a list of supported languages. 86 | # 87 | # This is also used if you do content translation via gettext catalogs. 88 | # Usually you set "language" from the command line for these cases. 
89 | language = "en" 90 | 91 | # List of patterns, relative to source directory, that match files and 92 | # directories to ignore when looking for source files. 93 | # This patterns also effect to html_static_path and html_extra_path 94 | exclude_patterns = [] 95 | 96 | # The name of the Pygments (syntax highlighting) style to use. 97 | pygments_style = "sphinx" 98 | 99 | # If true, `todo` and `todoList` produce output, else they produce nothing. 100 | todo_include_todos = False 101 | 102 | 103 | # -- Options for HTML output ---------------------------------------------- 104 | 105 | # The theme to use for HTML and HTML Help pages. See the documentation for 106 | # a list of builtin themes. 107 | # 108 | html_theme = "furo" 109 | 110 | # Theme options are theme-specific and customize the look and feel of a theme 111 | # further. For a list of options available for each theme, see the 112 | # documentation. 113 | # 114 | # html_theme_options = {} 115 | 116 | # Add any paths that contain custom static files (such as style sheets) here, 117 | # relative to this directory. They are copied after the builtin static files, 118 | # so a file named "default.css" will overwrite the builtin "default.css". 119 | html_static_path = ["_static"] 120 | 121 | # Custom sidebar templates, must be a dictionary that maps document names 122 | # to template names. 123 | 124 | 125 | # -- Options for HTMLHelp output ------------------------------------------ 126 | 127 | # Output file base name for HTML help builder. 128 | htmlhelp_basename = "blop" 129 | 130 | 131 | # -- Options for LaTeX output --------------------------------------------- 132 | 133 | latex_elements = { 134 | # The paper size ('letterpaper' or 'a4paper'). 135 | # 'papersize': 'letterpaper', 136 | # 137 | # The font size ('10pt', '11pt' or '12pt'). 138 | # 'pointsize': '10pt', 139 | # 140 | # Additional stuff for the LaTeX preamble. 141 | # 'preamble': '', 142 | # 143 | # Latex figure (float) alignment 144 | # 'figure_align': 'htbp', 145 | } 146 | 147 | # Grouping the document tree into LaTeX files. List of tuples 148 | # (source start file, target name, title, 149 | # author, documentclass [howto, manual, or own class]). 150 | latex_documents = [ 151 | ( 152 | master_doc, 153 | "blop.tex", 154 | "blop Documentation", 155 | "Contributors", 156 | "manual", 157 | ), 158 | ] 159 | 160 | 161 | # -- Options for manual page output --------------------------------------- 162 | 163 | # One entry per manual page. List of tuples 164 | # (source start file, name, description, authors, manual section). 165 | man_pages = [ 166 | ( 167 | master_doc, 168 | "blop", 169 | "blop Documentation", 170 | [author], 171 | 1, 172 | ) 173 | ] 174 | 175 | 176 | # -- Options for Texinfo output ------------------------------------------- 177 | 178 | # Grouping the document tree into Texinfo files. List of tuples 179 | # (source start file, target name, title, author, 180 | # dir menu entry, description, category) 181 | texinfo_documents = [ 182 | ( 183 | master_doc, 184 | "blop", 185 | "blop Documentation", 186 | author, 187 | "blop", 188 | "Beamline Optimization Tools", 189 | "Miscellaneous", 190 | ), 191 | ] 192 | 193 | 194 | # Example configuration for intersphinx: refer to the Python standard library. 
195 | intersphinx_mapping = { 196 | "python": ("https://docs.python.org/3/", None), 197 | "numpy": ("https://numpy.org/doc/stable/", None), 198 | "scipy": ("https://docs.scipy.org/doc/scipy/reference/", None), 199 | "pandas": ("https://pandas.pydata.org/pandas-docs/stable", None), 200 | "matplotlib": ("https://matplotlib.org/stable", None), 201 | } 202 | -------------------------------------------------------------------------------- /docs/source/dofs.rst: -------------------------------------------------------------------------------- 1 | Degrees of freedom (DOFs) 2 | +++++++++++++++++++++++++ 3 | 4 | Continuous degrees of freedom 5 | ----------------------------- 6 | 7 | A degree of freedom is a variable that affects our optimization objective. We can define a simple DOF as 8 | 9 | .. code-block:: python 10 | 11 | from blop import DOF 12 | 13 | dof = DOF(name="x1", description="my first DOF", search_domain=(lower, upper)) 14 | 15 | This will instantiate a bunch of stuff under the hood, so that our agent knows how to move things and where to search. 16 | Typically, this will correspond to a real, physical device available in Python. In that case, we can pass the DOF an ophyd device in place of a name 17 | 18 | .. code-block:: python 19 | 20 | from blop import DOF 21 | 22 | dof = DOF(device=my_ophyd_device, description="a real piece of hardware", search_domain=(lower, upper)) 23 | 24 | In this case, the agent will control the device as it sees fit, moving it between the search bounds. 25 | 26 | Sometimes, a DOF may be something we can't directly control (e.g. a changing synchrotron current or a changing sample temperature) but want our agent to be aware of. 27 | In this case, we can define a read-only DOF as 28 | 29 | .. code-block:: python 30 | 31 | from blop import DOF 32 | 33 | dof = DOF(device=a_read_only_ophyd_device, description="a thermometer or something", read_only=True, trust_domain=(lower, upper)) 34 | 35 | and the agent will use the received values to model its objective, but won't try to move it. 36 | We can also pass a set of ``trust_domain``, so that our agent will ignore experiments where the DOF value jumps outside of the interval. 37 | 38 | 39 | Discrete degrees of freedom 40 | --------------------------- 41 | 42 | In addition to degrees of freedom that vary continuously between a lower and upper bound, we can define discrete degrees of freedom. 43 | One kind is a binary degree of freedom, where the input can take one of two values, e.g. 44 | 45 | .. code-block:: python 46 | 47 | discrete_dof = DOF(name="x1", description="A discrete DOF", type="discrete", search_domain={"in", "out"}) 48 | 49 | Another is an ordinal degree of freedom, which takes more than two discrete values but has some ordering, e.g. 50 | 51 | .. code-block:: python 52 | 53 | ordinal_dof = DOF(name="x1", description="An ordinal DOF", type="ordinal", search_domain={"low", "medium", "high"}) 54 | 55 | The last is a categorical degree of freedom, which can take many different discrete values with no ordering, e.g. 56 | 57 | .. code-block:: python 58 | 59 | categorical_dof = DOF(name="x1", description="A categorical DOF", type="categorical", search_domain={"banana", "mango", "papaya"}) 60 | -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | .. Packaging Scientific Python documentation master file, created by 2 | sphinx-quickstart on Thu Jun 28 12:35:56 2018. 
3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | blop Documentation 7 | ======================= 8 | 9 | .. toctree:: 10 | :maxdepth: 2 11 | 12 | installation 13 | usage 14 | tutorials 15 | release-history 16 | min_versions 17 | -------------------------------------------------------------------------------- /docs/source/installation.rst: -------------------------------------------------------------------------------- 1 | ========================= 2 | Installation instructions 3 | ========================= 4 | 5 | Installation 6 | ------------ 7 | 8 | The package works with Python 3.10+ and can be installed from both PyPI and conda-forge. 9 | 10 | To install the package using the ``pip`` package manager, run the following command: 11 | 12 | .. code:: bash 13 | 14 | $ python3 -m pip install blop 15 | 16 | To install the package using the ``conda`` package manager, run the following command: 17 | 18 | .. code:: bash 19 | 20 | $ conda install -c conda-forge blop 21 | 22 | If you'd like to use the Sirepo backend and ``sirepo-bluesky`` ophyd objects, please 23 | follow the `Sirepo/Sirepo-Bluesky installation & configuration instructions 24 | `_. 25 | 26 | 27 | Run tests 28 | --------- 29 | 30 | .. code:: bash 31 | 32 | $ pytest -vv -s -x --pdb 33 | 34 | 35 | Build documentation 36 | ------------------- 37 | 38 | .. code:: bash 39 | 40 | $ make -C docs/ html 41 | -------------------------------------------------------------------------------- /docs/source/min_versions.rst: -------------------------------------------------------------------------------- 1 | =================================== 2 | Minimum Version of Python and NumPy 3 | =================================== 4 | 5 | 6 | - This project supports at least the minor versions of Python 7 | initially released 42 months prior to a planned project release 8 | date. 9 | - The project will always support at least the 2 latest minor 10 | versions of Python. 11 | - The project will support minor versions of ``numpy`` initially 12 | released in the 24 months prior to a planned project release date or 13 | the oldest version that supports the minimum Python version 14 | (whichever is higher). 15 | - The project will always support at least the 3 latest minor 16 | versions of NumPy. 17 | 18 | The minimum supported version of Python will be set to 19 | ``python_requires`` in ``setup``. All supported minor versions of 20 | Python will be in the test matrix and have binary artifacts built 21 | for releases. 22 | 23 | The project should adjust upward the minimum Python and NumPy 24 | version support on every minor and major release, but never on a 25 | patch release. 26 | 27 | This is consistent with NumPy `NEP 29 28 | `__. 29 | -------------------------------------------------------------------------------- /docs/source/objectives.rst: -------------------------------------------------------------------------------- 1 | Objectives 2 | ++++++++++ 3 | 4 | Objectives are what control how optimal the output of our experiment is, and are defined by ``Objective`` objects. 5 | 6 | ``blop`` combines one or many ``Objective`` objects into an ``ObjectiveList``, which encapsulates how we model and optimize our outputs. 7 | 8 | Fitness 9 | ------- 10 | 11 | A fitness objective is an ``Objective`` that minimizes or maximizes a given value. 12 | 13 | * Maximize the flux of a beam of light. 14 | * Minimize the size of a beam. 
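For example, both of the fitness goals above could be expressed as a pair of ``Objective`` objects and optimized together (a minimal sketch; the names ``beam_flux`` and ``beam_size`` are hypothetical, standing in for whatever quantities your digestion produces):

.. code-block:: python

    from blop import Objective

    objectives = [
        Objective(name="beam_flux", target="max"),  # maximize the flux
        Objective(name="beam_size", target="min"),  # minimize the size
    ]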
15 | 16 | We can construct an objective to maximize some output with 17 | 18 | .. code-block:: python 19 | 20 | from blop import Objective 21 | 22 | objective = Objective(name="some_output", target="max") # or "min" 23 | 24 | Given some data, the ``Objective`` object will try to model the quantity ``some_output`` and find the corresponding inputs that maximize it. 25 | We can also apply a transform to the value to make it more Gaussian when we fit to it. 26 | This is especially useful when the quantity tends to be non-Gaussian, like with a beam flux. 27 | 28 | .. code-block:: python 29 | 30 | from blop import Objective 31 | 32 | objective_with_log_transform = Objective(name="some_output", target="max", transform="log") 33 | 34 | objective_with_arctanh_transform = Objective(name="some_output", target="max", transform="arctanh") 35 | 36 | We can also give the objective a target range: 37 | .. code-block:: python 38 | 39 | objective = Objective(name="y1", target=(1, 3)) # find any input that puts "y1" between 1 and 3. 40 | 41 | 42 | Often, the objective being modeled is some strictly positive quantity (e.g. the size of a beam being aligned). 43 | In this case, a smart thing to do is to use ``transform="log"``, which will model and subsequently optimize the logarithm of the objective: 44 | 45 | .. code-block:: python 46 | 47 | from blop import Objective 48 | 49 | objective = Objective(name="some_strictly_positive_output", target="max", transform="log") 50 | 51 | 52 | Constraints 53 | ----------- 54 | 55 | A constraint objective doesn't try to minimize or maximize a value; it just tries to maximize the chance that the objective is within some acceptable range. 56 | This is useful in optimization problems like 57 | 58 | * Require a beam to be within some box. 59 | * Require the wavelength of light to be a certain color. 60 | * Require a beam to be focused enough to perform some experiment, without needing it to be optimal. 61 | 62 | .. code-block:: python 63 | 64 | # ensure the color is approximately green 65 | color_objective = Objective(name="peak_wavelength", target=(520, 530), units="nm") 66 | 67 | # ensure the beam is smaller than 10 microns 68 | width_objective = Objective(name="beam_width", target=(-np.inf, 10), units="um", transform="log") 69 | 70 | # ensure our flux is at least some value 71 | flux_objective = Objective(name="beam_flux", target=(1.0, np.inf), transform="log") 72 | 73 | 74 | 75 | Validity 76 | -------- 77 | 78 | A common problem in beamline optimization is the random or systematically invalid measurement of objectives. This arises in different ways, like when 79 | 80 | * The beam misses the detector, leading our beam parser to return some absurdly small or large value. 81 | * Some part of the experiment glitches, leading to an uncharacteristic data point. 82 | * Some part of the data postprocessing pipeline fails, giving no value for the output. 83 | 84 | We obviously want to exclude these points from our model fitting, but if we stopped there, inputs that always lead to invalid outputs would cause an infinite loop of sampling interesting but invalid points (as those points are immediately removed every time). 85 | The boundary between valid and invalid points is often highly nonlinear and unknown *a priori*. 86 | We solve this by implementing a validity model for each ``Objective``, which constructs and fits a probabilistic model of validity over all inputs.
87 | Using this model, we constrain acquisition functions to take into account the possibility that the output value is invalid, meaning it will eventually learn to ignore infeasible points. 88 | 89 | We can control the exclusion of data points in two ways. The first is to specify a ``trust_domain`` for the objective, so that the model only "trusts" points in that domain. For example: 90 | 91 | .. code-block:: python 92 | 93 | # any beam smaller than two pixels shouldn't be trusted. 94 | # any beam larger than 100 pixels will mess up our model and aren't interesting anyway 95 | objective = Objective(name="beam_size", trust_domain=(2, 100), units="pixels") 96 | 97 | This will set any value outside of the ``trust_domain`` to ``NaN``, which the model will learn to avoid. 98 | The second way is to ensure that any invalid values are converted to ``NaN`` in the diagnostics, before the agent ever sees them. 99 | -------------------------------------------------------------------------------- /docs/source/release-history.rst: -------------------------------------------------------------------------------- 1 | =============== 2 | Release History 3 | =============== 4 | 5 | v0.7.4 (2025-03-04) 6 | ------------------- 7 | * Add missing files for documentation 8 | * Fix trigger condition for releases on PyPI and documentation 9 | 10 | v0.7.3 (2025-03-04) 11 | ------------------- 12 | What's Changed 13 | .............. 14 | * Fix documentation CI error by `@jennmald `_ in https://github.com/NSLS-II/blop/pull/84 15 | * Fix fitness and constraint plots by `@jennmald `_ in https://github.com/NSLS-II/blop/pull/80 16 | * Refactor: Make agent default compatible with Bluesky Adaptive by `@maffettone `_ in https://github.com/NSLS-II/blop/pull/86 17 | * Ruff linter support; Removal of black, flake8, and isort by `@thomashopkins32 `_ in https://github.com/NSLS-II/blop/pull/95 18 | * Add type hints by `@thomashopkins32 `_ in https://github.com/NSLS-II/blop/pull/87 19 | * Remove Python 3.9 support by `@thomashopkins32 `_ in https://github.com/NSLS-II/blop/pull/101 20 | * Add XRT demo to blop tutorials by `@jennmald `_ in https://github.com/NSLS-II/blop/pull/102 21 | 22 | New Contributors 23 | ................ 24 | * `@jennmald `_ made their first contribution in https://github.com/NSLS-II/blop/pull/84 25 | * `@maffettone `_ made their first contribution in https://github.com/NSLS-II/blop/pull/86 26 | * `@thomashopkins32 `_ made their first contribution in https://github.com/NSLS-II/blop/pull/95 27 | 28 | **Full Changelog**: https://github.com/NSLS-II/blop/compare/v0.7.2...v0.7.3 29 | 30 | v0.7.2 (2025-01-31) 31 | ------------------- 32 | - Renamed package in PyPI to `blop `_. 33 | - `bloptools `_ is still avaliable on PyPI. 34 | 35 | v0.7.1 (2024-09-26) 36 | ------------------- 37 | - Add simulated hardware. 38 | - Added a method to prune bad data. 39 | 40 | v0.7.0 (2024-05-13) 41 | ------------------- 42 | - Added functionality for Pareto optimization. 43 | - Support for discrete degrees of freedom. 44 | 45 | v0.6.0 (2024-02-01) 46 | ------------------- 47 | - More sophisticated targeting capabilities for different objectives. 48 | - More user-friendly agent controls. 49 | 50 | v0.5.0 (2023-11-09) 51 | ------------------- 52 | - Added hypervolume acquisition and constraints. 53 | - Better specification of latent dimensions. 54 | - Implemented Monte Carlo acquisition functions. 55 | - Added classes for DOFs and objectives. 
56 | 57 | v0.4.0 (2023-08-11) 58 | ------------------- 59 | 60 | - Easier-to-use syntax when building the agent. 61 | - Modular and stateful agent design for better usability. 62 | - Added the ability to save/load both data and hyperparameters. 63 | - Added passive degrees of freedom. 64 | - Added a number of `test functions / artificial landscapes for optimization 65 | `_. 66 | - Updated the Sphinx documentation theme to `furo `_. 67 | 68 | 69 | v0.3.0 (2023-06-17) 70 | ------------------- 71 | 72 | - Implemented multi-task optimization. 73 | - Simplified the syntax on initializing the agent. 74 | - Resolved issues discovered at NSLS-II ISS. 75 | 76 | 77 | v0.2.0 (2023-04-25) 78 | ------------------- 79 | 80 | - Rebased the Bayesian optimization models to be compatible with ``botorch`` code. 81 | - Optimization objectives can be customized with ``experiment`` modules. 82 | - Added optimization test functions for quicker testing and development. 83 | 84 | 85 | v0.1.0 (2023-03-10) 86 | ------------------- 87 | 88 | - Changed from using ``SafeConfigParser`` to ``ConfigParser``. 89 | - Implemented the initial version of the GP optimizer. 90 | - Updated the repo structure based on the new cookiecutter. 91 | - Added tests to the CI. 92 | 93 | 94 | v0.0.2 (2021-05-14) 95 | ------------------- 96 | 97 | Fixed ``_run_flyers()`` for sirepo optimization. 98 | 99 | 100 | v0.0.1 - Initial Release (2020-09-01) 101 | ------------------------------------- 102 | 103 | Initial release of the Beamline Optimization library. 104 | 105 | Used in: 106 | 107 | - https://github.com/NSLS-II-TES/profile_simulated_hardware 108 | - https://github.com/NSLS-II-TES/profile_sirepo 109 | 110 | Planned: 111 | 112 | - https://github.com/NSLS-II-TES/profile_collection 113 | -------------------------------------------------------------------------------- /docs/source/tutorials.rst: -------------------------------------------------------------------------------- 1 | Tutorials 2 | ========= 3 | 4 | .. toctree:: 5 | :maxdepth: 2 6 | 7 | tutorials/introduction.ipynb 8 | tutorials/hyperparameters.ipynb 9 | tutorials/pareto-fronts.ipynb 10 | tutorials/passive-dofs.ipynb 11 | tutorials/kb-mirrors.ipynb 12 | tutorials/xrt-blop-demo.ipynb 13 | -------------------------------------------------------------------------------- /docs/source/tutorials/hyperparameters.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "attachments": {}, 5 | "cell_type": "markdown", 6 | "id": "0", 7 | "metadata": {}, 8 | "source": [ 9 | "# Hyperparameters\n", 10 | "\n", 11 | "Consider the Booth test function (below). This function varies differently in different directions, and these directions are somewhat skewed with respect to the inputs. Our agent will automatically fit the right hyperparameters to account for this." 
12 | ] 13 | }, 14 | { 15 | "cell_type": "code", 16 | "execution_count": null, 17 | "id": "1", 18 | "metadata": {}, 19 | "outputs": [], 20 | "source": [ 21 | "import matplotlib as mpl\n", 22 | "import numpy as np\n", 23 | "from matplotlib import pyplot as plt\n", 24 | "\n", 25 | "from blop.utils import functions\n", 26 | "\n", 27 | "x1 = x2 = np.linspace(-10, 10, 256)\n", 28 | "X1, X2 = np.meshgrid(x1, x2)\n", 29 | "\n", 30 | "F = functions.booth(X1, X2)\n", 31 | "\n", 32 | "plt.pcolormesh(x1, x2, F, norm=mpl.colors.LogNorm(), shading=\"auto\")\n", 33 | "plt.colorbar()\n", 34 | "plt.xlabel(\"x1\")\n", 35 | "plt.ylabel(\"x2\")" 36 | ] 37 | }, 38 | { 39 | "attachments": {}, 40 | "cell_type": "markdown", 41 | "id": "2", 42 | "metadata": {}, 43 | "source": [ 44 | "The optimization goes faster if our model understands how the function changes as we change the inputs in different ways. The way it picks up on this is by starting from a general model that could describe a lot of functions, and making it specific to this one by choosing the right hyperparameters. Our Bayesian agent is very good at this, and only needs a few samples to figure out what the function looks like:" 45 | ] 46 | }, 47 | { 48 | "cell_type": "code", 49 | "execution_count": null, 50 | "id": "3", 51 | "metadata": {}, 52 | "outputs": [], 53 | "source": [ 54 | "def digestion(df):\n", 55 | " for index, entry in df.iterrows():\n", 56 | " df.loc[index, \"booth\"] = functions.booth(entry.x1, entry.x2)\n", 57 | "\n", 58 | " return df" 59 | ] 60 | }, 61 | { 62 | "cell_type": "code", 63 | "execution_count": null, 64 | "id": "4", 65 | "metadata": { 66 | "tags": [] 67 | }, 68 | "outputs": [], 69 | "source": [ 70 | "from blop.utils import prepare_re_env # noqa\n", 71 | "\n", 72 | "%run -i $prepare_re_env.__file__ --db-type=temp\n", 73 | "\n", 74 | "from blop import DOF, Agent, Objective\n", 75 | "\n", 76 | "dofs = [\n", 77 | " DOF(name=\"x1\", search_domain=(-6, 6)),\n", 78 | " DOF(name=\"x2\", search_domain=(-6, 6)),\n", 79 | "]\n", 80 | "\n", 81 | "objectives = [\n", 82 | " Objective(name=\"booth\", target=\"min\"),\n", 83 | "]\n", 84 | "\n", 85 | "\n", 86 | "agent = Agent(\n", 87 | " dofs=dofs,\n", 88 | " objectives=objectives,\n", 89 | " digestion=digestion,\n", 90 | " db=db,\n", 91 | ")\n", 92 | "\n", 93 | "RE(agent.learn(acqf=\"qr\", n=16))\n", 94 | "\n", 95 | "agent.plot_objectives()" 96 | ] 97 | }, 98 | { 99 | "attachments": {}, 100 | "cell_type": "markdown", 101 | "id": "5", 102 | "metadata": {}, 103 | "source": [ 104 | "In addition to modeling the fitness of the task, the agent models the probability that an input will be feasible:" 105 | ] 106 | }, 107 | { 108 | "cell_type": "code", 109 | "execution_count": null, 110 | "id": "6", 111 | "metadata": { 112 | "tags": [] 113 | }, 114 | "outputs": [], 115 | "source": [ 116 | "agent.plot_acquisition(acqf=\"qei\")" 117 | ] 118 | }, 119 | { 120 | "cell_type": "code", 121 | "execution_count": null, 122 | "id": "7", 123 | "metadata": {}, 124 | "outputs": [], 125 | "source": [ 126 | "RE(agent.learn(\"qei\", n=4, iterations=4))\n", 127 | "agent.plot_objectives()" 128 | ] 129 | }, 130 | { 131 | "cell_type": "code", 132 | "execution_count": null, 133 | "id": "8", 134 | "metadata": {}, 135 | "outputs": [], 136 | "source": [ 137 | "agent.best" 138 | ] 139 | } 140 | ], 141 | "metadata": { 142 | "kernelspec": { 143 | "display_name": "blop-dev", 144 | "language": "python", 145 | "name": "python3" 146 | }, 147 | "language_info": { 148 | "codemirror_mode": { 149 | "name": "ipython", 150 | "version": 
3 151 | }, 152 | "file_extension": ".py", 153 | "mimetype": "text/x-python", 154 | "name": "python", 155 | "nbconvert_exporter": "python", 156 | "pygments_lexer": "ipython3", 157 | "version": "3.12.8" 158 | } 159 | }, 160 | "nbformat": 4, 161 | "nbformat_minor": 5 162 | } 163 | -------------------------------------------------------------------------------- /docs/source/tutorials/introduction.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "attachments": {}, 5 | "cell_type": "markdown", 6 | "id": "0", 7 | "metadata": {}, 8 | "source": [ 9 | "# Introduction (Himmelblau's function)\n", 10 | "\n" 11 | ] 12 | }, 13 | { 14 | "attachments": {}, 15 | "cell_type": "markdown", 16 | "id": "1", 17 | "metadata": {}, 18 | "source": [ 19 | "Let's use ``blop`` to minimize Himmelblau's function, which has four global minima:" 20 | ] 21 | }, 22 | { 23 | "cell_type": "code", 24 | "execution_count": null, 25 | "id": "2", 26 | "metadata": {}, 27 | "outputs": [], 28 | "source": [ 29 | "from blop.utils import prepare_re_env # noqa\n", 30 | "\n", 31 | "%run -i $prepare_re_env.__file__ --db-type=temp" 32 | ] 33 | }, 34 | { 35 | "cell_type": "code", 36 | "execution_count": null, 37 | "id": "3", 38 | "metadata": {}, 39 | "outputs": [], 40 | "source": [ 41 | "import matplotlib as mpl\n", 42 | "import numpy as np\n", 43 | "from matplotlib import pyplot as plt\n", 44 | "\n", 45 | "from blop.utils import functions\n", 46 | "\n", 47 | "x1 = x2 = np.linspace(-6, 6, 256)\n", 48 | "X1, X2 = np.meshgrid(x1, x2)\n", 49 | "\n", 50 | "F = functions.himmelblau(X1, X2)\n", 51 | "\n", 52 | "plt.pcolormesh(x1, x2, F, norm=mpl.colors.LogNorm(vmin=1e-1, vmax=1e3), cmap=\"magma_r\")\n", 53 | "plt.colorbar()\n", 54 | "plt.xlabel(\"x1\")\n", 55 | "plt.ylabel(\"x2\")" 56 | ] 57 | }, 58 | { 59 | "cell_type": "markdown", 60 | "id": "4", 61 | "metadata": {}, 62 | "source": [ 63 | "There are several things that our agent will need. The first ingredient is some degrees of freedom (these are always `ophyd` devices) which the agent will move around to different inputs within each DOF's bounds (the second ingredient). We define these here:" 64 | ] 65 | }, 66 | { 67 | "cell_type": "code", 68 | "execution_count": null, 69 | "id": "5", 70 | "metadata": {}, 71 | "outputs": [], 72 | "source": [ 73 | "from blop import DOF\n", 74 | "\n", 75 | "dofs = [\n", 76 | " DOF(name=\"x1\", search_domain=(-6, 6)),\n", 77 | " DOF(name=\"x2\", search_domain=(-6, 6)),\n", 78 | "]" 79 | ] 80 | }, 81 | { 82 | "cell_type": "markdown", 83 | "id": "6", 84 | "metadata": {}, 85 | "source": [ 86 | "We also need to give the agent something to do. We want our agent to look in the feedback for a variable called 'himmelblau', and try to minimize it." 
87 | ] 88 | }, 89 | { 90 | "cell_type": "code", 91 | "execution_count": null, 92 | "id": "7", 93 | "metadata": {}, 94 | "outputs": [], 95 | "source": [ 96 | "from blop import Objective\n", 97 | "\n", 98 | "objectives = [Objective(name=\"himmelblau\", description=\"Himmeblau's function\", target=\"min\")]" 99 | ] 100 | }, 101 | { 102 | "attachments": {}, 103 | "cell_type": "markdown", 104 | "id": "8", 105 | "metadata": {}, 106 | "source": [ 107 | "In our digestion function, we define our objective as a deterministic function of the inputs:" 108 | ] 109 | }, 110 | { 111 | "cell_type": "code", 112 | "execution_count": null, 113 | "id": "9", 114 | "metadata": {}, 115 | "outputs": [], 116 | "source": [ 117 | "def digestion(df):\n", 118 | " for index, entry in df.iterrows():\n", 119 | " df.loc[index, \"himmelblau\"] = functions.himmelblau(entry.x1, entry.x2)\n", 120 | "\n", 121 | " return df" 122 | ] 123 | }, 124 | { 125 | "attachments": {}, 126 | "cell_type": "markdown", 127 | "id": "10", 128 | "metadata": {}, 129 | "source": [ 130 | "We then combine these ingredients into an agent, giving it an instance of ``databroker`` so that it can see the output of the plans it runs." 131 | ] 132 | }, 133 | { 134 | "cell_type": "code", 135 | "execution_count": null, 136 | "id": "11", 137 | "metadata": { 138 | "tags": [] 139 | }, 140 | "outputs": [], 141 | "source": [ 142 | "from blop import Agent\n", 143 | "\n", 144 | "agent = Agent(\n", 145 | " dofs=dofs,\n", 146 | " objectives=objectives,\n", 147 | " digestion=digestion,\n", 148 | " db=db,\n", 149 | ")" 150 | ] 151 | }, 152 | { 153 | "cell_type": "markdown", 154 | "id": "12", 155 | "metadata": {}, 156 | "source": [ 157 | "Without any data, we can't make any inferences about what the function looks like, and so we can't use any non-trivial acquisition functions. Let's start by quasi-randomly sampling the parameter space, and plotting our model of the function:" 158 | ] 159 | }, 160 | { 161 | "cell_type": "code", 162 | "execution_count": null, 163 | "id": "13", 164 | "metadata": {}, 165 | "outputs": [], 166 | "source": [ 167 | "RE(agent.learn(\"quasi-random\", n=36))\n", 168 | "agent.plot_objectives()" 169 | ] 170 | }, 171 | { 172 | "cell_type": "markdown", 173 | "id": "14", 174 | "metadata": {}, 175 | "source": [ 176 | "To decide which points to sample, the agent needs an acquisition function. The available acquisition function are here:" 177 | ] 178 | }, 179 | { 180 | "cell_type": "code", 181 | "execution_count": null, 182 | "id": "15", 183 | "metadata": {}, 184 | "outputs": [], 185 | "source": [ 186 | "agent.all_acqfs" 187 | ] 188 | }, 189 | { 190 | "attachments": {}, 191 | "cell_type": "markdown", 192 | "id": "16", 193 | "metadata": {}, 194 | "source": [ 195 | "Now we can start to learn intelligently. 
Using the shorthand acquisition functions shown above, we can see the output of a few different ones:" 196 | ] 197 | }, 198 | { 199 | "cell_type": "code", 200 | "execution_count": null, 201 | "id": "17", 202 | "metadata": {}, 203 | "outputs": [], 204 | "source": [ 205 | "agent.plot_acquisition(acqf=\"qei\")" 206 | ] 207 | }, 208 | { 209 | "attachments": {}, 210 | "cell_type": "markdown", 211 | "id": "18", 212 | "metadata": {}, 213 | "source": [ 214 | "To decide where to go, the agent will find the inputs that maximize a given acquisition function:" 215 | ] 216 | }, 217 | { 218 | "cell_type": "code", 219 | "execution_count": null, 220 | "id": "19", 221 | "metadata": { 222 | "tags": [] 223 | }, 224 | "outputs": [], 225 | "source": [ 226 | "agent.ask(\"qei\", n=1)" 227 | ] 228 | }, 229 | { 230 | "attachments": {}, 231 | "cell_type": "markdown", 232 | "id": "20", 233 | "metadata": {}, 234 | "source": [ 235 | "We can also ask the agent for multiple points to sample and it will jointly maximize the acquisition function over all sets of inputs, and find the most efficient route between them:" 236 | ] 237 | }, 238 | { 239 | "cell_type": "code", 240 | "execution_count": null, 241 | "id": "21", 242 | "metadata": { 243 | "tags": [] 244 | }, 245 | "outputs": [], 246 | "source": [ 247 | "res = agent.ask(\"qei\", n=8, route=True)\n", 248 | "agent.plot_acquisition(acqf=\"qei\")\n", 249 | "plt.scatter(res[\"points\"][\"x1\"], res[\"points\"][\"x2\"], marker=\"d\", facecolor=\"w\", edgecolor=\"k\")\n", 250 | "plt.plot(res[\"points\"][\"x1\"], res[\"points\"][\"x2\"], color=\"r\")" 251 | ] 252 | }, 253 | { 254 | "cell_type": "markdown", 255 | "id": "22", 256 | "metadata": {}, 257 | "source": [ 258 | "All of this is automated inside the ``learn`` method, which will find a point (or points) to sample, sample them, and retrain the model and its hyperparameters with the new data. 
To do 4 learning iterations of 8 points each, we can run" 259 | ] 260 | }, 261 | { 262 | "cell_type": "code", 263 | "execution_count": null, 264 | "id": "23", 265 | "metadata": {}, 266 | "outputs": [], 267 | "source": [ 268 | "RE(agent.learn(\"qei\", n=4, iterations=8))" 269 | ] 270 | }, 271 | { 272 | "cell_type": "markdown", 273 | "id": "24", 274 | "metadata": {}, 275 | "source": [ 276 | "Our agent has found all the global minima of Himmelblau's function using Bayesian optimization, and we can ask it for the best point: " 277 | ] 278 | }, 279 | { 280 | "cell_type": "code", 281 | "execution_count": null, 282 | "id": "25", 283 | "metadata": {}, 284 | "outputs": [], 285 | "source": [ 286 | "agent.plot_objectives()\n", 287 | "print(agent.best)" 288 | ] 289 | } 290 | ], 291 | "metadata": { 292 | "kernelspec": { 293 | "display_name": "blop-dev", 294 | "language": "python", 295 | "name": "python3" 296 | }, 297 | "language_info": { 298 | "codemirror_mode": { 299 | "name": "ipython", 300 | "version": 3 301 | }, 302 | "file_extension": ".py", 303 | "mimetype": "text/x-python", 304 | "name": "python", 305 | "nbconvert_exporter": "python", 306 | "pygments_lexer": "ipython3", 307 | "version": "3.12.8" 308 | } 309 | }, 310 | "nbformat": 4, 311 | "nbformat_minor": 5 312 | } 313 | -------------------------------------------------------------------------------- /docs/source/tutorials/kb-mirrors.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "0", 6 | "metadata": {}, 7 | "source": [ 8 | "# KB Mirrors\n", 9 | "\n", 10 | "This example simulates the alignment of a KB mirror endstation (with four degrees of freedom)." 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": null, 16 | "id": "1", 17 | "metadata": {}, 18 | "outputs": [], 19 | "source": [ 20 | "from blop.utils import prepare_re_env # noqa\n", 21 | "\n", 22 | "%run -i $prepare_re_env.__file__ --db-type=temp\n", 23 | "bec.disable_plots()" 24 | ] 25 | }, 26 | { 27 | "cell_type": "code", 28 | "execution_count": null, 29 | "id": "2", 30 | "metadata": {}, 31 | "outputs": [], 32 | "source": [ 33 | "from blop.sim import Beamline\n", 34 | "\n", 35 | "beamline = Beamline(name=\"bl\")" 36 | ] 37 | }, 38 | { 39 | "cell_type": "code", 40 | "execution_count": null, 41 | "id": "3", 42 | "metadata": {}, 43 | "outputs": [], 44 | "source": [ 45 | "from blop import DOF, Agent, Objective\n", 46 | "from blop.digestion import beam_stats_digestion\n", 47 | "\n", 48 | "dofs = [\n", 49 | " DOF(description=\"KBV downstream\", device=beamline.kbv_dsv, search_domain=(-5.0, 5.0)),\n", 50 | " DOF(description=\"KBV upstream\", device=beamline.kbv_usv, search_domain=(-5.0, 5.0)),\n", 51 | " DOF(description=\"KBH downstream\", device=beamline.kbh_dsh, search_domain=(-5.0, 5.0)),\n", 52 | " DOF(description=\"KBH upstream\", device=beamline.kbh_ush, search_domain=(-5.0, 5.0)),\n", 53 | "]\n", 54 | "\n", 55 | "objectives = [\n", 56 | " Objective(name=\"bl_det_sum\", target=\"max\", transform=\"log\", trust_domain=(200, np.inf)),\n", 57 | " Objective(name=\"bl_det_wid_x\", target=\"min\", transform=\"log\", latent_groups=[(\"bl_kbh_dsh\", \"bl_kbh_ush\")]),\n", 58 | " Objective(name=\"bl_det_wid_y\", target=\"min\", transform=\"log\", latent_groups=[(\"bl_kbv_dsv\", \"bl_kbv_usv\")]),\n", 59 | "]\n", 60 | "\n", 61 | "agent = Agent(\n", 62 | " dofs=dofs,\n", 63 | " objectives=objectives,\n", 64 | " detectors=[beamline.det],\n", 65 | " 
digestion=beam_stats_digestion,\n", 66 | " digestion_kwargs={\"image_key\": \"bl_det_image\"},\n", 67 | " verbose=True,\n", 68 | " db=db,\n", 69 | " tolerate_acquisition_errors=False,\n", 70 | " enforce_all_objectives_valid=True,\n", 71 | " train_every=3,\n", 72 | ")\n", 73 | "\n", 74 | "(uid,) = RE(agent.learn(\"qr\", n=32))" 75 | ] 76 | }, 77 | { 78 | "cell_type": "code", 79 | "execution_count": null, 80 | "id": "4", 81 | "metadata": {}, 82 | "outputs": [], 83 | "source": [ 84 | "RE(agent.learn(\"qei\", n=4, iterations=4))" 85 | ] 86 | }, 87 | { 88 | "cell_type": "code", 89 | "execution_count": null, 90 | "id": "5", 91 | "metadata": {}, 92 | "outputs": [], 93 | "source": [ 94 | "plt.imshow(agent.best.bl_det_image)" 95 | ] 96 | }, 97 | { 98 | "cell_type": "code", 99 | "execution_count": null, 100 | "id": "6", 101 | "metadata": {}, 102 | "outputs": [], 103 | "source": [ 104 | "agent.plot_objectives(axes=(2, 3))" 105 | ] 106 | } 107 | ], 108 | "metadata": { 109 | "kernelspec": { 110 | "display_name": "blop-dev", 111 | "language": "python", 112 | "name": "python3" 113 | }, 114 | "language_info": { 115 | "codemirror_mode": { 116 | "name": "ipython", 117 | "version": 3 118 | }, 119 | "file_extension": ".py", 120 | "mimetype": "text/x-python", 121 | "name": "python", 122 | "nbconvert_exporter": "python", 123 | "pygments_lexer": "ipython3", 124 | "version": "3.12.8" 125 | } 126 | }, 127 | "nbformat": 4, 128 | "nbformat_minor": 5 129 | } 130 | -------------------------------------------------------------------------------- /docs/source/tutorials/pareto-fronts.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "attachments": {}, 5 | "cell_type": "markdown", 6 | "id": "0", 7 | "metadata": {}, 8 | "source": [ 9 | "# Multiobjective optimization with Pareto front mapping\n", 10 | "\n", 11 | "One way to do multiobjective optimization is with Pareto optimization, which explores the set of Pareto-efficient points. A point is Pareto-efficient if there are no other valid points that are better at every objective: it shows the \"trade-off\" between several objectives. 
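To make the dominance rule concrete: a point is kept only if no other valid point is at least as good in every objective and strictly better in at least one. Below is a small, self-contained sketch of that check in plain NumPy — purely illustrative, not part of blop's API (the `agent.plot_pareto_front()` call later in this notebook handles this, along with the constraints, for you).

```python
import numpy as np

# Four candidate points with two objectives each (lower is better).
costs = np.array([[1.0, 4.0], [2.0, 2.0], [3.0, 3.0], [4.0, 1.0]])


def is_pareto_efficient(costs: np.ndarray) -> np.ndarray:
    """Boolean mask of the points that no other point dominates."""
    n = len(costs)
    efficient = np.ones(n, dtype=bool)
    for i in range(n):
        for j in range(n):
            # j dominates i if it is no worse in every objective
            # and strictly better in at least one.
            if i != j and np.all(costs[j] <= costs[i]) and np.any(costs[j] < costs[i]):
                efficient[i] = False
                break
    return efficient


print(costs[is_pareto_efficient(costs)])  # (3, 3) is dominated by (2, 2) and drops out
```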
" 12 | ] 13 | }, 14 | { 15 | "cell_type": "code", 16 | "execution_count": null, 17 | "id": "1", 18 | "metadata": {}, 19 | "outputs": [], 20 | "source": [ 21 | "from blop.utils import prepare_re_env # noqa\n", 22 | "\n", 23 | "%run -i $prepare_re_env.__file__ --db-type=temp" 24 | ] 25 | }, 26 | { 27 | "cell_type": "code", 28 | "execution_count": null, 29 | "id": "2", 30 | "metadata": {}, 31 | "outputs": [], 32 | "source": [ 33 | "import numpy as np\n", 34 | "\n", 35 | "from blop import DOF, Agent, Objective\n", 36 | "\n", 37 | "\n", 38 | "def digestion(df):\n", 39 | " for index, entry in df.iterrows():\n", 40 | " x1, x2 = entry.x1, entry.x2\n", 41 | "\n", 42 | " df.loc[index, \"f1\"] = (x1 - 2) ** 2 + (x2 - 1) + 2\n", 43 | " df.loc[index, \"f2\"] = 9 * x1 - (x2 - 1) + 2\n", 44 | " df.loc[index, \"c1\"] = x1**2 + x2**2\n", 45 | " df.loc[index, \"c2\"] = x1 - 3 * x2 + 10\n", 46 | "\n", 47 | " return df\n", 48 | "\n", 49 | "\n", 50 | "dofs = [\n", 51 | " DOF(name=\"x1\", search_domain=(-20, 20)),\n", 52 | " DOF(name=\"x2\", search_domain=(-20, 20)),\n", 53 | "]\n", 54 | "\n", 55 | "\n", 56 | "objectives = [\n", 57 | " Objective(name=\"f1\", target=\"min\"),\n", 58 | " Objective(name=\"f2\", target=\"min\"),\n", 59 | " Objective(name=\"c1\", constraint=(-np.inf, 225)),\n", 60 | " Objective(name=\"c2\", constraint=(-np.inf, 0)),\n", 61 | "]\n", 62 | "\n", 63 | "agent = Agent(\n", 64 | " dofs=dofs,\n", 65 | " objectives=objectives,\n", 66 | " digestion=digestion,\n", 67 | " db=db,\n", 68 | ")\n", 69 | "\n", 70 | "(uid,) = RE(agent.learn(\"qr\", n=64))" 71 | ] 72 | }, 73 | { 74 | "cell_type": "markdown", 75 | "id": "3", 76 | "metadata": {}, 77 | "source": [ 78 | "We can plot our fitness and constraint objectives to see their models:" 79 | ] 80 | }, 81 | { 82 | "cell_type": "code", 83 | "execution_count": null, 84 | "id": "4", 85 | "metadata": {}, 86 | "outputs": [], 87 | "source": [ 88 | "agent.plot_objectives()" 89 | ] 90 | }, 91 | { 92 | "cell_type": "markdown", 93 | "id": "5", 94 | "metadata": {}, 95 | "source": [ 96 | "We can plot the Pareto front (the set of all Pareto-efficient points), which shows the trade-off between the two fitnesses. The points in blue comprise the Pareto front, while the points in red are either not Pareto efficient or are invalidated by one of the constraints." 97 | ] 98 | }, 99 | { 100 | "cell_type": "code", 101 | "execution_count": null, 102 | "id": "6", 103 | "metadata": {}, 104 | "outputs": [], 105 | "source": [ 106 | "agent.plot_pareto_front()" 107 | ] 108 | }, 109 | { 110 | "cell_type": "markdown", 111 | "id": "7", 112 | "metadata": {}, 113 | "source": [ 114 | "We can explore the Pareto front by choosing a random point on the Pareto front and computing the expected improvement in the hypervolume of all fitness objectives with respect to that point (called the \"reference point\"). 
All this is done automatically with the `qnehvi` acquisition function:" 115 | ] 116 | }, 117 | { 118 | "cell_type": "code", 119 | "execution_count": null, 120 | "id": "8", 121 | "metadata": {}, 122 | "outputs": [], 123 | "source": [ 124 | "# this is broken now but is fixed in the next PR\n", 125 | "# RE(agent.learn(\"qnehvi\", n=4))" 126 | ] 127 | } 128 | ], 129 | "metadata": { 130 | "kernelspec": { 131 | "display_name": "blop-dev", 132 | "language": "python", 133 | "name": "python3" 134 | }, 135 | "language_info": { 136 | "codemirror_mode": { 137 | "name": "ipython", 138 | "version": 3 139 | }, 140 | "file_extension": ".py", 141 | "mimetype": "text/x-python", 142 | "name": "python", 143 | "nbconvert_exporter": "python", 144 | "pygments_lexer": "ipython3", 145 | "version": "3.12.8" 146 | } 147 | }, 148 | "nbformat": 4, 149 | "nbformat_minor": 5 150 | } 151 | -------------------------------------------------------------------------------- /docs/source/tutorials/passive-dofs.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "attachments": {}, 5 | "cell_type": "markdown", 6 | "id": "0", 7 | "metadata": {}, 8 | "source": [ 9 | "# Passive degrees of freedom\n", 10 | "\n" 11 | ] 12 | }, 13 | { 14 | "attachments": {}, 15 | "cell_type": "markdown", 16 | "id": "1", 17 | "metadata": {}, 18 | "source": [ 19 | "Passive dofs!" 20 | ] 21 | }, 22 | { 23 | "cell_type": "code", 24 | "execution_count": null, 25 | "id": "2", 26 | "metadata": {}, 27 | "outputs": [], 28 | "source": [ 29 | "from blop.utils import prepare_re_env # noqa\n", 30 | "\n", 31 | "%run -i $prepare_re_env.__file__ --db-type=temp" 32 | ] 33 | }, 34 | { 35 | "cell_type": "code", 36 | "execution_count": null, 37 | "id": "3", 38 | "metadata": {}, 39 | "outputs": [], 40 | "source": [ 41 | "from blop import DOF, Agent, Objective\n", 42 | "from blop.digestion.tests import constrained_himmelblau_digestion\n", 43 | "from blop.dofs import BrownianMotion\n", 44 | "\n", 45 | "dofs = [\n", 46 | " DOF(name=\"x1\", search_domain=(-5.0, 5.0)),\n", 47 | " DOF(name=\"x2\", search_domain=(-5.0, 5.0)),\n", 48 | " DOF(name=\"x3\", search_domain=(-5.0, 5.0), active=False),\n", 49 | " DOF(device=BrownianMotion(name=\"brownian1\"), read_only=True),\n", 50 | " DOF(device=BrownianMotion(name=\"brownian2\"), read_only=True, active=False),\n", 51 | "]\n", 52 | "\n", 53 | "objectives = [\n", 54 | " Objective(name=\"himmelblau\", target=\"min\"),\n", 55 | "]\n", 56 | "\n", 57 | "agent = Agent(\n", 58 | " dofs=dofs,\n", 59 | " objectives=objectives,\n", 60 | " digestion=constrained_himmelblau_digestion,\n", 61 | " db=db,\n", 62 | " verbose=True,\n", 63 | " tolerate_acquisition_errors=False,\n", 64 | ")\n", 65 | "\n", 66 | "RE(agent.learn(\"qr\", n=16))" 67 | ] 68 | }, 69 | { 70 | "cell_type": "code", 71 | "execution_count": null, 72 | "id": "4", 73 | "metadata": {}, 74 | "outputs": [], 75 | "source": [ 76 | "agent.plot_objectives()" 77 | ] 78 | } 79 | ], 80 | "metadata": { 81 | "kernelspec": { 82 | "display_name": "blop-dev", 83 | "language": "python", 84 | "name": "python3" 85 | }, 86 | "language_info": { 87 | "codemirror_mode": { 88 | "name": "ipython", 89 | "version": 3 90 | }, 91 | "file_extension": ".py", 92 | "mimetype": "text/x-python", 93 | "name": "python", 94 | "nbconvert_exporter": "python", 95 | "pygments_lexer": "ipython3", 96 | "version": "3.12.8" 97 | } 98 | }, 99 | "nbformat": 4, 100 | "nbformat_minor": 5 101 | } 102 | 
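One practical note on the passive-DOF example above: only DOFs that are both active and not read-only are candidates for the optimizer to move, while the read-only Brownian-motion devices are simply read alongside each measurement. A minimal sketch of how to check this, assuming the `agent` built in the cells above (the filtering call mirrors the one used in `examples/bluesky_adaptive_agent.py`, so treat it as a sketch rather than a guaranteed API):

```python
# Which DOFs will the optimizer actually move?
movable = agent.dofs(active=True, read_only=False)
print([dof.name for dof in movable])  # expected: ['x1', 'x2']

# x3 (active=False) and the BrownianMotion DOFs (read_only=True) are never
# commanded to new positions during acquisition.
```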
-------------------------------------------------------------------------------- /docs/source/tutorials/xrt-blop-demo.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "0", 6 | "metadata": {}, 7 | "source": [ 8 | "# XRT Blop Demo" 9 | ] 10 | }, 11 | { 12 | "cell_type": "markdown", 13 | "id": "1", 14 | "metadata": {}, 15 | "source": [ 16 | "For ophyd beamline setup see: \n", 17 | "- https://github.com/NSLS-II/blop/blob/main/src/blop/sim/xrt_beamline.py\n", 18 | "- https://github.com/NSLS-II/blop/blob/main/src/blop/sim/xrt_kb_model.py\n", 19 | "\n", 20 | "The picture below displays beam from geometric source propagating through a pair of toroidal mirrors focusing the beam on screen. Simulation of a KB setup.\n", 21 | "\n", 22 | "![xrt_blop_layout_w.jpg](../_static/xrt_blop_layout_w.jpg)" 23 | ] 24 | }, 25 | { 26 | "cell_type": "code", 27 | "execution_count": null, 28 | "id": "2", 29 | "metadata": {}, 30 | "outputs": [], 31 | "source": [ 32 | "from blop.utils import prepare_re_env # noqa\n", 33 | "%run -i $prepare_re_env.__file__ --db-type=temp\n", 34 | "bec.disable_plots()" 35 | ] 36 | }, 37 | { 38 | "cell_type": "code", 39 | "execution_count": null, 40 | "id": "3", 41 | "metadata": {}, 42 | "outputs": [], 43 | "source": [ 44 | "import time\n", 45 | "\n", 46 | "from matplotlib import pyplot as plt\n", 47 | "\n", 48 | "from blop import DOF, Agent, Objective\n", 49 | "from blop.digestion import beam_stats_digestion\n", 50 | "from blop.sim.xrt_beamline import Beamline" 51 | ] 52 | }, 53 | { 54 | "cell_type": "code", 55 | "execution_count": null, 56 | "id": "4", 57 | "metadata": {}, 58 | "outputs": [], 59 | "source": [ 60 | "plt.ion()\n", 61 | "\n", 62 | "h_opt = 0\n", 63 | "dh = 5\n", 64 | "\n", 65 | "R1, dR1 = 40000, 10000\n", 66 | "R2, dR2 = 20000, 10000" 67 | ] 68 | }, 69 | { 70 | "cell_type": "code", 71 | "execution_count": null, 72 | "id": "5", 73 | "metadata": {}, 74 | "outputs": [], 75 | "source": [ 76 | "beamline = Beamline(name=\"bl\")\n", 77 | "time.sleep(1)\n", 78 | "dofs = [\n", 79 | " DOF(description=\"KBV R\",\n", 80 | " device=beamline.kbv_dsv,\n", 81 | " search_domain=(R1 - dR1, R1 + dR1)),\n", 82 | " DOF(description=\"KBH R\",\n", 83 | " device=beamline.kbh_dsh,\n", 84 | " search_domain=(R2 - dR2, R2 + dR2)),\n", 85 | "\n", 86 | "]" 87 | ] 88 | }, 89 | { 90 | "cell_type": "code", 91 | "execution_count": null, 92 | "id": "6", 93 | "metadata": {}, 94 | "outputs": [], 95 | "source": [ 96 | "objectives = [\n", 97 | " Objective(name=\"bl_det_sum\",\n", 98 | " target=\"max\",\n", 99 | " transform=\"log\",\n", 100 | " trust_domain=(20, 1e12)),\n", 101 | "\n", 102 | " Objective(name=\"bl_det_wid_x\",\n", 103 | " target=\"min\",\n", 104 | " transform=\"log\",\n", 105 | " # trust_domain=(0, 1e12),\n", 106 | " latent_groups=[(\"bl_kbh_dsh\", \"bl_kbv_dsv\")]),\n", 107 | " Objective(name=\"bl_det_wid_y\",\n", 108 | " target=\"min\",\n", 109 | " transform=\"log\",\n", 110 | " # trust_domain=(0, 1e12),\n", 111 | " latent_groups=[(\"bl_kbh_dsh\", \"bl_kbv_dsv\")]),\n", 112 | "]" 113 | ] 114 | }, 115 | { 116 | "cell_type": "code", 117 | "execution_count": null, 118 | "id": "7", 119 | "metadata": {}, 120 | "outputs": [], 121 | "source": [ 122 | "agent = Agent(\n", 123 | " dofs=dofs,\n", 124 | " objectives=objectives,\n", 125 | " detectors=[beamline.det],\n", 126 | " digestion=beam_stats_digestion,\n", 127 | " digestion_kwargs={\"image_key\": \"bl_det_image\"},\n", 128 | " verbose=True,\n", 
129 | " db=db,\n", 130 | " tolerate_acquisition_errors=False,\n", 131 | " enforce_all_objectives_valid=True,\n", 132 | " train_every=3,\n", 133 | ")" 134 | ] 135 | }, 136 | { 137 | "cell_type": "code", 138 | "execution_count": null, 139 | "id": "8", 140 | "metadata": {}, 141 | "outputs": [], 142 | "source": [ 143 | "RE(agent.learn(\"qr\", n=16))\n", 144 | "RE(agent.learn(\"qei\", n=16, iterations=4))\n" 145 | ] 146 | }, 147 | { 148 | "cell_type": "code", 149 | "execution_count": null, 150 | "id": "9", 151 | "metadata": {}, 152 | "outputs": [], 153 | "source": [ 154 | "agent.plot_objectives(axes=(0, 1))" 155 | ] 156 | } 157 | ], 158 | "metadata": { 159 | "kernelspec": { 160 | "display_name": "xrt-blop", 161 | "language": "python", 162 | "name": "python3" 163 | }, 164 | "language_info": { 165 | "codemirror_mode": { 166 | "name": "ipython", 167 | "version": 3 168 | }, 169 | "file_extension": ".py", 170 | "mimetype": "text/x-python", 171 | "name": "python", 172 | "nbconvert_exporter": "python", 173 | "pygments_lexer": "ipython3", 174 | "version": "3.11.0" 175 | } 176 | }, 177 | "nbformat": 4, 178 | "nbformat_minor": 5 179 | } 180 | -------------------------------------------------------------------------------- /docs/source/usage.rst: -------------------------------------------------------------------------------- 1 | Usage 2 | ===== 3 | 4 | .. toctree:: 5 | :maxdepth: 2 6 | 7 | objectives.rst 8 | dofs.rst 9 | agent.rst 10 | -------------------------------------------------------------------------------- /docs/wip/constrained-himmelblau copy.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "attachments": {}, 5 | "cell_type": "markdown", 6 | "id": "0", 7 | "metadata": {}, 8 | "source": [ 9 | "# Introduction (Himmelblau's function)\n", 10 | "\n" 11 | ] 12 | }, 13 | { 14 | "attachments": {}, 15 | "cell_type": "markdown", 16 | "id": "1", 17 | "metadata": {}, 18 | "source": [ 19 | "Let's use ``blop`` to minimize Himmelblau's function, subject to the constraint that $x_1^2 + x_2^2 < 50$. Our function looks like this:" 20 | ] 21 | }, 22 | { 23 | "cell_type": "code", 24 | "execution_count": null, 25 | "id": "2", 26 | "metadata": {}, 27 | "outputs": [], 28 | "source": [ 29 | "import matplotlib as mpl\n", 30 | "import numpy as np\n", 31 | "from matplotlib import pyplot as plt\n", 32 | "\n", 33 | "from blop.tasks import Task\n", 34 | "from blop.utils import functions\n", 35 | "\n", 36 | "x1 = x2 = np.linspace(-8, 8, 256)\n", 37 | "X1, X2 = np.meshgrid(x1, x2)\n", 38 | "\n", 39 | "task = Task(name=\"himmelblau\", kind=\"min\")\n", 40 | "F = functions.constrained_himmelblau(X1, X2)\n", 41 | "\n", 42 | "plt.pcolormesh(x1, x2, F, norm=mpl.colors.LogNorm(), cmap=\"gnuplot\")\n", 43 | "plt.colorbar()\n", 44 | "plt.xlabel(\"x1\")\n", 45 | "plt.ylabel(\"x2\")" 46 | ] 47 | }, 48 | { 49 | "cell_type": "markdown", 50 | "id": "3", 51 | "metadata": {}, 52 | "source": [ 53 | "There are several things that our agent will need. The first ingredient is some degrees of freedom (these are always `ophyd` devices) which the agent will move around to different inputs within each DOF's bounds (the second ingredient). 
We define these here:" 54 | ] 55 | }, 56 | { 57 | "cell_type": "code", 58 | "execution_count": null, 59 | "id": "4", 60 | "metadata": {}, 61 | "outputs": [], 62 | "source": [ 63 | "from blop import devices\n", 64 | "\n", 65 | "dofs = [\n", 66 | " {\"device\": devices.DOF(name=\"x1\"), \"limits\": (-8, 8), \"kind\": \"active\"},\n", 67 | " {\"device\": devices.DOF(name=\"x2\"), \"limits\": (-8, 8), \"kind\": \"active\"},\n", 68 | "]" 69 | ] 70 | }, 71 | { 72 | "cell_type": "markdown", 73 | "id": "5", 74 | "metadata": {}, 75 | "source": [ 76 | "We also need to give the agent something to do. We want our agent to look in the feedback for a variable called \"himmelblau\", and try to minimize it." 77 | ] 78 | }, 79 | { 80 | "cell_type": "code", 81 | "execution_count": null, 82 | "id": "6", 83 | "metadata": {}, 84 | "outputs": [], 85 | "source": [ 86 | "tasks = [\n", 87 | " {\"key\": \"himmelblau\", \"kind\": \"minimize\"},\n", 88 | "]" 89 | ] 90 | }, 91 | { 92 | "attachments": {}, 93 | "cell_type": "markdown", 94 | "id": "7", 95 | "metadata": {}, 96 | "source": [ 97 | "In our digestion function, we define our objective as a deterministic function of the inputs, returning a `NaN` when we violate the constraint:" 98 | ] 99 | }, 100 | { 101 | "cell_type": "code", 102 | "execution_count": null, 103 | "id": "8", 104 | "metadata": {}, 105 | "outputs": [], 106 | "source": [ 107 | "def digestion(db, uid):\n", 108 | " products = db[uid].table()\n", 109 | "\n", 110 | " for index, entry in products.iterrows():\n", 111 | " products.loc[index, \"himmelblau\"] = functions.constrained_himmelblau(entry.x1, entry.x2)\n", 112 | "\n", 113 | " return products" 114 | ] 115 | }, 116 | { 117 | "attachments": {}, 118 | "cell_type": "markdown", 119 | "id": "9", 120 | "metadata": {}, 121 | "source": [ 122 | "We then combine these ingredients into an agent, giving it an instance of ``databroker`` so that it can see the output of the plans it runs." 123 | ] 124 | }, 125 | { 126 | "cell_type": "code", 127 | "execution_count": null, 128 | "id": "10", 129 | "metadata": { 130 | "tags": [] 131 | }, 132 | "outputs": [], 133 | "source": [ 134 | "from blop.utils import prepare_re_env # noqa: F401\n", 135 | "\n", 136 | "%run -i $prepare_re_env.__file__ --db-type=temp\n", 137 | "from blop.bayesian import Agent\n", 138 | "\n", 139 | "agent = Agent(\n", 140 | " dofs=dofs,\n", 141 | " tasks=tasks,\n", 142 | " digestion=digestion,\n", 143 | " db=db,\n", 144 | ")" 145 | ] 146 | }, 147 | { 148 | "cell_type": "code", 149 | "execution_count": null, 150 | "id": "11", 151 | "metadata": {}, 152 | "outputs": [], 153 | "source": [ 154 | "import blop\n", 155 | "\n", 156 | "blop.bayesian.acquisition.parse_acq_func_identifier(acq_func_identifier=\"quasi-random\")" 157 | ] 158 | }, 159 | { 160 | "cell_type": "markdown", 161 | "id": "12", 162 | "metadata": {}, 163 | "source": [ 164 | "Without any data, we can't make any inferences about what the function looks like, and so we can't use any non-trivial acquisition functions. 
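Here "quasi-random" means a low-discrepancy sequence: it covers the search space far more evenly than independent uniform draws, which is why it is a sensible choice before any model exists. As a purely illustrative sketch of the idea — not necessarily how blop generates its points internally — one could draw such a sample over these DOF limits with scipy (already a dependency):

```python
from scipy.stats import qmc

# 64 low-discrepancy (Sobol) points spread over the DOF limits (-8, 8) x (-8, 8).
sampler = qmc.Sobol(d=2, scramble=True, seed=0)
samples = qmc.scale(sampler.random(n=64), l_bounds=[-8, -8], u_bounds=[8, 8])
print(samples.shape)  # (64, 2) -- evenly spread, no prior data or model required
```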
" 165 | ] 166 | }, 167 | { 168 | "cell_type": "code", 169 | "execution_count": null, 170 | "id": "13", 171 | "metadata": {}, 172 | "outputs": [], 173 | "source": [ 174 | "RE(agent.learn(\"quasi-random\", n=64))\n", 175 | "agent.plot_objectives()" 176 | ] 177 | }, 178 | { 179 | "attachments": {}, 180 | "cell_type": "markdown", 181 | "id": "14", 182 | "metadata": {}, 183 | "source": [ 184 | "In addition to modeling the fitness of the task, the agent models the probability that an input will be feasible:" 185 | ] 186 | }, 187 | { 188 | "cell_type": "code", 189 | "execution_count": null, 190 | "id": "15", 191 | "metadata": { 192 | "tags": [] 193 | }, 194 | "outputs": [], 195 | "source": [ 196 | "agent.plot_constraint(cmap=\"viridis\")" 197 | ] 198 | }, 199 | { 200 | "attachments": {}, 201 | "cell_type": "markdown", 202 | "id": "16", 203 | "metadata": {}, 204 | "source": [ 205 | "It combines the estimate of the objective and the estimate of the feasibility in deciding where to go:" 206 | ] 207 | }, 208 | { 209 | "cell_type": "code", 210 | "execution_count": null, 211 | "id": "17", 212 | "metadata": { 213 | "tags": [] 214 | }, 215 | "outputs": [], 216 | "source": [ 217 | "X = agent.ask(\"qei\", n=8)" 218 | ] 219 | }, 220 | { 221 | "cell_type": "code", 222 | "execution_count": null, 223 | "id": "18", 224 | "metadata": {}, 225 | "outputs": [], 226 | "source": [ 227 | "import scipy as sp\n", 228 | "\n", 229 | "X = sp.interpolate.interp1d(np.arange(len(X)), X, axis=0, kind=\"cubic\")(np.linspace(0, len(X) - 1, 16))\n", 230 | "plt.plot(*X.T)" 231 | ] 232 | }, 233 | { 234 | "cell_type": "code", 235 | "execution_count": null, 236 | "id": "19", 237 | "metadata": {}, 238 | "outputs": [], 239 | "source": [ 240 | "agent.plot_acquisition(acq_func=[\"ei\", \"pi\", \"ucb\"], cmap=\"viridis\")\n", 241 | "plt.plot(*X.T, c=\"r\", marker=\"x\")" 242 | ] 243 | }, 244 | { 245 | "cell_type": "code", 246 | "execution_count": null, 247 | "id": "20", 248 | "metadata": {}, 249 | "outputs": [], 250 | "source": [ 251 | "import yaml\n", 252 | "\n", 253 | "with open(\"config.yml\", \"w\") as f:\n", 254 | " yaml.safe_dump(ACQ_FUNC_CONFIG, f)" 255 | ] 256 | }, 257 | { 258 | "cell_type": "code", 259 | "execution_count": null, 260 | "id": "21", 261 | "metadata": {}, 262 | "outputs": [], 263 | "source": [ 264 | "RE(agent.learn(\"qei\", n_per_iter=4))" 265 | ] 266 | }, 267 | { 268 | "attachments": {}, 269 | "cell_type": "markdown", 270 | "id": "22", 271 | "metadata": {}, 272 | "source": [ 273 | "The agent automatically tries to avoid infeasible points, but will end up naturally exploring the boundary of the constraint. 
Let's see where the agent is thinking of going:" 274 | ] 275 | }, 276 | { 277 | "cell_type": "code", 278 | "execution_count": null, 279 | "id": "23", 280 | "metadata": { 281 | "tags": [] 282 | }, 283 | "outputs": [], 284 | "source": [ 285 | "agent.plot_objectives()\n", 286 | "agent.plot_acquisition(strategy=[\"ei\", \"pi\", \"ucb\"])" 287 | ] 288 | }, 289 | { 290 | "attachments": {}, 291 | "cell_type": "markdown", 292 | "id": "24", 293 | "metadata": {}, 294 | "source": [ 295 | "The agent will naturally explore the whole parameter space" 296 | ] 297 | }, 298 | { 299 | "cell_type": "code", 300 | "execution_count": null, 301 | "id": "25", 302 | "metadata": {}, 303 | "outputs": [], 304 | "source": [ 305 | "RE(agent.learn(\"ei\", n_iter=16))\n", 306 | "agent.plot_objectives()" 307 | ] 308 | } 309 | ], 310 | "metadata": { 311 | "kernelspec": { 312 | "display_name": "Python 3.11.4 ('bluesky')", 313 | "language": "python", 314 | "name": "python3" 315 | }, 316 | "language_info": { 317 | "codemirror_mode": { 318 | "name": "ipython", 319 | "version": 3 320 | }, 321 | "file_extension": ".py", 322 | "mimetype": "text/x-python", 323 | "name": "python", 324 | "nbconvert_exporter": "python", 325 | "pygments_lexer": "ipython3", 326 | "version": "3.11.4" 327 | }, 328 | "vscode": { 329 | "interpreter": { 330 | "hash": "eee21ccc240bdddd7cf04478199e20f7257541e2f592ca1a4d34ebdc0225d742" 331 | } 332 | } 333 | }, 334 | "nbformat": 4, 335 | "nbformat_minor": 5 336 | } 337 | -------------------------------------------------------------------------------- /docs/wip/custom-acquisition.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "attachments": {}, 5 | "cell_type": "markdown", 6 | "id": "0", 7 | "metadata": {}, 8 | "source": [ 9 | "# Custom acquisition plans\n", 10 | "\n", 11 | "The simplest acqusition plan for a beamline is to move some motor inputs to some positions, and then trigger some detectors. Often, though, we want to investigate behaviors more complex than this. Consider the example of aligning the spectrometer at the Inner-Shell Spectroscopy (ISS) beamline at NSLS-II, which operates by modulating the energy of a beam on a sample and watching the resulting flux rise, peak, and then fall. \n", 12 | "\n", 13 | "We can build a toy model of this spectrometer using custom acquisition and digestion functions. Let's pretend that we can vary two inputs $\\mathbf{x} = (x_1, x_2)$, and that the resolution of the spectrometer is equal to \n", 14 | "\n", 15 | "$\\nu_\\sigma = \\big (1 + x_1^2 + (x_2 - 1)^2 \\big)^{1/2}$\n", 16 | "\n", 17 | "which has a single minimum at $(0,1)$. We can't sample this value directly, rather we can sample how the resulting flux peaks as we vary the energy $\\nu$. Let's pretend that it looks like a Gaussian:\n", 18 | "\n", 19 | "$I(\\nu) = \\exp \\Big [ - 0.5 (\\nu - \\nu_0)^2 / \\nu_\\sigma^2 \\Big ]$\n", 20 | "\n", 21 | "To find the inputs that lead to the tightest spectrum, we need to vary $\\mathbf{x}$, scan over $\\nu$, and then estimate the resolution for the agent to optimize over. 
Let's write acquisition and digestion functions to do this: " 22 | ] 23 | }, 24 | { 25 | "cell_type": "code", 26 | "execution_count": null, 27 | "id": "1", 28 | "metadata": {}, 29 | "outputs": [], 30 | "source": [ 31 | "import numpy as np\n", 32 | "\n", 33 | "\n", 34 | "def acquisition(dofs, inputs, dets):\n", 35 | " _ = yield from bp.list_scan\n", 36 | "\n", 37 | " for x in inputs:\n", 38 | " _ = np.sqrt(1 + np.square(x).sum()) # our resolution is\n", 39 | "\n", 40 | " nu_sigma = np.sqrt(1 + x1**2 + (x2 - 1) ** 2)\n", 41 | "\n", 42 | " _ = np.exp(-0.5 * np.square((nu - nu_0) / nu_sigma))" 43 | ] 44 | }, 45 | { 46 | "cell_type": "code", 47 | "execution_count": null, 48 | "id": "2", 49 | "metadata": {}, 50 | "outputs": [], 51 | "source": [ 52 | "from matplotlib import pyplot as plt\n", 53 | "\n", 54 | "nu_0 = 100\n", 55 | "\n", 56 | "nu = np.linspace(90, 110, 256)\n", 57 | "\n", 58 | "for x1, x2 in [(0, 0), (-2, 2), (-1, 0), (0, 1)]:\n", 59 | " nu_sigma = np.sqrt(1 + x1**2 + (x2 - 1) ** 2)\n", 60 | "\n", 61 | " flux = np.exp(-0.5 * np.square((nu - nu_0) / nu_sigma))\n", 62 | "\n", 63 | " plt.plot(nu, flux, label=f\"(x1, x2) = ({x1}, {x2})\")\n", 64 | "\n", 65 | "plt.legend()" 66 | ] 67 | }, 68 | { 69 | "attachments": {}, 70 | "cell_type": "markdown", 71 | "id": "3", 72 | "metadata": {}, 73 | "source": [ 74 | "To find the inputs that lead to the tightest spectrum, we need to vary $\\mathbf{x}$, scan over $\\nu$, and then estimate the resolution for the agent to optimize over. Let's write acquisition and digestion functions to do this: " 75 | ] 76 | } 77 | ], 78 | "metadata": { 79 | "kernelspec": { 80 | "display_name": "Python 3 (ipykernel)", 81 | "language": "python", 82 | "name": "python3" 83 | }, 84 | "language_info": { 85 | "codemirror_mode": { 86 | "name": "ipython", 87 | "version": 3 88 | }, 89 | "file_extension": ".py", 90 | "mimetype": "text/x-python", 91 | "name": "python", 92 | "nbconvert_exporter": "python", 93 | "pygments_lexer": "ipython3", 94 | "version": "3.9.16 (main, Mar 8 2023, 14:00:05) \n[GCC 11.2.0]" 95 | }, 96 | "vscode": { 97 | "interpreter": { 98 | "hash": "9aced674e98d511b4f654e147532c84d38dc986fe042b1e92785fb9d8df41f75" 99 | } 100 | } 101 | }, 102 | "nbformat": 4, 103 | "nbformat_minor": 5 104 | } 105 | -------------------------------------------------------------------------------- /docs/wip/introduction.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "attachments": {}, 5 | "cell_type": "markdown", 6 | "id": "0", 7 | "metadata": {}, 8 | "source": [ 9 | "# Bayesian optimization" 10 | ] 11 | }, 12 | { 13 | "attachments": {}, 14 | "cell_type": "markdown", 15 | "id": "1", 16 | "metadata": {}, 17 | "source": [ 18 | "This tutorial is an introduction to the syntax used by the optimizer, as well as the principles of Bayesian optimization in general.\n", 19 | "\n", 20 | "We'll start by minimizing the Styblinski-Tang function in one dimension, which looks like this:" 21 | ] 22 | }, 23 | { 24 | "cell_type": "code", 25 | "execution_count": null, 26 | "id": "2", 27 | "metadata": {}, 28 | "outputs": [], 29 | "source": [ 30 | "import numpy as np\n", 31 | "from matplotlib import pyplot as plt\n", 32 | "\n", 33 | "from blop.utils import functions\n", 34 | "\n", 35 | "x = np.linspace(-5, 5, 256)\n", 36 | "\n", 37 | "plt.plot(x, functions.styblinski_tang(x), c=\"b\")\n", 38 | "plt.xlim(-5, 5)" 39 | ] 40 | }, 41 | { 42 | "attachments": {}, 43 | "cell_type": "markdown", 44 | "id": "3", 45 | "metadata": {}, 46 | 
"source": [ 47 | "There are several things that our agent will need. The first ingredient is some degrees of freedom (these are always `ophyd` devices) which the agent will move around to different inputs within each DOF's bounds (the second ingredient). We define these here:" 48 | ] 49 | }, 50 | { 51 | "cell_type": "code", 52 | "execution_count": null, 53 | "id": "4", 54 | "metadata": {}, 55 | "outputs": [], 56 | "source": [ 57 | "from blop import devices\n", 58 | "\n", 59 | "dofs = [\n", 60 | " {\"device\": devices.DOF(name=\"x\"), \"limits\": (-5, 5), \"kind\": \"active\"},\n", 61 | "]" 62 | ] 63 | }, 64 | { 65 | "cell_type": "code", 66 | "execution_count": null, 67 | "id": "5", 68 | "metadata": {}, 69 | "outputs": [], 70 | "source": [ 71 | "tasks = [\n", 72 | " {\"key\": \"styblinski-tang\", \"kind\": \"minimize\"},\n", 73 | "]" 74 | ] 75 | }, 76 | { 77 | "attachments": {}, 78 | "cell_type": "markdown", 79 | "id": "6", 80 | "metadata": {}, 81 | "source": [ 82 | "\n", 83 | "This degree of freedom will move around a variable called `x1`. The agent automatically samples at different inputs, but we often need some post-processing after data collection. In this case, we need to give the agent a way to compute the Styblinski-Tang function. We accomplish this with a digestion function, which always takes `(db, uid)` as an input. For each entry, we compute the function:\n" 84 | ] 85 | }, 86 | { 87 | "cell_type": "code", 88 | "execution_count": null, 89 | "id": "7", 90 | "metadata": {}, 91 | "outputs": [], 92 | "source": [ 93 | "def digestion(db, uid):\n", 94 | " products = db[uid].table()\n", 95 | "\n", 96 | " for index, entry in products.iterrows():\n", 97 | " products.loc[index, \"styblinski-tang\"] = functions.styblinski_tang(entry.x)\n", 98 | "\n", 99 | " return products" 100 | ] 101 | }, 102 | { 103 | "attachments": {}, 104 | "cell_type": "markdown", 105 | "id": "8", 106 | "metadata": {}, 107 | "source": [ 108 | "The next ingredient is a task, which gives the agent something to do. We want it to minimize the Styblinski-Tang function, so we make a task that will try to minimize the output of the digestion function called \"styblinski-tang\"." 109 | ] 110 | }, 111 | { 112 | "cell_type": "code", 113 | "execution_count": null, 114 | "id": "9", 115 | "metadata": { 116 | "tags": [] 117 | }, 118 | "outputs": [], 119 | "source": [ 120 | "from blop.utils import prepare_re_env # noqa: F401\n", 121 | "\n", 122 | "%run -i $prepare_re_env.__file__ --db-type=temp\n", 123 | "\n", 124 | "from blop.bayesian import Agent\n", 125 | "\n", 126 | "agent = Agent(\n", 127 | " dofs=dofs,\n", 128 | " tasks=tasks,\n", 129 | " digestion=digestion,\n", 130 | " db=db,\n", 131 | ")\n", 132 | "\n", 133 | "RE(agent.initialize(\"qr\", n_init=4))" 134 | ] 135 | }, 136 | { 137 | "attachments": {}, 138 | "cell_type": "markdown", 139 | "id": "10", 140 | "metadata": {}, 141 | "source": [ 142 | "We initialized the GP with the \"quasi-random\" strategy, as it doesn't require any prior data. 
We can view the state of the optimizer's posterior of the tasks over the input parameters:" 143 | ] 144 | }, 145 | { 146 | "cell_type": "code", 147 | "execution_count": null, 148 | "id": "11", 149 | "metadata": { 150 | "tags": [] 151 | }, 152 | "outputs": [], 153 | "source": [ 154 | "# what are the points?\n", 155 | "\n", 156 | "agent.plot_objectives()" 157 | ] 158 | }, 159 | { 160 | "attachments": {}, 161 | "cell_type": "markdown", 162 | "id": "12", 163 | "metadata": {}, 164 | "source": [ 165 | "Note that the value of the fitness is the negative value of the function: we always want to maximize the fitness of the tasks.\n", 166 | "\n", 167 | "An important concept in Bayesian optimization is the acquisition function, which is how the agent decides where to sample next. Under the hood, the agent will see what inputs maximize the acquisition function to make its decision.\n", 168 | "\n", 169 | "We can see what the agent is thinking by asking it to plot a few different acquisition functions in its current state." 170 | ] 171 | }, 172 | { 173 | "cell_type": "code", 174 | "execution_count": null, 175 | "id": "13", 176 | "metadata": {}, 177 | "outputs": [], 178 | "source": [ 179 | "agent.all_acq_funcs" 180 | ] 181 | }, 182 | { 183 | "cell_type": "code", 184 | "execution_count": null, 185 | "id": "14", 186 | "metadata": { 187 | "tags": [] 188 | }, 189 | "outputs": [], 190 | "source": [ 191 | "agent.plot_acqfuisition(acq_funcs=[\"ei\", \"pi\", \"ucb\"])" 192 | ] 193 | }, 194 | { 195 | "attachments": {}, 196 | "cell_type": "markdown", 197 | "id": "15", 198 | "metadata": {}, 199 | "source": [ 200 | "Let's tell the agent to learn a little bit more. We just have to tell it what acquisition function to use (by passing a `strategy`) and how many iterations we'd like it to perform (by passing `n_iter`)." 201 | ] 202 | }, 203 | { 204 | "cell_type": "code", 205 | "execution_count": null, 206 | "id": "16", 207 | "metadata": {}, 208 | "outputs": [], 209 | "source": [ 210 | "RE(agent.learn(\"ei\", n_iter=4))\n", 211 | "agent.plot_objectives()" 212 | ] 213 | } 214 | ], 215 | "metadata": { 216 | "kernelspec": { 217 | "display_name": "Python 3.10.12 ('bluesky')", 218 | "language": "python", 219 | "name": "python3" 220 | }, 221 | "language_info": { 222 | "codemirror_mode": { 223 | "name": "ipython", 224 | "version": 3 225 | }, 226 | "file_extension": ".py", 227 | "mimetype": "text/x-python", 228 | "name": "python", 229 | "nbconvert_exporter": "python", 230 | "pygments_lexer": "ipython3", 231 | "version": "3.11.4" 232 | }, 233 | "vscode": { 234 | "interpreter": { 235 | "hash": "eee21ccc240bdddd7cf04478199e20f7257541e2f592ca1a4d34ebdc0225d742" 236 | } 237 | } 238 | }, 239 | "nbformat": 4, 240 | "nbformat_minor": 5 241 | } 242 | -------------------------------------------------------------------------------- /docs/wip/latent-toroid-dimensions.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "attachments": {}, 5 | "cell_type": "markdown", 6 | "id": "0", 7 | "metadata": {}, 8 | "source": [ 9 | "# Finding latent dimensions for the toroidal mirror \n", 10 | "\n", 11 | "It is common that beamline inputs are highly coupled, and so the effect of an input on the beam cannot be understood except in concert with the others. In this example, we show how our agent figures out latent dimensions, as well as the benefit of doing so. 
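To make "latent dimensions" concrete before running the beamline example: the statement above means the objective effectively varies along only a few linear combinations of the inputs. Here is a toy illustration, independent of blop, of a function that changes only along the diagonal direction x1 + x2 and is flat along the orthogonal direction — exactly the kind of coupling the agent's latent transform is meant to discover.

```python
import numpy as np


def toy_objective(x1, x2):
    # Varies only along the "latent" direction u = x1 + x2;
    # it is completely flat along v = x1 - x2.
    return np.exp(-0.5 * (x1 + x2) ** 2)


x1, x2 = np.meshgrid(np.linspace(-2, 2, 5), np.linspace(-2, 2, 5))
print(np.round(toy_objective(x1, x2), 3))  # constant along each anti-diagonal
```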
" 12 | ] 13 | }, 14 | { 15 | "cell_type": "code", 16 | "execution_count": null, 17 | "id": "1", 18 | "metadata": { 19 | "tags": [] 20 | }, 21 | "outputs": [], 22 | "source": [ 23 | "from blop.utils import prepare_re_env # noqa: F401\n", 24 | "\n", 25 | "%run -i $prepare_re_env.__file__ --db-type=temp\n", 26 | "%run -i ../../../examples/prepare_tes_shadow.py" 27 | ] 28 | }, 29 | { 30 | "cell_type": "code", 31 | "execution_count": null, 32 | "id": "2", 33 | "metadata": { 34 | "tags": [] 35 | }, 36 | "outputs": [], 37 | "source": [ 38 | "import blop\n", 39 | "from blop.experiments.sirepo.tes import w8_digestion\n", 40 | "\n", 41 | "dofs = [\n", 42 | " {\"device\": toroid.x_rot, \"limits\": (-0.001, 0.001), \"kind\": \"active\"},\n", 43 | " {\"device\": toroid.offz, \"limits\": (-0.5, 0.5), \"kind\": \"active\"},\n", 44 | "]\n", 45 | "\n", 46 | "tasks = [{\"key\": \"flux\", \"kind\": \"maximize\", \"transform\": \"log\"}]\n", 47 | "\n", 48 | "agent = blop.bayesian.Agent(\n", 49 | " dofs=dofs,\n", 50 | " tasks=tasks,\n", 51 | " dets=[w8],\n", 52 | " digestion=w8_digestion,\n", 53 | " db=db,\n", 54 | ")\n", 55 | "\n", 56 | "RE(agent.initialize(\"qr\", n_init=24))" 57 | ] 58 | }, 59 | { 60 | "attachments": {}, 61 | "cell_type": "markdown", 62 | "id": "3", 63 | "metadata": {}, 64 | "source": [ 65 | "We can see that the beam is only not cut off (i.e. it has a non-zero flux) in a diagonal strip, and that in fact this is really just a one-dimensional optimization problem in some diagonal dimension. Our agent has figured this out, with a transformation matrix that has a long coherence length in one dimension and a short coherence length orthogonal to it:" 66 | ] 67 | }, 68 | { 69 | "cell_type": "code", 70 | "execution_count": null, 71 | "id": "4", 72 | "metadata": {}, 73 | "outputs": [], 74 | "source": [ 75 | "agent.tasks[0][\"model\"].covar_module.latent_transform" 76 | ] 77 | }, 78 | { 79 | "cell_type": "code", 80 | "execution_count": null, 81 | "id": "5", 82 | "metadata": { 83 | "tags": [] 84 | }, 85 | "outputs": [], 86 | "source": [ 87 | "agent.plot_objectives()\n", 88 | "agent.plot_constraint()\n", 89 | "agent.plot_acquisition(strategy=[\"ei\", \"pi\", \"ucb\"])" 90 | ] 91 | } 92 | ], 93 | "metadata": { 94 | "kernelspec": { 95 | "display_name": "Python 3.11.5 64-bit", 96 | "language": "python", 97 | "name": "python3" 98 | }, 99 | "language_info": { 100 | "codemirror_mode": { 101 | "name": "ipython", 102 | "version": 3 103 | }, 104 | "file_extension": ".py", 105 | "mimetype": "text/x-python", 106 | "name": "python", 107 | "nbconvert_exporter": "python", 108 | "pygments_lexer": "ipython3", 109 | "version": "3.11.5" 110 | }, 111 | "vscode": { 112 | "interpreter": { 113 | "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e" 114 | } 115 | } 116 | }, 117 | "nbformat": 4, 118 | "nbformat_minor": 5 119 | } 120 | -------------------------------------------------------------------------------- /docs/wip/multi-task-sirepo.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "attachments": {}, 5 | "cell_type": "markdown", 6 | "id": "0", 7 | "metadata": {}, 8 | "source": [ 9 | "# Multi-task optimization of KB mirrors\n", 10 | "\n", 11 | "Often, we want to optimize multiple aspects of a system; in this real-world example aligning the Kirkpatrick-Baez mirrors at the TES beamline's endstation, we care about the horizontal and vertical beam size, as well as the flux. 
\n", 12 | "\n", 13 | "We could try to model these as a single task by combining them into a single number (i.e., optimization the beam density as flux divided by area), but our model then loses all information about how different inputs affect different outputs. We instead give the optimizer multiple \"tasks\", and then direct it based on its prediction of those tasks. " 14 | ] 15 | }, 16 | { 17 | "cell_type": "code", 18 | "execution_count": null, 19 | "id": "1", 20 | "metadata": { 21 | "tags": [] 22 | }, 23 | "outputs": [], 24 | "source": [ 25 | "from blop.utils import prepare_re_env # noqa: F401\n", 26 | "\n", 27 | "%run -i $prepare_re_env.__file__ --db-type=temp\n", 28 | "%run -i ../../../examples/prepare_tes_shadow.py" 29 | ] 30 | }, 31 | { 32 | "cell_type": "code", 33 | "execution_count": null, 34 | "id": "2", 35 | "metadata": { 36 | "tags": [] 37 | }, 38 | "outputs": [], 39 | "source": [ 40 | "from blop.bayesian import Agent\n", 41 | "from blop.experiments.sirepo.tes import w9_digestion\n", 42 | "\n", 43 | "dofs = [\n", 44 | " {\"device\": kbv.x_rot, \"limits\": (-0.1, 0.1), \"kind\": \"active\"},\n", 45 | " {\"device\": kbh.x_rot, \"limits\": (-0.1, 0.1), \"kind\": \"active\"},\n", 46 | "]\n", 47 | "\n", 48 | "tasks = [\n", 49 | " {\"key\": \"flux\", \"kind\": \"maximize\", \"transform\": \"log\"},\n", 50 | " {\"key\": \"w9_fwhm_x\", \"kind\": \"minimize\", \"transform\": \"log\"},\n", 51 | " {\"key\": \"w9_fwhm_y\", \"kind\": \"minimize\", \"transform\": \"log\"},\n", 52 | "]\n", 53 | "\n", 54 | "agent = Agent(\n", 55 | " dofs=dofs,\n", 56 | " tasks=tasks,\n", 57 | " dets=[w9],\n", 58 | " digestion=w9_digestion,\n", 59 | " db=db,\n", 60 | ")\n", 61 | "\n", 62 | "RE(agent.initialize(\"qr\", n_init=4))" 63 | ] 64 | }, 65 | { 66 | "cell_type": "code", 67 | "execution_count": null, 68 | "id": "3", 69 | "metadata": {}, 70 | "outputs": [], 71 | "source": [ 72 | "RE(agent.learn(\"ei\"))" 73 | ] 74 | }, 75 | { 76 | "attachments": {}, 77 | "cell_type": "markdown", 78 | "id": "4", 79 | "metadata": {}, 80 | "source": [ 81 | "For each task, we plot the sampled data and the model's posterior with respect to two inputs to the KB mirrors. We can see that each tasks responds very differently to different motors, which is very useful to the optimizer. " 82 | ] 83 | }, 84 | { 85 | "cell_type": "code", 86 | "execution_count": null, 87 | "id": "5", 88 | "metadata": { 89 | "tags": [] 90 | }, 91 | "outputs": [], 92 | "source": [ 93 | "agent.plot_objectives()\n", 94 | "agent.plot_acqfuisition(strategy=[\"ei\", \"pi\", \"ucb\"])" 95 | ] 96 | }, 97 | { 98 | "attachments": {}, 99 | "cell_type": "markdown", 100 | "id": "6", 101 | "metadata": {}, 102 | "source": [ 103 | "We should find our optimum (or something close to it) on the very next iteration:" 104 | ] 105 | }, 106 | { 107 | "cell_type": "code", 108 | "execution_count": null, 109 | "id": "7", 110 | "metadata": { 111 | "tags": [] 112 | }, 113 | "outputs": [], 114 | "source": [ 115 | "RE(agent.learn(\"ei\", n_iter=2))\n", 116 | "agent.plot_objectives()" 117 | ] 118 | }, 119 | { 120 | "attachments": {}, 121 | "cell_type": "markdown", 122 | "id": "8", 123 | "metadata": {}, 124 | "source": [ 125 | "The agent has learned that certain dimensions affect different tasks differently!" 
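That per-task sensitivity is exactly what a single scalarized figure of merit (e.g. flux divided by beam area) would have hidden. A quick numerical illustration with made-up values — two very different beam states that score identically under the scalarized metric:

```python
flux_a, wx_a, wy_a = 1.0e4, 50.0, 20.0  # dimmer beam, narrow in y
flux_b, wx_b, wy_b = 2.0e4, 40.0, 50.0  # brighter beam, bloated in y

print(flux_a / (wx_a * wy_a), flux_b / (wx_b * wy_b))  # both 10.0
# A single "density" task cannot tell the optimizer which motor to correct;
# separate flux and width tasks preserve that information.
```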
126 | ] 127 | } 128 | ], 129 | "metadata": { 130 | "kernelspec": { 131 | "display_name": "Python 3.11.4 64-bit", 132 | "language": "python", 133 | "name": "python3" 134 | }, 135 | "language_info": { 136 | "codemirror_mode": { 137 | "name": "ipython", 138 | "version": 3 139 | }, 140 | "file_extension": ".py", 141 | "mimetype": "text/x-python", 142 | "name": "python", 143 | "nbconvert_exporter": "python", 144 | "pygments_lexer": "ipython3", 145 | "version": "3.11.5" 146 | }, 147 | "vscode": { 148 | "interpreter": { 149 | "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e" 150 | } 151 | } 152 | }, 153 | "nbformat": 4, 154 | "nbformat_minor": 5 155 | } 156 | -------------------------------------------------------------------------------- /examples/benchmark.py: -------------------------------------------------------------------------------- 1 | """ 2 | This script should be run inside of the IPython environment with pre-defined 3 | objects RE, db, etc. 4 | 5 | % run -i benchmark.py 6 | """ 7 | 8 | import time as ttime 9 | 10 | import numpy as np 11 | 12 | from blop import gp 13 | 14 | bo = gp.BayesianOptimizer( 15 | init_scheme="quasi-random", 16 | n_init=64, 17 | detectors=[vstream, I0], 18 | shutter=psh, 19 | run_engine=RE, 20 | db=db, 21 | dofs=dofs, 22 | dof_bounds=hard_bounds, 23 | verbose=True, 24 | ) 25 | 26 | timeout = 300 27 | start_time = ttime.monotonic() 28 | while ttime.monotonic() - start_time < timeout: 29 | bo.learn( 30 | n_iter=1, 31 | n_per_iter=16, 32 | strategy="ei", 33 | greedy=True, 34 | ) 35 | 36 | timestamps = bo.data.time.astype(int).values / 1e9 37 | 38 | plt.plot( 39 | timestamps - timestamps[0], 40 | [ 41 | np.nanmax(bo.data.fitness.values[: i + 1]) if not all(np.isnan(bo.data.fitness.values[: i + 1])) else np.nan 42 | for i in range(len(bo.data.fitness.values)) 43 | ], 44 | ) 45 | 46 | bo.data.drop(columns=["vstream_image"], inplace=True) 47 | bo.data.to_hdf(f"/nsls2/data/tes/shared/config/gpo-benchmarks-230331/{int(timestamps[0])}.h5", "data") 48 | 49 | del bo 50 | -------------------------------------------------------------------------------- /examples/bluesky_adaptive_agent.py: -------------------------------------------------------------------------------- 1 | from collections.abc import Callable, Sequence 2 | from typing import Any 3 | 4 | import pandas as pd 5 | from bluesky_adaptive.agents.base import Agent as BlueskyAdaptiveBaseAgent # type: ignore[import-untyped] 6 | from databroker.client import BlueskyRun # type: ignore[import-untyped] 7 | from numpy.typing import ArrayLike 8 | 9 | from blop.agent import BaseAgent as BlopAgent # type: ignore[import-untyped] 10 | from blop.digestion import default_digestion_function # type: ignore[import-untyped] 11 | 12 | 13 | class BlueskyAdaptiveAgent(BlueskyAdaptiveBaseAgent, BlopAgent): 14 | """A BlueskyAdaptiveAgent that uses Blop for the underlying agent.""" 15 | 16 | # TODO: Move into main package once databroker V2 is supported 17 | 18 | def __init__( 19 | self, 20 | *, 21 | acqf_string: str, 22 | route: bool, 23 | sequential: bool, 24 | upsample: int, 25 | acqf_kwargs: dict[str, Any], 26 | detector_names: list[str] | None = None, 27 | **kwargs, 28 | ): 29 | super().__init__(**kwargs) 30 | self._acqf_string = acqf_string 31 | self._route = route 32 | self._sequential = sequential 33 | self._upsample = upsample 34 | self._acqf_kwargs = acqf_kwargs 35 | self._detector_names = detector_names or [] 36 | 37 | @property 38 | def detector_names(self) -> list[str]: 39 | return [str(name) for name 
in self._detector_names] 40 | 41 | @detector_names.setter 42 | def detector_names(self, names: list[str]): 43 | self._detector_names = list(names) 44 | 45 | @property 46 | def acquisition_function(self) -> str: 47 | return str(self._acqf_string) 48 | 49 | @acquisition_function.setter 50 | def acquisition_function(self, acqf_string: str): 51 | self._acqf_string = str(acqf_string) 52 | 53 | @property 54 | def route(self) -> bool: 55 | return bool(self._route) 56 | 57 | @route.setter 58 | def route(self, route: bool): 59 | self._route = route 60 | 61 | @property 62 | def sequential(self) -> bool: 63 | return bool(self._sequential) 64 | 65 | @sequential.setter 66 | def sequential(self, sequential: bool): 67 | self._sequential = sequential 68 | 69 | @property 70 | def upsample(self) -> int: 71 | return int(self._upsample) 72 | 73 | @upsample.setter 74 | def upsample(self, upsample: int): 75 | self._upsample = int(upsample) 76 | 77 | @property 78 | def acqf_kwargs(self) -> dict[str, str]: 79 | return {str(k): str(v) for k, v in self._acqf_kwargs.items()} 80 | 81 | def update_acqf_kwargs(self, **kwargs): 82 | self._acqf_kwargs.update(kwargs) 83 | 84 | def server_registrations(self) -> list[str]: 85 | """This is how we make these avaialble to the REST API.""" 86 | self._register_method("Update Acquistion Function Kwargs", self.update_acqf_kwargs) 87 | self._register_property("Acquisition Function", self.acquisition_function, self.acquisition_function) 88 | self._register_property("Route Points", self.route, self.route) 89 | self._register_property("Sequential Points", self.sequential, self.sequential) 90 | self._register_property("Upsample Points", self.upsample, self.upsample) 91 | return super().server_registrations() 92 | 93 | def ask(self, batch_size: int) -> tuple[Sequence[dict[str, ArrayLike]], Sequence[ArrayLike]]: 94 | default_result = super().ask( 95 | n=batch_size, 96 | acqf=self._acqf_string, 97 | route=self._route, 98 | sequential=self._sequential, 99 | upsample=self._upsample, 100 | **self._acqf_kwargs, 101 | ) 102 | 103 | """res = { 104 | "points": {dof.name: list(points[..., i]) for i, dof in enumerate(active_dofs(read_only=False))}, 105 | "acqf_name": acqf_config["name"], 106 | "acqf_obj": list(np.atleast_1d(acqf_obj.numpy())), 107 | "acqf_kwargs": acqf_kwargs, 108 | "duration_ms": duration, 109 | "sequential": sequential, 110 | "upsample": upsample, 111 | "read_only_values": read_only_values, 112 | # "posterior": p, 113 | } 114 | """ 115 | 116 | points: dict[str, list[ArrayLike]] = default_result.pop("points") 117 | acqf_obj: list[ArrayLike] = default_result.pop("acqf_obj") 118 | # Turn dict of list of points into list of consistently sized points 119 | points: list[tuple[ArrayLike]] = list(zip(*[value for _, value in points.items()], strict=False)) 120 | dicts = [] 121 | for point, obj in zip(points, acqf_obj, strict=False): 122 | d = default_result.copy() 123 | d["point"] = point 124 | d["acqf_obj"] = obj 125 | dicts.append(d) 126 | return points, dicts 127 | 128 | def tell(self, x: dict[str, ArrayLike], y: dict[str, ArrayLike]): 129 | x = {key: x_i for x_i, key in zip(x, self.dofs.names, strict=False)} 130 | y = {key: y_i for y_i, key in zip(y, self.objectives.names, strict=False)} 131 | super().tell(data={**x, **y}) 132 | return {**x, **y} 133 | 134 | def report(self) -> dict[str, Any]: 135 | raise NotImplementedError("Report is not implmented for BlueskyAdaptiveAgent") 136 | 137 | def unpack_run(self, run: BlueskyRun) -> tuple[list[ArrayLike], list[ArrayLike]]: 138 | """Use 
my DOFs to convert the run into an independent array, and my objectives to create the dependent array. 139 | In practice for shape management, we will use lists not np.arrays at this stage. 140 | Parameters 141 | ---------- 142 | run : BlueskyRun 143 | 144 | Returns 145 | ------- 146 | independent_var : 147 | The independent variable of the measurement 148 | dependent_var : 149 | The measured data, processed for relevance 150 | """ 151 | if not self.digestion or self.digestion == default_digestion_function: 152 | # Assume all raw data is available in primary stream as keys 153 | return ( 154 | [run.primary.data[key].read() for key in self.dofs.names], 155 | [run.primary.data[key].read() for key in self.objectives.names], 156 | ) 157 | else: 158 | # Hope and pray that the digestion function designed for DataFrame can handle the XArray 159 | data: pd.DataFrame = self.digestion(run.primary.data.read(), **self.digestion_kwargs) 160 | return [data.loc[:, key] for key in self.dofs.names], [data.loc[:, key] for key in self.objectives.names] 161 | 162 | def measurement_plan(self, point: ArrayLike) -> tuple[str, list[Any], dict[str, Any]]: 163 | """Fetch the string name of a registered plan, as well as the positional and keyword 164 | arguments to pass that plan. 165 | 166 | Args/Kwargs is a common place to transform relative into absolute motor coords, or 167 | other device specific parameters. 168 | 169 | By default, this measurement plan attempts to use in the built in functionality in a QueueServer compatible way. 170 | Signals and Devices are not passed as objects, but serialized as strings for the RE as a service to use. 171 | 172 | Parameters 173 | ---------- 174 | point : ArrayLike 175 | Next point to measure using a given plan 176 | 177 | Returns 178 | ------- 179 | plan_name : str 180 | plan_args : List 181 | List of arguments to pass to plan from a point to measure. 182 | plan_kwargs : dict 183 | Dictionary of keyword arguments to pass the plan, from a point to measure. 
184 | """ 185 | if isinstance(self.acquisition_plan, Callable): 186 | plan_name = self.acquisition_plan.__name__ 187 | else: 188 | plan_name = self.acquisition_plan 189 | if plan_name == "default_acquisition_plan": 190 | # Convert point back to dict form for the sake of compatability with default plan 191 | acquisition_dofs = self.dofs(active=True, read_only=False) 192 | 193 | return self.acquisition_plan.__name__, [ 194 | acquisition_dofs, 195 | {dof.name: point[i] for i, dof in enumerate(acquisition_dofs)}, 196 | [*self.detector_names, *[dev.__name__ for dev in self.dofs.devices]], 197 | ] 198 | else: 199 | raise NotImplementedError("Only default_acquisition_plan is implemented") 200 | -------------------------------------------------------------------------------- /examples/prepare_bluesky.py: -------------------------------------------------------------------------------- 1 | import json # noqa F401 2 | 3 | import bluesky.plan_stubs as bps # noqa F401 4 | import bluesky.plans as bp # noqa F401 5 | import databroker 6 | import matplotlib as mpl # noqa F401 7 | import pandas as pd # noqa F401 8 | from bluesky.callbacks import best_effort 9 | from bluesky.run_engine import RunEngine 10 | from databroker import Broker 11 | 12 | RE = RunEngine({}) 13 | 14 | bec = best_effort.BestEffortCallback() 15 | bec.disable_plots() 16 | 17 | RE.subscribe(bec) 18 | 19 | # MongoDB backend: 20 | db = Broker.named("local") # mongodb backend 21 | try: 22 | databroker.assets.utils.install_sentinels(db.reg.config, version=1) 23 | except Exception: 24 | pass 25 | 26 | RE.subscribe(db.insert) 27 | -------------------------------------------------------------------------------- /examples/prepare_chx_shadow.py: -------------------------------------------------------------------------------- 1 | db.reg.register_handler("srw", SRWFileHandler, overwrite=True) 2 | db.reg.register_handler("shadow", ShadowFileHandler, overwrite=True) 3 | db.reg.register_handler("SIREPO_FLYER", SRWFileHandler, overwrite=True) 4 | 5 | plt.ion() 6 | 7 | root_dir = "/tmp/sirepo-bluesky-data" 8 | _ = make_dir_tree(datetime.datetime.now().year, base_path=root_dir) 9 | 10 | connection = SirepoBluesky("http://localhost:8000") 11 | 12 | data, schema = connection.auth("shadow", "I1Flcbdw") 13 | classes, objects = create_classes(connection=connection) 14 | globals().update(**objects) 15 | 16 | bec.disable_baseline() 17 | bec.disable_heading() 18 | bec.disable_table() 19 | -------------------------------------------------------------------------------- /examples/prepare_tes_shadow.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | 3 | from ophyd.utils import make_dir_tree 4 | from sirepo_bluesky.shadow_handler import ShadowFileHandler 5 | from sirepo_bluesky.sirepo_bluesky import SirepoBluesky 6 | from sirepo_bluesky.sirepo_ophyd import create_classes 7 | from sirepo_bluesky.srw_handler import SRWFileHandler 8 | 9 | warnings.filterwarnings("ignore", module="sirepo_bluesky") 10 | 11 | db.reg.register_handler("shadow", ShadowFileHandler, overwrite=True) 12 | db.reg.register_handler("SIREPO_FLYER", SRWFileHandler, overwrite=True) 13 | 14 | plt.ion() 15 | 16 | root_dir = "/tmp/sirepo-bluesky-data" 17 | _ = make_dir_tree(datetime.datetime.now().year, base_path=root_dir) 18 | 19 | connection = SirepoBluesky("http://localhost:8000") 20 | 21 | data, schema = connection.auth("shadow", "00000002") 22 | classes, objects = create_classes(connection=connection) 23 | globals().update(**objects) 24 | 25 | 
data["models"]["simulation"]["npoint"] = 100000 26 | data["models"]["watchpointReport12"]["histogramBins"] = 32 27 | # w9.duration.kind = "hinted" 28 | 29 | bec.disable_baseline() 30 | bec.disable_heading() 31 | # bec.disable_table() 32 | -------------------------------------------------------------------------------- /examples/prepare_tes_srw.py: -------------------------------------------------------------------------------- 1 | db.reg.register_handler("srw", SRWFileHandler, overwrite=True) 2 | db.reg.register_handler("SIREPO_FLYER", SRWFileHandler, overwrite=True) 3 | 4 | plt.ion() 5 | 6 | root_dir = "/tmp/sirepo-bluesky-data" 7 | _ = make_dir_tree(datetime.datetime.now().year, base_path=root_dir) 8 | 9 | connection = SirepoBluesky("http://localhost:8000") 10 | 11 | data, schema = connection.auth("srw", "00000002") 12 | classes, objects = create_classes(connection=connection) 13 | globals().update(**objects) 14 | 15 | # w9.duration.kind = "hinted" 16 | 17 | bec.disable_baseline() 18 | bec.disable_heading() 19 | bec.disable_table() 20 | 21 | # This should be done by installing the package with `pip install -e .` or something similar. 22 | # import sys 23 | # sys.path.insert(0, "../") 24 | 25 | kb_dofs = [kbv.grazingAngle, kbv.verticalOffset, kbh.grazingAngle, kbh.horizontalOffset] 26 | kb_bounds = np.array([[3.5, 3.7], [-0.10, +0.10], [3.5, 3.7], [-0.10, +0.10]]) 27 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["hatchling", "hatch-vcs", "setuptools_scm"] 3 | build-backend = "hatchling.build" 4 | 5 | [project] 6 | name = "blop" 7 | description = "Beamline optimization with machine learning" 8 | readme = { file = "README.rst", content-type = "text/x-rst" } 9 | authors = [ 10 | { name = "Brookhaven National Laboratory", email = "tmorris@bnl.gov" }, 11 | ] 12 | maintainers = [ 13 | { name = "Brookhaven National Laboratory", email = "tmorris@bnl.gov" }, 14 | ] 15 | requires-python = ">=3.10" 16 | dependencies = [ 17 | "area-detector-handlers", 18 | "bluesky", 19 | "botorch", 20 | "databroker", 21 | "gpytorch", 22 | "h5py", 23 | "matplotlib", 24 | "numpy", 25 | "ophyd", 26 | "python-tsp", 27 | "scipy", 28 | "tables", 29 | "torch", 30 | "tiled", 31 | ] 32 | classifiers = [ 33 | "Development Status :: 4 - Beta", 34 | "License :: OSI Approved :: BSD License", 35 | "Programming Language :: Python :: 3 :: Only", 36 | "Programming Language :: Python :: 3.9", 37 | "Programming Language :: Python :: 3.10", 38 | "Programming Language :: Python :: 3.11", 39 | "Topic :: Scientific/Engineering :: Physics", 40 | ] 41 | dynamic = ["version"] 42 | 43 | [project.optional-dependencies] 44 | sirepo = ["sirepo-bluesky"] 45 | napari = ["napari"] 46 | gui = ["nicegui"] 47 | pre-commit = [ 48 | "ruff", 49 | "import-linter", 50 | "nbstripout", 51 | ] 52 | adaptive = ["bluesky-adaptive"] 53 | xrt = ["xrt"] 54 | 55 | dev = [ 56 | "pytest-codecov", 57 | "coverage", 58 | "furo", 59 | "nbstripout", 60 | "pre-commit", 61 | "pre-commit-hooks", 62 | "pytest", 63 | "sphinx", 64 | "twine", 65 | "ipython", 66 | "jupyter", 67 | "matplotlib", 68 | "nbsphinx", 69 | "numpydoc", 70 | "pandoc", 71 | "sphinx-copybutton", 72 | "sphinx_rtd_theme", 73 | "ruff", 74 | "import-linter", 75 | "pandas-stubs", 76 | "types-PyYAML", 77 | "mypy", 78 | ] 79 | 80 | [project.urls] 81 | Homepage = "https://github.com/NSLS-II/blop" 82 | Documentation = "https://nsls-ii.github.io/blop" 
83 | "Bug Reports" = "https://github.com/NSLS-II/blop/issues" 84 | 85 | 86 | [tool.hatch.build.targets.wheel] 87 | only-include = ["src/blop"] 88 | 89 | [tool.hatch.build.targets.wheel.sources] 90 | "src" = "" 91 | 92 | [tool.setuptools_scm] 93 | version_file = "src/blop/_version.py" 94 | 95 | [tool.hatch] 96 | version.source = "vcs" 97 | build.hooks.vcs.version-file = "src/blop/_version.py" 98 | 99 | [tool.hatch.version.raw-options] 100 | local_scheme = "no-local-version" 101 | 102 | [tool.ruff] 103 | src = ["src", "examples", "docs/source/tutorials"] 104 | line-length = 125 105 | lint.select = [ 106 | "B", # flake8-bugbear - https://docs.astral.sh/ruff/rules/#flake8-bugbear-b 107 | "C4", # flake8-comprehensions - https://docs.astral.sh/ruff/rules/#flake8-comprehensions-c4 108 | "E", # pycodestyle errors - https://docs.astral.sh/ruff/rules/#error-e 109 | "F", # pyflakes rules - https://docs.astral.sh/ruff/rules/#pyflakes-f 110 | "W", # pycodestyle warnings - https://docs.astral.sh/ruff/rules/#warning-w 111 | "I", # isort - https://docs.astral.sh/ruff/rules/#isort-i 112 | "UP", # pyupgrade - https://docs.astral.sh/ruff/rules/#pyupgrade-up 113 | "SLF", # self - https://docs.astral.sh/ruff/settings/#lintflake8-self 114 | "PLC2701", # private import - https://docs.astral.sh/ruff/rules/import-private-name/ 115 | "LOG015", # root logger call - https://docs.astral.sh/ruff/rules/root-logger-call/ 116 | "S101", # assert - https://docs.astral.sh/ruff/rules/assert/ 117 | "D", # docstring - https://docs.astral.sh/ruff/rules/#pydocstyle-d 118 | ] 119 | lint.ignore = [ 120 | "D", # TODO: Add docstrings, then enforce these errors 121 | "SLF001", # TODO: Fix private member access, https://github.com/NSLS-II/blop/issues/94 122 | ] 123 | lint.preview = true # so that preview mode PLC2701, and LOG015 is enabled 124 | 125 | [tool.ruff.lint.pydocstyle] 126 | convention = "google" 127 | 128 | [tool.ruff.lint.per-file-ignores] 129 | "src/blop/tests/**/*" = ["S101", "SLF001", "D"] 130 | # Ignore F821: undefined name '...' 
since the ipython profiles are dynamically loaded into the namespace 131 | "docs/**/*" = ["F821"] 132 | "examples/**/*" = ["F821"] 133 | 134 | [tool.importlinter] 135 | root_package = "blop" 136 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | markers = 3 | shadow: optimization with Shadow backend (quicker simulations) 4 | srw: optimization with SRW backend (slower simulations) 5 | test_func: optimization on test functions (no latency) 6 | -------------------------------------------------------------------------------- /scripts/gui.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | import databroker 4 | import matplotlib as mpl 5 | import numpy as np 6 | from bluesky.callbacks import best_effort 7 | from bluesky.run_engine import RunEngine 8 | from databroker import Broker 9 | from nicegui import ui 10 | 11 | from blop import DOF, Agent, Objective 12 | from blop.utils import functions 13 | 14 | # MongoDB backend: 15 | db = Broker.named("temp") # mongodb backend 16 | try: 17 | databroker.assets.utils.install_sentinels(db.reg.config, version=1) 18 | except Exception: 19 | pass 20 | 21 | loop = asyncio.new_event_loop() 22 | loop.set_debug(True) 23 | RE = RunEngine({}, loop=loop) 24 | RE.subscribe(db.insert) 25 | 26 | bec = best_effort.BestEffortCallback() 27 | RE.subscribe(bec) 28 | 29 | bec.disable_baseline() 30 | bec.disable_heading() 31 | bec.disable_table() 32 | bec.disable_plots() 33 | 34 | 35 | dofs = [ 36 | DOF(name="x1", description="x1", search_domain=(-5.0, 5.0)), 37 | DOF(name="x2", description="x2", search_domain=(-5.0, 5.0)), 38 | ] 39 | 40 | objectives = [Objective(name="himmelblau", target="min")] 41 | 42 | agent = Agent( 43 | dofs=dofs, 44 | objectives=objectives, 45 | digestion=functions.himmelblau_digestion, 46 | db=db, 47 | verbose=True, 48 | tolerate_acquisition_errors=False, 49 | ) 50 | 51 | agent.acqf_index = 0 52 | 53 | agent.acqf_number = 2 54 | 55 | 56 | with ui.pyplot(figsize=(10, 4), dpi=160) as obj_plt: 57 | extent = [*agent.dofs[0].search_domain, *agent.dofs[1].search_domain] 58 | 59 | ax1 = obj_plt.fig.add_subplot(131) 60 | ax1.set_title("Samples") 61 | im1 = ax1.scatter([], [], cmap="magma") 62 | 63 | ax2 = obj_plt.fig.add_subplot(132, sharex=ax1, sharey=ax1) 64 | ax2.set_title("Posterior mean") 65 | im2 = ax2.imshow(np.random.standard_normal(size=(32, 32)), extent=extent, cmap="magma") 66 | 67 | ax3 = obj_plt.fig.add_subplot(133, sharex=ax1, sharey=ax1) 68 | ax3.set_title("Posterior error") 69 | im3 = ax3.imshow(np.random.standard_normal(size=(32, 32)), extent=extent, cmap="magma") 70 | 71 | data_cbar = obj_plt.fig.colorbar(mappable=im1, ax=[ax1, ax2], location="bottom", aspect=32) 72 | err_cbar = obj_plt.fig.colorbar(mappable=im3, ax=[ax3], location="bottom", aspect=16) 73 | 74 | for ax in [ax1, ax2, ax3]: 75 | ax.set_xlabel(agent.dofs[0].label_with_units) 76 | ax.set_ylabel(agent.dofs[1].label_with_units) 77 | 78 | 79 | acqf_configs = { 80 | 0: {"name": "qr", "long_name": r"quasi-random sampling"}, 81 | 1: {"name": "qei", "long_name": r"$q$-expected improvement"}, 82 | 2: {"name": "qpi", "long_name": r"$q$-probability of improvement"}, 83 | 3: {"name": "qucb", "long_name": r"$q$-upper confidence bound"}, 84 | } 85 | 86 | with ui.pyplot(figsize=(10, 3), dpi=160) as acq_plt: 87 | extent = [*agent.dofs[0].search_domain, *agent.dofs[1].search_domain] 88 | 89 | 
acqf_plt_objs = {} 90 | 91 | for iax, config in acqf_configs.items(): 92 | if iax == 0: 93 | continue 94 | 95 | acqf = config["name"] 96 | 97 | acqf_plt_objs[acqf] = {} 98 | 99 | acqf_plt_objs[acqf]["ax"] = ax = acq_plt.fig.add_subplot(1, len(acqf_configs) - 1, iax) 100 | 101 | ax.set_title(config["long_name"]) 102 | acqf_plt_objs[acqf]["im"] = ax.imshow([[]], extent=extent, cmap="gray_r") 103 | acqf_plt_objs[acqf]["hist"] = ax.scatter([], []) 104 | acqf_plt_objs[acqf]["best"] = ax.scatter([], []) 105 | 106 | ax.set_xlabel(agent.dofs[0].label_with_units) 107 | ax.set_ylabel(agent.dofs[1].label_with_units) 108 | 109 | 110 | acqf_button_options = {index: config["name"] for index, config in acqf_configs.items()} 111 | 112 | v = ui.checkbox("visible", value=True) 113 | with ui.column().bind_visibility_from(v, "value"): 114 | ui.toggle(acqf_button_options).bind_value(agent, "acqf_index") 115 | ui.number().bind_value(agent, "acqf_number") 116 | 117 | 118 | def reset(): 119 | agent.reset() 120 | 121 | print(agent.table) 122 | 123 | 124 | def learn(): 125 | acqf_config = acqf_configs[agent.acqf_index] 126 | 127 | acqf = acqf_config["name"] 128 | 129 | n = int(agent.acqf_number) if acqf != "qr" else 16 130 | 131 | ui.notify(f'sampling {n} points with acquisition function "{acqf_config["long_name"]}"') 132 | 133 | RE(agent.learn(acqf, n=n)) 134 | 135 | with obj_plt: 136 | obj = agent.objectives[0] 137 | 138 | x_samples = agent.raw_inputs().detach().numpy() 139 | y_samples = agent.raw_targets(obj.name).detach().numpy()[..., 0] 140 | 141 | x = agent.sample(method="grid", n=20000) # (n, n, 1, d) 142 | model_x = agent.dofs.transform(x) 143 | p = obj.model.posterior(model_x) 144 | 145 | m = p.mean.squeeze(-1, -2).detach().numpy() 146 | e = p.variance.sqrt().squeeze(-1, -2).detach().numpy() 147 | 148 | im1.set_offsets(x_samples) 149 | im1.set_array(y_samples) 150 | im1.set_cmap("magma") 151 | 152 | im2.set_data(m.T[::-1]) 153 | im3.set_data(e.T[::-1]) 154 | 155 | obj_norm = mpl.colors.Normalize(vmin=np.nanmin(y_samples), vmax=np.nanmax(y_samples)) 156 | err_norm = mpl.colors.LogNorm(vmin=np.nanmin(e), vmax=np.nanmax(e)) 157 | 158 | im1.set_norm(obj_norm) 159 | im2.set_norm(obj_norm) 160 | im3.set_norm(err_norm) 161 | 162 | for ax in [ax1, ax2, ax3]: 163 | ax.set_xlim(*agent.dofs[0].search_domain) 164 | ax.set_ylim(*agent.dofs[1].search_domain) 165 | 166 | with acq_plt: 167 | x = agent.sample(method="grid", n=20000) # (n, n, 1, d) 168 | model_x = agent.dofs.transform(x) 169 | x_samples = agent.train_inputs().detach().numpy() 170 | 171 | for acqf in acqf_plt_objs.keys(): 172 | ax = acqf_plt_objs[acqf]["ax"] 173 | 174 | acqf_obj = getattr(agent, acqf)(model_x).detach().numpy() 175 | 176 | acqf_norm = mpl.colors.Normalize(vmin=np.nanmin(acqf_obj), vmax=np.nanmax(acqf_obj)) 177 | acqf_plt_objs[acqf]["im"].set_data(acqf_obj.T[::-1]) 178 | acqf_plt_objs[acqf]["im"].set_norm(acqf_norm) 179 | 180 | res = agent.ask(acqf, n=int(agent.acqf_number)) 181 | 182 | acqf_plt_objs[acqf]["hist"].remove() 183 | acqf_plt_objs[acqf]["hist"] = ax.scatter(*x_samples.T, ec="b", fc="none", marker="o") 184 | 185 | acqf_plt_objs[acqf]["best"].remove() 186 | acqf_plt_objs[acqf]["best"] = ax.scatter(*res["points"].T, c="r", marker="x", s=64) 187 | 188 | ax.set_xlim(*agent.dofs[0].search_domain) 189 | ax.set_ylim(*agent.dofs[1].search_domain) 190 | 191 | 192 | ui.button("Learn", on_click=learn) 193 | 194 | ui.button("Reset", on_click=reset) 195 | 196 | ui.run(port=8004) 197 | 
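The GUI script above drives the agent through NiceGUI widgets; the same optimization loop can also be run headlessly. The sketch below is illustrative only: it reuses names that already appear in this repository (DOF, Objective, Agent, functions.himmelblau_digestion, the temporary Broker, and the "qr"/"qei" identifiers from the acquisition config), and the particular point counts are arbitrary assumptions rather than recommended settings.

import databroker
from bluesky.run_engine import RunEngine
from databroker import Broker

from blop import DOF, Agent, Objective
from blop.utils import functions

# Throwaway catalog and RunEngine, mirroring the setup in scripts/gui.py.
db = Broker.named("temp")
try:
    databroker.assets.utils.install_sentinels(db.reg.config, version=1)
except Exception:
    pass

RE = RunEngine({})
RE.subscribe(db.insert)

dofs = [
    DOF(name="x1", description="x1", search_domain=(-5.0, 5.0)),
    DOF(name="x2", description="x2", search_domain=(-5.0, 5.0)),
]
objectives = [Objective(name="himmelblau", target="min")]

agent = Agent(dofs=dofs, objectives=objectives, digestion=functions.himmelblau_digestion, db=db)

RE(agent.learn("qr", n=16))  # quasi-random initial sampling ("qr" identifier from config.yml)
RE(agent.learn("qei", n=4))  # Monte Carlo expected improvement ("qei"); n=4 is an arbitrary choice
print(agent.table)           # inspect the accumulated samples, as the GUI's reset() does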
-------------------------------------------------------------------------------- /src/blop/__init__.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from . import utils # noqa F401 4 | from ._version import __version__, __version_tuple__ # noqa: F401 5 | from .agent import Agent # noqa F401 6 | from .dofs import DOF # noqa F401 7 | from .objectives import Objective # noqa F401 8 | 9 | logging.basicConfig( 10 | level=logging.INFO, 11 | format="%(asctime)s.%(msecs)03d %(levelname)s: %(message)s", 12 | datefmt="%Y-%m-%d %H:%M:%S", 13 | ) 14 | 15 | logger = logging.getLogger("blop") 16 | -------------------------------------------------------------------------------- /src/blop/bayesian/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NSLS-II/blop/46ba5cbfaf557871c58508063e6dcbf7afd0af6d/src/blop/bayesian/__init__.py -------------------------------------------------------------------------------- /src/blop/bayesian/acquisition/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | from typing import TYPE_CHECKING, Any 3 | 4 | import pandas as pd 5 | import yaml 6 | from botorch.acquisition.acquisition import AcquisitionFunction # type: ignore[import-untyped] 7 | 8 | from . import analytic, monte_carlo 9 | from .analytic import * # noqa F401 10 | from .monte_carlo import * # noqa F401 11 | 12 | if TYPE_CHECKING: 13 | from ...agent import BaseAgent 14 | 15 | # from botorch.utils.transforms import normalize 16 | 17 | 18 | here, this_filename = os.path.split(__file__) 19 | 20 | # TODO: Move this into the code, rather than a config file 21 | with open(f"{here}/config.yml") as f: 22 | config = yaml.safe_load(f) 23 | 24 | 25 | def all_acqfs(columns: tuple[str, ...] = ("identifier", "type", "multitask_only", "description")) -> pd.DataFrame: 26 | cols = list(columns) 27 | acqfs = pd.DataFrame(config).T[cols] 28 | acqfs.index.name = "name" 29 | return acqfs.sort_values(["type", "name"]) 30 | 31 | 32 | def parse_acqf_identifier(identifier: str, strict: bool = True) -> dict[str, Any] | None: 33 | for acqf_name in config.keys(): 34 | if identifier.lower() in [acqf_name, config[acqf_name]["identifier"]]: 35 | return {"name": acqf_name, **config[acqf_name]} 36 | if strict: 37 | raise ValueError(f"'{identifier}' is not a valid acquisition function identifier.") 38 | return None 39 | 40 | 41 | def _construct_acqf(agent: "BaseAgent", acqf_name: str, **acqf_kwargs: Any) -> tuple[AcquisitionFunction, dict[str, Any]]: 42 | """Generates an acquisition function from a supplied identifier. A list of acquisition functions and 43 | their identifiers can be found at `agent.all_acqfs`. 
44 | 45 | Args: 46 | agent: The optimization agent 47 | acqf_name: Name of the acquisition function 48 | **acqf_kwargs: Additional keyword arguments for the acquisition function 49 | 50 | Returns: 51 | tuple: (acquisition_function, acquisition_function_kwargs) 52 | """ 53 | 54 | acqf_config = config["upper_confidence_bound"] 55 | 56 | if config[acqf_name]["multitask_only"] and (len(agent.objectives) == 1): 57 | raise ValueError(f'Acquisition function "{acqf_name}" is only for multi-task optimization problems!') 58 | 59 | # there is probably a better way to structure this 60 | if acqf_name == "expected_improvement": 61 | acqf_kwargs["best_f"] = agent.best_f(weights="default") 62 | 63 | acqf = analytic.ConstrainedLogExpectedImprovement( 64 | constraint=agent.constraint, 65 | model=agent.fitness_model, 66 | posterior_transform=agent.fitness_scalarization(weights="default"), 67 | **acqf_kwargs, 68 | ) 69 | 70 | elif acqf_name == "monte_carlo_expected_improvement": 71 | acqf_kwargs["best_f"] = agent.best_f(weights="default") 72 | 73 | acqf = monte_carlo.qConstrainedExpectedImprovement( 74 | constraint=agent.constraint, 75 | model=agent.fitness_model, 76 | posterior_transform=agent.fitness_scalarization(weights="default"), 77 | **acqf_kwargs, 78 | ) 79 | 80 | elif acqf_name == "probability_of_improvement": 81 | acqf_kwargs["best_f"] = agent.best_f(weights="default") 82 | 83 | acqf = analytic.ConstrainedLogProbabilityOfImprovement( 84 | constraint=agent.constraint, 85 | model=agent.fitness_model, 86 | posterior_transform=agent.fitness_scalarization(), 87 | **acqf_kwargs, 88 | ) 89 | 90 | elif acqf_name == "monte_carlo_probability_of_improvement": 91 | acqf = monte_carlo.qConstrainedProbabilityOfImprovement( 92 | constraint=agent.constraint, 93 | model=agent.fitness_model, 94 | best_f=agent.best_f(), 95 | posterior_transform=agent.fitness_scalarization(), 96 | ) 97 | 98 | elif acqf_name == "lower_bound_max_value_entropy": 99 | acqf = monte_carlo.qConstrainedLowerBoundMaxValueEntropy( 100 | constraint=agent.constraint, 101 | model=agent.fitness_model, 102 | candidate_set=agent.sample(n=1024).squeeze(1), 103 | ) 104 | 105 | elif acqf_name == "monte_carlo_noisy_expected_hypervolume_improvement": 106 | acqf_kwargs["ref_point"] = acqf_kwargs.get("ref_point", agent.random_ref_point) 107 | 108 | acqf = monte_carlo.qConstrainedNoisyExpectedHypervolumeImprovement( 109 | constraint=agent.constraint, 110 | model=agent.fitness_model, 111 | # X_baseline=agent.input_normalization.forward(agent.train_inputs())[], 112 | X_baseline=agent.dofs(active=True).transform(agent.train_inputs(active=True)), 113 | prune_baseline=True, 114 | **acqf_kwargs, 115 | ) 116 | 117 | elif acqf_name == "upper_confidence_bound": 118 | acqf_kwargs["beta"] = acqf_kwargs.get("beta", acqf_config["default_kwargs"]["beta"]) 119 | 120 | acqf = analytic.ConstrainedUpperConfidenceBound( 121 | constraint=agent.constraint, 122 | model=agent.fitness_model, 123 | posterior_transform=agent.fitness_scalarization(), 124 | **acqf_kwargs, 125 | ) 126 | 127 | elif acqf_name == "monte_carlo_upper_confidence_bound": 128 | acqf_kwargs["beta"] = acqf_kwargs.get("beta", acqf_config["default_kwargs"]["beta"]) 129 | 130 | acqf = monte_carlo.qConstrainedUpperConfidenceBound( 131 | constraint=agent.constraint, 132 | model=agent.fitness_model, 133 | posterior_transform=agent.fitness_scalarization(), 134 | **acqf_kwargs, 135 | ) 136 | 137 | elif acqf_name == "expected_mean": 138 | acqf, _ = _construct_acqf(agent, acqf_name="upper_confidence_bound", beta=0) 139 | 
acqf_kwargs = {} 140 | 141 | elif acqf_name == "monte_carlo_expected_mean": 142 | acqf, _ = _construct_acqf(agent, acqf_name="monte_carlo_upper_confidence_bound", beta=0) 143 | acqf_kwargs = {} 144 | 145 | return acqf, acqf_kwargs 146 | -------------------------------------------------------------------------------- /src/blop/bayesian/acquisition/analytic.py: -------------------------------------------------------------------------------- 1 | import math 2 | from collections.abc import Callable 3 | 4 | import numpy as np 5 | import torch 6 | from botorch.acquisition.analytic import ( # type: ignore[import-untyped] 7 | LogExpectedImprovement, 8 | LogProbabilityOfImprovement, 9 | UpperConfidenceBound, 10 | ) 11 | from botorch.models.model import Model # type: ignore[import-untyped] 12 | from torch import Tensor 13 | 14 | 15 | class ConstrainedUpperConfidenceBound(UpperConfidenceBound): 16 | """Upper confidence bound, but scaled by some constraint. 17 | NOTE: Because the UCB can be negative, we constrain it by adjusting the Gaussian quantile. 18 | 19 | Parameters 20 | ---------- 21 | model: 22 | A BoTorch model over which to compute the acquisition function. 23 | constraint: 24 | A callable which when evaluated on inputs returns the probability of feasibility. 25 | """ 26 | 27 | def __init__(self, model: Model, constraint: Callable[[Tensor], Tensor], **kwargs) -> None: 28 | super().__init__(model=model, **kwargs) 29 | self.constraint = constraint 30 | 31 | def forward(self, x: Tensor) -> Tensor: 32 | *input_shape, _, _ = x.shape 33 | transformed_posterior = self.posterior_transform(self.model.posterior(x)) 34 | mean = transformed_posterior.mean.reshape(input_shape) 35 | sigma = transformed_posterior.variance.sqrt().reshape(input_shape) 36 | 37 | p_eff = ( 38 | 0.5 39 | * (1 + torch.special.erf(self.beta.sqrt() / math.sqrt(2))) 40 | * torch.clamp(self.constraint(x).reshape(input_shape), min=1e-6) 41 | ) 42 | 43 | return mean + sigma * np.sqrt(2) * torch.special.erfinv(2 * p_eff - 1) 44 | 45 | 46 | class ConstrainedLogExpectedImprovement(LogExpectedImprovement): 47 | """Log expected improvement, but scaled by some constraint. 48 | 49 | Parameters 50 | ---------- 51 | model: 52 | A BoTorch model over which to compute the acquisition function. 53 | constraint: 54 | A callable which when evaluated on inputs returns the probability of feasibility. 55 | """ 56 | 57 | def __init__(self, model: Model, constraint: Callable[[Tensor], Tensor], **kwargs) -> None: 58 | super().__init__(model=model, **kwargs) 59 | self.constraint = constraint 60 | 61 | def forward(self, x: Tensor) -> Tensor: 62 | return (super().forward(x) + self.constraint(x).log().squeeze(-1)).exp() 63 | 64 | 65 | class ConstrainedLogProbabilityOfImprovement(LogProbabilityOfImprovement): 66 | """Log probability of improvement acquisition function, but scaled by some constraint. 67 | 68 | Parameters 69 | ---------- 70 | model: 71 | A BoTorch model over which to compute the acquisition function. 72 | constraint: 73 | A callable which when evaluated on inputs returns the probability of feasibility. 
74 | """ 75 | 76 | def __init__(self, model: Model, constraint: Callable[[Tensor], Tensor], **kwargs) -> None: 77 | super().__init__(model=model, **kwargs) 78 | self.constraint = constraint 79 | 80 | def forward(self, x: Tensor) -> Tensor: 81 | return (super().forward(x) + self.constraint(x).log().squeeze(-1)).exp() 82 | -------------------------------------------------------------------------------- /src/blop/bayesian/acquisition/config.yml: -------------------------------------------------------------------------------- 1 | expected_improvement: 2 | pretty_name: Expected improvement 3 | description: The expected value of max(f(x) - \nu, 0), where \nu is the current maximum. 4 | identifier: ei 5 | multitask_only: false 6 | type: analytic 7 | 8 | monte_carlo_expected_improvement: 9 | description: The expected value of max(f(x) - \nu, 0), where \nu is the current maximum. 10 | identifier: qei 11 | multitask_only: false 12 | pretty_name: Monte Carlo Expected improvement 13 | type: monte_carlo 14 | 15 | expected_mean: 16 | description: The expected value at each input. 17 | identifier: em 18 | multitask_only: false 19 | pretty_name: Expected mean 20 | type: analytic 21 | 22 | monte_carlo_expected_mean: 23 | description: The expected value at each input. 24 | identifier: qem 25 | multitask_only: false 26 | pretty_name: Monte Carlo expected mean 27 | type: monte_carlo 28 | 29 | lower_bound_max_value_entropy: 30 | description: Max entropy search, basically 31 | identifier: lbmve 32 | multitask_only: false 33 | pretty_name: Lower bound max value entropy 34 | type: monte_carlo 35 | 36 | noisy_expected_hypervolume_improvement: 37 | description: It's like a big box. How big is the box? 38 | identifier: nehvi 39 | multitask_only: true 40 | pretty_name: Noisy expected hypervolume improvement 41 | type: analytic 42 | 43 | monte_carlo_noisy_expected_hypervolume_improvement: 44 | description: It's like a big box. How big is the box? 45 | identifier: qnehvi 46 | multitask_only: true 47 | pretty_name: Noisy expected hypervolume improvement 48 | type: monte_carlo 49 | 50 | probability_of_improvement: 51 | description: The probability that this input improves on the current maximum. 52 | identifier: pi 53 | multitask_only: false 54 | pretty_name: Probability of improvement 55 | type: analytic 56 | 57 | monte_carlo_probability_of_improvement: 58 | description: The probability that this input improves on the current maximum. 59 | identifier: qpi 60 | multitask_only: false 61 | pretty_name: Monte Carlo probability of improvement 62 | type: monte_carlo 63 | 64 | random: 65 | description: Uniformly-sampled random points. 66 | identifier: r 67 | multitask_only: false 68 | pretty_name: Random 69 | type: random 70 | 71 | quasi-random: 72 | description: Sobol-sampled quasi-random points. 73 | identifier: qr 74 | multitask_only: false 75 | pretty_name: Quasi-random 76 | type: random 77 | 78 | grid: 79 | description: A grid scan over the parameters. 80 | identifier: g 81 | multitask_only: false 82 | pretty_name: Grid scan 83 | type: random 84 | 85 | upper_confidence_bound: 86 | default_kwargs: 87 | beta: 4 88 | description: The expected value, plus some multiple of the uncertainty (typically \mu + 2\sigma). 89 | identifier: ucb 90 | multitask_only: false 91 | pretty_name: Upper confidence bound 92 | type: analytic 93 | 94 | monte_carlo_upper_confidence_bound: 95 | default_kwargs: 96 | beta: 4 97 | description: The expected value, plus some multiple of the uncertainty (typically \mu + 2\sigma). 
98 | identifier: qucb 99 | multitask_only: false 100 | pretty_name: Monte Carlo upper confidence bound 101 | type: monte_carlo 102 | -------------------------------------------------------------------------------- /src/blop/bayesian/acquisition/monte_carlo.py: -------------------------------------------------------------------------------- 1 | import math 2 | from collections.abc import Callable 3 | 4 | import numpy as np 5 | import torch 6 | from botorch.acquisition.max_value_entropy_search import qLowerBoundMaxValueEntropy # type: ignore[import-untyped] 7 | from botorch.acquisition.monte_carlo import ( # type: ignore[import-untyped] 8 | qExpectedImprovement, 9 | qProbabilityOfImprovement, 10 | qUpperConfidenceBound, 11 | ) 12 | from botorch.acquisition.multi_objective.monte_carlo import ( # type: ignore[import-untyped] 13 | qNoisyExpectedHypervolumeImprovement, 14 | ) 15 | from botorch.models.model import Model # type: ignore[import-untyped] 16 | from torch import Tensor 17 | 18 | 19 | class qConstrainedUpperConfidenceBound(qUpperConfidenceBound): 20 | """Monte Carlo upper confidence bound, but scaled by some constraint. 21 | NOTE: Because the UCB can be negative, we constrain it by adjusting the Gaussian quantile. 22 | 23 | Parameters 24 | ---------- 25 | model: 26 | A BoTorch model over which to compute the acquisition function. 27 | constraint: 28 | A callable which when evaluated on inputs returns the probability of feasibility. 29 | """ 30 | 31 | def __init__(self, constraint: Callable[[Tensor], Tensor], beta: float = 4, **kwargs) -> None: 32 | super().__init__(beta=beta, **kwargs) 33 | self.constraint = constraint 34 | self.beta = torch.tensor(beta) 35 | 36 | def forward(self, x: Tensor) -> Tensor: 37 | *input_shape, _, _ = x.shape 38 | 39 | transformed_posterior = self.posterior_transform(self.model.posterior(x)) 40 | mean = transformed_posterior.mean.reshape(input_shape) 41 | sigma = transformed_posterior.variance.sqrt().reshape(input_shape) 42 | 43 | p_eff = ( 44 | 0.5 45 | * (1 + torch.special.erf(self.beta.sqrt() / math.sqrt(2))) 46 | * torch.clamp(self.constraint(x).reshape(input_shape), min=1e-6) 47 | ) 48 | 49 | return mean + sigma * np.sqrt(2) * torch.special.erfinv(2 * p_eff - 1) 50 | 51 | 52 | class qConstrainedExpectedImprovement(qExpectedImprovement): 53 | """Monte Carlo expected improvement, but scaled by some constraint. 54 | 55 | Parameters 56 | ---------- 57 | model: 58 | A BoTorch model over which to compute the acquisition function. 59 | constraint: 60 | A callable which when evaluated on inputs returns the probability of feasibility. 61 | """ 62 | 63 | def __init__(self, model: Model, constraint: Callable[[Tensor], Tensor], **kwargs) -> None: 64 | super().__init__(model=model, **kwargs) 65 | self.constraint = constraint 66 | 67 | def forward(self, x: Tensor) -> Tensor: 68 | return super().forward(x) * self.constraint(x).squeeze(-1) 69 | 70 | 71 | class qConstrainedProbabilityOfImprovement(qProbabilityOfImprovement): 72 | """Monte Carlo probability of improvement, but scaled by some constraint. 73 | 74 | Parameters 75 | ---------- 76 | model: 77 | A BoTorch model over which to compute the acquisition function. 78 | constraint: 79 | A callable which when evaluated on inputs returns the probability of feasibility.
80 | """ 81 | 82 | def __init__(self, model: Model, constraint: Callable[[Tensor], Tensor], **kwargs) -> None: 83 | super().__init__(model=model, **kwargs) 84 | self.constraint = constraint 85 | 86 | def forward(self, x: Tensor) -> Tensor: 87 | return super().forward(x) * self.constraint(x).squeeze(-1) 88 | 89 | 90 | class qConstrainedNoisyExpectedHypervolumeImprovement(qNoisyExpectedHypervolumeImprovement): 91 | """Monte Carlo noisy expected hypervolume improvement, but scaled by some constraint. 92 | Only works with multi-objective models. 93 | 94 | Parameters 95 | ---------- 96 | model: 97 | A multi-objective BoTorch model over which to compute the acquisition function. 98 | constraint: 99 | A callable which when evaluated on inputs returns the probability of feasibility. 100 | """ 101 | 102 | def __init__(self, model: Model, constraint: Callable[[Tensor], Tensor], **kwargs) -> None: 103 | super().__init__(model=model, **kwargs) 104 | self.constraint = constraint 105 | 106 | def forward(self, x: Tensor) -> Tensor: 107 | return super().forward(x) * self.constraint(x).squeeze(-1) 108 | 109 | 110 | class qConstrainedLowerBoundMaxValueEntropy(qLowerBoundMaxValueEntropy): 111 | """GIBBON (General-purpose Information-Based Bayesian OptimisatioN), but scaled by some constraint. 112 | 113 | Parameters 114 | ---------- 115 | model: 116 | A multi-objective BoTorch model over which to compute the acquisition function. 117 | constraint: 118 | A callable which when evaluated on inputs returns the probability of feasibility. 119 | """ 120 | 121 | def __init__(self, model: Model, constraint: Callable[[Tensor], Tensor], **kwargs) -> None: 122 | super().__init__(model=model, **kwargs) 123 | self.constraint = constraint 124 | 125 | def forward(self, x: Tensor) -> Tensor: 126 | return super().forward(x) * self.constraint(x).squeeze(-1) 127 | -------------------------------------------------------------------------------- /src/blop/bayesian/kernels.py: -------------------------------------------------------------------------------- 1 | from collections.abc import Iterable 2 | 3 | import gpytorch # type: ignore[import-untyped] 4 | import numpy as np 5 | import torch 6 | 7 | 8 | class LatentKernel(gpytorch.kernels.Kernel): 9 | is_stationary: bool = True 10 | num_outputs: int = 1 11 | batch_inverse_lengthscale: float = 1e6 12 | 13 | def __init__( 14 | self, 15 | num_inputs: int = 1, 16 | skew_dims: bool | Iterable[tuple[int, ...]] = True, 17 | priors: bool = True, 18 | scale_output: bool = True, 19 | **kwargs, 20 | ) -> None: 21 | super().__init__() 22 | 23 | self.num_inputs: int = num_inputs 24 | self.scale_output: bool = scale_output 25 | 26 | self.nu: float = kwargs.get("nu", 2.5) 27 | 28 | if type(skew_dims) is bool: 29 | if skew_dims: 30 | self.skew_dims: list[torch.Tensor] = [torch.arange(self.num_inputs)] 31 | else: 32 | self.skew_dims = [torch.arange(num_inputs)] 33 | elif hasattr(skew_dims, "__iter__"): 34 | self.skew_dims = [torch.tensor(np.atleast_1d(skew_group)) for skew_group in skew_dims] 35 | else: 36 | raise ValueError('arg "skew_dims" must be True, False, or an iterable of tuples of ints.') 37 | 38 | # if not all([len(skew_group) >= 2 for skew_group in self.skew_dims]): 39 | # raise ValueError("must have at least two dims per skew group") 40 | skewed_dims: list[int] = [dim.item() for skew_group in self.skew_dims for dim in skew_group] 41 | if not len(set(skewed_dims)) == len(skewed_dims): 42 | raise ValueError("values in skew_dims must be unique") 43 | if not max(skewed_dims) < 
self.num_inputs: 44 | raise ValueError("invalid dimension index in skew_dims") 45 | 46 | skew_group_submatrix_indices: list[torch.Tensor] = [] 47 | for dim in range(self.num_outputs): 48 | for skew_group in self.skew_dims: 49 | j, k = skew_group[torch.triu_indices(len(skew_group), len(skew_group), 1)].unsqueeze(1) 50 | i = dim * torch.ones(j.shape).long() 51 | skew_group_submatrix_indices.append(torch.cat((i, j, k), dim=0)) 52 | 53 | self.diag_matrix_indices: list[torch.Tensor] = [ 54 | torch.kron(torch.arange(self.num_outputs), torch.ones(self.num_inputs)).long(), 55 | *2 * [torch.arange(self.num_inputs).repeat(self.num_outputs)], 56 | ] 57 | 58 | self.skew_matrix_indices: tuple[torch.Tensor, ...] = ( 59 | tuple(torch.cat(skew_group_submatrix_indices, dim=1)) 60 | if len(skew_group_submatrix_indices) > 0 61 | else (torch.tensor([]), torch.tensor([])) 62 | ) 63 | 64 | self.n_skew_entries: int = len(self.skew_matrix_indices[0]) 65 | 66 | lengthscale_constraint = gpytorch.constraints.Positive() 67 | raw_lengthscales_initial = lengthscale_constraint.inverse_transform(torch.tensor(1e-1)) * torch.ones( 68 | self.num_outputs, self.num_inputs, dtype=torch.double 69 | ) 70 | 71 | self.register_parameter(name="raw_lengthscales", parameter=torch.nn.Parameter(raw_lengthscales_initial)) 72 | self.register_constraint(param_name="raw_lengthscales", constraint=lengthscale_constraint) 73 | 74 | if priors: 75 | self.register_prior( 76 | name="lengthscales_prior", 77 | prior=gpytorch.priors.GammaPrior(concentration=3, rate=6), 78 | param_or_closure=lambda m: m.lengthscales, 79 | setting_closure=lambda m, v: m._set_lengthscales(v), 80 | ) 81 | 82 | if self.n_skew_entries > 0: 83 | skew_entries_constraint = gpytorch.constraints.Interval(-2 * np.pi, 2 * np.pi) 84 | skew_entries_initial = torch.zeros((self.num_outputs, self.n_skew_entries), dtype=torch.float64) 85 | self.register_parameter(name="raw_skew_entries", parameter=torch.nn.Parameter(skew_entries_initial)) 86 | self.register_constraint(param_name="raw_skew_entries", constraint=skew_entries_constraint) 87 | 88 | if self.scale_output: 89 | outputscale_constraint = gpytorch.constraints.Positive() 90 | outputscale_prior = gpytorch.priors.GammaPrior(concentration=2, rate=0.15) 91 | 92 | self.register_parameter( 93 | name="raw_outputscale", 94 | parameter=torch.nn.Parameter(torch.ones(1, dtype=torch.double)), 95 | ) 96 | 97 | self.register_constraint("raw_outputscale", constraint=outputscale_constraint) 98 | 99 | self.register_prior( 100 | name="outputscale_prior", 101 | prior=outputscale_prior, 102 | param_or_closure=lambda m: m.outputscale, 103 | setting_closure=lambda m, v: m._set_outputscale(v), 104 | ) 105 | 106 | @property 107 | def lengthscales(self) -> torch.Tensor: 108 | return self.raw_lengthscales_constraint.transform(self.raw_lengthscales) 109 | 110 | @lengthscales.setter 111 | def lengthscales(self, value: torch.Tensor | float | np.ndarray) -> None: 112 | self._set_lengthscales(value) 113 | 114 | @property 115 | def skew_entries(self) -> torch.Tensor: 116 | return self.raw_skew_entries_constraint.transform(self.raw_skew_entries) 117 | 118 | @skew_entries.setter 119 | def skew_entries(self, value: torch.Tensor | float | np.ndarray) -> None: 120 | self._set_skew_entries(value) 121 | 122 | @property 123 | def outputscale(self) -> torch.Tensor: 124 | return self.raw_outputscale_constraint.transform(self.raw_outputscale) 125 | 126 | @outputscale.setter 127 | def outputscale(self, value: torch.Tensor | float | np.ndarray) -> None: 128 | 
self._set_outputscale(value) 129 | 130 | def _set_lengthscales(self, value: torch.Tensor | float | np.ndarray) -> None: 131 | if not torch.is_tensor(value): 132 | value = torch.as_tensor(value).to(self.raw_lengthscales) 133 | self.initialize(raw_lengthscales=self.raw_lengthscales_constraint.inverse_transform(value)) 134 | 135 | def _set_skew_entries(self, value: torch.Tensor | float | np.ndarray) -> None: 136 | if not torch.is_tensor(value): 137 | value = torch.as_tensor(value).to(self.raw_skew_entries) 138 | self.initialize(raw_skew_entries=self.raw_skew_entries_constraint.inverse_transform(value)) 139 | 140 | def _set_outputscale(self, value: torch.Tensor | float | np.ndarray) -> None: 141 | if not torch.is_tensor(value): 142 | value = torch.as_tensor(value).to(self.raw_outputscale) 143 | self.initialize(raw_outputscale=self.raw_outputscale_constraint.inverse_transform(value)) 144 | 145 | @property 146 | def skew_matrix(self) -> torch.Tensor: 147 | S = torch.zeros((self.num_outputs, self.num_inputs, self.num_inputs), dtype=torch.float64) 148 | if self.n_skew_entries > 0: 149 | # to construct an orthogonal matrix. fun fact: exp(skew(N)) is the generator of SO(N) 150 | S[self.skew_matrix_indices] = self.skew_entries 151 | S += -S.transpose(-1, -2) 152 | return torch.linalg.matrix_exp(S) 153 | 154 | @property 155 | def diag_matrix(self) -> torch.Tensor: 156 | D = torch.zeros((self.num_outputs, self.num_inputs, self.num_inputs), dtype=torch.float64) 157 | D[self.diag_matrix_indices] = self.lengthscales.ravel() ** -1 158 | return D 159 | 160 | @property 161 | def latent_transform(self) -> torch.Tensor: 162 | return torch.matmul(self.diag_matrix, self.skew_matrix) 163 | 164 | def forward(self, x1: torch.Tensor, x2: torch.Tensor, diag: bool = False, **params: dict) -> torch.Tensor: 165 | # adapted from the Matern kernel 166 | mean = x1.reshape(-1, x1.size(-1)).mean(0)[(None,) * (x1.dim() - 1)] 167 | 168 | transform = self.latent_transform.unsqueeze(1) 169 | 170 | trans_x1 = torch.matmul(transform, (x1 - mean).unsqueeze(-1)).squeeze(-1) 171 | trans_x2 = torch.matmul(transform, (x2 - mean).unsqueeze(-1)).squeeze(-1) 172 | 173 | distance = self.covar_dist(trans_x1, trans_x2, diag=diag, **params) 174 | 175 | if self.num_outputs == 1: 176 | distance = distance.squeeze(0) 177 | 178 | outputscale = self.outputscale if self.scale_output else 1.0 179 | 180 | # special cases of the Matern function 181 | if self.nu == 0.5: 182 | return outputscale * torch.exp(-distance) 183 | if self.nu == 1.5: 184 | return outputscale * (1 + distance) * torch.exp(-distance) 185 | if self.nu == 2.5: 186 | return outputscale * (1 + distance + distance**2 / 3) * torch.exp(-distance) 187 | raise ValueError(f"nu = {self.nu} is not supported") 188 | -------------------------------------------------------------------------------- /src/blop/bayesian/models.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | 3 | import botorch # type: ignore[import-untyped] 4 | import gpytorch # type: ignore[import-untyped] 5 | import torch 6 | from botorch.models.gp_regression import SingleTaskGP # type: ignore[import-untyped] 7 | 8 | from . import kernels 9 | 10 | 11 | def train_model( 12 | model: SingleTaskGP, 13 | hypers: dict[str, Any] | None = None, 14 | max_fails: int = 4, 15 | **kwargs: Any, 16 | ) -> None: 17 | """Fit all of the agent's models. 
All kwargs are passed to `botorch.fit.fit_gpytorch_mll`.""" 18 | fails = 0 19 | while True: 20 | try: 21 | if hypers is not None: 22 | model.load_state_dict(hypers) 23 | else: 24 | botorch.fit.fit_gpytorch_mll(gpytorch.mlls.ExactMarginalLogLikelihood(model.likelihood, model), **kwargs) 25 | model.trained = True 26 | return 27 | except Exception as e: 28 | if fails < max_fails: 29 | fails += 1 30 | else: 31 | raise e 32 | 33 | 34 | def construct_single_task_model( 35 | X: torch.Tensor, 36 | y: torch.Tensor, 37 | skew_dims: list[tuple[int, ...]] | None = None, 38 | min_noise: float = 1e-6, 39 | max_noise: float = 1e0, 40 | ) -> "LatentGP": 41 | """ 42 | Construct an untrained model for an objective. 43 | """ 44 | 45 | skew_dims = skew_dims if skew_dims is not None else [(i,) for i in range(X.shape[-1])] 46 | 47 | likelihood = gpytorch.likelihoods.GaussianLikelihood( 48 | noise_constraint=gpytorch.constraints.Interval( 49 | torch.tensor(min_noise), 50 | torch.tensor(max_noise), 51 | ), 52 | ) 53 | 54 | input_transform = botorch.models.transforms.input.Normalize(d=X.shape[-1]) 55 | outcome_transform = botorch.models.transforms.outcome.Standardize(m=1) # , batch_shape=torch.Size((1,))) 56 | 57 | if not X.isfinite().all(): 58 | raise ValueError("'X' must not contain points that are inf or NaN.") 59 | if not y.isfinite().all(): 60 | raise ValueError("'y' must not contain points that are inf or NaN.") 61 | 62 | return LatentGP( 63 | train_inputs=X, 64 | train_targets=y, 65 | likelihood=likelihood, 66 | skew_dims=skew_dims, 67 | input_transform=input_transform, 68 | outcome_transform=outcome_transform, 69 | ) 70 | 71 | 72 | class LatentGP(SingleTaskGP): 73 | def __init__( 74 | self, 75 | train_inputs: torch.Tensor, 76 | train_targets: torch.Tensor, 77 | skew_dims: bool | list[tuple[int, ...]] = True, 78 | *args: Any, 79 | **kwargs: Any, 80 | ) -> None: 81 | super().__init__(train_inputs, train_targets, *args, **kwargs) 82 | 83 | self.mean_module = gpytorch.means.ConstantMean(constant_prior=gpytorch.priors.NormalPrior(loc=0, scale=1)) 84 | 85 | self.covar_module = kernels.LatentKernel( 86 | num_inputs=train_inputs.shape[-1], 87 | num_outputs=train_targets.shape[-1], 88 | skew_dims=skew_dims, 89 | priors=True, 90 | scale=True, 91 | **kwargs, 92 | ) 93 | 94 | self.trained: bool = False 95 | 96 | 97 | class LatentConstraintModel(LatentGP): 98 | def __init__( 99 | self, 100 | train_inputs: torch.Tensor, 101 | train_targets: torch.Tensor, 102 | skew_dims: bool | list[tuple[int, ...]] = True, 103 | *args: Any, 104 | **kwargs: Any, 105 | ) -> None: 106 | super().__init__(train_inputs, train_targets, skew_dims, *args, **kwargs) 107 | 108 | self.trained: bool = False 109 | 110 | def fitness(self, x: torch.Tensor, n_samples: int = 1024) -> torch.Tensor: 111 | """ 112 | Takes in a (..., m) dimension tensor and returns a (..., n_classes) tensor 113 | """ 114 | *input_shape, n_dim = x.shape 115 | samples = self.posterior(x.reshape(-1, n_dim)).sample(torch.Size((n_samples,))).exp() 116 | return (samples / samples.sum(-1, keepdim=True)).mean(0).reshape(*input_shape, -1) 117 | 118 | 119 | class LatentDirichletClassifier(LatentGP): 120 | def __init__( 121 | self, 122 | train_inputs: torch.Tensor, 123 | train_targets: torch.Tensor, 124 | skew_dims: bool | list[tuple[int, ...]] = True, 125 | *args: Any, 126 | **kwargs: Any, 127 | ) -> None: 128 | super().__init__(train_inputs, train_targets, skew_dims, *args, **kwargs) 129 | 130 | self.trained: bool = False 131 | 132 | def probabilities(self, x: torch.Tensor, 
n_samples: int = 256) -> torch.Tensor: 133 | """ 134 | Takes in a (..., m) dimension tensor and returns a (..., n_classes) tensor 135 | """ 136 | *input_shape, n_dim = x.shape 137 | samples = self.posterior(x.reshape(-1, n_dim)).sample(torch.Size((n_samples,))).exp() 138 | return (samples / samples.sum(-1, keepdim=True)).mean(0).reshape(*input_shape, -1) 139 | -------------------------------------------------------------------------------- /src/blop/de/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NSLS-II/blop/46ba5cbfaf557871c58508063e6dcbf7afd0af6d/src/blop/de/__init__.py -------------------------------------------------------------------------------- /src/blop/de/hardware_flyer.py: -------------------------------------------------------------------------------- 1 | # Bluesky hardware flyer for DE optimization 2 | 3 | import time as ttime 4 | from collections import deque 5 | 6 | from ophyd.sim import NullStatus # type: ignore[import-untyped] 7 | 8 | 9 | class BlueskyFlyer: 10 | def __init__(self): 11 | self.name = "bluesky_flyer" 12 | self._asset_docs_cache = deque() 13 | self._resource_uids = [] 14 | self._datum_counter = None 15 | self._datum_ids = [] 16 | 17 | def kickoff(self): 18 | return NullStatus() 19 | 20 | def complete(self): 21 | return NullStatus() 22 | 23 | def describe_collect(self): 24 | return {self.name: {}} 25 | 26 | def collect(self): 27 | now = ttime.time() 28 | data = {} 29 | yield { 30 | "data": data, 31 | "timestamps": dict.fromkeys(data, now), 32 | "time": now, 33 | "filled": dict.fromkeys(data, False), 34 | } 35 | 36 | def collect_asset_docs(self): 37 | items = list(self._asset_docs_cache) 38 | self._asset_docs_cache.clear() 39 | yield from items 40 | 41 | 42 | class HardwareFlyer(BlueskyFlyer): 43 | def __init__( 44 | self, 45 | params_to_change, 46 | velocities, 47 | time_to_travel, 48 | detector, 49 | motors, 50 | start_det, 51 | read_det, 52 | stop_det, 53 | watch_func, 54 | ): 55 | super().__init__() 56 | self.name = "hardware_flyer" 57 | # TODO: These 3 lists to be merged later 58 | self.params_to_change = params_to_change # dict of dicts; {motor_name: {'position':...}} 59 | self.velocities = velocities # dictionary with motor names as keys 60 | self.time_to_travel = time_to_travel # dictionary with motor names as keys 61 | self.detector = detector 62 | self.motors = motors 63 | self.watch_positions = {name: {"position": []} for name in self.motors} 64 | self.watch_intensities = [] 65 | self.watch_timestamps = [] 66 | self.motor_move_status = None 67 | self.start_det = start_det 68 | self.read_det = read_det 69 | self.stop_det = stop_det 70 | self.watch_func = watch_func 71 | 72 | def kickoff(self): 73 | slowest_motor = sorted(self.time_to_travel, key=lambda x: self.time_to_travel[x], reverse=True)[0] 74 | self.start_det(self.detector) 75 | ttime.sleep(1.0) 76 | for motor_name, field in self.motors.items(): 77 | for _, motor_obj in field.items(): 78 | motor_obj.velocity.put(self.velocities[motor_name]) 79 | for motor_name, field in self.motors.items(): 80 | for field_name, motor_obj in field.items(): 81 | if motor_name == slowest_motor: 82 | self.motor_move_status = motor_obj.set(self.params_to_change[motor_name][field_name]) 83 | else: 84 | motor_obj.set(self.params_to_change[motor_name][field_name]) 85 | # Call this function once before we start moving all motors to collect the first points. 
86 | self._watch_function() 87 | self.motor_move_status.watch(self._watch_function) 88 | return NullStatus() 89 | 90 | def complete(self): 91 | return self.motor_move_status 92 | 93 | def describe_collect(self): 94 | return_dict = { 95 | self.name: { 96 | f"{self.name}_intensity": {"source": f"{self.name}_intensity", "dtype": "number", "shape": []}, 97 | } 98 | } 99 | motor_dict = {} 100 | for motor_name in self.motors.keys(): 101 | motor_dict[f"{self.name}_{motor_name}_velocity"] = { 102 | "source": f"{self.name}_{motor_name}_velocity", 103 | "dtype": "number", 104 | "shape": [], 105 | } 106 | motor_dict[f"{self.name}_{motor_name}_position"] = { 107 | "source": f"{self.name}_{motor_name}_position", 108 | "dtype": "number", 109 | "shape": [], 110 | } 111 | return_dict[self.name].update(motor_dict) 112 | return return_dict 113 | 114 | def collect(self): 115 | self.stop_det(self.detector) 116 | for ind in range(len(self.watch_intensities)): 117 | motor_dict = {} 118 | for motor_name, field in self.motors.items(): 119 | for field_name, _ in field.items(): 120 | motor_dict.update( 121 | { 122 | f"{self.name}_{motor_name}_velocity": self.velocities[motor_name], 123 | f"{self.name}_{motor_name}_position": self.watch_positions[motor_name][field_name][ind], 124 | } 125 | ) 126 | data = {f"{self.name}_intensity": self.watch_intensities[ind]} 127 | data.update(motor_dict) 128 | yield { 129 | "data": data, 130 | "timestamps": dict.fromkeys(data, self.watch_timestamps[ind]), 131 | "time": self.watch_timestamps[ind], 132 | "filled": dict.fromkeys(data, False), 133 | } 134 | 135 | # # This will produce one event with dictionaries in the <...>_parameters field. 136 | # motor_params_dict = {} 137 | # for motor_name, motor_obj in self.motors.items(): 138 | # motor_parameters = {'timestamps': self.watch_timestamps, 139 | # 'velocity': self.velocities[motor_name], 140 | # 'positions': self.watch_positions[motor_name]} 141 | # motor_params_dict[motor_name] = motor_parameters 142 | # 143 | # data = {f'{self.name}_{self.detector.channel1.rois.roi01.name}': self.watch_intensities, 144 | # f'{self.name}_parameters': motor_params_dict} 145 | # 146 | # now = ttime.time() 147 | # yield {'data': data, 148 | # 'timestamps': {key: now for key in data}, 'time': now, 149 | # 'filled': {key: False for key in data}} 150 | 151 | def _watch_function(self, *args, **kwargs): 152 | watch_pos, watch_int, watch_time = self.watch_func(self.motors, self.detector) 153 | for motor_name, field in self.motors.items(): 154 | for field_name, _ in field.items(): 155 | self.watch_positions[motor_name][field_name].extend(watch_pos[motor_name][field_name]) 156 | self.watch_intensities.extend(watch_int) 157 | self.watch_timestamps.extend(watch_time) 158 | -------------------------------------------------------------------------------- /src/blop/digestion/__init__.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | 3 | from ..utils import get_beam_stats 4 | 5 | 6 | def default_digestion_function(df: pd.DataFrame) -> pd.DataFrame: 7 | return df 8 | 9 | 10 | def beam_stats_digestion(df: pd.DataFrame, image_key: str, **kwargs) -> pd.DataFrame: 11 | # Get the beam stats for each image in the dataframe and add them as new columns 12 | df = pd.concat([df, df[image_key].apply(lambda img: pd.Series(get_beam_stats(img, **kwargs)))], axis=1) 13 | return df 14 | -------------------------------------------------------------------------------- /src/blop/digestion/tests.py: 
-------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | 4 | from ..utils import functions 5 | 6 | 7 | def himmelblau_digestion(df: pd.DataFrame) -> pd.DataFrame: 8 | """ 9 | Digests Himmelblau's function into the feedback. 10 | """ 11 | df["x1"] = df["x1"].fillna(0) if "x1" in df.columns else 0 12 | df["x2"] = df["x2"].fillna(0) if "x2" in df.columns else 0 13 | df["himmelblau"] = functions.himmelblau(x1=df.x1, x2=df.x2) 14 | df["himmelblau_transpose"] = functions.himmelblau(x1=df.x2, x2=df.x1) 15 | return df 16 | 17 | 18 | def constrained_himmelblau_digestion(df: pd.DataFrame) -> pd.DataFrame: 19 | """ 20 | Digests Himmelblau's function into the feedback, constrained with NaN for a distance of more than 6 from the origin. 21 | """ 22 | 23 | df = himmelblau_digestion(df) 24 | df.loc[:, "himmelblau"] = np.where( 25 | np.array(df.x1.values) ** 2 + np.array(df.x2) ** 2 < 36, np.array(df.himmelblau), np.nan 26 | ) 27 | 28 | return df 29 | 30 | 31 | def sketchy_himmelblau_digestion(df: pd.DataFrame, p: float = 0.1) -> pd.DataFrame: 32 | """ 33 | Evaluates the constrained Himmelblau, where every point is bad with probability p. 34 | """ 35 | 36 | df = constrained_himmelblau_digestion(df) 37 | bad = np.random.choice(a=[True, False], size=len(df), p=[p, 1 - p]) 38 | df.loc[:, "himmelblau"] = np.where(bad, np.nan, np.array(df.himmelblau)) 39 | 40 | return df 41 | 42 | 43 | """ 44 | Chankong and Haimes function from https://en.wikipedia.org/wiki/Test_functions_for_optimization 45 | """ 46 | 47 | 48 | def chankong_and_haimes_digestion(df: pd.DataFrame) -> pd.DataFrame: 49 | df["f1"] = (df.x1 - 2) ** 2 + (df.x2 - 1) + 2 50 | df["f2"] = 9 * df.x1 - (df.x2 - 1) + 2 51 | df["c1"] = df.x1**2 + df.x2**2 52 | df["c2"] = df.x1 - 3 * df.x2 + 10 53 | return df 54 | 55 | 56 | def mock_kbs_digestion(df: pd.DataFrame) -> pd.DataFrame: 57 | """ 58 | Digests a beam waist and height into the feedback. 59 | """ 60 | sigma_x = functions.gaussian_beam_waist(df.x1.values, df.x2.values) 61 | sigma_y = functions.gaussian_beam_waist(df.x3.values, df.x4.values) 62 | df["x_width"] = 2 * sigma_x 63 | df["y_width"] = 2 * sigma_y 64 | return df 65 | 66 | 67 | def binh_korn_digestion(df: pd.DataFrame) -> pd.DataFrame: 68 | """ 69 | Digests the Binh and Korn function into the feedback. 70 | """ 71 | 72 | f1, f2 = functions.binh_korn(df.x1.values, df.x2.values) 73 | df["f1"] = f1 74 | df["f2"] = f2 75 | return df 76 | -------------------------------------------------------------------------------- /src/blop/plans.py: -------------------------------------------------------------------------------- 1 | from collections.abc import Generator, Mapping, Sequence 2 | from typing import Any 3 | 4 | import bluesky.plan_stubs as bps 5 | import bluesky.plans as bp 6 | from bluesky.protocols import Movable 7 | from bluesky.run_engine import Msg 8 | from ophyd import Signal # type: ignore[import-untyped] 9 | 10 | from .dofs import DOF 11 | 12 | 13 | def list_scan_with_delay(*args: Any, delay: float = 0, **kwargs: Any) -> Generator[Msg, None, str]: 14 | "Accepts all the normal 'scan' parameters, plus an optional delay." 15 | 16 | def one_nd_step_with_delay( 17 | detectors: Sequence[Signal], step: Mapping[Movable, Any], pos_cache: Mapping[Movable, Any] 18 | ) -> Generator[Msg, None, None]: 19 | "This is a copy of bluesky.plan_stubs.one_nd_step with a sleep added."
20 | motors = step.keys() 21 | yield from bps.move_per_step(step, pos_cache) 22 | yield from bps.sleep(delay) 23 | yield from bps.trigger_and_read(list(detectors) + list(motors)) 24 | 25 | kwargs.setdefault("per_step", one_nd_step_with_delay) 26 | uid = yield from bp.list_scan(*args, **kwargs) 27 | return uid 28 | 29 | 30 | def default_acquisition_plan( 31 | dofs: Sequence[DOF], inputs: Mapping[str, Sequence[Any]], dets: Sequence[Signal], **kwargs: Any 32 | ) -> Generator[Msg, None, str]: 33 | """ 34 | Parameters 35 | ---------- 36 | dofs : list of DOFs or DOFList 37 | A list of DOFs 38 | inputs: dict 39 | A dict of a list of inputs per dof, keyed by dof.name 40 | dets: list 41 | A list of detectors to trigger 42 | """ 43 | delay = kwargs.get("delay", 0) 44 | args = [] 45 | for dof in dofs: 46 | args.append(dof.device) 47 | args.append(inputs[dof.name]) 48 | 49 | uid = yield from list_scan_with_delay(dets, *args, delay=delay) 50 | return uid 51 | -------------------------------------------------------------------------------- /src/blop/sim/__init__.py: -------------------------------------------------------------------------------- 1 | from .beamline import Beamline, Detector # noqa 2 | from .handlers import HDF5Handler # noqa 3 | -------------------------------------------------------------------------------- /src/blop/sim/beamline.py: -------------------------------------------------------------------------------- 1 | import itertools 2 | from collections import deque 3 | from collections.abc import Generator, Iterator 4 | from datetime import datetime 5 | from pathlib import Path 6 | from typing import Any 7 | 8 | import h5py # type: ignore[import-untyped] 9 | import numpy as np 10 | import scipy as sp # type: ignore[import-untyped] 11 | from event_model import compose_resource # type: ignore[import-untyped] 12 | from ophyd import Component as Cpt # type: ignore[import-untyped] 13 | from ophyd import Device, Signal # type: ignore[import-untyped] 14 | from ophyd.sim import NullStatus, new_uid # type: ignore[import-untyped] 15 | from ophyd.utils import make_dir_tree # type: ignore[import-untyped] 16 | 17 | from ..utils import get_beam_stats 18 | from .handlers import ExternalFileReference 19 | 20 | 21 | class Detector(Device): 22 | sum = Cpt(Signal, kind="hinted") 23 | max = Cpt(Signal, kind="normal") 24 | area = Cpt(Signal, kind="normal") 25 | cen_x = Cpt(Signal, kind="hinted") 26 | cen_y = Cpt(Signal, kind="hinted") 27 | wid_x = Cpt(Signal, kind="hinted") 28 | wid_y = Cpt(Signal, kind="hinted") 29 | image = Cpt(ExternalFileReference, kind="normal") 30 | image_shape = Cpt(Signal, value=(300, 400), kind="normal") 31 | noise = Cpt(Signal, kind="normal") 32 | 33 | def __init__( 34 | self, root_dir: str = "/tmp/blop/sim", verbose: bool = True, noise: bool = True, *args: Any, **kwargs: Any 35 | ) -> None: 36 | super().__init__(*args, **kwargs) 37 | 38 | _ = make_dir_tree(datetime.now().year, base_path=root_dir) 39 | 40 | self._root_dir = root_dir 41 | self._verbose = verbose 42 | 43 | # Used for the emulated cameras only. 44 | self._img_dir = None 45 | 46 | # Resource/datum docs related variables.
47 | self._asset_docs_cache: deque[tuple[str, dict[str, Any]]] = deque() 48 | self._resource_document: dict[str, Any] | None = None 49 | self._datum_factory: Any | None = None 50 | self._dataset: h5py.Dataset | None = None 51 | self._h5file_desc: h5py.File | None = None 52 | self._counter: Iterator[int] | None = None 53 | 54 | self.noise.put(noise) 55 | 56 | def trigger(self) -> NullStatus: 57 | if not self._counter: 58 | raise RuntimeError("Counter not initialized, make sure to call stage() first.") 59 | if not self._dataset: 60 | raise RuntimeError("Dataset not initialized, make sure to call stage() first.") 61 | if not self._datum_factory: 62 | raise RuntimeError("Datum factory not initialized, make sure to call stage() first.") 63 | super().trigger() 64 | raw_image = self.generate_beam(noise=self.noise.get()) 65 | 66 | current_frame = next(self._counter) 67 | 68 | self._dataset.resize((current_frame + 1, *self.image_shape.get())) 69 | self._dataset[current_frame, :, :] = raw_image 70 | 71 | datum_document = self._datum_factory(datum_kwargs={"frame": current_frame}) 72 | self._asset_docs_cache.append(("datum", datum_document)) 73 | 74 | stats = get_beam_stats(raw_image) 75 | self.image.put(datum_document["datum_id"]) 76 | 77 | for attr in ["max", "sum", "cen_x", "cen_y", "wid_x", "wid_y"]: 78 | getattr(self, attr).put(stats[attr]) 79 | 80 | super().trigger() 81 | 82 | return NullStatus() 83 | 84 | def stage(self) -> list[Any]: 85 | devices = super().stage() 86 | date = datetime.now() 87 | self._assets_dir = date.strftime("%Y/%m/%d") 88 | data_file = f"{new_uid()}.h5" 89 | 90 | self._resource_document, self._datum_factory, _ = compose_resource( 91 | start={"uid": "needed for compose_resource() but will be discarded"}, 92 | spec="HDF5", 93 | root=self._root_dir, 94 | resource_path=str(Path(self._assets_dir) / Path(data_file)), 95 | resource_kwargs={}, 96 | ) 97 | 98 | if not self._resource_document: 99 | raise RuntimeError("Resource document not initialized.") 100 | 101 | self._data_file = str(Path(self._resource_document["root"]) / Path(self._resource_document["resource_path"])) 102 | 103 | # now discard the start uid, a real one will be added later 104 | self._resource_document.pop("run_start") 105 | self._asset_docs_cache.append(("resource", self._resource_document)) 106 | 107 | self._h5file_desc = h5py.File(self._data_file, "x") 108 | group = self._h5file_desc.create_group("/entry") 109 | self._dataset = group.create_dataset( 110 | "image", 111 | data=np.full(fill_value=np.nan, shape=(1, *self.image_shape.get())), 112 | maxshape=(None, *self.image_shape.get()), 113 | chunks=(1, *self.image_shape.get()), 114 | dtype="float64", 115 | compression="lzf", 116 | ) 117 | self._counter = itertools.count() 118 | return devices 119 | 120 | def unstage(self) -> list[Any]: 121 | devices = super().unstage() 122 | del self._dataset 123 | if self._h5file_desc: 124 | self._h5file_desc.close() 125 | self._resource_document = None 126 | self._datum_factory = None 127 | return devices 128 | 129 | def collect_asset_docs(self) -> Generator[tuple[str, dict[str, Any]], None, None]: 130 | items = list(self._asset_docs_cache) 131 | self._asset_docs_cache.clear() 132 | yield from items 133 | 134 | def generate_beam(self, noise: bool = True) -> np.ndarray: 135 | nx, ny = self.image_shape.get() 136 | 137 | x = np.linspace(-10, 10, ny) 138 | y = np.linspace(-10, 10, nx) 139 | X, Y = np.meshgrid(x, y) 140 | 141 | x0 = self.parent.kbh_ush.get() - self.parent.kbh_dsh.get() 142 | y0 = self.parent.kbv_usv.get() - 
self.parent.kbv_dsv.get() 143 | x_width = np.sqrt(0.2 + 5e-1 * (self.parent.kbh_ush.get() + self.parent.kbh_dsh.get() - 1) ** 2) 144 | y_width = np.sqrt(0.1 + 5e-1 * (self.parent.kbv_usv.get() + self.parent.kbv_dsv.get() - 2) ** 2) 145 | 146 | beam = np.exp(-0.5 * (((X - x0) / x_width) ** 4 + ((Y - y0) / y_width) ** 4)) / ( 147 | np.sqrt(2 * np.pi) * x_width * y_width 148 | ) 149 | 150 | mask = X > self.parent.ssa_inboard.get() 151 | mask &= X < self.parent.ssa_outboard.get() 152 | mask &= Y > self.parent.ssa_lower.get() 153 | mask &= Y < self.parent.ssa_upper.get() 154 | mask = sp.ndimage.gaussian_filter(mask.astype(float), sigma=1) 155 | 156 | image = beam * mask 157 | 158 | if noise: 159 | kx = np.fft.fftfreq(n=len(x), d=0.1) 160 | ky = np.fft.fftfreq(n=len(y), d=0.1) 161 | KX, KY = np.meshgrid(kx, ky) 162 | 163 | power_spectrum = 1 / (1e-2 + KX**2 + KY**2) 164 | 165 | white_noise = 1e-3 * np.random.standard_normal(size=X.shape) 166 | pink_noise = 1e-3 * np.real(np.fft.ifft2(power_spectrum * np.fft.fft2(np.random.standard_normal(size=X.shape)))) 167 | # background = 5e-3 * (X - Y) / X.max() 168 | 169 | image += white_noise + pink_noise 170 | 171 | return image 172 | 173 | 174 | class Beamline(Device): 175 | det = Cpt(Detector) 176 | 177 | kbh_ush = Cpt(Signal, kind="hinted") 178 | kbh_dsh = Cpt(Signal, kind="hinted") 179 | kbv_usv = Cpt(Signal, kind="hinted") 180 | kbv_dsv = Cpt(Signal, kind="hinted") 181 | 182 | ssa_inboard = Cpt(Signal, value=-5.0, kind="hinted") 183 | ssa_outboard = Cpt(Signal, value=5.0, kind="hinted") 184 | ssa_lower = Cpt(Signal, value=-5.0, kind="hinted") 185 | ssa_upper = Cpt(Signal, value=5.0, kind="hinted") 186 | 187 | def __init__(self, *args: Any, **kwargs: Any) -> None: 188 | super().__init__(*args, **kwargs) 189 | -------------------------------------------------------------------------------- /src/blop/sim/handlers.py: -------------------------------------------------------------------------------- 1 | import h5py # type: ignore[import-untyped] 2 | from area_detector_handlers.handlers import HandlerBase # type: ignore[import-untyped] 3 | from ophyd import Signal # type: ignore[import-untyped] 4 | 5 | 6 | class HDF5Handler(HandlerBase): 7 | specs = {"HDF5"} 8 | 9 | def __init__(self, filename): 10 | self._name = filename 11 | 12 | def __call__(self, frame): 13 | with h5py.File(self._name, "r") as f: 14 | entry = f["/entry/image"] 15 | return entry[frame] 16 | 17 | 18 | class ExternalFileReference(Signal): 19 | """ 20 | A pure software Signal that describe()s an image in an external file. 
21 | """ 22 | 23 | def describe(self): 24 | resource_document_data = super().describe() 25 | resource_document_data[self.name].update( 26 | { 27 | "external": "FILESTORE:", 28 | "dtype": "array", 29 | } 30 | ) 31 | return resource_document_data 32 | -------------------------------------------------------------------------------- /src/blop/sim/xrt_beamline.py: -------------------------------------------------------------------------------- 1 | import itertools 2 | import time 3 | from collections import deque 4 | from datetime import datetime 5 | from pathlib import Path 6 | 7 | import h5py 8 | import matplotlib as mpl 9 | import numpy as np 10 | import scipy as sp 11 | from event_model import compose_resource 12 | from ophyd import Component as Cpt 13 | from ophyd import Device, EpicsSignal, Signal 14 | from ophyd.sim import NullStatus, new_uid 15 | from ophyd.utils import make_dir_tree 16 | 17 | from blop.sim.handlers import ExternalFileReference 18 | from blop.sim.xrt_kb_model import build_beamline, build_histRGB, run_process 19 | from blop.utils import get_beam_stats 20 | 21 | TEST = False 22 | 23 | 24 | class xrtEpicsScreen(Device): 25 | sum = Cpt(Signal, kind="hinted") 26 | max = Cpt(Signal, kind="normal") 27 | area = Cpt(Signal, kind="normal") 28 | cen_x = Cpt(Signal, kind="hinted") 29 | cen_y = Cpt(Signal, kind="hinted") 30 | wid_x = Cpt(Signal, kind="hinted") 31 | wid_y = Cpt(Signal, kind="hinted") 32 | image = Cpt(EpicsSignal, "BL:Screen1:Array", kind="normal") 33 | acquire = Cpt(EpicsSignal, "BL:Screen1:Acquire", kind="normal") 34 | image_shape = Cpt(Signal, value=(300, 400), kind="normal") 35 | noise = Cpt(Signal, kind="normal") 36 | 37 | def __init__(self, root_dir: str = "/tmp/blop/sim", verbose: bool = True, noise: bool = True, *args, **kwargs): 38 | _ = make_dir_tree(datetime.now().year, base_path=root_dir) 39 | 40 | self._root_dir = root_dir 41 | self._verbose = verbose 42 | 43 | # Used for the emulated cameras only. 44 | self._img_dir = None 45 | 46 | # Resource/datum docs related variables. 
47 | self._asset_docs_cache = deque() 48 | self._resource_document = None 49 | self._datum_factory = None 50 | super().__init__(*args, **kwargs) 51 | 52 | def trigger(self): 53 | super().trigger() 54 | self.acquire.put(1) 55 | while self.acquire.get() > 0: 56 | time.sleep(0.01) 57 | raw_image = self.image.get() 58 | image = raw_image.reshape(*self.image_shape.get()) 59 | 60 | current_frame = next(self._counter) 61 | 62 | self._dataset.resize((current_frame + 1, *self.image_shape.get())) 63 | 64 | self._dataset[current_frame, :, :] = image 65 | 66 | datum_document = self._datum_factory(datum_kwargs={"frame": current_frame}) 67 | self._asset_docs_cache.append(("datum", datum_document)) 68 | 69 | stats = get_beam_stats(image) 70 | 71 | for attr in ["max", "sum", "cen_x", "cen_y", "wid_x", "wid_y"]: 72 | getattr(self, attr).put(stats[attr]) 73 | 74 | return NullStatus() 75 | 76 | def stage(self): 77 | super().stage() 78 | date = datetime.now() 79 | self._assets_dir = date.strftime("%Y/%m/%d") 80 | data_file = f"{new_uid()}.h5" 81 | 82 | self._resource_document, self._datum_factory, _ = compose_resource( 83 | start={"uid": "needed for compose_resource() but will be discarded"}, 84 | spec="HDF5", 85 | root=self._root_dir, 86 | resource_path=str(Path(self._assets_dir) / Path(data_file)), 87 | resource_kwargs={}, 88 | ) 89 | 90 | self._data_file = str(Path(self._resource_document["root"]) / Path(self._resource_document["resource_path"])) 91 | 92 | # now discard the start uid, a real one will be added later 93 | self._resource_document.pop("run_start") 94 | self._asset_docs_cache.append(("resource", self._resource_document)) 95 | 96 | self._h5file_desc = h5py.File(self._data_file, "x") 97 | group = self._h5file_desc.create_group("/entry") 98 | self._dataset = group.create_dataset( 99 | "image", 100 | data=np.full(fill_value=np.nan, shape=(1, *self.image_shape.get())), 101 | maxshape=(None, *self.image_shape.get()), 102 | chunks=(1, *self.image_shape.get()), 103 | dtype="float64", 104 | compression="lzf", 105 | ) 106 | self._counter = itertools.count() 107 | 108 | def unstage(self): 109 | super().unstage() 110 | del self._dataset 111 | self._h5file_desc.close() 112 | self._resource_document = None 113 | self._datum_factory = None 114 | 115 | 116 | class Detector(Device): 117 | sum = Cpt(Signal, kind="hinted") 118 | max = Cpt(Signal, kind="normal") 119 | area = Cpt(Signal, kind="normal") 120 | cen_x = Cpt(Signal, kind="hinted") 121 | cen_y = Cpt(Signal, kind="hinted") 122 | wid_x = Cpt(Signal, kind="hinted") 123 | wid_y = Cpt(Signal, kind="hinted") 124 | image = Cpt(ExternalFileReference, kind="normal") 125 | image_shape = Cpt(Signal, value=(300, 400), kind="normal") 126 | noise = Cpt(Signal, kind="normal") 127 | 128 | def __init__(self, root_dir: str = "/tmp/blop/sim", verbose: bool = True, noise: bool = True, *args, **kwargs): 129 | super().__init__(*args, **kwargs) 130 | 131 | _ = make_dir_tree(datetime.now().year, base_path=root_dir) 132 | 133 | self._root_dir = root_dir 134 | self._verbose = verbose 135 | 136 | # Used for the emulated cameras only. 137 | self._img_dir = None 138 | 139 | # Resource/datum docs related variables. 
140 | self._asset_docs_cache = deque() 141 | self._resource_document = None 142 | self._datum_factory = None 143 | self.noise.put(noise) 144 | self.limits = [[-0.6, 0.6], [-0.45, 0.45]] 145 | if TEST: 146 | self.mplFig = mpl.figure.Figure() 147 | self.mplFig.subplots_adjust(left=0.15, bottom=0.15, top=0.92) 148 | self.mplAx = self.mplFig.add_subplot(111) 149 | 150 | xv = np.random.rand(400, 300) 151 | self.im = self.mplAx.imshow( 152 | xv.T, 153 | aspect="auto", 154 | origin="lower", 155 | vmin=0, 156 | vmax=1e3, 157 | cmap="jet", 158 | extent=(self.limits[0][0], self.limits[0][1], self.limits[1][0], self.limits[1][1]), 159 | ) 160 | self.counter = 0 161 | self.beamLine = build_beamline() 162 | 163 | def trigger(self): 164 | super().trigger() 165 | raw_image = self.generate_beam(noise=self.noise.get()) 166 | 167 | current_frame = next(self._counter) 168 | 169 | self._dataset.resize((current_frame + 1, *self.image_shape.get())) 170 | 171 | self._dataset[current_frame, :, :] = raw_image 172 | 173 | datum_document = self._datum_factory(datum_kwargs={"frame": current_frame}) 174 | self._asset_docs_cache.append(("datum", datum_document)) 175 | 176 | stats = get_beam_stats(raw_image) 177 | self.image.put(datum_document["datum_id"]) 178 | 179 | for attr in ["max", "sum", "cen_x", "cen_y", "wid_x", "wid_y"]: 180 | getattr(self, attr).put(stats[attr]) 181 | 182 | return NullStatus() 183 | 184 | def stage(self): 185 | super().stage() 186 | date = datetime.now() 187 | self._assets_dir = date.strftime("%Y/%m/%d") 188 | data_file = f"{new_uid()}.h5" 189 | 190 | self._resource_document, self._datum_factory, _ = compose_resource( 191 | start={"uid": "needed for compose_resource() but will be discarded"}, 192 | spec="HDF5", 193 | root=self._root_dir, 194 | resource_path=str(Path(self._assets_dir) / Path(data_file)), 195 | resource_kwargs={}, 196 | ) 197 | 198 | self._data_file = str(Path(self._resource_document["root"]) / Path(self._resource_document["resource_path"])) 199 | 200 | # now discard the start uid, a real one will be added later 201 | self._resource_document.pop("run_start") 202 | self._asset_docs_cache.append(("resource", self._resource_document)) 203 | 204 | self._h5file_desc = h5py.File(self._data_file, "x") 205 | group = self._h5file_desc.create_group("/entry") 206 | self._dataset = group.create_dataset( 207 | "image", 208 | data=np.full(fill_value=np.nan, shape=(1, *self.image_shape.get())), 209 | maxshape=(None, *self.image_shape.get()), 210 | chunks=(1, *self.image_shape.get()), 211 | dtype="float64", 212 | compression="lzf", 213 | ) 214 | self._counter = itertools.count() 215 | 216 | def unstage(self): 217 | super().unstage() 218 | del self._dataset 219 | self._h5file_desc.close() 220 | self._resource_document = None 221 | self._datum_factory = None 222 | 223 | def collect_asset_docs(self): 224 | items = list(self._asset_docs_cache) 225 | self._asset_docs_cache.clear() 226 | yield from items 227 | 228 | def generate_beam_func(self, noise: bool = True): 229 | nx, ny = self.image_shape.get() 230 | 231 | x = np.linspace(-10, 10, ny) 232 | y = np.linspace(-10, 10, nx) 233 | X, Y = np.meshgrid(x, y) 234 | 235 | x0 = self.parent.kbh_ush.get() - self.parent.kbh_dsh.get() 236 | y0 = self.parent.kbv_usv.get() - self.parent.kbv_dsv.get() 237 | x_width = np.sqrt(0.2 + 5e-1 * (self.parent.kbh_ush.get() + self.parent.kbh_dsh.get() - 1) ** 2) 238 | y_width = np.sqrt(0.1 + 5e-1 * (self.parent.kbv_usv.get() + self.parent.kbv_dsv.get() - 2) ** 2) 239 | 240 | beam = np.exp(-0.5 * (((X - x0) / x_width) ** 
4 + ((Y - y0) / y_width) ** 4)) / ( 241 | np.sqrt(2 * np.pi) * x_width * y_width 242 | ) 243 | 244 | mask = X > self.parent.ssa_inboard.get() 245 | mask &= X < self.parent.ssa_outboard.get() 246 | mask &= Y > self.parent.ssa_lower.get() 247 | mask &= Y < self.parent.ssa_upper.get() 248 | mask = sp.ndimage.gaussian_filter(mask.astype(float), sigma=1) 249 | 250 | image = beam * mask 251 | 252 | if noise: 253 | kx = np.fft.fftfreq(n=len(x), d=0.1) 254 | ky = np.fft.fftfreq(n=len(y), d=0.1) 255 | KX, KY = np.meshgrid(kx, ky) 256 | 257 | power_spectrum = 1 / (1e-2 + KX**2 + KY**2) 258 | 259 | white_noise = 1e-3 * np.random.standard_normal(size=X.shape) 260 | pink_noise = 1e-3 * np.real(np.fft.ifft2(power_spectrum * np.fft.fft2(np.random.standard_normal(size=X.shape)))) 261 | # background = 5e-3 * (X - Y) / X.max() 262 | 263 | image += white_noise + pink_noise 264 | 265 | return image 266 | 267 | def generate_beam_xrt(self, noise: bool = True): 268 | R2 = self.parent.kbh_dsh.get() 269 | R1 = self.parent.kbv_dsv.get() 270 | 271 | self.beamLine.toroidMirror01.R = R1 272 | self.beamLine.toroidMirror02.R = R2 273 | outDict = run_process(self.beamLine) 274 | lb = outDict["screen01beamLocal01"] 275 | 276 | hist2d, _, _ = build_histRGB(lb, lb, limits=self.limits, isScreen=True, shape=[400, 300]) 277 | image = hist2d 278 | _ = np.max(image) 279 | image += 1e-3 * np.abs(np.random.standard_normal(size=image.shape)) 280 | self.counter += 1 281 | 282 | return image 283 | 284 | def generate_beam(self, *args, **kwargs): 285 | return self.generate_beam_xrt(*args, **kwargs) 286 | 287 | 288 | class BeamlineEpics(Device): 289 | det = Cpt(xrtEpicsScreen, name="DetectorScreen") 290 | 291 | kbh_ush = Cpt(Signal, kind="hinted") 292 | kbh_dsh = Cpt(EpicsSignal, ":TM_HOR:R", kind="hinted") 293 | kbv_usv = Cpt(Signal, kind="hinted") 294 | kbv_dsv = Cpt(EpicsSignal, ":TM_VERT:R", kind="hinted") 295 | 296 | ssa_inboard = Cpt(Signal, value=-5.0, kind="hinted") 297 | ssa_outboard = Cpt(Signal, value=5.0, kind="hinted") 298 | ssa_lower = Cpt(Signal, value=-5.0, kind="hinted") 299 | ssa_upper = Cpt(Signal, value=5.0, kind="hinted") 300 | 301 | def __init__(self, *args, **kwargs): 302 | self.beamline = build_beamline() 303 | super().__init__(*args, **kwargs) 304 | 305 | 306 | class Beamline(Device): 307 | det = Cpt(Detector) 308 | 309 | kbh_ush = Cpt(Signal, kind="hinted") 310 | kbh_dsh = Cpt(Signal, kind="hinted") 311 | kbv_usv = Cpt(Signal, kind="hinted") 312 | kbv_dsv = Cpt(Signal, kind="hinted") 313 | 314 | ssa_inboard = Cpt(Signal, value=-5.0, kind="hinted") 315 | ssa_outboard = Cpt(Signal, value=5.0, kind="hinted") 316 | ssa_lower = Cpt(Signal, value=-5.0, kind="hinted") 317 | ssa_upper = Cpt(Signal, value=5.0, kind="hinted") 318 | 319 | def __init__(self, *args, **kwargs): 320 | super().__init__(*args, **kwargs) 321 | -------------------------------------------------------------------------------- /src/blop/sim/xrt_kb_model.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | __author__ = "Konstantin Klementiev", "Roman Chernikov" 4 | __date__ = "2024-09-27" 5 | 6 | Created with xrtQook 7 | 8 | 9 | 10 | 11 | """ 12 | 13 | import numpy as np 14 | import xrt.backends.raycing as raycing 15 | import xrt.backends.raycing.oes as roes 16 | import xrt.backends.raycing.run as rrun 17 | import xrt.backends.raycing.screens as rscreens 18 | import xrt.backends.raycing.sources as rsources 19 | import xrt.plotter as xrtplot 20 | import xrt.runner as xrtrun 21 | 22 | limits = [[-0.6, 
0.6], [-0.45, 0.45]] 23 | 24 | 25 | def build_histRGB(lb, gb, limits=None, isScreen=False, shape=None): 26 | if shape is None: 27 | shape = [256, 256] 28 | good = (lb.state == 1) | (lb.state == 2) 29 | if isScreen: 30 | x, y, z = lb.x[good], lb.z[good], lb.y[good] 31 | else: 32 | x, y, z = lb.x[good], lb.y[good], lb.z[good] 33 | goodlen = len(lb.x[good]) 34 | hist2dRGB = np.zeros((shape[1], shape[0], 3), dtype=np.float64) 35 | hist2d = np.zeros((shape[1], shape[0]), dtype=np.float64) 36 | 37 | if limits is None and goodlen > 0: 38 | limits = np.array([[np.min(x), np.max(x)], [np.min(y), np.max(y)], [np.min(z), np.max(z)]]) 39 | 40 | if goodlen > 0: 41 | beamLimits = [limits[1], limits[0]] or None 42 | flux = gb.Jss[good] + gb.Jpp[good] 43 | hist2d, _, _ = np.histogram2d(y, x, bins=[shape[1], shape[0]], range=beamLimits, weights=flux) 44 | hist2dRGB = None 45 | return hist2d, hist2dRGB, limits 46 | 47 | 48 | def build_beamline(): 49 | beamLine = raycing.BeamLine() 50 | 51 | beamLine.geometricSource01 = rsources.GeometricSource( 52 | bl=beamLine, center=[0, 0, 0], nrays=25000, energies=(9000, 100), distE="normal", dx=0.2, dz=0.1, dxprime=0.00015 53 | ) 54 | 55 | beamLine.toroidMirror01 = roes.ToroidMirror( 56 | bl=beamLine, 57 | center=[0, 10000, 0], 58 | pitch=r"5deg", 59 | limPhysX=[-20.0, 20.0], 60 | limPhysY=[-150.0, 150.0], 61 | R=38245, 62 | r=100000000.0, 63 | ) 64 | 65 | beamLine.toroidMirror02 = roes.ToroidMirror( 66 | bl=beamLine, 67 | center=[0, 11000, r"auto"], 68 | pitch=r"5deg", 69 | yaw=r"10deg", 70 | positionRoll=r"90deg", 71 | rotationSequence=r"RyRxRz", 72 | limPhysX=[-20, 20], 73 | limPhysY=[-150, 150], 74 | R=21035, 75 | r=100000000.0, 76 | ) 77 | 78 | beamLine.screen01 = rscreens.Screen(bl=beamLine, center=[164.87347936545572, 11935, 343.73164815693235]) 79 | 80 | return beamLine 81 | 82 | 83 | def run_process(beamLine): 84 | geometricSource01beamGlobal01 = beamLine.geometricSource01.shine() 85 | 86 | toroidMirror01beamGlobal01, toroidMirror01beamLocal01 = beamLine.toroidMirror01.reflect( 87 | beam=geometricSource01beamGlobal01 88 | ) 89 | 90 | toroidMirror02beamGlobal01, toroidMirror02beamLocal01 = beamLine.toroidMirror02.reflect(beam=toroidMirror01beamGlobal01) 91 | 92 | screen01beamLocal01 = beamLine.screen01.expose(beam=toroidMirror02beamGlobal01) 93 | 94 | outDict = { 95 | "geometricSource01beamGlobal01": geometricSource01beamGlobal01, 96 | "toroidMirror01beamGlobal01": toroidMirror01beamGlobal01, 97 | "toroidMirror01beamLocal01": toroidMirror01beamLocal01, 98 | "toroidMirror02beamGlobal01": toroidMirror02beamGlobal01, 99 | "toroidMirror02beamLocal01": toroidMirror02beamLocal01, 100 | "screen01beamLocal01": screen01beamLocal01, 101 | } 102 | beamLine.prepare_flow() 103 | return outDict 104 | 105 | 106 | rrun.run_process = run_process 107 | 108 | 109 | def define_plots(): 110 | plots = [] 111 | 112 | plot01 = xrtplot.XYCPlot( 113 | beam=r"screen01beamLocal01", 114 | xaxis=xrtplot.XYCAxis(label=r"x", limits=limits[0], bins=400, ppb=1, fwhmFormatStr="%.3f"), 115 | yaxis=xrtplot.XYCAxis(label=r"z", limits=limits[1], bins=300, ppb=1, fwhmFormatStr="%.3f"), 116 | caxis=xrtplot.XYCAxis(label=r"energy", unit=r"eV", bins=300, ppb=1), 117 | title=r"plot01", 118 | aspect="auto", 119 | ) 120 | plots.append(plot01) 121 | return plots 122 | 123 | 124 | def main(): 125 | beamLine = build_beamline() 126 | beamLine.glow() 127 | E0 = list(beamLine.geometricSource01.energies)[0] 128 | beamLine.alignE = E0 129 | plots = define_plots() 130 | xrtrun.run_ray_tracing(plots=plots, 
backend=r"raycing", beamLine=beamLine) 131 | 132 | 133 | if __name__ == "__main__": 134 | main() 135 | -------------------------------------------------------------------------------- /src/blop/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NSLS-II/blop/46ba5cbfaf557871c58508063e6dcbf7afd0af6d/src/blop/tests/__init__.py -------------------------------------------------------------------------------- /src/blop/tests/conftest.py: -------------------------------------------------------------------------------- 1 | # content of conftest.py 2 | import asyncio 3 | import logging 4 | 5 | import databroker # type: ignore[import-untyped] 6 | import numpy as np 7 | import pytest 8 | from bluesky.callbacks import best_effort 9 | from bluesky.run_engine import RunEngine 10 | from databroker import Broker 11 | 12 | from blop import DOF, Agent, Objective 13 | from blop.digestion.tests import chankong_and_haimes_digestion, sketchy_himmelblau_digestion 14 | from blop.dofs import BrownianMotion 15 | from blop.sim import HDF5Handler 16 | 17 | logger = logging.getLogger("blop") 18 | logger.setLevel(logging.DEBUG) 19 | 20 | 21 | @pytest.fixture(scope="function") 22 | def db(): 23 | """Return a data broker""" 24 | # MongoDB backend: 25 | db = Broker.named("temp") # mongodb backend 26 | try: 27 | databroker.assets.utils.install_sentinels(db.reg.config, version=1) 28 | except Exception: 29 | pass 30 | 31 | db.reg.register_handler("HDF5", HDF5Handler, overwrite=True) 32 | 33 | return db 34 | 35 | 36 | @pytest.fixture(scope="function") 37 | def RE(db): 38 | loop = asyncio.new_event_loop() 39 | loop.set_debug(True) 40 | RE = RunEngine({}, loop=loop) 41 | RE.subscribe(db.insert) 42 | 43 | bec = best_effort.BestEffortCallback() 44 | RE.subscribe(bec) 45 | 46 | bec.disable_baseline() 47 | bec.disable_heading() 48 | bec.disable_table() 49 | bec.disable_plots() 50 | 51 | return RE 52 | 53 | 54 | single_task_agents = [ 55 | "1d_1f", 56 | "2d_1f", 57 | "2d_1f_1c", 58 | "2d_2f_2c", 59 | "3d_2r_2f_1c", 60 | ] 61 | 62 | nonpareto_multitask_agents = ["2d_2c"] 63 | 64 | pareto_agents = ["2d_2f_2c", "3d_2r_2f_1c"] 65 | 66 | all_agents = [*single_task_agents, *nonpareto_multitask_agents, *pareto_agents] 67 | 68 | 69 | def get_agent(param): 70 | """ 71 | Generate a bunch of different agents. 
72 | """ 73 | 74 | if param == "1d_1f": 75 | return Agent( 76 | dofs=[DOF(description="The first DOF", name="x1", search_domain=(-5.0, 5.0))], 77 | objectives=[Objective(description="Himmelblau’s function", name="himmelblau", target="min")], 78 | digestion=sketchy_himmelblau_digestion, 79 | ) 80 | 81 | elif param == "1d_1c": 82 | return Agent( 83 | dofs=[DOF(description="The first DOF", name="x1", search_domain=(-5.0, 5.0))], 84 | objectives=[Objective(description="Himmelblau’s function", name="himmelblau", constraint=(95, 105))], 85 | digestion=sketchy_himmelblau_digestion, 86 | ) 87 | 88 | elif param == "2d_1f": 89 | return Agent( 90 | dofs=[ 91 | DOF(description="The first DOF", name="x1", search_domain=(-5.0, 5.0)), 92 | DOF(description="The first DOF", name="x2", search_domain=(-5.0, 5.0)), 93 | ], 94 | objectives=[Objective(description="Himmelblau’s function", name="himmelblau", target="min")], 95 | digestion=sketchy_himmelblau_digestion, 96 | ) 97 | 98 | elif param == "2d_2c": 99 | return Agent( 100 | dofs=[ 101 | DOF(description="The first DOF", name="x1", search_domain=(-5.0, 5.0)), 102 | DOF(description="The first DOF", name="x2", search_domain=(-5.0, 5.0)), 103 | ], 104 | objectives=[ 105 | Objective(description="Himmelblau’s function", name="himmelblau", constraint=(95, 105)), 106 | Objective(description="Himmelblau’s function", name="himmelblau", constraint=(95, 105)), 107 | ], 108 | digestion=sketchy_himmelblau_digestion, 109 | ) 110 | 111 | elif param == "2d_1f_1c": 112 | return Agent( 113 | dofs=[ 114 | DOF(description="The first DOF", name="x1", search_domain=(-5.0, 5.0)), 115 | DOF(description="The first DOF", name="x2", search_domain=(-5.0, 5.0)), 116 | ], 117 | objectives=[ 118 | Objective(description="Himmelblau’s function", name="himmelblau", target="min"), 119 | Objective(description="Himmelblau’s function", name="himmelblau", constraint=(95, 105)), 120 | ], 121 | digestion=sketchy_himmelblau_digestion, 122 | ) 123 | 124 | elif param == "2d_2f_2c": 125 | return Agent( 126 | dofs=[ 127 | DOF(description="The first DOF", name="x1", search_domain=(-5.0, 5.0)), 128 | DOF(description="The first DOF", name="x2", search_domain=(-5.0, 5.0)), 129 | ], 130 | objectives=[ 131 | Objective(description="f1", name="f1", target="min"), 132 | Objective(description="f2", name="f2", target="min"), 133 | Objective(description="c1", name="c1", constraint=(-np.inf, 225)), 134 | Objective(description="c2", name="c2", constraint=(-np.inf, 0)), 135 | ], 136 | digestion=chankong_and_haimes_digestion, 137 | ) 138 | 139 | elif param == "3d_2r_2f_1c": 140 | return Agent( 141 | dofs=[ 142 | DOF(name="x1", search_domain=(-5.0, 5.0)), 143 | DOF(name="x2", search_domain=(-5.0, 5.0)), 144 | DOF(name="x3", search_domain=(-5.0, 5.0), active=False), 145 | DOF(device=BrownianMotion(name="brownian1"), read_only=True), 146 | DOF(device=BrownianMotion(name="brownian2"), read_only=True, active=False), 147 | ], 148 | objectives=[ 149 | Objective(name="himmelblau", target="min"), 150 | Objective(name="himmelblau_transpose", target="min"), 151 | Objective(description="Himmelblau’s function", name="himmelblau", constraint=(95, 105)), 152 | ], 153 | digestion=sketchy_himmelblau_digestion, 154 | ) 155 | 156 | else: 157 | raise ValueError(f"Invalid agent parameter '{param}'.") 158 | 159 | 160 | @pytest.fixture 161 | def agent(request): 162 | agent = get_agent(request.param) 163 | 164 | # add a useless DOF to try and break things 165 | agent.dofs.add(DOF(name="dummy", search_domain=(0, 1), active=False)) 166 | 167 | 
return agent 168 | -------------------------------------------------------------------------------- /src/blop/tests/test_acqfs.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from .conftest import all_agents 4 | 5 | 6 | @pytest.mark.parametrize("acqf", ["ei", "pi", "em", "ucb"]) 7 | @pytest.mark.parametrize("agent", all_agents, indirect=True) 8 | def test_analytic_acqfs(agent, RE, db, acqf): 9 | agent.db = db 10 | RE(agent.learn("qr", n=32)) 11 | RE(agent.learn(acqf, n=1)) 12 | getattr(agent, acqf) 13 | 14 | 15 | @pytest.mark.parametrize("acqf", ["qei", "qpi", "qem", "qucb"]) 16 | @pytest.mark.parametrize("agent", all_agents, indirect=True) 17 | def test_monte_carlo_acqfs(agent, RE, db, acqf): 18 | agent.db = db 19 | RE(agent.learn("qr", n=32)) 20 | RE(agent.learn(acqf, n=1)) 21 | getattr(agent, acqf) 22 | -------------------------------------------------------------------------------- /src/blop/tests/test_agents.py: -------------------------------------------------------------------------------- 1 | import pytest # noqa F401 2 | import numpy as np 3 | 4 | from .conftest import all_agents 5 | 6 | 7 | @pytest.mark.parametrize("agent", all_agents, indirect=True) 8 | def test_agent(agent, RE, db): 9 | """ 10 | All agents should be able to do these things. 11 | """ 12 | 13 | agent.db = db 14 | RE(agent.learn("qr", n=32)) 15 | 16 | best = agent.best 17 | assert [dof.name in best for dof in agent.dofs] 18 | assert [obj.name in best for obj in agent.objectives] 19 | assert agent.dofs.x1 is agent.dofs[0] 20 | 21 | print(agent.dofs) 22 | print(agent.objectives) 23 | 24 | RE(agent.learn("qei", n=2)) 25 | 26 | # test turning on the dummy DOF 27 | agent.dofs.dummy.activate() 28 | RE(agent.learn("qei", n=2)) 29 | agent.dofs.dummy.deactivate() 30 | 31 | # test forgetting 32 | RE(agent.learn("qr", n=4)) 33 | agent.forget(last=2) 34 | 35 | # test some functions 36 | agent.refresh() 37 | agent.redigest() 38 | 39 | # test trust domains for DOFs 40 | dof = agent.dofs(active=True)[0] 41 | raw_x = agent.raw_inputs(dof.name).numpy() 42 | dof.trust_domain = tuple(np.nanquantile(raw_x, q=[0.2, 0.8])) 43 | 44 | RE(agent.learn("qei", n=2)) 45 | 46 | # test trust domains for DOFs 47 | obj = agent.objectives(active=True)[0] 48 | raw_y = agent.raw_targets(index=obj.name).numpy() 49 | obj.trust_domain = tuple(np.nanquantile(raw_y, q=[0.2, 0.8])) 50 | 51 | RE(agent.learn("qei", n=2)) 52 | 53 | # save the data, reset the agent, and get the data back 54 | agent.save_data("/tmp/test_save_data.h5") 55 | agent.reset() 56 | agent.load_data("/tmp/test_save_data.h5") 57 | 58 | RE(agent.learn("qei", n=2)) 59 | 60 | # test plots 61 | agent.plot_objectives() 62 | agent.plot_acquisition() 63 | agent.plot_validity() 64 | agent.plot_history() 65 | 66 | 67 | @pytest.mark.parametrize("agent", all_agents, indirect=True) 68 | def test_benchmark(agent, RE, db): 69 | agent.db = db 70 | per_iter_learn_kwargs_list = [{"acqf": "qr", "n": 32}, {"acqf": "qei", "n": 2, "iterations": 2}] 71 | RE(agent.benchmark(output_dir="/tmp/blop", iterations=1, per_iter_learn_kwargs_list=per_iter_learn_kwargs_list)) 72 | -------------------------------------------------------------------------------- /src/blop/tests/test_dofs.py: -------------------------------------------------------------------------------- 1 | import pytest # noqa F401 2 | 3 | from blop.dofs import DOF, DOFList 4 | 5 | 6 | def test_dof_types(): 7 | dof1 = DOF(description="A continuous DOF", type="continuous", name="length", 
search_domain=(0, 5), units="mm") 8 | dof2 = DOF( 9 | description="A binary DOF", 10 | type="binary", 11 | name="in_or_out", 12 | search_domain={True, False}, 13 | ) 14 | dof3 = DOF( 15 | description="An ordinal DOF", 16 | type="ordinal", 17 | name="noise_level", 18 | search_domain={"low", "medium", "high"}, 19 | ) 20 | dof4 = DOF( 21 | description="A categorical DOF", 22 | type="categorical", 23 | name="fruit", 24 | search_domain={"mango", "banana", "papaya"}, 25 | trust_domain={"mango", "orange", "banana", "papaya", "cantaloupe"}, 26 | ) 27 | 28 | dofs = DOFList([dof1, dof2, dof3, dof4]) # noqa 29 | dofs.__repr__() 30 | -------------------------------------------------------------------------------- /src/blop/tests/test_pareto.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from .conftest import pareto_agents 4 | 5 | 6 | @pytest.mark.parametrize("agent", pareto_agents, indirect=True) 7 | def test_pareto(agent, RE, db): 8 | agent.db = db 9 | RE(agent.learn("qr", n=16)) 10 | agent.plot_pareto_front() 11 | 12 | 13 | @pytest.mark.parametrize("acqf", ["qnehvi"]) 14 | @pytest.mark.parametrize("agent", pareto_agents, indirect=True) 15 | def test_monte_carlo_pareto_acqfs(agent, RE, db, acqf): 16 | agent.db = db 17 | RE(agent.learn("qr", n=32)) 18 | RE(agent.learn(acqf, n=2)) 19 | agent.dofs[0].deactivate() 20 | RE(agent.learn(acqf, n=2)) 21 | getattr(agent, acqf) 22 | -------------------------------------------------------------------------------- /src/blop/tests/test_sims.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from blop import DOF, Agent, Objective 4 | from blop.digestion import beam_stats_digestion 5 | from blop.sim import Beamline 6 | 7 | 8 | def test_kb_simulation(RE, db): 9 | beamline = Beamline(name="bl") 10 | beamline.det.noise.put(False) 11 | 12 | dofs = [ 13 | DOF(description="KBV downstream", device=beamline.kbv_dsv, search_domain=(-5.0, 5.0)), 14 | DOF(description="KBV upstream", device=beamline.kbv_usv, search_domain=(-5.0, 5.0)), 15 | DOF(description="KBH downstream", device=beamline.kbh_dsh, search_domain=(-5.0, 5.0)), 16 | DOF(description="KBH upstream", device=beamline.kbh_ush, search_domain=(-5.0, 5.0)), 17 | ] 18 | 19 | objectives = [ 20 | Objective(name="bl_det_sum", target="max", transform="log", trust_domain=(200, np.inf)), 21 | Objective(name="bl_det_wid_x", target="min", transform="log", latent_groups=[("bl_kbh_dsh", "bl_kbh_ush")]), 22 | Objective(name="bl_det_wid_y", target="min", transform="log", latent_groups=[("bl_kbv_dsv", "bl_kbv_usv")]), 23 | ] 24 | 25 | agent = Agent( 26 | dofs=dofs, 27 | objectives=objectives, 28 | detectors=[beamline.det], 29 | digestion=beam_stats_digestion, 30 | digestion_kwargs={"image_key": "bl_det_image"}, 31 | verbose=True, 32 | db=db, 33 | tolerate_acquisition_errors=False, 34 | enforce_all_objectives_valid=True, 35 | train_every=3, 36 | ) 37 | 38 | RE(agent.learn("qr", n=16)) 39 | RE(agent.learn("qei", n=4, iterations=4)) 40 | -------------------------------------------------------------------------------- /src/blop/utils/__init__.py: -------------------------------------------------------------------------------- 1 | import botorch # type: ignore[import-untyped] 2 | import numpy as np 3 | import scipy as sp # type: ignore[import-untyped] 4 | import torch 5 | from python_tsp.heuristics import solve_tsp_simulated_annealing # type: ignore[import-untyped] 6 | 7 | from . 
import functions # noqa 8 | 9 | 10 | def get_beam_stats(image: np.ndarray, threshold: float = 0.5) -> dict[str, float | np.ndarray]: 11 | ny, nx = image.shape 12 | 13 | fim = image.copy() 14 | fim -= np.median(fim, axis=0) 15 | fim -= np.median(fim, axis=1)[:, None] 16 | 17 | fim = sp.ndimage.median_filter(fim, size=3) 18 | fim = sp.ndimage.gaussian_filter(fim, sigma=1) 19 | 20 | m = fim > threshold * fim.max() 21 | area = m.sum() 22 | 23 | cs_x = np.cumsum(m.sum(axis=0)) / area 24 | cs_y = np.cumsum(m.sum(axis=1)) / area 25 | 26 | q_min, q_max = [0.15865, 0.84135] # one sigma 27 | q_min, q_max = [0.05, 0.95] # 90% 28 | 29 | x_min, x_max = np.interp([q_min, q_max], cs_x, np.arange(nx)) 30 | y_min, y_max = np.interp([q_min, q_max], cs_y, np.arange(ny)) 31 | 32 | stats = { 33 | "max": fim.max(), 34 | "sum": fim.sum(), 35 | "area": area, 36 | "cen_x": (x_min + x_max) / 2, 37 | "cen_y": (y_min + y_max) / 2, 38 | "wid_x": x_max - x_min, 39 | "wid_y": y_max - y_min, 40 | "x_min": x_min, 41 | "x_max": x_max, 42 | "y_min": y_min, 43 | "y_max": y_max, 44 | "bbox": [[x_min, x_max, x_max, x_min, x_min], [y_min, y_min, y_max, y_max, y_min]], 45 | } 46 | 47 | return stats 48 | 49 | 50 | def cummax(x: np.ndarray) -> list[float]: 51 | return [np.nanmax(x[: i + 1]) for i in range(len(np.atleast_1d(x)))] 52 | 53 | 54 | def sobol_sampler(bounds: torch.Tensor, n: int, q: int = 1) -> torch.Tensor: 55 | """ 56 | Returns $n$ quasi-randomly sampled points within the bounds (a 2 by d tensor) 57 | and batch size $q$ 58 | """ 59 | return botorch.utils.sampling.draw_sobol_samples(bounds, n=n, q=q) 60 | 61 | 62 | def normalized_sobol_sampler(n: int, d: int) -> torch.Tensor: 63 | """ 64 | Returns $n$ quasi-randomly sampled points in the [0,1]^d hypercube 65 | """ 66 | normalized_bounds = torch.outer(torch.tensor([0, 1]), torch.ones(d)) 67 | return sobol_sampler(normalized_bounds, n=n, q=1) 68 | 69 | 70 | def estimate_root_indices(x: np.ndarray) -> np.ndarray: 71 | # or, indices_before_sign_changes 72 | i_whole = np.where(np.sign(x[1:]) != np.sign(x[:-1]))[0] 73 | i_part = 1 - x[i_whole + 1] / (x[i_whole + 1] - x[i_whole]) 74 | return i_whole + i_part 75 | 76 | 77 | def _fast_psd_inverse(M: np.ndarray) -> np.ndarray: 78 | """ 79 | About twice as fast as np.linalg.inv for large, PSD matrices. 80 | """ 81 | cholesky, _ = sp.linalg.lapack.dpotrf(M) 82 | invM, _ = sp.linalg.lapack.dpotri(cholesky) 83 | return np.where(invM, invM, invM.T) 84 | 85 | 86 | def mprod(*M: np.ndarray) -> np.ndarray: 87 | res = M[0] 88 | for m in M[1:]: 89 | res = np.matmul(res, m) 90 | return res 91 | 92 | 93 | def route(start_point: np.ndarray, points: np.ndarray, dim_weights: float | np.ndarray = 1) -> np.ndarray: 94 | """ 95 | Returns the indices of the most efficient way to visit `points`, starting from `start_point`. 
96 | """ 97 | 98 | total_points = np.concatenate( 99 | [start_point[None], points], axis=0 100 | ) # add the starting point, even though we won't go there 101 | points_dim_range = np.ptp(total_points, axis=0) 102 | dim_mask = points_dim_range > 0 103 | scaled_points = (total_points - total_points.min(axis=0)) * ( 104 | dim_weights / np.where(points_dim_range > 0, points_dim_range, 1) 105 | ) 106 | D = np.sqrt(np.square(scaled_points[:, None, :] - scaled_points[None, :, :]).sum(axis=-1)) 107 | D = (D / np.where(D > 0, D, np.inf).min()).astype(int) 108 | D[:, 0] = 0 # zero cost to return, since we don't care where we end up 109 | 110 | if dim_mask.sum() == 0: 111 | return np.arange(len(points)) 112 | 113 | permutation, _ = solve_tsp_simulated_annealing(D / np.where(D > 0, D, np.inf).min()) 114 | return np.array(permutation[1:]) - 1 # ignore the starting point since we're there already 115 | 116 | 117 | def get_movement_time(x: float | np.ndarray, v_max: float, a: float) -> float | np.ndarray: 118 | """ 119 | How long does it take an object to go distance $x$ with acceleration $a$ and maximum velocity $v_max$? 120 | That's what this function answers. 121 | """ 122 | return 2 * np.sqrt(np.abs(x) / a) * (np.abs(x) < v_max**2 / a) + (np.abs(x) / v_max + v_max / a) * ( 123 | np.abs(x) > v_max**2 / a 124 | ) 125 | -------------------------------------------------------------------------------- /src/blop/utils/functions.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | 4 | 5 | def approximate_erf(x): 6 | """ 7 | An approximation of erf(x), to compute the definite integral of the Gaussian PDF 8 | This is faster and better-conditioned near +/- infinity 9 | """ 10 | return torch.tanh(1.20278247 * x) 11 | 12 | 13 | def sigmoid(x): 14 | return 1 / (1 + np.exp(-x)) 15 | 16 | 17 | def inverse_sigmoid(x): 18 | return np.log(x / (1 - x)) 19 | 20 | 21 | def booth(x1, x2): 22 | """ 23 | The Booth function (https://en.wikipedia.org/wiki/Test_functions_for_optimization) 24 | """ 25 | return (x1 + 2 * x2 - 7) ** 2 + (2 * x1 + x2 - 5) ** 2 26 | 27 | 28 | def matyas(x1, x2): 29 | """ 30 | The Matyas function (https://en.wikipedia.org/wiki/Test_functions_for_optimization) 31 | """ 32 | return 0.26 * (x1**2 + x2**2) - 0.48 * x1 * x2 33 | 34 | 35 | def himmelblau(x1, x2): 36 | """ 37 | Himmelblau's function (https://en.wikipedia.org/wiki/Himmelblau%27s_function) 38 | """ 39 | return (x1**2 + x2 - 11) ** 2 + (x1 + x2**2 - 7) ** 2 40 | 41 | 42 | def constrained_himmelblau(x1, x2): 43 | """ 44 | Himmelblau's function, returns NaN outside the constraint 45 | """ 46 | return np.where(x1**2 + x2**2 < 50, himmelblau(x1, x2), np.nan) 47 | 48 | 49 | def binh_korn(x1, x2): 50 | """ 51 | Binh and Korn function 52 | """ 53 | f1 = 4 * x1**2 + 4 * x2**2 54 | f2 = (x1 - 5) ** 2 + (x2 - 5) ** 2 55 | g1 = (x1 - 5) ** 2 + x2**2 <= 25 56 | g2 = (x1 - 8) ** 2 + (x2 + 3) ** 2 >= 7.7 57 | 58 | c = g1 & g2 59 | 60 | return np.where(c, f1, np.nan), np.where(c, f2, np.nan) 61 | 62 | 63 | def skewed_himmelblau(x1, x2): 64 | """ 65 | Himmelblau's function, with skewed coordinates 66 | """ 67 | _x1 = 2 * x1 + x2 68 | _x2 = 0.5 * (x1 - 2 * x2) 69 | 70 | return constrained_himmelblau(_x1, _x2) 71 | 72 | 73 | def bukin(x1, x2): 74 | """ 75 | Bukin function N.6 (https://en.wikipedia.org/wiki/Test_functions_for_optimization) 76 | """ 77 | return 100 * np.sqrt(np.abs(x2 - 1e-2 * x1**2)) + 0.01 * np.abs(x1) 78 | 79 | 80 | def rastrigin(*x): 81 | """ 82 | The Rastrigin 
function in arbitrary dimensions (https://en.wikipedia.org/wiki/Rastrigin_function) 83 | """ 84 | X = np.c_[x] 85 | return 10 * X.shape[-1] + (X**2 - 10 * np.cos(2 * np.pi * X)).sum(axis=1) 86 | 87 | 88 | def styblinski_tang(*x): 89 | """ 90 | Styblinski-Tang function in arbitrary dimensions (https://en.wikipedia.org/wiki/Test_functions_for_optimization) 91 | """ 92 | X = np.c_[x] 93 | return 0.5 * (X**4 - 16 * X**2 + 5 * X).sum(axis=1) 94 | 95 | 96 | def ackley(*x): 97 | """ 98 | The Ackley function in arbitrary dimensions (https://en.wikipedia.org/wiki/Ackley_function) 99 | """ 100 | X = np.c_[x] 101 | return ( 102 | -20 * np.exp(-0.2 * np.sqrt(0.5 * (X**2).sum(axis=1))) - np.exp(0.5 * np.cos(2 * np.pi * X).sum(axis=1)) + np.e + 20 103 | ) 104 | 105 | 106 | def gaussian_beam_waist(x1, x2): 107 | """ 108 | Simulating a misaligned Gaussian beam. The optimum is at (1, 1) 109 | """ 110 | return np.sqrt(1 + 0.25 * (x1 - x2) ** 2 + 16 * (x1 + x2 - 2) ** 2) 111 | 112 | 113 | def hartmann4(*x): 114 | X = np.c_[x] 115 | 116 | alpha = np.array([1.0, 1.2, 3.0, 3.2]) 117 | 118 | A = np.array([[10, 3, 17, 3.5], [0.05, 10, 17, 0.1], [3, 3.5, 1.7, 10], [17, 8, 0.05, 10]]) 119 | 120 | P = 1e-4 * np.array( 121 | [ 122 | [1312, 1696, 5569, 124], 123 | [2329, 4135, 8307, 3736], 124 | [2348, 1451, 3522, 2883], 125 | [4047, 8828, 8732, 5743], 126 | ] 127 | ) 128 | 129 | return -(alpha * np.exp(-(A * np.square(X - P)).sum(axis=1))).sum() 130 | 131 | 132 | def hartmann6(*x): 133 | X = np.c_[x] 134 | 135 | alpha = np.array([1.0, 1.2, 3.0, 3.2]) 136 | 137 | A = np.array( 138 | [[10, 3, 17, 3.5, 1.7, 8], [0.05, 10, 17, 0.1, 8, 14], [3, 3.5, 1.7, 10, 17, 8], [17, 8, 0.05, 10, 0.1, 14]] 139 | ) 140 | 141 | P = 1e-4 * np.array( 142 | [ 143 | [1312, 1696, 5569, 124, 8283, 5886], 144 | [2329, 4135, 8307, 3736, 1004, 9991], 145 | [2348, 1451, 3522, 2883, 3047, 6650], 146 | [4047, 8828, 8732, 5743, 1091, 381], 147 | ] 148 | ) 149 | 150 | return -(alpha * np.exp(-(A * np.square(X - P)).sum(axis=1))).sum() 151 | 152 | 153 | def kb_tradeoff_2d(x1, x2): 154 | width = np.sqrt(1 + 0.25 * (x1 - x2) ** 2 + 16 * (x1 + x2 - 2) ** 2) 155 | d = np.sqrt(x1**2 + x2**2) 156 | flux = np.exp(-0.5 * np.where(d < 5, np.where(d > -5, 0, d + 5), d - 5) ** 2) 157 | 158 | return width, flux 159 | 160 | 161 | def kb_tradeoff_4d(x1, x2, x3, x4): 162 | x_width = np.sqrt(1 + 0.25 * (x1 - x2) ** 2 + 16 * (x1 + x2 - 2) ** 2) 163 | y_width = np.sqrt(1 + 0.25 * (x3 - x4) ** 2 + 16 * (x3 + x4 - 2) ** 2) 164 | d = np.sqrt(x1**2 + x2**2 + x3**2 + x4**2) 165 | flux = np.exp(-0.5 * np.where(d < 5, np.where(d > -5, 0, d + 5), d - 5) ** 2) 166 | 167 | return x_width, y_width, flux 168 | -------------------------------------------------------------------------------- /src/blop/utils/prepare_re_env.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import datetime 3 | import json # noqa F401 4 | 5 | import bluesky.plan_stubs as bps # noqa F401 6 | import bluesky.plans as bp # noqa F401 7 | import databroker # type: ignore[import-untyped] 8 | import matplotlib.pyplot as plt 9 | import numpy as np # noqa F401 10 | from bluesky.callbacks import best_effort 11 | from bluesky.run_engine import RunEngine 12 | from databroker import Broker 13 | from ophyd.utils import make_dir_tree # type: ignore[import-untyped] 14 | 15 | from blop.sim import HDF5Handler 16 | 17 | DEFAULT_DB_TYPE = "local" 18 | DEFAULT_ROOT_DIR = "/tmp/sirepo-bluesky-data" 19 | DEFAULT_ENV_TYPE = "stepper" 20 | DEFAULT_USE_SIREPO = False 21 | 
22 | 23 | def re_env(db_type=DEFAULT_DB_TYPE, root_dir=DEFAULT_ROOT_DIR): 24 | RE = RunEngine({}) 25 | bec = best_effort.BestEffortCallback() 26 | RE.subscribe(bec) 27 | 28 | db = Broker.named(db_type) 29 | db.reg.register_handler("HDF5", HDF5Handler, overwrite=True) 30 | try: 31 | databroker.assets.utils.install_sentinels(db.reg.config, version=1) 32 | except Exception: 33 | pass 34 | RE.subscribe(db.insert) 35 | 36 | _ = make_dir_tree(datetime.datetime.now().year, base_path=root_dir) 37 | 38 | return { 39 | "RE": RE, 40 | "db": db, 41 | "bec": bec, 42 | } 43 | 44 | 45 | def register_handlers(db, handlers): 46 | for handler_spec, handler_class in handlers.items(): 47 | db.reg.register_handler(handler_spec, handler_class, overwrite=True) 48 | 49 | 50 | if __name__ == "__main__": 51 | parser = argparse.ArgumentParser(description="Prepare bluesky environment") 52 | parser.add_argument( 53 | "-d", 54 | "--db-type", 55 | dest="db_type", 56 | default=DEFAULT_DB_TYPE, 57 | help="Type of databroker ('local', 'temp', etc.)", 58 | ) 59 | parser.add_argument( 60 | "-r", 61 | "--root-dir", 62 | dest="root_dir", 63 | default=DEFAULT_ROOT_DIR, 64 | help="The root dir to create YYYY/MM/DD dir structure.", 65 | ) 66 | 67 | parser.add_argument( 68 | "-s", 69 | "--use-sirepo", 70 | dest="use_sirepo", 71 | default=DEFAULT_USE_SIREPO, 72 | help="The root dir to create YYYY/MM/DD dir structure.", 73 | ) 74 | 75 | env_choices = ["stepper", "flyer"] 76 | parser.add_argument( 77 | "-e", 78 | "--env-type", 79 | dest="env_type", 80 | choices=env_choices, 81 | default=DEFAULT_ENV_TYPE, 82 | help="Type of RE environment.", 83 | ) 84 | 85 | parser.add_argument( 86 | "-f", 87 | "--file", 88 | dest="file", 89 | default="", 90 | ) 91 | 92 | args = parser.parse_args() 93 | kwargs_re = {"db_type": args.db_type, "root_dir": args.root_dir} 94 | ret = re_env(**kwargs_re) 95 | globals().update(**ret) 96 | bec = ret["bec"] 97 | db = ret["db"] 98 | 99 | if args.use_sirepo: 100 | from sirepo_bluesky.srw_handler import SRWFileHandler # type: ignore[import-untyped] 101 | 102 | if args.env_type == "stepper": 103 | from sirepo_bluesky.shadow_handler import ShadowFileHandler # type: ignore[import-untyped] 104 | 105 | handlers = {"srw": SRWFileHandler, "SIREPO_FLYER": SRWFileHandler, "shadow": ShadowFileHandler} 106 | plt.ion() 107 | elif args.env_type == "flyer": 108 | from sirepo_bluesky.madx_handler import MADXFileHandler # type: ignore[import-untyped] 109 | 110 | handlers = {"srw": SRWFileHandler, "SIREPO_FLYER": SRWFileHandler, "madx": MADXFileHandler} 111 | bec.disable_plots() # noqa: F821 112 | else: 113 | raise RuntimeError(f"Unknown environment type: {args.env_type}.\nAvailable environment types: {env_choices}") 114 | 115 | register_handlers(db, handlers) # noqa: F821 116 | -------------------------------------------------------------------------------- /src/blop/utils/sets.py: -------------------------------------------------------------------------------- 1 | from typing import cast 2 | 3 | import numpy as np 4 | 5 | 6 | def validate_set(s, type="continuous") -> set | tuple[float, float]: 7 | """ 8 | Check 9 | """ 10 | if type == "continuous": 11 | s = cast(tuple[float, float], s) 12 | if len(s) == 2: 13 | try: 14 | x1, x2 = float(s[0]), float(s[1]) 15 | if x1 <= x2: 16 | return cast(tuple[float, float], (x1, x2)) 17 | except Exception: 18 | pass 19 | raise ValueError( 20 | f"Invalid continuous set {s}; valid continuous sets it must be a tuple of two numbers x1, x2 such that x2 >= x1" 21 | ) 22 | else: 23 | return cast(set, 
s) 24 | 25 | 26 | def element_of(x, s, type: str = "continuous") -> bool: 27 | """ 28 | Check if x is an element of s. 29 | """ 30 | valid_set = validate_set(s, type=type) 31 | if type == "continuous": 32 | valid_set = cast(tuple[float, float], valid_set) 33 | return (x >= valid_set[0]) & (x <= valid_set[1]) 34 | else: 35 | return np.isin(list(x), list(cast(set, valid_set))) 36 | 37 | 38 | def is_subset(s1, s2, type: str = "continuous", proper: bool = False) -> bool: 39 | """ 40 | Check if the set x1 is a subset of x2. 41 | """ 42 | validate_set(s1, type=type) 43 | validate_set(s2, type=type) 44 | if type == "continuous": 45 | s1 = cast(tuple[float, float], s1) 46 | s2 = cast(tuple[float, float], s2) 47 | if proper: 48 | if (s1[0] > s2[0]) and (s1[1] < s2[1]): 49 | return True 50 | else: 51 | if (s1[0] >= s2[0]) and (s1[1] <= s2[1]): 52 | return True 53 | return False 54 | else: 55 | s1 = cast(set, s1) 56 | s2 = cast(set, s2) 57 | return np.isin(list(s1), list(s2)).all() 58 | 59 | 60 | def union(s1, s2, type: str = "continuous") -> tuple: 61 | """ 62 | Compute the union of sets x1 and x2. 63 | """ 64 | validate_set(s1, type=type) 65 | validate_set(s2, type=type) 66 | if type == "continuous": 67 | s1 = cast(tuple[float, float], s1) 68 | s2 = cast(tuple[float, float], s2) 69 | new_min, new_max = min(s1[0], s2[0]), max(s1[1], s2[1]) 70 | if new_min <= new_max: 71 | return (new_min, new_max) 72 | return None 73 | else: 74 | s1 = cast(set, s1) 75 | s2 = cast(set, s2) 76 | return s1 | s2 77 | 78 | 79 | def intersection(s1, s2, type: str = "continuous") -> tuple: 80 | """ 81 | Compute the intersection of sets x1 and x2. 82 | """ 83 | validate_set(s1, type=type) 84 | validate_set(s2, type=type) 85 | if type == "continuous": 86 | s1 = cast(tuple[float, float], s1) 87 | s2 = cast(tuple[float, float], s2) 88 | new_min, new_max = max(s1[0], s2[0]), min(s1[1], s2[1]) 89 | if new_min <= new_max: 90 | return (new_min, new_max) 91 | return None 92 | else: 93 | s1 = cast(set, s1) 94 | s2 = cast(set, s2) 95 | return s1 & s2 96 | --------------------------------------------------------------------------------