├── optimas ├── generators │ ├── ax │ │ ├── __init__.py │ │ ├── developer │ │ │ ├── __init__.py │ │ │ └── ax_metric.py │ │ ├── service │ │ │ └── __init__.py │ │ ├── import_error_dummy_generator.py │ │ └── base.py │ ├── external.py │ ├── __init__.py │ ├── grid_sampling.py │ ├── random_sampling.py │ └── line_sampling.py ├── __init__.py ├── utils │ ├── __init__.py │ ├── ax │ │ ├── __init__.py │ │ └── other.py │ ├── logger.py │ └── other.py ├── explorations │ └── __init__.py ├── diagnostics │ └── __init__.py ├── evaluators │ ├── __init__.py │ ├── function_evaluator.py │ ├── base.py │ ├── multitask_evaluator.py │ └── chain_evaluator.py └── core │ ├── __init__.py │ ├── task.py │ └── evaluation.py ├── setup.py ├── tests ├── resources │ ├── env_script.sh │ ├── template_simulation_script_2.py │ ├── template_simulation_script_moo.py │ └── template_simulation_script.py ├── test_exploration_run_exception.py ├── test_comms.py ├── test_env_script.py ├── test_grid_sampling.py ├── test_grid_sampling_mpi.py ├── test_line_sampling.py ├── test_analyzed_parameters.py ├── test_chain_evaluator.py ├── test_random_sampling.py └── test_template_evaluator.py ├── doc ├── source │ ├── _static │ │ ├── logo.png │ │ └── favicon_128x128.png │ ├── _templates │ │ └── autosummary │ │ │ ├── base.rst │ │ │ └── class.rst │ ├── api │ │ ├── utils.rst │ │ ├── exploration.rst │ │ ├── diagnostics.rst │ │ ├── evaluators.rst │ │ ├── core.rst │ │ ├── index.rst │ │ └── generators.rst │ ├── examples │ │ ├── index.rst │ │ ├── bo_basic.rst │ │ ├── bo_with_hipace.rst │ │ ├── bo_with_warpx.rst │ │ ├── bo_multitask_fbpic_waket.rst │ │ ├── ps_grid_sampling.rst │ │ ├── bo_with_fbpic.rst │ │ ├── bo_with_astra.rst │ │ ├── ps_line_sampling.rst │ │ └── ps_random_sampling.rst │ ├── user_guide │ │ ├── index.rst │ │ ├── citation.rst │ │ ├── installation_local.rst │ │ ├── installation_perlmutter.rst │ │ ├── dependencies.rst │ │ ├── installation_maxwell.rst │ │ ├── installation_juwels.rst │ │ └── basic_usage │ │ │ ├── analyze_output.rst │ │ │ └── exploration_diagnostics.ipynb │ └── index.rst ├── environment.yaml ├── Makefile └── make.bat ├── .gitignore ├── examples ├── wake_t │ ├── analysis_script.py │ ├── template_simulation_script.py │ └── run_example.py ├── wake_t_fbpic_mt │ ├── analysis_script.py │ └── run_example.py ├── dummy │ ├── template_simulation_script.py │ └── run_example.py ├── dummy_random │ ├── template_simulation_script.py │ └── run_example.py ├── dummy_grid_sampling │ ├── template_simulation_script.py │ └── run_example.py ├── dummy_line_sampling │ ├── template_simulation_script.py │ └── run_example.py ├── dummy_mf │ ├── template_simulation_script.py │ └── run_example.py ├── dummy_mt │ ├── template_simulation_script.py │ └── run_example.py ├── astra │ ├── ASTRA_example.in │ ├── analysis_script.py │ └── run_optimization_serial_ASTRA.py ├── hipace │ ├── template_simulation_script │ ├── run_example.py │ └── analysis_script.py ├── multi_stage │ ├── run_example.py │ └── analysis_script.py ├── ionization_injection │ ├── run_example.py │ └── analysis_script.py ├── ionization_injection_mf │ ├── run_example.py │ └── analysis_script.py └── multitask_lpa_fbpic_waket │ ├── run_opt.py │ ├── analysis_script.py │ └── bunch_utils.py ├── .readthedocs.yaml ├── legal.txt ├── .github └── workflows │ ├── publish-to-pypi.yml │ ├── unix.yml │ ├── unix-openmpi.yml │ └── unix-noax.yml ├── pyproject.toml ├── .zenodo.json ├── license.txt └── .pre-commit-config.yaml /optimas/generators/ax/__init__.py: 
-------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /optimas/generators/ax/developer/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /optimas/generators/ax/service/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /optimas/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = "0.9.0" 2 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | 3 | setup() 4 | -------------------------------------------------------------------------------- /optimas/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .ax import AxModelManager 2 | 3 | __all__ = ["AxModelManager"] 4 | -------------------------------------------------------------------------------- /tests/resources/env_script.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export LIBE_TEST_SUB_ENV_VAR="testvalue" 3 | -------------------------------------------------------------------------------- /optimas/explorations/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import Exploration 2 | 3 | 4 | __all__ = ["Exploration"] 5 | -------------------------------------------------------------------------------- /doc/source/_static/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/optimas-org/optimas/HEAD/doc/source/_static/logo.png -------------------------------------------------------------------------------- /optimas/utils/ax/__init__.py: -------------------------------------------------------------------------------- 1 | from .ax_model_manager import AxModelManager 2 | 3 | __all__ = ["AxModelManager"] 4 | -------------------------------------------------------------------------------- /doc/source/_static/favicon_128x128.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/optimas-org/optimas/HEAD/doc/source/_static/favicon_128x128.png -------------------------------------------------------------------------------- /optimas/diagnostics/__init__.py: -------------------------------------------------------------------------------- 1 | from .exploration_diagnostics import ExplorationDiagnostics 2 | 3 | __all__ = ["ExplorationDiagnostics"] 4 | -------------------------------------------------------------------------------- /doc/source/_templates/autosummary/base.rst: -------------------------------------------------------------------------------- 1 | {{ name | escape | underline}} 2 | 3 | .. currentmodule:: {{ module }} 4 | 5 | .. 
auto{{ objtype }}:: {{ objname }} 6 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Python cache 2 | __pycache__ 3 | 4 | # IDE and development directories 5 | .venv 6 | .vscode 7 | 8 | # Installation folders 9 | build 10 | optimas.egg-info 11 | -------------------------------------------------------------------------------- /doc/source/api/utils.rst: -------------------------------------------------------------------------------- 1 | Utilities 2 | ========= 3 | 4 | .. currentmodule:: optimas.utils 5 | 6 | .. autosummary:: 7 | :toctree: _autosummary 8 | 9 | AxModelManager 10 | -------------------------------------------------------------------------------- /doc/source/api/exploration.rst: -------------------------------------------------------------------------------- 1 | Explorations 2 | ============ 3 | 4 | .. currentmodule:: optimas.explorations 5 | 6 | .. autosummary:: 7 | :toctree: _autosummary 8 | 9 | Exploration 10 | -------------------------------------------------------------------------------- /doc/source/api/diagnostics.rst: -------------------------------------------------------------------------------- 1 | Diagnostics 2 | =========== 3 | 4 | .. currentmodule:: optimas.diagnostics 5 | 6 | .. autosummary:: 7 | :toctree: _autosummary 8 | 9 | ExplorationDiagnostics 10 | -------------------------------------------------------------------------------- /doc/source/api/evaluators.rst: -------------------------------------------------------------------------------- 1 | Evaluators 2 | ========== 3 | 4 | .. currentmodule:: optimas.evaluators 5 | 6 | .. autosummary:: 7 | :toctree: _autosummary 8 | 9 | FunctionEvaluator 10 | TemplateEvaluator 11 | MultitaskEvaluator 12 | ChainEvaluator 13 | -------------------------------------------------------------------------------- /doc/source/api/core.rst: -------------------------------------------------------------------------------- 1 | Core 2 | ==== 3 | 4 | .. currentmodule:: optimas.core 5 | 6 | .. autosummary:: 7 | :toctree: _autosummary 8 | 9 | Parameter 10 | VaryingParameter 11 | TrialParameter 12 | Objective 13 | Trial 14 | Evaluation 15 | Task 16 | -------------------------------------------------------------------------------- /doc/source/api/index.rst: -------------------------------------------------------------------------------- 1 | API reference 2 | ============= 3 | 4 | This reference manual details all classes included in optimas. 5 | 6 | .. 
toctree:: 7 | :maxdepth: 2 8 | 9 | core 10 | generators 11 | evaluators 12 | exploration 13 | diagnostics 14 | utils 15 | -------------------------------------------------------------------------------- /examples/wake_t/analysis_script.py: -------------------------------------------------------------------------------- 1 | """Defines the analysis function that runs after the simulation.""" 2 | 3 | import numpy as np 4 | 5 | 6 | def analyze_simulation(simulation_directory, output_params): 7 | """Read the simulation result and give it back to optimas.""" 8 | a_x_abs = np.load("a_x_abs.npy") 9 | output_params["f"] = a_x_abs 10 | 11 | return output_params 12 | -------------------------------------------------------------------------------- /optimas/evaluators/__init__.py: -------------------------------------------------------------------------------- 1 | from .function_evaluator import FunctionEvaluator 2 | from .template_evaluator import TemplateEvaluator 3 | from .multitask_evaluator import MultitaskEvaluator 4 | from .chain_evaluator import ChainEvaluator 5 | 6 | 7 | __all__ = [ 8 | "FunctionEvaluator", 9 | "TemplateEvaluator", 10 | "MultitaskEvaluator", 11 | "ChainEvaluator", 12 | ] 13 | -------------------------------------------------------------------------------- /examples/wake_t_fbpic_mt/analysis_script.py: -------------------------------------------------------------------------------- 1 | """Defines the analysis function that runs after the simulation.""" 2 | 3 | import numpy as np 4 | 5 | 6 | def analyze_simulation(simulation_directory, output_params): 7 | """Read the simulation result and give it back to optimas.""" 8 | a_x_abs = np.loadtxt("a_x_abs.txt") 9 | output_params["f"] = a_x_abs 10 | 11 | return output_params 12 | -------------------------------------------------------------------------------- /doc/source/api/generators.rst: -------------------------------------------------------------------------------- 1 | .. _generators: 2 | 3 | Generators 4 | ========== 5 | 6 | .. currentmodule:: optimas.generators 7 | 8 | .. autosummary:: 9 | :toctree: _autosummary 10 | 11 | GridSamplingGenerator 12 | LineSamplingGenerator 13 | RandomSamplingGenerator 14 | AxSingleFidelityGenerator 15 | AxMultiFidelityGenerator 16 | AxMultitaskGenerator 17 | AxClientGenerator 18 | -------------------------------------------------------------------------------- /optimas/core/__init__.py: -------------------------------------------------------------------------------- 1 | from .evaluation import Evaluation 2 | from .parameter import Parameter, VaryingParameter, TrialParameter, Objective 3 | from .task import Task 4 | from .trial import Trial, TrialStatus 5 | 6 | 7 | __all__ = [ 8 | "Evaluation", 9 | "Parameter", 10 | "VaryingParameter", 11 | "TrialParameter", 12 | "Objective", 13 | "Task", 14 | "Trial", 15 | "TrialStatus", 16 | ] 17 | -------------------------------------------------------------------------------- /tests/resources/template_simulation_script_2.py: -------------------------------------------------------------------------------- 1 | """Dummy simulation template used for testing a ChainedEvaluator. 2 | 3 | The template takes the result from a previous evaluation and uses 4 | it to perform a followup evaluation. 
5 | """ 6 | 7 | with open("result.txt", "r") as f: 8 | result_1 = float(f.read()) 9 | 10 | result_2 = 2 * result_1 11 | 12 | with open("result_2.txt", "w") as f: 13 | f.write("%f" % result_2) 14 | -------------------------------------------------------------------------------- /doc/environment.yaml: -------------------------------------------------------------------------------- 1 | name: readthedocs 2 | dependencies: 3 | - python=3.11 4 | - mpi4py 5 | - pip 6 | - pip: 7 | - -e .. 8 | - ax-platform >= 0.5.0, < 1.0.0 9 | - autodoc_pydantic >= 2.0.1 10 | - ipykernel 11 | - matplotlib 12 | - nbsphinx 13 | - numpydoc 14 | - git+https://github.com/campa-consortium/gest-api.git 15 | - pydata-sphinx-theme 16 | - sphinx-copybutton 17 | - sphinx-design 18 | - sphinx-gallery 19 | -------------------------------------------------------------------------------- /doc/source/examples/index.rst: -------------------------------------------------------------------------------- 1 | .. _examples: 2 | 3 | Examples 4 | ======== 5 | 6 | .. toctree:: 7 | :caption: Parameter sampling 8 | :maxdepth: 1 9 | 10 | ps_random_sampling 11 | ps_grid_sampling 12 | ps_line_sampling 13 | 14 | 15 | .. toctree:: 16 | :caption: Bayesian optimization 17 | :maxdepth: 1 18 | 19 | bo_basic 20 | bo_with_fbpic 21 | bo_with_hipace 22 | bo_multitask_fbpic_waket 23 | bo_with_warpx 24 | bo_with_astra 25 | -------------------------------------------------------------------------------- /examples/dummy/template_simulation_script.py: -------------------------------------------------------------------------------- 1 | """Simple template script used for demonstration. 2 | 3 | The script evaluates an analytical expression and stores the results in a 4 | `result.txt` file that is later read by the analysis function. 5 | """ 6 | 7 | import numpy as np 8 | 9 | # 2D function with multiple minima 10 | result = -({{x0}} + 10 * np.cos({{x0}})) * ({{x1}} + 5 * np.cos({{x1}})) 11 | 12 | with open("result.txt", "w") as f: 13 | f.write("%f" % result) 14 | -------------------------------------------------------------------------------- /examples/dummy_random/template_simulation_script.py: -------------------------------------------------------------------------------- 1 | """Simple template script used for demonstration. 2 | 3 | The script evaluates an analytical expression and stores the results in a 4 | `result.txt` file that is later read by the analysis function. 5 | """ 6 | 7 | import numpy as np 8 | 9 | # 2D function with multiple minima 10 | result = -({{x0}} + 10 * np.cos({{x0}})) * ({{x1}} + 5 * np.cos({{x1}})) 11 | 12 | with open("result.txt", "w") as f: 13 | f.write("%f" % result) 14 | -------------------------------------------------------------------------------- /examples/dummy_grid_sampling/template_simulation_script.py: -------------------------------------------------------------------------------- 1 | """Simple template script used for demonstration. 2 | 3 | The script evaluates an analytical expression and stores the results in a 4 | `result.txt` file that is later read by the analysis function. 
5 | """ 6 | 7 | import numpy as np 8 | 9 | # 2D function with multiple minima 10 | result = -({{x0}} + 10 * np.cos({{x0}})) * ({{x1}} + 5 * np.cos({{x1}})) 11 | 12 | with open("result.txt", "w") as f: 13 | f.write("%f" % result) 14 | -------------------------------------------------------------------------------- /examples/dummy_line_sampling/template_simulation_script.py: -------------------------------------------------------------------------------- 1 | """Simple template script used for demonstration. 2 | 3 | The script evaluates an analytical expression and stores the results in a 4 | `result.txt` file that is later read by the analysis function. 5 | """ 6 | 7 | import numpy as np 8 | 9 | # 2D function with multiple minima 10 | result = -({{x0}} + 10 * np.cos({{x0}})) * ({{x1}} + 5 * np.cos({{x1}})) 11 | 12 | with open("result.txt", "w") as f: 13 | f.write("%f" % result) 14 | -------------------------------------------------------------------------------- /examples/dummy_mf/template_simulation_script.py: -------------------------------------------------------------------------------- 1 | """Simple template script used for demonstration. 2 | 3 | The script evaluates an analytical expression and stores the results in a 4 | `result.txt` file that is later read by the analysis function. 5 | """ 6 | 7 | import numpy as np 8 | 9 | # 2D function with multiple minima 10 | result = -( 11 | ({{x0}} + 10 * np.cos({{x0}} + 0.1 * {{resolution}})) 12 | * ({{x1}} + 5 * np.cos({{x1}} - 0.2 * {{resolution}})) 13 | ) 14 | 15 | with open("result.txt", "w") as f: 16 | f.write("%f" % result) 17 | -------------------------------------------------------------------------------- /tests/resources/template_simulation_script_moo.py: -------------------------------------------------------------------------------- 1 | """Dummy simulation template used for MOO testing. 2 | 3 | The template takes two parameters x0 and x1 as input 4 | and stores the result in `f1.txt` and `f2.txt`. 
5 | """ 6 | 7 | import numpy as np 8 | 9 | 10 | # 2D function with multiple minima 11 | f1 = -({{x0}} + 10 * np.cos({{x0}})) * ({{x1}} + 5 * np.cos({{x1}})) 12 | f2 = -({{x0}} + 10 * np.cos({{x0}} + 10)) * ({{x1}} + 5 * np.cos({{x1}} - 5)) 13 | 14 | with open("f1.txt", "w") as f: 15 | f.write("%f" % f1) 16 | 17 | with open("f2.txt", "w") as f: 18 | f.write("%f" % f2) 19 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yaml 2 | # Read the Docs configuration file 3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | # Required 6 | version: 2 7 | 8 | # Set the version of Python and other tools you might need 9 | build: 10 | os: "ubuntu-22.04" 11 | tools: 12 | python: "mambaforge-22.9" 13 | 14 | conda: 15 | environment: doc/environment.yaml 16 | 17 | # Build documentation in the docs/ directory with Sphinx 18 | sphinx: 19 | configuration: doc/source/conf.py 20 | fail_on_warning: true 21 | 22 | # Optionally build your docs in additional formats such as PDF 23 | formats: 24 | - pdf 25 | -------------------------------------------------------------------------------- /optimas/generators/ax/import_error_dummy_generator.py: -------------------------------------------------------------------------------- 1 | """Contains the definition of dummy generator that raises an import error.""" 2 | 3 | 4 | class AxImportErrorDummyGenerator(object): 5 | """Class that raises an error when instantiated, telling the user to install ax-platform. 6 | 7 | This class replaces all other Ax-based classes, 8 | when Ax is not installed 9 | """ 10 | 11 | def __init__(self, *args, **kwargs) -> None: 12 | raise RuntimeError( 13 | "You need to install ax-platform, in order " 14 | "to use Ax-based generators in optimas.\n" 15 | "e.g. with `pip install 'ax-platform<1.0.0'`" 16 | ) 17 | -------------------------------------------------------------------------------- /doc/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = source 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /examples/dummy_mt/template_simulation_script.py: -------------------------------------------------------------------------------- 1 | """Simple template script used for demonstration. 2 | 3 | The script evaluates an analytical expression and stores the results in a 4 | `result.txt` file that is later read by the analysis function. 
5 | """ 6 | 7 | import numpy as np 8 | import time 9 | 10 | task = {{task}} 11 | 12 | if task == "expensive_model": 13 | resolution = 3 14 | elif task == "cheap_model": 15 | resolution = 1 16 | 17 | # 2D function with multiple minima 18 | result = -( 19 | ({{x0}} + 10 * np.cos({{x0}} + 0.1 * resolution)) 20 | * ({{x1}} + 5 * np.cos({{x1}} - 0.2 * resolution)) 21 | ) 22 | time.sleep(resolution) 23 | 24 | with open("result.txt", "w") as f: 25 | f.write("%f" % result) 26 | -------------------------------------------------------------------------------- /optimas/generators/external.py: -------------------------------------------------------------------------------- 1 | """Contains the definition of an external generator.""" 2 | 3 | from .base import Generator 4 | 5 | 6 | class ExternalGenerator(Generator): 7 | """Supports a generator in the CAMPA generator standard.""" 8 | 9 | def __init__( 10 | self, 11 | ext_gen, 12 | **kwargs, 13 | ): 14 | super().__init__( 15 | **kwargs, 16 | ) 17 | self.gen = ext_gen 18 | 19 | def suggest(self, n_trials): 20 | """Request the next set of points to evaluate.""" 21 | return self.gen.suggest(n_trials) 22 | 23 | def ingest(self, trials): 24 | """Send the results of evaluations to the generator.""" 25 | self.gen.ingest(trials) 26 | -------------------------------------------------------------------------------- /doc/source/user_guide/index.rst: -------------------------------------------------------------------------------- 1 | .. _user-guide: 2 | 3 | User guide 4 | ========== 5 | 6 | .. toctree:: 7 | :maxdepth: 1 8 | :caption: Installation 9 | 10 | dependencies 11 | installation_local 12 | installation_maxwell 13 | installation_juwels 14 | installation_perlmutter 15 | 16 | .. toctree:: 17 | :maxdepth: 2 18 | :caption: Basic usage 19 | 20 | basic_usage/basic_setup 21 | basic_usage/running_with_simulations 22 | basic_usage/analyze_output 23 | basic_usage/exploration_diagnostics 24 | 25 | .. toctree:: 26 | :maxdepth: 2 27 | :caption: Advanced usage 28 | 29 | advanced_usage/build_gp_surrogates 30 | 31 | .. toctree:: 32 | :maxdepth: 1 33 | :caption: Citation 34 | 35 | citation 36 | -------------------------------------------------------------------------------- /optimas/core/task.py: -------------------------------------------------------------------------------- 1 | """Contains the definition of the Task class for multitask optimization.""" 2 | 3 | from pydantic import BaseModel 4 | 5 | 6 | class Task(BaseModel): 7 | """Defines a task to be used in multitask optimization. 8 | 9 | Parameters 10 | ---------- 11 | name : str 12 | Name of the task. 13 | n_init : int 14 | Number of task evaluations to perform in the initialization batch. 15 | n_opt : int 16 | Number of task evaluations to perform per optimization batch. 
17 | 18 | """ 19 | 20 | name: str 21 | n_init: int 22 | n_opt: int 23 | 24 | def __init__(self, name: str, n_init: int, n_opt: int) -> None: 25 | super().__init__(name=name, n_init=n_init, n_opt=n_opt) 26 | -------------------------------------------------------------------------------- /examples/astra/ASTRA_example.in: -------------------------------------------------------------------------------- 1 | &NEWRUN 2 | Head=' Example of ASTRA users manual' 3 | RUN=1 4 | Distribution = 'Example.ini', Xoff=0.0, Yoff=0.0, 5 | TRACK_ALL=T, Auto_phase=F 6 | 7 | H_max=0.001, H_min=0.00 8 | / 9 | 10 | 11 | &OUTPUT 12 | ZSTART=0.0, ZSTOP=2.5 13 | Zemit=500, Zphase=1 14 | RefS=T 15 | EmitS=T, PhaseS=T 16 | / 17 | 18 | 19 | &CHARGE 20 | LSPCH=F 21 | Nrad=10, Cell_var=2.0, Nlong_in=10 22 | min_grid=0.0 23 | Max_Scale=0.05 24 | / 25 | 26 | 27 | &CAVITY 28 | LEField=T, 29 | File_Efield(1)='3_cell_L-Band.dat', C_pos(1)=0.3 30 | Nue(1)=1.3, MaxE(1)=40.0, Phi(1)={{RF_phase}}, 31 | / 32 | 33 | &SOLENOID 34 | LBField=T, 35 | File_Bfield(1)='Solenoid.dat', S_pos(1)=1.2 36 | MaxB(1)={{B_sol}}, S_smooth(1)=10 37 | / 38 | -------------------------------------------------------------------------------- /doc/source/_templates/autosummary/class.rst: -------------------------------------------------------------------------------- 1 | {{ objname | escape | underline}} 2 | 3 | .. currentmodule:: {{ module }} 4 | 5 | .. autoclass:: {{ objname }} 6 | 7 | {% block methods %} 8 | 9 | {% if methods %} 10 | .. rubric:: {{ _('Methods') }} 11 | 12 | .. autosummary:: 13 | :toctree: 14 | {% for item in methods %} 15 | {% if not item in skipmethods %} 16 | ~{{ name }}.{{ item }} 17 | {% endif %} 18 | {%- endfor %} 19 | {% endif %} 20 | {% endblock %} 21 | 22 | {% block attributes %} 23 | {% if attributes %} 24 | .. rubric:: {{ _('Attributes') }} 25 | 26 | .. autosummary:: 27 | :toctree: 28 | {% for item in attributes %} 29 | {% if item.0 != item.upper().0 %} 30 | ~{{ name }}.{{ item }} 31 | {% endif %} 32 | {%- endfor %} 33 | {% endif %} 34 | {% endblock %} 35 | -------------------------------------------------------------------------------- /legal.txt: -------------------------------------------------------------------------------- 1 | optimas Copyright (c) 2023, The Regents of the University of California, 2 | through Lawrence Berkeley National Laboratory (subject to receipt of 3 | any required approvals from the U.S. Dept. of Energy) and Deutsches 4 | Elektronen-Synchrotron . All rights reserved. 5 | 6 | If you have questions about your rights to use or distribute this software, 7 | please contact Berkeley Lab's Intellectual Property Office at 8 | IPO@lbl.gov. 9 | 10 | NOTICE. This Software was developed under funding from the U.S. Department 11 | of Energy and the U.S. Government consequently retains certain rights. As 12 | such, the U.S. Government has been granted for itself and others acting on 13 | its behalf a paid-up, nonexclusive, irrevocable, worldwide license in the 14 | Software to reproduce, distribute copies to the public, prepare derivative 15 | works, and perform publicly and display publicly, and to permit others to do so. 16 | -------------------------------------------------------------------------------- /tests/resources/template_simulation_script.py: -------------------------------------------------------------------------------- 1 | """Dummy simulation template used for testing. 2 | 3 | The template takes two parameters x0 and x1 as input 4 | and stores the result in `result.txt`. 
5 | """ 6 | 7 | import os 8 | import time 9 | import numpy as np 10 | 11 | test_env_var = os.getenv("LIBE_TEST_SUB_ENV_VAR") 12 | sleep = os.getenv("OPTIMAS_TEST_SLEEP") 13 | cuda_visible_devices = os.getenv("CUDA_VISIBLE_DEVICES") 14 | 15 | if sleep is not None: 16 | time.sleep(float(sleep)) 17 | 18 | # 2D function with multiple minima 19 | result = -({{x0}} + 10 * np.cos({{x0}})) * ({{x1}} + 5 * np.cos({{x1}})) 20 | 21 | with open("result.txt", "w") as f: 22 | output = [str(result) + "\n"] 23 | if test_env_var is not None: 24 | output.append(test_env_var) 25 | f.writelines(output) 26 | 27 | if cuda_visible_devices: 28 | with open("cuda_visible_devices.txt", "w") as f: 29 | f.write(cuda_visible_devices) 30 | -------------------------------------------------------------------------------- /doc/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=source 11 | set BUILDDIR=build 12 | 13 | if "%1" == "" goto help 14 | 15 | %SPHINXBUILD% >NUL 2>NUL 16 | if errorlevel 9009 ( 17 | echo. 18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 19 | echo.installed, then set the SPHINXBUILD environment variable to point 20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 21 | echo.may add the Sphinx directory to PATH. 22 | echo. 23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.https://www.sphinx-doc.org/ 25 | exit /b 1 26 | ) 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /optimas/utils/logger.py: -------------------------------------------------------------------------------- 1 | """Definition of logging utilities.""" 2 | 3 | import sys 4 | import logging 5 | 6 | 7 | def get_logger(name, level=logging.INFO) -> logging.Logger: 8 | """Get a logger. 9 | 10 | Parameters 11 | ---------- 12 | name : str 13 | Name of the logger. 14 | level : int or str, optional 15 | Logging level, by default logging.INFO 16 | 17 | Returns 18 | ------- 19 | logging.Logger 20 | 21 | """ 22 | # Create logger. 23 | logger = logging.getLogger(name) 24 | 25 | # Set level. 26 | logger.setLevel(level) 27 | 28 | # Set up format. 29 | formatter = logging.Formatter( 30 | fmt="[%(levelname)s %(asctime)s] %(name)s: %(message)s", 31 | datefmt="%m-%d %H:%M:%S", 32 | ) 33 | 34 | # Add handler to log to standard output. 
35 | sth = logging.StreamHandler(stream=sys.stdout) 36 | sth.setFormatter(formatter) 37 | logger.addHandler(sth) 38 | return logger 39 | -------------------------------------------------------------------------------- /.github/workflows/publish-to-pypi.yml: -------------------------------------------------------------------------------- 1 | name: Publish package to PyPI 2 | 3 | on: 4 | release: 5 | types: [created] 6 | 7 | jobs: 8 | build-n-publish: 9 | name: Build and publish package to PyPI 10 | runs-on: ubuntu-latest 11 | permissions: 12 | # IMPORTANT: this permission is mandatory for trusted publishing 13 | id-token: write 14 | steps: 15 | - uses: actions/checkout@v3 16 | - name: Set up Python 3.11 17 | uses: actions/setup-python@v4 18 | with: 19 | python-version: 3.11 20 | - name: Install pypa/build 21 | run: >- 22 | python3 -m 23 | pip install 24 | build 25 | --user 26 | - name: Build a binary wheel and a source tarball 27 | run: >- 28 | python3 -m 29 | build 30 | --sdist 31 | --wheel 32 | --outdir dist/ 33 | . 34 | - name: Publish package distribution to PyPI 35 | uses: pypa/gh-action-pypi-publish@release/v1 36 | -------------------------------------------------------------------------------- /optimas/generators/ax/developer/ax_metric.py: -------------------------------------------------------------------------------- 1 | """Contains the definition of the Ax metric used for multitask optimization.""" 2 | 3 | import pandas as pd 4 | from ax import Metric 5 | from ax.core.batch_trial import BatchTrial 6 | from ax.core.data import Data 7 | from ax.utils.common.result import Ok 8 | 9 | 10 | class AxMetric(Metric): 11 | """Custom metric to be optimized during the experiment.""" 12 | 13 | def fetch_trial_data(self, trial: BatchTrial): 14 | """Fetch data for one trial.""" 15 | records = [] 16 | for arm_name, arm in trial.arms_by_name.items(): 17 | mean, sem = trial.run_metadata[arm_name]["f"] 18 | records.append( 19 | { 20 | "arm_name": arm_name, 21 | "metric_name": self.name, 22 | "trial_index": trial.index, 23 | "mean": mean, 24 | "sem": sem, 25 | } 26 | ) 27 | return Ok(value=Data(df=pd.DataFrame.from_records(records))) 28 | -------------------------------------------------------------------------------- /.github/workflows/unix.yml: -------------------------------------------------------------------------------- 1 | name: Unix-MPICH 2 | 3 | on: 4 | pull_request: 5 | # Run daily at midnight (UTC). 
6 | schedule: 7 | - cron: '0 0 * * *' 8 | 9 | jobs: 10 | test: 11 | runs-on: ubuntu-latest 12 | strategy: 13 | matrix: 14 | python-version: ['3.10', 3.11, 3.12] 15 | 16 | steps: 17 | - uses: actions/checkout@v4 18 | - uses: conda-incubator/setup-miniconda@v3 19 | name: Setup conda 20 | with: 21 | auto-update-conda: true 22 | activate-environment: testing 23 | auto-activate-base: false 24 | channels: defaults 25 | channel-priority: true 26 | python-version: ${{ matrix.python-version }} 27 | 28 | - shell: bash -l {0} 29 | name: Install dependencies 30 | run: | 31 | conda install -c conda-forge pytorch-cpu 32 | conda install -c pytorch numpy pandas 33 | conda install -c conda-forge mpi4py mpich 34 | pip install .[test] 35 | pip install git+https://github.com/campa-consortium/gest-api.git 36 | - shell: bash -l {0} 37 | name: Run unit tests with MPICH 38 | run: | 39 | python -m pytest tests/ 40 | mpirun -np 3 python -m pytest --with-mpi tests/test_grid_sampling_mpi.py 41 | -------------------------------------------------------------------------------- /optimas/core/evaluation.py: -------------------------------------------------------------------------------- 1 | """Contains the definition of the ``Evaluation`` class.""" 2 | 3 | from typing import Optional 4 | 5 | from .parameter import Parameter 6 | 7 | 8 | class Evaluation: 9 | """Class used to store the evaluation of a parameter. 10 | 11 | The evaluation consists of the observed value and the observation noise. 12 | 13 | Parameters 14 | ---------- 15 | parameter : Parameter 16 | The parameter that has been evaluated. 17 | value : float 18 | The observed value of the evaluation. 19 | sem : float, optional 20 | The observation noise of the evaluation. 21 | """ 22 | 23 | def __init__( 24 | self, parameter: Parameter, value: float, sem: Optional[float] = None 25 | ) -> None: 26 | self._parameter = parameter 27 | self._value = value 28 | self._sem = sem 29 | 30 | @property 31 | def parameter(self) -> Parameter: 32 | """Get the evaluated parameter.""" 33 | return self._parameter 34 | 35 | @property 36 | def value(self) -> float: 37 | """Get the evaluation value.""" 38 | return self._value 39 | 40 | @property 41 | def sem(self) -> float: 42 | """Get the evaluation noise.""" 43 | return self._sem 44 | -------------------------------------------------------------------------------- /.github/workflows/unix-openmpi.yml: -------------------------------------------------------------------------------- 1 | name: Unix-OpenMPI 2 | 3 | on: 4 | pull_request: 5 | # Run daily at midnight (UTC). 
6 | schedule: 7 | - cron: '0 0 * * *' 8 | 9 | jobs: 10 | test: 11 | runs-on: ubuntu-latest 12 | strategy: 13 | matrix: 14 | python-version: ['3.10', 3.11, 3.12] 15 | 16 | steps: 17 | - uses: actions/checkout@v4 18 | - uses: conda-incubator/setup-miniconda@v3 19 | name: Setup conda 20 | with: 21 | auto-update-conda: true 22 | activate-environment: testing 23 | auto-activate-base: false 24 | channels: defaults 25 | channel-priority: true 26 | python-version: ${{ matrix.python-version }} 27 | 28 | - shell: bash -l {0} 29 | name: Install dependencies 30 | run: | 31 | conda install -c conda-forge pytorch-cpu 32 | conda install -c pytorch numpy pandas 33 | conda install -c conda-forge mpi4py openmpi=5.* 34 | pip install .[test] 35 | pip install git+https://github.com/campa-consortium/gest-api.git 36 | - shell: bash -l {0} 37 | name: Run unit tests with openMPI 38 | run: | 39 | python -m pytest tests/ 40 | mpirun -np 3 --oversubscribe python -m pytest --with-mpi tests/test_grid_sampling_mpi.py 41 | -------------------------------------------------------------------------------- /.github/workflows/unix-noax.yml: -------------------------------------------------------------------------------- 1 | name: Unix-Without-Ax 2 | 3 | on: 4 | pull_request: 5 | # Run daily at midnight (UTC). 6 | schedule: 7 | - cron: '0 0 * * *' 8 | 9 | jobs: 10 | test: 11 | runs-on: ubuntu-latest 12 | strategy: 13 | matrix: 14 | python-version: ['3.10', 3.11, 3.12] 15 | 16 | steps: 17 | - uses: actions/checkout@v4 18 | - uses: conda-incubator/setup-miniconda@v3 19 | name: Setup conda 20 | with: 21 | auto-update-conda: true 22 | activate-environment: testing 23 | auto-activate-base: false 24 | channels: defaults 25 | channel-priority: true 26 | python-version: ${{ matrix.python-version }} 27 | 28 | - shell: bash -l {0} 29 | name: Install dependencies 30 | run: | 31 | conda install numpy pandas pytorch cpuonly -c pytorch 32 | conda install -c conda-forge mpi4py mpich 33 | pip install .[test] 34 | pip install git+https://github.com/campa-consortium/gest-api.git 35 | pip uninstall --yes ax-platform # Run without Ax 36 | - shell: bash -l {0} 37 | name: Run unit tests without Ax 38 | run: | 39 | python -m pytest tests/ --ignore=tests/test_ax_generators.py --ignore=tests/test_ax_model_manager.py --ignore=tests/test_gpu_resources.py 40 | mpirun -np 3 python -m pytest --with-mpi tests/test_grid_sampling_mpi.py 41 | -------------------------------------------------------------------------------- /examples/hipace/template_simulation_script: -------------------------------------------------------------------------------- 1 | max_step = 300 2 | amr.n_cell = 256 256 256 3 | 4 | amr.max_level = 0 5 | 6 | hipace.max_time = 0.3/clight 7 | diagnostic.output_period = 30 8 | hipace.verbose = 1 9 | 10 | hipace.depos_order_xy = 2 11 | hipace.dt = adaptive 12 | hipace.nt_per_betatron = 30 13 | 14 | boundary.field = Periodic 15 | boundary.particle = Periodic 16 | 17 | geometry.prob_lo = -250.e-6 -250.e-6 -250.e-6 # physical domain 18 | geometry.prob_hi = 250.e-6 250.e-6 110.e-6 19 | 20 | beams.names = driver witness 21 | 22 | driver.position_mean = 0. 0. 0. 23 | driver.position_std = 2.e-6 2.e-6 30.e-6 24 | driver.injection_type = fixed_weight 25 | driver.num_particles = 1000000 26 | driver.total_charge = .6e-9 27 | driver.u_mean = 0. 0. 1000. 28 | driver.u_std = 2. 2. 10. 29 | driver.do_symmetrize = 1 30 | 31 | witness.position_mean = 0. 0. 
-160.e-6 32 | witness.position_std = 2.e-6 2.e-6 5.e-6 33 | witness.injection_type = fixed_weight 34 | witness.num_particles = 1000000 35 | witness.total_charge = {{witness_charge}}e-9 36 | witness.u_mean = 0. 0. 1000. 37 | witness.u_std = 2. 2. 10. 38 | witness.do_symmetrize = 1 39 | 40 | plasmas.names = electron ion 41 | 42 | electron.density(x,y,z) = 2.e22 43 | electron.ppc = 1 1 44 | electron.u_mean = 0.0 0.0 0. 45 | electron.element = electron 46 | 47 | ion.density(x,y,z) = 2.e22 48 | ion.ppc = 1 1 49 | ion.u_mean = 0.0 0.0 0. 50 | ion.element = H 51 | 52 | diagnostic.diag_type = xz 53 | -------------------------------------------------------------------------------- /examples/wake_t/template_simulation_script.py: -------------------------------------------------------------------------------- 1 | """Template simulation script for optimizing an APL with Wake-T.""" 2 | 3 | import numpy as np 4 | from wake_t.utilities.bunch_generation import get_gaussian_bunch_from_twiss 5 | from wake_t.beamline_elements import ActivePlasmaLens 6 | from aptools.data_analysis.beam_diagnostics import twiss_parameters 7 | 8 | 9 | def run_simulation(g_lens): 10 | """Run a Wake-T simulation of an active plasma lens.""" 11 | # Generate particle bunch. 12 | g_x = 600 13 | b_x = 1 14 | a_x = -np.sqrt(b_x * g_x - 1) 15 | em_x = 1e-6 # m 16 | gamma_avg = 300 / 0.511 17 | ene_sp = 1 # % 18 | Q = 100 # pC 19 | s_t = 10 # fs 20 | n_part = 1e4 21 | bunch = get_gaussian_bunch_from_twiss( 22 | em_x, 23 | em_x, 24 | a_x, 25 | a_x, 26 | b_x, 27 | b_x, 28 | gamma_avg, 29 | ene_sp, 30 | s_t, 31 | 0, 32 | q_tot=Q, 33 | n_part=n_part, 34 | ) 35 | 36 | # Define plasma lens. 37 | p_lens = ActivePlasmaLens(3e-2, g_lens, n_out=2) 38 | 39 | # Perform tracking. 40 | p_lens.track(bunch) 41 | 42 | # Analyze bunch. 43 | a_x, b_x, g_x = twiss_parameters(bunch.x, bunch.px, bunch.pz, w=bunch.q) 44 | 45 | # Save parameter to file for `analysis_script.py`. 
46 | np.save("a_x_abs", np.abs(a_x)) 47 | 48 | 49 | if __name__ == "__main__": 50 | g_lens = {{g_lens}} 51 | run_simulation(g_lens) 52 | -------------------------------------------------------------------------------- /optimas/generators/__init__.py: -------------------------------------------------------------------------------- 1 | # Import Ax generators 2 | try: 3 | from .ax.service.single_fidelity import AxSingleFidelityGenerator 4 | from .ax.service.multi_fidelity import AxMultiFidelityGenerator 5 | from .ax.service.ax_client import AxClientGenerator 6 | from .ax.developer.multitask import AxMultitaskGenerator 7 | except ImportError as e: 8 | if e.__str__() == "No module named 'ax'": 9 | # Replace generators by dummy generators that will 10 | # raise an error only if the user tries to instantiate them 11 | # and tell them to install ax-platform 12 | from .ax.import_error_dummy_generator import AxImportErrorDummyGenerator 13 | 14 | AxSingleFidelityGenerator = AxImportErrorDummyGenerator 15 | AxMultiFidelityGenerator = AxImportErrorDummyGenerator 16 | AxClientGenerator = AxImportErrorDummyGenerator 17 | AxMultitaskGenerator = AxImportErrorDummyGenerator 18 | else: 19 | raise (e) 20 | 21 | # Import optimas native generators 22 | from .grid_sampling import GridSamplingGenerator 23 | from .line_sampling import LineSamplingGenerator 24 | from .random_sampling import RandomSamplingGenerator 25 | from .external import ExternalGenerator 26 | 27 | __all__ = [ 28 | "AxSingleFidelityGenerator", 29 | "AxMultiFidelityGenerator", 30 | "AxMultitaskGenerator", 31 | "AxClientGenerator", 32 | "GridSamplingGenerator", 33 | "LineSamplingGenerator", 34 | "RandomSamplingGenerator", 35 | "ExternalGenerator", 36 | ] 37 | -------------------------------------------------------------------------------- /examples/wake_t/run_example.py: -------------------------------------------------------------------------------- 1 | """Example Bayesian optimization with Wake-T. 2 | 3 | This example optimizes the focusing strength of an active plasma lens using 4 | Wake-T simulations. 5 | 6 | The Wake-T simulations are performed using the template defined in the 7 | `template_simulation_script.py` file. 8 | 9 | The calculation of the objective `f` is performed in the `analyze_simulation` 10 | function, which for convenience is here defined in the `analysis_script.py` 11 | file. 12 | """ 13 | 14 | from optimas.generators import AxSingleFidelityGenerator 15 | from optimas.evaluators import TemplateEvaluator 16 | from optimas.explorations import Exploration 17 | from gest_api.vocs import VOCS 18 | 19 | from analysis_script import analyze_simulation 20 | 21 | 22 | # Create VOCS object. 23 | vocs = VOCS( 24 | variables={ 25 | "g_lens": [100.0, 1000.0], 26 | }, 27 | objectives={"f": "MINIMIZE"}, 28 | ) 29 | 30 | 31 | # Create generator. 32 | gen = AxSingleFidelityGenerator(vocs=vocs, n_init=12) 33 | 34 | 35 | # Create evaluator. 36 | ev = TemplateEvaluator( 37 | sim_template="template_simulation_script.py", 38 | analysis_func=analyze_simulation, 39 | ) 40 | 41 | 42 | # Create exploration. 
43 | exp = Exploration( 44 | generator=gen, evaluator=ev, max_evals=100, sim_workers=12, run_async=False 45 | ) 46 | 47 | 48 | # To safely perform exploration, run it in the block below (this is needed 49 | # for some flavours of multiprocessing, namely spawn and forkserver) 50 | if __name__ == "__main__": 51 | exp.run() 52 | -------------------------------------------------------------------------------- /doc/source/user_guide/citation.rst: -------------------------------------------------------------------------------- 1 | Citing optimas 2 | ============== 3 | 4 | If your usage of ``optimas`` leads to a scientific publication, please consider 5 | citing the original `paper `_: 6 | 7 | .. code-block:: bibtex 8 | 9 | @article{PhysRevAccelBeams.26.084601, 10 | title = {Bayesian optimization of laser-plasma accelerators assisted by reduced physical models}, 11 | author = {Ferran Pousa, A. and Jalas, S. and Kirchen, M. and Martinez de la Ossa, A. and Th\'evenet, M. and Hudson, S. and Larson, J. and Huebl, A. and Vay, J.-L. and Lehe, R.}, 12 | journal = {Phys. Rev. Accel. Beams}, 13 | volume = {26}, 14 | issue = {8}, 15 | pages = {084601}, 16 | numpages = {9}, 17 | year = {2023}, 18 | month = {Aug}, 19 | publisher = {American Physical Society}, 20 | doi = {10.1103/PhysRevAccelBeams.26.084601}, 21 | url = {https://link.aps.org/doi/10.1103/PhysRevAccelBeams.26.084601} 22 | } 23 | 24 | and libEnsemble: 25 | 26 | .. code-block:: bibtex 27 | 28 | @article{Hudson2022, 29 | title = {{libEnsemble}: A Library to Coordinate the Concurrent 30 | Evaluation of Dynamic Ensembles of Calculations}, 31 | author = {Stephen Hudson and Jeffrey Larson and John-Luke Navarro and Stefan M. Wild}, 32 | journal = {{IEEE} Transactions on Parallel and Distributed Systems}, 33 | volume = {33}, 34 | number = {4}, 35 | pages = {977--988}, 36 | year = {2022}, 37 | doi = {10.1109/tpds.2021.3082815} 38 | } 39 | -------------------------------------------------------------------------------- /tests/test_exploration_run_exception.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from gest_api.vocs import VOCS 4 | from optimas.explorations import Exploration 5 | from optimas.generators import RandomSamplingGenerator 6 | from optimas.evaluators import FunctionEvaluator 7 | 8 | 9 | def eval_func(input_params, output_params): 10 | raise ValueError("Exception to break exploration") 11 | 12 | 13 | def test_exception_during_exploration_run(): 14 | """Test that the Exploration handles exceptions during the run correctly. 15 | 16 | When using `create_evaluation_dirs=True`, the current working directory 17 | will change during exploration and should be restored when `.run` finishes, 18 | even if an exception occurs. 19 | """ 20 | # Define variables and objectives. 21 | vocs = VOCS( 22 | variables={"x0": [-50.0, 5.0], "x1": [-5.0, 15.0]}, 23 | objectives={"f": "MAXIMIZE"}, 24 | ) 25 | 26 | # Create generator. 27 | gen = RandomSamplingGenerator(vocs=vocs) 28 | 29 | # Create function evaluator. 30 | ev = FunctionEvaluator(function=eval_func, create_evaluation_dirs=True) 31 | 32 | # Create exploration. 33 | exploration = Exploration( 34 | generator=gen, 35 | evaluator=ev, 36 | max_evals=10, 37 | sim_workers=2, 38 | exploration_dir_path="./tests_output/test_exception_during_run", 39 | ) 40 | 41 | cwd = os.getcwd() 42 | 43 | # Run exploration without raising an exception. 44 | exploration.run() 45 | 46 | # Check that the cwd remains unchanged after a failed run. 
47 | assert os.getcwd() == cwd 48 | 49 | 50 | if __name__ == "__main__": 51 | test_exception_during_exploration_run() 52 | -------------------------------------------------------------------------------- /tests/test_comms.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from gest_api.vocs import VOCS 3 | 4 | from optimas.explorations import Exploration 5 | from optimas.generators import RandomSamplingGenerator 6 | from optimas.evaluators import FunctionEvaluator 7 | 8 | 9 | def eval_func(input_params, output_params): 10 | """Evaluation function used for testing""" 11 | x0 = input_params["x0"] 12 | x1 = input_params["x1"] 13 | result = -(x0 + 10 * np.cos(x0)) * (x1 + 5 * np.cos(x1)) 14 | output_params["f"] = result 15 | 16 | 17 | def test_libe_comms(): 18 | """Test local and local_threading communications.""" 19 | # Define variables and objectives. 20 | vocs = VOCS( 21 | variables={"x0": [-50.0, 5.0], "x1": [-5.0, 15.0]}, 22 | objectives={"f": "MAXIMIZE"}, 23 | ) 24 | 25 | max_evals = 10 26 | 27 | for comm in ["local", "local_threading"]: 28 | # Create generator. 29 | gen = RandomSamplingGenerator(vocs=vocs) 30 | 31 | # Create function evaluator. 32 | ev = FunctionEvaluator(function=eval_func) 33 | 34 | # Create exploration. 35 | exploration = Exploration( 36 | generator=gen, 37 | evaluator=ev, 38 | max_evals=max_evals, 39 | sim_workers=2, 40 | exploration_dir_path=f"./tests_output/test_comms_{comm}", 41 | libe_comms=comm, 42 | ) 43 | 44 | # Run exploration. 45 | exploration.run() 46 | 47 | # Check that all trials were evaluated. 48 | assert np.all(exploration.history["f"] != 0.0) 49 | assert len(exploration.history) == max_evals 50 | 51 | 52 | if __name__ == "__main__": 53 | test_libe_comms() 54 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools", "wheel"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = 'optimas' 7 | description = 'Optimization at scale, powered by libEnsemble' 8 | authors = [ 9 | {name = 'Optimas Developers', email = 'angel.ferran.pousa@desy.de'}, 10 | ] 11 | readme = 'README.md' 12 | requires-python = '>=3.10' 13 | keywords = ['optimization', 'scale', 'bayesian'] 14 | license = {text = 'BSD-3-Clause-LBNL'} 15 | classifiers = [ 16 | 'Development Status :: 3 - Alpha', 17 | 'Intended Audience :: Science/Research', 18 | 'Topic :: Scientific/Engineering', 19 | 'Operating System :: OS Independent', 20 | 'Programming Language :: Python :: 3.10', 21 | 'Programming Language :: Python :: 3.11', 22 | 'Programming Language :: Python :: 3.12', 23 | ] 24 | dependencies = [ 25 | 'libensemble >= 1.3.0', 26 | 'jinja2', 27 | 'pandas', 28 | 'mpi4py', 29 | 'pydantic >= 2.0', 30 | ] 31 | dynamic = ['version'] 32 | 33 | [project.optional-dependencies] 34 | test = [ 35 | 'flake8', 36 | 'pytest', 37 | 'pytest-mpi', 38 | 'ax-platform >=0.5.0, <1.0.0', 39 | 'matplotlib', 40 | ] 41 | all = [ 42 | 'ax-platform >=0.5.0, <1.0.0', 43 | 'matplotlib' 44 | ] 45 | 46 | [project.urls] 47 | Documentation = 'https://optimas.readthedocs.io/' 48 | 49 | [tool.setuptools.dynamic] 50 | version = {attr = "optimas.__version__"} 51 | 52 | [tool.setuptools.packages.find] 53 | include = [ 54 | 'optimas', 55 | 'optimas.*', 56 | ] 57 | 58 | [tool.black] 59 | line-length = 80 60 | target-version = ['py310', 'py311', 'py312'] 61 | 62 | [tool.pydocstyle] 63 | 
convention = "numpy" 64 | add-ignore = ["D104", "D106", "D107", "D203", "D213"] 65 | -------------------------------------------------------------------------------- /doc/source/user_guide/installation_local.rst: -------------------------------------------------------------------------------- 1 | Installation on a local computer 2 | -------------------------------- 3 | 4 | The recommended approach is to install optimas in a ``conda`` environment. 5 | 6 | Install ``mpi4py`` 7 | ~~~~~~~~~~~~~~~~~~ 8 | If your system has already an MPI implementation installed, install ``mpi4py`` 9 | using ``pip``: 10 | 11 | .. code:: 12 | 13 | pip install mpi4py 14 | 15 | This will make sure that optimas uses the existing MPI. The recommended 16 | MPI implementation is MPICH. 17 | 18 | If you don't have an existing MPI installation, the recommended approach is to 19 | install ``mpi4py`` from ``conda``, including the MPI implementation corresponding 20 | to your operating system. 21 | 22 | On Linux and macOS: 23 | 24 | .. code:: 25 | 26 | conda install -c conda-forge mpi4py mpich 27 | 28 | On Windows: 29 | 30 | .. code:: 31 | 32 | conda install -c conda-forge mpi4py msmpi 33 | 34 | Install optimas from PyPI (recommended) 35 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 36 | This will install the latest stable release. 37 | 38 | Installing with only the **basic** dependencies: 39 | 40 | .. code:: 41 | 42 | pip install optimas 43 | 44 | Installing with **all** dependencies: 45 | 46 | .. code:: 47 | 48 | pip install 'optimas[all]' 49 | 50 | Use this option if you plan to do Bayesian optimization 51 | (see :ref:`dependencies` for more details). 52 | 53 | Install from conda-forge 54 | ~~~~~~~~~~~~~~~~~~~~~~~~ 55 | 56 | .. code:: 57 | 58 | conda install optimas --channel conda-forge 59 | 60 | Install from GitHub 61 | ~~~~~~~~~~~~~~~~~~~ 62 | This will install the latest development version with all dependencies. 63 | 64 | .. code:: 65 | 66 | pip install "optimas[all] @ git+https://github.com/optimas-org/optimas.git" 67 | -------------------------------------------------------------------------------- /examples/multi_stage/run_example.py: -------------------------------------------------------------------------------- 1 | """Example Bayesian optimization of a multistage LPA with Warp-X. 2 | 3 | The Warp-X simulations are performed using the template defined in the 4 | `template_simulation_script` file. 5 | 6 | In addition to the objective `f`, four additional parameters 7 | are analyzed for each simulation and included in the optimization 8 | history. The calculation of `f` and the additional parameters is performed 9 | in the `analyze_simulation` function, which for convenience is here defined in 10 | the `analysis_script.py` file. 11 | """ 12 | 13 | from optimas.generators import AxSingleFidelityGenerator 14 | from optimas.evaluators import TemplateEvaluator 15 | from optimas.explorations import Exploration 16 | from gest_api.vocs import VOCS 17 | 18 | from analysis_script import analyze_simulation 19 | 20 | 21 | # Create VOCS object. 22 | vocs = VOCS( 23 | variables={ 24 | "adjust_factor": [0.7, 1.05], 25 | "lens_start": [0.32, 0.347], 26 | }, 27 | objectives={"f": "MINIMIZE"}, 28 | observables=["energy_std", "energy_avg", "charge", "emittance"], 29 | ) 30 | 31 | 32 | # Create generator. 33 | gen = AxSingleFidelityGenerator( 34 | vocs=vocs, 35 | n_init=4, 36 | ) 37 | 38 | 39 | # Create evaluator. 
40 | ev = TemplateEvaluator( 41 | sim_template="template_simulation_script", 42 | analysis_func=analyze_simulation, 43 | executable="warpx", 44 | n_gpus=1, 45 | ) 46 | 47 | 48 | # Create exploration. 49 | exp = Exploration( 50 | generator=gen, evaluator=ev, max_evals=1000, sim_workers=4, run_async=True 51 | ) 52 | 53 | 54 | # To safely perform exploration, run it in the block below (this is needed 55 | # for some flavours of multiprocessing, namely spawn and forkserver) 56 | if __name__ == "__main__": 57 | exp.run() 58 | -------------------------------------------------------------------------------- /optimas/utils/ax/other.py: -------------------------------------------------------------------------------- 1 | """Contains the definition of various utilities for using Ax.""" 2 | 3 | from typing import List, Dict 4 | 5 | import numpy as np 6 | from ax.service.utils.instantiation import ObjectiveProperties 7 | 8 | from optimas.core import VaryingParameter, Objective 9 | 10 | 11 | def convert_optimas_to_ax_parameters( 12 | varying_parameters: List[VaryingParameter], 13 | ) -> List[Dict]: 14 | """Create list of Ax parameters from optimas varying parameters.""" 15 | parameters = [] 16 | for var in varying_parameters: 17 | # Determine parameter type. 18 | value_dtype = np.dtype(var.dtype) 19 | if value_dtype.kind == "f": 20 | value_type = "float" 21 | elif value_dtype.kind == "i": 22 | value_type = "int" 23 | else: 24 | raise ValueError( 25 | "Ax range parameter can only be of type 'float' or 'int', " 26 | f"not {var.dtype}." 27 | ) 28 | # Create parameter dict and append to list. 29 | parameters.append( 30 | { 31 | "name": var.name, 32 | "type": "range", 33 | "bounds": [var.lower_bound, var.upper_bound], 34 | "is_fidelity": var.is_fidelity, 35 | "target_value": var.fidelity_target_value, 36 | "value_type": value_type, 37 | } 38 | ) 39 | return parameters 40 | 41 | 42 | def convert_optimas_to_ax_objectives( 43 | objectives: List[Objective], 44 | ) -> Dict[str, ObjectiveProperties]: 45 | """Create list of Ax objectives from optimas objectives.""" 46 | ax_objectives = {} 47 | for obj in objectives: 48 | ax_objectives[obj.name] = ObjectiveProperties(minimize=obj.minimize) 49 | return ax_objectives 50 | -------------------------------------------------------------------------------- /doc/source/user_guide/installation_perlmutter.rst: -------------------------------------------------------------------------------- 1 | Installation on Perlmutter (NERSC) 2 | ---------------------------------- 3 | 4 | Installation 5 | ~~~~~~~~~~~~ 6 | 7 | Execute the following commands in order to create a dedicated Python 8 | environment, in which to install *optimas*. 9 | 10 | :: 11 | 12 | module load cray-python/3.9.13.1 13 | 14 | python3 -m pip install --user --upgrade pip 15 | python3 -m pip install --user virtualenv 16 | python3 -m pip cache purge 17 | python3 -m venv $HOME/sw/perlmutter/gpu/venvs/optimas 18 | source $HOME/sw/perlmutter/gpu/venvs/optimas/bin/activate 19 | 20 | pip install 'optimas[all]' 21 | 22 | Running an optimas job 23 | ~~~~~~~~~~~~~~~~~~~~~~ 24 | 25 | In order to run a new optimas job, create a new folder in the ``$SCRATCH`` 26 | directory, and add the files needed to describe your optimization, i.e. typically: 27 | 28 | * a file ``run_optimization.py`` 29 | * a template simulation input script 30 | * a file ``analysis_script.py`` that postprocesses the simulation results 31 | 32 | (See a typical example `here `_.)
33 | 34 | Then, create a file ``submission_script`` with the following content: 35 | 36 | :: 37 | 38 | #!/bin/bash -l 39 | #SBATCH -t 40 | #SBATCH -N 41 | #SBATCH -A 42 | #SBATCH -C gpu 43 | #SBATCH -q regular 44 | 45 | module load cray-python/3.9.13.1 46 | source $HOME/sw/perlmutter/gpu/venvs/optimas/bin/activate 47 | 48 | python run_optimization.py 49 | 50 | where ````, ```` and ```` should be replaced 51 | by the wall time, number of nodes and NERSC account number that you want to use. 52 | 53 | Then run: 54 | 55 | :: 56 | 57 | sbatch submission_script 58 | -------------------------------------------------------------------------------- /examples/multi_stage/analysis_script.py: -------------------------------------------------------------------------------- 1 | """Defines the analysis function that runs after the simulation.""" 2 | 3 | import os 4 | 5 | import numpy as np 6 | from openpmd_viewer.addons import LpaDiagnostics 7 | 8 | 9 | def get_emittance(ts, t): 10 | """Calculate the beam emittance at the given time step.""" 11 | w, x, ux = ts.get_particle(["w", "x", "ux"], t=t) 12 | if len(w) <= 2: 13 | return 0 14 | else: 15 | x2 = np.average(x**2, weights=w) 16 | u2 = np.average(ux**2, weights=w) 17 | xu = np.average(x * ux, weights=w) 18 | return np.sqrt(x2 * u2 - xu**2) 19 | 20 | 21 | def analyze_simulation(simulation_directory, output_params): 22 | """Analyze the output of the WarpX simulation. 23 | 24 | The function calculates the objective function 'f' as well as the 25 | diagnostic quantities listed as `analyzed_parameters` in the generator. 26 | """ 27 | ts = LpaDiagnostics(os.path.join(simulation_directory, "diag")) 28 | t0 = 4.0e-11 # Time (boosted-frame) at which we calculate beam properties. 29 | 30 | charge_i = ts.get_charge(t=0) 31 | emittance_i = get_emittance(ts, t=0) 32 | charge_f = ts.get_charge(t=t0) 33 | emittance_f = get_emittance(ts, t=t0) 34 | energy_avg, energy_std = ts.get_mean_gamma(t=t0) 35 | 36 | # Here: Build a quantity to minimize (f) that encompasses 37 | # emittance AND charge loss 1% charge loss has the 38 | # same impact as doubling the initial emittance. 39 | # we minimize f! 40 | output_params["f"] = np.log( 41 | emittance_f + emittance_i * (1.0 - charge_f / charge_i) * 100 42 | ) 43 | output_params["energy_std"] = energy_std 44 | output_params["energy_avg"] = energy_avg 45 | output_params["charge"] = charge_f 46 | output_params["emittance"] = emittance_f 47 | 48 | return output_params 49 | -------------------------------------------------------------------------------- /doc/source/user_guide/dependencies.rst: -------------------------------------------------------------------------------- 1 | .. _dependencies: 2 | 3 | Dependencies 4 | ============ 5 | 6 | Optimas relies on the following packages: 7 | 8 | * `jinja2 `_ - Needed to generate simulation scripts from templates. 9 | * `libEnsemble `_ - The backbone of optimas, orchestrates the concurrent evaluation of simulations, the resource detection and allocation, and the communication between simulations and manager. 10 | * `mpi4py `_ - Python bindings for MPI. Required for launching parallel simulations. 11 | * `pandas `_ - Enable output as pandas DataFrames. 12 | * `pydantic `_ - Input validation and object serialization. 13 | * (optional) `Ax `_ - Algorithms for Bayesian optimization. 14 | 15 | 16 | The installed dependencies will determine which generators are available for use. 17 | See table below for a summary. 18 | 19 | .. 
list-table:: Available generators and their dependencies 20 | :widths: 35 25 25 21 | :header-rows: 1 22 | 23 | * - Generator 24 | - ``pip install optimas`` 25 | - ``pip install 'optimas[all]'`` 26 | * - :class:`~optimas.generators.LineSamplingGenerator` 27 | - ✅ 28 | - ✅ 29 | * - :class:`~optimas.generators.GridSamplingGenerator` 30 | - ✅ 31 | - ✅ 32 | * - :class:`~optimas.generators.RandomSamplingGenerator` 33 | - ✅ 34 | - ✅ 35 | * - :class:`~optimas.generators.AxSingleFidelityGenerator` 36 | - ❌ 37 | - ✅ 38 | * - :class:`~optimas.generators.AxMultiFidelityGenerator` 39 | - ❌ 40 | - ✅ 41 | * - :class:`~optimas.generators.AxMultitaskGenerator` 42 | - ❌ 43 | - ✅ 44 | * - :class:`~optimas.generators.AxClientGenerator` 45 | - ❌ 46 | - ✅ 47 | -------------------------------------------------------------------------------- /examples/ionization_injection/run_example.py: -------------------------------------------------------------------------------- 1 | """Example Bayesian optimization of an LPA with FBPIC. 2 | 3 | This example optimizes an LPA based on ionization injection using FBPIC 4 | simulations. 5 | 6 | The FBPIC simulations are performed using the template defined in the 7 | `template_simulation_script.py` file. 8 | 9 | In addition to the objective `f`, three additional parameters 10 | are analyzed for each simulation and including in the optimization 11 | history. The calculation of `f` and the additional parameters is performed 12 | in the `analyze_simulation` function, which for convenience is here defined in 13 | the `analysis_script.py` file. 14 | """ 15 | 16 | from optimas.generators import AxSingleFidelityGenerator 17 | from optimas.evaluators import TemplateEvaluator 18 | from optimas.explorations import Exploration 19 | from gest_api.vocs import VOCS 20 | 21 | from analysis_script import analyze_simulation 22 | 23 | 24 | # Create VOCS object. 25 | vocs = VOCS( 26 | variables={ 27 | "laser_scale": [0.7, 1.05], 28 | "z_foc": [3.0, 7.5], 29 | "mult": [0.1, 1.5], 30 | "plasma_scale": [0.6, 0.8], 31 | }, 32 | objectives={"f": "MAXIMIZE"}, 33 | observables=["energy_med", "energy_mad", "charge"], 34 | ) 35 | 36 | 37 | # Create generator. 38 | gen = AxSingleFidelityGenerator( 39 | vocs=vocs, 40 | n_init=4, 41 | ) 42 | 43 | 44 | # Create evaluator. 45 | ev = TemplateEvaluator( 46 | sim_template="template_simulation_script.py", 47 | analysis_func=analyze_simulation, 48 | n_gpus=1, # Use 1 GPU per simulation. 49 | ) 50 | 51 | 52 | # Create exploration. 
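# run_async=True below makes the exploration asynchronous: a new evaluation is
# generated and launched as soon as a worker becomes free, rather than waiting
# for the whole batch of sim_workers evaluations to finish.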
53 | exp = Exploration( 54 | generator=gen, evaluator=ev, max_evals=100, sim_workers=4, run_async=True 55 | ) 56 | 57 | 58 | # To safely perform exploration, run it in the block below (this is needed 59 | # for some flavours of multiprocessing, namely spawn and forkserver) 60 | if __name__ == "__main__": 61 | exp.run() 62 | -------------------------------------------------------------------------------- /tests/test_env_script.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import numpy as np 4 | from gest_api.vocs import VOCS 5 | 6 | from optimas.explorations import Exploration 7 | from optimas.generators import RandomSamplingGenerator 8 | from optimas.evaluators import TemplateEvaluator 9 | 10 | 11 | def analysis_func(sim_dir, output_params): 12 | """Analysis function used by the template evaluator.""" 13 | # Read back result from file 14 | with open("result.txt") as f: 15 | result = f.readlines() 16 | f = float(result[0]) 17 | test_var = result[1] 18 | output_params["f"] = f 19 | output_params["test_var"] = test_var 20 | 21 | 22 | def test_env_script(): 23 | # Define variables and objectives. 24 | vocs = VOCS( 25 | variables={"x0": [-50.0, 5.0], "x1": [-5.0, 15.0]}, 26 | objectives={"f": "MAXIMIZE"}, 27 | observables={"test_var": "U10"}, 28 | ) 29 | 30 | # Define variables and objectives. 31 | gen = RandomSamplingGenerator(vocs=vocs) 32 | 33 | # Create template evaluator. 34 | ev = TemplateEvaluator( 35 | sim_template=os.path.join( 36 | os.path.abspath(os.path.dirname(__file__)), 37 | "resources", 38 | "template_simulation_script.py", 39 | ), 40 | analysis_func=analysis_func, 41 | env_script=os.path.join( 42 | os.path.abspath(os.path.dirname(__file__)), 43 | "resources", 44 | "env_script.sh", 45 | ), 46 | ) 47 | 48 | # Create exploration. 49 | exploration = Exploration( 50 | generator=gen, 51 | evaluator=ev, 52 | max_evals=10, 53 | sim_workers=2, 54 | exploration_dir_path="./tests_output/test_env_script", 55 | ) 56 | 57 | # Run exploration. 58 | exploration.run() 59 | 60 | assert np.all(exploration.history["test_var"] == "testvalue") 61 | 62 | 63 | if __name__ == "__main__": 64 | test_env_script() 65 | -------------------------------------------------------------------------------- /examples/hipace/run_example.py: -------------------------------------------------------------------------------- 1 | """Example Bayesian optimization a PWFA with HiPACE++. 2 | 3 | The HiPACE++ simulations are performed using the template defined in the 4 | `template_simulation_script` file. 5 | 6 | In addition to the objective `f`, three additional parameters 7 | are analyzed for each simulation and including in the optimization 8 | history. The calculation of `f` and the additional parameters is performed 9 | in the `analyze_simulation` function, which for convenience is here defined in 10 | the `analysis_script.py` file. 11 | """ 12 | 13 | from optimas.generators import AxSingleFidelityGenerator 14 | from optimas.evaluators import TemplateEvaluator 15 | from optimas.explorations import Exploration 16 | from gest_api.vocs import VOCS 17 | 18 | from analysis_script import analyze_simulation 19 | 20 | 21 | # Create VOCS object. 22 | vocs = VOCS( 23 | variables={ 24 | "witness_charge": [0.05, 1.0], 25 | }, 26 | objectives={"f": "MAXIMIZE"}, 27 | observables=["energy_med", "energy_mad", "charge"], 28 | ) 29 | 30 | 31 | # Create generator. 32 | gen = AxSingleFidelityGenerator( 33 | vocs=vocs, 34 | n_init=4, 35 | ) 36 | 37 | 38 | # Create evaluator. 
39 | ev = TemplateEvaluator( 40 | sim_template="template_simulation_script", 41 | analysis_func=analyze_simulation, 42 | executable="/path/to/build/bin/hipace", 43 | n_gpus=2, # Use 2 GPUs per simulation. 44 | # Uncomment if HiPACE is installed in a different environment than optimas. 45 | # env_script='/path/to/profile.hipace', 46 | # Uncomment if `env_script` loads a different MPI to that used by optimas. 47 | # env_mpi='openmpi' 48 | ) 49 | 50 | 51 | # Create exploration. 52 | exp = Exploration(generator=gen, evaluator=ev, max_evals=100, sim_workers=2) 53 | 54 | 55 | # To safely perform exploration, run it in the block below (this is needed 56 | # for some flavours of multiprocessing, namely spawn and forkserver) 57 | if __name__ == "__main__": 58 | exp.run() 59 | -------------------------------------------------------------------------------- /doc/source/examples/bo_basic.rst: -------------------------------------------------------------------------------- 1 | .. _bo-basic: 2 | 3 | Basic optimization with simulations 4 | =================================== 5 | 6 | 7 | Description 8 | ~~~~~~~~~~~ 9 | 10 | This example illustrates how to run a generic Bayesian optimization with 11 | simulations. This typically requires: 12 | 13 | - An *optimas* script for defining and running the optimization. 14 | - A template simulation script. 15 | - A function to analyze the simulation output. 16 | 17 | In this generic example, the "simulations" will be simple evaluations of an 18 | analytical equation. For a real use case, the simple template that evaluates 19 | this expression can be replaced by an actual simulation script. 20 | 21 | .. note:: 22 | 23 | If you want to adapt this example to a case where the simulation template 24 | is not a Python script, make sure to pass ``executable=`` 25 | as an argument to the ``TemplateEvaluator``, where ```` is 26 | the path to the executable that will run your simulation script. 27 | 28 | For additional details about how to set up a template simulation script see 29 | :ref:`optimas-with-simulations`. 30 | 31 | 32 | Scripts 33 | ~~~~~~~ 34 | 35 | The two files needed to run the optimization should be located in a folder 36 | (named e.g., ``optimization``) with the following structure: 37 | 38 | .. code-block:: bash 39 | 40 | optimization 41 | ├── run_example.py 42 | └── template_simulation_script.py 43 | 44 | The optimization is started by executing: 45 | 46 | .. code-block:: bash 47 | 48 | python run_example.py 49 | 50 | You can find both example scripts below. 51 | 52 | 53 | .. literalinclude:: ../../../examples/dummy/run_example.py 54 | :language: python 55 | :caption: run_example.py (:download:`download <../../../examples/dummy/run_example.py>`) 56 | 57 | .. literalinclude:: ../../../examples/dummy/template_simulation_script.py 58 | :language: python 59 | :caption: template_simulation_script.py (:download:`download <../../../examples/dummy/template_simulation_script.py>`) 60 | -------------------------------------------------------------------------------- /examples/ionization_injection_mf/run_example.py: -------------------------------------------------------------------------------- 1 | """Example multifidelity Bayesian optimization of an LPA with FBPIC. 2 | 3 | This example optimizes an LPA based on ionization injection using FBPIC 4 | simulations with varying fidelity (resolution). 5 | 6 | The FBPIC simulations are performed using the template defined in the 7 | `template_simulation_script.py` file.
8 | 9 | In addition to the objective `f`, three additional parameters 10 | are analyzed for each simulation and including in the optimization 11 | history. The calculation of `f` and the additional parameters is performed 12 | in the `analyze_simulation` function, which for convenience is here defined in 13 | the `analysis_script.py` file. 14 | """ 15 | 16 | from optimas.generators import AxMultiFidelityGenerator 17 | from optimas.evaluators import TemplateEvaluator 18 | from optimas.explorations import Exploration 19 | from gest_api.vocs import VOCS 20 | 21 | from analysis_script import analyze_simulation 22 | 23 | 24 | # Create VOCS object. 25 | vocs = VOCS( 26 | variables={ 27 | "laser_scale": [0.7, 1.05], 28 | "z_foc": [3.0, 7.5], 29 | "mult": [0.1, 1.5], 30 | "plasma_scale": [0.6, 0.8], 31 | "resolution": [2.0, 4.0], 32 | }, 33 | objectives={"f": "MINIMIZE"}, 34 | observables=["energy_med", "energy_mad", "charge"], 35 | ) 36 | 37 | 38 | # Create generator. 39 | gen = AxMultiFidelityGenerator( 40 | vocs=vocs, 41 | n_init=4, 42 | ) 43 | gen.set_fidelity_param("resolution", fidelity_target_value=4.0) 44 | 45 | 46 | # Create evaluator. 47 | ev = TemplateEvaluator( 48 | sim_template="template_simulation_script.py", 49 | analysis_func=analyze_simulation, 50 | n_gpus=1, # Use 1 GPU per simulation. 51 | ) 52 | 53 | 54 | # Create exploration. 55 | exp = Exploration( 56 | generator=gen, evaluator=ev, max_evals=100, sim_workers=4, run_async=True 57 | ) 58 | 59 | 60 | # To safely perform exploration, run it in the block below (this is needed 61 | # for some flavours of multiprocessing, namely spawn and forkserver) 62 | if __name__ == "__main__": 63 | exp.run() 64 | -------------------------------------------------------------------------------- /examples/dummy/run_example.py: -------------------------------------------------------------------------------- 1 | """Basic example of parallel Bayesian optimization with Ax.""" 2 | 3 | from gest_api.vocs import VOCS 4 | from optimas.generators import AxSingleFidelityGenerator 5 | from optimas.evaluators import TemplateEvaluator 6 | from optimas.explorations import Exploration 7 | 8 | 9 | def analyze_simulation(simulation_directory, output_params): 10 | """Analyze the simulation output. 11 | 12 | This method analyzes the output generated by the simulation to 13 | obtain the value of the optimization objective and other observables. 14 | The value of these parameters has to be given to the 15 | `output_params` dictionary. 16 | 17 | Parameters 18 | ---------- 19 | simulation_directory : str 20 | Path to the simulation folder where the output was generated. 21 | output_params : dict 22 | Dictionary where the value of the objectives and observables 23 | will be stored. There is one entry per parameter, where the key 24 | is the name of the parameter given by the user. 25 | 26 | Returns 27 | ------- 28 | dict 29 | The `output_params` dictionary with the results from the analysis. 30 | 31 | """ 32 | # Read back result from file 33 | with open("result.txt") as f: 34 | result = float(f.read()) 35 | # Fill in output parameters. 36 | output_params["f"] = result 37 | return output_params 38 | 39 | 40 | # Create VOCS object defining variables, objectives. 41 | vocs = VOCS( 42 | variables={ 43 | "x0": [0.0, 15.0], 44 | "x1": [0.0, 15.0], 45 | }, 46 | objectives={"f": "MINIMIZE"}, 47 | ) 48 | 49 | 50 | # Create generator. 51 | gen = AxSingleFidelityGenerator(vocs=vocs, n_init=2) 52 | 53 | 54 | # Create evaluator. 
55 | ev = TemplateEvaluator( 56 | sim_template="template_simulation_script.py", 57 | analysis_func=analyze_simulation, 58 | ) 59 | 60 | 61 | # Create exploration. 62 | exp = Exploration(generator=gen, evaluator=ev, max_evals=15, sim_workers=2) 63 | 64 | 65 | # To safely perform exploration, run it in the block below (this is needed 66 | # for some flavours of multiprocessing, namely spawn and forkserver) 67 | if __name__ == "__main__": 68 | exp.run() 69 | -------------------------------------------------------------------------------- /examples/dummy_grid_sampling/run_example.py: -------------------------------------------------------------------------------- 1 | """Basic example of parallel grid sampling with simulations.""" 2 | 3 | from gest_api.vocs import VOCS 4 | from optimas.generators import GridSamplingGenerator 5 | from optimas.evaluators import TemplateEvaluator 6 | from optimas.explorations import Exploration 7 | 8 | 9 | def analyze_simulation(simulation_directory, output_params): 10 | """Analyze the simulation output. 11 | 12 | This method analyzes the output generated by the simulation to 13 | obtain the value of the optimization objective and other observables. 14 | The value of these parameters has to be given to the 15 | `output_params` dictionary. 16 | 17 | Parameters 18 | ---------- 19 | simulation_directory : str 20 | Path to the simulation folder where the output was generated. 21 | output_params : dict 22 | Dictionary where the value of the objectives and observables 23 | will be stored. There is one entry per parameter, where the key 24 | is the name of the parameter given by the user. 25 | 26 | Returns 27 | ------- 28 | dict 29 | The `output_params` dictionary with the results from the analysis. 30 | 31 | """ 32 | # Read back result from file 33 | with open("result.txt") as f: 34 | result = float(f.read()) 35 | # Fill in output parameters. 36 | output_params["f"] = result 37 | return output_params 38 | 39 | 40 | # Create VOCS object defining variables, objectives. 41 | vocs = VOCS( 42 | variables={ 43 | "x0": [0.0, 15.0], 44 | "x1": [0.0, 15.0], 45 | }, 46 | objectives={"f": "MAXIMIZE"}, 47 | ) 48 | 49 | 50 | # Create generator. 51 | gen = GridSamplingGenerator(vocs=vocs, n_steps=[5, 7]) 52 | 53 | 54 | # Create evaluator. 55 | ev = TemplateEvaluator( 56 | sim_template="template_simulation_script.py", 57 | analysis_func=analyze_simulation, 58 | ) 59 | 60 | 61 | # Create exploration. 62 | exp = Exploration( 63 | generator=gen, evaluator=ev, max_evals=10, sim_workers=4, run_async=True 64 | ) 65 | 66 | 67 | # To safely perform exploration, run it in the block below (this is needed 68 | # for some flavours of multiprocessing, namely spawn and forkserver) 69 | if __name__ == "__main__": 70 | exp.run() 71 | -------------------------------------------------------------------------------- /examples/dummy_random/run_example.py: -------------------------------------------------------------------------------- 1 | """Basic example of parallel random sampling with simulations.""" 2 | 3 | from gest_api.vocs import VOCS 4 | from optimas.generators import RandomSamplingGenerator 5 | from optimas.evaluators import TemplateEvaluator 6 | from optimas.explorations import Exploration 7 | 8 | 9 | def analyze_simulation(simulation_directory, output_params): 10 | """Analyze the simulation output. 11 | 12 | This method analyzes the output generated by the simulation to 13 | obtain the value of the optimization objective and other observables. 
14 | The value of these parameters has to be given to the 15 | `output_params` dictionary. 16 | 17 | Parameters 18 | ---------- 19 | simulation_directory : str 20 | Path to the simulation folder where the output was generated. 21 | output_params : dict 22 | Dictionary where the value of the objectives and observables 23 | will be stored. There is one entry per parameter, where the key 24 | is the name of the parameter given by the user. 25 | 26 | Returns 27 | ------- 28 | dict 29 | The `output_params` dictionary with the results from the analysis. 30 | 31 | """ 32 | # Read back result from file 33 | with open("result.txt") as f: 34 | result = float(f.read()) 35 | # Fill in output parameters. 36 | output_params["f"] = result 37 | return output_params 38 | 39 | 40 | # Create VOCS object defining variables, objectives. 41 | vocs = VOCS( 42 | variables={ 43 | "x0": [0.0, 15.0], 44 | "x1": [0.0, 15.0], 45 | }, 46 | objectives={"f": "MAXIMIZE"}, 47 | ) 48 | 49 | 50 | # Create generator. 51 | gen = RandomSamplingGenerator(vocs=vocs, distribution="normal") 52 | 53 | 54 | # Create evaluator. 55 | ev = TemplateEvaluator( 56 | sim_template="template_simulation_script.py", 57 | analysis_func=analyze_simulation, 58 | ) 59 | 60 | 61 | # Create exploration. 62 | exp = Exploration( 63 | generator=gen, evaluator=ev, max_evals=10, sim_workers=4, run_async=True 64 | ) 65 | 66 | 67 | # To safely perform exploration, run it in the block below (this is needed 68 | # for some flavours of multiprocessing, namely spawn and forkserver) 69 | if __name__ == "__main__": 70 | exp.run() 71 | -------------------------------------------------------------------------------- /.zenodo.json: -------------------------------------------------------------------------------- 1 | { 2 | "upload_type": "software", 3 | "creators": [ 4 | { 5 | "affiliation": "Deutsches Elektronen-Synchrotron", 6 | "name": "Ferran Pousa, Angel", 7 | "orcid": "0000-0001-6705-516X" 8 | }, 9 | { 10 | "affiliation": "Deutsches Elektronen-Synchrotron", 11 | "name": "Jalas, Soeren", 12 | "orcid": "0000-0002-5379-7514" 13 | }, 14 | { 15 | "affiliation": "Deutsches Elektronen-Synchrotron", 16 | "name": "Kirchen, Manuel", 17 | "orcid": "0000-0002-1897-6656" 18 | }, 19 | { 20 | "affiliation": "Deutsches Elektronen-Synchrotron", 21 | "name": "Martinez de la Ossa, Alberto", 22 | "orcid": "0000-0001-8158-0980" 23 | }, 24 | { 25 | "affiliation": "Deutsches Elektronen-Synchrotron, previously LBNL", 26 | "name": "Th\u00e9venet, Maxence", 27 | "orcid": "0000-0001-7216-2277" 28 | }, 29 | { 30 | "affiliation": "Argonne National Laboratory", 31 | "name": "Larson, Jeffrey", 32 | "orcid": "0000-0001-9924-2082" 33 | }, 34 | { 35 | "affiliation": "Argonne National Laboratory", 36 | "name": "Hudson, Stephen", 37 | "orcid": "0000-0002-7500-6138" 38 | }, 39 | { 40 | "affiliation": "Lawrence Berkeley National Laboratory", 41 | "name": "Huebl, Axel", 42 | "orcid": "0000-0003-1943-7141" 43 | }, 44 | { 45 | "affiliation": "Lawrence Berkeley National Laboratory", 46 | "name": "Vay, Jean-Luc", 47 | "orcid": "0000-0002-0040-799X" 48 | }, 49 | { 50 | "affiliation": "Lawrence Berkeley National Laboratory", 51 | "name": "Lehe, Remi", 52 | "orcid": "0000-0002-3656-9659" 53 | } 54 | ], 55 | "access_right": "open", 56 | "license": { 57 | "id": "BSD-3-Clause-LBNL" 58 | }, 59 | "language": "eng", 60 | "keywords": [ 61 | "research", 62 | "scalable", 63 | "optimization", 64 | "bayesian", 65 | "simulation", 66 | "high-performance-computing", 67 | "hpc", 68 | "gpu" 69 | ] 70 | } 71 | 
-------------------------------------------------------------------------------- /examples/multitask_lpa_fbpic_waket/run_opt.py: -------------------------------------------------------------------------------- 1 | """Multitask optimization of an LPA with Wake-T and FBPIC.""" 2 | 3 | from multiprocessing import set_start_method 4 | 5 | from optimas.core import Task 6 | from optimas.generators import AxMultitaskGenerator 7 | from optimas.evaluators import TemplateEvaluator, MultitaskEvaluator 8 | from optimas.explorations import Exploration 9 | from gest_api.vocs import VOCS 10 | 11 | from analysis_script import analyze_simulation 12 | 13 | 14 | # Create VOCS object. 15 | vocs = VOCS( 16 | variables={ 17 | "beam_i_1": [1.0, 10.0], # kA 18 | "beam_i_2": [1.0, 10.0], # kA 19 | "beam_z_i_2": [-10.0, 10.0], # µm 20 | "beam_length": [1.0, 20.0], # µm 21 | "trial_type": {"wake-t", "fbpic"}, 22 | }, 23 | objectives={"f": "MINIMIZE"}, 24 | observables=["energy_med", "energy_mad", "charge"], 25 | ) 26 | 27 | 28 | # Create tasks. 29 | lofi_task = Task("wake-t", n_init=96, n_opt=96) 30 | hifi_task = Task("fbpic", n_init=3, n_opt=3) 31 | 32 | 33 | # Create generator. 34 | gen = AxMultitaskGenerator( 35 | vocs=vocs, 36 | use_cuda=True, 37 | dedicated_resources=True, 38 | hifi_task=hifi_task, 39 | lofi_task=lofi_task, 40 | ) 41 | 42 | 43 | # Create evaluators for each task. 44 | ev_lofi = TemplateEvaluator( 45 | sim_template="template_simulation_script.py", 46 | analysis_func=analyze_simulation, 47 | sim_files=["bunch_utils.py", "custom_fld_diags.py", "custom_ptcl_diags.py"], 48 | ) 49 | ev_hifi = TemplateEvaluator( 50 | sim_template="template_simulation_script.py", 51 | analysis_func=analyze_simulation, 52 | sim_files=["bunch_utils.py", "custom_fld_diags.py", "custom_ptcl_diags.py"], 53 | ) 54 | 55 | # Create a multitask evaluator. This associates each task to each task 56 | # evaluator. 57 | ev = MultitaskEvaluator( 58 | tasks=[lofi_task, hifi_task], task_evaluators=[ev_lofi, ev_hifi] 59 | ) 60 | 61 | # Create exploration. 62 | n_batches = 50 63 | exp = Exploration( 64 | generator=gen, 65 | evaluator=ev, 66 | max_evals=( 67 | (lofi_task.n_opt + hifi_task.n_opt) * n_batches 68 | + lofi_task.n_init 69 | + hifi_task.n_init 70 | ), 71 | sim_workers=96, 72 | run_async=False, 73 | ) 74 | 75 | # Run exploration. 76 | if __name__ == "__main__": 77 | set_start_method("spawn") 78 | exp.run() 79 | -------------------------------------------------------------------------------- /examples/dummy_line_sampling/run_example.py: -------------------------------------------------------------------------------- 1 | """Basic example of parallel line sampling with simulations.""" 2 | 3 | from gest_api.vocs import VOCS, ContinuousVariable 4 | from optimas.generators import LineSamplingGenerator 5 | from optimas.evaluators import TemplateEvaluator 6 | from optimas.explorations import Exploration 7 | 8 | 9 | def analyze_simulation(simulation_directory, output_params): 10 | """Analyze the simulation output. 11 | 12 | This method analyzes the output generated by the simulation to 13 | obtain the value of the optimization objective and other observables. 14 | The value of these parameters has to be given to the 15 | `output_params` dictionary. 16 | 17 | Parameters 18 | ---------- 19 | simulation_directory : str 20 | Path to the simulation folder where the output was generated. 21 | output_params : dict 22 | Dictionary where the value of the objectives and observables 23 | will be stored. 
There is one entry per parameter, where the key 24 | is the name of the parameter given by the user. 25 | 26 | Returns 27 | ------- 28 | dict 29 | The `output_params` dictionary with the results from the analysis. 30 | 31 | """ 32 | # Read back result from file 33 | with open("result.txt") as f: 34 | result = float(f.read()) 35 | # Fill in output parameters. 36 | output_params["f"] = result 37 | return output_params 38 | 39 | 40 | # Create VOCS object defining variables, objectives. 41 | vocs = VOCS( 42 | variables={ 43 | "x0": ContinuousVariable(domain=[0.0, 15.0], default_value=5.0), 44 | "x1": ContinuousVariable(domain=[0.0, 15.0], default_value=6.0), 45 | }, 46 | objectives={"f": "MAXIMIZE"}, 47 | ) 48 | 49 | 50 | # Create generator. 51 | gen = LineSamplingGenerator(vocs=vocs, n_steps=[5, 7]) 52 | 53 | 54 | # Create evaluator. 55 | ev = TemplateEvaluator( 56 | sim_template="template_simulation_script.py", 57 | analysis_func=analyze_simulation, 58 | ) 59 | 60 | 61 | # Create exploration. 62 | exp = Exploration( 63 | generator=gen, evaluator=ev, max_evals=10, sim_workers=4, run_async=True 64 | ) 65 | 66 | 67 | # To safely perform exploration, run it in the block below (this is needed 68 | # for some flavours of multiprocessing, namely spawn and forkserver) 69 | if __name__ == "__main__": 70 | exp.run() 71 | -------------------------------------------------------------------------------- /tests/test_grid_sampling.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from gest_api.vocs import VOCS 3 | 4 | from optimas.explorations import Exploration 5 | from optimas.generators import GridSamplingGenerator 6 | from optimas.evaluators import FunctionEvaluator 7 | 8 | 9 | def eval_func(input_params, output_params): 10 | """Evaluation function for single-fidelity test""" 11 | x0 = input_params["x0"] 12 | x1 = input_params["x1"] 13 | result = -(x0 + 10 * np.cos(x0)) * (x1 + 5 * np.cos(x1)) 14 | output_params["f"] = result 15 | 16 | 17 | def test_grid_sampling(): 18 | """Test that grid sampling generates the expected configurations.""" 19 | 20 | # Create varying parameters. 21 | lower_bounds = [-3.0, 2.0] 22 | upper_bounds = [1.0, 5.0] 23 | n_steps = [7, 15] 24 | 25 | # Set number of evaluations. 26 | n_evals = np.prod(n_steps) 27 | 28 | vocs = VOCS( 29 | variables={ 30 | "x0": [lower_bounds[0], upper_bounds[0]], 31 | "x1": [lower_bounds[1], upper_bounds[1]], 32 | }, 33 | objectives={"f": "MAXIMIZE"}, 34 | ) 35 | 36 | # Create generator and run exploration. 37 | gen = GridSamplingGenerator(vocs=vocs, n_steps=n_steps) 38 | ev = FunctionEvaluator(function=eval_func) 39 | exploration = Exploration( 40 | generator=gen, 41 | evaluator=ev, 42 | max_evals=n_evals, 43 | sim_workers=2, 44 | exploration_dir_path="./tests_output/test_grid_sampling", 45 | ) 46 | exploration.run() 47 | 48 | # Get generated points. 49 | h = exploration.history 50 | h = h[h["sim_ended"]] 51 | x0_gen = h["x0"] 52 | x1_gen = h["x1"] 53 | 54 | # Get expected 1D steps along each variable. 55 | x0_steps = np.linspace(lower_bounds[0], upper_bounds[0], n_steps[0]) 56 | x1_steps = np.linspace(lower_bounds[1], upper_bounds[1], n_steps[1]) 57 | 58 | # Check that the scan along each variable is as expected. 59 | np.testing.assert_array_equal(np.unique(x0_gen), x0_steps) 60 | np.testing.assert_array_equal(np.unique(x1_gen), x1_steps) 61 | 62 | # Check that for every x0 step, the expected x1 steps are performed. 
63 | for x0_step in x0_steps: 64 | x1_in_x0_step = x1_gen[x0_gen == x0_step] 65 | np.testing.assert_array_equal(x1_in_x0_step, x1_steps) 66 | 67 | 68 | if __name__ == "__main__": 69 | test_grid_sampling() 70 | -------------------------------------------------------------------------------- /examples/dummy_mf/run_example.py: -------------------------------------------------------------------------------- 1 | """Basic example of parallel multi-fidelity Bayesian optimization with Ax.""" 2 | 3 | from gest_api.vocs import VOCS 4 | from optimas.generators import AxMultiFidelityGenerator 5 | from optimas.evaluators import TemplateEvaluator 6 | from optimas.explorations import Exploration 7 | 8 | 9 | def analyze_simulation(simulation_directory, output_params): 10 | """Analyze the simulation output. 11 | 12 | This method analyzes the output generated by the simulation to 13 | obtain the value of the optimization objective and other observables. 14 | The value of these parameters has to be given to the 15 | `output_params` dictionary. 16 | 17 | Parameters 18 | ---------- 19 | simulation_directory : str 20 | Path to the simulation folder where the output was generated. 21 | output_params : dict 22 | Dictionary where the value of the objectives and observables 23 | will be stored. There is one entry per parameter, where the key 24 | is the name of the parameter given by the user. 25 | 26 | Returns 27 | ------- 28 | dict 29 | The `output_params` dictionary with the results from the analysis. 30 | 31 | """ 32 | # Read back result from file 33 | with open("result.txt") as f: 34 | result = float(f.read()) 35 | # Fill in output parameters. 36 | output_params["f"] = result 37 | return output_params 38 | 39 | 40 | # Create VOCS object defining variables, objectives. 41 | vocs = VOCS( 42 | variables={ 43 | "x0": [0.0, 15.0], 44 | "x1": [0.0, 15.0], 45 | "resolution": [1.0, 8.0], 46 | }, 47 | objectives={"f": "MINIMIZE"}, 48 | ) 49 | 50 | 51 | # Create generator. 52 | gen = AxMultiFidelityGenerator( 53 | vocs=vocs, 54 | n_init=4, 55 | fidel_cost_intercept=2.0, 56 | ) 57 | gen.set_fidelity_param("resolution", fidelity_target_value=8.0) 58 | 59 | 60 | # Create evaluator. 61 | ev = TemplateEvaluator( 62 | sim_template="template_simulation_script.py", 63 | analysis_func=analyze_simulation, 64 | ) 65 | 66 | 67 | # Create exploration. 68 | exp = Exploration( 69 | generator=gen, evaluator=ev, max_evals=10, sim_workers=2, run_async=True 70 | ) 71 | 72 | 73 | # To safely perform exploration, run it in the block below (this is needed 74 | # for some flavours of multiprocessing, namely spawn and forkserver) 75 | if __name__ == "__main__": 76 | exp.run() 77 | -------------------------------------------------------------------------------- /examples/wake_t_fbpic_mt/run_example.py: -------------------------------------------------------------------------------- 1 | """Example multitask optimization with Wake-T and FBPIC. 2 | 3 | This example optimizes the focusing strength of an active plasma lens using 4 | Wake-T and FBPIC simulations by employing a multitask generator. 5 | 6 | The simulations are performed using the template defined in the 7 | `template_simulation_script.py` file, which executes Wake-T or FBPIC 8 | depending on the value of the `task` parameter. 9 | 10 | The calculation of the objective `f` is performed in the `analyze_simulation` 11 | function, which for convenience is here defined in the `analysis_script.py` 12 | file. 
13 | """ 14 | 15 | from optimas.core import Task 16 | from optimas.generators import AxMultitaskGenerator 17 | from optimas.evaluators import TemplateEvaluator, MultitaskEvaluator 18 | from optimas.explorations import Exploration 19 | from gest_api.vocs import VOCS 20 | 21 | from analysis_script import analyze_simulation 22 | 23 | 24 | # Create VOCS object. 25 | vocs = VOCS( 26 | variables={ 27 | "g_lens": [100.0, 1000.0], 28 | "trial_type": {"wake-t", "fbpic"}, 29 | }, 30 | objectives={"f": "MINIMIZE"}, 31 | ) 32 | 33 | # Create tasks. 34 | lofi_task = Task("wake-t", n_init=12, n_opt=12) 35 | hifi_task = Task("fbpic", n_init=2, n_opt=2) 36 | 37 | 38 | # Create generator. 39 | gen = AxMultitaskGenerator( 40 | vocs=vocs, 41 | lofi_task=lofi_task, 42 | hifi_task=hifi_task, 43 | ) 44 | 45 | 46 | # Create one evaluator for each task. In this example, both tasks use the same 47 | # template, but in principle they can have different template, executor, 48 | # analysis function, resources, etc. 49 | ev_lofi = TemplateEvaluator( 50 | sim_template="template_simulation_script.py", 51 | analysis_func=analyze_simulation, 52 | ) 53 | ev_hifi = TemplateEvaluator( 54 | sim_template="template_simulation_script.py", 55 | analysis_func=analyze_simulation, 56 | n_gpus=2, 57 | ) 58 | 59 | 60 | # Create a multitask evaluator. This associates each task to each task 61 | # evaluator. 62 | ev = MultitaskEvaluator( 63 | tasks=[lofi_task, hifi_task], task_evaluators=[ev_lofi, ev_hifi] 64 | ) 65 | 66 | 67 | # Create exploration. 68 | exp = Exploration(generator=gen, evaluator=ev, max_evals=100, sim_workers=12) 69 | 70 | 71 | # To safely perform exploration, run it in the block below (this is needed 72 | # for some flavours of multiprocessing, namely spawn and forkserver) 73 | if __name__ == "__main__": 74 | exp.run() 75 | -------------------------------------------------------------------------------- /tests/test_grid_sampling_mpi.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | from gest_api.vocs import VOCS 4 | 5 | from optimas.explorations import Exploration 6 | from optimas.generators import GridSamplingGenerator 7 | from optimas.evaluators import FunctionEvaluator 8 | 9 | 10 | def eval_func(input_params, output_params): 11 | """Evaluation function for single-fidelity test""" 12 | x0 = input_params["x0"] 13 | x1 = input_params["x1"] 14 | result = -(x0 + 10 * np.cos(x0)) * (x1 + 5 * np.cos(x1)) 15 | output_params["f"] = result 16 | 17 | 18 | @pytest.mark.mpi 19 | def test_grid_sampling(): 20 | """Test that grid sampling generates the expected configurations.""" 21 | 22 | # Create varying parameters. 23 | lower_bounds = [-3.0, 2.0] 24 | upper_bounds = [1.0, 5.0] 25 | n_steps = [7, 15] 26 | 27 | # Set number of evaluations. 28 | n_evals = np.prod(n_steps) 29 | 30 | vocs = VOCS( 31 | variables={ 32 | "x0": [lower_bounds[0], upper_bounds[0]], 33 | "x1": [lower_bounds[1], upper_bounds[1]], 34 | }, 35 | objectives={"f": "MAXIMIZE"}, 36 | ) 37 | 38 | # Create generator and run exploration. 39 | gen = GridSamplingGenerator(vocs=vocs, n_steps=n_steps) 40 | ev = FunctionEvaluator(function=eval_func) 41 | exploration = Exploration( 42 | generator=gen, 43 | evaluator=ev, 44 | max_evals=n_evals, 45 | sim_workers=2, 46 | exploration_dir_path="./tests_output/test_grid_sampling", 47 | libe_comms="mpi", 48 | ) 49 | 50 | exploration.run() 51 | 52 | # Get generated points. 
53 | if exploration.is_manager: 54 | h = exploration.history 55 | h = h[h["sim_ended"]] 56 | x0_gen = h["x0"] 57 | x1_gen = h["x1"] 58 | 59 | # Get expected 1D steps along each variable. 60 | x0_steps = np.linspace(lower_bounds[0], upper_bounds[0], n_steps[0]) 61 | x1_steps = np.linspace(lower_bounds[1], upper_bounds[1], n_steps[1]) 62 | 63 | # Check that the scan along each variable is as expected. 64 | np.testing.assert_array_equal(np.unique(x0_gen), x0_steps) 65 | np.testing.assert_array_equal(np.unique(x1_gen), x1_steps) 66 | 67 | # Check that for every x0 step, the expected x1 steps are performed. 68 | for x0_step in x0_steps: 69 | x1_in_x0_step = x1_gen[x0_gen == x0_step] 70 | np.testing.assert_array_equal(x1_in_x0_step, x1_steps) 71 | 72 | 73 | if __name__ == "__main__": 74 | test_grid_sampling() 75 | -------------------------------------------------------------------------------- /license.txt: -------------------------------------------------------------------------------- 1 | optimas Copyright (c) 2023, The Regents of the University of California, 2 | through Lawrence Berkeley National Laboratory (subject to receipt of 3 | any required approvals from the U.S. Dept. of Energy) and Deutsches 4 | Elektronen-Synchrotron . All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | (1) Redistributions of source code must retain the above copyright notice, 10 | this list of conditions and the following disclaimer. 11 | 12 | (2) Redistributions in binary form must reproduce the above copyright 13 | notice, this list of conditions and the following disclaimer in the 14 | documentation and/or other materials provided with the distribution. 15 | 16 | (3) Neither the name of the University of California, Lawrence Berkeley 17 | National Laboratory, U.S. Dept. of Energy, Deutsches 18 | Elektronen-Synchrotron nor the names of its contributors 19 | may be used to endorse or promote products derived from this software 20 | without specific prior written permission. 21 | 22 | 23 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 24 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 | ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 27 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 28 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 29 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 30 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 31 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 32 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 33 | POSSIBILITY OF SUCH DAMAGE. 
34 | 35 | You are under no obligation whatsoever to provide any bug fixes, patches, 36 | or upgrades to the features, functionality or performance of the source 37 | code ("Enhancements") to anyone; however, if you choose to make your 38 | Enhancements available either publicly, or directly to Lawrence Berkeley 39 | National Laboratory, without imposing a separate written license agreement 40 | for such Enhancements, then you hereby grant the following license: a 41 | non-exclusive, royalty-free perpetual license to install, use, modify, 42 | prepare derivative works, incorporate into other computer software, 43 | distribute, and sublicense such enhancements or derivative works thereof, 44 | in binary and source code form. 45 | -------------------------------------------------------------------------------- /doc/source/examples/bo_with_hipace.rst: -------------------------------------------------------------------------------- 1 | .. _bo-with-hipace: 2 | 3 | Optimization with HiPACE++ 4 | ========================== 5 | 6 | 7 | Description 8 | ~~~~~~~~~~~ 9 | 10 | This example shows how to perform a Bayesian optimization of a PWFA using 11 | `HiPACE++ `_. 12 | 13 | The setup is a simple driver-witness configuration where the witness is 14 | optimized to maximize the objective 15 | 16 | .. math:: 17 | 18 | f = \frac{\sqrt{Q} E_{MED}}{100 E_{MAD}} 19 | 20 | 21 | where :math:`Q` is the beam charge, :math:`E_{MED}` is the median energy, and 22 | :math:`E_{MAD}` is the median absolute deviation energy spread. The only 23 | optimization parameter is the charge: 24 | 25 | - ``'witness_charge'``: parameter in the range :math:`[0.05, 1.]` in units of 26 | :math:`\mathrm{nC}`. 27 | 28 | The optimization is carried out using an 29 | :class:`~optimas.generators.AxSingleFidelityGenerator` and a 30 | :class:`~optimas.evaluators.TemplateEvaluator`. In this case, the function 31 | ``analyze_simulation`` that analyzes the output of each simulation is defined 32 | in a separate file ``analysis_script.py`` and imported into the main 33 | optimas script. 34 | 35 | The example is set up to make use of a system of 4 GPUs, where each HiPACE++ 36 | simulation uses 2 GPUs and 2 simulations are carried out in parallel. 37 | 38 | If HiPACE++ is installed in a different environment than ``optimas``, make 39 | sure to specify ``env_script`` and ``env_mpi`` in the ``TemplateEvaluator``. 40 | 41 | Scripts 42 | ~~~~~~~ 43 | 44 | The files needed to run the optimization should be located in a folder 45 | (named e.g., ``optimization``) with the following structure: 46 | 47 | .. code-block:: bash 48 | 49 | optimization 50 | ├── run_example.py 51 | ├── template_simulation_script.py 52 | └── analysis_script.py 53 | 54 | The optimization is started by executing: 55 | 56 | .. code-block:: bash 57 | 58 | python run_example.py 59 | 60 | The scripts needed to run this example can be seen below. 61 | 62 | .. literalinclude:: ../../../examples/hipace/run_example.py 63 | :language: python 64 | :caption: run_example.py (:download:`download <../../../examples/hipace/run_example.py>`) 65 | 66 | .. literalinclude:: ../../../examples/hipace/template_simulation_script 67 | :language: python 68 | :caption: template_simulation_script.py (:download:`download <../../../examples/hipace/template_simulation_script>`) 69 | 70 | ..
literalinclude:: ../../../examples/hipace/analysis_script.py 71 | :language: python 72 | :caption: analysis_script.py (:download:`download <../../../examples/hipace/analysis_script.py>`) 73 | -------------------------------------------------------------------------------- /examples/astra/analysis_script.py: -------------------------------------------------------------------------------- 1 | """Defines the analysis function that runs after the simulation. 2 | 3 | This is an example calculation of the bunch length in µm, a combined 4 | normalized transverse emittance, and the emittances in both transverse planes. 5 | """ 6 | 7 | import numpy as np 8 | 9 | 10 | # Function to analyze the simulation result 11 | def analyze_simulation(simulation_directory, output_params): 12 | """Analyze the simulation output. 13 | 14 | This method analyzes the output generated by the simulation to 15 | obtain the value of the optimization objective and other analyzed 16 | parameters, if specified. The value of these parameters has to be 17 | given to the `output_params` dictionary. 18 | 19 | Parameters 20 | ---------- 21 | simulation_directory : str 22 | Path to the simulation folder where the output was generated. 23 | output_params : dict 24 | Dictionary where the value of the objectives and analyzed parameters 25 | will be stored. There is one entry per parameter, where the key 26 | is the name of the parameter given by the user. 27 | 28 | Returns 29 | ------- 30 | dict 31 | The `output_params` dictionary with the results from the analysis. 32 | 33 | """ 34 | try: 35 | # Read back results from files 36 | s, t, x_av, x_rms, xp_rms, em_n_x, x_xp = np.loadtxt( 37 | simulation_directory + "/ASTRA_example.Xemit.001", unpack=True 38 | ) 39 | s, t, y_av, y_rms, yp_rms, em_n_y, y_yp = np.loadtxt( 40 | simulation_directory + "/ASTRA_example.Yemit.001", unpack=True 41 | ) 42 | x, y, z, px, py, pz, t, charge, idx, flag = np.loadtxt( 43 | simulation_directory + "/ASTRA_example.0250.001", unpack=True 44 | ) 45 | z[1:] = z[1:] + z[0] # adding the position of the reference particle 46 | 47 | output_params["bunch_length"] = np.std(z) * 1e6 48 | output_params["emittance"] = np.log10( 49 | em_n_x[-1] * em_n_y[-1] * 1e12 50 | ) # normalized emittances in µm, logarithm for better optimization 51 | output_params["emittance_x"] = em_n_x[-1] 52 | output_params["emittance_y"] = em_n_y[-1] 53 | except Exception as exc: 54 | logf = open("exception.log", "w") 55 | logf.write( 56 | "Failed to open or evaluate {0}: {1}\n".format( 57 | str(simulation_directory), str(exc) 58 | ) 59 | ) 60 | return output_params 61 | 62 | 63 | # Not needed, for debugging only 64 | if __name__ == "__main__": 65 | analyze_simulation("path_to_simulation_result", {}) 66 | -------------------------------------------------------------------------------- /optimas/evaluators/function_evaluator.py: -------------------------------------------------------------------------------- 1 | """Contains the definition of the FunctionEvaluator class.""" 2 | 3 | from typing import Callable, Dict, List 4 | 5 | from optimas.sim_functions import run_function 6 | from optimas.core import VaryingParameter, Objective, Parameter 7 | from .base import Evaluator 8 | 9 | 10 | class FunctionEvaluator(Evaluator): 11 | """Evaluator class for executing an arbitrary function. 12 | 13 | Parameters 14 | ---------- 15 | function : callable 16 | The function to be evaluated. 17 | create_evaluation_dirs : bool 18 | Whether to create a directory for each evaluation. 
The directories will 19 | be located in `./evaluations` and be named `sim{trial_index}`. When 20 | using this option, the current working directory inside the ``function`` 21 | will be changed to the corresponding evaluation directory. 22 | By default, ``False``. 23 | redirect_logs_to_file : bool 24 | Whether to redirect the logs (stdout and stderr) of the evaluation 25 | function to a file (log.out and log.err). This can be useful to keep the 26 | logs of the exploration clean, preventing many processes from writing to the 27 | terminal at once. If enabled, `create_evaluation_dirs` will be set to `True`. 28 | 29 | """ 30 | 31 | def __init__( 32 | self, 33 | function: Callable, 34 | create_evaluation_dirs: bool = False, 35 | redirect_logs_to_file: bool = False, 36 | ) -> None: 37 | super().__init__(sim_function=run_function) 38 | self.function = function 39 | self._create_evaluation_dirs = create_evaluation_dirs 40 | self._redirect_logs_to_file = redirect_logs_to_file 41 | if self._redirect_logs_to_file: 42 | self._create_evaluation_dirs = True 43 | 44 | def get_sim_specs( 45 | self, 46 | varying_parameters: List[VaryingParameter], 47 | objectives: List[Objective], 48 | analyzed_parameters: List[Parameter], 49 | ) -> Dict: 50 | """Get the `sim_specs` for `libEnsemble`.""" 51 | # Get base sim_specs. 52 | sim_specs = super().get_sim_specs( 53 | varying_parameters, objectives, analyzed_parameters 54 | ) 55 | # Add evaluation function to sim_specs. 56 | sim_specs["user"]["evaluation_func"] = self.function 57 | sim_specs["user"]["redirect_logs_to_file"] = self._redirect_logs_to_file 58 | return sim_specs 59 | 60 | def get_libe_specs(self) -> Dict: 61 | """Get the `libE_specs` for `libEnsemble`.""" 62 | libE_specs = super().get_libe_specs() 63 | libE_specs["sim_dirs_make"] = self._create_evaluation_dirs 64 | return libE_specs 65 | -------------------------------------------------------------------------------- /optimas/generators/grid_sampling.py: -------------------------------------------------------------------------------- 1 | """Contains the definition of the grid sampling generator.""" 2 | 3 | from typing import List, Optional 4 | 5 | import numpy as np 6 | 7 | from optimas.core import Trial 8 | from gest_api.vocs import VOCS 9 | from .base import Generator 10 | 11 | 12 | class GridSamplingGenerator(Generator): 13 | r"""Sample an n-dimensional space with a uniform grid. 14 | 15 | This generator samples the given objectives in a uniform grid of ``n`` 16 | dimensions, where ``n`` is the number of ``varying_parameters``. Along each 17 | direction :math:`i` (i.e., along each varying parameter), the space is 18 | divided in :math:`n_\mathrm{steps,i}` evenly spaced steps, resulting in a 19 | total number of evaluations :math:`\prod_i n_\mathrm{steps,i}`. 20 | 21 | Parameters 22 | ---------- 23 | vocs : VOCS 24 | VOCS object specifying variables, objectives, constraints, and observables. 25 | n_steps : list of int 26 | Number of grid steps along each direction. 27 | 28 | """ 29 | 30 | def __init__( 31 | self, 32 | vocs: VOCS, 33 | n_steps: List[int], 34 | ) -> None: 35 | super().__init__(vocs=vocs) 36 | self._n_steps = n_steps if isinstance(n_steps, np.ndarray) else np.array(n_steps) 37 | self._create_configurations() 38 | 39 | def _create_configurations(self) -> None: 40 | """Create a list with all configurations to be evaluated.""" 41 | # Create a flattened meshgrid for each varying parameter.
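        # For illustration: with two parameters and n_steps=[2, 3], the two
        # flattened meshgrids produced below would be
        #   x0: [lo0, lo0, lo0, hi0, hi0, hi0]
        #   x1: [lo1, mid1, hi1, lo1, mid1, hi1]
        # i.e. 2 * 3 = 6 configurations, one per grid node.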
42 | var_linspaces = [] 43 | for var, n_steps_var in zip(self._varying_parameters, self._n_steps): 44 | var_linspaces.append( 45 | np.linspace(var.lower_bound, var.upper_bound, n_steps_var) 46 | ) 47 | var_mgrids = np.meshgrid(*var_linspaces, indexing="ij") 48 | var_mgrids_flat = [np.ravel(var_mgrid) for var_mgrid in var_mgrids] 49 | 50 | # Calculate total amount of trials and create all configurations. 51 | all_configs = [] 52 | n_trials = np.prod(self._n_steps) 53 | for i in range(n_trials): 54 | config = {} 55 | for var, mgrid in zip(self._varying_parameters, var_mgrids_flat): 56 | config[var.name] = mgrid[i] 57 | all_configs.append(config) 58 | self._all_configs = all_configs 59 | 60 | def suggest(self, num_points: Optional[int]) -> List[dict]: 61 | """Request the next set of points to evaluate.""" 62 | points = [] 63 | for _ in range(num_points): 64 | if self._all_configs: 65 | config = self._all_configs.pop(0) 66 | points.append(config) 67 | return points 68 | 69 | def _mark_trial_as_failed(self, trial: Trial): 70 | """No need to do anything, since there is no surrogate model.""" 71 | pass 72 | -------------------------------------------------------------------------------- /doc/source/examples/bo_with_warpx.rst: -------------------------------------------------------------------------------- 1 | .. _bo-with-warpx: 2 | 3 | Optimization with WarpX 4 | ======================= 5 | 6 | 7 | Description 8 | ~~~~~~~~~~~ 9 | 10 | This example shows how to perform a Bayesian optimization of a laser-plasma 11 | accelerator (LPA) using WarpX simulations. 12 | 13 | The objective function to optimize (minimize) is defined as 14 | 15 | .. math:: 16 | f = \epsilon_f + 100\epsilon_i \left(1.0 - \frac{Q_f}{Q_i}\right) 17 | 18 | 19 | where :math:`\epsilon_i` and :math:`\epsilon_f` are the initial and final beam emittances, respectively, 20 | and :math:`Q_i` and :math:`Q_f` are the initial and final beam charges. 21 | This objective is optimized by tuning 2 parameters: 22 | 23 | - ``'adjust_factor'``: parameter in the range :math:`[0.7, 1.05]` that scales the 24 | strength of the magnetic field between the first and second stage. 25 | The value ``adjust_factor=1`` corresponds to a focusing strength of :math:`454535.7\, \mathrm{T/m}`. 26 | - ``'lens_start'``: the position of the start (i.e. left edge) of the focusing plasma lens in millimetres, with range 27 | :math:`[0.32, 0.347]`. 28 | 29 | The optimization is carried out using an 30 | :class:`~optimas.generators.AxSingleFidelityGenerator` and a 31 | :class:`~optimas.evaluators.TemplateEvaluator`. In this case, the function 32 | ``analyze_simulation`` that analyzes the output of each simulation is defined 33 | in a separate file ``analysis_script.py`` and imported into the main 34 | optimas script. 35 | 36 | The example is set up to make use of a system of 4 GPUs, where each WarpX 37 | simulation uses a single GPU and 4 simulations are carried out in parallel. 38 | 39 | 40 | Scripts 41 | ~~~~~~~ 42 | 43 | The files needed to run the optimization should be located in a folder 44 | (named e.g., ``optimization``) with the following structure: 45 | 46 | .. code-block:: bash 47 | 48 | optimization 49 | ├── run_example.py 50 | ├── template_simulation_script 51 | ├── analysis_script.py 52 | └── warpx 53 | 54 | Note that the ``WarpX`` RZ executable ``warpx`` needs to be in the ``optimization`` folder. 55 | The optimization is started by executing: 56 | 57 | ..
code-block:: bash 58 | 59 | python run_example.py 60 | 61 | The scripts needed to run this example can be seen below. 62 | 63 | .. literalinclude:: ../../../examples/multi_stage/run_example.py 64 | :language: python 65 | :caption: run_example.py (:download:`download <../../../examples/multi_stage/run_example.py>`) 66 | 67 | .. literalinclude:: ../../../examples/multi_stage/template_simulation_script 68 | :language: bash 69 | :caption: template_simulation_script (:download:`download <../../../examples/multi_stage/template_simulation_script>`) 70 | 71 | .. literalinclude:: ../../../examples/multi_stage/analysis_script.py 72 | :language: python 73 | :caption: analysis_script.py (:download:`download <../../../examples/multi_stage/analysis_script.py>`) 74 | -------------------------------------------------------------------------------- /doc/source/user_guide/installation_maxwell.rst: -------------------------------------------------------------------------------- 1 | Installation on Maxwell (DESY) 2 | ------------------------------ 3 | 4 | Log into a Maxwell display node 5 | 6 | .. code:: 7 | 8 | ssh @max-display.desy.de 9 | 10 | 11 | Create a ``profile.optimas`` file in your home directory with the following content 12 | 13 | .. code:: 14 | 15 | source /etc/profile.d/modules.sh 16 | module purge 17 | module load intel/2022 18 | module load maxwell cuda/11.8 19 | module load mpi/mpich-x86_64 20 | module load maxwell mamba 21 | . mamba-init 22 | 23 | you can do this from the command line by executing ``cat > ~/profile.optimas`` and 24 | pasting the lines above. To save the file, finalize by pressing ``Ctrl+D``. 25 | 26 | Load the source file 27 | 28 | .. code:: 29 | 30 | source ~/profile.optimas 31 | 32 | 33 | Create a new environment for optimas 34 | 35 | .. code:: 36 | 37 | mamba create -n optimas_env python=3.11 38 | 39 | 40 | Activate the environment 41 | 42 | .. code:: 43 | 44 | mamba activate optimas_env 45 | 46 | 47 | Install ``mpi4py`` 48 | 49 | .. code:: 50 | 51 | pip install mpi4py --no-cache-dir 52 | 53 | 54 | Install ``optimas`` with all dependencies if you plan to do Bayesian optimization 55 | (see :ref:`dependencies` for more details). 56 | 57 | .. code:: 58 | 59 | pip install 'optimas[all]' 60 | 61 | 62 | Installing FBPIC and Wake-T (optional) 63 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 64 | A common use case for ``optimas`` is to optimize the output of a plasma acceleration 65 | simulation with FBPIC or Wake-T (or both). If you need any of these tools, you 66 | can follow the instructions below to install them in your ``optimas_env`` 67 | environment. 68 | 69 | Install FBPIC: 70 | 71 | .. code:: 72 | 73 | mamba install cudatoolkit=11.8 74 | pip install cupy-cuda11x 75 | pip install fbpic 76 | 77 | 78 | Install Wake-T: 79 | 80 | .. code:: 81 | 82 | pip install Wake-T 83 | 84 | Install openPMD-viewer for data analysis 85 | 86 | .. code:: 87 | 88 | pip install openPMD-viewer 89 | 90 | 91 | Running an optimas job 92 | ~~~~~~~~~~~~~~~~~~~~~~ 93 | The following script can be used to submit an ``optimas`` job to Maxwell. 94 | This example sends the job to the ``maxgpu`` queue. It assumes that you 95 | need one node with 4 GPUs (NVIDIA A100) and that your optimas script is called 96 | ``run_optimization.py``. 97 | 98 | .. 
code:: 99 | 100 | #!/bin/bash 101 | 102 | #SBATCH --partition=maxgpu 103 | #SBATCH --time=24:00:00 104 | #SBATCH --nodes=1 105 | #SBATCH --constraint="A100&GPUx4" 106 | #SBATCH --job-name optimas 107 | #SBATCH --output stdout 108 | #SBATCH --error stderr 109 | #SBATCH --mail-type BEGIN,END 110 | 111 | export SLURM_EXACT=1 112 | 113 | # Activate environment 114 | source ~/profile.optimas 115 | mamba activate optimas_env 116 | 117 | python run_optimization.py 118 | -------------------------------------------------------------------------------- /examples/astra/run_optimization_serial_ASTRA.py: -------------------------------------------------------------------------------- 1 | """Example of multiobjective Bayesian optimization using the serial ASTRA version. 2 | 3 | This example uses an ASTRA example from the ASTRA webpage: 4 | https://www.desy.de/~mpyflo/EXAMPLES/Manual_Example/ 5 | In order to run this example, please download the files "3_cell_L-Band.dat" and 6 | "Solenoid.dat", and create the particle distribution "Example.ini" by running 7 | ASTRA's generator with the generator.in file. 8 | Further optimas documentation and examples: 9 | https://optimas.readthedocs.io/en/latest/index.html 10 | """ 11 | 12 | from optimas.generators import AxSingleFidelityGenerator 13 | from optimas.evaluators import TemplateEvaluator 14 | from optimas.explorations import Exploration 15 | from gest_api.vocs import VOCS 16 | from analysis_script import analyze_simulation 17 | 18 | # Create VOCS object. 19 | # name of parameter, lower bound of values to be explored, 20 | # upper bound of values to be explored 21 | vocs = VOCS( 22 | variables={ 23 | "RF_phase": [-2.5, 2.5], 24 | "B_sol": [0.12, 0.38], 25 | }, 26 | objectives={ 27 | "bunch_length": "MINIMIZE", 28 | "emittance": "MINIMIZE", 29 | }, 30 | observables=["emittance_x", "emittance_y"], 31 | ) 32 | 33 | # Create generator. 34 | # Pick the generator to be used, here Single-fidelity Bayesian optimization. 35 | gen = AxSingleFidelityGenerator( 36 | vocs=vocs, 37 | n_init=8, 38 | ) 39 | 40 | 41 | # Create evaluator. 42 | # sim_template is the ASTRA input template; the parameters that are going 43 | # to be varied need to be replaced by the variable names defined in the VOCS. 44 | # The format in the ASTRA input is e.g. Phi(1)={{RF_phase}}. 45 | # analysis_func is the function that will analyze the output. 46 | # sim_files contains the path to the particle distribution and other files 47 | # needed for the ASTRA simulation, like field maps. 48 | # executable is the path to the ASTRA executable. 49 | ev = TemplateEvaluator( 50 | sim_template="ASTRA_example.in", 51 | analysis_func=analyze_simulation, 52 | sim_files=[ 53 | "Example.ini", 54 | "3_cell_L-Band.dat", 55 | "Solenoid.dat", 56 | ], 57 | executable="/path_to_ASTRA/Astra", 58 | n_procs=1, 59 | ) 60 | 61 | 62 | # Create exploration. 63 | # max_evals is the maximum number of evaluations. 64 | # max_evals / sim_workers is the number of simulation batches that are sent. 65 | # sim_workers is the number of simulations that are launched in parallel. 66 | # sim_workers should be smaller than the number of available CPU cores. 67 | # In case you already have some data from an optimization run but would like 68 | # to add further datapoints, set resume=True and max_evals to a higher number.
69 | exp = Exploration( 70 | generator=gen, evaluator=ev, max_evals=500, sim_workers=8, resume=False 71 | ) 72 | 73 | 74 | # To safely perform exploration, run it in the block below (this is needed 75 | # for some flavours of multiprocessing, namely spawn and forkserver) 76 | if __name__ == "__main__": 77 | exp.run() 78 | -------------------------------------------------------------------------------- /optimas/utils/other.py: -------------------------------------------------------------------------------- 1 | """Definition of other utilities used internally by optimas.""" 2 | 3 | from typing import Union, List, Dict 4 | 5 | import numpy as np 6 | import pandas as pd 7 | 8 | 9 | def convert_to_dataframe( 10 | data: Union[Dict, List[Dict], np.ndarray, pd.DataFrame], 11 | ) -> pd.DataFrame: 12 | """Convert input data to a pandas DataFrame. 13 | 14 | Parameters 15 | ---------- 16 | data : dict of lists, list of dicts, ndarray or DataFrame 17 | The input data, which can be a dictionary of lists, a list of 18 | dictionaries, a numpy structured array or a pandas dataframe. 19 | 20 | Returns 21 | ------- 22 | pd.DataFrame 23 | The converted input data. 24 | 25 | Raises 26 | ------ 27 | ValueError 28 | If the type of the input data is not supported. 29 | """ 30 | # Get fields in given data. 31 | if isinstance(data, np.ndarray): 32 | # Labels with multidimensional arrays are converted to a list of lists. 33 | d = { 34 | label: data[label].tolist() if data[label].ndim > 1 else data[label] 35 | for label in data.dtype.names 36 | } 37 | return pd.DataFrame(d) 38 | elif isinstance(data, pd.DataFrame): 39 | return data 40 | elif isinstance(data, dict): 41 | # Check whether the elements in the dictionary are arrays or not. 42 | # If they are not, covert to 1-element arrays for DataFrame initialization. 43 | element = data[list(data.keys())[0]] 44 | if not hasattr(element, "__len__"): 45 | for key, value in data.items(): 46 | data[key] = np.ones(1, dtype=type(value)) * value 47 | return pd.DataFrame(data) 48 | elif isinstance(data, list): 49 | fields = list(data[0].keys()) 50 | fields.sort() 51 | for row in data: 52 | row_fields = list(row.keys()) 53 | row_fields.sort() 54 | if row_fields != fields: 55 | raise ValueError("Not all dictionaries contain the same keys.") 56 | return pd.DataFrame(data) 57 | else: 58 | raise ValueError(f"Cannot convert {type(data)} to a pandas dataframe.") 59 | 60 | 61 | def get_df_with_selection(df: pd.DataFrame, select: Dict) -> pd.DataFrame: 62 | """Return the DataFrame after applying selection criterium. 63 | 64 | Parameters 65 | ---------- 66 | df : DataFrame 67 | The DataFrame object 68 | select: dict 69 | A dictionary containing the selection criteria to apply. 70 | e.g. 
{'f' : [None, -10.]} (get data with f < -10) 71 | """ 72 | condition = "" 73 | for key in select: 74 | if select[key][0] is not None: 75 | if condition != "": 76 | condition += " and " 77 | condition += "%s > %f" % (key, select[key][0]) 78 | if select[key][1] is not None: 79 | if condition != "": 80 | condition += " and " 81 | condition += "%s < %f" % (key, select[key][1]) 82 | 83 | return df.query(condition) 84 | -------------------------------------------------------------------------------- /doc/source/examples/bo_multitask_fbpic_waket.rst: -------------------------------------------------------------------------------- 1 | Multitask optimization with FBPIC and Wake-T 2 | ============================================ 3 | 4 | Description 5 | ~~~~~~~~~~~ 6 | 7 | This is an advanced example that shows how to perform a multitask Bayesian 8 | optimization using two simulation codes of different fidelity 9 | (FBPIC and Wake-T). The scripts provided here can be used to reproduce 10 | the results from the paper 11 | 12 | - "Bayesian optimization of laser-plasma accelerators 13 | assisted by reduced physical models" by A. Ferran Pousa, S. Jalas, M. Kirchen, 14 | A. Martinez de la Ossa, M. Thévenet, J. Larson, S. Hudson, A. Huebl, J.-L. Vay, 15 | and R. Lehe (`link `_). 16 | 17 | 18 | Requirements 19 | ~~~~~~~~~~~~ 20 | In addition to optimas, the following packages should be installed: 21 | 22 | - `FBPIC `_ 23 | - `Wake-T `_ 24 | - `VisualPIC `_ 25 | 26 | 27 | Scripts 28 | ~~~~~~~ 29 | 30 | Files included: 31 | 32 | - :download:`run_opt.py <../../../examples/multitask_lpa_fbpic_waket/run_opt.py>`: 33 | defines and launches the optimization with *optimas*. 34 | - :download:`template_simulation_script.py <../../../examples/multitask_lpa_fbpic_waket/template_simulation_script.py>`: 35 | template used by *optimas* to generate the FBPIC and Wake-T simulation 36 | scripts. 37 | - :download:`analysis_script.py <../../../examples/multitask_lpa_fbpic_waket/analysis_script.py>`: 38 | defines how the simulation data is analyzed to yield the value of the 39 | objective function. 40 | - :download:`bunch_utils.py <../../../examples/multitask_lpa_fbpic_waket/bunch_utils.py>`: 41 | contains methods for generating the beam particle distributions given to the 42 | simulations. 43 | - :download:`custom_fld_diags.py <../../../examples/multitask_lpa_fbpic_waket/custom_fld_diags.py>`: 44 | custom FBPIC field diagnostics that have been 45 | modified to generate the output with the same location and periodicity as 46 | Wake-T. 47 | - :download:`custom_ptcl_diags.py <../../../examples/multitask_lpa_fbpic_waket/custom_ptcl_diags.py>`: 48 | custom FBPIC particle diagnostics that have been 49 | modified to generate the output with the same location and periodicity as 50 | Wake-T. 51 | 52 | You can have a look at the main scripts below: 53 | 54 | .. literalinclude:: ../../../examples/multitask_lpa_fbpic_waket/run_opt.py 55 | :language: python 56 | :caption: run_opt.py (:download:`download <../../../examples/multitask_lpa_fbpic_waket/run_opt.py>`) 57 | 58 | .. literalinclude:: ../../../examples/multitask_lpa_fbpic_waket/template_simulation_script.py 59 | :language: python 60 | :caption: template_simulation_script.py (:download:`download <../../../examples/multitask_lpa_fbpic_waket/template_simulation_script.py>`) 61 | 62 | ..
literalinclude:: ../../../examples/multitask_lpa_fbpic_waket/analysis_script.py 63 | :language: python 64 | :caption: analysis_script.py (:download:`download <../../../examples/multitask_lpa_fbpic_waket/analysis_script.py>`) 65 | -------------------------------------------------------------------------------- /examples/dummy_mt/run_example.py: -------------------------------------------------------------------------------- 1 | """Basic example of parallel multitask Bayesian optimization with Ax.""" 2 | 3 | from gest_api.vocs import VOCS 4 | from optimas.core import Task 5 | from optimas.generators import AxMultitaskGenerator 6 | from optimas.evaluators import TemplateEvaluator, MultitaskEvaluator 7 | from optimas.explorations import Exploration 8 | 9 | 10 | def analyze_simulation(simulation_directory, output_params): 11 | """Analyze the simulation output. 12 | 13 | This method analyzes the output generated by the simulation to 14 | obtain the value of the optimization objective and other observables. 15 | The value of these parameters has to be given to the 16 | `output_params` dictionary. 17 | 18 | Parameters 19 | ---------- 20 | simulation_directory : str 21 | Path to the simulation folder where the output was generated. 22 | output_params : dict 23 | Dictionary where the value of the objectives and observables 24 | will be stored. There is one entry per parameter, where the key 25 | is the name of the parameter given by the user. 26 | 27 | Returns 28 | ------- 29 | dict 30 | The `output_params` dictionary with the results from the analysis. 31 | 32 | """ 33 | # Read back result from file 34 | with open("result.txt") as f: 35 | result = float(f.read()) 36 | # Fill in output parameters. 37 | output_params["f"] = result 38 | return output_params 39 | 40 | 41 | # Create VOCS object defining variables, objectives. 42 | vocs = VOCS( 43 | variables={ 44 | "x0": [0.0, 15.0], 45 | "x1": [0.0, 15.0], 46 | "trial_type": {"cheap_model", "expensive_model"}, 47 | }, 48 | objectives={"f": "MINIMIZE"}, 49 | ) 50 | 51 | 52 | # Create tasks. 53 | lofi_task = Task("cheap_model", n_init=10, n_opt=3) 54 | hifi_task = Task("expensive_model", n_init=2, n_opt=1) 55 | 56 | 57 | # Create generator. 58 | gen = AxMultitaskGenerator( 59 | vocs=vocs, 60 | lofi_task=lofi_task, 61 | hifi_task=hifi_task, 62 | ) 63 | 64 | 65 | # Create one evaluator for each task. In this example, both tasks use the same 66 | # template, but in principle they can have different template, executor, 67 | # analysis function, resources, etc. 68 | ev_lofi = TemplateEvaluator( 69 | sim_template="template_simulation_script.py", 70 | analysis_func=analyze_simulation, 71 | ) 72 | ev_hifi = TemplateEvaluator( 73 | sim_template="template_simulation_script.py", 74 | analysis_func=analyze_simulation, 75 | ) 76 | 77 | 78 | # Create a multitask evaluator. This associates each task to each task 79 | # evaluator. 80 | ev = MultitaskEvaluator( 81 | tasks=[lofi_task, hifi_task], task_evaluators=[ev_lofi, ev_hifi] 82 | ) 83 | 84 | 85 | # Create exploration. 
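# With run_async=True, a worker that finishes its evaluation receives a new
# trial immediately, instead of waiting for the whole batch of sim_workers
# evaluations to complete.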
86 | exp = Exploration( 87 | generator=gen, evaluator=ev, max_evals=30, sim_workers=4, run_async=True 88 | ) 89 | 90 | 91 | # To safely perform exploration, run it in the block below (this is needed 92 | # for some flavours of multiprocessing, namely spawn and forkserver) 93 | if __name__ == "__main__": 94 | exp.run() 95 | -------------------------------------------------------------------------------- /examples/ionization_injection_mf/analysis_script.py: -------------------------------------------------------------------------------- 1 | """Defines the analysis function that runs after the simulation.""" 2 | 3 | import os 4 | from openpmd_viewer.addons import LpaDiagnostics 5 | import numpy as np 6 | from scipy.constants import e 7 | 8 | 9 | def weighted_median(data, weights): 10 | """Compute the weighted quantile of a 1D numpy array. 11 | 12 | Parameters 13 | ---------- 14 | data : ndarray 15 | Input array (one dimension). 16 | weights : ndarray 17 | Array with the weights of the same size of `data`. 18 | quantile : float 19 | Quantile to compute. It must have a value between 0 and 1. 20 | 21 | Returns 22 | ------- 23 | quantile_1D : float 24 | The output value. 25 | 26 | """ 27 | quantile = 0.5 28 | # Check the data 29 | if not isinstance(data, np.matrix): 30 | data = np.asarray(data) 31 | if not isinstance(weights, np.matrix): 32 | weights = np.asarray(weights) 33 | nd = data.ndim 34 | if nd != 1: 35 | raise TypeError("data must be a one dimensional array") 36 | ndw = weights.ndim 37 | if ndw != 1: 38 | raise TypeError("weights must be a one dimensional array") 39 | if data.shape != weights.shape: 40 | raise TypeError("the length of data and weights must be the same") 41 | if (quantile > 1.0) or (quantile < 0.0): 42 | raise ValueError("quantile must have a value between 0. and 1.") 43 | # Sort the data 44 | ind_sorted = np.argsort(data) 45 | sorted_data = data[ind_sorted] 46 | sorted_weights = weights[ind_sorted] 47 | # Compute the auxiliary arrays 48 | Sn = np.cumsum(sorted_weights) 49 | # TODO: Check that the weights do not sum zero 50 | # assert Sn != 0, "The sum of the weights must not be zero" 51 | Pn = (Sn - 0.5 * sorted_weights) / Sn[-1] 52 | # Get the value of the weighted median 53 | return np.interp(quantile, Pn, sorted_data) 54 | 55 | 56 | def weighted_mad(x, w): 57 | """Calculate weighted median absolute deviation.""" 58 | med = weighted_median(x, w) 59 | mad = weighted_median(np.abs(x - med), w) 60 | return med, mad 61 | 62 | 63 | def analyze_simulation(simulation_directory, output_params): 64 | """Analyze the output of the simulation.""" 65 | # Define/calculate the objective function 'f' as well as the diagnostic 66 | # quantities listed in `analyzed_quantities` above. 
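# The steps below open the openPMD output, select particles with uz > 10 and
# within +/-15 µm transversally, and build the objective from the beam charge
# and the median/MAD energy statistics.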
67 | d = LpaDiagnostics(os.path.join(simulation_directory, "diags/hdf5")) 68 | 69 | uz, w = d.get_particle( 70 | ["uz", "w"], 71 | iteration=1, 72 | select={"uz": [10, None], "x": [-15e-6, 15e-6], "y": [-15e-6, 15e-6]}, 73 | ) 74 | q = w.sum() * e * 1e12 75 | if len(w) < 2: # Need at least 2 particles to calculate energy spread 76 | output_params["f"] = 0 77 | else: 78 | med, mad = weighted_mad(uz / 2, w) 79 | output_params["f"] = -np.sqrt(q) * med / mad / 100 80 | output_params["charge"] = q 81 | output_params["energy_med"] = med 82 | output_params["energy_mad"] = mad 83 | 84 | return output_params 85 | -------------------------------------------------------------------------------- /doc/source/examples/ps_grid_sampling.rst: -------------------------------------------------------------------------------- 1 | Grid sampling 2 | ============= 3 | 4 | 5 | Description 6 | ~~~~~~~~~~~ 7 | 8 | This example shows how to perform a grid sampling parameter scan using a 9 | :class:`~optimas.generators.GridSamplingGenerator` and a 10 | :class:`~optimas.evaluators.TemplateEvaluator`. 11 | 12 | The template simulation script evaluates a 13 | simple function of two parameters :math:`x_0` and :math:`x_1`: 14 | 15 | .. math:: 16 | 17 | f(x_0, x_1) = -(x_0 + 10 \cos(x_0)) (x_1 + 5\cos(x_1)) 18 | 19 | and stores the outcome in a text file ``result.txt``. The ``analysis_func`` 20 | simply reads the value in this file. 21 | 22 | You can adapt this example to your needs by replacing this 23 | basic template with an actual simulation and writing the corresponding 24 | analysis function. See :ref:`optimas-with-simulations` for more details. 25 | 26 | The :class:`~optimas.generators.GridSamplingGenerator` generates a uniform 27 | multidimensional grid of samples to evaluate. The grid extends from the lower 28 | to the upper bound of each :class:`~optimas.core.VaryingParameter` and is 29 | divided into ``n_steps`` steps. In this case, 30 | where :math:`l_b=0` and :math:`u_b=15`, the grid of samples looks like: 31 | 32 | 33 | .. plot:: 34 | :show-source-link: False 35 | 36 | 37 | import importlib.util 38 | 39 | spec = importlib.util.spec_from_file_location('run_example', '../../../examples/dummy_grid_sampling/run_example.py') 40 | module = importlib.util.module_from_spec(spec) 41 | spec.loader.exec_module(module) 42 | gen = module.gen 43 | 44 | all_trials = [] 45 | while True: 46 | trial = gen.ask_trials(1) 47 | if trial: 48 | all_trials.append(trial[0]) 49 | else: 50 | break 51 | x0 = np.zeros(len(all_trials)) 52 | x1 = np.zeros(len(all_trials)) 53 | 54 | for i, trial in enumerate(all_trials): 55 | trial_params = trial.parameters_as_dict() 56 | x0[i] = trial_params['x0'] 57 | x1[i] = trial_params['x1'] 58 | 59 | fig, ax = plt.subplots() 60 | ax.scatter(x0, x1, s=3, label='generated evaluations') 61 | ax.set( 62 | xlabel=gen.varying_parameters[0].name, 63 | ylabel=gen.varying_parameters[1].name 64 | ) 65 | ax.legend(loc='upper right') 66 | 67 | 68 | Scripts 69 | ~~~~~~~ 70 | 71 | The two files needed to run this example should be located in the same folder 72 | (named e.g., ``example``): 73 | 74 | .. code-block:: bash 75 | 76 | example 77 | ├── run_example.py 78 | └── template_simulation_script.py 79 | 80 | The example is executed by running 81 | 82 | .. code-block:: bash 83 | 84 | python run_example.py 85 | 86 | ..
literalinclude:: ../../../examples/dummy_grid_sampling/run_example.py 87 | :language: python 88 | :caption: run_example.py (:download:`download <../../../examples/dummy_grid_sampling/run_example.py>`) 89 | 90 | .. literalinclude:: ../../../examples/dummy_grid_sampling/template_simulation_script.py 91 | :language: python 92 | :caption: template_simulation_script.py (:download:`download <../../../examples/dummy_grid_sampling/template_simulation_script.py>`) 93 | -------------------------------------------------------------------------------- /doc/source/user_guide/installation_juwels.rst: -------------------------------------------------------------------------------- 1 | Installation on JUWELS Booster (JSC) 2 | ------------------------------------ 3 | 4 | Log into a JUWELS booster node either with ``ssh`` or by opening a terminal 5 | in the `JupyterLabs `_ (choose JUWELS as 6 | system and LoginNodeBooster as Partition). 7 | 8 | 9 | Create a ``profile.optimas`` file in your home directory with the following content 10 | 11 | .. code:: 12 | 13 | module purge 14 | module load Stages/2023 15 | module load GCC 16 | module load ParaStationMPI 17 | module load CUDA 18 | module load SciPy-bundle 19 | module load mpi4py 20 | 21 | you can do this from the command line by executing ``cat > ~/profile.optimas`` and 22 | pasting the lines above. To save the file, finalize by pressing ``Ctrl+D``. 23 | 24 | Load the source file 25 | 26 | .. code:: 27 | 28 | source ~/profile.optimas 29 | 30 | 31 | Create a new environment for optimas 32 | 33 | .. code:: 34 | 35 | python -m venv $PROJECT//pyenvs/optimas_env 36 | 37 | 38 | Activate the environment 39 | 40 | .. code:: 41 | 42 | source $PROJECT//pyenvs/optimas_env/bin/activate 43 | 44 | 45 | Install ``optimas`` with all dependencies if you plan to do Bayesian optimization 46 | (see :ref:`dependencies` for more details). 47 | 48 | .. code:: 49 | 50 | pip install 'optimas[all]' 51 | 52 | 53 | Installing FBPIC and Wake-T (optional) 54 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 55 | A common use case for ``optimas`` is to optimize the output of a plasma acceleration 56 | simulation with FBPIC or Wake-T (or both). If you need any of these tools, you 57 | can follow the instructions below to install them in your ``optimas_env`` 58 | environment. 59 | 60 | Install FBPIC: 61 | 62 | .. code:: 63 | 64 | pip install cupy-cuda117 65 | pip install fbpic 66 | 67 | 68 | Install Wake-T: 69 | 70 | .. code:: 71 | 72 | pip install Wake-T 73 | 74 | Install openPMD-viewer for data analysis 75 | 76 | .. code:: 77 | 78 | pip install openPMD-viewer 79 | 80 | 81 | Running an optimas job 82 | ~~~~~~~~~~~~~~~~~~~~~~ 83 | The following script can be used to submit an ``optimas`` job to the JUWELS 84 | booster (i.e., the ``booster`` partition). For testing, use the 85 | ``develbooster`` partition instead. This script assumes that you 86 | need one node with 4 GPUs and that your optimas script is called 87 | ``run_optimization.py``. 88 | 89 | Make sure to replace ``project_account``, ``user_email`` and ``username`` with 90 | your data. 91 | 92 | .. 
code:: 93 | 94 | #!/bin/bash 95 | 96 | #SBATCH --job-name optimas 97 | #SBATCH --nodes=1 98 | #SBATCH --partition=booster 99 | #SBATCH --account= 100 | #SBATCH --time=12:00:00 101 | #SBATCH --output=stdout 102 | #SBATCH --error=stderr 103 | #SBATCH --mail-type=END 104 | #SBATCH --mail-user= 105 | 106 | # Activate environment 107 | source ~/profile.optimas 108 | source $PROJECT//pyenvs/optimas_env/bin/activate 109 | 110 | python run_optimization.py 111 | -------------------------------------------------------------------------------- /doc/source/examples/bo_with_fbpic.rst: -------------------------------------------------------------------------------- 1 | .. _bo-with-fbpic: 2 | 3 | Optimization with FBPIC 4 | ======================= 5 | 6 | 7 | Description 8 | ~~~~~~~~~~~ 9 | 10 | This examples shows how to perform a Bayesian optimization of a laser-plasma 11 | accelerator (LPA) using FBPIC simulations. 12 | 13 | The LPA to be optimized is based on the LUX design [1]_ using ionization 14 | injection. 15 | 16 | The objective function to optimize (maximize) is defined as 17 | 18 | .. math:: 19 | 20 | f = \frac{\sqrt{Q} E_{MED}}{100 E_{MAD}} 21 | 22 | 23 | where :math:`Q` is the beam charge, :math:`E_{MED}` is the median energy, and 24 | :math:`E_{MAD}` is the median absolute deviation energy spread. This objective 25 | is optimized by tuning 4 parameters: 26 | 27 | - ``'laser_scale'``: parameter in the range :math:`[0.7, 1.05]` that scales 28 | the energy of the laser, which for ``laser_scale=1`` is 29 | :math:`2.56 \, \mathrm{J}`. 30 | - ``'z_foc'``: the focal position of the laser in millimetres, with range 31 | :math:`[3, 7.5]`. 32 | - ``'mult'``: parameter in the range :math:`[0.1, 1.5]` that scales the 33 | concentration of nitrogen in the injection region. 34 | - ``'plasma_scale'``: parameter in the range :math:`[0.6, 0.8]` that scales 35 | the plasma density of all species. 36 | 37 | The optimization is carried out using an 38 | :class:`~optimas.generators.AxSingleFidelityGenerator` and a 39 | :class:`~optimas.evaluators.TemplateEvaluator`. In this case, the function 40 | ``analyze_simulation`` that analyzes the output of each simulation is defined 41 | in a separate file ``analysis_script.py`` and imported into the main 42 | optimas script. 43 | 44 | The example is set up to make use of a system of 4 GPUs, where each FBPIC 45 | simulation uses a single GPU and 4 simulations are carried out in parallel. 46 | 47 | 48 | Scripts 49 | ~~~~~~~ 50 | 51 | The files needed to run the optimization should be located in a folder 52 | (named e.g., ``optimization``) with the following structure: 53 | 54 | .. code-block:: bash 55 | 56 | optimization 57 | ├── run_example.py 58 | ├── template_simulation_script.py 59 | └── analysis_script.py 60 | 61 | The optimization is started by executing: 62 | 63 | .. code-block:: bash 64 | 65 | python run_example.py 66 | 67 | The scripts needed to run this example can be seen below. 68 | 69 | .. literalinclude:: ../../../examples/ionization_injection/run_example.py 70 | :language: python 71 | :caption: run_example.py (:download:`download <../../../examples/ionization_injection/run_example.py>`) 72 | 73 | .. literalinclude:: ../../../examples/ionization_injection/template_simulation_script.py 74 | :language: python 75 | :caption: template_simulation_script.py (:download:`download <../../../examples/ionization_injection/template_simulation_script.py>`) 76 | 77 | .. 
literalinclude:: ../../../examples/ionization_injection/analysis_script.py 78 | :language: python 79 | :caption: analysis_script.py (:download:`download <../../../examples/ionization_injection/analysis_script.py>`) 80 | 81 | 82 | References 83 | ~~~~~~~~~~ 84 | 85 | .. [1] Sören Jalas, Manuel Kirchen, Philipp Messner, Paul Winkler, Lars Hübner, 86 | Julian Dirkwinkel, Matthias Schnepp, Remi Lehe, and Andreas R. Maier 87 | `Phys. Rev. Lett. 126, 104801 `_ 88 | (2021) 89 | -------------------------------------------------------------------------------- /doc/source/examples/bo_with_astra.rst: -------------------------------------------------------------------------------- 1 | .. _bo-with-astra: 2 | 3 | Multi-objective optimization with ASTRA 4 | ======================================= 5 | 6 | 7 | Description 8 | ~~~~~~~~~~~ 9 | 10 | This example shows how to perform a multi-objective Bayesian optimization of 11 | beam parameters using `ASTRA `_. 12 | 13 | The setup is based on a beamline example from the ASTRA manual which can be found 14 | `here `_. 15 | 16 | Two optimization parameters are used: 17 | 18 | - the RF phase of the cavity ``'RF_phase'``, which is varied in the range :math:`[-2.5, 2.5]`, 19 | - and the solenoid strength ``'B_sol'``, which is varied in the range :math:`[0.12, 0.38]`. 20 | 21 | Two beam parameters are minimized: 22 | 23 | - the bunch_length, 24 | - and the transverse emittances ``'emittance'`` in :math:`\mathrm{µm}`, which are combined into one single parameter: :math:`\log \epsilon_{n,x} \epsilon_{n,y}` and where the logarithm is used for better optimization as the emittance can span over several orders of magnitude. 25 | 26 | In addition, the transverse normalized emittances in :math:`x` and :math:`y` are stored as additional analyzed parameters ``'emittance_x'`` and ``'emittance_y'``. 27 | 28 | The optimization is carried out using an 29 | :class:`~optimas.generators.AxSingleFidelityGenerator` and a 30 | :class:`~optimas.evaluators.TemplateEvaluator`. In this case, the function 31 | ``analyze_simulation`` that analyzes the output of each simulation is defined 32 | in a separate file ``analysis_script.py`` and imported into the main 33 | optimas script. 34 | 35 | The ASTRA simulation template ``ASTRA_example.in`` requires additional files. These can be downloaded from the ASTRA `website `_ and are the input particle distribution ``Example.ini``, the RF field profile ``3_cell_L-Band.dat``, and the solenoid field profile ``Solenoid.dat``. 36 | These files need to be passed to the ``TemplateEvaluator`` using the ``sim_files`` argument. 37 | 38 | The path to the ASTRA executable needs to be specified in the ``TemplateEvaluator`` using the ``executable`` argument. 39 | 40 | Scripts 41 | ~~~~~~~ 42 | 43 | The files needed to run the optimization should be located in a folder 44 | (named e.g., ``optimization``) with the following structure: 45 | 46 | .. code-block:: bash 47 | 48 | optimization 49 | ├── run_optimization_serial_ASTRA.py 50 | ├── ASTRA_example.in 51 | ├── analysis_script.py 52 | ├── Example.ini 53 | ├── 3_cell_L-Band.dat 54 | └── Solenoid.dat 55 | 56 | The optimization is started by executing: 57 | 58 | .. code-block:: bash 59 | 60 | python run_optimization_serial_ASTRA.py 61 | 62 | The main scripts needed to run this example can be seen below. 63 | 64 | .. 
literalinclude:: ../../../examples/astra/run_optimization_serial_ASTRA.py 65 | :language: python 66 | :caption: run_optimization_serial_ASTRA.py (:download:`download <../../../examples/astra/run_optimization_serial_ASTRA.py>`) 67 | 68 | .. literalinclude:: ../../../examples/astra/ASTRA_example.in 69 | :language: none 70 | :caption: ASTRA_example.in (:download:`download <../../../examples/astra/ASTRA_example.in>`) 71 | 72 | .. literalinclude:: ../../../examples/astra/analysis_script.py 73 | :language: python 74 | :caption: analysis_script.py (:download:`download <../../../examples/astra/analysis_script.py>`) 75 | -------------------------------------------------------------------------------- /doc/source/examples/ps_line_sampling.rst: -------------------------------------------------------------------------------- 1 | Line sampling 2 | ============= 3 | 4 | 5 | Description 6 | ~~~~~~~~~~~ 7 | 8 | This example shows how to perform a line sampling parameter scan using a 9 | :class:`~optimas.generators.LineSamplingGenerator` and a 10 | :class:`~optimas.evaluators.TemplateEvaluator`. 11 | 12 | The template simulation script evaluates a 13 | simple function of two parameters :math:`x_0` and :math:`x_1`: 14 | 15 | .. math:: 16 | 17 | f(x_0, x_1) = -(x_0 + 10 \cos(x_0)) (x_1 + 5\cos(x_1)) 18 | 19 | and stores the outcome in a text file ``result.txt``. The ``analysis_func`` 20 | simply reads the value in this file. 21 | 22 | You can adapt this example to your needs by replacing this 23 | basic template with an actual simulation and writing the corresponding 24 | analysis function. See :ref:`optimas-with-simulations` for more details. 25 | 26 | The :class:`~optimas.generators.LineSamplingGenerator` generates a 27 | multidimensional distribution of samples where, as opposed to the 28 | :class:`~optimas.generators.GridSamplingGenerator`, only one 29 | :class:`~optimas.core.VaryingParameter` is varied at a time while the others 30 | remain at their default values. Each parameter varies between its lower 31 | and upper bound in ``n_steps`` equally-spaced steps. In this case, 32 | where :math:`x_0` and :math:`x_1` have default values of :math:`5` and 33 | :math:`6`, respectively, and lower and upper bounds :math:`l_b=0` and 34 | :math:`u_b=15`, the grid of samples looks like: 35 | 36 | 37 | .. plot:: 38 | :show-source-link: False 39 | 40 | import importlib.util 41 | 42 | spec = importlib.util.spec_from_file_location('run_example', '../../../examples/dummy_line_sampling/run_example.py') 43 | module = importlib.util.module_from_spec(spec) 44 | spec.loader.exec_module(module) 45 | gen = module.gen 46 | 47 | all_trials = [] 48 | while True: 49 | trial = gen.ask_trials(1) 50 | if trial: 51 | all_trials.append(trial[0]) 52 | else: 53 | break 54 | x0 = np.zeros(len(all_trials)) 55 | x1 = np.zeros(len(all_trials)) 56 | 57 | for i, trial in enumerate(all_trials): 58 | trial_params = trial.parameters_as_dict() 59 | x0[i] = trial_params['x0'] 60 | x1[i] = trial_params['x1'] 61 | 62 | fig, ax = plt.subplots() 63 | ax.scatter(x0, x1, s=3, label='generated evaluations') 64 | ax.set( 65 | xlabel=gen.varying_parameters[0].name, 66 | ylabel=gen.varying_parameters[1].name 67 | ) 68 | ax.legend(loc='upper right') 69 | 70 | 71 | Scripts 72 | ~~~~~~~ 73 | 74 | The two files needed to run this example should be located in the same folder 75 | (named e.g., ``example``): 76 | 77 | ..
code-block:: bash 78 | 79 | example 80 | ├── run_example.py 81 | └── template_simulation_script.py 82 | 83 | The example is executed by running 84 | 85 | .. code-block:: bash 86 | 87 | python run_example.py 88 | 89 | You can find both example scripts below. 90 | 91 | .. literalinclude:: ../../../examples/dummy_line_sampling/run_example.py 92 | :language: python 93 | :caption: run_example.py (:download:`download <../../../examples/dummy_line_sampling/run_example.py>`) 94 | 95 | .. literalinclude:: ../../../examples/dummy_line_sampling/template_simulation_script.py 96 | :language: python 97 | :caption: template_simulation_script.py (:download:`download <../../../examples/dummy_line_sampling/template_simulation_script.py>`) 98 | -------------------------------------------------------------------------------- /doc/source/index.rst: -------------------------------------------------------------------------------- 1 | :html_theme.sidebar_secondary.remove: 2 | 3 | optimas documentation 4 | ===================== 5 | 6 | 7 | **Date**: |today| **Version**: |release| 8 | 9 | ``optimas`` is an `open-source `_ 10 | Python library that enables highly scalable 11 | parallel optimization, from a typical laptop to exascale HPC systems. It is 12 | built on top of 13 | `libEnsemble `_. 14 | 15 | .. grid:: 1 1 3 3 16 | :gutter: 2 17 | 18 | .. grid-item-card:: User guide 19 | :text-align: center 20 | 21 | :octicon:`book;3em` 22 | 23 | Instructions for getting the code up and running and basic information 24 | about optimas. 25 | 26 | +++ 27 | 28 | .. button-ref:: user_guide/index 29 | :expand: 30 | :color: primary 31 | :click-parent: 32 | 33 | To the user guide 34 | 35 | 36 | .. grid-item-card:: Examples 37 | :text-align: center 38 | 39 | :octicon:`light-bulb;3em` 40 | 41 | Quick examples showcasing how to use the code. 42 | 43 | +++ 44 | 45 | .. button-ref:: examples/index 46 | :expand: 47 | :color: primary 48 | :click-parent: 49 | 50 | To the examples 51 | 52 | .. grid-item-card:: API reference 53 | :text-align: center 54 | 55 | :octicon:`code;3em` 56 | 57 | Detailed documentation of the optimas API. 58 | 59 | +++ 60 | 61 | .. button-ref:: api/index 62 | :expand: 63 | :color: primary 64 | :click-parent: 65 | 66 | To the reference guide 67 | 68 | 69 | Contributing institutions 70 | ~~~~~~~~~~~~~~~~~~~~~~~~~ 71 | 72 | .. rst-class:: text-center 73 | 74 | |pic1| |pic1b| |nbsp| |nbsp| |nbsp| |pic2| |pic2b| |nbsp| |nbsp| |nbsp| |pic3| |pic3b| 75 | 76 | 77 | .. |nbsp| unicode:: U+00A0 .. NO-BREAK SPACE 78 | 79 | .. |pic1| image:: https://upload.wikimedia.org/wikipedia/commons/5/55/Desy_logo_3c_web.svg 80 | :height: 40px 81 | :target: https://www.desy.de/ 82 | :class: only-light 83 | 84 | .. |pic1b| image:: https://images.squarespace-cdn.com/content/v1/56ebbc46746fb9377676109c/071bcfa5-f535-4d69-a1ca-a8212c85ac91/DESY_logo_white_web.png 85 | :height: 40px 86 | :target: https://www.desy.de/ 87 | :class: only-dark 88 | 89 | .. |pic2| image:: https://github.com/optimas-org/optimas/assets/20479420/203e3eb8-1bd2-49a3-a22f-139409919558 90 | :height: 50px 91 | :target: https://www.lbl.gov/ 92 | :class: only-light 93 | 94 | .. |pic2b| image:: https://github.com/optimas-org/optimas/assets/20479420/e4a3b2d2-5178-48ca-9d71-0f13f90e763c 95 | :height: 50px 96 | :target: https://www.lbl.gov/ 97 | :class: only-dark 98 | 99 | .. 
|pic3| image:: https://www.alcf.anl.gov/sites/default/files/styles/large/public/2020-01/Argonne_cmyk_black.png?itok=-df89dHw 100 | :height: 30px 101 | :target: https://www.anl.gov/ 102 | :class: only-light 103 | 104 | .. |pic3b| image:: https://www.alcf.anl.gov/sites/default/files/styles/large/public/2020-01/Argonne_wireframe_white_vector.png?itok=mXUvxazY 105 | :height: 30px 106 | :target: https://www.anl.gov/ 107 | :class: only-dark 108 | 109 | 110 | .. toctree:: 111 | :hidden: 112 | :maxdepth: 4 113 | 114 | user_guide/index 115 | examples/index 116 | api/index 117 | -------------------------------------------------------------------------------- /doc/source/examples/ps_random_sampling.rst: -------------------------------------------------------------------------------- 1 | Random sampling 2 | =============== 3 | 4 | 5 | Description 6 | ~~~~~~~~~~~ 7 | 8 | This example shows how to perform a random parameter scan using a 9 | :class:`~optimas.generators.RandomSamplingGenerator` and a 10 | :class:`~optimas.evaluators.TemplateEvaluator`. 11 | 12 | The template simulation script evaluates a 13 | simple function of two parameters :math:`x_0` and :math:`x_1`: 14 | 15 | .. math:: 16 | 17 | f(x_0, x_1) = -(x_0 + 10 \cos(x_0)) (x_1 + 5\cos(x_1)) 18 | 19 | and stores the outcome in a text file ``result.txt``. The ``analysis_func`` 20 | simply reads the value in this file. 21 | 22 | You can adapt this example to your needs by replacing this 23 | basic template with an actual simulation and writing the corresponding 24 | analysis function. See :ref:`optimas-with-simulations` for more details. 25 | 26 | The :class:`~optimas.generators.RandomSamplingGenerator` draws samples from 27 | a ``'normal'`` distribution that, for each parameter, is centered at 28 | :math:`c = (l_b + u_b)/2` with standard deviation :math:`\sigma = u_b - c`, 29 | where :math:`l_b` and :math:`u_b` are, respectively, the lower and upper 30 | bounds of the parameter. Other distributions are also available. In this case, 31 | where :math:`l_b=0` and :math:`u_b=15`, the drawn samples result in a 32 | distribution such as: 33 | 34 | 35 | ..
plot:: 36 | :show-source-link: False 37 | 38 | import importlib.util 39 | from matplotlib.patches import Ellipse 40 | 41 | spec = importlib.util.spec_from_file_location('run_example', '../../../examples/dummy_random/run_example.py') 42 | module = importlib.util.module_from_spec(spec) 43 | spec.loader.exec_module(module) 44 | gen = module.gen 45 | 46 | all_trials = [] 47 | while len(all_trials) <= 100: 48 | trial = gen.ask_trials(1) 49 | if trial: 50 | all_trials.append(trial[0]) 51 | else: 52 | break 53 | x0 = np.zeros(len(all_trials)) 54 | x1 = np.zeros(len(all_trials)) 55 | 56 | for i, trial in enumerate(all_trials): 57 | trial_params = trial.parameters_as_dict() 58 | x0[i] = trial_params['x0'] 59 | x1[i] = trial_params['x1'] 60 | 61 | fig, ax = plt.subplots() 62 | ax.scatter(x0, x1, s=3, label='generated evaluations') 63 | ax.set( 64 | xlabel=gen.varying_parameters[0].name, 65 | ylabel=gen.varying_parameters[1].name 66 | ) 67 | ax.scatter(gen._center[0], gen._center[1], s=10, color='tab:red', label='center', marker='x') 68 | ellipse = Ellipse((gen._center[0], gen._center[1]), 69 | width=gen._width[0] * 2, 70 | height=gen._width[1] * 2, 71 | facecolor='none', 72 | edgecolor='tab:red', 73 | label='standard deviation') 74 | ax.add_patch(ellipse) 75 | ax.legend(loc='upper right') 76 | 77 | 78 | Scripts 79 | ~~~~~~~ 80 | 81 | The two files needed to run this example should be located in the same folder 82 | (named e.g., ``example``): 83 | 84 | .. code-block:: bash 85 | 86 | example 87 | ├── run_example.py 88 | └── template_simulation_script.py 89 | 90 | The example is executed by running 91 | 92 | .. code-block:: bash 93 | 94 | python run_example.py 95 | 96 | You can find both example scripts below. 97 | 98 | .. literalinclude:: ../../../examples/dummy_random/run_example.py 99 | :language: python 100 | :caption: run_example.py (:download:`download <../../../examples/dummy_random/run_example.py>`) 101 | 102 | .. literalinclude:: ../../../examples/dummy_random/template_simulation_script.py 103 | :language: python 104 | :caption: template_simulation_script.py (:download:`download <../../../examples/dummy_random/template_simulation_script.py>`) 105 | -------------------------------------------------------------------------------- /tests/test_line_sampling.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | import numpy as np 4 | import pytest 5 | from gest_api.vocs import VOCS, ContinuousVariable 6 | 7 | from optimas.explorations import Exploration 8 | from optimas.generators import LineSamplingGenerator 9 | from optimas.evaluators import FunctionEvaluator 10 | 11 | 12 | def eval_func(input_params, output_params): 13 | """Evaluation function for single-fidelity test""" 14 | x0 = input_params["x0"] 15 | x1 = input_params["x1"] 16 | result = -(x0 + 10 * np.cos(x0)) * (x1 + 5 * np.cos(x1)) 17 | output_params["f"] = result 18 | 19 | 20 | def test_line_sampling(): 21 | """Test that line sampling generates the expected configurations.""" 22 | 23 | # Define test parameters 24 | lower_bounds = [-3.0, 2.0] 25 | upper_bounds = [1.0, 5.0] 26 | defaults = [0, 0] 27 | n_steps = [7, 15] 28 | 29 | # Set number of evaluations. 
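# Line sampling varies a single parameter at a time (the others stay at
# their defaults), so the expected total number of evaluations is the sum
# of the steps of all parameters.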
30 | n_evals = np.sum(n_steps) 31 | 32 | vocs = VOCS( 33 | variables={ 34 | "x0": ContinuousVariable( 35 | domain=[lower_bounds[0], upper_bounds[0]], 36 | default_value=defaults[0], 37 | ), 38 | "x1": ContinuousVariable( 39 | domain=[lower_bounds[1], upper_bounds[1]], 40 | default_value=defaults[1], 41 | ), 42 | }, 43 | objectives={"f": "MAXIMIZE"}, 44 | ) 45 | 46 | # Create generator and run exploration. 47 | gen = LineSamplingGenerator(vocs=vocs, n_steps=n_steps) 48 | ev = FunctionEvaluator(function=eval_func) 49 | exploration = Exploration( 50 | generator=gen, 51 | evaluator=ev, 52 | max_evals=n_evals, 53 | sim_workers=2, 54 | exploration_dir_path="./tests_output/test_line_sampling", 55 | ) 56 | exploration.run() 57 | 58 | # Get generated points. 59 | h = exploration.history 60 | h = h[h["sim_ended"]] 61 | x0_gen = h["x0"] 62 | x1_gen = h["x1"] 63 | 64 | # Check that the amount of evaluations with default values is as expected. 65 | assert len(x0_gen[x0_gen == defaults[0]]) == n_steps[1] 66 | assert len(x1_gen[x1_gen == defaults[1]]) == n_steps[0] 67 | 68 | # Check that the line scans along each direction are as expected. 69 | np.testing.assert_array_equal( 70 | x0_gen[: n_steps[0]], 71 | np.linspace(lower_bounds[0], upper_bounds[0], n_steps[0]), 72 | ) 73 | np.testing.assert_array_equal( 74 | x1_gen[n_steps[0] :], 75 | np.linspace(lower_bounds[1], upper_bounds[1], n_steps[1]), 76 | ) 77 | 78 | 79 | def test_line_sampling_errors(): 80 | """Test that the line sampling raises the correct exceptions.""" 81 | 82 | vocs = VOCS( 83 | variables={"x0": [-3, 1], "x1": [-3, 1]}, objectives={"f": "MAXIMIZE"} 84 | ) 85 | 86 | # Check that an exception is raised when default values are missing. 87 | with pytest.raises( 88 | ValueError, match="Variable 'x0' does not have a default value." 89 | ): 90 | gen = LineSamplingGenerator(vocs=vocs, n_steps=[3, 5]) 91 | 92 | vocs = VOCS( 93 | variables={ 94 | "x0": ContinuousVariable(domain=[-3, 1], default_value=0.0), 95 | "x1": ContinuousVariable(domain=[-3, 1], default_value=0.0), 96 | }, 97 | objectives={"f": "MAXIMIZE"}, 98 | ) 99 | 100 | # Check that an exception is raised when n_steps is not correct. 101 | with pytest.raises( 102 | AssertionError, 103 | match=re.escape( 104 | "Length of `n_steps` (1) and" 105 | " `varying_parameters` (2) do not match." 106 | ), 107 | ): 108 | LineSamplingGenerator(vocs=vocs, n_steps=[3]) 109 | 110 | 111 | if __name__ == "__main__": 112 | test_line_sampling() 113 | test_line_sampling_errors() 114 | -------------------------------------------------------------------------------- /tests/test_analyzed_parameters.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from gest_api.vocs import VOCS 3 | 4 | from optimas.explorations import Exploration 5 | from optimas.generators import RandomSamplingGenerator 6 | from optimas.evaluators import FunctionEvaluator 7 | 8 | 9 | def eval_func(input_params, output_params): 10 | """Evaluation function used for testing""" 11 | x0 = input_params["x0"] 12 | x1 = input_params["x1"] 13 | result = -(x0 + 10 * np.cos(x0)) * (x1 + 5 * np.cos(x1)) 14 | output_params["f"] = result 15 | output_params["analyzed_parameter_1"] = result * 3 16 | output_params["analyzed_parameter_2"] = result * np.pi 17 | 18 | 19 | def test_analyzed_parameters(): 20 | """ 21 | Test that an exploration runs successfully when including not only an 22 | objective, but also a set of additional analyzed parameters. 
23 | """ 24 | vocs = VOCS( 25 | variables={"x0": [-50.0, 5.0], "x1": [-5.0, 15.0]}, 26 | objectives={"f": "MAXIMIZE"}, 27 | observables=["analyzed_parameter_1", "analyzed_parameter_2"], 28 | ) 29 | 30 | # Create generator. 31 | gen = RandomSamplingGenerator(vocs=vocs) 32 | 33 | # Create function evaluator. 34 | ev = FunctionEvaluator(function=eval_func) 35 | 36 | # Create exploration. 37 | exploration = Exploration( 38 | generator=gen, 39 | evaluator=ev, 40 | max_evals=10, 41 | sim_workers=2, 42 | exploration_dir_path="./tests_output/test_analyzed_parameters", 43 | ) 44 | 45 | # Run exploration. 46 | exploration.run() 47 | 48 | # Get f and analyzed parameters from history array. 49 | h = exploration.history 50 | h = h[h["sim_ended"]] 51 | f_out = h["f"] 52 | par1_out = h["analyzed_parameter_1"] 53 | par2_out = h["analyzed_parameter_2"] 54 | 55 | # Check that the values of the analyzed parameters are as expected. 56 | np.testing.assert_array_equal(par1_out, f_out * 3) 57 | np.testing.assert_array_equal(par2_out, f_out * np.pi) 58 | 59 | # Save history for later restart test 60 | np.save( 61 | "./tests_output/ax_sf_history_with_analyzed_parameters", 62 | exploration._libe_history.H, 63 | ) 64 | 65 | 66 | def test_analyzed_parameters_from_history(): 67 | """ 68 | Test that an exploration with analyzed parameters can be successfully 69 | initialized from a history file. This includes checking that the past 70 | values of the analyzed parameters in the history file are correctly 71 | loaded back into the exploration. 72 | """ 73 | vocs = VOCS( 74 | variables={"x0": [-50.0, 5.0], "x1": [-5.0, 15.0]}, 75 | objectives={"f": "MAXIMIZE"}, 76 | observables=["analyzed_parameter_1", "analyzed_parameter_2"], 77 | ) 78 | 79 | # Create generator. 80 | gen = RandomSamplingGenerator(vocs=vocs) 81 | 82 | # Create function evaluator. 83 | ev = FunctionEvaluator(function=eval_func) 84 | 85 | # Create exploration. 86 | exploration = Exploration( 87 | generator=gen, 88 | evaluator=ev, 89 | max_evals=10, 90 | sim_workers=2, 91 | history="./tests_output/ax_sf_history_with_analyzed_parameters.npy", 92 | exploration_dir_path="./tests_output/test_analyzed_parameters_with_history", 93 | ) 94 | 95 | # Run exploration. 96 | exploration.run() 97 | 98 | # Get f and analyzed parameters from history array. 99 | h = exploration.history 100 | h = h[h["sim_ended"]] 101 | f_out = h["f"] 102 | par1_out = h["analyzed_parameter_1"] 103 | par2_out = h["analyzed_parameter_2"] 104 | 105 | # Check that the values of the analyzed parameters are as expected. 
106 | np.testing.assert_array_equal(par1_out, f_out * 3) 107 | np.testing.assert_array_equal(par2_out, f_out * np.pi) 108 | 109 | 110 | if __name__ == "__main__": 111 | test_analyzed_parameters() 112 | test_analyzed_parameters_from_history() 113 | -------------------------------------------------------------------------------- /tests/test_chain_evaluator.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import numpy as np 4 | from gest_api.vocs import VOCS 5 | 6 | from optimas.explorations import Exploration 7 | from optimas.generators import RandomSamplingGenerator 8 | from optimas.evaluators import TemplateEvaluator, ChainEvaluator 9 | 10 | 11 | def analysis_func_1(sim_dir, output_params): 12 | """Analysis function used by the template evaluator.""" 13 | # Read back result from file 14 | with open("result.txt") as f: 15 | result = float(f.read()) 16 | output_params["result_1"] = result 17 | 18 | 19 | def analysis_func_2(sim_dir, output_params): 20 | """Analysis function used by the template evaluator.""" 21 | # Read back result from file 22 | with open("result_2.txt") as f: 23 | result = float(f.read()) 24 | output_params["f"] = result 25 | 26 | 27 | def test_chain_evaluator(): 28 | # Define variables and objectives. 29 | vocs = VOCS( 30 | variables={"x0": [-50.0, 5.0], "x1": [-5.0, 15.0]}, 31 | objectives={"f": "MAXIMIZE"}, 32 | observables=["result_1"], 33 | ) 34 | 35 | # Define variables and objectives. 36 | gen = RandomSamplingGenerator(vocs=vocs) 37 | 38 | # Create template evaluator. 39 | ev1 = TemplateEvaluator( 40 | sim_template=os.path.join( 41 | os.path.abspath(os.path.dirname(__file__)), 42 | "resources", 43 | "template_simulation_script.py", 44 | ), 45 | analysis_func=analysis_func_1, 46 | ) 47 | ev2 = TemplateEvaluator( 48 | sim_template=os.path.join( 49 | os.path.abspath(os.path.dirname(__file__)), 50 | "resources", 51 | "template_simulation_script_2.py", 52 | ), 53 | analysis_func=analysis_func_2, 54 | ) 55 | ev = ChainEvaluator([ev1, ev2]) 56 | 57 | # Create exploration. 58 | exploration = Exploration( 59 | generator=gen, 60 | evaluator=ev, 61 | max_evals=10, 62 | sim_workers=2, 63 | exploration_dir_path="./tests_output/test_chain_evaluator", 64 | ) 65 | 66 | # Run exploration. 67 | exploration.run() 68 | 69 | # Check that all simulations returned data. 70 | assert np.all(exploration.history["f"] != 0.0) 71 | assert np.all(exploration.history["result_1"] != 0.0) 72 | 73 | 74 | def test_chain_evaluator_only_final_analysis(): 75 | """Test a ChainEvaluator where only the final TemplateEvaluator has an 76 | analysis function.""" 77 | # Define variables and objectives. 78 | vocs = VOCS( 79 | variables={"x0": [-50.0, 5.0], "x1": [-5.0, 15.0]}, 80 | objectives={"f": "MAXIMIZE"}, 81 | ) 82 | 83 | # Define variables and objectives. 84 | gen = RandomSamplingGenerator(vocs=vocs) 85 | 86 | # Create template evaluator. 87 | ev1 = TemplateEvaluator( 88 | sim_template=os.path.join( 89 | os.path.abspath(os.path.dirname(__file__)), 90 | "resources", 91 | "template_simulation_script.py", 92 | ) 93 | ) 94 | ev2 = TemplateEvaluator( 95 | sim_template=os.path.join( 96 | os.path.abspath(os.path.dirname(__file__)), 97 | "resources", 98 | "template_simulation_script_2.py", 99 | ), 100 | analysis_func=analysis_func_2, 101 | ) 102 | ev = ChainEvaluator([ev1, ev2]) 103 | 104 | # Create exploration. 
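# The two template evaluators run one after the other for each trial; only
# the second one defines an analysis function, so only 'f' is collected here.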
105 | exploration = Exploration( 106 | generator=gen, 107 | evaluator=ev, 108 | max_evals=10, 109 | sim_workers=2, 110 | exploration_dir_path="./tests_output/test_chain_evaluator_2", 111 | ) 112 | 113 | # Run exploration. 114 | exploration.run() 115 | 116 | # Check that all simulations returned data. 117 | assert np.all(exploration.history["f"] != 0.0) 118 | 119 | 120 | if __name__ == "__main__": 121 | test_chain_evaluator() 122 | test_chain_evaluator_only_final_analysis() 123 | -------------------------------------------------------------------------------- /optimas/generators/ax/base.py: -------------------------------------------------------------------------------- 1 | """Contains the definition of the base Ax generator.""" 2 | 3 | from typing import Optional 4 | import logging 5 | 6 | import torch 7 | 8 | from optimas.core import TrialParameter 9 | from optimas.generators.base import Generator 10 | from gest_api.vocs import VOCS 11 | 12 | 13 | # Disable Ax loggers to get cleaner output. In principle, setting 14 | # `verbose_logging=False` in the `AxClient` should already avoid most of the 15 | # logs, but this does not work when using 'spawn' multiprocessing. 16 | for logger in logging.root.manager.loggerDict: 17 | if logger.startswith("ax.") or logger == "ax": 18 | logging.getLogger(logger).setLevel(logging.ERROR) 19 | 20 | 21 | class AxGenerator(Generator): 22 | """Base class for all Ax generators. 23 | 24 | Parameters 25 | ---------- 26 | vocs : VOCS 27 | VOCS object defining variables, objectives, constraints, and observables. 28 | use_cuda : bool, optional 29 | Whether to allow the generator to run on a CUDA GPU. By default 30 | ``False``. 31 | gpu_id : int, optional 32 | The ID of the GPU in which to run the generator. By default, ``0``. 33 | dedicated_resources : bool, optional 34 | Whether to allocated dedicated resources (e.g., the GPU) for the 35 | generator. These resources will not be available to the 36 | simulation workers. By default, ``False``. 37 | save_model : bool, optional 38 | Whether to save the optimization model (e.g., the surrogate model) to 39 | disk. By default ``False``. 40 | model_save_period : int, optional 41 | Periodicity, in number of evaluated Trials, with which to save the 42 | model to disk. By default, ``5``. 43 | model_history_dir : str, optional 44 | Name of the directory in which the model will be saved. By default, 45 | ``'model_history'``. 46 | custom_trial_parameters : list of TrialParameter 47 | For some generators, it might be necessary to attach additional 48 | parameters to the trials. If so, they can be given here as a list. 49 | By default, ``None``. 50 | allow_fixed_parameters : bool, optional 51 | Whether the generator supports ``VaryingParameter``s whose value 52 | has been fixed. By default, False. 53 | allow_updating_parameters : list of TrialParameter 54 | Whether the generator supports updating the ``VaryingParameter``s. 55 | If so, the `_update_parameter` method must be implemented. 56 | By default, False. 
57 | 58 | """ 59 | 60 | def __init__( 61 | self, 62 | vocs: VOCS, 63 | use_cuda: Optional[bool] = False, 64 | gpu_id: Optional[int] = 0, 65 | dedicated_resources: Optional[bool] = False, 66 | save_model: Optional[bool] = False, 67 | model_save_period: Optional[int] = 5, 68 | model_history_dir: Optional[str] = "model_history", 69 | custom_trial_parameters: Optional[TrialParameter] = None, 70 | allow_fixed_parameters: Optional[bool] = False, 71 | allow_updating_parameters: Optional[bool] = False, 72 | ) -> None: 73 | super().__init__( 74 | vocs=vocs, 75 | use_cuda=use_cuda, 76 | gpu_id=gpu_id, 77 | dedicated_resources=dedicated_resources, 78 | save_model=save_model, 79 | model_save_period=model_save_period, 80 | model_history_dir=model_history_dir, 81 | custom_trial_parameters=custom_trial_parameters, 82 | allow_fixed_parameters=allow_fixed_parameters, 83 | allow_updating_parameters=allow_updating_parameters, 84 | ) 85 | self._determine_torch_device() 86 | 87 | def _determine_torch_device(self) -> None: 88 | """Determine whether to run the generator on GPU (CUDA) or CPU.""" 89 | # Use CUDA if available. 90 | if self.use_cuda and torch.cuda.is_available(): 91 | self.torch_device = "cuda" 92 | else: 93 | self.torch_device = "cpu" 94 | -------------------------------------------------------------------------------- /examples/hipace/analysis_script.py: -------------------------------------------------------------------------------- 1 | """Defines the analysis function that runs after the simulation.""" 2 | 3 | import os 4 | from openpmd_viewer.addons import LpaDiagnostics 5 | import numpy as np 6 | from scipy.constants import e 7 | 8 | 9 | def analyze_simulation(simulation_directory, output_params): 10 | """Analyze the simulation output. 11 | 12 | This method analyzes the output generated by the simulation to 13 | obtain the value of the optimization objective and other analyzed 14 | parameters, if specified. The value of these parameters has to be 15 | given to the `output_params` dictionary. 16 | 17 | Parameters 18 | ---------- 19 | simulation_directory : str 20 | Path to the simulation folder where the output was generated. 21 | output_params : dict 22 | Dictionary where the value of the objectives and analyzed parameters 23 | will be stored. There is one entry per parameter, where the key 24 | is the name of the parameter given by the user. 25 | 26 | Returns 27 | ------- 28 | dict 29 | The `output_params` dictionary with the results from the analysis. 30 | 31 | """ 32 | # Open simulation diagnostics. 33 | d = LpaDiagnostics(os.path.join(simulation_directory, "diags/hdf5")) 34 | 35 | # Get beam particles with `u_z >= 10` and transverse offset no larger than 36 | # 15 µm in `x` and `y`. 37 | uz, w = d.get_particle( 38 | ["uz", "w"], iteration=d.iterations[-1], species="witness" 39 | ) 40 | 41 | # Convert charge to pC. 42 | q = w.sum() * e * 1e12 43 | 44 | # Analyze distribution and fill in the output data. 45 | if len(w) < 2: # Need at least 2 particles to calculate energy spread 46 | output_params["f"] = 0 47 | else: 48 | med, mad = weighted_mad(uz / 2, w) 49 | output_params["f"] = np.sqrt(q) * med / mad / 100 50 | output_params["charge"] = q 51 | output_params["energy_med"] = med 52 | output_params["energy_mad"] = mad 53 | 54 | return output_params 55 | 56 | 57 | def weighted_median(data, weights): 58 | """Compute the weighted quantile of a 1D numpy array. 59 | 60 | Parameters 61 | ---------- 62 | data : ndarray 63 | Input array (one dimension). 
64 | weights : ndarray 65 | Array with the weights of the same size of `data`. 66 | quantile : float 67 | Quantile to compute. It must have a value between 0 and 1. 68 | 69 | Returns 70 | ------- 71 | quantile_1D : float 72 | The output value. 73 | 74 | """ 75 | quantile = 0.5 76 | # Check the data 77 | if not isinstance(data, np.matrix): 78 | data = np.asarray(data) 79 | if not isinstance(weights, np.matrix): 80 | weights = np.asarray(weights) 81 | nd = data.ndim 82 | if nd != 1: 83 | raise TypeError("data must be a one dimensional array") 84 | ndw = weights.ndim 85 | if ndw != 1: 86 | raise TypeError("weights must be a one dimensional array") 87 | if data.shape != weights.shape: 88 | raise TypeError("the length of data and weights must be the same") 89 | if (quantile > 1.0) or (quantile < 0.0): 90 | raise ValueError("quantile must have a value between 0. and 1.") 91 | # Sort the data 92 | ind_sorted = np.argsort(data) 93 | sorted_data = data[ind_sorted] 94 | sorted_weights = weights[ind_sorted] 95 | # Compute the auxiliary arrays 96 | Sn = np.cumsum(sorted_weights) 97 | # TODO: Check that the weights do not sum zero 98 | # assert Sn != 0, "The sum of the weights must not be zero" 99 | Pn = (Sn - 0.5 * sorted_weights) / Sn[-1] 100 | # Get the value of the weighted median 101 | return np.interp(quantile, Pn, sorted_data) 102 | 103 | 104 | def weighted_mad(x, w): 105 | """Calculate weighted median absolute deviation.""" 106 | med = weighted_median(x, w) 107 | mad = weighted_median(np.abs(x - med), w) 108 | return med, mad 109 | -------------------------------------------------------------------------------- /tests/test_random_sampling.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from gest_api.vocs import VOCS 3 | 4 | from optimas.explorations import Exploration 5 | from optimas.generators import RandomSamplingGenerator 6 | from optimas.evaluators import FunctionEvaluator 7 | 8 | 9 | def eval_func(input_params, output_params): 10 | """Evaluation function for single-fidelity test""" 11 | x0 = input_params["x0"] 12 | x1 = input_params["x1"] 13 | result = -(x0 + 10 * np.cos(x0)) * (x1 + 5 * np.cos(x1)) 14 | output_params["f"] = result 15 | 16 | 17 | def test_uniform_sampling(): 18 | """Test that random sampling with a uniform distribution generates the 19 | expected configurations.""" 20 | 21 | # Set random seed for reproducible results. 22 | seed = 1 23 | 24 | # Create varying parameters. 25 | lower_bounds = [-3.0, 2.0] 26 | upper_bounds = [1.0, 5.0] 27 | 28 | # Set number of evaluations. 29 | n_evals = 10 30 | 31 | vocs = VOCS( 32 | variables={ 33 | "x0": [lower_bounds[0], upper_bounds[0]], 34 | "x1": [lower_bounds[1], upper_bounds[1]], 35 | }, 36 | objectives={"f": "MAXIMIZE"}, 37 | ) 38 | 39 | # Create generator and run exploration. 40 | gen = RandomSamplingGenerator( 41 | vocs=vocs, 42 | distribution="uniform", 43 | seed=1, 44 | ) 45 | ev = FunctionEvaluator(function=eval_func) 46 | exploration = Exploration( 47 | generator=gen, 48 | evaluator=ev, 49 | max_evals=n_evals, 50 | sim_workers=2, 51 | exploration_dir_path="./tests_output/test_uniform_sampling", 52 | ) 53 | exploration.run() 54 | 55 | # Get generated points. 56 | h = exploration.history 57 | h = h[h["sim_ended"]] 58 | x0_gen = h["x0"] 59 | x1_gen = h["x1"] 60 | 61 | # Generate expected points. 
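# Re-create an identically seeded default_rng; the generator should draw
# from the same stream, so the sampled points are expected to match exactly.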
62 | rng = np.random.default_rng(seed=seed) 63 | configs = rng.uniform( 64 | lower_bounds, upper_bounds, (n_evals, len(lower_bounds)) 65 | ) 66 | x0_test = configs[:, 0] 67 | x1_test = configs[:, 1] 68 | 69 | # Check that the generated points are as expected. 70 | np.testing.assert_array_equal(x0_gen, x0_test) 71 | np.testing.assert_array_equal(x1_gen, x1_test) 72 | 73 | 74 | def test_normal_sampling(): 75 | """Test that random sampling with a normal distribution generates the 76 | expected configurations.""" 77 | 78 | # Set random seed for reproducible results. 79 | seed = 1 80 | 81 | # Create varying parameters. 82 | center = [0.0, 0.0] 83 | sigma = [1.0, 5.0] 84 | 85 | # Set number of evaluations. 86 | n_evals = 10 87 | 88 | vocs = VOCS( 89 | variables={ 90 | "x0": [center[0] - sigma[0], center[0] + sigma[0]], 91 | "x1": [center[1] - sigma[1], center[1] + sigma[1]], 92 | }, 93 | objectives={"f": "MAXIMIZE"}, 94 | ) 95 | 96 | # Create generator and run exploration. 97 | gen = RandomSamplingGenerator(vocs=vocs, distribution="normal", seed=1) 98 | ev = FunctionEvaluator(function=eval_func) 99 | exploration = Exploration( 100 | generator=gen, 101 | evaluator=ev, 102 | max_evals=n_evals, 103 | sim_workers=2, 104 | exploration_dir_path="./tests_output/test_normal_sampling", 105 | ) 106 | exploration.run() 107 | 108 | # Get generated points. 109 | h = exploration.history 110 | h = h[h["sim_ended"]] 111 | x0_gen = h["x0"] 112 | x1_gen = h["x1"] 113 | 114 | # Generate expected points. 115 | rng = np.random.default_rng(seed=seed) 116 | configs = rng.normal(center, sigma, (n_evals, len(center))) 117 | x0_test = configs[:, 0] 118 | x1_test = configs[:, 1] 119 | 120 | # Check that the generated points are as expected. 121 | np.testing.assert_array_equal(x0_gen, x0_test) 122 | np.testing.assert_array_equal(x1_gen, x1_test) 123 | 124 | 125 | if __name__ == "__main__": 126 | test_uniform_sampling() 127 | test_normal_sampling() 128 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | # To use: 2 | # 3 | # pre-commit run -a 4 | # 5 | # Or: 6 | # 7 | # pre-commit install # (runs every time you commit in git) 8 | # 9 | # To update this file: 10 | # 11 | # pre-commit autoupdate 12 | # 13 | # See https://pre-commit.com for more information 14 | 15 | # Do not check/format anything from third party directories 16 | #exclude: '^share/openPMD/thirdParty' 17 | 18 | # See https://pre-commit.com/hooks.html for more hooks 19 | repos: 20 | - repo: https://github.com/pre-commit/pre-commit-hooks 21 | rev: v5.0.0 22 | hooks: 23 | - id: trailing-whitespace 24 | args: [--markdown-linebreak-ext=md] 25 | - id: end-of-file-fixer 26 | - id: mixed-line-ending 27 | - id: check-json 28 | - id: check-toml 29 | - id: check-yaml 30 | args: [--allow-multiple-documents] 31 | - id: check-added-large-files 32 | args: ['--maxkb=40'] 33 | - id: requirements-txt-fixer 34 | # - id: fix-encoding-pragma 35 | # exclude: ^noxfile.py$ 36 | 37 | # documentation files: .rst 38 | - repo: https://github.com/pre-commit/pygrep-hooks 39 | rev: v1.10.0 40 | hooks: 41 | - id: rst-backticks 42 | - id: rst-directive-colons 43 | - id: rst-inline-touching-normal 44 | 45 | #- repo: https://github.com/asottile/pyupgrade 46 | # rev: v2.29.0 47 | # hooks: 48 | # - id: pyupgrade 49 | 50 | # Changes tabs to spaces 51 | - repo: https://github.com/Lucas-C/pre-commit-hooks 52 | rev: v1.5.5 53 | hooks: 54 | - id: remove-tabs 55 
| 56 | # CMake formatting 57 | #- repo: https://github.com/cheshirekow/cmake-format-precommit 58 | # rev: v0.6.13 59 | # hooks: 60 | # - id: cmake-format 61 | # additional_dependencies: [pyyaml] 62 | # types: [file] 63 | # files: (\.cmake|CMakeLists.txt)(.in)?$ 64 | 65 | # Autoremoves unused Python imports 66 | - repo: https://github.com/hadialqattan/pycln 67 | rev: v2.5.0 68 | hooks: 69 | - id: pycln 70 | name: pycln (python) 71 | 72 | # Sorts Python imports according to PEP8 73 | # https://www.python.org/dev/peps/pep-0008/#imports 74 | #- repo: https://github.com/pycqa/isort 75 | # rev: 5.12.0 76 | # hooks: 77 | # - id: isort 78 | # name: isort (python) 79 | 80 | # Python: Flake8 (checks only, does this support auto-fixes?) 81 | #- repo: https://github.com/PyCQA/flake8 82 | # rev: 4.0.1 83 | # hooks: 84 | # - id: flake8 85 | # additional_dependencies: &flake8_dependencies 86 | # - flake8-bugbear 87 | # - pep8-naming 88 | # exclude: ^(docs/.*|tools/.*)$ 89 | # Alternatively: use autopep8? 90 | 91 | # Docstring formatting according to numpy style 92 | - repo: https://github.com/pycqa/pydocstyle 93 | rev: 6.3.0 94 | hooks: 95 | - id: pydocstyle 96 | additional_dependencies: 97 | - tomli 98 | exclude: | 99 | (?x)( 100 | ^examples/multitask_lpa_fbpic_waket/custom_*| 101 | ^setup.py| 102 | ^doc/source/conf.py 103 | ) 104 | 105 | # Python Formatting 106 | - repo: https://github.com/psf/black 107 | rev: 25.1.0 # Keep in sync with blacken-docs 108 | hooks: 109 | - id: black 110 | - repo: https://github.com/asottile/blacken-docs 111 | rev: 1.19.1 112 | hooks: 113 | - id: blacken-docs 114 | args: [--line-length=80] 115 | additional_dependencies: 116 | - black==24.4.2 # keep in sync with black hook 117 | # TODO: black-jupyter 118 | 119 | # Jupyter Notebooks: clean up all cell outputs 120 | # - repo: https://github.com/roy-ht/pre-commit-jupyter 121 | # rev: v1.2.1 122 | # hooks: 123 | # - id: jupyter-notebook-cleanup 124 | # args: 125 | # - --pin-patterns 126 | # - "[pin];[donotremove]" 127 | # # - --remove-kernel-metadata 128 | 129 | # Checks the manifest for missing files (native support) 130 | - repo: https://github.com/mgedmin/check-manifest 131 | rev: "0.50" 132 | hooks: 133 | - id: check-manifest 134 | # This is a slow hook, so only run this if --hook-stage manual is passed 135 | stages: [manual] 136 | additional_dependencies: [cmake, ninja] 137 | -------------------------------------------------------------------------------- /examples/ionization_injection/analysis_script.py: -------------------------------------------------------------------------------- 1 | """Defines the analysis function that runs after the simulation.""" 2 | 3 | import os 4 | from openpmd_viewer.addons import LpaDiagnostics 5 | import numpy as np 6 | from scipy.constants import e 7 | 8 | 9 | def analyze_simulation(simulation_directory, output_params): 10 | """Analyze the simulation output. 11 | 12 | This method analyzes the output generated by the simulation to 13 | obtain the value of the optimization objective and other analyzed 14 | parameters, if specified. The value of these parameters has to be 15 | given to the `output_params` dictionary. 16 | 17 | Parameters 18 | ---------- 19 | simulation_directory : str 20 | Path to the simulation folder where the output was generated. 21 | output_params : dict 22 | Dictionary where the value of the objectives and analyzed parameters 23 | will be stored. There is one entry per parameter, where the key 24 | is the name of the parameter given by the user. 
25 | 26 | Returns 27 | ------- 28 | dict 29 | The `output_params` dictionary with the results from the analysis. 30 | """ 31 | # Open simulation diagnostics. 32 | d = LpaDiagnostics(os.path.join(simulation_directory, "diags/hdf5")) 33 | 34 | # Get beam particles with `u_z >= 10` and transverse offset no larger than 35 | # 15 µm in `x` and `y`. 36 | uz, w = d.get_particle( 37 | ["uz", "w"], 38 | iteration=1, 39 | select={"uz": [10, None], "x": [-15e-6, 15e-6], "y": [-15e-6, 15e-6]}, 40 | ) 41 | 42 | # Convert charge to pC. 43 | q = w.sum() * e * 1e12 44 | 45 | # Analyze distribution and fill in the output data. 46 | if len(w) < 2: # Need at least 2 particles to calculate energy spread 47 | output_params["f"] = 0 48 | else: 49 | med, mad = weighted_mad(uz / 2, w) 50 | output_params["f"] = np.sqrt(q) * med / mad / 100 51 | output_params["charge"] = q 52 | output_params["energy_med"] = med 53 | output_params["energy_mad"] = mad 54 | 55 | return output_params 56 | 57 | 58 | def weighted_median(data, weights): 59 | """Compute the weighted quantile of a 1D numpy array. 60 | 61 | Parameters 62 | ---------- 63 | data : ndarray 64 | Input array (one dimension). 65 | weights : ndarray 66 | Array with the weights of the same size of `data`. 67 | quantile : float 68 | Quantile to compute. It must have a value between 0 and 1. 69 | 70 | Returns 71 | ------- 72 | quantile_1D : float 73 | The output value. 74 | 75 | """ 76 | quantile = 0.5 77 | # Check the data 78 | if not isinstance(data, np.matrix): 79 | data = np.asarray(data) 80 | if not isinstance(weights, np.matrix): 81 | weights = np.asarray(weights) 82 | nd = data.ndim 83 | if nd != 1: 84 | raise TypeError("data must be a one dimensional array") 85 | ndw = weights.ndim 86 | if ndw != 1: 87 | raise TypeError("weights must be a one dimensional array") 88 | if data.shape != weights.shape: 89 | raise TypeError("the length of data and weights must be the same") 90 | if (quantile > 1.0) or (quantile < 0.0): 91 | raise ValueError("quantile must have a value between 0. and 1.") 92 | # Sort the data 93 | ind_sorted = np.argsort(data) 94 | sorted_data = data[ind_sorted] 95 | sorted_weights = weights[ind_sorted] 96 | # Compute the auxiliary arrays 97 | Sn = np.cumsum(sorted_weights) 98 | # TODO: Check that the weights do not sum zero 99 | # assert Sn != 0, "The sum of the weights must not be zero" 100 | Pn = (Sn - 0.5 * sorted_weights) / Sn[-1] 101 | # Get the value of the weighted median 102 | return np.interp(quantile, Pn, sorted_data) 103 | 104 | 105 | def weighted_mad(x, w): 106 | """Calculate weighted median absolute deviation.""" 107 | med = weighted_median(x, w) 108 | mad = weighted_median(np.abs(x - med), w) 109 | return med, mad 110 | -------------------------------------------------------------------------------- /optimas/generators/random_sampling.py: -------------------------------------------------------------------------------- 1 | """Contains the definition of the random sampling generator.""" 2 | 3 | from typing import List, Optional 4 | 5 | import numpy as np 6 | 7 | from optimas.core import Trial 8 | from gest_api.vocs import VOCS 9 | from .base import Generator 10 | 11 | 12 | class RandomSamplingGenerator(Generator): 13 | r"""Sample an n-dimensional space with random distributions. 14 | 15 | This generator uses a random distribution to generate a sample of 16 | configurations where to evaluate the given objectives. 
17 | 18 | Parameters 19 | ---------- 20 | vocs : VOCS 21 | VOCS object specifying variables, objectives, constraints, and observables. 22 | distribution : {'uniform', 'normal'}, optional 23 | The random distribution to use. The ``'uniform'`` option draws samples 24 | from a uniform distribution within the lower :math:`l_b` and upper 25 | :math:`u_b` bounds of each parameter. The ``'normal'`` option draws 26 | samples from a normal distribution that, for each parameter, is 27 | centered at :math:`c = (l_b + u_b)/2` with standard deviation 28 | :math:`\sigma = u_b - c`. By default, ``'uniform'``. 29 | seed : int, optional 30 | Seed to initialize the random generator. 31 | 32 | """ 33 | 34 | def __init__( 35 | self, 36 | vocs: VOCS, 37 | distribution: Optional[str] = "uniform", 38 | seed: Optional[int] = None, 39 | ) -> None: 40 | super().__init__(vocs=vocs) 41 | self._generate_sampling = { 42 | "uniform": self._generate_uniform_sampling, 43 | "normal": self._generate_normal_sampling, 44 | } 45 | self._check_inputs(vocs, distribution) 46 | self._distribution = distribution 47 | self._rng = np.random.default_rng(seed) 48 | self._define_generator_parameters() 49 | 50 | def suggest(self, num_points: Optional[int]) -> List[dict]: 51 | """Request the next set of points to evaluate.""" 52 | configs = self._generate_sampling[self._distribution](num_points) 53 | points = [] 54 | for config in configs: 55 | point = {} 56 | for var, value in zip(self._varying_parameters, config): 57 | point[var.name] = value 58 | points.append(point) 59 | return points 60 | 61 | def _check_inputs( 62 | self, 63 | vocs: VOCS, 64 | distribution: str, 65 | ) -> None: 66 | """Check that the generator inputs are valid.""" 67 | # Check that the specified distribution is supported. 68 | supported_distributions = list(self._generate_sampling.keys()) 69 | assert ( 70 | distribution in supported_distributions 71 | ), "Distribution '{}' not recognized. Possible values are {}".format( 72 | distribution, supported_distributions 73 | ) 74 | 75 | def _define_generator_parameters(self) -> None: 76 | """Define parameters used by the random generator.""" 77 | self._n_vars = len(self._varying_parameters) 78 | self._lb = np.array( 79 | [var.lower_bound for var in self._varying_parameters] 80 | ) 81 | self._ub = np.array( 82 | [var.upper_bound for var in self._varying_parameters] 83 | ) 84 | self._center = (self._lb + self._ub) / 2 85 | self._width = self._ub - self._center 86 | 87 | def _generate_uniform_sampling(self, n_trials: int) -> np.ndarray: 88 | """Generate trials using a uniform distribution.""" 89 | return self._rng.uniform(self._lb, self._ub, (n_trials, self._n_vars)) 90 | 91 | def _generate_normal_sampling(self, n_trials: int) -> np.ndarray: 92 | """Generate trials using a normal distribution.""" 93 | return self._rng.normal( 94 | self._center, self._width, (n_trials, self._n_vars) 95 | ) 96 | 97 | def _mark_trial_as_failed(self, trial: Trial): 98 | """No need to do anything, since there is no surrogate model.""" 99 | pass 100 | -------------------------------------------------------------------------------- /optimas/evaluators/base.py: -------------------------------------------------------------------------------- 1 | """Contains the definition of the base Evaluator class.""" 2 | 3 | from typing import Callable, Optional, Dict, List 4 | 5 | from optimas.core import VaryingParameter, Objective, Parameter 6 | 7 | 8 | class Evaluator: 9 | """Base class for all evaluators. 
10 | 
11 |     Parameters
12 |     ----------
13 |     sim_function : callable
14 |         The simulation function (as defined in libEnsemble) to be used for
15 |         carrying out the evaluations.
16 |     n_procs : int, optional
17 |         The number of processes that will be used for each evaluation. By
18 |         default, ``n_procs=1`` if ``n_gpus`` is not given. Otherwise, the
19 |         default behavior is to match the number of processes to the number
20 |         of GPUs, i.e., ``n_procs=n_gpus``.
21 |     n_gpus : int, optional
22 |         The number of GPUs that will be made available for each evaluation. By
23 |         default, 0.
24 |     fail_on_nan : bool, optional
25 |         Whether to mark an evaluation as failed if the value of any of the
26 |         objectives is NaN. By default, ``True``.
27 | 
28 |     """
29 | 
30 |     def __init__(
31 |         self,
32 |         sim_function: Callable,
33 |         n_procs: Optional[int] = None,
34 |         n_gpus: Optional[int] = None,
35 |         fail_on_nan: Optional[bool] = True,
36 |     ) -> None:
37 |         self.sim_function = sim_function
38 |         # If no resources are specified, use 1 CPU and 0 GPUs.
39 |         if n_procs is None and n_gpus is None:
40 |             n_procs = 1
41 |             n_gpus = 0
42 |         # If `n_gpus` is given without specifying `n_procs`, match processes
43 |         # to GPUs.
44 |         elif n_procs is None:
45 |             n_procs = n_gpus
46 |         # If `n_procs` is given without specifying `n_gpus`, do not use GPUs.
47 |         elif n_gpus is None:
48 |             n_gpus = 0
49 |         self._n_procs = n_procs
50 |         self._n_gpus = n_gpus
51 |         self._fail_on_nan = fail_on_nan
52 |         self._initialized = False
53 | 
54 |     def get_sim_specs(
55 |         self,
56 |         varying_parameters: List[VaryingParameter],
57 |         objectives: List[Objective],
58 |         analyzed_parameters: List[Parameter],
59 |     ) -> Dict:
60 |         """Get the `sim_specs` for `libEnsemble`."""
61 |         # Only generate sim_specs if evaluator has been initialized.
62 |         if not self._initialized:
63 |             raise RuntimeError(
64 |                 "Evaluator must be initialized before generating sim_specs"
65 |             )
66 | 
67 |         # Create sim_specs.
68 |         sim_specs = {
69 |             # Simulation function that carries out each evaluation.
70 |             "sim_f": self.sim_function,
71 |             # Names of the inputs for `sim_f`, i.e., the varying parameters
72 |             # that libEnsemble passes to each evaluation.
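            # The "out" entries below declare the dtypes that libEnsemble
            # allocates in its history array for the objectives, the analyzed
            # parameters, and the status of each trial.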
73 | "in": [var.name for var in varying_parameters], 74 | "out": ( 75 | [(obj.name, obj.dtype) for obj in objectives] 76 | + [(par.name, par.dtype) for par in analyzed_parameters] 77 | + [("trial_status", str, 10)] 78 | ), 79 | "user": { 80 | "n_procs": self._n_procs, 81 | "n_gpus": self._n_gpus, 82 | "fail_on_nan": self._fail_on_nan, 83 | "objectives": [obj.name for obj in objectives], 84 | }, 85 | } 86 | return sim_specs 87 | 88 | def get_libe_specs(self) -> Dict: 89 | """Get the `libE_specs` for `libEnsemble`.""" 90 | libE_specs = {} 91 | return libE_specs 92 | 93 | def get_run_params(self) -> Dict: 94 | """Return run parameters for this evaluator.""" 95 | run_params = {"num_procs": self._n_procs, "num_gpus": self._n_gpus} 96 | return run_params 97 | 98 | def initialize(self) -> None: 99 | """Initialize the evaluator.""" 100 | if not self._initialized: 101 | self._initialize() 102 | self._initialized = True 103 | 104 | def _initialize(self) -> None: 105 | """Initialize the evaluator (to be implemented by subclasses).""" 106 | pass 107 | -------------------------------------------------------------------------------- /tests/test_template_evaluator.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import numpy as np 4 | import matplotlib.pyplot as plt 5 | from gest_api.vocs import VOCS 6 | 7 | from optimas.explorations import Exploration 8 | from optimas.generators import RandomSamplingGenerator 9 | from optimas.evaluators import TemplateEvaluator 10 | 11 | 12 | def analysis_func(sim_dir, output_params): 13 | """Analysis function used by the template evaluator.""" 14 | # Read back result from file 15 | with open("result.txt") as f: 16 | result = float(f.read()) 17 | output_params["f"] = result 18 | output_params["p0"] = np.array([[1, 2, 3, 4], [2, 6, 7, 4]]) 19 | output_params["p1"] = np.array([[1, 2, 3, 4], [2, 6, 7, 4]]) 20 | plt.figure() 21 | plt.plot(output_params["p1"][0], output_params["p1"][1]) 22 | output_params["fig"] = plt.gcf() 23 | 24 | 25 | def test_template_evaluator(): 26 | # Define variables and objectives. 27 | vocs = VOCS( 28 | variables={"x0": [-50.0, 5.0], "x1": [-5.0, 15.0]}, 29 | objectives={"f": "MAXIMIZE"}, 30 | observables={ 31 | "p0": (float, (2, 4)), 32 | "p1": "O", 33 | "fig": "O", 34 | }, 35 | ) 36 | 37 | # Define variables and objectives. 38 | gen = RandomSamplingGenerator(vocs=vocs) 39 | 40 | # Create template evaluator. 41 | ev = TemplateEvaluator( 42 | sim_template=os.path.join( 43 | os.path.abspath(os.path.dirname(__file__)), 44 | "resources", 45 | "template_simulation_script.py", 46 | ), 47 | analysis_func=analysis_func, 48 | ) 49 | 50 | # Create exploration. 51 | exploration = Exploration( 52 | generator=gen, 53 | evaluator=ev, 54 | max_evals=10, 55 | sim_workers=2, 56 | exploration_dir_path="./tests_output/test_template_evaluator", 57 | ) 58 | 59 | # Run exploration. 60 | exploration.run() 61 | 62 | # Check that the multidimensional analyzed parameters worked as expected. 
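    # Each history row should round-trip the (2, 4) arrays written by
    # `analysis_func`, and the `p1` and `fig` entries are stored as Python
    # objects, so the Matplotlib figures can still be saved after the run.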
63 | for p0_data in exploration.history["p0"]: 64 | np.testing.assert_array_equal( 65 | np.array(p0_data), np.array([[1, 2, 3, 4], [2, 6, 7, 4]]) 66 | ) 67 | for p1_data in exploration.history["p1"]: 68 | np.testing.assert_array_equal( 69 | np.array(p1_data), np.array([[1, 2, 3, 4], [2, 6, 7, 4]]) 70 | ) 71 | for i, fig in enumerate(exploration.history["fig"]): 72 | fig.savefig( 73 | os.path.join(exploration.exploration_dir_path, f"test_fig_{i}.png") 74 | ) 75 | 76 | 77 | def test_template_evaluator_timeout(): 78 | """Test the evaluation timeout. 79 | 80 | All evaluations will sleep for 20 seconds. This should trigger the 1 81 | second timeout set in the `TemplateEvaluator`, causing all evaluations 82 | to fail. 83 | """ 84 | # Make evaluations sleep for 20 seconds. 85 | os.environ["OPTIMAS_TEST_SLEEP"] = "20" 86 | 87 | # Define variables and objectives. 88 | vocs = VOCS( 89 | variables={"x0": [-50.0, 5.0], "x1": [-5.0, 15.0]}, 90 | objectives={"f": "MAXIMIZE"}, 91 | ) 92 | 93 | # Define variables and objectives. 94 | gen = RandomSamplingGenerator(vocs=vocs) 95 | 96 | # Create template evaluator with 1s timeout. 97 | ev = TemplateEvaluator( 98 | sim_template=os.path.join( 99 | os.path.abspath(os.path.dirname(__file__)), 100 | "resources", 101 | "template_simulation_script.py", 102 | ), 103 | timeout=1, 104 | ) 105 | 106 | # Create exploration. 107 | exploration = Exploration( 108 | generator=gen, 109 | evaluator=ev, 110 | max_evals=10, 111 | sim_workers=2, 112 | exploration_dir_path="./tests_output/test_template_evaluator_timeout", 113 | ) 114 | 115 | # Run exploration. 116 | exploration.run() 117 | 118 | # Check that no evaluations were successful. 119 | np.testing.assert_array_equal(exploration.history.trial_status, "FAILED") 120 | np.testing.assert_array_equal(exploration.history.f, np.nan) 121 | 122 | 123 | if __name__ == "__main__": 124 | test_template_evaluator() 125 | test_template_evaluator_timeout() 126 | -------------------------------------------------------------------------------- /doc/source/user_guide/basic_usage/analyze_output.rst: -------------------------------------------------------------------------------- 1 | Exploration output 2 | ================== 3 | 4 | By default, the output of an optimas :class:`~optimas.explorations.Exploration` 5 | is stored in an ``exploration`` folder located in the same directory as the 6 | main script. This output consists of log files, folders and data generated by 7 | `libEnsemble `_ 8 | and the evaluations/simulations. 9 | 10 | Log files 11 | ~~~~~~~~~ 12 | 13 | In every run, the following log files are generated: 14 | 15 | - ``libE-stats.txt``: log indicating the worker, start time, end time, etc. of 16 | each evaluation. 17 | - ``ensemble.log``: log of ``libEnsemble`` containing the main events of 18 | the run. This includes the commands with which each evaluation was launched. 19 | - ``exploration_history_after_sim_.npy``: 20 | numpy file that contains the 21 | `history array `_ 22 | of the run. This is a structured array that stores the data of each 23 | evaluation, including the values of the 24 | :class:`~optimas.core.VaryingParameter`\s, 25 | :class:`~optimas.core.Objective`\s, analyzed 26 | :class:`~optimas.core.Parameter`\s and other useful diagnostics. 27 | The periodicity with which this file is updated can be set with the 28 | ``history_save_period`` argument of the :class:`~optimas.explorations.Exploration`. 
29 | - ``exploration_parameters.json``: JSON file containing a serialized 30 | version of the :class:`~optimas.core.VaryingParameter`\s, 31 | :class:`~optimas.core.Objective`\s and other 32 | :class:`~optimas.core.Parameter`\s of the exploration. 33 | 34 | In addition, if the run is aborted for any reason, two additional files 35 | will be created: 36 | 37 | - ``libE_history_at_abort_.npy``: numpy file containing the history 38 | array when the run was aborted. 39 | - ``libE_persis_info_at_abort_.pickle``: contains the internal 40 | ``persis_info`` of libEnsemble when the run was aborted. 41 | 42 | 43 | Simulation data 44 | ~~~~~~~~~~~~~~~ 45 | 46 | When using a :class:`~optimas.evaluators.TemplateEvaluator` (see 47 | :ref:`optimas-with-simulations`), an ``exploration/evaluations`` directory is 48 | also created. Inside this directory, a new folder following the pattern 49 | ``sim`` will be created for each 50 | simulation. This folder contains the simulation script, a copy of the files 51 | specified in ``sim_files``, and the output data of the simulation. 52 | 53 | 54 | Surrogate model 55 | ~~~~~~~~~~~~~~~ 56 | 57 | Some generators store an internal surrogate model. This is the case, 58 | for example, of the 59 | :class:`~optimas.generators.AxSingleFidelityGenerator`, which stores an 60 | ``AxClient`` with the surrogate model used for Bayesian optimization. 61 | 62 | Generators that have this capability can also save the internal model 63 | to file with a certain periodicity (set by the ``model_save_period`` 64 | attribute). By default, these models will be saved in a 65 | ``exploration/model_history`` directory. 66 | 67 | 68 | Example output 69 | ~~~~~~~~~~~~~~ 70 | 71 | An example of the output structure can be seen below. This case 72 | corresponds an optimas run using an 73 | :class:`~optimas.generators.AxSingleFidelityGenerator` and a 74 | :class:`~optimas.evaluators.TemplateEvaluator`, such as in the example 75 | :ref:`bo-basic`. 76 | 77 | .. code-block:: bash 78 | :emphasize-lines: 4,7,21 79 | 80 | / 81 | ├── run_optimas.py 82 | ├── template_simulation_script.py 83 | └── exploration 84 | ├── ensemble.log 85 | ├── libE_stats.txt 86 | ├── exploration_history_after_sim_99.txt 87 | ├── exploration_parameters.json 88 | ├── evaluations 89 | │ ├── sim0000 90 | │ │ ├── simulation_script.py 91 | │ │ └── result.txt 92 | │ ├── sim0001 93 | │ │ ├── simulation_script.py 94 | │ │ └── result.txt 95 | │ ├── sim0002 96 | │ │ ├── simulation_script.py 97 | │ │ └── result.txt 98 | │ ... 99 | │ └── sim0099 100 | │ ├── simulation_script.py 101 | │ └── result.txt 102 | └── model_history 103 | ├── ax_client_at_eval_5.json 104 | ... 105 | └── ax_client_at_eval_100.json 106 | -------------------------------------------------------------------------------- /optimas/generators/line_sampling.py: -------------------------------------------------------------------------------- 1 | """Contains the definition of the line sampling generator.""" 2 | 3 | from typing import List, Optional, Union 4 | 5 | import numpy as np 6 | 7 | from optimas.core import Trial 8 | from gest_api.vocs import VOCS 9 | from .base import Generator 10 | 11 | 12 | class LineSamplingGenerator(Generator): 13 | r"""Sample an n-dimensional space one parameter at a time. 14 | 15 | This generator samples the given objectives along ``n`` dimensions, where 16 | ``n`` is the number of ``varying_parameters``, by varying only one 17 | parameter at a time. 
Along each direction :math:`i` (i.e., along each
18 |     varying parameter), the space is divided into :math:`n_\mathrm{steps,i}`
19 |     evenly spaced steps, resulting in a total number of evaluations
20 |     :math:`\sum_i n_\mathrm{steps,i}`.
21 | 
22 |     Since only one parameter is varied at a time, a default value that will be
23 |     used when a parameter is not being varied needs to be provided for all
24 |     varying parameters.
25 | 
26 |     Parameters
27 |     ----------
28 |     vocs : VOCS
29 |         VOCS object specifying variables, objectives, constraints, and observables.
30 |     n_steps : ndarray or list of int
31 |         A 1D array or list with the number of steps along each direction.
32 | 
33 |     """
34 | 
35 |     def __init__(
36 |         self,
37 |         vocs: VOCS,
38 |         n_steps: Union[np.ndarray, List[int]],
39 |     ) -> None:
40 |         super().__init__(vocs=vocs)
41 |         self._check_inputs(vocs, n_steps)
42 |         self._n_steps = np.asarray(n_steps)
43 |         self._create_configurations()
44 | 
45 |     def _validate_vocs(self, vocs: VOCS) -> None:
46 |         super()._validate_vocs(vocs)
47 |         for var_name, var_spec in vocs.variables.items():
48 |             if var_spec.default_value is None:
49 |                 raise ValueError(
50 |                     f"Variable '{var_name}' has no default value; line sampling requires one for every variable."
51 |                 )
52 | 
53 |     def _check_inputs(
54 |         self,
55 |         vocs: VOCS,
56 |         n_steps: Union[np.ndarray, List[int]],
57 |     ) -> None:
58 |         """Check that the generator inputs are valid."""
59 |         # Check that as many n_steps as varying_parameters are provided.
60 |         assert len(n_steps) == len(
61 |             self.varying_parameters
62 |         ), "Length of `n_steps` ({}) and ".format(
63 |             len(n_steps)
64 |         ) + "`varying_parameters` ({}) do not match.".format(
65 |             len(self.varying_parameters)
66 |         )
67 | 
68 |     def _create_configurations(self) -> None:
69 |         """Create a list with all configurations to be evaluated."""
70 |         # Determine all required quantities.
71 |         lb = [var.lower_bound for var in self._varying_parameters]
72 |         ub = [var.upper_bound for var in self._varying_parameters]
73 |         n_vars = len(self._varying_parameters)
74 |         n_trials = np.sum(self._n_steps)
75 |         default_values = np.array(
76 |             [var.default_value for var in self._varying_parameters]
77 |         )
78 | 
79 |         # Generate configurations.
80 |         all_configs_array = np.ones((n_trials, n_vars)) * default_values
81 |         for i in range(n_vars):
82 |             i_var_min = np.sum(self._n_steps[:i])
83 |             i_var_max = i_var_min + self._n_steps[i]
84 |             var_vals = np.linspace(lb[i], ub[i], self._n_steps[i])
85 |             all_configs_array[i_var_min:i_var_max, i] = var_vals
86 | 
87 |         # Turn them into a list of dictionaries.
88 |         all_configs = []
89 |         for config_array in all_configs_array:
90 |             config = {}
91 |             for var, value in zip(self._varying_parameters, config_array):
92 |                 config[var.name] = value
93 |             all_configs.append(config)
94 | 
95 |         # Store configurations.
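        # For example (hypothetical values): with variables x0 in [0, 1]
        # (default 0.5) and x1 in [0, 10] (default 5), and n_steps=[3, 2],
        # the list becomes
        #   {x0: 0.0, x1: 5}, {x0: 0.5, x1: 5}, {x0: 1.0, x1: 5},
        #   {x0: 0.5, x1: 0.0}, {x0: 0.5, x1: 10.0},
        # i.e., each parameter is swept over its own evenly spaced values
        # while the others stay at their default values.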
96 | self._all_configs = all_configs 97 | 98 | def suggest(self, num_points: Optional[int]) -> List[dict]: 99 | """Request the next set of points to evaluate.""" 100 | points = [] 101 | for _ in range(num_points): 102 | if self._all_configs: 103 | config = self._all_configs.pop(0) 104 | points.append(config) 105 | return points 106 | 107 | def _mark_trial_as_failed(self, trial: Trial): 108 | """No need to do anything, since there is no surrogate model.""" 109 | pass 110 | -------------------------------------------------------------------------------- /optimas/evaluators/multitask_evaluator.py: -------------------------------------------------------------------------------- 1 | """Contains the definition of the MultitaskEvaluator class.""" 2 | 3 | from typing import List, Dict 4 | 5 | from optimas.core import Task, VaryingParameter, Objective, Parameter 6 | from .base import Evaluator 7 | from .template_evaluator import TemplateEvaluator 8 | 9 | 10 | class MultitaskEvaluator(Evaluator): 11 | """Evaluator to be used with multitask optimization. 12 | 13 | Each task has its own evaluator (either a ``FunctionEvaluator`` or a 14 | ``TemplateEvaluator``). The ``MultitaskEvaluator`` groups all tasks and 15 | their corresponding evaluators and generates the combined ``libe_specs`` 16 | and ``sim_specs``. 17 | 18 | Parameters 19 | ---------- 20 | tasks : list of Task 21 | List of the tasks used in the optimization. 22 | task_evaluators : list of Evaluator 23 | List with the evaluators of each task. 24 | 25 | """ 26 | 27 | def __init__( 28 | self, tasks: List[Task], task_evaluators: List[Evaluator] 29 | ) -> None: 30 | self._check_tasks(tasks) 31 | self._check_evaluators(task_evaluators) 32 | super().__init__(sim_function=task_evaluators[0].sim_function) 33 | self.tasks = tasks 34 | self.task_evaluators = task_evaluators 35 | 36 | def get_sim_specs( 37 | self, 38 | varying_parameters: List[VaryingParameter], 39 | objectives: List[Objective], 40 | analyzed_parameters: List[Parameter], 41 | ) -> Dict: 42 | """Get the `sim_specs` for `libEnsemble`.""" 43 | # Get base sim_specs. 44 | sim_specs = super().get_sim_specs( 45 | varying_parameters, objectives, analyzed_parameters 46 | ) 47 | # Get sim_specs of each task evaluator. 48 | sim_specs_1 = self.task_evaluators[0].get_sim_specs( 49 | varying_parameters, objectives, analyzed_parameters 50 | ) 51 | sim_specs_2 = self.task_evaluators[1].get_sim_specs( 52 | varying_parameters, objectives, analyzed_parameters 53 | ) 54 | # Store user sim_specs of each task separately. 55 | sim_specs["user"] = { 56 | self.tasks[0].name: sim_specs_1["user"], 57 | self.tasks[1].name: sim_specs_2["user"], 58 | } 59 | # Add task name to sim_specs in. 60 | sim_specs["in"].append("task") 61 | return sim_specs 62 | 63 | def get_libe_specs(self) -> Dict: 64 | """Get the `libE_specs` for `libEnsemble`.""" 65 | # Get libe_specs of each task evaluator. 66 | libE_specs_1 = self.task_evaluators[0].get_libe_specs() 67 | libE_specs_2 = self.task_evaluators[1].get_libe_specs() 68 | # Include relevant specs from the second evaluator into the first one. 69 | if "sim_dir_copy_files" in libE_specs_1: 70 | libE_specs_1["sim_dir_copy_files"] = list( 71 | set( 72 | libE_specs_1["sim_dir_copy_files"] 73 | + libE_specs_2["sim_dir_copy_files"] 74 | ) 75 | ) 76 | # Use only the combined specs. 
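        # Only the specs of the first task are returned; aside from
        # `sim_dir_copy_files` (merged above), both evaluators are expected
        # to produce equivalent specs since they must be of the same type.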
77 | return libE_specs_1 78 | 79 | def get_run_params(self) -> Dict: 80 | """Return run parameters for this evaluator.""" 81 | run_params = {} 82 | for task, evaluator in zip(self.tasks, self.task_evaluators): 83 | run_params[task.name] = evaluator.get_run_params() 84 | return run_params 85 | 86 | def _initialize(self) -> None: 87 | """Initialize the evaluator.""" 88 | if isinstance(self.task_evaluators[0], TemplateEvaluator): 89 | for task, evaluator in zip(self.tasks, self.task_evaluators): 90 | evaluator.app_name = task.name 91 | for evaluator in self.task_evaluators: 92 | evaluator.initialize() 93 | 94 | def _check_evaluators(self, evaluators) -> None: 95 | """Check the given evaluators.""" 96 | # Check that only 2 evaluators are given. 97 | assert len(evaluators) == 2 98 | # Check that both evaluators are of the same type. 99 | assert isinstance(evaluators[0], type(evaluators[1])) 100 | 101 | def _check_tasks(self, tasks) -> None: 102 | """Check the given tasks.""" 103 | # Check that only 2 tasks are given. 104 | assert len(tasks) == 2 105 | # Check that the task names are different. 106 | assert tasks[0].name != tasks[1].name 107 | -------------------------------------------------------------------------------- /optimas/evaluators/chain_evaluator.py: -------------------------------------------------------------------------------- 1 | """Contains the definition of the ChainEvaluator class.""" 2 | 3 | import copy 4 | from typing import List, Dict 5 | 6 | 7 | from optimas.core import VaryingParameter, Objective, Parameter 8 | from optimas.sim_functions import run_template_simulation 9 | from .base import Evaluator 10 | from .template_evaluator import TemplateEvaluator 11 | 12 | 13 | class ChainEvaluator(Evaluator): 14 | """Evaluator that executes a chain of `TemplateEvaluators`. 15 | 16 | This is useful when each evaluation consists of several steps, where each 17 | step is a simulation with a different simulation code. Each step 18 | is defined by a TemplateEvaluator and can request a different number of 19 | resources. 20 | 21 | Each evaluation with the `ChainEvaluator` gets allocated the maximum number 22 | of processes (`n_procs`) and GPUs (`n_gpus`) that every step might request 23 | (e.g., if one step requires `n_procs=20` and `n_gpus=0`, and a second step 24 | requires `n_procs=4` and `n_gpus=4`, each evaluation will get assigned 25 | `n_procs=20` and `n_gpus=4`). Then each step will only make use of the 26 | subset of resources it needs. 27 | 28 | Parameters 29 | ---------- 30 | evaluators : list of TemplateEvaluators 31 | A list of TemplateEvaluators given in the order in which they should 32 | be executed. 33 | 34 | """ 35 | 36 | def __init__(self, evaluators: List[TemplateEvaluator]) -> None: 37 | self._check_evaluators(evaluators) 38 | super().__init__( 39 | run_template_simulation, 40 | ) 41 | self.evaluators = evaluators 42 | 43 | def get_sim_specs( 44 | self, 45 | varying_parameters: List[VaryingParameter], 46 | objectives: List[Objective], 47 | analyzed_parameters: List[Parameter], 48 | ) -> Dict: 49 | """Get the `sim_specs` for `libEnsemble`.""" 50 | # Get base sim_specs. 51 | sim_specs = super().get_sim_specs( 52 | varying_parameters, objectives, analyzed_parameters 53 | ) 54 | sim_specs["user"]["steps"] = [] 55 | # Get the user specs from each step. 
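        # The per-step specs are appended in the order in which the
        # evaluators were given, which is also the order in which the chained
        # simulations are executed for each evaluation.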
56 | for evaluator in self.evaluators: 57 | sim_specs["user"]["steps"].append( 58 | evaluator.get_sim_specs( 59 | varying_parameters, objectives, analyzed_parameters 60 | )["user"] 61 | ) 62 | return sim_specs 63 | 64 | def get_libe_specs(self) -> Dict: 65 | """Get the `libE_specs` for `libEnsemble`.""" 66 | # Get libe_specs of each task evaluator. 67 | libE_specs_0 = copy.deepcopy(self.evaluators[0].get_libe_specs()) 68 | # Combine the files to copy from all evaluators. 69 | for evaluator in self.evaluators[1:]: 70 | libE_specs_i = evaluator.get_libe_specs() 71 | libE_specs_0["sim_dir_copy_files"] = list( 72 | set( 73 | libE_specs_0["sim_dir_copy_files"] 74 | + libE_specs_i["sim_dir_copy_files"] 75 | ) 76 | ) 77 | # Use only the combined specs. 78 | return libE_specs_0 79 | 80 | def get_run_params(self) -> Dict: 81 | """Return run parameters for this evaluator.""" 82 | num_procs = 0 83 | num_gpus = 0 84 | # Get maximum number of processes and GPUs. 85 | for evaluator in self.evaluators: 86 | ev_run_params = evaluator.get_run_params() 87 | num_procs = max(num_procs, ev_run_params["num_procs"]) 88 | num_gpus = max(num_gpus, ev_run_params["num_gpus"]) 89 | run_params = { 90 | "num_procs": num_procs, 91 | "num_gpus": num_gpus, 92 | } 93 | return run_params 94 | 95 | def _initialize(self) -> None: 96 | """Initialize the evaluator.""" 97 | for i, evaluator in enumerate(self.evaluators): 98 | # Assign a different app name to each evaluator. 99 | evaluator.app_name = f"sim_{i}" 100 | evaluator.initialize() 101 | 102 | def _check_evaluators(self, evaluators) -> None: 103 | """Check the given evaluators.""" 104 | # Check that both evaluators are of the same type. 105 | for evaluator in evaluators: 106 | assert isinstance( 107 | evaluator, TemplateEvaluator 108 | ), "Only TemplateEvaluators are supported for chain evaluation." 109 | -------------------------------------------------------------------------------- /doc/source/user_guide/basic_usage/exploration_diagnostics.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "raw", 5 | "metadata": { 6 | "raw_mimetype": "text/restructuredtext" 7 | }, 8 | "source": [ 9 | "Analyzing results\n", 10 | "=================\n", 11 | "\n", 12 | "Optimas provides a convenient :class:`~optimas.diagnostics.ExplorationDiagnostics`\n", 13 | "class to easily analyze and visualize the output of an exploration without\n", 14 | "having to manually access each file.\n", 15 | "\n", 16 | "The examples below showcase the functionality of this class by analyzing the\n", 17 | "output of this :ref:`basic Bayesian optimization example `." 18 | ] 19 | }, 20 | { 21 | "cell_type": "code", 22 | "execution_count": null, 23 | "metadata": { 24 | "nbsphinx": "hidden" 25 | }, 26 | "outputs": [], 27 | "source": [ 28 | "%cd ../../../../examples/dummy/\n", 29 | "!python run_example.py" 30 | ] 31 | }, 32 | { 33 | "cell_type": "raw", 34 | "metadata": { 35 | "raw_mimetype": "text/restructuredtext" 36 | }, 37 | "source": [ 38 | "Import and initialize diagnostics\n", 39 | "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", 40 | "\n", 41 | "The diagnostics class only requires the path to the exploration directory\n", 42 | "as input parameter." 
43 | ] 44 | }, 45 | { 46 | "cell_type": "code", 47 | "execution_count": null, 48 | "metadata": {}, 49 | "outputs": [], 50 | "source": [ 51 | "from optimas.diagnostics import ExplorationDiagnostics\n", 52 | "\n", 53 | "diags = ExplorationDiagnostics(\"./exploration\")" 54 | ] 55 | }, 56 | { 57 | "cell_type": "raw", 58 | "metadata": { 59 | "raw_mimetype": "text/restructuredtext" 60 | }, 61 | "source": [ 62 | "Access exploration history\n", 63 | "~~~~~~~~~~~~~~~~~~~~~~~~~~\n", 64 | "\n", 65 | "The diagnostics provide easy access to the exploration history, which\n", 66 | "is returned as a pandas ``DataFrame``." 67 | ] 68 | }, 69 | { 70 | "cell_type": "code", 71 | "execution_count": null, 72 | "metadata": {}, 73 | "outputs": [], 74 | "source": [ 75 | "diags.history" 76 | ] 77 | }, 78 | { 79 | "cell_type": "raw", 80 | "metadata": { 81 | "raw_mimetype": "text/restructuredtext" 82 | }, 83 | "source": [ 84 | "Built-in plotting utilities\n", 85 | "~~~~~~~~~~~~~~~~~~~~~~~~~~~\n", 86 | "\n", 87 | "Several basic plotting functions are provided by the diagnostics class.\n", 88 | "The example below uses\n", 89 | ":meth:`~optimas.diagnostics.ExplorationDiagnostics.plot_objective`\n", 90 | "to show the value of the objective `f` for each evaluation, as well as\n", 91 | "the evolution of the cumulative best." 92 | ] 93 | }, 94 | { 95 | "cell_type": "code", 96 | "execution_count": null, 97 | "metadata": {}, 98 | "outputs": [], 99 | "source": [ 100 | "diags.plot_objective(show_trace=True)" 101 | ] 102 | }, 103 | { 104 | "cell_type": "raw", 105 | "metadata": { 106 | "raw_mimetype": "text/restructuredtext" 107 | }, 108 | "source": [ 109 | "User plots\n", 110 | "~~~~~~~~~~\n", 111 | "\n", 112 | "The :class:`~optimas.diagnostics.ExplorationDiagnostics` exposes all\n", 113 | "necessary data to perform any analysis or plot of the exploration.\n", 114 | "As an example, the code below generates a plot of the phase-space of the\n", 115 | "optimization, including the value of each evaluation and the boundaries of\n", 116 | "the varying parameters." 
117 | ] 118 | }, 119 | { 120 | "cell_type": "code", 121 | "execution_count": null, 122 | "metadata": {}, 123 | "outputs": [], 124 | "source": [ 125 | "import matplotlib.pyplot as plt\n", 126 | "\n", 127 | "fig, ax = plt.subplots()\n", 128 | "vocs = diags._exploration.generator.vocs\n", 129 | "df = diags.history\n", 130 | "vps = list(vocs.variables.keys())\n", 131 | "f1 = list(vocs.objectives.keys())[0]\n", 132 | "ax.axvline(vocs.variables[vps[0]].domain[0])\n", 133 | "ax.axvline(vocs.variables[vps[0]].domain[1])\n", 134 | "ax.set_xlabel(vps[0])\n", 135 | "ax.axhline(vocs.variables[vps[1]].domain[0])\n", 136 | "ax.axhline(vocs.variables[vps[1]].domain[1])\n", 137 | "ax.set_ylabel(vps[1])\n", 138 | "ax.scatter(df[vps[0]], df[vps[1]], c=df[f1])" 139 | ] 140 | } 141 | ], 142 | "metadata": { 143 | "language_info": { 144 | "name": "python" 145 | } 146 | }, 147 | "nbformat": 4, 148 | "nbformat_minor": 4 149 | } 150 | -------------------------------------------------------------------------------- /examples/multitask_lpa_fbpic_waket/analysis_script.py: -------------------------------------------------------------------------------- 1 | """Defines the analysis function that runs after the simulation.""" 2 | 3 | import os 4 | 5 | import numpy as np 6 | import matplotlib.pyplot as plt 7 | import visualpic as vp 8 | from aptools.plotting.quick_diagnostics import ( 9 | phase_space_overview, 10 | slice_analysis, 11 | ) 12 | 13 | 14 | def analyze_simulation(simulation_directory, output_params): 15 | """Analyze the output of the simulation.""" 16 | # Load data. 17 | diags_dir = os.path.join(simulation_directory, "diags/hdf5") 18 | dc = vp.DataContainer("openpmd", diags_dir) 19 | dc.load_data() 20 | 21 | # Get final bunch distribution. 22 | bunch = dc.get_species("bunch") 23 | ts = bunch.timesteps 24 | bunch_data = bunch.get_data(ts[-1]) 25 | x = bunch_data["x"][0] 26 | y = bunch_data["y"][0] 27 | z = bunch_data["z"][0] 28 | px = bunch_data["px"][0] 29 | py = bunch_data["py"][0] 30 | pz = bunch_data["pz"][0] 31 | q = bunch_data["q"][0] 32 | 33 | # Remove particles with pz < 100 34 | pz_filter = np.where(pz >= 100) 35 | x = x[pz_filter] 36 | y = y[pz_filter] 37 | z = z[pz_filter] 38 | px = px[pz_filter] 39 | py = py[pz_filter] 40 | pz = pz[pz_filter] 41 | q = q[pz_filter] 42 | 43 | # Calculate relevant parameters. 44 | q_tot = np.abs(np.sum(q)) * 1e12 # pC 45 | q_ref = 10 # pC 46 | # ene = np.average(pz, weights=q) * 0.511 # MeV 47 | med, mad = weighted_mad(pz * 0.511, q) 48 | mad_rel = mad / med 49 | med *= 1e-3 # GeV 50 | mad_rel_ref = 1e-2 51 | 52 | # Calculate objective. 53 | f = np.log(med * q_tot / q_ref / (mad_rel / mad_rel_ref)) 54 | 55 | # Store quantities in output. 56 | output_params["f"] = -f 57 | output_params["charge"] = q_tot 58 | output_params["energy_med"] = med 59 | output_params["energy_mad"] = mad 60 | 61 | # Save objective to file (for convenience). 62 | np.savetxt("f.txt", np.array([f])) 63 | 64 | # Make plots. 65 | try: 66 | plt.figure() 67 | slice_analysis(x, y, z, px, py, pz, q, show=False) 68 | plt.savefig("final_lon_phase_space.png") 69 | plt.figure() 70 | phase_space_overview(x, y, z, px, py, pz, q, show=False) 71 | plt.savefig("final_phase_space.png") 72 | except Exception: 73 | print("Failed to make plots.") 74 | 75 | # Remove all diagnostics except last file. 
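    # Keeping only the final openPMD file limits the disk usage of a long
    # optimization, since every evaluation writes its own diagnostics folder.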
76 | try: 77 | for file in sorted(os.listdir(diags_dir))[:-1]: 78 | file_path = os.path.join(diags_dir, file) 79 | os.remove(file_path) 80 | except Exception: 81 | print("Could not remove diagnostics.") 82 | 83 | return output_params 84 | 85 | 86 | def weighted_mad(x, w): 87 | """Calculate weighted median absolute deviation.""" 88 | med = weighted_median(x, w) 89 | mad = weighted_median(np.abs(x - med), w) 90 | return med, mad 91 | 92 | 93 | def weighted_median(data, weights): 94 | """Compute the weighted quantile of a 1D numpy array. 95 | 96 | Parameters 97 | ---------- 98 | data : ndarray 99 | Input array (one dimension). 100 | weights : ndarray 101 | Array with the weights of the same size of `data`. 102 | quantile : float 103 | Quantile to compute. It must have a value between 0 and 1. 104 | 105 | Returns 106 | ------- 107 | quantile_1D : float 108 | The output value. 109 | 110 | """ 111 | quantile = 0.5 112 | # Check the data 113 | if not isinstance(data, np.matrix): 114 | data = np.asarray(data) 115 | if not isinstance(weights, np.matrix): 116 | weights = np.asarray(weights) 117 | nd = data.ndim 118 | if nd != 1: 119 | raise TypeError("data must be a one dimensional array") 120 | ndw = weights.ndim 121 | if ndw != 1: 122 | raise TypeError("weights must be a one dimensional array") 123 | if data.shape != weights.shape: 124 | raise TypeError("the length of data and weights must be the same") 125 | if (quantile > 1.0) or (quantile < 0.0): 126 | raise ValueError("quantile must have a value between 0. and 1.") 127 | # Sort the data 128 | ind_sorted = np.argsort(data) 129 | sorted_data = data[ind_sorted] 130 | sorted_weights = weights[ind_sorted] 131 | # Compute the auxiliary arrays 132 | Sn = np.cumsum(sorted_weights) 133 | # TODO: Check that the weights do not sum zero 134 | # assert Sn != 0, "The sum of the weights must not be zero" 135 | Pn = (Sn - 0.5 * sorted_weights) / Sn[-1] 136 | # Get the value of the weighted median 137 | return np.interp(quantile, Pn, sorted_data) 138 | -------------------------------------------------------------------------------- /examples/multitask_lpa_fbpic_waket/bunch_utils.py: -------------------------------------------------------------------------------- 1 | """Defines utilities for generating particle bunches.""" 2 | 3 | import numpy as np 4 | from scipy.constants import c 5 | 6 | 7 | def gaussian_bunch( 8 | q_tot, n_part, gamma0, s_g, s_z, emit_x, s_x, zf=0.0, tf=0, x_c=0.0 9 | ): 10 | """Create a Gaussian particle bunch.""" 11 | n_part = int(n_part) 12 | 13 | np.random.seed(42) 14 | z = zf + s_z * np.random.standard_normal(n_part) 15 | x = x_c + s_x * np.random.standard_normal(n_part) 16 | y = s_x * np.random.standard_normal(n_part) 17 | 18 | gamma = np.random.normal(gamma0, s_g, n_part) 19 | 20 | s_ux = emit_x / s_x 21 | ux = s_ux * np.random.standard_normal(n_part) 22 | uy = s_ux * np.random.standard_normal(n_part) 23 | 24 | uz = np.sqrt((gamma**2 - 1) - ux**2 - uy**2) 25 | 26 | if tf != 0.0: 27 | x = x - ux * c * tf / gamma 28 | y = y - uy * c * tf / gamma 29 | z = z - uz * c * tf / gamma 30 | 31 | q = np.ones(n_part) * q_tot / n_part 32 | 33 | return x, y, z, ux, uy, uz, q 34 | 35 | 36 | def flattop_bunch( 37 | q_tot, 38 | n_part, 39 | gamma0, 40 | s_g, 41 | length, 42 | s_z, 43 | emit_x, 44 | s_x, 45 | emit_y, 46 | s_y, 47 | zf=0.0, 48 | tf=0, 49 | x_c=0.0, 50 | y_c=0, 51 | ): 52 | """Create a flat-top particle bunch.""" 53 | n_part = int(n_part) 54 | 55 | norma = length + np.sqrt(2 * np.pi) * s_z 56 | n_plat = int(n_part * length / norma) 57 | 
n_gaus = int(n_part * np.sqrt(2 * np.pi) * s_z / norma) 58 | 59 | # Create flattop and gaussian profiles 60 | z_plat = np.random.uniform(0.0, length, n_plat) 61 | z_gaus = s_z * np.random.standard_normal(n_gaus) 62 | 63 | # Concatenate both profiles 64 | z = np.concatenate( 65 | ( 66 | z_gaus[np.where(z_gaus <= 0)], 67 | z_plat, 68 | z_gaus[np.where(z_gaus > 0)] + length, 69 | ) 70 | ) 71 | 72 | z = z - length / 2.0 + zf # shift to final position 73 | 74 | n_part = len(z) 75 | x = x_c + s_x * np.random.standard_normal(n_part) 76 | y = y_c + s_y * np.random.standard_normal(n_part) 77 | 78 | gamma = np.random.normal(gamma0, s_g, n_part) 79 | 80 | s_ux = emit_x / s_x 81 | ux = s_ux * np.random.standard_normal(n_part) 82 | 83 | s_uy = emit_y / s_y 84 | uy = s_uy * np.random.standard_normal(n_part) 85 | 86 | uz = np.sqrt((gamma**2 - 1) - ux**2 - uy**2) 87 | 88 | if tf != 0.0: 89 | x = x - ux * c * tf / gamma 90 | y = y - uy * c * tf / gamma 91 | z = z - uz * c * tf / gamma 92 | 93 | q = np.ones(n_part) * q_tot / n_part 94 | 95 | return x, y, z, ux, uy, uz, q 96 | 97 | 98 | def trapezoidal_bunch( 99 | i0, 100 | i1, 101 | n_part, 102 | gamma0, 103 | s_g, 104 | length, 105 | s_z, 106 | emit_x, 107 | s_x, 108 | emit_y, 109 | s_y, 110 | zf=0.0, 111 | tf=0, 112 | x_c=0.0, 113 | y_c=0.0, 114 | ): 115 | """Create a trapezoidal particle bunch.""" 116 | n_part = int(n_part) 117 | 118 | q_plat = (min(i0, i1) / c) * length 119 | q_triag = ((max(i0, i1) - min(i0, i1)) / c) * length / 2.0 120 | q_gaus0 = (i0 / c) * np.sqrt(2 * np.pi) * s_z / 2.0 121 | q_gaus1 = (i1 / c) * np.sqrt(2 * np.pi) * s_z / 2.0 122 | q_tot = q_plat + q_triag + q_gaus0 + q_gaus1 123 | 124 | n_plat = int(n_part * q_plat / q_tot) 125 | n_triag = int(n_part * q_triag / q_tot) 126 | n_gaus0 = int(n_part * q_gaus0 / q_tot) 127 | n_gaus1 = int(n_part * q_gaus1 / q_tot) 128 | 129 | np.random.seed(42) 130 | z_plat = np.random.uniform(0.0, length, n_plat) 131 | if i0 <= i1: 132 | z_triag = np.random.triangular(0.0, length, length, n_triag) 133 | else: 134 | z_triag = np.random.triangular(0.0, 0.0, length, n_triag) 135 | z_gaus0 = s_z * np.random.standard_normal(2 * n_gaus0) 136 | z_gaus1 = s_z * np.random.standard_normal(2 * n_gaus1) 137 | 138 | z = np.concatenate( 139 | ( 140 | z_gaus0[np.where(z_gaus0 < 0)], 141 | z_plat, 142 | z_triag, 143 | z_gaus1[np.where(z_gaus1 > 0)] + length, 144 | ) 145 | ) 146 | 147 | z = z - length / 2.0 + zf # shift to final position 148 | 149 | n_part = len(z) 150 | x = x_c + s_x * np.random.standard_normal(n_part) 151 | y = y_c + s_y * np.random.standard_normal(n_part) 152 | 153 | gamma = np.random.normal(gamma0, s_g, n_part) 154 | 155 | s_ux = emit_x / s_x 156 | ux = s_ux * np.random.standard_normal(n_part) 157 | 158 | s_uy = emit_y / s_y 159 | uy = s_uy * np.random.standard_normal(n_part) 160 | 161 | uz = np.sqrt((gamma**2 - 1) - ux**2 - uy**2) 162 | 163 | if tf != 0.0: 164 | x = x - ux * c * tf / gamma 165 | y = y - uy * c * tf / gamma 166 | z = z - uz * c * tf / gamma 167 | 168 | q = np.ones(n_part) * q_tot / n_part 169 | 170 | return x, y, z, ux, uy, uz, q 171 | --------------------------------------------------------------------------------
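A minimal usage sketch for the bunch generators defined above. The beam parameters are purely illustrative (hypothetical values), and the import assumes the snippet sits next to `bunch_utils.py` in `examples/multitask_lpa_fbpic_waket/`.

import numpy as np

from bunch_utils import gaussian_bunch

# Hypothetical beam: 30 pC (in Coulomb; the functions are unit-agnostic),
# 10000 macroparticles, mean gamma of 200 with a spread of 2, 3 um bunch
# length and spot size, and 1 um normalized emittance.
x, y, z, ux, uy, uz, q = gaussian_bunch(
    q_tot=30e-12,
    n_part=10000,
    gamma0=200.0,
    s_g=2.0,
    s_z=3e-6,
    emit_x=1e-6,
    s_x=3e-6,
)

# The macroparticle weights are uniform, so the total charge is recovered.
assert np.isclose(q.sum(), 30e-12)

# uz is derived from the sampled gamma and the transverse momenta, so the
# reconstructed gamma matches the requested distribution.
gamma = np.sqrt(1 + ux**2 + uy**2 + uz**2)
print(gamma.mean(), gamma.std())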