├── LICENSE ├── README.md ├── baxus_cma ├── __init__.py ├── baxus.py ├── benchmark_runner.py ├── benchmarks │ ├── __init__.py │ ├── benchmark_function.py │ ├── benchmark_utils.py │ ├── mopta08 │ │ └── .init │ ├── other_methods.py │ ├── real_world_benchmarks.py │ └── synthetic_benchmark_functions.py ├── embeddedturbo.py ├── gp.py └── util │ ├── __init__.py │ ├── acquisition_function_types.py │ ├── acquisition_functions.py │ ├── behaviors │ ├── __init__.py │ ├── baxus_configuration.py │ ├── embedded_turbo_configuration.py │ ├── embedding_configuration.py │ └── gp_configuration.py │ ├── console_entry_point.py │ ├── data_utils.py │ ├── exceptions.py │ ├── gp_utils.py │ ├── parsing.py │ ├── projections.py │ ├── space_learning │ ├── __init__.py │ └── trust_region.py │ └── utils.py ├── cmabo ├── cma_bo.py ├── gp.py └── utils.py ├── cmabo_general_process.png ├── environment.yml ├── experiments-from-paper.sh ├── test-main.py ├── test_functions ├── LassoBench │ ├── LassoBench.py │ └── __init__.py ├── __init__.py ├── function_realworld_bo │ ├── __init__.py │ ├── bipedal_walker.py │ ├── ebo_core │ │ ├── __init__.py │ │ └── helper.py │ ├── functions_mujoco.py │ ├── functions_realworld_bo.py │ ├── functions_xgboost.py │ ├── hpobench │ │ ├── __init__.py │ │ ├── __version__.py │ │ ├── abstract_benchmark.py │ │ ├── benchmarks │ │ │ ├── __init__.py │ │ │ ├── ml │ │ │ │ ├── README.md │ │ │ │ ├── __init__.py │ │ │ │ ├── histgb_benchmark.py │ │ │ │ ├── lr_benchmark.py │ │ │ │ ├── nn_benchmark.py │ │ │ │ ├── pybnn.py │ │ │ │ ├── rf_benchmark.py │ │ │ │ ├── svm_benchmark.py │ │ │ │ ├── svm_benchmark_old.py │ │ │ │ ├── tabular_benchmark.py │ │ │ │ ├── xgboost_benchmark.py │ │ │ │ └── xgboost_benchmark_old.py │ │ │ ├── nas │ │ │ │ ├── __init__.py │ │ │ │ ├── nasbench_101.py │ │ │ │ ├── nasbench_1shot1.py │ │ │ │ ├── nasbench_201.py │ │ │ │ └── tabular_benchmarks.py │ │ │ ├── od │ │ │ │ ├── __init__.py │ │ │ │ ├── od_ae.py │ │ │ │ ├── od_benchmarks.py │ │ │ │ ├── od_kde.py │ │ │ │ └── od_ocsvm.py │ │ │ ├── rl │ │ │ │ ├── __init__.py │ │ │ │ ├── cartpole.py │ │ │ │ └── learna_benchmark.py │ │ │ └── surrogates │ │ │ │ ├── __init__.py │ │ │ │ ├── paramnet_benchmark.py │ │ │ │ └── svm_benchmark.py │ │ ├── config.py │ │ ├── container │ │ │ ├── __init__.py │ │ │ ├── benchmarks │ │ │ │ ├── __init__.py │ │ │ │ ├── ml │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── histgb_benchmark.py │ │ │ │ │ ├── lr_benchmark.py │ │ │ │ │ ├── nn_benchmark.py │ │ │ │ │ ├── pybnn.py │ │ │ │ │ ├── rf_benchmark.py │ │ │ │ │ ├── svm_benchmark.py │ │ │ │ │ ├── svm_benchmark_old.py │ │ │ │ │ ├── tabular_benchmark.py │ │ │ │ │ ├── xgboost_benchmark.py │ │ │ │ │ └── xgboost_benchmark_old.py │ │ │ │ ├── nas │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── nasbench_101.py │ │ │ │ │ ├── nasbench_1shot1.py │ │ │ │ │ ├── nasbench_201.py │ │ │ │ │ └── tabular_benchmarks.py │ │ │ │ ├── od │ │ │ │ │ └── od_benchmarks.py │ │ │ │ ├── rl │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── cartpole.py │ │ │ │ │ └── learna_benchmark.py │ │ │ │ └── surrogates │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── paramnet_benchmark.py │ │ │ │ │ └── svm_benchmark.py │ │ │ ├── client_abstract_benchmark.py │ │ │ ├── recipes │ │ │ │ ├── Singularity.template │ │ │ │ ├── ml │ │ │ │ │ ├── Singularity.PyBNN │ │ │ │ │ ├── Singularity.SupportVectorMachine │ │ │ │ │ ├── Singularity.XGBoostBenchmark │ │ │ │ │ ├── Singularity.ml_mmfb │ │ │ │ │ └── Singularity.ml_tabular_benchmark │ │ │ │ ├── nas │ │ │ │ │ ├── Singularity.TabularBenchmarks │ │ │ │ │ ├── Singularity.nasbench_101 │ │ │ │ │ ├── 
Singularity.nasbench_1shot1 │ │ │ │ │ └── Singularity.nasbench_201 │ │ │ │ ├── od │ │ │ │ │ ├── Singularity.ODBenchmarks │ │ │ │ │ └── Singularity.ODKernelDensityEstimation │ │ │ │ ├── rl │ │ │ │ │ ├── Singularity.Cartpole │ │ │ │ │ └── Singularity.learnaBenchmark │ │ │ │ └── surrogates │ │ │ │ │ ├── Singularity.ParamnetBenchmark │ │ │ │ │ └── Singularity.SupportVectorMachine │ │ │ └── server_abstract_benchmark.py │ │ ├── dependencies │ │ │ ├── __init__.py │ │ │ ├── ml │ │ │ │ ├── __init__.py │ │ │ │ ├── data_manager.py │ │ │ │ └── ml_benchmark_template.py │ │ │ └── od │ │ │ │ ├── __init__.py │ │ │ │ ├── backbones │ │ │ │ ├── __init__.py │ │ │ │ └── mlp.py │ │ │ │ ├── callbacks │ │ │ │ ├── __init__.py │ │ │ │ ├── checkpoint_saver.py │ │ │ │ └── earlystopping.py │ │ │ │ ├── data_manager.py │ │ │ │ ├── models │ │ │ │ ├── __init__.py │ │ │ │ └── autoencoder.py │ │ │ │ ├── traditional_benchmark.py │ │ │ │ └── utils │ │ │ │ ├── __init__.py │ │ │ │ ├── activations.py │ │ │ │ └── scaler.py │ │ └── util │ │ │ ├── __init__.py │ │ │ ├── clean_up_script.py │ │ │ ├── container_utils.py │ │ │ ├── data_manager.py │ │ │ ├── dependencies.py │ │ │ ├── example_utils.py │ │ │ ├── openml_data_manager.py │ │ │ └── rng_helper.py │ ├── lunar_lander.py │ ├── mopta08 │ │ └── init │ ├── push_function.py │ ├── push_utils.py │ ├── rover_function.py │ └── rover_utils.py ├── functions_bo.py ├── highdim_functions.py ├── lasso_benchmark.py ├── setup.py └── utils.py └── turbo_cma ├── __init__.py ├── gp.py ├── turbo_1.py └── utils.py /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Lam Ngo et al. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## Overview 2 | 3 | Code for the CMA-Meta-Algorithm for Bayesian Optimization from ***High-dimensional Bayesian Optimization via Covariance Matrix Adaptation Strategy*** in the *Transactions on Machine Learning Research (TMLR)*. The OpenReview page can be found in this [link](https://openreview.net/forum?id=eTgxr7gPuU). 
4 | 
5 | ![figure of cmabo](cmabo_general_process.png)
6 | 
7 | If you find our paper or this repository useful for your research, please consider citing:
8 | ```
9 | @article{ngo2024cmabo,
10 |   title={High-dimensional Bayesian Optimization via Covariance Matrix Adaptation Strategy},
11 |   author={Ngo, Lam and Ha, Huong and Chan, Jeffrey and Nguyen, Vu and Zhang, Hongyu},
12 |   journal={Transactions on Machine Learning Research},
13 |   year={2024}
14 | }
15 | ```
16 | 
17 | ## Requirements and dependencies
18 | - Install Anaconda/Miniconda.
19 | - Install all dependencies listed in the ```environment.yml``` file, or run the following to install them via conda:
20 | ```
21 | conda env create -f environment.yml
22 | ```
23 | - If you want to run the Lasso benchmark, install the LassoBench library by running:
24 | ```
25 | cd test_functions/
26 | pip install -e .
27 | ```
28 | ## Getting started
29 | The main file is ```test-main.py```. To run the code for any problem, use the following syntax (see the example below):
30 | ```
31 | python test-main.py --solver <optimizer_name> -f <function_name> -d <input_dim> -n <number_of_iterations> --seed <seeding>
32 | ```
33 | Available options for the arguments:
34 | - ```optimizer_name```: bo, turbo, baxus
35 | - ```function_name```: alpine, levy, ellipsoid, rastrigin, shifted-alpine, shifted-levy, branin500, schaffer100, lasso-dna, rover100, half-cheetah
36 | - ```input_dim```: an integer (only applicable for alpine, levy, ellipsoid, rastrigin, shifted-alpine, shifted-levy)
37 | - ```number_of_iterations```: an integer specifying the iteration budget.
38 | - ```seeding```: (optional) the random seed controlling the ```random```, ```numpy```, and ```torch``` random number generators.
39 | 
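40 | For example, the following command (one of the settings listed in ```experiments-from-paper.sh```) should run the CMA meta-algorithm with TuRBO on the 100-dimensional Alpine function for 2000 iterations with seed 1:
41 | ```
42 | python test-main.py --solver turbo -f alpine -d 100 -n 2000 --seed 1
43 | ```
44 | 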
45 | ## Results from paper
46 | We provide the settings for reproducing the results presented in the paper in ```experiments-from-paper.sh```.
47 | 
48 | ## Acknowledgements
49 | 
50 | This implementation uses materials from the following public repositories to implement the CMA local regions and the incorporated BO optimizers. We thank the respective repository maintainers.
51 | 1. CMA-ES: Hansen, N., & Ostermeier, A. (2001). Completely derandomized self-adaptation in evolution strategies. Evolutionary computation, 9(2), 159-195.
52 | Code repo: https://github.com/CMA-ES/pycma
53 | 2. TuRBO: Eriksson, D., Pearce, M., Gardner, J. R., Turner, R., & Poloczek, M. (2019). Scalable global optimization via local Bayesian optimization. Advances in Neural Information Processing Systems, 32 (NeurIPS).
54 | Code repo: https://github.com/uber-research/TuRBO/
55 | 3. BAxUS: Papenmeier, L., Nardi, L., & Poloczek, M. (2022). Increasing the scope as you learn: Adaptive Bayesian optimization in nested subspaces. Advances in Neural Information Processing Systems, 35 (NeurIPS).
56 | Code repo: https://github.com/LeoIV/BAxUS
57 | 
58 | 
--------------------------------------------------------------------------------
/baxus_cma/__init__.py:
--------------------------------------------------------------------------------
1 | from .embeddedturbo import EmbeddedTuRBO
2 | from .baxus import BAxUS
--------------------------------------------------------------------------------
/baxus_cma/benchmarks/__init__.py:
--------------------------------------------------------------------------------
1 | from .benchmark_function import Benchmark, BoTorchFunctionBenchmark, EffectiveDimBoTorchBenchmark, SyntheticBenchmark, \
2 |     EffectiveDimBenchmark, SyntheticTestFunction
3 | from .benchmark_utils import run_and_plot
4 | from .other_methods import OptimizationMethod, RandomSearch
5 | 
6 | from .real_world_benchmarks import SVMBenchmark, LassoHighBenchmark, LassoHardBenchmark, LassoDiabetesBenchmark, \
7 |     LassoLeukemiaBenchmark, LassoMediumBenchmark, LassoSimpleBenchmark, LassoDNABenchmark, LassoRCV1Benchmark, \
8 |     LassoBreastCancerBenchmark, MoptaSoftConstraints
9 | 
10 | from .synthetic_benchmark_functions import BraninEffectiveDim, RosenbrockEffectiveDim, MichalewiczEffectiveDim, \
11 |     HartmannEffectiveDim, LevyEffectiveDim, AckleyEffectiveDim, GriewankEffectiveDim, RastriginEffectiveDim, \
12 |     DixonPriceEffectiveDim, RotatedHartmann6, ShiftedAckley10
--------------------------------------------------------------------------------
/baxus_cma/benchmarks/benchmark_utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | from copy import deepcopy, copy
3 | from logging import warning, info
4 | from typing import List
5 | 
6 | import numpy as np
7 | import pandas as pd
8 | 
9 | 
10 | MAX_RETRIES = 1
11 | 
12 | 
13 | def run_and_plot(
14 |         m: "baxus.benchmarks.OptimizationMethod",
15 |         repetitions: List[int],
16 |         directory: str,
17 | ) -> None:
18 |     """
19 |     Run an experiment for a certain number of repetitions and save the results
20 | 
21 |     Args:
22 |         m: the experiment to run
23 |         repetitions: the repetitions to run
24 |         directory: the directory to save the results
25 | 
26 |     Returns:
27 |         None
28 |     """
29 |     os.makedirs(directory, exist_ok=True)
30 | 
31 |     base_run_dir = copy(m.run_dir)
32 | 
33 |     for rep in repetitions:
34 |         out_path = os.path.join(directory, f"repet_{rep}.csv.xz")
35 |         rep_run_dir = os.path.join(base_run_dir, f"repetition_{rep}")
36 |         os.makedirs(rep_run_dir, exist_ok=True)
37 |         m.run_dir = rep_run_dir
38 | 
39 |         if os.path.exists(out_path):
40 |             continue
41 |         info(f"starting repetition {rep}")
42 |         for mr in range(MAX_RETRIES):
43 |             try:
44 |                 _m = deepcopy(m)
45 |                 _m.reset()
46 |                 _m.optimize()
47 |                 break
48 |             except Exception as e:
49 |                 if mr == MAX_RETRIES - 1:
50 |                     raise e
51 |                 warning(f"Optimization failed. Retrying... ({mr + 1}/{MAX_RETRIES})")
52 |         m_x, m_y_raw = _m.optimization_results_raw()
53 |         _, m_y = _m.optimization_results_incumbent()
54 |         m_y_raw = np.expand_dims(m_y_raw, axis=1)
55 |         if m_x is not None:
56 |             columns = [f"x{i}" for i in range(m_x.shape[1])] + ["y_raw"]
57 |             r_df = pd.DataFrame(np.concatenate((m_x, m_y_raw), axis=1), columns=columns)
58 |         else:
59 |             columns = ["y_raw"]
60 |             r_df = pd.DataFrame(m_y_raw, columns=columns)
61 |         r_df.to_csv(out_path)
62 |         del r_df
63 |         del m_y_raw
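
The helper above can be driven by any `OptimizationMethod`, e.g. the `RandomSearch` baseline defined in `other_methods.py` below. The following is a minimal usage sketch and not part of the repository: the toy objective, dimensionality, and directory names are placeholder assumptions, and a real `Benchmark` instance would normally be passed as `function`.

```python
import numpy as np

from baxus_cma.benchmarks.benchmark_utils import run_and_plot
from baxus_cma.benchmarks.other_methods import RandomSearch

dim = 10


def toy_objective(x: np.ndarray) -> np.ndarray:
    # Batch-aware sphere function standing in for a real Benchmark instance:
    # accepts an (n, dim) array and returns n objective values.
    x = np.atleast_2d(np.asarray(x))
    return np.sum(x ** 2, axis=1)


searcher = RandomSearch(
    function=toy_objective,           # placeholder objective (assumption)
    input_dim=dim,
    max_evals=50,
    run_dir="runs/random_search",     # per-repetition subdirectories are created inside
    lower_bounds=np.full(dim, -1.0),
    upper_bounds=np.full(dim, 1.0),
)

# Runs three repetitions and writes results/repet_<rep>.csv.xz for each of them.
run_and_plot(m=searcher, repetitions=[0, 1, 2], directory="results")
```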
--------------------------------------------------------------------------------
/baxus_cma/benchmarks/mopta08/.init:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/LamNgo1/cma-meta-algorithm/9aa76dc82f09f4ed5ead6a0cade40645f8850f8e/baxus_cma/benchmarks/mopta08/.init
--------------------------------------------------------------------------------
/baxus_cma/benchmarks/other_methods.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | from abc import ABC, abstractmethod
4 | from logging import warning
5 | from typing import Tuple, Optional, Dict, Any
6 | 
7 | import numpy as np
8 | 
9 | from baxus_cma.benchmarks import Benchmark
10 | 
11 | 
12 | class OptimizationMethod(ABC):
13 |     def __init__(
14 |             self,
15 |             run_dir: str,
16 |             conf_name: Optional[str] = None,
17 |     ):
18 |         """
19 |         Abstract base class for a generic optimization method.
20 | 
21 |         Args:
22 |             run_dir: the directory to store results in
23 |             conf_name: the algorithm configuration to save to disk
24 |         """
25 |         if not os.path.exists(run_dir):
26 |             os.makedirs(run_dir, exist_ok=True)
27 |         if conf_name is not None:
28 |             with open(os.path.join(run_dir, "conf_name.txt"), "w+") as f:
29 |                 f.write(conf_name)
30 |             with open(os.path.join(run_dir, "conf_dict.json"), "w+") as f:
31 |                 json.dump(self.conf_dict, f)
32 | 
33 |         self._optimized = False
34 |         self.run_dir = run_dir
35 | 
36 |     @abstractmethod
37 |     def optimize(self) -> None:
38 |         """
39 |         Start the optimization.
40 | 
41 |         Returns: None
42 | 
43 |         """
44 |         raise NotImplementedError()
45 | 
46 |     @abstractmethod
47 |     def optimization_results_raw(
48 |             self,
49 |     ) -> Tuple[Optional[np.ndarray], np.ndarray]:
50 |         """
51 |         Get the raw optimization results, i.e., the x-values and the corresponding
52 |         true function values.
53 | 
54 |         Returns:
55 |             tuple[X's, y's]
56 |         """
57 |         raise NotImplementedError()
58 | 
59 |     def reset(self) -> None:
60 |         warning("No reset implemented.")
61 |         pass
62 | 
63 |     @property
64 |     def conf_dict(self) -> Dict[str, Any]:
65 |         return {}
66 | 
67 |     def optimization_results_incumbent(self) -> Tuple[np.ndarray, np.ndarray]:
68 |         """
69 |         Get the incumbent optimization results, i.e., optimization results such that y_2 is always less than or equal to
70 |         y_1.
71 | 72 | Returns: 73 | np.ndarray: the x-values 74 | np.ndarray: the incumbent y-values 75 | 76 | """ 77 | assert self._optimized, "Model hasn't been optimized yet" 78 | ( 79 | Xs, 80 | ys, 81 | ) = self.optimization_results_raw() 82 | assert ys.ndim == 1 83 | ys_incumbent = np.minimum.accumulate(ys) 84 | return Xs, ys_incumbent 85 | 86 | 87 | class RandomSearch(OptimizationMethod): 88 | def __init__( 89 | self, 90 | function: Benchmark, 91 | input_dim: int, 92 | max_evals: int, 93 | run_dir: str, 94 | lower_bounds: np.ndarray, 95 | upper_bounds: np.ndarray): 96 | """ 97 | Simple random search implementation, samples points uniformly at random in the search space. 98 | 99 | Args: 100 | function: the function to optimize 101 | input_dim: the dimensionality of the problem 102 | max_evals: maximum number of function evaluations 103 | run_dir: the directory to save results to 104 | lower_bounds: the lower bound of the search space 105 | upper_bounds: the upper_bound of the search space 106 | """ 107 | super().__init__(run_dir) 108 | 109 | self.run_dir = run_dir 110 | 111 | lower_bounds = np.array(lower_bounds, dtype=np.float32) 112 | upper_bounds = np.array(upper_bounds, dtype=np.float32) 113 | 114 | assert type(max_evals) == int 115 | assert type(input_dim) == int 116 | assert len(lower_bounds) == len(upper_bounds) 117 | assert len(lower_bounds) == input_dim 118 | 119 | self.function = function 120 | self.max_evals = max_evals 121 | self.input_dim = input_dim 122 | self.lower_bounds = lower_bounds 123 | self.upper_bounds = upper_bounds 124 | 125 | def optimize(self) -> None: 126 | """ 127 | Run the optimization. 128 | 129 | Returns: None 130 | 131 | """ 132 | assert not self._optimized 133 | 134 | points = np.random.uniform(self.lower_bounds, self.upper_bounds, (self.max_evals, self.input_dim)) 135 | try: 136 | ys = np.array(self.function(points)) 137 | except: 138 | warning("Could not run function on all points at once even though" 139 | " the function should support this.") 140 | ys = np.array([self.function(y) for y in points]) 141 | 142 | self.ys = ys 143 | self._optimized = True 144 | 145 | def optimization_results_raw( 146 | self, 147 | ) -> Tuple[Optional[np.ndarray], np.ndarray]: 148 | assert self._optimized, "Model hasn't been optimized yet" 149 | return None, self.ys 150 | -------------------------------------------------------------------------------- /baxus_cma/util/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LamNgo1/cma-meta-algorithm/9aa76dc82f09f4ed5ead6a0cade40645f8850f8e/baxus_cma/util/__init__.py -------------------------------------------------------------------------------- /baxus_cma/util/acquisition_function_types.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | 3 | 4 | class AcquisitionFunctionType(Enum): 5 | EXPECTED_IMPROVEMENT = 1 6 | """ 7 | Expected improvement acquisition function. 8 | """ 9 | THOMPSON_SAMPLING = 2 10 | """ 11 | Thompson sampling acquisition function. 
12 | """ 13 | -------------------------------------------------------------------------------- /baxus_cma/util/acquisition_functions.py: -------------------------------------------------------------------------------- 1 | import math 2 | from typing import Union 3 | 4 | import gpytorch 5 | import numpy as np 6 | import torch 7 | from botorch.acquisition import ExpectedImprovement as _EI 8 | from botorch.optim import optimize_acqf 9 | 10 | from baxus_cma.gp import GP 11 | 12 | 13 | class ExpectedImprovement: 14 | def __init__(self, gp: GP, best_f: Union[float, np.ndarray], lb: np.ndarray, ub: np.ndarray, 15 | evaluation_batch_size: int = 100, ): 16 | self.ub = ub 17 | self.lb = lb 18 | self.evaluation_batch_size = evaluation_batch_size 19 | self.best_f = best_f 20 | self.gp = gp 21 | self._EI = _EI(model=self.gp, best_f=self.best_f) 22 | 23 | def __call__(self, X: np.ndarray): 24 | 25 | def _ei(X): 26 | X = np.expand_dims(X, 1) 27 | return torch.unsqueeze(self._EI(torch.unsqueeze(torch.tensor(X), 1)), 1).detach().numpy() 28 | 29 | if X.ndim == 1: 30 | X = X[np.newaxis, :] 31 | if len(X) > 100: 32 | # batched version 33 | Xs = np.split(X, math.ceil(len(X) / self.evaluation_batch_size)) 34 | eis = [_ei(_X) for _X in Xs] 35 | result = np.concatenate(eis) 36 | else: 37 | result = _ei(X) 38 | return result 39 | 40 | def optimize(self): 41 | with gpytorch.settings.max_cholesky_size(2000): 42 | X_cand, y_cand = optimize_acqf( 43 | acq_function=self._EI, 44 | bounds=torch.tensor([self.lb.reshape(-1), self.ub.reshape(-1)]), 45 | q=1, 46 | num_restarts=20, 47 | raw_samples=100, 48 | options={}, 49 | ) 50 | return X_cand.detach().numpy(), y_cand.detach().numpy() 51 | -------------------------------------------------------------------------------- /baxus_cma/util/behaviors/__init__.py: -------------------------------------------------------------------------------- 1 | from baxus_cma.util.behaviors.baxus_configuration import ( 2 | BaxusBehavior, 3 | ) 4 | from baxus_cma.util.behaviors.embedded_turbo_configuration import ( 5 | EmbeddedTuRBOBehavior, 6 | ) # noqa 7 | -------------------------------------------------------------------------------- /baxus_cma/util/behaviors/baxus_configuration.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from typing import Any, Dict 3 | 4 | from baxus_cma.util.behaviors.embedded_turbo_configuration import EmbeddedTuRBOBehavior 5 | 6 | 7 | @dataclass 8 | class BaxusBehavior(EmbeddedTuRBOBehavior): 9 | """ 10 | The behavior of the BAxUS algorithm. 11 | 12 | """ 13 | 14 | n_new_bins: int = 3 15 | """ 16 | Number of new bins after a splitting. Default: 3 17 | 18 | """ 19 | 20 | budget_until_input_dim: int = 0 21 | """ 22 | The budget after which we have reached the input dimension under the assumption that we always fail. 23 | If zero: use the entire evaluation budget. 24 | """ 25 | 26 | adjust_initial_target_dim: bool = True 27 | """ 28 | Whether to adjust the initial target dim such that the final split is as close to the ambient dim as possible. 29 | """ 30 | 31 | def __str__(self): 32 | return ( 33 | f"{super().__str__()}" 34 | f"_nbos_{self.n_new_bins}" 35 | f"_aitd_{self.adjust_initial_target_dim}" 36 | f"_buad_{self.budget_until_input_dim}" 37 | ) 38 | 39 | @property 40 | def conf_dict(self) -> Dict[str, Any]: 41 | """ 42 | The configuration as a dictionary. 43 | 44 | Returns: The configuration as a dictionary. 
45 | 46 | """ 47 | base_class_dict = super().conf_dict 48 | this_dict = { 49 | "number of new bins per dimension": self.n_new_bins, 50 | "adjust initial target dimension": self.adjust_initial_target_dim, 51 | "budget until input dimension": self.budget_until_input_dim, 52 | } 53 | return {**base_class_dict, **this_dict} 54 | -------------------------------------------------------------------------------- /baxus_cma/util/behaviors/embedded_turbo_configuration.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from typing import Any, Dict 3 | 4 | from baxus_cma.util.acquisition_function_types import AcquisitionFunctionType 5 | from baxus_cma.util.behaviors.embedding_configuration import EmbeddingType 6 | 7 | 8 | @dataclass 9 | class EmbeddedTuRBOBehavior: 10 | """ 11 | The behavior of the embedded TuRBO algorithm 12 | 13 | """ 14 | 15 | initial_base_length: float = 0.8 16 | """ 17 | The initial base side length (see TuRBO paper) 18 | 19 | """ 20 | max_base_length: float = 1.6 21 | """ 22 | The maximum base side length (see TuRBO paper) 23 | 24 | """ 25 | min_base_length: float = 0.5 ** 7 26 | """ 27 | The minimum base side length (see TuRBO paper). If you get lower than this, the trust region dies out. 28 | 29 | """ 30 | success_tolerance: int = 3 31 | """ 32 | The number of times we consecutively have to find a better point in order to expand the trust region, initial value 33 | 34 | """ 35 | acquisition_function: AcquisitionFunctionType = AcquisitionFunctionType.THOMPSON_SAMPLING 36 | """ 37 | The different acquisition functions to use in a multi-batch setting (default: only Thompson sampling) 38 | 39 | """ 40 | noise: float = 0. 41 | """ 42 | The noise of the problem. 43 | """ 44 | 45 | embedding_type: EmbeddingType = EmbeddingType.BAXUS 46 | """ 47 | Uniform bin sizing means that all target bins have approx. equally many contributing input dimensions. 48 | Random bin sizing means that a random target dimension is chosen for each input dimension (standard HeSBO 49 | behavior). 50 | """ 51 | 52 | success_decision_factor: float = 0.001 53 | """ 54 | The difference wrt to the current incumbent solution required for a next point to be considered a success. 55 | 56 | """ 57 | 58 | def __str__(self): 59 | return ( 60 | f"_linit_{self.initial_base_length}" 61 | f"_lmax_{self.max_base_length}" 62 | f"_lmin_{self.min_base_length}" 63 | f"_successtol_{self.success_tolerance}" 64 | f"_acq_{self.acquisition_function.name}" 65 | f"_noise_{self.noise}" 66 | f"_et_{self.embedding_type.name}" 67 | f"_sdf_{self.success_decision_factor}" 68 | ) 69 | 70 | @property 71 | def conf_dict(self) -> Dict[str, Any]: 72 | """ 73 | The configuration as a dictionary. 74 | 75 | Returns: The configuration as a dictionary. 76 | 77 | """ 78 | return { 79 | "initial base length": self.initial_base_length, 80 | "maximum base length": self.max_base_length, 81 | "minimum base length": self.min_base_length, 82 | "success tolerance": self.success_tolerance, 83 | "acquisition_functions": self.acquisition_function.name, 84 | "observation noise": self.noise, 85 | "embedding type": self.embedding_type.name, 86 | "success decision factor": self.success_decision_factor, 87 | } 88 | 89 | def pretty_print(self) -> str: 90 | """ 91 | A nice string of the configuration. 92 | 93 | Returns: A nice string of the configuration. 
94 | 95 | """ 96 | pstring = "" 97 | for k, v in self.conf_dict.items(): 98 | pstring += f"\t-{k}: {v}\n" 99 | return pstring 100 | -------------------------------------------------------------------------------- /baxus_cma/util/behaviors/embedding_configuration.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | 3 | 4 | class EmbeddingType(Enum): 5 | BAXUS = 0 6 | """ 7 | BAxUS embedding where each target bin has approx. the same number of contributing input dimensions. 8 | """ 9 | HESBO = 1 10 | """ 11 | HeSBO embedding where a target dimension is sampled for each input dimension. 12 | """ 13 | -------------------------------------------------------------------------------- /baxus_cma/util/behaviors/gp_configuration.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from enum import Enum 3 | 4 | 5 | class MLLEstimation(Enum): 6 | MULTI_START_GRADIENT_DESCENT = 1 7 | """ 8 | Sample a number of points and start gradient-based optimization on every point. 9 | """ 10 | LHS_PICK_BEST_START_GD = 2 11 | """ 12 | Sample a number of points and start gradient-based optimization on the best initial points. 13 | """ 14 | 15 | 16 | @dataclass 17 | class GPBehaviour: 18 | mll_estimation: MLLEstimation = MLLEstimation.LHS_PICK_BEST_START_GD 19 | """ 20 | The maximum-likelihood-estimation method. 21 | """ 22 | n_initial_samples: int = 50 23 | """ 24 | The initial samples. 25 | """ 26 | n_best_on_lhs_selection: int = 5 27 | """ 28 | The number of best samples on which to start the gradient-based optimizer. 29 | """ 30 | n_mle_training_steps: int = 50 31 | """ 32 | The number of gradient updates. 33 | """ 34 | 35 | def __str__(self): 36 | return ( 37 | f"mle_{self.mll_estimation.name}_n_init_mle_{self.n_initial_samples}_" 38 | f"n_best_lhs_{self.n_best_on_lhs_selection}_mle_steps_{self.n_mle_training_steps}" 39 | ) 40 | -------------------------------------------------------------------------------- /baxus_cma/util/console_entry_point.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | from baxus_cma.benchmark_runner import main 4 | 5 | 6 | def bench(): 7 | main(sys.argv[1:]) 8 | -------------------------------------------------------------------------------- /baxus_cma/util/data_utils.py: -------------------------------------------------------------------------------- 1 | from copy import deepcopy 2 | from typing import Dict, Sequence 3 | 4 | import numpy as np 5 | 6 | 7 | def join_data(X: np.ndarray, dims_and_bins: Dict[int, int]) -> np.ndarray: 8 | """ 9 | After splitting, copy the data from the splitting dim(s) into the new dim(s) 10 | 11 | Args: 12 | X (np.ndarray): the x-values before the splitting 13 | dims_and_bins (Dict[int, int]): the splitting: dims and number of bins. Be warned that we assume an ordered dict 14 | which we are allowed to in newer Python versions. 15 | 16 | Returns: the x-values after splitting 17 | 18 | """ 19 | X = deepcopy(X) 20 | for dim, bin in dims_and_bins.items(): 21 | data_row = X[:, dim] 22 | X = np.hstack((X, np.tile(data_row, bin - 1).reshape(-1, (len(data_row))).T)) 23 | return X 24 | 25 | 26 | def right_pad_sequence(sequence: Sequence[np.ndarray], dtype=np.float64, fill_value: float = 0.0) -> np.ndarray: 27 | """ 28 | Pads a sequence of 1D NumPy arrays to the same length. 
29 | 30 | Args: 31 | sequence: sequence of 1D NumPy arrays 32 | dtype: the dtype of the result matrix 33 | fill_value: the value for the padding 34 | 35 | Returns: a matrix of shape (len(sequence), max_sequence_length) where all rows are filled up with fill_value on the right 36 | 37 | """ 38 | max_len = max(len(s) for s in sequence) 39 | padded_matrix = np.full(shape=(len(sequence), max_len), dtype=dtype, fill_value=fill_value) 40 | for i, seq in enumerate(sequence): 41 | assert seq.ndim == 1, "Only 1D arrays are supported" 42 | padded_matrix[i, 0:len(seq)] = seq 43 | return padded_matrix 44 | -------------------------------------------------------------------------------- /baxus_cma/util/exceptions.py: -------------------------------------------------------------------------------- 1 | class ArgumentError(Exception): 2 | """ 3 | An exception for an illegal input argmument. 4 | """ 5 | pass 6 | 7 | 8 | class EffectiveDimTooLargeException(Exception): 9 | """ 10 | When the effective dimensionality is too large (for example when larger than the input dimensionality). 11 | """ 12 | pass 13 | 14 | 15 | class OutOfBoundsException(Exception): 16 | """ 17 | When a point falls outside the search space. 18 | """ 19 | pass 20 | 21 | 22 | class BoundsMismatchException(Exception): 23 | """ 24 | When the search space bounds don't have the same length. 25 | """ 26 | pass 27 | 28 | 29 | class UnknownBehaviorError(Exception): 30 | pass 31 | -------------------------------------------------------------------------------- /baxus_cma/util/gp_utils.py: -------------------------------------------------------------------------------- 1 | from copy import deepcopy 2 | from typing import Dict, Callable, Tuple, List, Optional, OrderedDict 3 | 4 | import numpy as np 5 | import torch 6 | from gpytorch import ExactMarginalLogLikelihood 7 | from scipy.stats import qmc 8 | from torch import Tensor 9 | 10 | from baxus_cma.util.utils import from_unit_cube 11 | 12 | 13 | def pick_best_from_configurations( 14 | initializers: List[Callable[["baxus.gp.GP"], None]], 15 | model: "baxus.gp.GP", 16 | train_x: torch.Tensor, 17 | train_y: torch.Tensor, 18 | n_best: Optional[int] = 1, 19 | ) -> List[Callable[["baxus.gp.GP"], None]]: 20 | """ 21 | Pick the n_best best performing initializers from a list of initializers based on a GP and a MLL 22 | Args: 23 | initializers: list of initializers, sets GP hyperparameters 24 | model: the GP model 25 | train_x: the data to evaluate the model likelihood on 26 | train_y: the data to evaluate the model likelihood on 27 | n_best: number of best performing initializers to choose 28 | 29 | Returns: list of initializer functions 30 | 31 | """ 32 | assert n_best <= len(initializers), "At most as many best as we have initializers" 33 | # avoid side effects 34 | model = deepcopy(model) 35 | 36 | losses = [] 37 | for i, initializer in enumerate(initializers): 38 | initializer(model) 39 | model.train() 40 | model.likelihood.train() 41 | mll = ExactMarginalLogLikelihood(model.likelihood, model) 42 | output = model(train_x) 43 | loss = -mll(output, train_y).cpu().detach().numpy() 44 | losses.append(loss) 45 | return np.array(initializers)[np.argsort(losses)[:n_best]].tolist() 46 | 47 | 48 | def mle_optimization( 49 | initializer: Callable[["baxus.gp.GP"], None], 50 | model: "baxus.gp.GP", 51 | num_steps: int, 52 | train_x: torch.Tensor, 53 | train_y: torch.Tensor, 54 | ) -> Tuple[OrderedDict[str, Tensor], float]: 55 | """ 56 | Optimize likelihood of a model with an initializer. 
57 | :param initializer: the model initializer 58 | :param model: the GP model 59 | :param num_steps: number gradient descent steps 60 | :param kernel_type: the kernel type of the GP model 61 | :param train_x: the training data 62 | :param train_y: the training data 63 | :param mll: the model likelihood 64 | :return: state dict and the average loss 65 | """ 66 | # avoid side effects 67 | model = deepcopy(model) 68 | initializer(model) 69 | model.train() 70 | model.likelihood.train() 71 | mll = ExactMarginalLogLikelihood(model.likelihood, model) # TODO 72 | 73 | optimizer = torch.optim.Adam([{"params": model.parameters()}], lr=0.1) 74 | 75 | # only use half of the optimizer steps if kplsk kernel 76 | cum_loss = 0 77 | for _ in range( 78 | num_steps 79 | ): 80 | optimizer.zero_grad() 81 | output = model(train_x) 82 | loss = -mll(output, train_y) 83 | cum_loss += loss 84 | loss.backward() 85 | optimizer.step() 86 | return deepcopy(model.state_dict()), cum_loss / num_steps if num_steps > 0 else 0 87 | 88 | 89 | def initializer_factory( 90 | hyperparameter_configuration: Dict[str, float] 91 | ) -> Callable[["turbo.gp.GP"], None]: 92 | """ 93 | Take a hyperparameter configuration and return a lambda initializing a model with this configuration 94 | :param hyperparameter_configuration: the hyperparameter configuration 95 | :return: callabe, defined in GPyTorch model 96 | """ 97 | return lambda m: m.initialize(**hyperparameter_configuration) 98 | 99 | 100 | def latin_hypercube_hp_grid( 101 | hyperparameter_grid: Dict[str, Tuple[float, float, float]], n_samples: int 102 | ) -> Dict[str, np.ndarray]: 103 | """ 104 | Draw samples from latin hypercube from hyperparameter grid. Default configuration will always be the first configuration. 105 | :param hyperparameter_grid: dictionary, key: hyperparameter name, value: Tuple[lower_bound, upper_bound, default value] 106 | :param n_samples: number of samples to return, if 1 return default values 107 | :return: dictionary, key: hyperparameter name, value: np.ndarray of sample values (shape: (n_samples, 1)) 108 | """ 109 | return_configs = {} 110 | for k, v in hyperparameter_grid.items(): 111 | return_configs[k] = np.array([v[2]]) 112 | # if only one sample, return the default value 113 | if n_samples == 1: 114 | return return_configs 115 | hp_grid = deepcopy(hyperparameter_grid) 116 | keys = [] 117 | bounds = np.empty((0, 2)) 118 | for k, v in hp_grid.items(): 119 | bounds = np.vstack((bounds, v[:2])) 120 | keys.append(k) 121 | d = len(keys) 122 | sampler = qmc.LatinHypercube(d=d, seed=np.random.randint(1e6)) 123 | sample = sampler.random(n=n_samples - 1) 124 | samples = from_unit_cube(sample, bounds[:, 0], bounds[:, 1]) 125 | for i, k in enumerate(keys): 126 | return_configs[k] = np.hstack((return_configs[k], samples[:, i])) 127 | return return_configs 128 | -------------------------------------------------------------------------------- /baxus_cma/util/space_learning/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LamNgo1/cma-meta-algorithm/9aa76dc82f09f4ed5ead6a0cade40645f8850f8e/baxus_cma/util/space_learning/__init__.py -------------------------------------------------------------------------------- /baxus_cma/util/space_learning/trust_region.py: -------------------------------------------------------------------------------- 1 | from logging import debug 2 | from typing import Tuple 3 | 4 | import numpy as np 5 | from torch.quasirandom import SobolEngine 6 | 7 | 8 | def 
create_Xcand( 9 | x_center: np.ndarray, 10 | weights: np.ndarray, 11 | length: float, 12 | dim: int, 13 | n_cand: int, 14 | dtype, 15 | device, 16 | ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: 17 | """ 18 | 19 | :param x_center: the TR center 20 | :param weights: the weights of the dims 21 | :param length: baselength 22 | :param dim: the target dim 23 | :param n_cand: number of candidates 24 | :param dtype: the data type 25 | :param device: the device 26 | :return: triple, X_cand, lb, ub 27 | """ 28 | debug(f"creating {n_cand} candidates") 29 | lb = np.clip(x_center - weights * length, -1.0, 1.0) 30 | ub = np.clip(x_center + weights * length, -1.0, 1.0) 31 | 32 | # Draw a Sobolev sequence in [lb, ub] 33 | seed = np.random.randint(int(1e6)) 34 | sobol = SobolEngine(dim, scramble=True, seed=seed) 35 | pert = sobol.draw(n_cand).to(dtype=dtype, device=device).cpu().detach().numpy() 36 | pert = lb + (ub - lb) * pert 37 | 38 | # Create a perturbation mask 39 | prob_perturb = min(20.0 / dim, 1.0) 40 | mask = np.random.rand(n_cand, dim) <= prob_perturb 41 | ind = np.where(np.sum(mask, axis=1) == 0)[0] 42 | mask[ind, np.random.randint(0, dim - 1, size=len(ind))] = 1 43 | 44 | # Create candidate points 45 | X_cand = x_center.copy() * np.ones((n_cand, dim)) 46 | X_cand[mask] = pert[mask] 47 | return X_cand, lb, ub 48 | -------------------------------------------------------------------------------- /baxus_cma/util/utils.py: -------------------------------------------------------------------------------- 1 | ############################################################################### 2 | # Copyright (c) 2019 Uber Technologies, Inc. # 3 | # # 4 | # Licensed under the Uber Non-Commercial License (the "License"); # 5 | # you may not use this file except in compliance with the License. # 6 | # You may obtain a copy of the License at the root directory of this project. # 7 | # # 8 | # See the License for the specific language governing permissions and # 9 | # limitations under the License. # 10 | ############################################################################### 11 | 12 | # Derived from the TuRBO implementation (https://github.com/uber-research/TuRBO) 13 | # Author: anonymous 14 | 15 | import argparse 16 | from logging import warning 17 | 18 | try: 19 | from collections.abc import Iterator 20 | except ImportError as e: 21 | warning("Failed to import Iterator from collections.abc. 
Python < 3.10 won't be supported in the future.") 22 | from collections import Iterator 23 | 24 | try: 25 | from reprlib import repr 26 | except ImportError: 27 | pass 28 | import functools 29 | 30 | import numpy as np 31 | # import seaborn as sns 32 | 33 | 34 | def to_unit_cube(x: np.ndarray, lower_bounds: np.ndarray, upper_bounds: np.ndarray) -> np.ndarray: 35 | """ 36 | Project to [0, 1]^d from hypercube with bounds lb and ub 37 | 38 | Args: 39 | x: the points to scale 40 | lower_bounds: the lower bounds in the unscaled space 41 | upper_bounds: the upper bounds un the unscaled space 42 | 43 | Returns: scaled points 44 | 45 | """ 46 | assert lower_bounds.ndim == 1 and upper_bounds.ndim == 1 and x.ndim == 2 47 | xx = (x - lower_bounds) / (upper_bounds - lower_bounds) 48 | return xx 49 | 50 | 51 | def to_1_around_origin(x: np.ndarray, lower_bounds: np.ndarray, upper_bounds: np.ndarray) -> np.ndarray: 52 | """ 53 | Project to [-1, 1]^d from hypercube with bounds lb and ub 54 | 55 | Args: 56 | x: the points to scale 57 | lower_bounds: the lower bounds in the unscaled space 58 | upper_bounds: the upper bounds un the unscaled space 59 | 60 | Returns: the scaled points. 61 | 62 | """ 63 | assert lower_bounds.ndim == 1 and upper_bounds.ndim == 1 and x.ndim == 2 64 | x = to_unit_cube(x, lower_bounds, upper_bounds) 65 | xx = x * 2 - 1 66 | return xx 67 | 68 | 69 | def from_unit_cube(x: np.ndarray, lower_bounds: np.ndarray, upper_bounds: np.ndarray) -> np.ndarray: 70 | """ 71 | Project points that were scaled to unit cube back to full space. 72 | 73 | Args: 74 | x: the points 75 | lower_bounds: the lower bounds of the full space 76 | upper_bounds: the upper bounds of the full space 77 | 78 | Returns: scaled points 79 | 80 | """ 81 | assert lower_bounds.ndim == 1 and upper_bounds.ndim == 1 and x.ndim == 2 82 | xx = x * (upper_bounds - lower_bounds) + lower_bounds 83 | return xx 84 | 85 | 86 | def from_1_around_origin(x: np.ndarray, lower_bounds: np.ndarray, upper_bounds: np.ndarray) -> np.ndarray: 87 | """ 88 | Project points that were scaled to one-around-origin cube back to full space. 89 | 90 | Args: 91 | x: the points 92 | lower_bounds: the lower bounds of the full space 93 | upper_bounds: the upper bounds of the full space 94 | 95 | Returns: scaled points 96 | 97 | """ 98 | xx = (x + 1) / 2 99 | return from_unit_cube(xx, lower_bounds, upper_bounds) 100 | 101 | 102 | def one_around_origin_latin_hypercube(n_pts: int, dim: int) -> np.ndarray: 103 | """ 104 | Basic Latin hypercube implementation with center perturbation in a one-around-origin cube. 105 | 106 | Args: 107 | n_pts: number of points to sample 108 | dim: dimensionality of the space 109 | 110 | Returns: the LHS points 111 | 112 | """ 113 | X = latin_hypercube(n_pts=n_pts, dim=dim) 114 | return X * 2 - 1 115 | 116 | 117 | def latin_hypercube(n_pts: int, dim: int) -> np.ndarray: 118 | """ 119 | Basic Latin hypercube implementation with center perturbation. 120 | 121 | Args: 122 | n_pts: number of points to sample 123 | dim: dimensionality of the space 124 | 125 | Returns: the LHS points 126 | 127 | """ 128 | X = np.zeros((n_pts, dim)) 129 | centers = (1.0 + 2.0 * np.arange(0.0, n_pts)) / float(2 * n_pts) 130 | for i in range(dim): # Shuffle the center locations for each dimension. 
131 | X[:, i] = centers[np.random.permutation(n_pts)] 132 | 133 | # Add some perturbations within each box 134 | pert = np.random.uniform(-1.0, 1.0, (n_pts, dim)) / float(2 * n_pts) 135 | X += pert 136 | return X 137 | 138 | 139 | def str2bool(value: str) -> bool: 140 | """ 141 | Parse string to boolean or throw error if string has no boolean type. 142 | 143 | Args: 144 | value: the string to parse 145 | 146 | Returns: True, if string is truthy, false if string is falsy 147 | 148 | """ 149 | if isinstance(value, bool): 150 | return value 151 | if value.lower() in ("yes", "true", "t", "y", "1"): 152 | return True 153 | elif value.lower() in ("no", "false", "f", "n", "0"): 154 | return False 155 | else: 156 | raise argparse.ArgumentTypeError("Boolean value expected.") 157 | 158 | 159 | # class ColorIterator(Iterator): 160 | # """ 161 | # A color iterator 162 | # """ 163 | # colors = sns.color_palette("husl", 23) 164 | 165 | # def __init__(self): 166 | # self.cc = 0 167 | 168 | # def __iter__(self): 169 | # return self 170 | 171 | # def __next__(self): 172 | # c = self.colors[self.cc % 23] 173 | # self.cc += 1 174 | # return c 175 | 176 | 177 | def in_range(x: np.ndarray, incumbent: np.ndarray, lb: np.ndarray, ub: np.ndarray): 178 | """ 179 | Whether the point x is within the range of the next slower trust region around incumbent 180 | given the current bounds lb, ub 181 | 182 | Args: 183 | x: the point to test 184 | incumbent: the point to center the next smallest TR around 185 | lb: lower bound of the current trust region 186 | ub: upper bound of the current trust region 187 | 188 | Returns: true if point would fall in the next smaller trust region, false otherwise 189 | 190 | """ 191 | offsets = [(ub.squeeze()[i] - lb.squeeze()[i]) / 4 for i in range(len(incumbent))] 192 | return all( 193 | incumbent[i] - offsets[i] < x[i] < incumbent[i] + offsets[i] 194 | for i in range(len(incumbent)) 195 | ) 196 | 197 | 198 | def star_string(wrap_string: str) -> str: 199 | """ 200 | Wrap string in stars. 
201 | 202 | Args: 203 | wrap_string: string to wrap 204 | 205 | Returns: wrapped string 206 | 207 | """ 208 | return f"{''.join(['*'] * (len(wrap_string) + 4))}\n* {wrap_string} *\n{''.join(['*'] * (len(wrap_string) + 4))}" 209 | 210 | 211 | def partialclass(cls, *args, **kwargs): 212 | """ 213 | A partially initialized class 214 | 215 | Args: 216 | cls: the base class 217 | *args: 218 | **kwargs: 219 | 220 | Returns: 221 | 222 | """ 223 | 224 | class PartialClass(cls): 225 | __init__ = functools.partial(cls.__init__, *args, **kwargs) 226 | 227 | return PartialClass -------------------------------------------------------------------------------- /cmabo/gp.py: -------------------------------------------------------------------------------- 1 | 2 | import math 3 | 4 | import gpytorch 5 | import numpy as np 6 | import torch 7 | from gpytorch.constraints.constraints import Interval 8 | from gpytorch.distributions import MultivariateNormal 9 | from gpytorch.kernels import MaternKernel, ScaleKernel 10 | from gpytorch.likelihoods import GaussianLikelihood 11 | from gpytorch.means import ConstantMean 12 | from gpytorch.mlls import ExactMarginalLogLikelihood 13 | from gpytorch.models import ExactGP 14 | 15 | 16 | # GP Model 17 | class GP(ExactGP): 18 | def __init__(self, train_x, train_y, likelihood, lengthscale_constraint, outputscale_constraint, ard_dims): 19 | super(GP, self).__init__(train_x, train_y, likelihood) 20 | self.ard_dims = ard_dims 21 | self.mean_module = ConstantMean() 22 | base_kernel = MaternKernel(lengthscale_constraint=lengthscale_constraint, ard_num_dims=ard_dims, nu=2.5) 23 | self.covar_module = ScaleKernel(base_kernel, outputscale_constraint=outputscale_constraint) 24 | 25 | def forward(self, x): 26 | mean_x = self.mean_module(x) 27 | covar_x = self.covar_module(x) 28 | return MultivariateNormal(mean_x, covar_x) 29 | 30 | 31 | def train_gp(train_x, train_y, use_ard, num_steps, hypers={}): 32 | """Fit a GP model where train_x is in [0, 1]^d and train_y is standardized.""" 33 | assert train_x.ndim == 2 34 | assert train_y.ndim == 1 35 | assert train_x.shape[0] == train_y.shape[0] 36 | 37 | # Create hyper parameter bounds 38 | noise_constraint = Interval(5e-4, 0.2) 39 | if use_ard: 40 | lengthscale_constraint = Interval(0.005, 2.0) 41 | else: 42 | lengthscale_constraint = Interval(0.005, math.sqrt(train_x.shape[1])) # [0.005, sqrt(dim)] 43 | outputscale_constraint = Interval(0.05, 20.0) 44 | 45 | # Create models 46 | likelihood = GaussianLikelihood(noise_constraint=noise_constraint).to(device=train_x.device, dtype=train_y.dtype) 47 | ard_dims = train_x.shape[1] if use_ard else None 48 | model = GP( 49 | train_x=train_x, 50 | train_y=train_y, 51 | likelihood=likelihood, 52 | lengthscale_constraint=lengthscale_constraint, 53 | outputscale_constraint=outputscale_constraint, 54 | ard_dims=ard_dims, 55 | ).to(device=train_x.device, dtype=train_x.dtype) 56 | 57 | # Find optimal model hyperparameters 58 | model.train() 59 | likelihood.train() 60 | 61 | # "Loss" for GPs - the marginal log likelihood 62 | mll = ExactMarginalLogLikelihood(likelihood, model) 63 | 64 | # Initialize model hypers 65 | if hypers: 66 | model.load_state_dict(hypers) 67 | else: 68 | hypers = {} 69 | hypers["covar_module.outputscale"] = 1.0 70 | hypers["covar_module.base_kernel.lengthscale"] = 0.5 71 | hypers["likelihood.noise"] = 0.005 72 | model.initialize(**hypers) 73 | 74 | # Use the adam optimizer 75 | optimizer = torch.optim.Adam([{"params": model.parameters()}], lr=0.1) 76 | 77 | for _ in range(num_steps): 
78 | optimizer.zero_grad() 79 | output = model(train_x) 80 | loss = -mll(output, train_y) 81 | loss.backward() 82 | optimizer.step() 83 | 84 | # Switch to eval mode 85 | model.eval() 86 | likelihood.eval() 87 | 88 | return model 89 | -------------------------------------------------------------------------------- /cmabo/utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def to_unit_cube(x, lb, ub): 5 | """Project to [0, 1]^d from hypercube with bounds lb and ub""" 6 | assert np.all(lb < ub) and lb.ndim == 1 and ub.ndim == 1 and x.ndim == 2 7 | xx = (x - lb) / (ub - lb) 8 | return xx 9 | 10 | 11 | def from_unit_cube(x, lb, ub): 12 | """Project from [0, 1]^d to hypercube with bounds lb and ub""" 13 | assert np.all(lb < ub) and lb.ndim == 1 and ub.ndim == 1 and x.ndim == 2 14 | xx = x * (ub - lb) + lb 15 | return xx 16 | 17 | 18 | def latin_hypercube(n_pts, dim): 19 | """Basic Latin hypercube implementation with center perturbation.""" 20 | X = np.zeros((n_pts, dim)) 21 | centers = (1.0 + 2.0 * np.arange(0.0, n_pts)) / float(2 * n_pts) 22 | for i in range(dim): # Shuffle the center locataions for each dimension. 23 | X[:, i] = centers[np.random.permutation(n_pts)] 24 | 25 | # Add some perturbations within each box 26 | pert = np.random.uniform(-1.0, 1.0, (n_pts, dim)) / float(2 * n_pts) 27 | X += pert 28 | return X 29 | -------------------------------------------------------------------------------- /cmabo_general_process.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LamNgo1/cma-meta-algorithm/9aa76dc82f09f4ed5ead6a0cade40645f8850f8e/cmabo_general_process.png -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: cmabo-linux 2 | channels: 3 | - gpytorch 4 | - conda-forge 5 | - pytorch 6 | - defaults 7 | dependencies: 8 | - python=3.10 9 | - pip 10 | - gxx_linux-64 11 | - gcc_linux-64 12 | - swig 13 | - pip: 14 | - torch==1.13.1 15 | - torchvision==0.14.1 16 | - torchaudio==0.13.1 17 | - gpytorch==1.8.1 18 | - botorch==0.6.6 19 | - gpy 20 | - cma 21 | - gym 22 | - gym[box2d] 23 | - gym[mujoco] 24 | - pandas==1.4.0 25 | 26 | -------------------------------------------------------------------------------- /experiments-from-paper.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | conda init bash 4 | conda activate cmabo-linux 5 | 6 | for optimizer in 'bo' 'turbo' 'baxus'; do 7 | for seed in {1..10}; do 8 | python test-main.py --solver $optimizer -f alpine -d 100 -n 2000 --seed $seed 9 | python test-main.py --solver $optimizer -f levy -d 100 -n 2000 --seed $seed 10 | python test-main.py --solver $optimizer -f ellipsoid -d 100 -n 2000 --seed $seed 11 | python test-main.py --solver $optimizer -f rastrigin -d 100 -n 2000 --seed $seed 12 | python test-main.py --solver $optimizer -f shifted-alpine -d 100 -n 2000 --seed $seed 13 | python test-main.py --solver $optimizer -f shifted-levy -d 100 -n 2000 --seed $seed 14 | python test-main.py --solver $optimizer -f schaffer100 -n 1000 --seed $seed 15 | python test-main.py --solver $optimizer -f branin500 -n 1000 --seed $seed 16 | python test-main.py --solver $optimizer -f rover100 -n 1000 --seed $seed 17 | python test-main.py --solver $optimizer -f half-cheetah -n 2000 --seed $seed 18 | python test-main.py --solver 
$optimizer -f lasso-dna -n 1000 --seed $seed 19 | done 20 | done -------------------------------------------------------------------------------- /test-main.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | import time 3 | 4 | from cmabo.cma_bo import CMABayesianOptimization 5 | from test_functions.utils import get_arguments, get_bound, set_seed 6 | 7 | # parameters 8 | input_dict = get_arguments() 9 | objective = input_dict['f'] 10 | max_evals = input_dict['max_evals'] 11 | solver = input_dict['solver'] 12 | seed = input_dict['seed'] 13 | 14 | information = f'CMA-{solver.upper()}: {objective.name}-{objective.input_dim}D function with max_evals={max_evals} and seed={seed}' 15 | print(information) 16 | print(f'==============> seed={seed} <===============') 17 | set_seed(seed=seed) 18 | # Start 19 | bounds = get_bound(objective.bounds) 20 | lb = bounds[:, 0] 21 | ub = bounds[:, 1] 22 | 23 | stamp1 = time.time() 24 | cmabo = CMABayesianOptimization(n_init=20, f=objective.func, solver=solver, lb=lb, ub=ub, 25 | max_evals=max_evals, func_name=objective.name, keep_record=True 26 | ) 27 | cmabo.optimize() 28 | stamp2 = time.time() 29 | cmabo.dumpdata(seed, total_time=stamp2-stamp1) # Save pickle file 30 | 31 | print('------> FINISHED <---------') 32 | print(information) 33 | -------------------------------------------------------------------------------- /test_functions/LassoBench/__init__.py: -------------------------------------------------------------------------------- 1 | from .LassoBench import SyntheticBenchmark # noqa 2 | from .LassoBench import RealBenchmark # noqa 3 | -------------------------------------------------------------------------------- /test_functions/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LamNgo1/cma-meta-algorithm/9aa76dc82f09f4ed5ead6a0cade40645f8850f8e/test_functions/__init__.py -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LamNgo1/cma-meta-algorithm/9aa76dc82f09f4ed5ead6a0cade40645f8850f8e/test_functions/function_realworld_bo/__init__.py -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/ebo_core/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LamNgo1/cma-meta-algorithm/9aa76dc82f09f4ed5ead6a0cade40645f8850f8e/test_functions/function_realworld_bo/ebo_core/__init__.py -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/ebo_core/helper.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Code from the Github: https://github.com/zi-w/Ensemble-Bayesian-Optimization 4 | """ 5 | 6 | import numpy as np 7 | import scipy.cluster.hierarchy as hi 8 | 9 | 10 | def sample_multinomial(prob, shape, dim_limit): 11 | assert isinstance(shape, int) 12 | prob = prob / np.sum(prob) 13 | ret = - np.ones(shape, dtype=np.int) 14 | for i in range(shape): 15 | cnt = 0 16 | while cnt < 100: 17 | assign = np.random.choice(len(prob), p=prob) 18 | if np.sum(ret == assign) < dim_limit: 19 | ret[i] = assign 20 | break 21 | cnt += 1 22 | if cnt >= 100: 23 | raise ValueError('Not 
able to sample multinomial with dim limit within 100 rounds.') 24 | return ret 25 | 26 | 27 | def sample_categorical(prob): 28 | prob = prob / np.sum(prob) 29 | return np.random.choice(len(prob), p=prob) 30 | 31 | 32 | def find(pred): 33 | return np.where(pred)[0] 34 | 35 | 36 | def gumbel(): 37 | return -np.log(-np.log(np.random.random())) 38 | 39 | 40 | def mean_z(z_all, dim_limit): 41 | # use correlation clustering to average group assignments 42 | lz = hi.linkage(z_all.T, 'single', 'hamming') 43 | # not sure why cluster id starts from 1 44 | z = hi.fcluster(lz, 0) - 1 45 | all_cat = np.unique(z) 46 | for a in all_cat: 47 | a_size = np.sum(a == z) 48 | if a_size > dim_limit: 49 | z[a == z] = sample_multinomial([1.] * a_size, a_size, dim_limit) 50 | return z 51 | 52 | 53 | class NormalizedInputFn: 54 | def __init__(self, fn_instance, x_range): 55 | self.fn_instance = fn_instance 56 | self.x_range = x_range 57 | 58 | def __call__(self, x): 59 | return self.fn_instance(self.project_input(x)) 60 | 61 | def project_input(self, x): 62 | return x * (self.x_range[1] - self.x_range[0]) + self.x_range[0] 63 | 64 | def inv_project_input(self, x): 65 | return (x - self.x_range[0]) / (self.x_range[1] - self.x_range[0]) 66 | 67 | def get_range(self): 68 | return np.array([np.zeros(self.x_range[0].shape[0]), np.ones(self.x_range[0].shape[0])]) 69 | 70 | 71 | class ConstantOffsetFn: 72 | def __init__(self, fn_instance, offset): 73 | self.fn_instance = fn_instance 74 | self.offset = offset 75 | 76 | def __call__(self, x): 77 | return self.fn_instance(x) + self.offset 78 | 79 | def get_range(self): 80 | return self.fn_instance.get_range() 81 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/functions_mujoco.py: -------------------------------------------------------------------------------- 1 | import os 2 | from typing import ClassVar, Dict, Optional, Tuple 3 | 4 | import gym 5 | import numpy as np 6 | 7 | # http://www.johndcook.com/blog/standard_deviation/ 8 | class RunningStat(object): 9 | def __init__(self, shape=None): 10 | self._n = 0 11 | self._M = np.zeros(shape, dtype=np.float64) 12 | self._S = np.zeros(shape, dtype=np.float64) 13 | 14 | def copy(self): 15 | other = RunningStat() 16 | other._n = self._n 17 | other._M = np.copy(self._M) 18 | other._S = np.copy(self._S) 19 | return other 20 | 21 | def push(self, x): 22 | x = np.asarray(x) 23 | # Unvectorized update of the running statistics. 24 | assert x.shape == self._M.shape, ("x.shape = {}, self.shape = {}".format(x.shape, self._M.shape)) 25 | n1 = self._n 26 | self._n += 1 27 | if self._n == 1: 28 | self._M[...] = x 29 | else: 30 | delta = x - self._M 31 | self._M[...] += delta / self._n 32 | self._S[...] 
+= delta * delta * n1 / self._n 33 | 34 | def update(self, other): 35 | n1 = self._n 36 | n2 = other._n 37 | n = n1 + n2 38 | delta = self._M - other._M 39 | delta2 = delta * delta 40 | M = (n1 * self._M + n2 * other._M) / n 41 | S = self._S + other._S + delta2 * n1 * n2 / n 42 | self._n = n 43 | self._M = M 44 | self._S = S 45 | 46 | def __repr__(self): 47 | return '(n={}, mean_mean={}, mean_std={})'.format( 48 | self.n, np.mean(self.mean), np.mean(self.std)) 49 | 50 | @property 51 | def n(self): 52 | return self._n 53 | 54 | @property 55 | def mean(self): 56 | return self._M 57 | 58 | @property 59 | def var(self): 60 | return self._S / (self._n - 1) if self._n > 1 else np.square(self._M) 61 | 62 | @property 63 | def std(self): 64 | return np.sqrt(self.var) 65 | 66 | @property 67 | def shape(self): 68 | return self._M.shape 69 | 70 | class MujucoPolicyFunc(): 71 | ANT_ENV: ClassVar[Tuple[str, float, float, int]] = ('Ant-v4', -1.0, 1.0, 3) 72 | SWIMMER_ENV: ClassVar[Tuple[str, float, float, int]] = ('Swimmer-v4', -1.0, 1.0, 3) 73 | HALF_CHEETAH_ENV: ClassVar[Tuple[str, float, float, int]] = ('HalfCheetah-v4', -1.0, 1.0, 3) 74 | HOPPER_ENV: ClassVar[Tuple[str, float, float, int]] = ('Hopper-v4', -1.0, 1.0, 3) 75 | WALKER_2D_ENV: ClassVar[Tuple[str, float, float, int]] = ('Walker2d-v4', -1.0, 1.0, 3) 76 | HUMANOID_ENV: ClassVar[Tuple[str, float, float, int]] = ('Humanoid-v4', -1.0, 1.0, 3) 77 | 78 | def __init__(self, env: str, lb: float, ub: float, num_rollouts): 79 | self._env_name = env 80 | self._env = gym.make(env) 81 | self._env.reset(seed=2023) 82 | state_dims = self._env.observation_space.shape[0] 83 | action_dims = self._env.action_space.shape[0] 84 | self._dims = state_dims * action_dims 85 | self._policy_shape = (action_dims, state_dims) 86 | self._lb = np.full(self._dims, lb) 87 | self._ub = np.full(self._dims, ub) 88 | self._num_rollouts = num_rollouts 89 | self._render = False 90 | self._rs = RunningStat(state_dims) 91 | 92 | #custom parameter 93 | self.bounds = [(lb, ub)]*self._dims 94 | self.input_dim = self._dims 95 | 96 | # @property 97 | # def lb(self) -> np.ndarray: 98 | # return self._lb 99 | 100 | # @property 101 | # def ub(self) -> np.ndarray: 102 | # return self._ub 103 | 104 | @property 105 | def dims(self) -> int: 106 | return self._dims 107 | 108 | # @property 109 | # def is_minimizing(self) -> bool: 110 | # return False 111 | 112 | def __call__(self, x): 113 | assert x.ndim == 1 114 | assert len(x) == self.dims 115 | assert np.all(x <= self._ub) and np.all(x >= self._lb) 116 | M = x.reshape(self._policy_shape) 117 | total_r = 0 118 | for _ in range(self._num_rollouts): 119 | obs, _ = self._env.reset() 120 | while True: 121 | self._rs.push(obs) 122 | norm_obs = (obs - self._rs.mean) / (self._rs.std + 1e-6) 123 | action = np.dot(M, norm_obs) 124 | obs, r, done, truncated, _ = self._env.step(action) 125 | total_r += r 126 | if done or truncated: 127 | break 128 | 129 | # for minimization optimizer 130 | return -total_r / self._num_rollouts 131 | 132 | # def __str__(self): 133 | # return f"Mujuco_{self._env_name}[{self.dims}]" 134 | 135 | def func(self, x: np.ndarray): 136 | return self.__call__(x) 137 | 138 | 139 | func_dir = os.path.dirname(os.path.abspath(__file__)) 140 | 141 | class Humanoid(MujucoPolicyFunc): 142 | def __init__(self): 143 | super().__init__(*MujucoPolicyFunc.HUMANOID_ENV) 144 | self.name = 'humanoid' 145 | 146 | class HalfCheetah(MujucoPolicyFunc): 147 | def __init__(self): 148 | super().__init__(*MujucoPolicyFunc.HALF_CHEETAH_ENV) 149 | 
self.name = 'half-cheetah' 150 | 151 | class Hopper(MujucoPolicyFunc): 152 | def __init__(self): 153 | super().__init__(*MujucoPolicyFunc.HOPPER_ENV) 154 | self.name = 'hopper' 155 | 156 | class Walker2d(MujucoPolicyFunc): 157 | def __init__(self): 158 | super().__init__(*MujucoPolicyFunc.WALKER_2D_ENV) 159 | self.name = 'walker2d' 160 | 161 | class Swimmer(MujucoPolicyFunc): 162 | def __init__(self): 163 | super().__init__(*MujucoPolicyFunc.SWIMMER_ENV) 164 | self.name = 'swimmer' 165 | 166 | class Ant(MujucoPolicyFunc): 167 | def __init__(self): 168 | super().__init__(*MujucoPolicyFunc.ANT_ENV) 169 | self.name = 'ant' 170 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/functions_xgboost.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import xgboost as xgb 3 | 4 | from collections import OrderedDict 5 | from .ebo_core.helper import ConstantOffsetFn, NormalizedInputFn 6 | 7 | from .push_function import PushReward 8 | from .lunar_lander import LunarLander, heuristic_turbo 9 | from .rover_function import create_small_domain 10 | from .bipedal_walker import BipedalWalker, heuristic_bipedal 11 | from sklearn import preprocessing 12 | from sklearn.metrics import make_scorer, accuracy_score, balanced_accuracy_score, \ 13 | precision_score, f1_score, log_loss 14 | from .hpobench.dependencies.ml.data_manager import OpenMLDataManager 15 | from .hpobench import config_file 16 | 17 | class XGBoost_OpenML_Task: 18 | ''' 19 | MLP_OpenML_Task: function 20 | 21 | param sd: standard deviation, to generate noisy evaluations of the function 22 | ''' 23 | def __init__(self, task_id, bounds=None, sd=None, seed=0): 24 | self.input_dim = 9 25 | 26 | # Get training and validation datasets 27 | # Use the DataManager of the HPOBenchmark package 28 | data_path = config_file.data_dir / "OpenML" 29 | 30 | # Task ID is passed in by user 31 | valid_size = 0.33 32 | global_seed = 1 33 | dm = OpenMLDataManager(task_id, valid_size, data_path, global_seed) 34 | dm.load() 35 | 36 | train_X = dm.train_X 37 | valid_X = dm.valid_X 38 | 39 | train_y = dm.train_y 40 | valid_y = dm.valid_y 41 | 42 | # Convert to the proper format 43 | le = preprocessing.LabelEncoder() 44 | le.fit(list(train_y) + list(valid_y)) 45 | 46 | train_y = le.transform(train_y) 47 | valid_y = le.transform(valid_y) 48 | 49 | self.X_train = train_X 50 | self.Y_train = train_y 51 | self.X_test = valid_X 52 | self.Y_test = valid_y 53 | self.num_classes = len(set(list(train_y) + list(valid_y))) 54 | 55 | # Tune 4 hyperparameters as in the HPOBench benchmark 56 | if bounds is None: 57 | # self.bounds = OrderedDict([('eta', (-10, 0)), 58 | # ('max_depth', (0, 5.6439)), 59 | # ('colsample_bytree', (0.1, 1)), 60 | # ('reg_lambda', (-10, 10))]) 61 | self.bounds = OrderedDict([('eta', (0, 1)), 62 | ('gamma', (0, 1)), 63 | ('max_depth', (0, 1)), 64 | ('min_child_weight', (0, 1)), 65 | ('max_delta_step', (0, 1)), 66 | ('colsample_bytree', (0, 1)), 67 | ('colsample_bylevel', (0, 1)), 68 | ('colsample_bynode', (0, 1)), 69 | ('reg_lambda', (0, 1))]) 70 | else: 71 | self.bounds = bounds 72 | 73 | self.min = [(0.)*self.input_dim] 74 | self.fmin = 1 75 | self.ismax = 1 76 | self.name = f'xgb-openml-task{task_id}' 77 | self.seed = seed 78 | 79 | def run_XGBoost(self, params): 80 | # NOTE: params has len being 1 81 | 82 | # Extract hyperparameters from params: 83 | params = params.ravel() 84 | 85 | # Transform the hyperparameters 86 | 
params_transform = params.copy() 87 | params_transform[0] = 10*params_transform[0] - 10 # eta 88 | params_transform[1] = 5.6439*params_transform[1] # gamma 89 | params_transform[2] = 5.6439*params_transform[2] # max_depth 90 | params_transform[3] = 5.6439*params_transform[3] # min_child_weight 91 | params_transform[4] = 5.6439*params_transform[4] # max_delta_step 92 | params_transform[5] = 0.9*params_transform[5] + 0.1 # colsample_bytree 93 | params_transform[6] = 0.9*params_transform[6] + 0.1 # colsample_bylevel 94 | params_transform[7] = 0.9*params_transform[7] + 0.1 # colsample_bynode 95 | params_transform[8] = 20*params_transform[8] - 10 # reg_lambda 96 | 97 | eta = 2**params[0] 98 | gamma = int(round(2**params[1])) 99 | max_depth = int(round(2**params[2])) 100 | min_child_weight = int(round(2**params[3])) 101 | max_delta_step = int(round(2**params[4])) 102 | colsample_bytree = params[5] 103 | colsample_bylevel = params[6] 104 | colsample_bynode = params[7] 105 | reg_lambda = 2**params[8] 106 | 107 | extra_args = dict( 108 | booster="gbtree", 109 | n_estimators=2000, 110 | objective="binary:logistic", 111 | random_state=None, 112 | subsample=1 113 | ) 114 | 115 | if self.num_classes > 2: 116 | extra_args["objective"] = "multi:softmax" 117 | extra_args.update({"num_class": self.num_classes}) 118 | 119 | model = xgb.XGBClassifier( 120 | learning_rate=eta, 121 | gamma=gamma, 122 | max_depth=max_depth, 123 | min_child_weight=min_child_weight, 124 | max_delta_step=max_delta_step, 125 | colsample_bytree=colsample_bytree, 126 | colsample_bylevel=colsample_bylevel, 127 | colsample_bynode=colsample_bynode, 128 | reg_lambda=reg_lambda, 129 | **extra_args 130 | ) 131 | 132 | model.fit(self.X_train, self.Y_train) 133 | 134 | # Compute validation scores 135 | metrics = dict( 136 | acc=accuracy_score, 137 | bal_acc=balanced_accuracy_score, 138 | f1=f1_score, 139 | precision=precision_score, 140 | neglogloss=log_loss, 141 | ) 142 | 143 | metrics_kwargs = dict( 144 | acc=dict(), 145 | bal_acc=dict(), 146 | f1=dict(average="macro", zero_division=0), 147 | precision=dict(average="macro", zero_division=0), 148 | neglogloss=dict() 149 | ) 150 | 151 | scorers = dict() 152 | for k, v in metrics.items(): 153 | scorers[k] = make_scorer(v, **metrics_kwargs[k]) 154 | 155 | val_scores = dict() 156 | for k, v in scorers.items(): 157 | val_scores[k] = v(model, self.X_test, self.Y_test) 158 | accuracy_val = val_scores["acc"] 159 | logloss_val = -val_scores["neglogloss"] 160 | 161 | return accuracy_val*100 162 | 163 | def func(self, params): 164 | 165 | if (type(params) == list): 166 | metrics_accuracy = np.zeros((len(params), 1)) 167 | for i in range(len(params)): 168 | params_single = params[i] 169 | accuracy_temp = self.run_XGBoost(params_single) 170 | metrics_accuracy[i, 0] = accuracy_temp 171 | 172 | elif (type(params) == np.ndarray): 173 | # import os 174 | # os.environ['CUDA_VISIBLE_DEVICES'] = "0" 175 | params_single = params.copy() 176 | metrics_accuracy = self.run_XGBoost(params_single) 177 | else: 178 | print('Something wrong with params!') 179 | 180 | return -metrics_accuracy 181 | 182 | 183 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/__init__.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | _default_log_format = '[%(levelname)s] %(name)s at %(asctime)s --- %(message)s' 4 | logging.basicConfig(format=_default_log_format, level=logging.WARNING) 5 | 
root_logger = logging.getLogger() 6 | 7 | from test_functions.function_realworld_bo.hpobench.__version__ import __version__ # noqa: F401, E402 8 | from test_functions.function_realworld_bo.hpobench.config import config_file # noqa: F401, E402 9 | 10 | __contact__ = "automl.org" 11 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/__version__.py: -------------------------------------------------------------------------------- 1 | __version__ = '0.0.10' 2 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/benchmarks/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LamNgo1/cma-meta-algorithm/9aa76dc82f09f4ed5ead6a0cade40645f8850f8e/test_functions/function_realworld_bo/hpobench/benchmarks/__init__.py -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/benchmarks/ml/README.md: -------------------------------------------------------------------------------- 1 | Each function evalution returns a dictionary with the following information: 2 | 3 | ``` 4 | └───function_value: 1 - accuracy (acc.) on validation set 5 | └───cost: time to fit model + time to evaluate acc. training set + time to evaluate acc. validation set 6 | └───info: dictionary (dict) with miscellaneous information 7 | | └───train_loss: 1 - accuracy (acc.) on training set 8 | | └───val_loss: 1 - accuracy (acc.) on validation set 9 | | └───model_cost: time taken to fit the model 10 | | └───train_scores: performance on all metrics over the training set (dict) 11 | | | └───f1: F1-score 12 | | | └───acc: Accuracy 13 | | | └───bal_acc: Balanced accuracy 14 | | └───train_costs: time taken to compute performance on all metrics over the training set (dict) 15 | | | └───f1: F1-score 16 | | | └───acc: Accuracy 17 | | | └───bal_acc: Balanced accuracy 18 | | └───valid_scores: performance on all metrics over the validation set (dict) 19 | | | └───... 20 | | └───valid_costs: time taken to compute performance on all metrics over the validation set (dict) 21 | | | └───... 22 | | └───test_scores: performance on all metrics over the test set 23 | | | └───... 24 | | └───test_costs: time taken to compute performance on all metrics over the test set (dict) 25 | | | └───... 26 | ``` 27 | 28 | *NOTE*: the keys `function_value`, `cost`, `info` need to exist when creating a new objective 29 | function, while `info` can house any kind of auxilliary information required. 
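
As a rough orientation, the snippet below sketches how such a result dictionary might be consumed. The `objective_function` call and the `config`/`fidelity` arguments are assumptions about the surrounding benchmark API; only the dictionary keys are taken from the description above.

```python
# Minimal sketch: `benchmark` stands for any benchmark object in this package whose
# objective returns the dictionary described above (call signature is an assumption).
result = benchmark.objective_function(configuration=config, fidelity=fidelity)

val_error = result["function_value"]            # 1 - accuracy on the validation set
wallclock = result["cost"]                      # fit time + train-eval + valid-eval time
train_error = result["info"]["train_loss"]      # 1 - accuracy on the training set
test_f1 = result["info"]["test_scores"]["f1"]   # auxiliary metrics live under `info`
```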
-------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/benchmarks/ml/__init__.py: -------------------------------------------------------------------------------- 1 | from hpobench.benchmarks.ml.histgb_benchmark import HistGBBenchmark, HistGBBenchmarkBB, HistGBBenchmarkMF 2 | from hpobench.benchmarks.ml.lr_benchmark import LRBenchmark, LRBenchmarkBB, LRBenchmarkMF 3 | from hpobench.benchmarks.ml.nn_benchmark import NNBenchmark, NNBenchmarkBB, NNBenchmarkMF 4 | from hpobench.benchmarks.ml.rf_benchmark import RandomForestBenchmark, RandomForestBenchmarkBB, \ 5 | RandomForestBenchmarkMF 6 | from hpobench.benchmarks.ml.svm_benchmark import SVMBenchmark, SVMBenchmarkBB, SVMBenchmarkMF 7 | from hpobench.benchmarks.ml.tabular_benchmark import TabularBenchmark 8 | 9 | try: 10 | from hpobench.benchmarks.ml.xgboost_benchmark import XGBoostBenchmark, XGBoostBenchmarkBB, XGBoostBenchmarkMF 11 | except ImportError: 12 | pass 13 | 14 | 15 | __all__ = ['HistGBBenchmark', 'HistGBBenchmarkBB', 'HistGBBenchmarkMF', 16 | 'LRBenchmark', 'LRBenchmarkBB', 'LRBenchmarkMF', 17 | 'NNBenchmark', 'NNBenchmarkBB', 'NNBenchmarkMF', 18 | 'RandomForestBenchmark', 'RandomForestBenchmarkBB', 'RandomForestBenchmarkMF', 19 | 'SVMBenchmark', 'SVMBenchmarkBB', 'SVMBenchmarkMF', 20 | 'TabularBenchmark', 21 | 'XGBoostBenchmark', 'XGBoostBenchmarkBB', 'XGBoostBenchmarkMF', 22 | ] 23 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/benchmarks/ml/histgb_benchmark.py: -------------------------------------------------------------------------------- 1 | """ 2 | Changelog: 3 | ========== 4 | 5 | 0.0.1: 6 | * First implementation of the HistGB Benchmarks. 
7 | """ 8 | 9 | from typing import Union, Tuple, Dict 10 | 11 | import ConfigSpace as CS 12 | import numpy as np 13 | from ConfigSpace.hyperparameters import Hyperparameter 14 | # https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.HistGradientBoostingClassifier.html 15 | from sklearn.experimental import enable_hist_gradient_boosting # noqa 16 | from sklearn.ensemble import HistGradientBoostingClassifier 17 | 18 | from hpobench.dependencies.ml.ml_benchmark_template import MLBenchmark 19 | 20 | __version__ = '0.0.1' 21 | 22 | 23 | class HistGBBenchmark(MLBenchmark): 24 | def __init__(self, 25 | task_id: int, 26 | rng: Union[np.random.RandomState, int, None] = None, 27 | valid_size: float = 0.33, 28 | data_path: Union[str, None] = None): 29 | super(HistGBBenchmark, self).__init__(task_id, rng, valid_size, data_path) 30 | 31 | @staticmethod 32 | def get_configuration_space(seed: Union[int, None] = None) -> CS.ConfigurationSpace: 33 | """Parameter space to be optimized --- contains the hyperparameters""" 34 | cs = CS.ConfigurationSpace(seed=seed) 35 | 36 | cs.add_hyperparameters([ 37 | CS.UniformIntegerHyperparameter( 38 | 'max_depth', lower=6, upper=30, default_value=6, log=True 39 | ), 40 | CS.UniformIntegerHyperparameter( 41 | 'max_leaf_nodes', lower=2, upper=64, default_value=32, log=True 42 | ), 43 | CS.UniformFloatHyperparameter( 44 | 'learning_rate', lower=2**-10, upper=1, default_value=0.1, log=True 45 | ), 46 | CS.UniformFloatHyperparameter( 47 | 'l2_regularization', lower=2**-10, upper=2**10, default_value=0.1, log=True 48 | ) 49 | ]) 50 | return cs 51 | 52 | @staticmethod 53 | def get_fidelity_space(seed: Union[int, None] = None) -> CS.ConfigurationSpace: 54 | fidelity_space = CS.ConfigurationSpace(seed=seed) 55 | fidelity_space.add_hyperparameters( 56 | # gray-box setting (multi-multi-fidelity) - ntrees + data subsample 57 | HistGBBenchmark._get_fidelity_choices(ntrees_choice='variable', subsample_choice='variable') 58 | ) 59 | return fidelity_space 60 | 61 | @staticmethod 62 | def _get_fidelity_choices(ntrees_choice: str, subsample_choice: str) -> Tuple[Hyperparameter, Hyperparameter]: 63 | 64 | assert ntrees_choice in ['fixed', 'variable'] 65 | assert subsample_choice in ['fixed', 'variable'] 66 | 67 | fidelity1 = dict( 68 | # TODO: this value was 100 in the original code. Please check if 100 or 1000. 
69 | fixed=CS.Constant('n_estimators', value=1000), 70 | variable=CS.UniformIntegerHyperparameter( 71 | 'n_estimators', lower=100, upper=1000, default_value=1000, log=False 72 | ) 73 | ) 74 | fidelity2 = dict( 75 | fixed=CS.Constant('subsample', value=1), 76 | variable=CS.UniformFloatHyperparameter( 77 | 'subsample', lower=0.1, upper=1, default_value=1, log=False 78 | ) 79 | ) 80 | ntrees = fidelity1[ntrees_choice] 81 | subsample = fidelity2[subsample_choice] 82 | return ntrees, subsample 83 | 84 | def init_model(self, config: Union[CS.Configuration, Dict], 85 | fidelity: Union[CS.Configuration, Dict, None] = None, 86 | rng: Union[int, np.random.RandomState, None] = None): 87 | """ Function that returns the model initialized based on the configuration and fidelity 88 | """ 89 | rng = self.rng if rng is None else rng 90 | 91 | if isinstance(config, CS.Configuration): 92 | config = config.get_dictionary() 93 | if isinstance(fidelity, CS.Configuration): 94 | fidelity = fidelity.get_dictionary() 95 | 96 | model = HistGradientBoostingClassifier( 97 | **config, 98 | max_iter=fidelity['n_estimators'], # a fidelity being used during initialization 99 | early_stopping=False, 100 | random_state=rng 101 | ) 102 | return model 103 | 104 | 105 | class HistGBBenchmarkBB(HistGBBenchmark): 106 | def get_fidelity_space(self, seed: Union[int, None] = None) -> CS.ConfigurationSpace: 107 | fidelity_space = CS.ConfigurationSpace(seed=seed) 108 | fidelity_space.add_hyperparameters( 109 | # black-box setting (full fidelity) 110 | HistGBBenchmark._get_fidelity_choices(ntrees_choice='fixed', subsample_choice='fixed') 111 | ) 112 | return fidelity_space 113 | 114 | 115 | class HistGBBenchmarkMF(HistGBBenchmark): 116 | def get_fidelity_space(self, seed: Union[int, None] = None) -> CS.ConfigurationSpace: 117 | fidelity_space = CS.ConfigurationSpace(seed=seed) 118 | fidelity_space.add_hyperparameters( 119 | # gray-box setting (multi-fidelity) - ntrees 120 | HistGBBenchmark._get_fidelity_choices(ntrees_choice='variable', subsample_choice='fixed') 121 | ) 122 | return fidelity_space 123 | 124 | 125 | __all__ = ['HistGBBenchmark', 'HistGBBenchmarkBB', 'HistGBBenchmarkMF'] 126 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/benchmarks/ml/lr_benchmark.py: -------------------------------------------------------------------------------- 1 | """ 2 | Changelog: 3 | ========== 4 | 5 | 0.0.1: 6 | * First implementation of the LR Benchmarks. 
7 | """ 8 | 9 | 10 | from typing import Union, Tuple, Dict 11 | 12 | import ConfigSpace as CS 13 | import numpy as np 14 | from ConfigSpace.hyperparameters import Hyperparameter 15 | from sklearn.linear_model import SGDClassifier 16 | 17 | from hpobench.dependencies.ml.ml_benchmark_template import MLBenchmark 18 | 19 | __version__ = '0.0.1' 20 | 21 | 22 | class LRBenchmark(MLBenchmark): 23 | def __init__(self, 24 | task_id: int, 25 | rng: Union[np.random.RandomState, int, None] = None, 26 | valid_size: float = 0.33, 27 | data_path: Union[str, None] = None): 28 | 29 | super(LRBenchmark, self).__init__(task_id, rng, valid_size, data_path) 30 | self.cache_size = 500 31 | 32 | @staticmethod 33 | def get_configuration_space(seed: Union[int, None] = None) -> CS.ConfigurationSpace: 34 | """Parameter space to be optimized --- contains the hyperparameters 35 | """ 36 | cs = CS.ConfigurationSpace(seed=seed) 37 | cs.add_hyperparameters([ 38 | CS.UniformFloatHyperparameter( 39 | "alpha", 1e-5, 1, log=True, default_value=1e-3 40 | ), 41 | CS.UniformFloatHyperparameter( 42 | "eta0", 1e-5, 1, log=True, default_value=1e-2 43 | ) 44 | ]) 45 | return cs 46 | 47 | def get_fidelity_space(self, seed: Union[int, None] = None) -> CS.ConfigurationSpace: 48 | fidelity_space = CS.ConfigurationSpace(seed=seed) 49 | fidelity_space.add_hyperparameters( 50 | # gray-box setting (multi-multi-fidelity) - iterations + data subsample 51 | LRBenchmark._get_fidelity_choices(iter_choice='variable', subsample_choice='variable') 52 | ) 53 | return fidelity_space 54 | 55 | @staticmethod 56 | def _get_fidelity_choices(iter_choice: str, subsample_choice: str) -> Tuple[Hyperparameter, Hyperparameter]: 57 | """Fidelity space available --- specifies the fidelity dimensions 58 | 59 | For SVM, only a single fidelity exists, i.e., subsample fraction. 
60 | if fidelity_choice == 0 61 | uses the entire data (subsample=1), reflecting the black-box setup 62 | else 63 | parameterizes the fraction of data to subsample 64 | 65 | """ 66 | 67 | assert iter_choice in ['fixed', 'variable'] 68 | assert subsample_choice in ['fixed', 'variable'] 69 | 70 | fidelity1 = dict( 71 | fixed=CS.Constant('iter', value=1000), 72 | variable=CS.UniformIntegerHyperparameter( 73 | 'iter', lower=10, upper=1000, default_value=1000, log=False 74 | ) 75 | ) 76 | fidelity2 = dict( 77 | fixed=CS.Constant('subsample', value=1.0), 78 | variable=CS.UniformFloatHyperparameter( 79 | 'subsample', lower=0.1, upper=1.0, default_value=1.0, log=False 80 | ) 81 | ) 82 | 83 | iter = fidelity1[iter_choice] 84 | subsample = fidelity2[subsample_choice] 85 | return iter, subsample 86 | 87 | def init_model(self, config: Union[CS.Configuration, Dict], 88 | fidelity: Union[CS.Configuration, Dict, None] = None, 89 | rng: Union[int, np.random.RandomState, None] = None): 90 | # initializing model 91 | rng = self.rng if rng is None else rng 92 | 93 | if isinstance(config, CS.Configuration): 94 | config = config.get_dictionary() 95 | if isinstance(fidelity, CS.Configuration): 96 | fidelity = fidelity.get_dictionary() 97 | 98 | # https://scikit-learn.org/stable/modules/sgd.html 99 | model = SGDClassifier( 100 | **config, 101 | loss="log", # performs Logistic Regression 102 | max_iter=fidelity["iter"], 103 | learning_rate="adaptive", 104 | tol=None, 105 | random_state=rng, 106 | 107 | ) 108 | return model 109 | 110 | 111 | class LRBenchmarkBB(LRBenchmark): 112 | def get_fidelity_space(self, seed: Union[int, None] = None) -> CS.ConfigurationSpace: 113 | fidelity_space = CS.ConfigurationSpace(seed=seed) 114 | fidelity_space.add_hyperparameters( 115 | # black-box setting (full fidelity) 116 | LRBenchmark._get_fidelity_choices(iter_choice='fixed', subsample_choice='fixed') 117 | ) 118 | return fidelity_space 119 | 120 | 121 | class LRBenchmarkMF(LRBenchmark): 122 | def get_fidelity_space(self, seed: Union[int, None] = None) -> CS.ConfigurationSpace: 123 | fidelity_space = CS.ConfigurationSpace(seed=seed) 124 | fidelity_space.add_hyperparameters( 125 | # gray-box setting (multi-fidelity) - iterations 126 | LRBenchmark._get_fidelity_choices(iter_choice='variable', subsample_choice='fixed') 127 | ) 128 | return fidelity_space 129 | 130 | 131 | __all__ = ['LRBenchmark', 'LRBenchmarkBB', 'LRBenchmarkMF'] 132 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/benchmarks/ml/nn_benchmark.py: -------------------------------------------------------------------------------- 1 | """ 2 | Changelog: 3 | ========== 4 | 5 | 0.0.1: 6 | * First implementation of the NN Benchmarks. 
7 | """ 8 | 9 | from copy import deepcopy 10 | from typing import Union, Tuple, Dict 11 | 12 | import ConfigSpace as CS 13 | import numpy as np 14 | from ConfigSpace.hyperparameters import Hyperparameter 15 | from sklearn.neural_network import MLPClassifier 16 | 17 | from hpobench.dependencies.ml.ml_benchmark_template import MLBenchmark 18 | 19 | __version__ = '0.0.1' 20 | 21 | 22 | class NNBenchmark(MLBenchmark): 23 | def __init__(self, 24 | task_id: int, 25 | rng: Union[np.random.RandomState, int, None] = None, 26 | valid_size: float = 0.33, 27 | data_path: Union[str, None] = None): 28 | super(NNBenchmark, self).__init__(task_id, rng, valid_size, data_path) 29 | 30 | @staticmethod 31 | def get_configuration_space(seed: Union[int, None] = None) -> CS.ConfigurationSpace: 32 | """Parameter space to be optimized --- contains the hyperparameters 33 | """ 34 | cs = CS.ConfigurationSpace(seed=seed) 35 | 36 | cs.add_hyperparameters([ 37 | CS.UniformIntegerHyperparameter( 38 | 'depth', default_value=3, lower=1, upper=3, log=False 39 | ), 40 | CS.UniformIntegerHyperparameter( 41 | 'width', default_value=64, lower=16, upper=1024, log=True 42 | ), 43 | CS.UniformIntegerHyperparameter( 44 | 'batch_size', lower=4, upper=256, default_value=32, log=True 45 | ), 46 | CS.UniformFloatHyperparameter( 47 | 'alpha', lower=10**-8, upper=1, default_value=10**-3, log=True 48 | ), 49 | CS.UniformFloatHyperparameter( 50 | 'learning_rate_init', lower=10**-5, upper=1, default_value=10**-3, log=True 51 | ) 52 | ]) 53 | return cs 54 | 55 | @staticmethod 56 | def get_fidelity_space(seed: Union[int, None] = None) -> CS.ConfigurationSpace: 57 | 58 | fidelity_space = CS.ConfigurationSpace(seed=seed) 59 | fidelity_space.add_hyperparameters( 60 | # gray-box setting (multi-multi-fidelity) - iterations + data subsample 61 | NNBenchmark._get_fidelity_choices(iter_choice='variable', subsample_choice='variable') 62 | ) 63 | return fidelity_space 64 | 65 | @staticmethod 66 | def _get_fidelity_choices(iter_choice: str, subsample_choice: str) -> Tuple[Hyperparameter, Hyperparameter]: 67 | 68 | fidelity1 = dict( 69 | fixed=CS.Constant('iter', value=243), 70 | variable=CS.UniformIntegerHyperparameter( 71 | 'iter', lower=3, upper=243, default_value=243, log=False 72 | ) 73 | ) 74 | fidelity2 = dict( 75 | fixed=CS.Constant('subsample', value=1), 76 | variable=CS.UniformFloatHyperparameter( 77 | 'subsample', lower=0.1, upper=1, default_value=1, log=False 78 | ) 79 | ) 80 | iter = fidelity1[iter_choice] 81 | subsample = fidelity2[subsample_choice] 82 | return iter, subsample 83 | 84 | def init_model(self, config: Union[CS.Configuration, Dict], 85 | fidelity: Union[CS.Configuration, Dict, None] = None, 86 | rng: Union[int, np.random.RandomState, None] = None): 87 | """ Function that returns the model initialized based on the configuration and fidelity 88 | """ 89 | rng = self.rng if rng is None else rng 90 | 91 | if isinstance(config, CS.Configuration): 92 | config = config.get_dictionary() 93 | if isinstance(fidelity, CS.Configuration): 94 | fidelity = fidelity.get_dictionary() 95 | 96 | config = deepcopy(config) 97 | depth = config["depth"] 98 | width = config["width"] 99 | config.pop("depth") 100 | config.pop("width") 101 | hidden_layers = [width] * depth 102 | model = MLPClassifier( 103 | **config, 104 | hidden_layer_sizes=hidden_layers, 105 | activation="relu", 106 | solver="adam", 107 | max_iter=fidelity['iter'], # a fidelity being used during initialization 108 | random_state=rng 109 | ) 110 | return model 111 | 112 | 113 | class 
NNBenchmarkBB(NNBenchmark): 114 | def get_fidelity_space(self, seed: Union[int, None] = None) -> CS.ConfigurationSpace: 115 | fidelity_space = CS.ConfigurationSpace(seed=seed) 116 | fidelity_space.add_hyperparameters( 117 | # black-box setting (full fidelity) 118 | NNBenchmarkBB._get_fidelity_choices(iter_choice='fixed', subsample_choice='fixed') 119 | ) 120 | return fidelity_space 121 | 122 | 123 | class NNBenchmarkMF(NNBenchmark): 124 | def get_fidelity_space(self, seed: Union[int, None] = None) -> CS.ConfigurationSpace: 125 | fidelity_space = CS.ConfigurationSpace(seed=seed) 126 | fidelity_space.add_hyperparameters( 127 | # gray-box setting (multi-fidelity) - iterations 128 | NNBenchmarkMF._get_fidelity_choices(iter_choice='variable', subsample_choice='fixed') 129 | ) 130 | return fidelity_space 131 | 132 | 133 | __all__ = ['NNBenchmark', 'NNBenchmarkBB', 'NNBenchmarkMF'] 134 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/benchmarks/ml/rf_benchmark.py: -------------------------------------------------------------------------------- 1 | """ 2 | Changelog: 3 | ========== 4 | 5 | 0.0.1: 6 | * First implementation of the RF Benchmarks. 7 | """ 8 | 9 | from copy import deepcopy 10 | from typing import Union, Tuple, Dict 11 | 12 | import ConfigSpace as CS 13 | import numpy as np 14 | from ConfigSpace.hyperparameters import Hyperparameter 15 | from sklearn.ensemble import RandomForestClassifier 16 | 17 | from hpobench.dependencies.ml.ml_benchmark_template import MLBenchmark 18 | 19 | __version__ = '0.0.1' 20 | 21 | 22 | class RandomForestBenchmark(MLBenchmark): 23 | def __init__(self, 24 | task_id: int, 25 | rng: Union[np.random.RandomState, int, None] = None, 26 | valid_size: float = 0.33, 27 | data_path: Union[str, None] = None): 28 | super(RandomForestBenchmark, self).__init__(task_id, rng, valid_size, data_path) 29 | 30 | @staticmethod 31 | def get_configuration_space(seed: Union[int, None] = None) -> CS.ConfigurationSpace: 32 | """Parameter space to be optimized --- contains the hyperparameters 33 | """ 34 | cs = CS.ConfigurationSpace(seed=seed) 35 | cs.add_hyperparameters([ 36 | CS.UniformIntegerHyperparameter( 37 | 'max_depth', lower=1, upper=50, default_value=10, log=True 38 | ), 39 | CS.UniformIntegerHyperparameter( 40 | 'min_samples_split', lower=2, upper=128, default_value=32, log=True 41 | ), 42 | # the use of a float max_features is different than the sklearn usage 43 | CS.UniformFloatHyperparameter( 44 | 'max_features', lower=0, upper=1.0, default_value=0.5, log=False 45 | ), 46 | CS.UniformIntegerHyperparameter( 47 | 'min_samples_leaf', lower=1, upper=20, default_value=1, log=False 48 | ), 49 | ]) 50 | return cs 51 | 52 | @staticmethod 53 | def get_fidelity_space(seed: Union[int, None] = None) -> CS.ConfigurationSpace: 54 | fidelity_space = CS.ConfigurationSpace(seed=seed) 55 | fidelity_space.add_hyperparameters( 56 | # gray-box setting (multi-multi-fidelity) - ntrees + data subsample 57 | RandomForestBenchmark._get_fidelity_choices(n_estimators_choice='variable', subsample_choice='variable') 58 | ) 59 | return fidelity_space 60 | 61 | @staticmethod 62 | def _get_fidelity_choices(n_estimators_choice: str, subsample_choice: str) -> Tuple[Hyperparameter, Hyperparameter]: 63 | 64 | assert n_estimators_choice in ['fixed', 'variable'] 65 | assert subsample_choice in ['fixed', 'variable'] 66 | 67 | fidelity1 = dict( 68 | fixed=CS.Constant('n_estimators', value=512), 69 | 
variable=CS.UniformIntegerHyperparameter( 70 | 'n_estimators', lower=16, upper=512, default_value=512, log=False 71 | ) 72 | ) 73 | 74 | fidelity2 = dict( 75 | fixed=CS.Constant('subsample', value=1), 76 | variable=CS.UniformFloatHyperparameter( 77 | 'subsample', lower=0.1, upper=1, default_value=1, log=False 78 | ) 79 | ) 80 | n_estimators = fidelity1[n_estimators_choice] 81 | subsample = fidelity2[subsample_choice] 82 | return n_estimators, subsample 83 | 84 | def init_model(self, config: Union[CS.Configuration, Dict], 85 | fidelity: Union[CS.Configuration, Dict, None] = None, 86 | rng: Union[int, np.random.RandomState, None] = None): 87 | """ Function that returns the model initialized based on the configuration and fidelity 88 | """ 89 | rng = self.rng if rng is None else rng 90 | if isinstance(config, CS.Configuration): 91 | config = config.get_dictionary() 92 | if isinstance(fidelity, CS.Configuration): 93 | fidelity = fidelity.get_dictionary() 94 | 95 | config = deepcopy(config) 96 | n_features = self.train_X.shape[1] 97 | config["max_features"] = int(np.rint(np.power(n_features, config["max_features"]))) 98 | model = RandomForestClassifier( 99 | **config, 100 | n_estimators=fidelity['n_estimators'], # a fidelity being used during initialization 101 | bootstrap=True, 102 | random_state=rng 103 | ) 104 | return model 105 | 106 | 107 | class RandomForestBenchmarkBB(RandomForestBenchmark): 108 | def get_fidelity_space(self, seed: Union[int, None] = None) -> CS.ConfigurationSpace: 109 | fidelity_space = CS.ConfigurationSpace(seed=seed) 110 | fidelity_space.add_hyperparameters( 111 | # black-box setting (full fidelity) 112 | RandomForestBenchmark._get_fidelity_choices(n_estimators_choice='fixed', subsample_choice='fixed') 113 | ) 114 | return fidelity_space 115 | 116 | 117 | class RandomForestBenchmarkMF(RandomForestBenchmark): 118 | def get_fidelity_space(self, seed: Union[int, None] = None) -> CS.ConfigurationSpace: 119 | fidelity_space = CS.ConfigurationSpace(seed=seed) 120 | fidelity_space.add_hyperparameters( 121 | # gray-box setting (multi-fidelity) - ntrees 122 | RandomForestBenchmark._get_fidelity_choices(n_estimators_choice='variable', subsample_choice='fixed') 123 | ) 124 | return fidelity_space 125 | 126 | 127 | __all__ = ['RandomForestBenchmark', 'RandomForestBenchmarkBB', 'RandomForestBenchmarkMF'] 128 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/benchmarks/ml/svm_benchmark.py: -------------------------------------------------------------------------------- 1 | """ 2 | Changelog: 3 | ========== 4 | 5 | 0.0.1: 6 | * First implementation of the new SVM Benchmarks. 
7 | """ 8 | 9 | from typing import Union, Dict 10 | 11 | import ConfigSpace as CS 12 | import numpy as np 13 | from ConfigSpace.hyperparameters import Hyperparameter 14 | from sklearn.svm import SVC 15 | 16 | from hpobench.dependencies.ml.ml_benchmark_template import MLBenchmark 17 | 18 | __version__ = '0.0.1' 19 | 20 | 21 | class SVMBenchmark(MLBenchmark): 22 | def __init__(self, 23 | task_id: int, 24 | rng: Union[np.random.RandomState, int, None] = None, 25 | valid_size: float = 0.33, 26 | data_path: Union[str, None] = None): 27 | super(SVMBenchmark, self).__init__(task_id, rng, valid_size, data_path) 28 | 29 | self.cache_size = 200 30 | 31 | @staticmethod 32 | def get_configuration_space(seed: Union[int, None] = None) -> CS.ConfigurationSpace: 33 | """Parameter space to be optimized --- contains the hyperparameters 34 | """ 35 | cs = CS.ConfigurationSpace(seed=seed) 36 | # https://jmlr.org/papers/volume20/18-444/18-444.pdf (Table 1) 37 | cs.add_hyperparameters([ 38 | CS.UniformFloatHyperparameter( 39 | "C", 2**-10, 2**10, log=True, default_value=1.0 40 | ), 41 | CS.UniformFloatHyperparameter( 42 | "gamma", 2**-10, 2**10, log=True, default_value=0.1 43 | ) 44 | ]) 45 | return cs 46 | 47 | @staticmethod 48 | def get_fidelity_space(seed: Union[int, None] = None) -> CS.ConfigurationSpace: 49 | fidelity_space = CS.ConfigurationSpace(seed=seed) 50 | fidelity_space.add_hyperparameter( 51 | SVMBenchmark._get_fidelity_choices(subsample_choice='variable') 52 | ) 53 | return fidelity_space 54 | 55 | @staticmethod 56 | def _get_fidelity_choices(subsample_choice: str) -> Hyperparameter: 57 | 58 | assert subsample_choice in ['fixed', 'variable'] 59 | 60 | fidelity = dict( 61 | fixed=CS.Constant('subsample', value=1), 62 | variable=CS.UniformFloatHyperparameter( 63 | 'subsample', lower=0.1, upper=1.0, default_value=1.0, log=False 64 | ) 65 | ) 66 | subsample = fidelity[subsample_choice] 67 | 68 | return subsample 69 | 70 | def init_model(self, config: Union[CS.Configuration, Dict], 71 | fidelity: Union[CS.Configuration, Dict, None] = None, 72 | rng: Union[int, np.random.RandomState, None] = None): 73 | # initializing model 74 | rng = self.rng if rng is None else rng 75 | if isinstance(config, CS.Configuration): 76 | config = config.get_dictionary() 77 | model = SVC( 78 | **config, 79 | random_state=rng, 80 | cache_size=self.cache_size 81 | ) 82 | return model 83 | 84 | 85 | class SVMBenchmarkBB(SVMBenchmark): 86 | def get_fidelity_space(self, seed: Union[int, None] = None) -> CS.ConfigurationSpace: 87 | fidelity_space = CS.ConfigurationSpace(seed=seed) 88 | fidelity_space.add_hyperparameter( 89 | # uses the entire data (subsample=1), reflecting the black-box setup 90 | SVMBenchmark._get_fidelity_choices(subsample_choice='fixed') 91 | ) 92 | return fidelity_space 93 | 94 | 95 | # To keep the parity of the the overall design 96 | SVMBenchmarkMF = SVMBenchmark 97 | 98 | __all__ = ['SVMBenchmark', 'SVMBenchmarkMF', 'SVMBenchmarkBB'] 99 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/benchmarks/ml/xgboost_benchmark.py: -------------------------------------------------------------------------------- 1 | """ 2 | Changelog: 3 | ========== 4 | 5 | 0.0.1: 6 | * First implementation of the new XGB Benchmarks. 
7 | """ 8 | from typing import Union, Tuple, Dict 9 | 10 | import ConfigSpace as CS 11 | import numpy as np 12 | import xgboost as xgb 13 | from ConfigSpace.hyperparameters import Hyperparameter 14 | 15 | from hpobench.dependencies.ml.ml_benchmark_template import MLBenchmark 16 | 17 | __version__ = '0.0.1' 18 | 19 | 20 | class XGBoostBenchmark(MLBenchmark): 21 | def __init__(self, 22 | task_id: int, 23 | rng: Union[np.random.RandomState, int, None] = None, 24 | valid_size: float = 0.33, 25 | data_path: Union[str, None] = None): 26 | super(XGBoostBenchmark, self).__init__(task_id, rng, valid_size, data_path) 27 | 28 | @staticmethod 29 | def get_configuration_space(seed: Union[int, None] = None) -> CS.ConfigurationSpace: 30 | """Parameter space to be optimized --- contains the hyperparameters 31 | """ 32 | cs = CS.ConfigurationSpace(seed=seed) 33 | 34 | cs.add_hyperparameters([ 35 | CS.UniformFloatHyperparameter( 36 | 'eta', lower=2**-10, upper=1., default_value=0.3, log=True 37 | ), # learning rate 38 | CS.UniformIntegerHyperparameter( 39 | 'max_depth', lower=1, upper=50, default_value=10, log=True 40 | ), 41 | CS.UniformFloatHyperparameter( 42 | 'colsample_bytree', lower=0.1, upper=1., default_value=1., log=False 43 | ), 44 | CS.UniformFloatHyperparameter( 45 | 'reg_lambda', lower=2**-10, upper=2**10, default_value=1, log=True 46 | ) 47 | ]) 48 | return cs 49 | 50 | @staticmethod 51 | def get_fidelity_space(seed: Union[int, None] = None) -> CS.ConfigurationSpace: 52 | fidelity_space = CS.ConfigurationSpace(seed=seed) 53 | fidelity_space.add_hyperparameters( 54 | # gray-box setting (multi-multi-fidelity) - ntrees + data subsample 55 | XGBoostBenchmark._get_fidelity_choices(n_estimators_choice='variable', subsample_choice='variable') 56 | ) 57 | return fidelity_space 58 | 59 | @staticmethod 60 | def _get_fidelity_choices(n_estimators_choice: str, subsample_choice: str) -> Tuple[Hyperparameter, Hyperparameter]: 61 | 62 | assert n_estimators_choice in ['fixed', 'variable'] 63 | assert subsample_choice in ['fixed', 'variable'] 64 | 65 | fidelity1 = dict( 66 | fixed=CS.Constant('n_estimators', value=2000), 67 | variable=CS.UniformIntegerHyperparameter( 68 | 'n_estimators', lower=50, upper=2000, default_value=2000, log=False 69 | ) 70 | ) 71 | fidelity2 = dict( 72 | fixed=CS.Constant('subsample', value=1), 73 | variable=CS.UniformFloatHyperparameter( 74 | 'subsample', lower=0.1, upper=1, default_value=1, log=False 75 | ) 76 | ) 77 | 78 | n_estimators = fidelity1[n_estimators_choice] 79 | subsample = fidelity2[subsample_choice] 80 | return n_estimators, subsample 81 | 82 | def init_model(self, 83 | config: Union[CS.Configuration, Dict], 84 | fidelity: Union[CS.Configuration, Dict, None] = None, 85 | rng: Union[int, np.random.RandomState, None] = None): 86 | """ Function that returns the model initialized based on the configuration and fidelity 87 | """ 88 | if isinstance(config, CS.Configuration): 89 | config = config.get_dictionary() 90 | if isinstance(fidelity, CS.Configuration): 91 | fidelity = fidelity.get_dictionary() 92 | 93 | rng = rng if (rng is None or isinstance(rng, int)) else self.seed 94 | extra_args = dict( 95 | booster="gbtree", 96 | n_estimators=fidelity['n_estimators'], 97 | objective="binary:logistic", 98 | random_state=rng, 99 | subsample=1 100 | ) 101 | if self.n_classes > 2: 102 | extra_args["objective"] = "multi:softmax" 103 | extra_args.update({"num_class": self.n_classes}) 104 | 105 | model = xgb.XGBClassifier( 106 | **config, 107 | **extra_args 108 | ) 109 | return model 
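# Illustrative sketch of how the pieces of this benchmark fit together; the task_id value
# and the direct-call pattern are assumptions for illustration, not documented API usage:
#
#   benchmark = XGBoostBenchmark(task_id=167119)            # any OpenML task id
#   cs = benchmark.get_configuration_space(seed=0)          # eta, max_depth, colsample_bytree, reg_lambda
#   fs = benchmark.get_fidelity_space(seed=0)               # n_estimators, subsample
#   config = cs.sample_configuration()
#   fidelity = fs.get_default_configuration()
#   model = benchmark.init_model(config, fidelity)          # ready-to-fit xgb.XGBClassifier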
110 | 111 | 112 | class XGBoostBenchmarkBB(XGBoostBenchmark): 113 | def get_fidelity_space(self, seed: Union[int, None] = None) -> CS.ConfigurationSpace: 114 | fidelity_space = CS.ConfigurationSpace(seed=seed) 115 | fidelity_space.add_hyperparameters( 116 | # black-box setting (full fidelity) 117 | XGBoostBenchmark._get_fidelity_choices(n_estimators_choice='fixed', subsample_choice='fixed') 118 | ) 119 | return fidelity_space 120 | 121 | 122 | class XGBoostBenchmarkMF(XGBoostBenchmark): 123 | def get_fidelity_space(self, seed: Union[int, None] = None) -> CS.ConfigurationSpace: 124 | fidelity_space = CS.ConfigurationSpace(seed=seed) 125 | fidelity_space.add_hyperparameters( 126 | # gray-box setting (multi-fidelity) - ntrees 127 | XGBoostBenchmark._get_fidelity_choices(n_estimators_choice='variable', subsample_choice='fixed') 128 | ) 129 | return fidelity_space 130 | 131 | 132 | __all__ = ['XGBoostBenchmarkBB', 'XGBoostBenchmarkMF', 'XGBoostBenchmark'] 133 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/benchmarks/nas/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LamNgo1/cma-meta-algorithm/9aa76dc82f09f4ed5ead6a0cade40645f8850f8e/test_functions/function_realworld_bo/hpobench/benchmarks/nas/__init__.py -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/benchmarks/od/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LamNgo1/cma-meta-algorithm/9aa76dc82f09f4ed5ead6a0cade40645f8850f8e/test_functions/function_realworld_bo/hpobench/benchmarks/od/__init__.py -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/benchmarks/od/od_benchmarks.py: -------------------------------------------------------------------------------- 1 | """ 2 | This is just an entry point for starting the benchmarks. 3 | """ 4 | 5 | 6 | from hpobench.benchmarks.od.od_ae import ODAutoencoder 7 | from hpobench.benchmarks.od.od_kde import ODKernelDensityEstimation 8 | from hpobench.benchmarks.od.od_ocsvm import ODOneClassSupportVectorMachine 9 | 10 | __all__ = [ODAutoencoder, ODKernelDensityEstimation, ODOneClassSupportVectorMachine] 11 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/benchmarks/od/od_kde.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from typing import Union 3 | 4 | import ConfigSpace as CS 5 | import ConfigSpace.hyperparameters as CSH 6 | import numpy as np 7 | from sklearn.neighbors import KernelDensity 8 | 9 | from hpobench.dependencies.od.traditional_benchmark import ODTraditional 10 | 11 | __version__ = '0.0.1' 12 | 13 | logger = logging.getLogger('ODKernelDensityEstimation') 14 | 15 | 16 | class ODKernelDensityEstimation(ODTraditional): 17 | """ 18 | Benchmark to train a Kernel Density Estimation (KDE) model for outlier detection. Overall, 19 | this benchmark can be used with one of 15 datasets (using a contamination ratio of 10%) provided by the 20 | ODDS Library (Rayana, 2016). Internally, a 4-fold cross-validation is used to prevent overfitting. 21 | Area under the precission-recall curve (AUPR) is used as metric. 
22 | """ 23 | 24 | def __init__(self, 25 | dataset_name: str, 26 | rng: Union[np.random.RandomState, int, None] = None): 27 | """ 28 | Parameters 29 | ---------- 30 | dataset_name : str 31 | Must be one of [ 32 | "annthyroid", "arrhythmia", "breastw", "cardio", "ionosphere", 33 | "mammography", "musk", "optdigits", "pendigits", "pima", 34 | "satellite", "satimage-2", "thyroid", "vowels", "wbc"] 35 | rng : np.random.RandomState, int, None 36 | """ 37 | super(ODKernelDensityEstimation, self).__init__( 38 | dataset_name=dataset_name, 39 | rng=rng 40 | ) 41 | 42 | def get_name(self): 43 | """Returns the name of the model for the meta information.""" 44 | return "Kernel Density Estimation" 45 | 46 | def get_model(self, configuration): 47 | """Returns the unfitted model given a configuration.""" 48 | hp_bandwidth = float(configuration['bandwidth']) 49 | 50 | return KernelDensity(kernel=configuration["kernel"], bandwidth=hp_bandwidth) 51 | 52 | def calculate_scores(self, model, X): 53 | """Calculates the scores based on the model and X.""" 54 | return (-1.) * model.score_samples(X) 55 | 56 | @staticmethod 57 | def get_configuration_space(seed: Union[int, None] = None) -> CS.ConfigurationSpace: 58 | """ 59 | Creates a ConfigSpace.ConfigurationSpace containing all parameters for 60 | the OCSVM Model. 61 | 62 | Parameters 63 | ---------- 64 | seed : int, None 65 | Fixing the seed for the ConfigSpace.ConfigurationSpace 66 | 67 | Returns 68 | ------- 69 | ConfigSpace.ConfigurationSpace 70 | """ 71 | 72 | seed = seed if seed is not None else np.random.randint(1, 100000) 73 | cs = CS.ConfigurationSpace(seed=seed) 74 | 75 | bandwidth = CSH.UniformFloatHyperparameter('bandwidth', lower=pow(2, -5), upper=pow(2, 5), log=True) 76 | cs.add_hyperparameter(bandwidth) 77 | 78 | # Kernel 79 | kernels = ['gaussian', 'tophat', 'epanechnikov', 'exponential', 'linear', 'cosine'] 80 | choice = CSH.CategoricalHyperparameter( 81 | 'kernel', 82 | kernels, 83 | default_value=kernels[0] 84 | ) 85 | cs.add_hyperparameter(choice) 86 | 87 | # Scaler 88 | scalers = ["None", "MinMax", "Standard"] 89 | choice = CSH.CategoricalHyperparameter( 90 | 'scaler', 91 | scalers, 92 | default_value=scalers[0] 93 | ) 94 | cs.add_hyperparameter(choice) 95 | 96 | return cs 97 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/benchmarks/od/od_ocsvm.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from typing import Union 3 | 4 | import ConfigSpace as CS 5 | import ConfigSpace.hyperparameters as CSH 6 | import numpy as np 7 | from sklearn.svm import OneClassSVM 8 | 9 | from hpobench.dependencies.od.traditional_benchmark import ODTraditional 10 | 11 | __version__ = '0.0.1' 12 | 13 | logger = logging.getLogger('ODOneClassSupportVectorMachine') 14 | 15 | 16 | class ODOneClassSupportVectorMachine(ODTraditional): 17 | """ 18 | Benchmark to train a One-Class Support Vector Machine (OC-SVM) model for outlier detection. Overall, 19 | this benchmark can be used with one of 15 datasets (using a contamination ratio of 10%) provided by the 20 | ODDS Library (Rayana, 2016). Internally, a 4-fold cross-validation is used to prevent overfitting. 21 | Area under the precission-recall curve (AUPR) is used as metric. 
22 | """ 23 | 24 | def __init__(self, 25 | dataset_name: str, 26 | rng: Union[np.random.RandomState, int, None] = None): 27 | """ 28 | Parameters 29 | ---------- 30 | dataset_name : str 31 | Must be one of [ 32 | "annthyroid", "arrhythmia", "breastw", "cardio", "ionosphere", 33 | "mammography", "musk", "optdigits", "pendigits", "pima", 34 | "satellite", "satimage-2", "thyroid", "vowels", "wbc"] 35 | rng : np.random.RandomState, int, None 36 | """ 37 | super(ODOneClassSupportVectorMachine, self).__init__( 38 | dataset_name=dataset_name, 39 | rng=rng 40 | ) 41 | 42 | def get_name(self): 43 | """Returns the name of the model for the meta information.""" 44 | return "One Class Support Vector Machine" 45 | 46 | def get_model(self, configuration): 47 | """Returns the unfitted model given a configuration.""" 48 | hp_gamma = float(configuration['gamma']) 49 | hp_nu = float(configuration['nu']) 50 | 51 | return OneClassSVM(kernel="rbf", gamma=hp_gamma, nu=hp_nu) 52 | 53 | def calculate_scores(self, model, X): 54 | """Calculates the scores based on the model and X.""" 55 | return (-1) * model.decision_function(X) 56 | 57 | @staticmethod 58 | def get_configuration_space(seed: Union[int, None] = None) -> CS.ConfigurationSpace: 59 | """ 60 | Creates a ConfigSpace.ConfigurationSpace containing all parameters for 61 | the OCSVM Model. 62 | 63 | Parameters 64 | ---------- 65 | seed : int, None 66 | Fixing the seed for the ConfigSpace.ConfigurationSpace 67 | 68 | Returns 69 | ------- 70 | ConfigSpace.ConfigurationSpace 71 | """ 72 | 73 | seed = seed if seed is not None else np.random.randint(1, 100000) 74 | cs = CS.ConfigurationSpace(seed=seed) 75 | 76 | cs.add_hyperparameters([ 77 | CS.UniformFloatHyperparameter('gamma', lower=pow(2, -20), upper=pow(2, -2), log=True), 78 | CS.UniformFloatHyperparameter('nu', lower=0.0, upper=1.0, default_value=0.5), 79 | ]) 80 | 81 | # Scaler 82 | scalers = ["None", "MinMax", "Standard"] 83 | choice = CSH.CategoricalHyperparameter( 84 | 'scaler', 85 | scalers, 86 | default_value=scalers[0] 87 | ) 88 | cs.add_hyperparameter(choice) 89 | 90 | return cs 91 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/benchmarks/rl/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LamNgo1/cma-meta-algorithm/9aa76dc82f09f4ed5ead6a0cade40645f8850f8e/test_functions/function_realworld_bo/hpobench/benchmarks/rl/__init__.py -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/benchmarks/surrogates/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LamNgo1/cma-meta-algorithm/9aa76dc82f09f4ed5ead6a0cade40645f8850f8e/test_functions/function_realworld_bo/hpobench/benchmarks/surrogates/__init__.py -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/container/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LamNgo1/cma-meta-algorithm/9aa76dc82f09f4ed5ead6a0cade40645f8850f8e/test_functions/function_realworld_bo/hpobench/container/__init__.py -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/container/benchmarks/__init__.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/LamNgo1/cma-meta-algorithm/9aa76dc82f09f4ed5ead6a0cade40645f8850f8e/test_functions/function_realworld_bo/hpobench/container/benchmarks/__init__.py -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/container/benchmarks/ml/__init__.py: -------------------------------------------------------------------------------- 1 | from hpobench.container.benchmarks.ml.histgb_benchmark import HistGBBenchmark, HistGBBenchmarkBB, HistGBBenchmarkMF 2 | from hpobench.container.benchmarks.ml.lr_benchmark import LRBenchmark, LRBenchmarkBB, LRBenchmarkMF 3 | from hpobench.container.benchmarks.ml.nn_benchmark import NNBenchmark, NNBenchmarkBB, NNBenchmarkMF 4 | from hpobench.container.benchmarks.ml.rf_benchmark import RandomForestBenchmark, RandomForestBenchmarkBB, \ 5 | RandomForestBenchmarkMF 6 | from hpobench.container.benchmarks.ml.svm_benchmark import SVMBenchmark, SVMBenchmarkBB, SVMBenchmarkMF 7 | from hpobench.container.benchmarks.ml.tabular_benchmark import TabularBenchmark 8 | from hpobench.container.benchmarks.ml.xgboost_benchmark import XGBoostBenchmark, XGBoostBenchmarkBB, XGBoostBenchmarkMF 9 | 10 | 11 | __all__ = ['HistGBBenchmark', 'HistGBBenchmarkBB', 'HistGBBenchmarkMF', 12 | 'LRBenchmark', 'LRBenchmarkBB', 'LRBenchmarkMF', 13 | 'NNBenchmark', 'NNBenchmarkBB', 'NNBenchmarkMF', 14 | 'RandomForestBenchmark', 'RandomForestBenchmarkBB', 'RandomForestBenchmarkMF', 15 | 'SVMBenchmark', 'SVMBenchmarkBB', 'SVMBenchmarkMF', 16 | 'TabularBenchmark', 17 | 'XGBoostBenchmark', 'XGBoostBenchmarkBB', 'XGBoostBenchmarkMF'] 18 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/container/benchmarks/ml/histgb_benchmark.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # -*- coding: utf-8 -*- 3 | 4 | """ Benchmark for the HistGB Benchmarks from hpobench/benchmarks/ml_mmfb/histgb_benchmark.py """ 5 | 6 | from hpobench.container.client_abstract_benchmark import AbstractBenchmarkClient 7 | 8 | 9 | class HistGBBenchmark(AbstractBenchmarkClient): 10 | def __init__(self, **kwargs): 11 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'HistGBBenchmark') 12 | kwargs['container_name'] = kwargs.get('container_name', 'ml_mmfb') 13 | kwargs['latest'] = kwargs.get('container_tag', '0.0.1') 14 | super(HistGBBenchmark, self).__init__(**kwargs) 15 | 16 | 17 | class HistGBBenchmarkBB(AbstractBenchmarkClient): 18 | def __init__(self, **kwargs): 19 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'HistGBBenchmarkBB') 20 | kwargs['container_name'] = kwargs.get('container_name', 'ml_mmfb') 21 | kwargs['latest'] = kwargs.get('container_tag', '0.0.1') 22 | super(HistGBBenchmarkBB, self).__init__(**kwargs) 23 | 24 | 25 | class HistGBBenchmarkMF(AbstractBenchmarkClient): 26 | def __init__(self, **kwargs): 27 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'HistGBBenchmarkMF') 28 | kwargs['container_name'] = kwargs.get('container_name', 'ml_mmfb') 29 | kwargs['latest'] = kwargs.get('container_tag', '0.0.1') 30 | super(HistGBBenchmarkMF, self).__init__(**kwargs) 31 | 32 | 33 | __all__ = ['HistGBBenchmark', 'HistGBBenchmarkBB', 'HistGBBenchmarkMF'] 34 | -------------------------------------------------------------------------------- 
/test_functions/function_realworld_bo/hpobench/container/benchmarks/ml/lr_benchmark.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # -*- coding: utf-8 -*- 3 | 4 | """ Benchmark for the learning rate Benchmarks from hpobench/benchmarks/ml_mmfb/lr_benchmarks.py """ 5 | 6 | from hpobench.container.client_abstract_benchmark import AbstractBenchmarkClient 7 | 8 | 9 | class LRBenchmark(AbstractBenchmarkClient): 10 | def __init__(self, **kwargs): 11 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'LRBenchmark') 12 | kwargs['container_name'] = kwargs.get('container_name', 'ml_mmfb') 13 | kwargs['latest'] = kwargs.get('container_tag', '0.0.1') 14 | super(LRBenchmark, self).__init__(**kwargs) 15 | 16 | 17 | class LRBenchmarkBB(AbstractBenchmarkClient): 18 | def __init__(self, **kwargs): 19 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'LRBenchmarkBB') 20 | kwargs['container_name'] = kwargs.get('container_name', 'ml_mmfb') 21 | kwargs['latest'] = kwargs.get('container_tag', '0.0.1') 22 | super(LRBenchmarkBB, self).__init__(**kwargs) 23 | 24 | 25 | class LRBenchmarkMF(AbstractBenchmarkClient): 26 | def __init__(self, **kwargs): 27 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'LRBenchmarkMF') 28 | kwargs['container_name'] = kwargs.get('container_name', 'ml_mmfb') 29 | kwargs['latest'] = kwargs.get('container_tag', '0.0.1') 30 | super(LRBenchmarkMF, self).__init__(**kwargs) 31 | 32 | 33 | __all__ = ['LRBenchmark', 'LRBenchmarkBB', 'LRBenchmarkMF'] 34 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/container/benchmarks/ml/nn_benchmark.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # -*- coding: utf-8 -*- 3 | 4 | """ Benchmark for the Neural Network Benchmarks from hpobench/benchmarks/ml_mmfb/nn_benchmark.py """ 5 | 6 | from hpobench.container.client_abstract_benchmark import AbstractBenchmarkClient 7 | 8 | 9 | class NNBenchmark(AbstractBenchmarkClient): 10 | def __init__(self, **kwargs): 11 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'NNBenchmark') 12 | kwargs['container_name'] = kwargs.get('container_name', 'ml_mmfb') 13 | kwargs['latest'] = kwargs.get('container_tag', '0.0.1') 14 | super(NNBenchmark, self).__init__(**kwargs) 15 | 16 | 17 | class NNBenchmarkBB(AbstractBenchmarkClient): 18 | def __init__(self, **kwargs): 19 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'NNBenchmarkBB') 20 | kwargs['container_name'] = kwargs.get('container_name', 'ml_mmfb') 21 | kwargs['latest'] = kwargs.get('container_tag', '0.0.1') 22 | super(NNBenchmarkBB, self).__init__(**kwargs) 23 | 24 | 25 | class NNBenchmarkMF(AbstractBenchmarkClient): 26 | def __init__(self, **kwargs): 27 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'NNBenchmarkMF') 28 | kwargs['container_name'] = kwargs.get('container_name', 'ml_mmfb') 29 | kwargs['latest'] = kwargs.get('container_tag', '0.0.1') 30 | super(NNBenchmarkMF, self).__init__(**kwargs) 31 | 32 | 33 | __all__ = ['NNBenchmark', 'NNBenchmarkBB', 'NNBenchmarkMF'] 34 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/container/benchmarks/ml/pybnn.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # -*- coding: utf-8 -*- 3 | 4 | """ Benchmark for the pybnn Benchmark from 
hpobench/benchmarks/ml/pybnn.py """ 5 | 6 | from hpobench.container.client_abstract_benchmark import AbstractBenchmarkClient 7 | 8 | 9 | class BNNOnToyFunction(AbstractBenchmarkClient): 10 | def __init__(self, **kwargs): 11 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'BNNOnToyFunction') 12 | kwargs['container_name'] = kwargs.get('container_name', 'pybnn') 13 | kwargs['latest'] = kwargs.get('container_tag', '0.0.4') 14 | super(BNNOnToyFunction, self).__init__(**kwargs) 15 | 16 | 17 | class BNNOnBostonHousing(AbstractBenchmarkClient): 18 | def __init__(self, **kwargs): 19 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'BNNOnBostonHousing') 20 | kwargs['container_name'] = kwargs.get('container_name', 'pybnn') 21 | kwargs['latest'] = kwargs.get('container_tag', '0.0.4') 22 | super(BNNOnBostonHousing, self).__init__(**kwargs) 23 | 24 | 25 | class BNNOnProteinStructure(AbstractBenchmarkClient): 26 | def __init__(self, **kwargs): 27 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'BNNOnProteinStructure') 28 | kwargs['container_name'] = kwargs.get('container_name', 'pybnn') 29 | kwargs['latest'] = kwargs.get('container_tag', '0.0.4') 30 | super(BNNOnProteinStructure, self).__init__(**kwargs) 31 | 32 | 33 | class BNNOnYearPrediction(AbstractBenchmarkClient): 34 | def __init__(self, **kwargs): 35 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'BNNOnYearPrediction') 36 | kwargs['container_name'] = kwargs.get('container_name', 'pybnn') 37 | kwargs['latest'] = kwargs.get('container_tag', '0.0.4') 38 | super(BNNOnYearPrediction, self).__init__(**kwargs) 39 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/container/benchmarks/ml/rf_benchmark.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # -*- coding: utf-8 -*- 3 | 4 | """ Benchmark for the Random Forest Benchmarks from hpobench/benchmarks/ml_mmfb/rf_benchmark.py """ 5 | 6 | from hpobench.container.client_abstract_benchmark import AbstractBenchmarkClient 7 | 8 | 9 | class RandomForestBenchmark(AbstractBenchmarkClient): 10 | def __init__(self, **kwargs): 11 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'RandomForestBenchmark') 12 | kwargs['container_name'] = kwargs.get('container_name', 'ml_mmfb') 13 | kwargs['latest'] = kwargs.get('container_tag', '0.0.1') 14 | super(RandomForestBenchmark, self).__init__(**kwargs) 15 | 16 | 17 | class RandomForestBenchmarkBB(AbstractBenchmarkClient): 18 | def __init__(self, **kwargs): 19 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'RandomForestBenchmarkBB') 20 | kwargs['container_name'] = kwargs.get('container_name', 'ml_mmfb') 21 | kwargs['latest'] = kwargs.get('container_tag', '0.0.1') 22 | super(RandomForestBenchmarkBB, self).__init__(**kwargs) 23 | 24 | 25 | class RandomForestBenchmarkMF(AbstractBenchmarkClient): 26 | def __init__(self, **kwargs): 27 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'RandomForestBenchmarkMF') 28 | kwargs['container_name'] = kwargs.get('container_name', 'ml_mmfb') 29 | kwargs['latest'] = kwargs.get('container_tag', '0.0.1') 30 | super(RandomForestBenchmarkMF, self).__init__(**kwargs) 31 | 32 | 33 | __all__ = ['RandomForestBenchmark', 'RandomForestBenchmarkBB', 'RandomForestBenchmarkMF'] 34 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/container/benchmarks/ml/svm_benchmark.py: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # -*- coding: utf-8 -*- 3 | 4 | """ Benchmark for the SVM Benchmarks from hpobench/benchmarks/ml_mmfb/svm_benchmark.py """ 5 | 6 | from hpobench.container.client_abstract_benchmark import AbstractBenchmarkClient 7 | 8 | 9 | class SVMBenchmark(AbstractBenchmarkClient): 10 | def __init__(self, **kwargs): 11 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'SVMBenchmark') 12 | kwargs['container_name'] = kwargs.get('container_name', 'ml_mmfb') 13 | kwargs['latest'] = kwargs.get('container_tag', '0.0.1') 14 | super(SVMBenchmark, self).__init__(**kwargs) 15 | 16 | 17 | class SVMBenchmarkMF(AbstractBenchmarkClient): 18 | def __init__(self, **kwargs): 19 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'SVMBenchmarkMF') 20 | kwargs['container_name'] = kwargs.get('container_name', 'ml_mmfb') 21 | kwargs['latest'] = kwargs.get('container_tag', '0.0.1') 22 | super(SVMBenchmarkMF, self).__init__(**kwargs) 23 | 24 | 25 | class SVMBenchmarkBB(AbstractBenchmarkClient): 26 | def __init__(self, **kwargs): 27 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'SVMBenchmarkBB') 28 | kwargs['container_name'] = kwargs.get('container_name', 'ml_mmfb') 29 | kwargs['latest'] = kwargs.get('container_tag', '0.0.1') 30 | super(SVMBenchmarkBB, self).__init__(**kwargs) 31 | 32 | 33 | __all__ = ['SVMBenchmark', 'SVMBenchmarkMF', 'SVMBenchmarkBB'] 34 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/container/benchmarks/ml/svm_benchmark_old.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # -*- coding: utf-8 -*- 3 | 4 | """ Benchmark for the XGBoost Benchmark from hpobench/benchmarks/ml/xgboost_benchmark """ 5 | 6 | from hpobench.container.client_abstract_benchmark import AbstractBenchmarkClient 7 | 8 | 9 | class SupportVectorMachine(AbstractBenchmarkClient): 10 | def __init__(self, task_id: int, **kwargs): 11 | kwargs['task_id'] = task_id 12 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'SupportVectorMachine') 13 | kwargs['container_name'] = kwargs.get('container_name', 'svm_benchmark') 14 | kwargs['latest'] = kwargs.get('container_tag', '0.0.3') 15 | super(SupportVectorMachine, self).__init__(**kwargs) 16 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/container/benchmarks/ml/tabular_benchmark.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # -*- coding: utf-8 -*- 3 | 4 | """ Benchmark for the Tabular Benchmarks from hpobench/benchmarks/ml_mmfb/tabular_benchmark.py """ 5 | 6 | from hpobench.container.client_abstract_benchmark import AbstractBenchmarkClient 7 | 8 | 9 | class TabularBenchmark(AbstractBenchmarkClient): 10 | def __init__(self, **kwargs): 11 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'TabularBenchmark') 12 | kwargs['container_name'] = kwargs.get('container_name', 'ml_tabular_benchmarks') 13 | kwargs['latest'] = kwargs.get('container_tag', '0.0.1') 14 | super(TabularBenchmark, self).__init__(**kwargs) 15 | 16 | 17 | __all__ = ['TabularBenchmark'] 18 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/container/benchmarks/ml/xgboost_benchmark.py: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # -*- coding: utf-8 -*- 3 | 4 | """ Benchmark for the XGB Benchmarks from hpobench/benchmarks/ml_mmfb/xgboost_benchmark.py """ 5 | 6 | from hpobench.container.client_abstract_benchmark import AbstractBenchmarkClient 7 | 8 | 9 | class XGBoostBenchmark(AbstractBenchmarkClient): 10 | def __init__(self, **kwargs): 11 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'XGBoostBenchmark') 12 | kwargs['container_name'] = kwargs.get('container_name', 'ml_mmfb') 13 | kwargs['latest'] = kwargs.get('container_tag', '0.0.1') 14 | super(XGBoostBenchmark, self).__init__(**kwargs) 15 | 16 | 17 | class XGBoostBenchmarkBB(AbstractBenchmarkClient): 18 | def __init__(self, **kwargs): 19 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'XGBoostBenchmarkBB') 20 | kwargs['container_name'] = kwargs.get('container_name', 'ml_mmfb') 21 | kwargs['latest'] = kwargs.get('container_tag', '0.0.1') 22 | super(XGBoostBenchmarkBB, self).__init__(**kwargs) 23 | 24 | 25 | class XGBoostBenchmarkMF(AbstractBenchmarkClient): 26 | def __init__(self, **kwargs): 27 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'XGBoostBenchmarkMF') 28 | kwargs['container_name'] = kwargs.get('container_name', 'ml_mmfb') 29 | kwargs['latest'] = kwargs.get('container_tag', '0.0.1') 30 | super(XGBoostBenchmarkMF, self).__init__(**kwargs) 31 | 32 | 33 | class XGBoostSearchSpace3Benchmark(AbstractBenchmarkClient): 34 | def __init__(self, **kwargs): 35 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'XGBoostSearchSpace3Benchmark') 36 | kwargs['container_name'] = kwargs.get('container_name', 'ml_mmfb') 37 | kwargs['latest'] = kwargs.get('container_tag', '0.0.1') 38 | super(XGBoostSearchSpace3Benchmark, self).__init__(**kwargs) 39 | 40 | 41 | __all__ = ['XGBoostBenchmark', 'XGBoostBenchmarkBB', 'XGBoostBenchmarkMF'] 42 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/container/benchmarks/ml/xgboost_benchmark_old.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # -*- coding: utf-8 -*- 3 | 4 | """ Benchmark for the XGBoost Benchmark from hpobench/benchmarks/ml/xgboost_benchmark """ 5 | 6 | from hpobench.container.client_abstract_benchmark import AbstractBenchmarkClient 7 | 8 | 9 | class XGBoostBenchmark(AbstractBenchmarkClient): 10 | def __init__(self, task_id: int, **kwargs): 11 | kwargs['task_id'] = task_id 12 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'XGBoostBenchmark') 13 | kwargs['container_name'] = kwargs.get('container_name', 'xgboost_benchmark') 14 | kwargs['latest'] = kwargs.get('container_tag', '0.0.3') 15 | super(XGBoostBenchmark, self).__init__(**kwargs) 16 | 17 | 18 | class XGBoostExtendedBenchmark(AbstractBenchmarkClient): 19 | def __init__(self, task_id: int, **kwargs): 20 | kwargs['task_id'] = task_id 21 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'XGBoostExtendedBenchmark') 22 | kwargs['container_name'] = kwargs.get('container_name', 'xgboost_benchmark') 23 | kwargs['latest'] = kwargs.get('container_tag', '0.0.3') 24 | super(XGBoostExtendedBenchmark, self).__init__(**kwargs) 25 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/container/benchmarks/nas/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/LamNgo1/cma-meta-algorithm/9aa76dc82f09f4ed5ead6a0cade40645f8850f8e/test_functions/function_realworld_bo/hpobench/container/benchmarks/nas/__init__.py -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/container/benchmarks/nas/nasbench_101.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # -*- coding: utf-8 -*- 3 | 4 | """ Benchmark for the Tabular Benchmark from hpobench/benchmarks/nas/nasbench_101.py """ 5 | 6 | from hpobench.container.client_abstract_benchmark import AbstractBenchmarkClient 7 | 8 | 9 | class NASCifar10ABenchmark(AbstractBenchmarkClient): 10 | def __init__(self, **kwargs): 11 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'NASCifar10ABenchmark') 12 | kwargs['container_name'] = kwargs.get('container_name', 'nasbench_101') 13 | kwargs['latest'] = kwargs.get('container_tag', '0.0.4') 14 | super(NASCifar10ABenchmark, self).__init__(**kwargs) 15 | 16 | 17 | class NASCifar10BBenchmark(AbstractBenchmarkClient): 18 | def __init__(self, **kwargs): 19 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'NASCifar10BBenchmark') 20 | kwargs['container_name'] = kwargs.get('container_name', 'nasbench_101') 21 | kwargs['latest'] = kwargs.get('container_tag', '0.0.4') 22 | super(NASCifar10BBenchmark, self).__init__(**kwargs) 23 | 24 | 25 | class NASCifar10CBenchmark(AbstractBenchmarkClient): 26 | def __init__(self, **kwargs): 27 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'NASCifar10CBenchmark') 28 | kwargs['container_name'] = kwargs.get('container_name', 'nasbench_101') 29 | kwargs['latest'] = kwargs.get('container_tag', '0.0.4') 30 | super(NASCifar10CBenchmark, self).__init__(**kwargs) 31 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/container/benchmarks/nas/nasbench_1shot1.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # -*- coding: utf-8 -*- 3 | 4 | """ Benchmark for the nasbench 1shot1 benchmarks from hpobench/benchmarks/nas/nasbench_1shot1.py """ 5 | 6 | from hpobench.container.client_abstract_benchmark import AbstractBenchmarkClient 7 | 8 | 9 | class NASBench1shot1SearchSpace1Benchmark(AbstractBenchmarkClient): 10 | def __init__(self, **kwargs): 11 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'NASBench1shot1SearchSpace1Benchmark') 12 | kwargs['container_name'] = kwargs.get('container_name', 'nasbench_1shot1') 13 | kwargs['latest'] = kwargs.get('container_tag', '0.0.4') 14 | super(NASBench1shot1SearchSpace1Benchmark, self).__init__(**kwargs) 15 | 16 | 17 | class NASBench1shot1SearchSpace2Benchmark(AbstractBenchmarkClient): 18 | def __init__(self, **kwargs): 19 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'NASBench1shot1SearchSpace2Benchmark') 20 | kwargs['container_name'] = kwargs.get('container_name', 'nasbench_1shot1') 21 | kwargs['latest'] = kwargs.get('container_tag', '0.0.4') 22 | super(NASBench1shot1SearchSpace2Benchmark, self).__init__(**kwargs) 23 | 24 | 25 | class NASBench1shot1SearchSpace3Benchmark(AbstractBenchmarkClient): 26 | def __init__(self, **kwargs): 27 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'NASBench1shot1SearchSpace3Benchmark') 28 | kwargs['container_name'] = kwargs.get('container_name', 'nasbench_1shot1') 29 | kwargs['latest'] = kwargs.get('container_tag', '0.0.4') 30 | 
super(NASBench1shot1SearchSpace3Benchmark, self).__init__(**kwargs) 31 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/container/benchmarks/nas/nasbench_201.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # -*- coding: utf-8 -*- 3 | 4 | """ Benchmark for the NasBench201 Benchmark from hpobench/benchmarks/nas/nasbench_201.py """ 5 | 6 | from hpobench.container.client_abstract_benchmark import AbstractBenchmarkClient 7 | 8 | 9 | class Cifar10ValidNasBench201Benchmark(AbstractBenchmarkClient): 10 | def __init__(self, **kwargs): 11 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'Cifar10ValidNasBench201Benchmark') 12 | kwargs['container_name'] = kwargs.get('container_name', 'nasbench_201') 13 | kwargs['latest'] = kwargs.get('container_tag', '0.0.5') 14 | super(Cifar10ValidNasBench201Benchmark, self).__init__(**kwargs) 15 | 16 | 17 | class Cifar100NasBench201Benchmark(AbstractBenchmarkClient): 18 | def __init__(self, **kwargs): 19 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'Cifar100NasBench201Benchmark') 20 | kwargs['container_name'] = kwargs.get('container_name', 'nasbench_201') 21 | kwargs['latest'] = kwargs.get('container_tag', '0.0.5') 22 | super(Cifar100NasBench201Benchmark, self).__init__(**kwargs) 23 | 24 | 25 | class ImageNetNasBench201Benchmark(AbstractBenchmarkClient): 26 | def __init__(self, **kwargs): 27 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'ImageNetNasBench201Benchmark') 28 | kwargs['container_name'] = kwargs.get('container_name', 'nasbench_201') 29 | kwargs['latest'] = kwargs.get('container_tag', '0.0.5') 30 | super(ImageNetNasBench201Benchmark, self).__init__(**kwargs) 31 | 32 | 33 | class Cifar10ValidNasBench201BenchmarkOriginal(AbstractBenchmarkClient): 34 | def __init__(self, **kwargs): 35 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'Cifar10ValidNasBench201BenchmarkOriginal') 36 | kwargs['container_name'] = kwargs.get('container_name', 'nasbench_201') 37 | kwargs['latest'] = kwargs.get('container_tag', '0.0.5') 38 | super(Cifar10ValidNasBench201BenchmarkOriginal, self).__init__(**kwargs) 39 | 40 | 41 | class Cifar100NasBench201BenchmarkOriginal(AbstractBenchmarkClient): 42 | def __init__(self, **kwargs): 43 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'Cifar100NasBench201BenchmarkOriginal') 44 | kwargs['container_name'] = kwargs.get('container_name', 'nasbench_201') 45 | kwargs['latest'] = kwargs.get('container_tag', '0.0.5') 46 | super(Cifar100NasBench201BenchmarkOriginal, self).__init__(**kwargs) 47 | 48 | 49 | class ImageNetNasBench201BenchmarkOriginal(AbstractBenchmarkClient): 50 | def __init__(self, **kwargs): 51 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'ImageNetNasBench201BenchmarkOriginal') 52 | kwargs['container_name'] = kwargs.get('container_name', 'nasbench_201') 53 | kwargs['latest'] = kwargs.get('container_tag', '0.0.5') 54 | super(ImageNetNasBench201BenchmarkOriginal, self).__init__(**kwargs) 55 | 56 | 57 | __all__ = ["Cifar10ValidNasBench201Benchmark", 58 | "Cifar100NasBench201Benchmark", 59 | "ImageNetNasBench201Benchmark", 60 | "Cifar10ValidNasBench201BenchmarkOriginal", 61 | "Cifar100NasBench201BenchmarkOriginal", 62 | "ImageNetNasBench201BenchmarkOriginal"] 63 | -------------------------------------------------------------------------------- 
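The NAS-Bench-201 clients above differ only in the dataset they wrap and in whether the "Original" variant is used; evaluation goes through the same client interface in every case. A hedged sketch of querying one of them is shown below; the `get_configuration_space`/`objective_function` signatures are assumed to mirror the server methods reproduced later in this dump, and the returned keys are illustrative.

```python
# Sketch, assuming the standard HPOBench client interface is exposed through the
# container proxy (method names mirror the Pyro4 server shown further below).
from hpobench.container.benchmarks.nas.nasbench_201 import Cifar10ValidNasBench201Benchmark

bench = Cifar10ValidNasBench201Benchmark(rng=1)      # rng forwarded via **kwargs (assumed)
config = bench.get_configuration_space(seed=1).sample_configuration()
result = bench.objective_function(configuration=config)
print(result)                                        # e.g. a dict with a 'function_value' entry
```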
/test_functions/function_realworld_bo/hpobench/container/benchmarks/nas/tabular_benchmarks.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # -*- coding: utf-8 -*- 3 | 4 | """ Benchmark for the Tabular Benchmark from hpobench/benchmarks/nas/tabular_benchmarks.py """ 5 | 6 | from hpobench.container.client_abstract_benchmark import AbstractBenchmarkClient 7 | 8 | 9 | class SliceLocalizationBenchmark(AbstractBenchmarkClient): 10 | def __init__(self, **kwargs): 11 | kwargs['data_path'] = '/home/fcnet_tabular_benchmarks' 12 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'SliceLocalizationBenchmark') 13 | kwargs['container_name'] = kwargs.get('container_name', 'tabular_benchmarks') 14 | kwargs['latest'] = kwargs.get('container_tag', '0.0.5') 15 | super(SliceLocalizationBenchmark, self).__init__(**kwargs) 16 | 17 | 18 | class ProteinStructureBenchmark(AbstractBenchmarkClient): 19 | def __init__(self, **kwargs): 20 | kwargs['data_path'] = '/home/fcnet_tabular_benchmarks' 21 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'ProteinStructureBenchmark') 22 | kwargs['container_name'] = kwargs.get('container_name', 'tabular_benchmarks') 23 | kwargs['latest'] = kwargs.get('container_tag', '0.0.5') 24 | super(ProteinStructureBenchmark, self).__init__(**kwargs) 25 | 26 | 27 | class NavalPropulsionBenchmark(AbstractBenchmarkClient): 28 | def __init__(self, **kwargs): 29 | kwargs['data_path'] = '/home/fcnet_tabular_benchmarks' 30 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'NavalPropulsionBenchmark') 31 | kwargs['container_name'] = kwargs.get('container_name', 'tabular_benchmarks') 32 | kwargs['latest'] = kwargs.get('container_tag', '0.0.5') 33 | super(NavalPropulsionBenchmark, self).__init__(**kwargs) 34 | 35 | 36 | class ParkinsonsTelemonitoringBenchmark(AbstractBenchmarkClient): 37 | def __init__(self, **kwargs): 38 | kwargs['data_path'] = '/home/fcnet_tabular_benchmarks' 39 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'ParkinsonsTelemonitoringBenchmark') 40 | kwargs['container_name'] = kwargs.get('container_name', 'tabular_benchmarks') 41 | kwargs['latest'] = kwargs.get('container_tag', '0.0.5') 42 | super(ParkinsonsTelemonitoringBenchmark, self).__init__(**kwargs) 43 | 44 | 45 | class SliceLocalizationBenchmarkOriginal(AbstractBenchmarkClient): 46 | def __init__(self, **kwargs): 47 | kwargs['data_path'] = '/home/fcnet_tabular_benchmarks' 48 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'SliceLocalizationBenchmarkOriginal') 49 | kwargs['container_name'] = kwargs.get('container_name', 'tabular_benchmarks') 50 | kwargs['latest'] = kwargs.get('container_tag', '0.0.5') 51 | super(SliceLocalizationBenchmarkOriginal, self).__init__(**kwargs) 52 | 53 | 54 | class ProteinStructureBenchmarkOriginal(AbstractBenchmarkClient): 55 | def __init__(self, **kwargs): 56 | kwargs['data_path'] = '/home/fcnet_tabular_benchmarks' 57 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'ProteinStructureBenchmarkOriginal') 58 | kwargs['container_name'] = kwargs.get('container_name', 'tabular_benchmarks') 59 | kwargs['latest'] = kwargs.get('container_tag', '0.0.5') 60 | super(ProteinStructureBenchmarkOriginal, self).__init__(**kwargs) 61 | 62 | 63 | class NavalPropulsionBenchmarkOriginal(AbstractBenchmarkClient): 64 | def __init__(self, **kwargs): 65 | kwargs['data_path'] = '/home/fcnet_tabular_benchmarks' 66 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'NavalPropulsionBenchmarkOriginal') 
67 | kwargs['container_name'] = kwargs.get('container_name', 'tabular_benchmarks') 68 | kwargs['latest'] = kwargs.get('container_tag', '0.0.5') 69 | super(NavalPropulsionBenchmarkOriginal, self).__init__(**kwargs) 70 | 71 | 72 | class ParkinsonsTelemonitoringBenchmarkOriginal(AbstractBenchmarkClient): 73 | def __init__(self, **kwargs): 74 | kwargs['data_path'] = '/home/fcnet_tabular_benchmarks' 75 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'ParkinsonsTelemonitoringBenchmarkOriginal') 76 | kwargs['container_name'] = kwargs.get('container_name', 'tabular_benchmarks') 77 | kwargs['latest'] = kwargs.get('container_tag', '0.0.5') 78 | super(ParkinsonsTelemonitoringBenchmarkOriginal, self).__init__(**kwargs) 79 | 80 | 81 | __all__ = ["SliceLocalizationBenchmark", "SliceLocalizationBenchmarkOriginal", 82 | "ProteinStructureBenchmark", "ProteinStructureBenchmarkOriginal", 83 | "NavalPropulsionBenchmark", "NavalPropulsionBenchmarkOriginal", 84 | "ParkinsonsTelemonitoringBenchmark", "ParkinsonsTelemonitoringBenchmarkOriginal"] 85 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/container/benchmarks/od/od_benchmarks.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # -*- coding: utf-8 -*- 3 | 4 | """ Benchmark for OCSVM and outlier detection """ 5 | 6 | from hpobench.container.client_abstract_benchmark import AbstractBenchmarkClient 7 | 8 | 9 | class ODAutoencoder(AbstractBenchmarkClient): 10 | def __init__(self, dataset_name: str, **kwargs): 11 | kwargs['dataset_name'] = dataset_name 12 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'ODAutoencoder') 13 | kwargs['container_name'] = kwargs.get('container_name', 'outlier_detection') 14 | kwargs['latest'] = kwargs.get('container_tag', '0.0.1') 15 | super(ODAutoencoder, self).__init__(**kwargs) 16 | 17 | 18 | class ODKernelDensityEstimation(AbstractBenchmarkClient): 19 | def __init__(self, dataset_name: str, **kwargs): 20 | kwargs['dataset_name'] = dataset_name 21 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'ODKernelDensityEstimation') 22 | kwargs['container_name'] = kwargs.get('container_name', 'outlier_detection') 23 | kwargs['latest'] = kwargs.get('container_tag', '0.0.1') 24 | super(ODKernelDensityEstimation, self).__init__(**kwargs) 25 | 26 | 27 | class ODOneClassSupportVectorMachine(AbstractBenchmarkClient): 28 | def __init__(self, dataset_name: str, **kwargs): 29 | kwargs['dataset_name'] = dataset_name 30 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'ODOneClassSupportVectorMachine') 31 | kwargs['container_name'] = kwargs.get('container_name', 'outlier_detection') 32 | kwargs['latest'] = kwargs.get('container_tag', '0.0.1') 33 | super(ODOneClassSupportVectorMachine, self).__init__(**kwargs) 34 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/container/benchmarks/rl/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LamNgo1/cma-meta-algorithm/9aa76dc82f09f4ed5ead6a0cade40645f8850f8e/test_functions/function_realworld_bo/hpobench/container/benchmarks/rl/__init__.py -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/container/benchmarks/rl/cartpole.py: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # -*- coding: utf-8 -*- 3 | 4 | """ Benchmark for the Cartpole Benchmark from hpobench/benchmarks/rl/cartpole.py """ 5 | 6 | from hpobench.container.client_abstract_benchmark import AbstractBenchmarkClient 7 | 8 | 9 | class CartpoleReduced(AbstractBenchmarkClient): 10 | def __init__(self, **kwargs): 11 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'CartpoleReduced') 12 | kwargs['container_name'] = kwargs.get('container_name', 'cartpole') 13 | kwargs['latest'] = kwargs.get('container_tag', '0.0.4') 14 | super(CartpoleReduced, self).__init__(**kwargs) 15 | 16 | 17 | class CartpoleFull(AbstractBenchmarkClient): 18 | def __init__(self, **kwargs): 19 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'CartpoleFull') 20 | kwargs['container_name'] = kwargs.get('container_name', 'cartpole') 21 | kwargs['latest'] = kwargs.get('container_tag', '0.0.4') 22 | super(CartpoleFull, self).__init__(**kwargs) 23 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/container/benchmarks/rl/learna_benchmark.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # -*- coding: utf-8 -*- 3 | 4 | """ Benchmark for the learna benchmark from hpobench/benchmarks/rl/learna_benchmarks.py """ 5 | 6 | from hpobench.container.client_abstract_benchmark import AbstractBenchmarkClient 7 | 8 | 9 | class Learna(AbstractBenchmarkClient): 10 | def __init__(self, **kwargs): 11 | kwargs['data_path'] = '/home/learna/data' 12 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'Learna') 13 | kwargs['container_name'] = kwargs.get('container_name', 'learna_benchmark') 14 | kwargs['latest'] = kwargs.get('container_tag', '0.0.4') 15 | super(Learna, self).__init__(**kwargs) 16 | 17 | 18 | class MetaLearna(AbstractBenchmarkClient): 19 | def __init__(self, **kwargs): 20 | kwargs['data_path'] = '/home/learna/data' 21 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'MetaLearna') 22 | kwargs['container_name'] = kwargs.get('container_name', 'learna_benchmark') 23 | kwargs['latest'] = kwargs.get('container_tag', '0.0.4') 24 | super(MetaLearna, self).__init__(**kwargs) 25 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/container/benchmarks/surrogates/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LamNgo1/cma-meta-algorithm/9aa76dc82f09f4ed5ead6a0cade40645f8850f8e/test_functions/function_realworld_bo/hpobench/container/benchmarks/surrogates/__init__.py -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/container/benchmarks/surrogates/svm_benchmark.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # -*- coding: utf-8 -*- 3 | 4 | """ Benchmark for the svm surrogates Benchmark from hpobench/benchmarks/surrogates/svm_benchmark.py 5 | """ 6 | 7 | from hpobench.container.client_abstract_benchmark import AbstractBenchmarkClient 8 | 9 | 10 | class SurrogateSVMBenchmark(AbstractBenchmarkClient): 11 | def __init__(self, **kwargs): 12 | kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'SurrogateSVMBenchmark') 13 | kwargs['container_name'] = kwargs.get('container_name', 
'surrogate_svm') 14 | kwargs['latest'] = kwargs.get('container_tag', '0.0.2') 15 | super(SurrogateSVMBenchmark, self).__init__(**kwargs) 16 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/container/recipes/Singularity.template: -------------------------------------------------------------------------------- 1 | Bootstrap: docker 2 | From: python:3.7-slim 3 | 4 | %labels 5 | MAINTAINER 6 | VERSION v0.0.1 7 | 8 | %help 9 | This is a template for a Singularity recipe 10 | 11 | %post 12 | apt update -y 13 | apt install build-essential git wget -y 14 | 15 | cd /home \ 16 | && mkdir data && cd data \ 17 | && echo "Here you could download data e.g. using wget" \ 18 | 19 | cd /home \ 20 | && echo "Here you can install everything you need, e.g. dependencies not available on pypi" \ 21 | && echo "Next, we clone and install HPOBench" \ 22 | && git clone https://github.com/automl/HPOBench.git \ 23 | && cd HPOBench \ 24 | && echo "Please never push a recipe that checks out any other branch than development or master" \ 25 | && git checkout development \ 26 | && echo "Here you can install extra requirements additional to singularity" \ 27 | && pip install .[] \ 28 | && echo "Please don't touch the following lines" 29 | && cd / \ 30 | && mkdir /var/lib/hpobench/ \ 31 | && chmod -R 777 /var/lib/hpobench/ \ 32 | && rm -rf /var/lib/apt/lists/* \ 33 | && pip cache purge 34 | 35 | echo "Finally, please change the benchmark in the runscript to point to your benchmark" 36 | 37 | %runscript 38 | python -s /home/HPOBench/hpobench/container/server_abstract_benchmark.py . $@ -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/container/recipes/ml/Singularity.PyBNN: -------------------------------------------------------------------------------- 1 | Bootstrap: docker 2 | From: python:3.7-slim 3 | 4 | %labels 5 | MAINTAINER muelleph@cs.uni-freiburg.de 6 | VERSION v0.0.1 7 | 8 | %post 9 | apt update -y \ 10 | && apt install build-essential git -y \ 11 | && cd /home \ 12 | && git clone https://github.com/automl/HPOBench.git \ 13 | && cd HPOBench \ 14 | && git checkout master \ 15 | && pip install .[pybnn] \ 16 | && cd / \ 17 | && mkdir /var/lib/hpobench/ \ 18 | && chmod -R 777 /var/lib/hpobench/ \ 19 | && rm -rf /var/lib/apt/lists/* \ 20 | && pip cache purge 21 | 22 | %runscript 23 | python -s /home/HPOBench/hpobench/container/server_abstract_benchmark.py ml.pybnn $@ 24 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/container/recipes/ml/Singularity.SupportVectorMachine: -------------------------------------------------------------------------------- 1 | Bootstrap: docker 2 | From: python:3.7-slim 3 | 4 | %labels 5 | MAINTAINER muelleph@cs.uni-freiburg.de 6 | VERSION v0.0.1 7 | 8 | %post 9 | apt update -y \ 10 | && apt install build-essential git -y \ 11 | && cd /home \ 12 | && git clone https://github.com/automl/HPOBench.git \ 13 | && cd HPOBench \ 14 | && git checkout master \ 15 | && pip install .[svm] \ 16 | && cd / \ 17 | && mkdir /var/lib/hpobench/ \ 18 | && chmod -R 777 /var/lib/hpobench/ \ 19 | && rm -rf /var/lib/apt/lists/* \ 20 | && pip cache purge 21 | 22 | %runscript 23 | python -s /home/HPOBench/hpobench/container/server_abstract_benchmark.py ml.svm_benchmark $@ 24 | -------------------------------------------------------------------------------- 
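The ml recipes in this directory all share the same structure: install HPOBench from git with one extras group, create `/var/lib/hpobench/`, and point `%runscript` at `server_abstract_benchmark.py` with the benchmark's module path and class name. How those two arguments are turned into a class is shown by the server file further below; the snippet here is only an illustrative Python equivalent of that dynamic import, with hypothetical argument values.

```python
# Illustrative only: resolving "<importBase> <Benchmark>" the way the container
# server does, but via importlib instead of exec(); argument values are hypothetical.
import importlib

import_base, class_name = "ml.xgboost_benchmark", "XGBoostBenchmark"
module = importlib.import_module(f"hpobench.benchmarks.{import_base}")
Benchmark = getattr(module, class_name)              # the class that gets served over Pyro4
```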
/test_functions/function_realworld_bo/hpobench/container/recipes/ml/Singularity.XGBoostBenchmark: -------------------------------------------------------------------------------- 1 | Bootstrap: docker 2 | From: python:3.7-slim 3 | 4 | %labels 5 | MAINTAINER muelleph@cs.uni-freiburg.de 6 | VERSION v0.0.1 7 | 8 | %post 9 | apt update -y \ 10 | && apt install build-essential git -y \ 11 | && cd /home \ 12 | && git clone https://github.com/automl/HPOBench.git \ 13 | && cd HPOBench \ 14 | && git checkout master \ 15 | && pip install .[xgboost] \ 16 | && cd / \ 17 | && mkdir /var/lib/hpobench/ \ 18 | && chmod -R 777 /var/lib/hpobench/ \ 19 | && pip cache purge \ 20 | && rm -rf /var/lib/apt/lists/* 21 | 22 | 23 | %runscript 24 | python -s /home/HPOBench/hpobench/container/server_abstract_benchmark.py ml.xgboost_benchmark $@ 25 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/container/recipes/ml/Singularity.ml_mmfb: -------------------------------------------------------------------------------- 1 | Bootstrap: docker 2 | From: python:3.8-slim 3 | 4 | %labels 5 | MAINTAINER muelleph@cs.uni-freiburg.de 6 | VERSION v0.0.1 7 | 8 | %post 9 | apt update -y \ 10 | && apt install build-essential git -y \ 11 | 12 | cd /home \ 13 | && git clone https://github.com/automl/HPOBench.git \ 14 | && cd HPOBench \ 15 | && git checkout development \ 16 | && pip install ".[ml_mfbb]" \ 17 | && cd / \ 18 | && mkdir /var/lib/hpobench/ \ 19 | && chmod -R 777 /var/lib/hpobench/ \ 20 | && pip cache purge \ 21 | && rm -rf /var/lib/apt/lists/* 22 | 23 | 24 | %runscript 25 | python -s /home/HPOBench/hpobench/container/server_abstract_benchmark.py ml $@ 26 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/container/recipes/ml/Singularity.ml_tabular_benchmark: -------------------------------------------------------------------------------- 1 | Bootstrap: docker 2 | From: python:3.8-slim 3 | 4 | %labels 5 | MAINTAINER muelleph@cs.uni-freiburg.de 6 | VERSION v0.0.1 7 | 8 | %post 9 | apt update -y \ 10 | && apt install build-essential git -y \ 11 | 12 | cd /home \ 13 | && git clone https://github.com/automl/HPOBench.git \ 14 | && cd HPOBench \ 15 | && git checkout development \ 16 | && pip install ".[ml_tabular_benchmarks]" \ 17 | && cd / \ 18 | && mkdir /var/lib/hpobench/ \ 19 | && chmod -R 777 /var/lib/hpobench/ \ 20 | && pip cache purge \ 21 | && rm -rf /var/lib/apt/lists/* 22 | 23 | 24 | %runscript 25 | python -s /home/HPOBench/hpobench/container/server_abstract_benchmark.py ml $@ 26 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/container/recipes/nas/Singularity.TabularBenchmarks: -------------------------------------------------------------------------------- 1 | Bootstrap: docker 2 | From: python:3.7-slim 3 | 4 | %labels 5 | MAINTAINER muelleph@cs.uni-freiburg.de 6 | VERSION v0.0.1 7 | 8 | %post 9 | apt update -y 10 | apt install build-essential git wget -y 11 | 12 | cd /home \ 13 | && wget http://ml4aad.org/wp-content/uploads/2019/01/fcnet_tabular_benchmarks.tar.gz \ 14 | && tar xf fcnet_tabular_benchmarks.tar.gz 15 | 16 | cd /home \ 17 | && pip install git+https://github.com/google-research/nasbench.git@master \ 18 | && pip install git+https://github.com/automl/nas_benchmarks.git \ 19 | && git clone https://github.com/automl/HPOBench.git \ 20 | && cd HPOBench \ 21 | && git checkout 
master \ 22 | && pip install .[tabular_benchmarks] \ 23 | && cd / \ 24 | && mkdir /var/lib/hpobench/ \ 25 | && chmod -R 777 /var/lib/hpobench/ \ 26 | && rm -rf /var/lib/apt/lists/* \ 27 | && pip cache purge 28 | 29 | %runscript 30 | python -s /home/HPOBench/hpobench/container/server_abstract_benchmark.py nas.tabular_benchmarks $@ 31 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/container/recipes/nas/Singularity.nasbench_101: -------------------------------------------------------------------------------- 1 | Bootstrap: docker 2 | From: python:3.7-slim 3 | 4 | %labels 5 | MAINTAINER muelleph@cs.uni-freiburg.de 6 | VERSION v0.0.1 7 | 8 | %post 9 | apt update -y 10 | apt install build-essential git wget -y 11 | 12 | cd /home \ 13 | && pip install git+https://github.com/google-research/nasbench.git@master \ 14 | && pip install git+https://github.com/automl/nas_benchmarks.git \ 15 | && git clone https://github.com/automl/HPOBench.git \ 16 | && cd HPOBench \ 17 | && git checkout master \ 18 | && pip install .[nasbench_101] \ 19 | && cd / \ 20 | && mkdir /var/lib/hpobench/ \ 21 | && chmod -R 777 /var/lib/hpobench/ \ 22 | && rm -rf /var/lib/apt/lists/* \ 23 | && pip cache purge 24 | 25 | %runscript 26 | python -s /home/HPOBench/hpobench/container/server_abstract_benchmark.py nas.nasbench_101 $@ 27 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/container/recipes/nas/Singularity.nasbench_1shot1: -------------------------------------------------------------------------------- 1 | Bootstrap: docker 2 | From: python:3.7-slim 3 | 4 | %labels 5 | MAINTAINER muelleph@cs.uni-freiburg.de 6 | VERSION v0.0.1 7 | 8 | %environment 9 | export PYTHONPATH=/home/nasbench-1shot1:$PYTHONPATH 10 | 11 | %post 12 | apt update -y \ 13 | && apt install build-essential git wget -y 14 | 15 | cd /home \ 16 | && pip install tensorflow==1.15.0 \ 17 | && pip install git+https://github.com/google-research/nasbench.git@master \ 18 | && git clone https://github.com/automl/nasbench-1shot1.git \ 19 | && git clone https://github.com/automl/HPOBench.git \ 20 | && cd HPOBench \ 21 | && git checkout master \ 22 | && pip install .[nasbench_1shot1] \ 23 | && cd / \ 24 | && mkdir /var/lib/hpobench/ \ 25 | && chmod -R 777 /var/lib/hpobench/ \ 26 | && rm -rf /var/lib/apt/lists/* \ 27 | && pip cache purge 28 | 29 | %runscript 30 | python -s /home/HPOBench/hpobench/container/server_abstract_benchmark.py nas.nasbench_1shot1 $@ -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/container/recipes/nas/Singularity.nasbench_201: -------------------------------------------------------------------------------- 1 | Bootstrap: docker 2 | From: python:3.7-slim 3 | 4 | %labels 5 | MAINTAINER muelleph@cs.uni-freiburg.de 6 | VERSION v0.0.1 7 | 8 | %post 9 | apt update -y 10 | apt install build-essential git -y 11 | 12 | cd /home \ 13 | && git clone https://github.com/automl/HPOBench.git \ 14 | && cd HPOBench \ 15 | && git checkout master \ 16 | && pip install . 
\ 17 | && cd / \ 18 | && mkdir /var/lib/hpobench/ \ 19 | && chmod -R 777 /var/lib/hpobench/ \ 20 | && rm -rf /var/lib/apt/lists/* \ 21 | && pip cache purge 22 | 23 | %runscript 24 | python -s /home/HPOBench/hpobench/container/server_abstract_benchmark.py nas.nasbench_201 $@ 25 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/container/recipes/od/Singularity.ODBenchmarks: -------------------------------------------------------------------------------- 1 | Bootstrap: docker 2 | From: python:3.7-slim 3 | 4 | %labels 5 | MAINTAINER sass@tnt.uni-hannover.de 6 | VERSION v0.0.1 7 | 8 | %post 9 | apt update -y \ 10 | && apt install build-essential git -y \ 11 | && cd /home \ 12 | && git clone https://github.com/automl/HPOBench.git \ 13 | && cd HPOBench \ 14 | && git checkout development \ 15 | && pip install .[outlier_detection] \ 16 | && cd / \ 17 | && mkdir /var/lib/hpobench/ \ 18 | && chmod -R 777 /var/lib/hpobench/ \ 19 | && rm -rf /var/lib/apt/lists/* \ 20 | && pip cache purge 21 | 22 | %runscript 23 | python -s /home/HPOBench/hpobench/container/server_abstract_benchmark.py od.od_benchmarks $@ 24 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/container/recipes/od/Singularity.ODKernelDensityEstimation: -------------------------------------------------------------------------------- 1 | Bootstrap: docker 2 | From: python:3.7-slim 3 | 4 | %labels 5 | MAINTAINER sass@tnt.uni-hannover.de 6 | VERSION v0.0.1 7 | 8 | %post 9 | apt update -y \ 10 | && apt install build-essential git -y \ 11 | && cd /home \ 12 | && git clone https://github.com/automl/HPOBench.git \ 13 | && cd HPOBench \ 14 | && git checkout development \ 15 | && pip install .[outlier_detection] \ 16 | && cd / \ 17 | && mkdir /var/lib/hpobench/ \ 18 | && chmod -R 777 /var/lib/hpobench/ \ 19 | && rm -rf /var/lib/apt/lists/* \ 20 | && pip cache purge 21 | 22 | %runscript 23 | python -s /home/HPOBench/hpobench/container/server_abstract_benchmark.py od.od_kde $@ 24 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/container/recipes/rl/Singularity.Cartpole: -------------------------------------------------------------------------------- 1 | Bootstrap: docker 2 | From: python:3.7-slim 3 | 4 | %labels 5 | MAINTAINER muelleph@cs.uni-freiburg.de 6 | VERSION v0.0.1 7 | 8 | %post 9 | apt update -y \ 10 | && apt install build-essential git -y \ 11 | && pip install numpy==1.18.1 cython==0.29.14 12 | 13 | cd /home \ 14 | && git clone https://github.com/automl/HPOBench.git \ 15 | && cd HPOBench \ 16 | && git checkout master \ 17 | && pip install .[cartpole] \ 18 | && cd / \ 19 | && mkdir /var/lib/hpobench/ \ 20 | && chmod -R 777 /var/lib/hpobench/ \ 21 | && rm -rf /var/lib/apt/lists/* \ 22 | && pip cache purge 23 | 24 | %runscript 25 | python -s /home/HPOBench/hpobench/container/server_abstract_benchmark.py rl.cartpole $@ 26 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/container/recipes/rl/Singularity.learnaBenchmark: -------------------------------------------------------------------------------- 1 | Bootstrap: docker 2 | From: python:3.7-slim 3 | 4 | %labels 5 | MAINTAINER muelleph@cs.uni-freiburg.de 6 | VERSION v0.0.1 7 | 8 | %post 9 | apt update -y \ 10 | && apt install build-essential git wget -y 11 | 12 | cd /home \ 13 | 
&& git clone --single-branch --branch development https://github.com/PhMueller/learna.git \ 14 | && cd learna \ 15 | && ./thirdparty/miniconda/make_miniconda.sh \ 16 | && ./thirdparty/miniconda/miniconda/bin/conda env create -f environment.yml \ 17 | && ./thirdparty/miniconda/miniconda/envs/learna/bin/python -m pip install docutils==0.16 \ 18 | && ./thirdparty/miniconda/miniconda/envs/learna/bin/python -m pip install tensorforce==0.3.3 \ 19 | && ./thirdparty/miniconda/miniconda/envs/learna/bin/python -m pip install . \ 20 | && ./thirdparty/miniconda/miniconda/envs/learna/bin/python -m learna.data.download_and_build_eterna ./learna/data/secondaries_to_single_files.sh data/eterna data/eterna/interim/eterna.txt \ 21 | && ./learna/data/download_and_build_rfam_taneda.sh \ 22 | && ./learna/data/download_and_build_rfam_learn.sh \ 23 | && mv data/rfam_learn/test data/rfam_learn_test \ 24 | && mv data/rfam_learn/validation data/rfam_learn_validation \ 25 | && mv data/rfam_learn/train data/rfam_learn_train \ 26 | && rm -rf data/rfam_learn \ 27 | && chmod -R 755 data/ \ 28 | && cd /home \ 29 | && git clone https://github.com/automl/HPOBench.git \ 30 | && cd HPOBench \ 31 | && git checkout master \ 32 | && ../learna/thirdparty/miniconda/miniconda/envs/learna/bin/python -m pip install . \ 33 | && cd / \ 34 | && mkdir /var/lib/hpobench/ \ 35 | && chmod -R 777 /var/lib/hpobench/ \ 36 | && rm -rf /var/lib/apt/lists/* \ 37 | && pip cache purge 38 | 39 | %runscript 40 | /home/learna/thirdparty/miniconda/miniconda/envs/learna/bin/python -s \ 41 | /home/HPOBench/hpobench/container/server_abstract_benchmark.py rl.learna_benchmark $@ 42 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/container/recipes/surrogates/Singularity.ParamnetBenchmark: -------------------------------------------------------------------------------- 1 | Bootstrap: docker 2 | From: python:3.7-slim 3 | 4 | %labels 5 | MAINTAINER muelleph@cs.uni-freiburg.de 6 | VERSION v0.0.1 7 | 8 | %post 9 | apt update -y \ 10 | && apt install build-essential git wget -y 11 | 12 | cd /home \ 13 | && git clone https://github.com/automl/HPOBench.git \ 14 | && cd HPOBench \ 15 | && git checkout master \ 16 | && pip install --upgrade .[paramnet] \ 17 | && cd / \ 18 | && mkdir /var/lib/hpobench/ \ 19 | && chmod -R 777 /var/lib/hpobench/ \ 20 | && rm -rf /var/lib/apt/lists/* \ 21 | && pip cache purge 22 | 23 | %runscript 24 | python -s /home/HPOBench/hpobench/container/server_abstract_benchmark.py surrogates.paramnet_benchmark $@ -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/container/recipes/surrogates/Singularity.SupportVectorMachine: -------------------------------------------------------------------------------- 1 | Bootstrap: docker 2 | From: python:3.7-slim 3 | 4 | %labels 5 | MAINTAINER muelleph@cs.uni-freiburg.de 6 | VERSION v0.0.1 7 | 8 | %post 9 | 10 | %post 11 | apt update -y \ 12 | && apt install build-essential git wget -y 13 | 14 | cd /home \ 15 | && git clone https://github.com/automl/HPOBench.git \ 16 | && cd HPOBench \ 17 | && git checkout master \ 18 | && pip install --upgrade .[paramnet] \ 19 | && cd / \ 20 | && mkdir /var/lib/hpobench/ \ 21 | && chmod -R 777 /var/lib/hpobench/ \ 22 | && rm -rf /var/lib/apt/lists/* \ 23 | && pip cache purge 24 | 25 | %runscript 26 | python -s /home/HPOBench/hpobench/container/server_abstract_benchmark.py surrogates.svm_benchmark $@ 
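Every `%runscript` above ultimately starts the Pyro4 server defined in `server_abstract_benchmark.py`, which follows next. The client side (`client_abstract_benchmark.py`) talks to that server over a unix socket; a hand-rolled client along those lines might look like the sketch below. The object id and socket file name are inferred from the registration code in the server, while the socket directory and the empty kwargs payload are assumptions.

```python
# Minimal sketch of talking to the benchmark server over its unix socket.
# '<socket_id>.unixsock' and '<socket_id>_unix.sock' follow the registration in
# BenchmarkServer below; the socket directory stands in for HPOBenchConfig().socket_dir.
import json
import Pyro4

socket_dir = "/tmp/hpobench_sockets"                 # assumed location of the socket directory
socket_id = "abc123"                                 # hypothetical id agreed on with the server
uri = f"PYRO:{socket_id}.unixsock@./u:{socket_dir}/{socket_id}_unix.sock"

with Pyro4.Proxy(uri) as proxy:
    proxy.init_benchmark(json.dumps({}))             # JSON-encoded kwargs for the wrapped Benchmark
    cs_json = proxy.get_configuration_space(json.dumps({"seed": 1}))
    print(cs_json[:200])                             # ConfigSpace definition, serialized as JSON
```

Note that all payloads cross the Pyro4 boundary as JSON strings, which is why the server decodes every argument with `BenchmarkDecoder` and encodes every result with `BenchmarkEncoder`.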
-------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/container/server_abstract_benchmark.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # -*- coding: utf-8 -*- 3 | 4 | """ Defines the server-side for using benchmarks with containers 5 | 6 | BenchmarkServer defines the server side for the communication between the 7 | container (benchmark) and the client. 8 | It starts the Pyro4 server and awaits commands from the client. Make sure 9 | that all payloads are json-serializable. 10 | """ 11 | 12 | import argparse 13 | import json 14 | import logging 15 | import os 16 | 17 | import Pyro4 18 | from ConfigSpace.read_and_write import json as csjson 19 | 20 | from hpobench.config import HPOBenchConfig 21 | from hpobench.util.container_utils import BenchmarkEncoder, BenchmarkDecoder 22 | 23 | # Read in the verbosity level from the environment variable HPOBENCH_DEBUG 24 | log_level_str = os.environ.get('HPOBENCH_DEBUG', 'false') 25 | LOG_LEVEL = logging.DEBUG if log_level_str == 'true' else logging.INFO 26 | 27 | root = logging.getLogger() 28 | root.setLevel(LOG_LEVEL) 29 | 30 | logger = logging.getLogger('BenchmarkServer') 31 | 32 | 33 | @Pyro4.expose 34 | @Pyro4.behavior(instance_mode="single") 35 | class BenchmarkServer: 36 | def __init__(self, socket_id): 37 | self.pyro_running = True 38 | config = HPOBenchConfig() 39 | self.benchmark = None 40 | 41 | self.socket_id = socket_id 42 | socket_path = config.socket_dir / (self.socket_id + "_unix.sock") 43 | logger.debug(f'Socket Path: {socket_path}') 44 | logger.info(f'Logging level: {logger.level}') 45 | 46 | if socket_path.exists(): 47 | os.remove(socket_path) 48 | self.daemon = Pyro4.Daemon(unixsocket=str(socket_path)) 49 | 50 | _ = self.daemon.register(self, self.socket_id + ".unixsock") 51 | 52 | # start the event loop of the server to wait for calls 53 | self.daemon.requestLoop(loopCondition=lambda: self.pyro_running) 54 | 55 | def init_benchmark(self, kwargs_str): 56 | try: 57 | kwargs = json.loads(kwargs_str, cls=BenchmarkDecoder) 58 | self.benchmark = Benchmark(**kwargs) # noqa: F821 59 | logger.info('Server: Connected Successfully') 60 | except Exception as e: 61 | logger.exception(e) 62 | 63 | def get_configuration_space(self, kwargs_str: str) -> str: 64 | logger.debug(f'Server: get_config_space: kwargs_str: {kwargs_str}') 65 | 66 | kwargs = json.loads(kwargs_str, cls=BenchmarkDecoder) 67 | seed = kwargs.get('seed', None) 68 | 69 | result = self.benchmark.get_configuration_space(seed=seed) 70 | logger.debug(f'Server: Configspace: {result}') 71 | return csjson.write(result, indent=None) 72 | 73 | def get_fidelity_space(self, kwargs_str: str) -> str: 74 | logger.debug(f'Server: get_fidelity_space: kwargs_str: {kwargs_str}') 75 | 76 | kwargs = json.loads(kwargs_str, cls=BenchmarkDecoder) 77 | seed = kwargs.get('seed', None) 78 | 79 | result = self.benchmark.get_fidelity_space(seed=seed) 80 | logger.debug(f'Server: Fidelity Space: {result}') 81 | return csjson.write(result, indent=None) 82 | 83 | def objective_function(self, c_str: str, f_str: str, kwargs_str: str) -> str: 84 | logger.debug(f'Server: objective_function: c_str: {c_str} f_str: {f_str} kwargs_str: {kwargs_str}') 85 | 86 | configuration = json.loads(c_str, cls=BenchmarkDecoder) 87 | fidelity = json.loads(f_str, cls=BenchmarkDecoder) 88 | kwargs = json.loads(kwargs_str, cls=BenchmarkDecoder) 89 | 90 | result = 
self.benchmark.objective_function(configuration=configuration, fidelity=fidelity, **kwargs) 91 | return json.dumps(result, indent=None, cls=BenchmarkEncoder) 92 | 93 | def objective_function_test(self, c_str: str, f_str: str, kwargs_str: str) -> str: 94 | logger.debug(f'Server: objective_function: c_str: {c_str} f_str: {f_str} kwargs_str: {kwargs_str}') 95 | 96 | configuration = json.loads(c_str, cls=BenchmarkDecoder) 97 | fidelity = json.loads(f_str, cls=BenchmarkDecoder) 98 | kwargs = json.loads(kwargs_str, cls=BenchmarkDecoder) 99 | 100 | result = self.benchmark.objective_function_test(configuration=configuration, fidelity=fidelity, **kwargs) 101 | return json.dumps(result, indent=None, cls=BenchmarkEncoder) 102 | 103 | def get_meta_information(self): 104 | logger.debug('Server: get_meta_info called') 105 | return json.dumps(self.benchmark.get_meta_information(), indent=None, cls=BenchmarkEncoder) 106 | 107 | @Pyro4.oneway # in case call returns much later than daemon.shutdown 108 | def shutdown(self): 109 | logger.debug('Server: Shutting down...') 110 | Pyro4.config.COMMTIMEOUT = 0.5 111 | self.pyro_running = False 112 | self.daemon.shutdown() 113 | 114 | 115 | if __name__ == "__main__": 116 | Pyro4.config.REQUIRE_EXPOSE = False 117 | 118 | parser = argparse.ArgumentParser(prog='server_abstract_benchmark.py', 119 | description='HPOBench Container Server', 120 | usage='%(prog)s ') 121 | parser.add_argument('importBase', type=str, 122 | help='Relative path to benchmark file in hpobench/benchmarks, e.g. ml.xgboost_benchmark') 123 | parser.add_argument('benchmark', type=str, 124 | help='Classname of the benchmark, e.g. XGBoostBenchmark') 125 | parser.add_argument('socket_id', type=str, 126 | help='socket_id for pyro-server') 127 | 128 | args = parser.parse_args() 129 | 130 | # pylint: disable=logging-fstring-interpolation 131 | exec(f"from hpobench.benchmarks.{args.importBase} import {args.benchmark} as Benchmark") 132 | bp = BenchmarkServer(args.socket_id) 133 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/dependencies/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LamNgo1/cma-meta-algorithm/9aa76dc82f09f4ed5ead6a0cade40645f8850f8e/test_functions/function_realworld_bo/hpobench/dependencies/__init__.py -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/dependencies/ml/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LamNgo1/cma-meta-algorithm/9aa76dc82f09f4ed5ead6a0cade40645f8850f8e/test_functions/function_realworld_bo/hpobench/dependencies/ml/__init__.py -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/dependencies/od/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LamNgo1/cma-meta-algorithm/9aa76dc82f09f4ed5ead6a0cade40645f8850f8e/test_functions/function_realworld_bo/hpobench/dependencies/od/__init__.py -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/dependencies/od/backbones/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/LamNgo1/cma-meta-algorithm/9aa76dc82f09f4ed5ead6a0cade40645f8850f8e/test_functions/function_realworld_bo/hpobench/dependencies/od/backbones/__init__.py -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/dependencies/od/backbones/mlp.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | 4 | from hpobench.dependencies.od.utils.activations import ACTIVATIONS 5 | 6 | 7 | class Block(nn.Module): 8 | def __init__(self, in_channels, out_channels, activation, batch_normalization=False, dropout_rate=0.0): 9 | super().__init__() 10 | 11 | self.batch_normalization = batch_normalization 12 | self.linear_layer = nn.Linear(in_features=in_channels, out_features=out_channels) 13 | self.batch_normalization_layer = nn.BatchNorm1d(out_channels) 14 | self.activation_layer = activation 15 | self.dropout_layer = nn.Dropout(dropout_rate) 16 | 17 | def forward(self, x1, x2=None): 18 | if x2 is not None: 19 | x = torch.cat([x1, x2], dim=1) 20 | else: 21 | x = x1 22 | 23 | z = self.linear_layer(x) 24 | 25 | if self.batch_normalization: 26 | # Batch normalization causes some troubles if it comes 27 | # to validation sanity run since mean/variance 28 | # are not initialized to that moment 29 | try: 30 | z = self.batch_normalization_layer(z) 31 | except: # noqa E722 32 | pass 33 | 34 | z = self.activation_layer(z) 35 | z = self.dropout_layer(z) 36 | 37 | return z 38 | 39 | 40 | class MLP(nn.Module): 41 | def __init__(self, num_features, config): 42 | super(MLP, self).__init__() 43 | self.config = config 44 | self.num_features = num_features 45 | self._build_backbone() 46 | 47 | def _get_activation(self): 48 | if self.config["activation"] == "swish" or self.config["activation"] == "swish-1": 49 | train_beta = False 50 | if self.config["activation"] == "swish": 51 | train_beta = True 52 | 53 | return ACTIVATIONS["swish"](train_beta=train_beta) 54 | else: 55 | return ACTIVATIONS[self.config["activation"]]() 56 | 57 | def _build_backbone(self): 58 | features = self.num_features 59 | activation = self._get_activation() 60 | latent_dim = self.config["num_latent_units"] 61 | 62 | encoder_features = [features] 63 | for i in range(1, self.config["num_layers"]+1): 64 | encoder_features += [self.config[f"num_units_layer_{i}"]] 65 | 66 | decoder_features = [latent_dim] + encoder_features[::-1] 67 | 68 | features = encoder_features + decoder_features 69 | in_features = features.copy() # We need different in_features if we use skip connections 70 | 71 | if self.config["skip_connection"]: 72 | # If skip connection 73 | # Usually we'd have the following: 74 | # 768 -> 128 -> 64 -> 8 -> 64 -> 128 -> 768 75 | # But since we merge the layers we get 76 | # 768 -> 128 -> 64 -> 8+64 -> 64+128 -> 128+768 -> 768 77 | decoder_index = int(len(features) / 2) 78 | encoder_index = decoder_index - 1 79 | for i in range(decoder_index, len(features)-1): 80 | in_features[i] = features[decoder_index] + features[encoder_index] 81 | encoder_index -= 1 82 | decoder_index += 1 83 | 84 | decoder_in_features = in_features[int(len(features) / 2):] 85 | else: 86 | decoder_in_features = in_features[int(len(features) / 2):] 87 | 88 | # Build encoder 89 | self.encoder_blocks = [] 90 | for i in range(len(encoder_features)-1): 91 | self.encoder_blocks += [ 92 | Block( 93 | encoder_features[i], 94 | encoder_features[i+1], 95 | activation, 96 | 
batch_normalization=self.config["batch_normalization"], 97 | dropout_rate=0.0 if not self.config["dropout"] else self.config["dropout_rate"] 98 | ) 99 | ] 100 | 101 | # Build decoder 102 | self.decoder_blocks = [] 103 | for i in range(len(decoder_features)-2): 104 | self.decoder_blocks += [ 105 | Block( 106 | decoder_in_features[i], 107 | decoder_features[i+1], 108 | activation, 109 | batch_normalization=self.config["batch_normalization"], 110 | dropout_rate=0.0 if not self.config["dropout"] else self.config["dropout_rate"] 111 | ) 112 | ] 113 | 114 | self.latent_dim = latent_dim 115 | self.encoder_features = encoder_features 116 | self.decoder_features = decoder_features 117 | self.decoder_in_features = decoder_in_features 118 | 119 | # Make sure the parameters are within the model 120 | self.encoder_blocks = nn.Sequential(*self.encoder_blocks) 121 | self.decoder_blocks = nn.Sequential(*self.decoder_blocks) 122 | 123 | def encode(self, x): 124 | encoder_outputs = [] 125 | 126 | output = x 127 | encoder_outputs += [output] 128 | 129 | # Processing encoder 130 | for block in self.encoder_blocks: 131 | output = block(output) 132 | encoder_outputs += [output] 133 | 134 | return encoder_outputs 135 | 136 | def decode(self, z, encoder_outputs=None): 137 | # Use encoder outputs only if skip connection is used 138 | if not self.config["skip_connection"]: 139 | encoder_outputs = None 140 | 141 | if encoder_outputs is not None: 142 | encoder_outputs = encoder_outputs[::-1] 143 | 144 | # Processing decoder 145 | output = z 146 | for i, block in enumerate(self.decoder_blocks): 147 | if encoder_outputs is not None: 148 | output = block(output, encoder_outputs[i]) 149 | else: 150 | output = block(output) 151 | 152 | # concat if skip connection available 153 | if encoder_outputs is not None: 154 | output = torch.cat([output, encoder_outputs[-1]], dim=1) 155 | 156 | return output 157 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/dependencies/od/callbacks/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LamNgo1/cma-meta-algorithm/9aa76dc82f09f4ed5ead6a0cade40645f8850f8e/test_functions/function_realworld_bo/hpobench/dependencies/od/callbacks/__init__.py -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/dependencies/od/callbacks/checkpoint_saver.py: -------------------------------------------------------------------------------- 1 | import pytorch_lightning as pl 2 | 3 | 4 | class CheckpointSaver(pl.Callback): 5 | def __init__(self): 6 | self.best_checkpoint = None 7 | 8 | def save(self, trainer, model): 9 | """ 10 | Saves the best weights locally. 
11 | """ 12 | 13 | checkpoint = { 14 | 'should_stop': trainer.should_stop, 15 | 'current_epoch': trainer.current_epoch, 16 | 'weights': model.state_dict(), 17 | } 18 | 19 | self.best_checkpoint = checkpoint 20 | 21 | def load(self, trainer, model): 22 | checkpoint = None 23 | if hasattr(self, "best_checkpoint"): 24 | checkpoint = self.best_checkpoint 25 | 26 | if checkpoint is not None: 27 | trainer.should_stop = checkpoint["should_stop"] 28 | trainer.current_epoch = checkpoint["current_epoch"] + 1 29 | model.load_state_dict(checkpoint["weights"]) 30 | 31 | def on_validation_end(self, trainer, model): 32 | # We already saved if the trainer has stopped 33 | if not trainer.should_stop: 34 | # Save if it's the best epoch 35 | if model.val_auprs[-1] == max(model.val_auprs): 36 | self.save(trainer, model) 37 | 38 | def on_test_start(self, trainer, model): 39 | # Load best weights here 40 | self.load(trainer, model) 41 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/dependencies/od/callbacks/earlystopping.py: -------------------------------------------------------------------------------- 1 | import pytorch_lightning as pl 2 | 3 | 4 | class EarlyStopping(pl.Callback): 5 | def __init__(self, activated: bool, patience: int, worst_loss: float): 6 | self.patience = patience 7 | self.lowest_loss = worst_loss 8 | self.counter = 0 9 | self.activated = activated 10 | 11 | def setup(self, trainer, model, stage): 12 | if not self.activated: 13 | trainer.should_stop = False 14 | 15 | def on_validation_end(self, trainer, model): 16 | if not self.activated: 17 | return 18 | 19 | last_loss = model.val_auprs[-1] 20 | 21 | if last_loss > self.lowest_loss: 22 | self.counter += 1 23 | if self.counter >= self.patience: 24 | trainer.should_stop = True 25 | else: 26 | self.lowest_loss = last_loss 27 | self.counter = 0 28 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/dependencies/od/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LamNgo1/cma-meta-algorithm/9aa76dc82f09f4ed5ead6a0cade40645f8850f8e/test_functions/function_realworld_bo/hpobench/dependencies/od/models/__init__.py -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/dependencies/od/models/autoencoder.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytorch_lightning as pl 3 | import torch 4 | import torch.nn.functional as F 5 | from sklearn.metrics import precision_recall_curve, auc 6 | from torch import nn 7 | 8 | 9 | class Autoencoder(pl.LightningModule): 10 | def __init__(self, backbone, config): 11 | super().__init__() 12 | 13 | self.config = config 14 | self.backbone = backbone 15 | 16 | self.train_losses = [] 17 | self.train_auprs = [] 18 | self.val_auprs = [] 19 | self.test_aupr = None 20 | 21 | # Setup latent layer 22 | self.latent_layer = nn.Linear( 23 | in_features=self.backbone.encoder_features[-1], 24 | out_features=self.backbone.latent_dim 25 | ) 26 | 27 | # Setup output layer 28 | self.output_layer = nn.Linear( 29 | in_features=self.backbone.decoder_in_features[-2], 30 | out_features=self.backbone.decoder_features[-1] 31 | ) 32 | 33 | def configure_optimizers(self): 34 | optimizer = torch.optim.AdamW( 35 | self.parameters(), 36 | lr=self.config["lr"], 
37 | betas=(self.config["beta1"], self.config["beta2"]), 38 | weight_decay=self.config["weight_decay"] 39 | ) 40 | 41 | return optimizer 42 | 43 | @staticmethod 44 | def calculate_aupr(labels, scores): 45 | precision, recall, _ = precision_recall_curve(labels, scores) 46 | aupr = auc(recall, precision) 47 | 48 | return aupr 49 | 50 | @staticmethod 51 | def calculate_loss(x, x_hat): 52 | return F.mse_loss(x_hat, x) 53 | 54 | def forward(self, x): 55 | # Encode first 56 | encoder_outputs = self.backbone.encode(x) 57 | z = self.latent_layer(encoder_outputs[-1]) 58 | 59 | # Decode 60 | x_hat = self.output_layer(self.backbone.decode(z, encoder_outputs)) 61 | 62 | return x_hat 63 | 64 | def training_step(self, batch, _): 65 | x, _ = batch 66 | x_hat = self(x) 67 | loss = self.calculate_loss(x, x_hat) 68 | 69 | return loss 70 | 71 | def training_epoch_end(self, outputs): 72 | losses = torch.stack([o['loss'] for o in outputs]).cpu().numpy().flatten() 73 | self.train_losses.append(np.mean(losses)) 74 | 75 | def validation_step(self, batch, _): 76 | x, y = batch 77 | x_hat = self(x) 78 | loss = self.calculate_loss(x, x_hat) 79 | 80 | return { 81 | 'labels': y.flatten(), 82 | 'loss': loss 83 | } 84 | 85 | def validation_epoch_end(self, outputs): 86 | labels = torch.stack([o['labels'] for o in outputs]).cpu().numpy().flatten() 87 | losses = torch.stack([o['loss'] for o in outputs]).cpu().numpy().flatten() 88 | 89 | aupr = self.calculate_aupr(labels, losses) 90 | self.val_auprs.append(aupr) 91 | 92 | def test_step(self, batch, _): 93 | x, y = batch 94 | x_hat = self(x) 95 | loss = self.calculate_loss(x, x_hat) 96 | 97 | return { 98 | 'labels': y.flatten(), 99 | 'loss': loss 100 | } 101 | 102 | def test_epoch_end(self, outputs): 103 | labels = np.array([o['labels'].item() for o in outputs]).flatten() 104 | losses = np.array([o['loss'].item() for o in outputs]).flatten() 105 | 106 | self.test_aupr = self.calculate_aupr(labels, losses) 107 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/dependencies/od/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LamNgo1/cma-meta-algorithm/9aa76dc82f09f4ed5ead6a0cade40645f8850f8e/test_functions/function_realworld_bo/hpobench/dependencies/od/utils/__init__.py -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/dependencies/od/utils/activations.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.nn.parameter import Parameter 4 | 5 | 6 | class Swish(nn.Module): 7 | def __init__(self, train_beta=False): 8 | super(Swish, self).__init__() 9 | if train_beta: 10 | self.weight = Parameter(torch.Tensor([1.])) 11 | else: 12 | self.weight = 1.0 13 | 14 | def forward(self, input): 15 | return input * torch.sigmoid(self.weight * input) 16 | 17 | 18 | ACTIVATIONS = { 19 | "relu": torch.nn.ReLU, 20 | "tanh": torch.nn.Tanh, 21 | "sigmoid": torch.nn.Sigmoid, 22 | "swish": Swish 23 | } 24 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/dependencies/od/utils/scaler.py: -------------------------------------------------------------------------------- 1 | from sklearn.preprocessing import MinMaxScaler, StandardScaler 2 | 3 | 4 | def get_fitted_scaler(X_train, name=None): 5 | """ 6 | 
Instantiates a scaler by a given name and fits the scaler 7 | with X_train. 8 | """ 9 | 10 | if name == "MinMax": 11 | scaler = MinMaxScaler(feature_range=(0, 1), copy=True) 12 | elif name == "Standard": 13 | scaler = StandardScaler(copy=True) 14 | elif name is None or name == "None": 15 | return None 16 | else: 17 | raise NotImplementedError() 18 | 19 | scaler.fit(X_train) 20 | return lambda x: scaler.transform(x) 21 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/util/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LamNgo1/cma-meta-algorithm/9aa76dc82f09f4ed5ead6a0cade40645f8850f8e/test_functions/function_realworld_bo/hpobench/util/__init__.py -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/util/clean_up_script.py: -------------------------------------------------------------------------------- 1 | from hpobench import config_file 2 | 3 | import shutil 4 | import logging 5 | logger = logging.getLogger('Clean-up') 6 | logger.setLevel(logging.INFO) 7 | 8 | 9 | def _ask_for_del(directory, name): 10 | logger.info(f'Going to remove the {name} directory {directory}') 11 | inp = input('Do you want to proceed? [N|y] ') 12 | if inp in ['y', 'j', 'Y']: 13 | shutil.rmtree(directory) 14 | logger.info(f'Successfully removed the {name} directory.') 15 | 16 | 17 | def delete_container(): 18 | _ask_for_del(config_file.container_dir, 'container') 19 | 20 | 21 | def clear_socket_dir(): 22 | _ask_for_del(config_file.socket_dir, 'socket') 23 | 24 | 25 | def clear_cache(): 26 | _ask_for_del(config_file.cache_dir, 'cache') 27 | 28 | 29 | def clear_data_dir(): 30 | _ask_for_del(config_file.data_dir, 'data') 31 | 32 | 33 | if __name__ == '__main__': 34 | import argparse 35 | parser = argparse.ArgumentParser() 36 | parser.add_argument("--clear_all", help="Remove containers, clear socket, data, and cache directory", 37 | action="store_true") 38 | parser.add_argument("--clear_container", help="Delete the HPOBench container", action="store_true") 39 | parser.add_argument("--clear_cache", help="Delete the HPOBench cache", action="store_true") 40 | parser.add_argument("--clear_data", help="Delete the HPOBench data", action="store_true") 41 | parser.add_argument("--clear_socket", help="Delete the HPOBench socket", action="store_true") 42 | args = parser.parse_args() 43 | 44 | if args.clear_all or args.clear_container: 45 | delete_container() 46 | if args.clear_all or args.clear_cache: 47 | clear_cache() 48 | if args.clear_all or args.clear_data: 49 | clear_data_dir() 50 | if args.clear_all or args.clear_socket: 51 | clear_socket_dir() 52 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/util/container_utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import importlib 3 | import json 4 | import numpy as np 5 | import enum 6 | 7 | from typing import Any, Union 8 | 9 | from hpobench.util.rng_helper import serialize_random_state, deserialize_random_state 10 | 11 | 12 | class BenchmarkEncoder(json.JSONEncoder): 13 | """ Json Encoder to save tuple and or numpy arrays | numpy floats / integer. 14 | from: https://stackoverflow.com/questions/15721363/preserve-python-tuples-with-json 15 | 16 | Serializing tuple/numpy array may not work. 
We need to annotate those types, to reconstruct them correctly. 17 | """ 18 | # pylint: disable=arguments-differ 19 | def encode(self, obj): 20 | def hint(item): 21 | # Annotate the different item types 22 | if isinstance(item, tuple): 23 | return {'__type__': 'tuple', '__items__': [hint(e) for e in item]} 24 | if isinstance(item, np.ndarray): 25 | return {'__type__': 'np.ndarray', '__items__': item.tolist()} 26 | if isinstance(item, np.floating): 27 | return {'__type__': 'np.float', '__items__': float(item)} 28 | if isinstance(item, np.integer): 29 | return {'__type__': 'np.int', '__items__': item.tolist()} 30 | if isinstance(item, enum.Enum): 31 | return str(item) 32 | if isinstance(item, np.random.RandomState): 33 | rs = serialize_random_state(item) 34 | return {'__type__': 'random_state', '__items__': rs} 35 | 36 | # If it is a container data structure, go also through the items. 37 | if isinstance(item, list): 38 | return [hint(e) for e in item] 39 | if isinstance(item, dict): 40 | return {key: hint(value) for key, value in item.items()} 41 | return item 42 | 43 | return super(BenchmarkEncoder, self).encode(hint(obj)) 44 | 45 | 46 | class BenchmarkDecoder(json.JSONDecoder): 47 | def __init__(self, *args, **kwargs): 48 | json.JSONDecoder.__init__(self, object_hook=self.object_hook, *args, **kwargs) 49 | 50 | def object_hook(self, obj: Any) -> Union[Union[tuple, np.ndarray, float, float, int], Any]: 51 | if '__type__' in obj: 52 | __type = obj['__type__'] 53 | 54 | if __type == 'tuple': 55 | return tuple(obj['__items__']) 56 | if __type == 'np.ndarray': 57 | return np.array(obj['__items__']) 58 | if __type == 'np.float': 59 | return np.float(obj['__items__']) 60 | if __type == 'np.int': 61 | return np.int(obj['__items__']) 62 | if __type == 'random_state': 63 | return deserialize_random_state(obj['__items__']) 64 | return obj 65 | 66 | 67 | def __reload_module(): 68 | """ 69 | The env variable which enables the debug level is read in during the import of the client module. 70 | Reloading the module, re-reads the env variable and therefore changes the level. 71 | """ 72 | import hpobench.container.client_abstract_benchmark as client 73 | importlib.reload(client) 74 | 75 | 76 | def enable_container_debug(): 77 | """ Sets the environment variable "HPOBENCH_DEBUG" to true. The container checks this variable and if set to true, 78 | enables debugging on the container side. """ 79 | os.environ['HPOBENCH_DEBUG'] = 'true' 80 | __reload_module() 81 | 82 | 83 | def disable_container_debug(): 84 | os.environ['HPOBENCH_DEBUG'] = 'false' 85 | __reload_module() 86 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/util/dependencies.py: -------------------------------------------------------------------------------- 1 | import re 2 | from distutils.version import LooseVersion 3 | 4 | import pkg_resources 5 | 6 | RE_PATTERN = re.compile( 7 | r'^(?P[\w\-]+)((?P==|>=|>)' 8 | r'(?P(\d+)?(\.[a-zA-Z0-9]+)?(\.\d+)?))?$') 9 | 10 | """Generic code to verify package versions (a.k.a. dependencies). 11 | 12 | Written by Anatolii Domashnev (@ayaro) for auto-sklearn. Licensed under a BSD 13 | 3-clause license. 
14 | 15 | See the following link for the original PR: 16 | https://github.com/Ayaro/auto-sklearn/commit/ 17 | f59c6e9751061ec0e68a402507c83f6c10ae5bbd 18 | """ 19 | 20 | 21 | def verify_packages(packages): 22 | if not packages: 23 | return 24 | if isinstance(packages, str): 25 | packages = packages.splitlines() 26 | 27 | for package in packages: 28 | if not package: 29 | continue 30 | 31 | match = RE_PATTERN.match(package) 32 | if match: 33 | name = match.group('name') 34 | operation = match.group('operation') 35 | version = match.group('version') 36 | _verify_package(name, operation, version) 37 | else: 38 | raise ValueError('Unable to read requirement: %s' % package) 39 | 40 | 41 | def _verify_package(name, operation, version): 42 | try: 43 | module = pkg_resources.get_distribution(name) 44 | except pkg_resources.DistributionNotFound: 45 | raise MissingPackageError(name) 46 | 47 | if not operation: 48 | return 49 | 50 | required_version = LooseVersion(version) 51 | installed_version = LooseVersion(module.version) 52 | 53 | if operation == '==': 54 | check = required_version == installed_version 55 | elif operation == '>': 56 | check = installed_version > required_version 57 | elif operation == '>=': 58 | check = installed_version > required_version or \ 59 | installed_version == required_version 60 | else: 61 | raise NotImplementedError( 62 | 'operation \'%s\' is not supported' % operation) 63 | if not check: 64 | raise IncorrectPackageVersionError(name, installed_version, operation, 65 | required_version) 66 | 67 | 68 | class MissingPackageError(Exception): 69 | error_message = 'mandatory package \'{name}\' not found' 70 | 71 | def __init__(self, package_name): 72 | self.package_name = package_name 73 | super(MissingPackageError, self).__init__( 74 | self.error_message.format(name=package_name)) 75 | 76 | 77 | class IncorrectPackageVersionError(Exception): 78 | error_message = '\'{name} {installed_version}\' version mismatch ' \ 79 | '({operation}{required_version})' 80 | 81 | def __init__(self, package_name, installed_version, operation, 82 | required_version): 83 | self.package_name = package_name 84 | self.installed_version = installed_version 85 | self.operation = operation 86 | self.required_version = required_version 87 | message = self.error_message.format( 88 | name=package_name, 89 | installed_version=installed_version, 90 | operation=operation, 91 | required_version=required_version) 92 | super(IncorrectPackageVersionError, self).__init__(message) 93 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/util/example_utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | from typing import Dict 4 | 5 | 6 | def get_travis_settings(optimizer_type: str) -> Dict: 7 | """ Helper function to reduce time consumption for test runs on travis.ci""" 8 | if optimizer_type == 'smac': 9 | return {"runcount-limit": 5, 'wallclock-limit': 50, 'cutoff': 50, 'memory_limit': 10000, 'output_dir': '.'} 10 | if optimizer_type == 'bohb': 11 | return {'max_budget': 2, 'num_iterations': 1, 'output_dir': Path('./')} 12 | 13 | raise ValueError(f'Unknown type {optimizer_type}. Must be one of [smac, bohb]') 14 | 15 | 16 | def set_env_variables_to_use_only_one_core(): 17 | """ Helper function: Sets all variables which are responsible for using multiple threads to 1. 
18 | This is necessary/useful, if you are computing on a cluster.""" 19 | os.environ['OMP_NUM_THREADS'] = '1' 20 | os.environ['OPENBLAS_NUM_THREADS'] = '1' 21 | os.environ['MKL_NUM_THREADS'] = '1' 22 | os.environ['VECLIB_MAXIMUM_THREADS'] = '1' 23 | os.environ['NUMEXPR_NUM_THREADS'] = '1' 24 | os.environ['NUMEXPR_MAX_THREADS'] = '1' 25 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/hpobench/util/rng_helper.py: -------------------------------------------------------------------------------- 1 | """ Helper functions to easily obtain randomState """ 2 | from typing import Union, Tuple, List 3 | 4 | import numpy as np 5 | 6 | 7 | def get_rng(rng: Union[int, np.random.RandomState, None] = None, 8 | self_rng: Union[int, np.random.RandomState, None] = None) -> np.random.RandomState: 9 | """ 10 | Helper function to obtain RandomState from int or create a new one. 11 | 12 | Sometimes a default random state (self_rng) is already available, but a 13 | new random state is desired. In this case ``rng`` is not None and not already 14 | a random state (int or None) -> a new random state is created. 15 | If ``rng`` is already a randomState, it is just returned. 16 | Same if ``rng`` is None, but the default rng is given. 17 | 18 | Parameters 19 | ---------- 20 | rng : int, np.random.RandomState, None 21 | self_rng : np.random.RandomState, None 22 | 23 | Returns 24 | ------- 25 | np.random.RandomState 26 | """ 27 | 28 | if rng is not None: 29 | return _cast_int_to_random_state(rng) 30 | if rng is None and self_rng is not None: 31 | return _cast_int_to_random_state(self_rng) 32 | return np.random.RandomState() 33 | 34 | 35 | def _cast_int_to_random_state(rng: Union[int, np.random.RandomState]) -> np.random.RandomState: 36 | """ 37 | Helper function to cast ``rng`` from int to np.random.RandomState if necessary. 38 | 39 | Parameters 40 | ---------- 41 | rng : int, np.random.RandomState 42 | 43 | Returns 44 | ------- 45 | np.random.RandomState 46 | """ 47 | if isinstance(rng, np.random.RandomState): 48 | return rng 49 | if int(rng) == rng: 50 | # As seed is sometimes -1 (e.g. if SMAC optimizes a deterministic function) -> use abs() 51 | return np.random.RandomState(np.abs(rng)) 52 | raise ValueError(f"{rng} is neither a number nor a RandomState. 
Initializing RandomState failed") 53 | 54 | 55 | def serialize_random_state(random_state: np.random.RandomState) -> Tuple[int, List, int, int, int]: 56 | (rnd0, rnd1, rnd2, rnd3, rnd4) = random_state.get_state() 57 | rnd1 = rnd1.tolist() 58 | return rnd0, rnd1, rnd2, rnd3, rnd4 59 | 60 | 61 | def deserialize_random_state(random_state: Tuple[int, List, int, int, int]) -> np.random.RandomState: 62 | (rnd0, rnd1, rnd2, rnd3, rnd4) = random_state 63 | rnd1 = [np.uint32(number) for number in rnd1] 64 | random_state = np.random.RandomState() 65 | random_state.set_state((rnd0, rnd1, rnd2, rnd3, rnd4)) 66 | return random_state 67 | -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/mopta08/init: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LamNgo1/cma-meta-algorithm/9aa76dc82f09f4ed5ead6a0cade40645f8850f8e/test_functions/function_realworld_bo/mopta08/init -------------------------------------------------------------------------------- /test_functions/function_realworld_bo/push_function.py: -------------------------------------------------------------------------------- 1 | """ 2 | Code from the Github: https://github.com/zi-w/Ensemble-Bayesian-Optimization 3 | """ 4 | 5 | 6 | from test_functions.function_realworld_bo.push_utils import b2WorldInterface, make_base, create_body, end_effector, run_simulation 7 | 8 | import numpy as np 9 | 10 | 11 | class PushReward: 12 | def __init__(self): 13 | 14 | # domain of this function 15 | self.xmin = [-5., -5., -10., -10., 2., 0., -5., -5., -10., -10., 2., 0., -5., -5.] 16 | self.xmax = [5., 5., 10., 10., 30., 2.*np.pi, 5., 5., 10., 10., 30., 2.*np.pi, 5., 5.] 17 | 18 | # starting xy locations for the two objects 19 | self.sxy = (0, 2) 20 | self.sxy2 = (0, -2) 21 | # goal xy locations for the two objects 22 | self.gxy = [4, 3.5] 23 | self.gxy2 = [-4, 3.5] 24 | 25 | @property 26 | def f_max(self): 27 | # maximum value of this function 28 | return np.linalg.norm(np.array(self.gxy) - np.array(self.sxy)) \ 29 | + np.linalg.norm(np.array(self.gxy2) - np.array(self.sxy2)) 30 | @property 31 | def dx(self): 32 | # dimension of the input 33 | return self._dx 34 | 35 | def __call__(self, argv): 36 | # returns the reward of pushing two objects with two robots 37 | rx = float(argv[0]) 38 | ry = float(argv[1]) 39 | xvel = float(argv[2]) 40 | yvel = float(argv[3]) 41 | simu_steps = int(float(argv[4]) * 10) 42 | init_angle = float(argv[5]) 43 | rx2 = float(argv[6]) 44 | ry2 = float(argv[7]) 45 | xvel2 = float(argv[8]) 46 | yvel2 = float(argv[9]) 47 | simu_steps2 = int(float(argv[10]) * 10) 48 | init_angle2 = float(argv[11]) 49 | rtor = float(argv[12]) 50 | rtor2 = float(argv[13]) 51 | 52 | initial_dist = self.f_max 53 | 54 | world = b2WorldInterface(False) 55 | oshape, osize, ofriction, odensity, bfriction, hand_shape, hand_size = \ 56 | 'circle', 1, 0.01, 0.05, 0.01, 'rectangle', (1, 0.3) 57 | 58 | base = make_base(500, 500, world) 59 | body = create_body(base, world, 'rectangle', (0.5, 0.5), ofriction, odensity, self.sxy) 60 | body2 = create_body(base, world, 'circle', 1, ofriction, odensity, self.sxy2) 61 | 62 | robot = end_effector(world, (rx,ry), base, init_angle, hand_shape, hand_size) 63 | robot2 = end_effector(world, (rx2,ry2), base, init_angle2, hand_shape, hand_size) 64 | (ret1, ret2) = run_simulation(world, body, body2, robot, robot2, xvel, yvel, \ 65 | xvel2, yvel2, rtor, rtor2, simu_steps, simu_steps2) 66 | 67 | ret1 = 
np.linalg.norm(np.array(self.gxy) - ret1) 68 | ret2 = np.linalg.norm(np.array(self.gxy2) - ret2) 69 | return initial_dist - ret1 - ret2 70 | 71 | 72 | def main(): 73 | f = PushReward() 74 | x = np.random.uniform(f.xmin, f.xmax) 75 | print('Input = {}'.format(x)) 76 | print('Output = {}'.format(f(x))) 77 | 78 | 79 | if __name__ == '__main__': 80 | main() -------------------------------------------------------------------------------- /test_functions/highdim_functions.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from .functions_bo import hartman_6d, branin, schaffer_n2, bohachevsky_n1 3 | 4 | 5 | class Hartmann500D: 6 | ''' 7 | Hartman500 function 8 | ''' 9 | def __init__(self, bounds=None): 10 | self.input_dim = 500 11 | 12 | if bounds is None: 13 | self.bounds = [(0.0, 1.0)]*self.input_dim 14 | else: 15 | self.bounds = bounds 16 | 17 | # self.min = [(0.)*self.input_dim] 18 | self.fmin = -3.32237 19 | self.name = 'hartmann500' 20 | self._b = hartman_6d() 21 | self.effective_dims = 6 22 | 23 | def func(self, x): 24 | if x.ndim == 1: 25 | x_ = x.reshape(1, -1) 26 | else: 27 | x_ = np.copy(x) 28 | return self._b.func(x_[:, :self.effective_dims]) 29 | 30 | class Branin500D: 31 | ''' 32 | Branin500D function 33 | ''' 34 | def __init__(self): 35 | self.input_dim = 500 36 | self._b = branin() 37 | self.bounds = [(0., 1.)]*self.input_dim 38 | # self.min = [(0.)*self.input_dim] 39 | self.fmin = 0.397887 40 | self.name = 'branin500' 41 | 42 | self.effective_dims = self._b.input_dim 43 | 44 | def func(self, x): 45 | if x.ndim == 1: 46 | x_ = np.copy(x.reshape(1, -1)) 47 | else: 48 | x_ = np.copy(x) 49 | x_[:, 0] = 15.0*x_[:, 0] - 5.0 50 | x_[:, 1] = 15.0*x_[:, 1] 51 | return self._b.func(x_[:, :self.effective_dims]) 52 | 53 | 54 | class Branin20D: 55 | ''' 56 | Branin20D function 57 | ''' 58 | def __init__(self): 59 | self.input_dim = 20 60 | self._b = branin() 61 | self.bounds = [(0., 1.)]*self.input_dim 62 | # self.min = [(0.)*self.input_dim] 63 | self.fmin = 0.397887 64 | self.name = 'branin20' 65 | 66 | self.effective_dims = self._b.input_dim 67 | 68 | def func(self, x): 69 | if x.ndim == 1: 70 | x_ = np.copy(x.reshape(1, -1)) 71 | else: 72 | x_ = np.copy(x) 73 | x_[:, 0] = 15.0*x_[:, 0] - 5.0 74 | x_[:, 1] = 15.0*x_[:, 1] 75 | return self._b.func(x_[:, :self.effective_dims]) 76 | 77 | 78 | class Branin40D: 79 | ''' 80 | Branin40D function 81 | ''' 82 | def __init__(self): 83 | self.input_dim = 40 84 | self._b = branin() 85 | self.bounds = [(0., 1.)]*self.input_dim 86 | # self.min = [(0.)*self.input_dim] 87 | self.fmin = 0.397887 88 | self.name = 'branin40' 89 | 90 | self.effective_dims = self._b.input_dim 91 | 92 | def func(self, x): 93 | if x.ndim == 1: 94 | x_ = np.copy(x.reshape(1, -1)) 95 | else: 96 | x_ = np.copy(x) 97 | x_[:, 0] = 15.0*x_[:, 0] - 5.0 98 | x_[:, 1] = 15.0*x_[:, 1] 99 | return self._b.func(x_[:, :self.effective_dims]) 100 | 101 | class Schaffer40: 102 | ''' 103 | Schaffer-N2 40D function 104 | ''' 105 | def __init__(self, bounds=None): 106 | self.input_dim = 40 107 | 108 | if bounds is None: 109 | self.bounds = [(-100., 100.)]*self.input_dim 110 | else: 111 | self.bounds = bounds 112 | 113 | # self.min = [(0.)*self.input_dim] 114 | self.fmin = 0 115 | self.name = 'schaffer40' 116 | self._b = schaffer_n2() 117 | self.effective_dims = 2 118 | 119 | def func(self, x): 120 | if x.ndim == 1: 121 | x_ = np.copy(x.reshape(1, -1)) 122 | else: 123 | x_ = np.copy(x) 124 | return self._b.func(x_[:, :self.effective_dims]) 
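# --- Usage sketch (added for illustration; not part of highdim_functions.py) ---
# Each wrapper above embeds a low-dimensional benchmark into a larger box, so only
# the first `effective_dims` coordinates influence the returned value. A minimal
# check, assuming numpy is available as imported at the top of this file:
#
#     f = Schaffer40()
#     x = np.random.uniform(-100.0, 100.0, size=(1, f.input_dim))
#     y1 = f.func(x)
#     x[:, f.effective_dims:] = 0.0   # change only the inactive dimensions
#     y2 = f.func(x)                  # equals y1, since dims beyond 2 are ignored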
125 | 126 | class Schaffer100: 127 | ''' 128 | Schaffer-N2 100D function 129 | ''' 130 | def __init__(self, bounds=None): 131 | self.input_dim = 100 132 | 133 | if bounds is None: 134 | self.bounds = [(-100., 100.)]*self.input_dim 135 | else: 136 | self.bounds = bounds 137 | 138 | # self.min = [(0.)*self.input_dim] 139 | self.fmin = 0 140 | self.name = 'schaffer100' 141 | self._b = schaffer_n2() 142 | self.effective_dims = 2 143 | 144 | def func(self, x): 145 | if x.ndim == 1: 146 | x_ = np.copy(x.reshape(1, -1)) 147 | else: 148 | x_ = np.copy(x) 149 | return self._b.func(x_[:, :self.effective_dims]) 150 | 151 | 152 | class Bohachevsky100: 153 | ''' 154 | bohachevsky-N1 100D function 155 | ''' 156 | def __init__(self, bounds=None): 157 | self.input_dim = 100 158 | 159 | if bounds is None: 160 | self.bounds = [(-100., 100.)]*self.input_dim 161 | else: 162 | self.bounds = bounds 163 | 164 | # self.min = [(0.)*self.input_dim] 165 | self.fmin = 0 166 | self.name = 'bohachevsky100' 167 | self._b = bohachevsky_n1() 168 | self.effective_dims = 2 169 | 170 | def func(self, x): 171 | if x.ndim == 1: 172 | x_ = np.copy(x.reshape(1, -1)) 173 | else: 174 | x_ = np.copy(x) 175 | return self._b.func(x_[:, :self.effective_dims]) -------------------------------------------------------------------------------- /test_functions/lasso_benchmark.py: -------------------------------------------------------------------------------- 1 | from typing import List, Optional, Union 2 | import numpy as np 3 | 4 | class LassoRealWorldBenchmark: 5 | ''' 6 | The base class for Lasso realworld benchmark. Use the derived classes 7 | ''' 8 | def __init__(self, pick_data: str, seed: Optional[int] = None, **kwargs): 9 | """ 10 | Constructs all the necessary attributes for real-world bench. 11 | 12 | Parameters 13 | ---------- 14 | pick_data : str 15 | name of dataset such as 16 | Diabetes, Breast_cancer, DNA, Leukemia, RCV1 17 | seed: int, optional 18 | seed number 19 | """ 20 | from LassoBench import LassoBench 21 | 22 | self._b: LassoBench.RealBenchmark = LassoBench.RealBenchmark( 23 | pick_data=pick_data, mf_opt="discrete_fidelity", seed=seed 24 | ) 25 | self.input_dim = self._b.n_features 26 | self.effective_dim = None # to be filled in derived classes 27 | self.bounds = [(-1.0, 1.0)]*self.input_dim 28 | self.name = f'lasso-{pick_data}' 29 | self.noise_std = 0 30 | 31 | def __call__(self, x: Union[np.ndarray, List[float], List[List[float]]]): 32 | x = np.array(x, dtype=np.double) 33 | if x.ndim == 0: 34 | x = np.expand_dims(x, 0) 35 | if x.ndim == 1: 36 | x = np.expand_dims(x, 0) 37 | assert x.ndim == 2 38 | result_list = [] 39 | for y in x: 40 | result = self._b.evaluate(y) 41 | result_list.append(result) 42 | result = np.array(result_list).squeeze() 43 | return result + np.random.normal( 44 | np.zeros_like(result), np.ones_like(result) * self.noise_std, result.shape 45 | ) 46 | 47 | def func(self, x: Union[np.ndarray, List[float], List[List[float]]]): 48 | return self.__call__(x) 49 | 50 | class LassoSyntheticBenchmark: 51 | ''' 52 | The base class for Lasso realworld benchmark. Use the derived classes 53 | ''' 54 | 55 | def __init__(self, pick_bench: str, seed: Optional[int] = None, **kwargs): 56 | """ 57 | Constructs all the necessary attributes for real-world bench. 
58 | 59 | Parameters 60 | ---------- 61 | pick_bench : str 62 | name of a predefined bench such as: synt_simple, synt_medium, synt_high, synt_hard 63 | seed: int, optional 64 | seed number 65 | """ 66 | from LassoBench import LassoBench 67 | 68 | self._b: LassoBench.SyntheticBenchmark = LassoBench.SyntheticBenchmark( 69 | pick_bench=pick_bench, seed=seed 70 | ) 71 | self.input_dim = self._b.n_features 72 | 73 | self.effective_dims = np.arange(self.input_dim)[self._b.w_true != 0] 74 | print(f"function effective dimensions: {self.effective_dims.tolist()}") 75 | 76 | self.bounds = [(-1.0, 1.0)]*self.input_dim 77 | problem = pick_bench.replace('synt_','') 78 | self.name = f'lasso-{problem}' 79 | 80 | def __call__(self, x: Union[np.ndarray, List[float], List[List[float]]]): 81 | x = np.array(x, dtype=np.double) 82 | if x.ndim == 0: 83 | x = np.expand_dims(x, 0) 84 | if x.ndim == 1: 85 | x = np.expand_dims(x, 0) 86 | assert x.ndim == 2 87 | result_list = [] 88 | for y in x: 89 | result = self._b.evaluate(y) 90 | result_list.append(result) 91 | return np.array(result_list).squeeze() 92 | 93 | def func(self, x: Union[np.ndarray, List[float], List[List[float]]]): 94 | return self.__call__(x) 95 | 96 | 97 | 98 | class LassoDiabetesBenchmark(LassoRealWorldBenchmark): 99 | """ 100 | 8-D diabetes benchmark from https://github.com/ksehic/LassoBench 101 | 102 | Args: 103 | seed: seed number 104 | **kwargs: 105 | """ 106 | def __init__(self, seed: Optional[int] = None, **kwargs): 107 | super().__init__(pick_data="diabetes", seed=seed) 108 | self.effective_dim = 5 109 | 110 | class LassoRCV1Benchmark(LassoRealWorldBenchmark): 111 | """ 112 | 19 959-D RCV1 benchmark from https://github.com/ksehic/LassoBench 113 | 114 | Args: 115 | seed: seed number 116 | **kwargs: 117 | """ 118 | def __init__(self, seed: Optional[int] = None, **kwargs): 119 | super().__init__(pick_data="rcv1", seed=seed) 120 | self.effective_dim = 75 121 | 122 | class LassoDNABenchmark(LassoRealWorldBenchmark): 123 | """ 124 | 180-D DNA RCV1 benchmark from https://github.com/ksehic/LassoBench 125 | 126 | Args: 127 | seed: seed number 128 | **kwargs: 129 | """ 130 | def __init__(self, seed: Optional[int] = None, **kwargs): 131 | super().__init__(pick_data="dna", seed=seed) 132 | self.effective_dim = 43 133 | 134 | 135 | 136 | class LassoSimpleBenchmark(LassoSyntheticBenchmark): 137 | """ 138 | 60-D synthetic Lasso simple benchmark from https://github.com/ksehic/LassoBench . 139 | Effective dimensionality: 5% of input dimensionality. 140 | 141 | Args: 142 | seed: optional int | None 143 | **kwargs: 144 | """ 145 | 146 | def __init__(self, seed: Optional[int] = None, **kwargs): 147 | super().__init__(pick_bench="synt_simple", seed=seed) 148 | 149 | class LassoMediumBenchmark(LassoSyntheticBenchmark): 150 | """ 151 | 100-D synthetic Lasso medium benchmark from https://github.com/ksehic/LassoBench . 152 | Effective dimensionality: 5% of input dimensionality. 153 | 154 | Args: 155 | seed: optional int | None 156 | **kwargs: 157 | """ 158 | 159 | def __init__(self, seed: Optional[int] = None, **kwargs): 160 | super().__init__(pick_bench="synt_medium", seed=seed) 161 | 162 | 163 | class LassoHighBenchmark(LassoSyntheticBenchmark): 164 | """ 165 | 300-D synthetic Lasso high benchmark from https://github.com/ksehic/LassoBench . 166 | Effective dimensionality: 5% of input dimensionality. 
167 | 168 | Args: 169 | seed: optional int | None 170 | **kwargs: 171 | """ 172 | 173 | def __init__(self, seed: Optional[int] = None, **kwargs): 174 | super().__init__(pick_bench="synt_high", seed=seed) 175 | 176 | 177 | class LassoHardBenchmark(LassoSyntheticBenchmark): 178 | """ 179 | 1000-D synthetic Lasso hard benchmark from https://github.com/ksehic/LassoBench . 180 | Effective dimensionality: 5% of input dimensionality. 181 | 182 | Args: 183 | seed: optional int | None 184 | **kwargs: 185 | """ 186 | 187 | def __init__(self, seed: Optional[int] = None, **kwargs): 188 | super().__init__(pick_bench="synt_hard", seed=seed) 189 | 190 | -------------------------------------------------------------------------------- /test_functions/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | 3 | 4 | setup(name='LassoBench', 5 | packages=['LassoBench'], 6 | install_requires=[ 7 | 'sparse-ho @ https://github.com/QB3/sparse-ho/archive/master.zip', 8 | 'celer', 9 | 'pyDOE', 10 | 'libsvmdata', 11 | 'ax-platform', 12 | 'matplotlib>=2.0.0', 13 | 'numpy>=1.12', 14 | 'scipy>=0.18.0', 15 | 'scikit-learn>=0.21', 16 | 'seaborn>=0.7', 17 | 'GPy>=1.9.2', 18 | 'pyDOE>=0.3.8'], 19 | ) 20 | -------------------------------------------------------------------------------- /test_functions/utils.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | import random 4 | from collections import OrderedDict 5 | 6 | import botorch 7 | import numpy as np 8 | import torch 9 | 10 | from .function_realworld_bo.functions_mujoco import * 11 | from .function_realworld_bo.functions_realworld_bo import * 12 | from .functions_bo import * 13 | from .highdim_functions import * 14 | from .lasso_benchmark import * 15 | 16 | 17 | def get_arguments(): 18 | parser = argparse.ArgumentParser(description='Process inputs') 19 | parser.add_argument('-f', '--func', help='specify the test function') 20 | parser.add_argument('-d', '--dim', type=int, help='specify the problem dimensions', default=10) 21 | parser.add_argument('-n', '--maxevals', type=int, help='specify the maxium number of evaluations to collect in the search') 22 | parser.add_argument('--solver', type=str, help='specify the solver', default='bo') 23 | parser.add_argument('--seed', type=int, help='seeding option', default=1) 24 | 25 | 26 | args = parser.parse_args() 27 | 28 | 29 | dim = args.dim 30 | func = args.func.lower() 31 | if func == 'ackley': 32 | f = ackley(dim) 33 | elif func == 'shifted-ackley': 34 | f = shifted_ackley(dim) 35 | elif func == 'rastrigin': 36 | f = rastrigin(dim) 37 | elif func == 'ellipsoid': 38 | f = ellipsoid(dim) 39 | elif func == 'shifted-ellipsoid': 40 | f = shifted_ellipsoid(dim) 41 | elif func == 'levy': 42 | f = Levy(dim) 43 | elif func == 'shifted-levy': 44 | f = shifted_levy(dim) 45 | elif func == 'schwefel': 46 | f = schwefel(dim) 47 | elif func == 'alpine': 48 | f = alpine(dim) 49 | elif func == 'shifted-alpine': 50 | f = shifted_alpine(dim) 51 | elif func == 'eggholder': 52 | f = egg_holder() 53 | dim = f.input_dim 54 | elif func == 'beale': 55 | f = beale() 56 | dim = f.input_dim 57 | elif func == 'branin': 58 | f = branin_uniformbound() 59 | dim = f.input_dim 60 | elif func == 'rosenbrock': 61 | f = rosenbrock(dim) 62 | elif func == 'powell': 63 | f = powell(dim) 64 | elif func == 'schaffer': 65 | f = schaffer_n2() 66 | elif func == 'robot-pushing': 67 | f = Robot_pushing() 68 | elif func == 
'rover60': 69 | f = Rover() 70 | elif func == 'rover20': 71 | f = Rover20() 72 | elif func == 'rover100': 73 | f = Rover100() 74 | elif func == 'lunar-landing': 75 | f = Lunar_landing() 76 | elif func == 'bipedal-walking': 77 | f = Bipedal_walking() 78 | elif func == 'electron9': 79 | f = ElectronSphere9np() 80 | elif func == 'electron6': 81 | f = ElectronSphere6np() 82 | elif func == 'lasso-simple': 83 | f = LassoSimpleBenchmark() 84 | elif func == 'lasso-medium': 85 | f = LassoMediumBenchmark() 86 | elif func == 'lasso-high': 87 | f = LassoHighBenchmark() 88 | elif func == 'lasso-hard': 89 | f = LassoHardBenchmark() 90 | elif func == 'lasso-diabete': 91 | f = LassoDiabetesBenchmark() 92 | elif func == 'lasso-dna': 93 | f = LassoDNABenchmark() 94 | elif func == 'hartmann500': 95 | f = Hartmann500D() 96 | elif func == 'branin20': 97 | f = Branin20D() 98 | elif func == 'branin40': 99 | f = Branin40D() 100 | elif func == 'branin500': 101 | f = Branin500D() 102 | elif func == 'schaffer40': 103 | f = Schaffer40() 104 | elif func == 'schaffer100': 105 | f = Schaffer100() 106 | elif func == 'bohachevsky100': 107 | f = Bohachevsky100() 108 | elif func == 'mopta08': 109 | f = MoptaSoftConstraints() 110 | elif func == 'hopper': 111 | f = Hopper() 112 | elif func == 'walker2d': 113 | f = Walker2d() 114 | elif func == 'half-cheetah': 115 | f = HalfCheetah() 116 | elif func == 'humanoid': 117 | f = Humanoid() 118 | elif func == 'ant': 119 | f = Ant() 120 | elif func == 'swimmer': 121 | f = Swimmer() 122 | elif func == 'svm': 123 | f = SVMBenchmark() 124 | else: 125 | raise NotImplementedError(f'Objective function {func} is not supported') 126 | 127 | dim = f.input_dim 128 | max_evals = args.maxevals 129 | dict = { 130 | 'func_name': func, 131 | "f": f, 132 | 'max_evals': max_evals, 133 | 'solver': args.solver, 134 | 'seed': args.seed, 135 | } 136 | return dict 137 | 138 | def get_bound(bounds): 139 | if isinstance(bounds, OrderedDict): 140 | return np.array([val for val in bounds.values()]) 141 | else: 142 | return np.array(bounds) 143 | 144 | def set_seed(seed=1234): 145 | os.environ["CUBLAS_WORKSPACE_CONFIG"]=":4096:8" 146 | random.seed(seed) 147 | np.random.seed(seed) 148 | torch.manual_seed(seed) 149 | torch.use_deterministic_algorithms(True) 150 | botorch.manual_seed(seed) -------------------------------------------------------------------------------- /turbo_cma/__init__.py: -------------------------------------------------------------------------------- 1 | from .turbo_1 import Turbo1 2 | -------------------------------------------------------------------------------- /turbo_cma/gp.py: -------------------------------------------------------------------------------- 1 | 2 | import math 3 | 4 | import gpytorch 5 | import numpy as np 6 | import torch 7 | from gpytorch.constraints.constraints import Interval 8 | from gpytorch.distributions import MultivariateNormal 9 | from gpytorch.kernels import MaternKernel, ScaleKernel 10 | from gpytorch.likelihoods import GaussianLikelihood 11 | from gpytorch.means import ConstantMean 12 | from gpytorch.mlls import ExactMarginalLogLikelihood 13 | from gpytorch.models import ExactGP 14 | 15 | 16 | # GP Model 17 | class GP(ExactGP): 18 | def __init__(self, train_x, train_y, likelihood, lengthscale_constraint, outputscale_constraint, ard_dims): 19 | super(GP, self).__init__(train_x, train_y, likelihood) 20 | self.ard_dims = ard_dims 21 | self.mean_module = ConstantMean() 22 | base_kernel = MaternKernel(lengthscale_constraint=lengthscale_constraint, 
ard_num_dims=ard_dims, nu=2.5) 23 | self.covar_module = ScaleKernel(base_kernel, outputscale_constraint=outputscale_constraint) 24 | 25 | def forward(self, x): 26 | mean_x = self.mean_module(x) 27 | covar_x = self.covar_module(x) 28 | return MultivariateNormal(mean_x, covar_x) 29 | 30 | 31 | def train_gp(train_x, train_y, use_ard, num_steps, hypers={}): 32 | """Fit a GP model where train_x is in [0, 1]^d and train_y is standardized.""" 33 | assert train_x.ndim == 2 34 | assert train_y.ndim == 1 35 | assert train_x.shape[0] == train_y.shape[0] 36 | 37 | # Create hyper parameter bounds 38 | noise_constraint = Interval(5e-4, 0.2) 39 | if use_ard: 40 | lengthscale_constraint = Interval(0.005, 2.0) 41 | else: 42 | lengthscale_constraint = Interval(0.005, math.sqrt(train_x.shape[1])) # [0.005, sqrt(dim)] 43 | outputscale_constraint = Interval(0.05, 20.0) 44 | 45 | # Create models 46 | likelihood = GaussianLikelihood(noise_constraint=noise_constraint).to(device=train_x.device, dtype=train_y.dtype) 47 | ard_dims = train_x.shape[1] if use_ard else None 48 | model = GP( 49 | train_x=train_x, 50 | train_y=train_y, 51 | likelihood=likelihood, 52 | lengthscale_constraint=lengthscale_constraint, 53 | outputscale_constraint=outputscale_constraint, 54 | ard_dims=ard_dims, 55 | ).to(device=train_x.device, dtype=train_x.dtype) 56 | 57 | # Find optimal model hyperparameters 58 | model.train() 59 | likelihood.train() 60 | 61 | # "Loss" for GPs - the marginal log likelihood 62 | mll = ExactMarginalLogLikelihood(likelihood, model) 63 | 64 | # Initialize model hypers 65 | if hypers: 66 | model.load_state_dict(hypers) 67 | else: 68 | hypers = {} 69 | hypers["covar_module.outputscale"] = 1.0 70 | hypers["covar_module.base_kernel.lengthscale"] = 0.5 71 | hypers["likelihood.noise"] = 0.005 72 | model.initialize(**hypers) 73 | 74 | # Use the adam optimizer 75 | optimizer = torch.optim.Adam([{"params": model.parameters()}], lr=0.1) 76 | 77 | for _ in range(num_steps): 78 | optimizer.zero_grad() 79 | output = model(train_x) 80 | loss = -mll(output, train_y) 81 | loss.backward() 82 | optimizer.step() 83 | 84 | # Switch to eval mode 85 | model.eval() 86 | likelihood.eval() 87 | 88 | return model 89 | -------------------------------------------------------------------------------- /turbo_cma/utils.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | 4 | 5 | def to_unit_cube(x, lb, ub): 6 | """Project to [0, 1]^d from hypercube with bounds lb and ub""" 7 | assert np.all(lb < ub) and lb.ndim == 1 and ub.ndim == 1 and x.ndim == 2 8 | xx = (x - lb) / (ub - lb) 9 | return xx 10 | 11 | 12 | def from_unit_cube(x, lb, ub): 13 | """Project from [0, 1]^d to hypercube with bounds lb and ub""" 14 | assert np.all(lb < ub) and lb.ndim == 1 and ub.ndim == 1 and x.ndim == 2 15 | xx = x * (ub - lb) + lb 16 | return xx 17 | 18 | 19 | def latin_hypercube(n_pts, dim): 20 | """Basic Latin hypercube implementation with center perturbation.""" 21 | X = np.zeros((n_pts, dim)) 22 | centers = (1.0 + 2.0 * np.arange(0.0, n_pts)) / float(2 * n_pts) 23 | for i in range(dim): # Shuffle the center locataions for each dimension. 24 | X[:, i] = centers[np.random.permutation(n_pts)] 25 | 26 | # Add some perturbations within each box 27 | pert = np.random.uniform(-1.0, 1.0, (n_pts, dim)) / float(2 * n_pts) 28 | X += pert 29 | return X 30 | --------------------------------------------------------------------------------
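A minimal usage sketch tying together the `turbo_cma` helpers listed above: `latin_hypercube`, `from_unit_cube`, and `to_unit_cube` from `turbo_cma/utils.py`, and `train_gp` from `turbo_cma/gp.py`. The 10-D bounds, the quadratic objective, the design size, and the number of training steps are placeholder assumptions for illustration only; as its docstring states, `train_gp` expects inputs in `[0, 1]^d` and standardized targets.

```python
import numpy as np
import torch

from turbo_cma.gp import train_gp
from turbo_cma.utils import latin_hypercube, from_unit_cube, to_unit_cube

# Hypothetical 10-D box bounds and a toy quadratic objective (assumptions).
dim = 10
lb, ub = np.full(dim, -5.0), np.full(dim, 5.0)

X_unit = latin_hypercube(20, dim)          # initial design in [0, 1]^dim
X = from_unit_cube(X_unit, lb, ub)         # map to the original domain
y = np.array([np.sum(x ** 2) for x in X])  # evaluate the toy objective

# train_gp expects inputs in the unit cube and standardized targets.
train_x = torch.tensor(to_unit_cube(X, lb, ub))
train_y = torch.tensor((y - y.mean()) / y.std())
gp = train_gp(train_x, train_y, use_ard=True, num_steps=50)

# The fitted model is returned in eval mode; query its posterior mean.
with torch.no_grad():
    mu = gp(train_x).mean
```

This mirrors the rescaling and standardization that the trust-region optimizer in `turbo_1.py` is expected to perform before fitting its local GP surrogate.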