diff --git a/neps/optimizers/acquisition/base_acquisition.py b/neps/optimizers/acquisition/base_acquisition.py
deleted file mode 100644
index 17a1a974..00000000
--- a/neps/optimizers/acquisition/base_acquisition.py
+++ /dev/null
@@ -1,30 +0,0 @@
-from __future__ import annotations
-
-from abc import ABC, abstractmethod
-from collections.abc import Iterable
-from typing import TYPE_CHECKING, Any
-
-if TYPE_CHECKING:
-    import numpy as np
-    import torch
-
-
-class BaseAcquisition(ABC):
-    def __init__(self):
-        self.surrogate_model: Any | None = None
-
-    @abstractmethod
-    def eval(
-        self,
-        x: Iterable,
-        *,
-        asscalar: bool = False,
-    ) -> np.ndarray | torch.Tensor | float:
-        """Evaluate the acquisition function at point x2."""
-        raise NotImplementedError
-
-    def __call__(self, *args: Any, **kwargs: Any) -> np.ndarray | torch.Tensor | float:
-        return self.eval(*args, **kwargs)
-
-    def set_state(self, surrogate_model: Any, **kwargs: Any) -> None:
-        self.surrogate_model = surrogate_model
diff --git a/neps/optimizers/acquisition/cost_cooling.py b/neps/optimizers/acquisition/cost_cooling.py
index 46fe1309..49c18cad 100644
--- a/neps/optimizers/acquisition/cost_cooling.py
+++ b/neps/optimizers/acquisition/cost_cooling.py
@@ -5,9 +5,7 @@
 import torch
 from botorch.acquisition.logei import partial
 
-from neps.optimizers.bayesian_optimization.acquisition_functions.weighted_acquisition import (  # noqa: E501
-    WeightedAcquisition,
-)
+from neps.optimizers.acquisition.weighted_acquisition import WeightedAcquisition
 
 if TYPE_CHECKING:
     from botorch.acquisition import AcquisitionFunction
diff --git a/neps/optimizers/acquisition/ei.py b/neps/optimizers/acquisition/ei.py
deleted file mode 100644
index b8ee5f75..00000000
--- a/neps/optimizers/acquisition/ei.py
+++ /dev/null
@@ -1,128 +0,0 @@
-from __future__ import annotations
-
-from collections.abc import Sequence
-from typing import TYPE_CHECKING, Any
-
-import torch
-from torch.distributions import Normal
-
-from .base_acquisition import BaseAcquisition
-
-if TYPE_CHECKING:
-    import numpy as np
-
-    from neps.search_spaces import SearchSpace
-
-
-class ComprehensiveExpectedImprovement(BaseAcquisition):
-    def __init__(
-        self,
-        *,
-        augmented_ei: bool = False,
-        xi: float = 0.0,
-        in_fill: str = "best",
-        log_ei: bool = False,
-        optimize_on_max_fidelity: bool = True,
-    ):
-        """This is the graph BO version of the expected improvement
-        key differences are:
-
-        1. The input x2 is a networkx graph instead of a vectorial input
-
-        2. The search space (a collection of x1_graphs) is discrete, so there is no
-        gradient-based optimisation. Instead, we compute the EI at all candidate points
-        and empirically select the best position during optimisation
-
-        Args:
-            augmented_ei: Using the Augmented EI heuristic modification to the standard
-                expected improvement algorithm according to Huang (2006).
-            xi: manual exploration-exploitation trade-off parameter.
-            in_fill: the criterion to be used for in-fill for the determination of mu_star
-                'best' means the empirical best observation so far (but could be
-                susceptible to noise), 'posterior' means the best *posterior GP mean*
-                encountered so far, and is recommended for optimization of more noisy
-                functions. Defaults to "best".
-            log_ei: log-EI if true otherwise usual EI.
-        """
-        super().__init__()
-
-        if in_fill not in ["best", "posterior"]:
-            raise ValueError(f"Invalid value for in_fill ({in_fill})")
-        self.augmented_ei = augmented_ei
-        self.xi = xi
-        self.in_fill = in_fill
-        self.log_ei = log_ei
-        self.incumbent: float | None = None
-        self.optimize_on_max_fidelity = optimize_on_max_fidelity
-
-    def eval(
-        self,
-        x: Sequence[SearchSpace],
-        *,
-        asscalar: bool = False,
-    ) -> np.ndarray | torch.Tensor | float:
-        """Return the negative expected improvement at the query point x2."""
-        assert self.incumbent is not None, "EI function not fitted on model"
-        assert self.surrogate_model is not None
-
-        space = x[0]
-        if len(space.fidelities) > 0 and self.optimize_on_max_fidelity:
-            assert len(space.fidelities) == 1
-            fid_name, fid = next(iter(space.fidelities.items()))
-            _x = [space.from_dict({**e._values, fid_name: fid.upper}) for e in x]
-        else:
-            _x = list(x)
-
-        mu, cov = self.surrogate_model.predict(_x)
-
-        std = torch.sqrt(torch.diag(cov))
-        mu_star = self.incumbent
-
-        gauss = Normal(torch.zeros(1, device=mu.device), torch.ones(1, device=mu.device))
-        # > u = (mu - mu_star - self.xi) / std
-        # > ei = std * updf + (mu - mu_star - self.xi) * ucdf
-        if self.log_ei:
-            # we expect that f_min is in log-space
-            f_min = mu_star - self.xi
-            v = (f_min - mu) / std
-            ei = torch.exp(f_min) * gauss.cdf(v) - torch.exp(
-                0.5 * torch.diag(cov) + mu
-            ) * gauss.cdf(v - std)
-        else:
-            u = (mu_star - mu - self.xi) / std
-            try:
-                ucdf = gauss.cdf(u)
-            except ValueError as e:
-                print(f"u: {u}")  # noqa: T201
-                print(f"mu_star: {mu_star}")  # noqa: T201
-                print(f"mu: {mu}")  # noqa: T201
-                print(f"std: {std}")  # noqa: T201
-                print(f"diag: {cov.diag()}")  # noqa: T201
-                raise e
-            updf = torch.exp(gauss.log_prob(u))
-            ei = std * updf + (mu_star - mu - self.xi) * ucdf
-            if self.augmented_ei:
-                sigma_n = self.surrogate_model.likelihood
-                ei *= 1.0 - torch.sqrt(torch.tensor(sigma_n, device=mu.device)) / torch.sqrt(
-                    sigma_n + torch.diag(cov)
-                )
-        if isinstance(_x, list) and asscalar:
-            return ei.detach().numpy()
-
-        if asscalar:
-            ei = ei.detach().numpy().item()
-
-        return ei
-
-    def set_state(self, surrogate_model: Any, **kwargs: Any) -> None:
-        super().set_state(surrogate_model, **kwargs)
-        assert self.surrogate_model is not None
-
-        # Compute incumbent
-        if self.in_fill == "best":
-            self.incumbent = float(torch.min(self.surrogate_model.y_))
-        else:
-            x = self.surrogate_model.x
-            mu_train, _ = self.surrogate_model.predict(x)
-            incumbent_idx = torch.argmin(mu_train)
-            self.incumbent = self.surrogate_model.y_[incumbent_idx]
diff --git a/neps/optimizers/acquisition/pibo.py b/neps/optimizers/acquisition/pibo.py
index 3cba54e5..1b456f46 100644
--- a/neps/optimizers/acquisition/pibo.py
+++ b/neps/optimizers/acquisition/pibo.py
@@ -18,9 +18,7 @@
 from botorch.acquisition.logei import partial
 
-from neps.optimizers.bayesian_optimization.acquisition_functions.weighted_acquisition import (  # noqa: E501
-    WeightedAcquisition,
-)
+from neps.optimizers.acquisition.weighted_acquisition import WeightedAcquisition
 
 if TYPE_CHECKING:
     from botorch.acquisition.acquisition import AcquisitionFunction
diff --git a/neps/optimizers/acquisition/ucb.py b/neps/optimizers/acquisition/ucb.py
deleted file mode 100644
index 52587a7a..00000000
--- a/neps/optimizers/acquisition/ucb.py
+++ /dev/null
@@ -1,57 +0,0 @@
-from __future__ import annotations
-
-import logging
-from collections.abc import Iterable
-from typing import Any
-
-import numpy as np
-import torch
-
-from neps.optimizers.bayesian_optimization.acquisition_functions.base_acquisition import (
-    BaseAcquisition,
-)
-
-logger = logging.getLogger(__name__)
-
-
-class UpperConfidenceBound(BaseAcquisition):
-    def __init__(self, *, beta: float = 1.0, maximize: bool = False):
-        """Upper Confidence Bound (UCB) acquisition function.
-
-        Args:
-            beta: Controls the balance between exploration and exploitation.
-            maximize: If True, maximize the given model, else minimize.
-                DEFAULT=False, assumes minimzation.
-        """
-        super().__init__()
-        self.beta = beta  # can be updated as part of the state for dynamism or a schedule
-        self.maximize = maximize
-
-        # to be initialized as part of the state
-        self.surrogate_model = None
-
-    def set_state(self, surrogate_model: Any, **kwargs: Any) -> None:
-        super().set_state(surrogate_model)
-        self.surrogate_model = surrogate_model
-        if "beta" in kwargs:
-            if not isinstance(kwargs["beta"], list | np.array):
-                self.beta = kwargs["beta"]
-            else:
-                logger.warning("Beta is a list, not updating beta value!")
-
-    def eval(
-        self,
-        x: Iterable,
-        *,
-        asscalar: bool = False,
-    ) -> np.ndarray | torch.Tensor | float:
-        assert self.surrogate_model is not None, "Surrogate model is not set."
-        try:
-            mu, cov = self.surrogate_model.predict(x)
-            std = torch.sqrt(torch.diag(cov))
-        except ValueError as e:
-            raise e
-        sign = 1 if self.maximize else -1  # LCB is performed if minimize=True
-        ucb_scores = mu + sign * np.sqrt(self.beta) * std
-        # if LCB, minimize acquisition, or maximize -acquisition
-        return ucb_scores.detach().numpy() * sign
diff --git a/neps/optimizers/models/__init__.py b/neps/optimizers/models/__init__.py
index 034049a3..a634e736 100755
--- a/neps/optimizers/models/__init__.py
+++ b/neps/optimizers/models/__init__.py
@@ -1,4 +1,4 @@
-from neps.optimizers.bayesian_optimization.models.ftpfn import FTPFNSurrogate
-from neps.optimizers.bayesian_optimization.models.gp import make_default_single_obj_gp
+from neps.optimizers.models.ftpfn import FTPFNSurrogate
+from neps.optimizers.models.gp import make_default_single_obj_gp
 
 __all__ = ["FTPFNSurrogate", "make_default_single_obj_gp"]
diff --git a/neps/optimizers/models/gp.py b/neps/optimizers/models/gp.py
index 2210e44b..932dcb4e 100644
--- a/neps/optimizers/models/gp.py
+++ b/neps/optimizers/models/gp.py
@@ -20,10 +20,10 @@
 from gpytorch import ExactMarginalLogLikelihood
 from gpytorch.kernels import ScaleKernel
 
-from neps.optimizers.bayesian_optimization.acquisition_functions.cost_cooling import (
+from neps.optimizers.acquisition.cost_cooling import (
     cost_cooled_acq,
 )
-from neps.optimizers.bayesian_optimization.acquisition_functions.pibo import (
+from neps.optimizers.acquisition.pibo import (
     pibo_acquisition,
 )
 from neps.search_spaces.encoding import CategoricalToIntegerTransformer, ConfigEncoder