diff --git a/README.md b/README.md
index 40c038143..cf9633303 100644
--- a/README.md
+++ b/README.md
@@ -60,7 +60,7 @@ import logging

 # 1. Define a function that accepts hyperparameters and computes the validation error
 def run_pipeline(
-    hyperparameter_a: float, hyperparameter_b: int, architecture_parameter: str
+    hyperparameter_a: float, hyperparameter_b: int, architecture_parameter: str
 ) -> dict:
     # Create your model
     model = MyModel(architecture_parameter)
@@ -74,14 +74,13 @@ def run_pipeline(

 # 2. Define a search space of parameters; use the same parameter names as in run_pipeline
 pipeline_space = dict(
-    hyperparameter_a=neps.FloatParameter(
+    hyperparameter_a=neps.Float(
         lower=0.001, upper=0.1, log=True  # The search space is sampled in log space
     ),
-    hyperparameter_b=neps.IntegerParameter(lower=1, upper=42),
-    architecture_parameter=neps.CategoricalParameter(["option_a", "option_b"]),
+    hyperparameter_b=neps.Integer(lower=1, upper=42),
+    architecture_parameter=neps.Categorical(["option_a", "option_b"]),
 )
-
 # 3. Run the NePS optimization
 logging.basicConfig(level=logging.INFO)
 neps.run(
diff --git a/docs/doc_yamls/architecture_search_space.py b/docs/doc_yamls/architecture_search_space.py
index cdac0da0e..66771cb3b 100644
--- a/docs/doc_yamls/architecture_search_space.py
+++ b/docs/doc_yamls/architecture_search_space.py
@@ -86,12 +86,12 @@ def set_recursive_attribute(op_name, predecessor_values):


 pipeline_space = dict(
-    architecture=neps.ArchitectureParameter(
+    architecture=neps.Architecture(
         set_recursive_attribute=set_recursive_attribute,
         structure=structure,
         primitives=primitives,
     ),
-    optimizer=neps.CategoricalParameter(choices=["sgd", "adam"]),
-    learning_rate=neps.FloatParameter(lower=10e-7, upper=10e-3, log=True),
+    optimizer=neps.Categorical(choices=["sgd", "adam"]),
+    learning_rate=neps.Float(lower=10e-7, upper=10e-3, log=True),
 )
diff --git a/docs/getting_started.md b/docs/getting_started.md
index 9f414c0a1..78cad7640 100644
--- a/docs/getting_started.md
+++ b/docs/getting_started.md
@@ -35,10 +35,11 @@ In code, the usage pattern can look like this:
 import neps
 import logging

-def run_pipeline( # (1)!
-    hyperparameter_a: float,
-    hyperparameter_b: int,
-    architecture_parameter: str,
+
+def run_pipeline( # (1)!
+    hyperparameter_a: float,
+    hyperparameter_b: int,
+    architecture_parameter: str,
 ) -> dict:
     # insert here your own model
     model = MyModel(architecture_parameter)
@@ -49,7 +50,7 @@ def run_pipeline( # (1)!
     )

     return {
-        "loss": validation_error, #! (2)
+        "loss": validation_error, # ! (2)
         "info_dict": {
             "training_error": training_error
             # + Other metrics
@@ -58,9 +59,9 @@ def run_pipeline( # (1)!


 pipeline_space = { # (3)!
-    "hyperparameter_b":neps.IntegerParameter(1, 42, is_fidelity=True), #! (4)
-    "hyperparameter_a":neps.FloatParameter(1e-3, 1e-1, log=True) #! (5)
-    "architecture_parameter": neps.CategoricalParameter(["option_a", "option_b", "option_c"]),
+    "hyperparameter_b": neps.Integer(1, 42, is_fidelity=True), # ! (4)
+    "hyperparameter_a": neps.Float(1e-3, 1e-1, log=True), # ! (5)
+    "architecture_parameter": neps.Categorical(["option_a", "option_b", "option_c"]),
 }

 if __name__ == "__main__":
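
Illustration (not part of the patch): the rename above is behavior-preserving. Each old `*Parameter` name survives as a deprecated subclass or alias of its new short name and emits a `DeprecationWarning` when used (see the shims added under `neps/search_spaces/` further down). A minimal sketch, assuming the post-PR `neps` package is installed; the parameter names here are made up:

```python
import warnings

import neps

# New-style names introduced by this PR:
space = dict(
    lr=neps.Float(lower=1e-4, upper=1e-1, log=True),
    num_layers=neps.Integer(lower=1, upper=8),
    optimizer=neps.Categorical(["adam", "sgd"]),
    batch_size=neps.Constant(64),
)

# Old-style names still resolve, but warn:
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    lr_old = neps.FloatParameter(lower=1e-4, upper=1e-1, log=True)

assert isinstance(lr_old, neps.Float)  # the shim subclasses the new type
assert any(issubclass(w.category, DeprecationWarning) for w in caught)
```
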
diff --git a/docs/index.md b/docs/index.md
index 84d22460b..2f6f69283 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -66,7 +66,7 @@ import logging

 # 1. Define a function that accepts hyperparameters and computes the validation error
 def run_pipeline(
-    hyperparameter_a: float, hyperparameter_b: int, architecture_parameter: str
+    hyperparameter_a: float, hyperparameter_b: int, architecture_parameter: str
 ) -> dict:
     # Create your model
     model = MyModel(architecture_parameter)
@@ -80,14 +80,13 @@ def run_pipeline(

 # 2. Define a search space of parameters; use the same parameter names as in run_pipeline
 pipeline_space = dict(
-    hyperparameter_a=neps.FloatParameter(
+    hyperparameter_a=neps.Float(
         lower=0.001, upper=0.1, log=True  # The search space is sampled in log space
     ),
-    hyperparameter_b=neps.IntegerParameter(lower=1, upper=42),
-    architecture_parameter=neps.CategoricalParameter(["option_a", "option_b"]),
+    hyperparameter_b=neps.Integer(lower=1, upper=42),
+    architecture_parameter=neps.Categorical(["option_a", "option_b"]),
 )
-
 # 3. Run the NePS optimization
 logging.basicConfig(level=logging.INFO)
 neps.run(
diff --git a/docs/reference/pipeline_space.md b/docs/reference/pipeline_space.md
index 0458f60d4..f0780378c 100644
--- a/docs/reference/pipeline_space.md
+++ b/docs/reference/pipeline_space.md
@@ -12,10 +12,10 @@ effectively incorporate various parameter types, ensuring that NePS can utilize

 ## Parameters
 NePS currently features 4 primary hyperparameter types:
-* [`CategoricalParameter`][neps.search_spaces.hyperparameters.categorical.CategoricalParameter]
-* [`FloatParameter`][neps.search_spaces.hyperparameters.float.FloatParameter]
-* [`IntegerParameter`][neps.search_spaces.hyperparameters.integer.IntegerParameter]
-* [`ConstantParameter`][neps.search_spaces.hyperparameters.constant.ConstantParameter]
+* [`Categorical`][neps.search_spaces.hyperparameters.categorical.Categorical]
+* [`Float`][neps.search_spaces.hyperparameters.float.Float]
+* [`Integer`][neps.search_spaces.hyperparameters.integer.Integer]
+* [`Constant`][neps.search_spaces.hyperparameters.constant.Constant]

 Using these types, you can define the parameters that NePS will optimize during the search process.
 The most basic way to pass these parameters is through a Python dictionary, where each key-value
@@ -25,32 +25,32 @@ for optimizing a deep learning model:

 ```python
 pipeline_space = {
-    "learning_rate": neps.FloatParameter(0.00001, 0.1, log=True),
-    "num_epochs": neps.IntegerParameter(3, 30, is_fidelity=True),
-    "optimizer": neps.CategoricalParameter(["adam", "sgd", "rmsprop"]),
-    "dropout_rate": neps.ConstantParameter(0.5),
+    "learning_rate": neps.Float(0.00001, 0.1, log=True),
+    "num_epochs": neps.Integer(3, 30, is_fidelity=True),
+    "optimizer": neps.Categorical(["adam", "sgd", "rmsprop"]),
+    "dropout_rate": neps.Constant(0.5),
 }

 neps.run(.., pipeline_space=pipeline_space)
 ```

 ??? 
example "Quick Parameter Reference" - === "`CategoricalParameter`" + === "`Categorical`" - ::: neps.search_spaces.hyperparameters.categorical.CategoricalParameter + ::: neps.search_spaces.hyperparameters.categorical.Categorical - === "`FloatParameter`" + === "`Float`" - ::: neps.search_spaces.hyperparameters.float.FloatParameter + ::: neps.search_spaces.hyperparameters.float.Float - === "`IntegerParameter`" + === "`Integer`" - ::: neps.search_spaces.hyperparameters.integer.IntegerParameter + ::: neps.search_spaces.hyperparameters.integer.Integer - === "`ConstantParameter`" + === "`Constant`" - ::: neps.search_spaces.hyperparameters.constant.ConstantParameter + ::: neps.search_spaces.hyperparameters.constant.Constant ## Using your knowledge, providing a Prior @@ -70,10 +70,10 @@ import neps neps.run( ..., pipeline_space={ - "learning_rate": neps.FloatParameter(1e-4, 1e-1, log=True, default=1e-2, default_confidence="medium"), - "num_epochs": neps.IntegerParameter(3, 30, is_fidelity=True), - "optimizer": neps.CategoricalParameter(["adam", "sgd", "rmsprop"], default="adam", default_confidence="low"), - "dropout_rate": neps.ConstantParameter(0.5), + "learning_rate": neps.Float(1e-4, 1e-1, log=True, default=1e-2, default_confidence="medium"), + "num_epochs": neps.Integer(3, 30, is_fidelity=True), + "optimizer": neps.Categorical(["adam", "sgd", "rmsprop"], default="adam", default_confidence="low"), + "dropout_rate": neps.Constant(0.5), } ) ``` diff --git a/neps/__init__.py b/neps/__init__.py index c27257ef8..4e59493f9 100644 --- a/neps/__init__.py +++ b/neps/__init__.py @@ -2,28 +2,29 @@ from neps.plot.plot import plot from neps.plot.tensorboard_eval import tblogger from neps.search_spaces import ( + Architecture, ArchitectureParameter, + Categorical, CategoricalParameter, + Constant, ConstantParameter, + Float, FloatParameter, + Function, FunctionParameter, GraphGrammar, + Integer, IntegerParameter, ) from neps.status.status import get_summary_dict, status -Integer = IntegerParameter -Float = FloatParameter -Categorical = CategoricalParameter -Constant = ConstantParameter -Architecture = ArchitectureParameter - __all__ = [ "Architecture", "Integer", "Float", "Categorical", "Constant", + "Function", "ArchitectureParameter", "CategoricalParameter", "ConstantParameter", diff --git a/neps/api.py b/neps/api.py index b6f7bdd15..d4878c882 100644 --- a/neps/api.py +++ b/neps/api.py @@ -115,7 +115,7 @@ def run( >>> validation_error = -some_parameter >>> return validation_error - >>> pipeline_space = dict(some_parameter=neps.FloatParameter(lower=0, upper=1)) + >>> pipeline_space = dict(some_parameter=neps.Float(lower=0, upper=1)) >>> logging.basicConfig(level=logging.INFO) >>> neps.run( diff --git a/neps/optimizers/grid_search/optimizer.py b/neps/optimizers/grid_search/optimizer.py index cf40976ec..9b3a3809b 100644 --- a/neps/optimizers/grid_search/optimizer.py +++ b/neps/optimizers/grid_search/optimizer.py @@ -11,10 +11,10 @@ from neps.optimizers.base_optimizer import BaseOptimizer, SampledConfig from neps.search_spaces.architecture.graph_grammar import GraphParameter from neps.search_spaces.domain import UNIT_FLOAT_DOMAIN -from neps.search_spaces.hyperparameters.categorical import CategoricalParameter -from neps.search_spaces.hyperparameters.constant import ConstantParameter -from neps.search_spaces.hyperparameters.float import FloatParameter -from neps.search_spaces.hyperparameters.integer import IntegerParameter +from neps.search_spaces.hyperparameters.categorical import Categorical +from 
neps.search_spaces.hyperparameters.constant import Constant +from neps.search_spaces.hyperparameters.float import Float +from neps.search_spaces.hyperparameters.integer import Integer if TYPE_CHECKING: from neps.search_spaces.search_space import SearchSpace @@ -29,16 +29,16 @@ def _make_grid( ) -> list[dict[str, Any]]: """Get a grid of configurations from the search space. - For [`NumericalParameter`][neps.search_spaces.NumericalParameter] hyperparameters, + For [`Numerical`][neps.search_spaces.Numerical] hyperparameters, the parameter `size_per_numerical_hp=` is used to determine a grid. If there are any duplicates, e.g. for an - [`IntegerParameter`][neps.search_spaces.IntegerParameter], then we will + [`Integer`][neps.search_spaces.Integer], then we will remove duplicates. - For [`CategoricalParameter`][neps.search_spaces.CategoricalParameter] + For [`Categorical`][neps.search_spaces.Categorical] hyperparameters, we include all the choices in the grid. - For [`ConstantParameter`][neps.search_spaces.ConstantParameter] hyperparameters, + For [`Constant`][neps.search_spaces.Constant] hyperparameters, we include the constant value in the grid. !!! note "TODO" @@ -65,11 +65,11 @@ def _make_grid( # If this is resolved, please update the docstring! case GraphParameter(): raise ValueError("Trying to create a grid for graphs!") - case CategoricalParameter(): + case Categorical(): param_ranges[name] = list(hp.choices) - case ConstantParameter(): + case Constant(): param_ranges[name] = [hp.value] - case IntegerParameter() | FloatParameter(): + case Integer() | Float(): if hp.is_fidelity: param_ranges[name] = [hp.upper] continue diff --git a/neps/optimizers/multi_fidelity/ifbo.py b/neps/optimizers/multi_fidelity/ifbo.py index cc264d8e5..8f873b2ce 100755 --- a/neps/optimizers/multi_fidelity/ifbo.py +++ b/neps/optimizers/multi_fidelity/ifbo.py @@ -19,7 +19,7 @@ from neps.sampling.samplers import Sampler from neps.search_spaces.domain import Domain from neps.search_spaces.encoding import CategoricalToUnitNorm, ConfigEncoder -from neps.search_spaces.search_space import FloatParameter, IntegerParameter, SearchSpace +from neps.search_spaces.search_space import Float, Integer, SearchSpace if TYPE_CHECKING: from neps.state.optimizer import BudgetInfo @@ -48,7 +48,7 @@ def _adjust_pipeline_space_to_match_stepsize( fidelity = pipeline_space.fidelity fidelity_name = pipeline_space.fidelity_name assert fidelity_name is not None - assert isinstance(fidelity, FloatParameter | IntegerParameter) + assert isinstance(fidelity, Float | Integer) if fidelity.log: raise NotImplementedError("Log fidelity not yet supported") diff --git a/neps/optimizers/multi_fidelity/successive_halving.py b/neps/optimizers/multi_fidelity/successive_halving.py index c8c40cf1c..8882b9cf9 100644 --- a/neps/optimizers/multi_fidelity/successive_halving.py +++ b/neps/optimizers/multi_fidelity/successive_halving.py @@ -20,10 +20,10 @@ RandomUniformPolicy, ) from neps.search_spaces import ( - CategoricalParameter, - ConstantParameter, - FloatParameter, - IntegerParameter, + Categorical, + Constant, + Float, + Integer, SearchSpace, ) @@ -34,12 +34,10 @@ logger = logging.getLogger(__name__) -CUSTOM_FLOAT_CONFIDENCE_SCORES = dict(FloatParameter.DEFAULT_CONFIDENCE_SCORES) +CUSTOM_FLOAT_CONFIDENCE_SCORES = dict(Float.DEFAULT_CONFIDENCE_SCORES) CUSTOM_FLOAT_CONFIDENCE_SCORES.update({"ultra": 0.05}) -CUSTOM_CATEGORICAL_CONFIDENCE_SCORES = dict( - CategoricalParameter.DEFAULT_CONFIDENCE_SCORES -) +CUSTOM_CATEGORICAL_CONFIDENCE_SCORES = 
dict(Categorical.DEFAULT_CONFIDENCE_SCORES) CUSTOM_CATEGORICAL_CONFIDENCE_SCORES.update({"ultra": 8}) @@ -187,7 +185,7 @@ def _get_rung_map(self, s: int = 0) -> dict: for i in reversed(range(nrungs)): rung_map[i + s] = ( int(_max_budget) - if isinstance(self.pipeline_space.fidelity, IntegerParameter) + if isinstance(self.pipeline_space.fidelity, Integer) else _max_budget ) _max_budget /= self.eta @@ -465,15 +463,15 @@ def _enhance_priors(self, confidence_score: dict[str, float] | None = None) -> N return for k, v in self.pipeline_space.items(): - if v.is_fidelity or isinstance(v, ConstantParameter): + if v.is_fidelity or isinstance(v, Constant): continue - if isinstance(v, FloatParameter | IntegerParameter): + if isinstance(v, Float | Integer): if confidence_score is None: confidence = CUSTOM_FLOAT_CONFIDENCE_SCORES[self.prior_confidence] else: confidence = confidence_score["numeric"] self.pipeline_space[k].default_confidence_score = confidence - elif isinstance(v, CategoricalParameter): + elif isinstance(v, Categorical): if confidence_score is None: confidence = CUSTOM_CATEGORICAL_CONFIDENCE_SCORES[ self.prior_confidence diff --git a/neps/optimizers/multi_fidelity_prior/utils.py b/neps/optimizers/multi_fidelity_prior/utils.py index 7f49cb45c..b0e164ef9 100644 --- a/neps/optimizers/multi_fidelity_prior/utils.py +++ b/neps/optimizers/multi_fidelity_prior/utils.py @@ -7,14 +7,14 @@ from neps.sampling.priors import Prior from neps.search_spaces import ( - CategoricalParameter, - ConstantParameter, + Categorical, + Constant, GraphParameter, SearchSpace, ) from neps.search_spaces.encoding import ConfigEncoder -from neps.search_spaces.hyperparameters.float import FloatParameter -from neps.search_spaces.hyperparameters.integer import IntegerParameter +from neps.search_spaces.hyperparameters.float import Float +from neps.search_spaces.hyperparameters.integer import Integer if TYPE_CHECKING: import pandas as pd @@ -44,7 +44,7 @@ def local_mutation( for name, parameter in space.hyperparameters.items(): if ( parameter.is_fidelity - or isinstance(parameter, ConstantParameter) + or isinstance(parameter, Constant) or np.random.uniform() > mutation_rate ): parameters_to_keep[name] = parameter.value @@ -58,14 +58,14 @@ def local_mutation( for hp_name, hp in parameters_to_mutate.items(): match hp: - case CategoricalParameter(): + case Categorical(): assert hp._value_index is not None perm: list[int] = torch.randperm(len(hp.choices)).tolist() ix = perm[0] if perm[0] != hp._value_index else perm[1] new_config[hp_name] = hp.choices[ix] case GraphParameter(): new_config[hp_name] = hp.mutate(mutation_strategy="bananas") - case IntegerParameter() | FloatParameter(): + case Integer() | Float(): prior = Prior.from_parameters( {hp_name: hp}, confidence_values={hp_name: (1 - std)}, diff --git a/neps/sampling/priors.py b/neps/sampling/priors.py index 7624bd4a7..22e3d5d2b 100644 --- a/neps/sampling/priors.py +++ b/neps/sampling/priors.py @@ -22,14 +22,14 @@ TruncatedNormal, ) from neps.sampling.samplers import Sampler -from neps.search_spaces import CategoricalParameter +from neps.search_spaces import Categorical from neps.search_spaces.domain import UNIT_FLOAT_DOMAIN, Domain from neps.search_spaces.encoding import ConfigEncoder if TYPE_CHECKING: from torch.distributions import Distribution - from neps.search_spaces import FloatParameter, IntegerParameter + from neps.search_spaces import Float, Integer from neps.search_spaces.search_space import SearchSpace @@ -119,7 +119,7 @@ def from_parameters( cls, parameters: 
Mapping[ str, - CategoricalParameter | FloatParameter | IntegerParameter, + Categorical | Float | Integer, ], *, center_values: Mapping[str, Any] | None = None, @@ -152,11 +152,7 @@ def from_parameters( name, _mapping[hp.default_confidence_choice], ) - center = ( - hp.choices.index(default) - if isinstance(hp, CategoricalParameter) - else default - ) + center = hp.choices.index(default) if isinstance(hp, Categorical) else default centers.append((center, confidence_score)) return Prior.from_domains_and_centers(domains=domains, centers=centers) diff --git a/neps/search_spaces/__init__.py b/neps/search_spaces/__init__.py index 7e2c289b8..b726b8ae1 100644 --- a/neps/search_spaces/__init__.py +++ b/neps/search_spaces/__init__.py @@ -1,29 +1,46 @@ -from neps.search_spaces.architecture.api import ArchitectureParameter, FunctionParameter +from neps.search_spaces.architecture.api import ( + Architecture, + ArchitectureParameter, + Function, + FunctionParameter, +) from neps.search_spaces.architecture.graph_grammar import ( CoreGraphGrammar, GraphGrammar, GraphParameter, ) from neps.search_spaces.hyperparameters import ( + Categorical, CategoricalParameter, + Constant, ConstantParameter, + Float, FloatParameter, + Integer, IntegerParameter, + Numerical, NumericalParameter, ) from neps.search_spaces.parameter import Parameter, ParameterWithPrior from neps.search_spaces.search_space import SearchSpace __all__ = [ + "Architecture", "ArchitectureParameter", + "Categorical", "CategoricalParameter", + "Constant", "ConstantParameter", "CoreGraphGrammar", + "Float", "FloatParameter", + "Function", "FunctionParameter", "GraphGrammar", "GraphParameter", + "Integer", "IntegerParameter", + "Numerical", "NumericalParameter", "Parameter", "ParameterWithPrior", diff --git a/neps/search_spaces/architecture/api.py b/neps/search_spaces/architecture/api.py index ff5f74e4a..9521bd7aa 100644 --- a/neps/search_spaces/architecture/api.py +++ b/neps/search_spaces/architecture/api.py @@ -51,7 +51,7 @@ def _build(graph, set_recursive_attribute): graph.edges[e].update(set_recursive_attribute(op_name, predecessor_values)) -def ArchitectureParameter(**kwargs): +def Architecture(**kwargs): """Factory function.""" if "structure" not in kwargs: raise ValueError("Factory function requires structure") @@ -155,11 +155,51 @@ def to_pytorch(self) -> nn.Module: return super().to_pytorch() # create PyTorch model def create_new_instance_from_id(self, identifier: str): - g = ArchitectureParameter(**self.input_kwargs) # type: ignore[arg-type] + g = Architecture(**self.input_kwargs) # type: ignore[arg-type] g.load_from(identifier) return g return _FunctionParameter(**kwargs) -FunctionParameter = ArchitectureParameter +def ArchitectureParameter(**kwargs): + """Deprecated: Use `Architecture` instead of `ArchitectureParameter`. + + This function remains for backward compatibility and will raise a deprecation + warning if used. + """ + import warnings + + warnings.warn( + ( + "Usage of 'neps.ArchitectureParameter' is deprecated and will be removed in" + " future releases. Please use 'neps.Architecture' instead." + ), + DeprecationWarning, + stacklevel=2, + ) + + return Architecture(**kwargs) + + +Function = Architecture + + +def FunctionParameter(**kwargs): + """Deprecated: Use `Function` instead of `FunctionParameter`. + + This function remains for backward compatibility and will raise a deprecation + warning if used. 
+ """ + import warnings + + warnings.warn( + ( + "Usage of 'neps.FunctionParameter' is deprecated and will be removed in" + " future releases. Please use 'neps.Function' instead." + ), + DeprecationWarning, + stacklevel=2, + ) + + return Function(**kwargs) diff --git a/neps/search_spaces/encoding.py b/neps/search_spaces/encoding.py index b13511f21..3de6a3f15 100644 --- a/neps/search_spaces/encoding.py +++ b/neps/search_spaces/encoding.py @@ -16,9 +16,9 @@ import torch from neps.search_spaces.domain import UNIT_FLOAT_DOMAIN, Domain -from neps.search_spaces.hyperparameters.categorical import CategoricalParameter -from neps.search_spaces.hyperparameters.float import FloatParameter -from neps.search_spaces.hyperparameters.integer import IntegerParameter +from neps.search_spaces.hyperparameters.categorical import Categorical +from neps.search_spaces.hyperparameters.float import Float +from neps.search_spaces.hyperparameters.integer import Integer if TYPE_CHECKING: from neps.search_spaces.parameter import Parameter @@ -470,8 +470,8 @@ def from_space( automatically creates transformers for each hyperparameter based on its type. The transformers are as follows: - * `FloatParameter` and `IntegerParameter` are normalized to the unit interval. - * `CategoricalParameter` is transformed into an integer. + * `Float` and `Integer` are normalized to the unit interval. + * `Categorical` is transformed into an integer. Args: space: The search space to build an encoder for @@ -511,8 +511,8 @@ def from_parameters( automatically creates transformers for each hyperparameter based on its type. The transformers are as follows: - * `FloatParameter` and `IntegerParameter` are normalized to the unit interval. - * `CategoricalParameter` is transformed into an integer. + * `Float` and `Integer` are normalized to the unit interval. + * `Categorical` is transformed into an integer. Args: parameters: A mapping of hyperparameter names to hyperparameters. 
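
Illustration (not part of the patch): the transformer defaults described in the `from_parameters` docstring above can be exercised directly. A minimal sketch with made-up parameter names, assuming the post-PR package; per the `match` statement in the next hunk, `Float`/`Integer` are given a `MinMaxNormalizer` onto the unit interval and `Categorical` a `CategoricalToIntegerTransformer`:

```python
import neps
from neps.search_spaces.encoding import ConfigEncoder

parameters = {
    "lr": neps.Float(1e-4, 1e-1, log=True),          # -> MinMaxNormalizer
    "layers": neps.Integer(1, 8),                    # -> MinMaxNormalizer
    "optimizer": neps.Categorical(["adam", "sgd"]),  # -> CategoricalToIntegerTransformer
}
encoder = ConfigEncoder.from_parameters(parameters)
```
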
@@ -547,9 +547,9 @@ def from_parameters( continue match hp: - case FloatParameter() | IntegerParameter(): + case Float() | Integer(): transformers[name] = MinMaxNormalizer(hp.domain) # type: ignore - case CategoricalParameter(): + case Categorical(): transformers[name] = CategoricalToIntegerTransformer(hp.choices) case _: raise ValueError( diff --git a/neps/search_spaces/hyperparameters/__init__.py b/neps/search_spaces/hyperparameters/__init__.py index 98ed1aa9f..bae71ab7d 100644 --- a/neps/search_spaces/hyperparameters/__init__.py +++ b/neps/search_spaces/hyperparameters/__init__.py @@ -1,10 +1,18 @@ -from neps.search_spaces.hyperparameters.categorical import CategoricalParameter -from neps.search_spaces.hyperparameters.constant import ConstantParameter -from neps.search_spaces.hyperparameters.float import FloatParameter -from neps.search_spaces.hyperparameters.integer import IntegerParameter -from neps.search_spaces.hyperparameters.numerical import NumericalParameter +from neps.search_spaces.hyperparameters.categorical import ( + Categorical, + CategoricalParameter, +) +from neps.search_spaces.hyperparameters.constant import Constant, ConstantParameter +from neps.search_spaces.hyperparameters.float import Float, FloatParameter +from neps.search_spaces.hyperparameters.integer import Integer, IntegerParameter +from neps.search_spaces.hyperparameters.numerical import Numerical, NumericalParameter __all__ = [ + "Categorical", + "Constant", + "Integer", + "Float", + "Numerical", "CategoricalParameter", "ConstantParameter", "IntegerParameter", diff --git a/neps/search_spaces/hyperparameters/categorical.py b/neps/search_spaces/hyperparameters/categorical.py index 35426af0f..8e24690f1 100644 --- a/neps/search_spaces/hyperparameters/categorical.py +++ b/neps/search_spaces/hyperparameters/categorical.py @@ -19,19 +19,19 @@ CategoricalTypes: TypeAlias = float | int | str -class CategoricalParameter(ParameterWithPrior[CategoricalTypes, CategoricalTypes]): +class Categorical(ParameterWithPrior[CategoricalTypes, CategoricalTypes]): """A list of **unordered** choices for a parameter. This kind of [`Parameter`][neps.search_spaces.parameter] is used to represent hyperparameters that can take on a discrete set of unordered values. For example, the `optimizer` hyperparameter in a neural network - search space can be a `CategoricalParameter` with choices like + search space can be a `Categorical` with choices like `#!python ["adam", "sgd", "rmsprop"]`. ```python import neps - optimizer_choice = neps.CategoricalParameter( + optimizer_choice = neps.Categorical( ["adam", "sgd", "rmsprop"], default="adam" ) @@ -55,7 +55,7 @@ def __init__( default: float | int | str | None = None, default_confidence: Literal["low", "medium", "high"] = "low", ): - """Create a new `CategoricalParameter`. + """Create a new `Categorical`. Args: choices: choices for the hyperparameter. @@ -166,3 +166,47 @@ def set_value(self, value: Any | None) -> None: self._value = value self._value_index = self.choices.index(value) self.normalized_value = float(self._value_index) + + +class CategoricalParameter(Categorical): + """Deprecated: Use `Categorical` instead of `CategoricalParameter`. + + This class remains for backward compatibility and will raise a deprecation + warning if used. + """ + + def __init__( + self, + choices: Iterable[float | int | str], + *, + default: float | int | str | None = None, + default_confidence: Literal["low", "medium", "high"] = "low", + ): + """Initialize a deprecated `CategoricalParameter`. 
+
+        Args:
+            choices: choices for the hyperparameter.
+            default: default value for the hyperparameter, must be in `choices=`
+                if provided.
+            default_confidence: confidence score for the default value, used when
+                considering prior-based optimization.
+
+        Raises:
+            DeprecationWarning: A warning indicating that `neps.CategoricalParameter` is
+                deprecated and `neps.Categorical` should be used instead.
+        """
+        import warnings
+
+        warnings.warn(
+            (
+                "Usage of 'neps.CategoricalParameter' is deprecated and will be removed "
+                "in future releases. Please use 'neps.Categorical' instead."
+            ),
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        super().__init__(
+            choices=choices,
+            default=default,
+            default_confidence=default_confidence,
+        )
diff --git a/neps/search_spaces/hyperparameters/constant.py b/neps/search_spaces/hyperparameters/constant.py
index 5f17abd9d..8dfbfdd10 100644
--- a/neps/search_spaces/hyperparameters/constant.py
+++ b/neps/search_spaces/hyperparameters/constant.py
@@ -10,23 +10,23 @@
 T = TypeVar("T", int, float, str)


-class ConstantParameter(Parameter[T, T]):
+class Constant(Parameter[T, T]):
     """A constant value for a parameter.

     This kind of [`Parameter`][neps.search_spaces.parameter] is used
     to represent hyperparameters with values that should not change during
     optimization. For example, the `batch_size` hyperparameter in a neural
-    network search space can be a `ConstantParameter` with a value of `32`.
+    network search space can be a `Constant` with a value of `32`.

     ```python
     import neps

-    batch_size = neps.ConstantParameter(32)
+    batch_size = neps.Constant(32)
     ```

     !!! note

-        As the name suggests, the value of a `ConstantParameter` only have one
+        As the name suggests, the value of a `Constant` only has one
         value and so its
         [`.default`][neps.search_spaces.parameter.Parameter.default] and
         [`.value`][neps.search_spaces.parameter.Parameter.value] should always
         be the same.

         This also implies that
         [`.default`][neps.search_spaces.parameter.Parameter.default] can never be `None`.

         Please use
-        [`.set_constant_value()`][neps.search_spaces.hyperparameters.constant.ConstantParameter.set_constant_value]
+        [`.set_constant_value()`][neps.search_spaces.hyperparameters.constant.Constant.set_constant_value]
         if you need to change the value of the constant parameter.
     """

     def __init__(self, value: T):
-        """Create a new `ConstantParameter`.
+        """Create a new `Constant`.

         Args:
             value: value for the hyperparameter.
@@ -82,7 +82,7 @@ def set_value(self, value: T | None) -> None:
         is different from the current value.

         Please see
-        [`.set_constant_value()`][neps.search_spaces.hyperparameters.constant.ConstantParameter.set_constant_value]
+        [`.set_constant_value()`][neps.search_spaces.hyperparameters.constant.Constant.set_constant_value]
         which can be used to set both the
         [`.value`][neps.search_spaces.parameter.Parameter.value] and the
         [`.default`][neps.search_spaces.parameter.Parameter.default] at once
@@ -119,3 +119,33 @@ def value_to_normalized(self, value: T) -> float:
     @override
     def normalized_to_value(self, normalized_value: float) -> T:
         return self._value
+
+
+class ConstantParameter(Constant):
+    """Deprecated: Use `Constant` instead of `ConstantParameter`.
+
+    This class remains for backward compatibility and will raise a deprecation
+    warning if used.
+    """
+
+    def __init__(self, value: T):
+        """Initialize a deprecated `ConstantParameter`.
+
+        Args:
+            value: value for the hyperparameter.
+
+        Raises:
+            DeprecationWarning: A warning indicating that `neps.ConstantParameter` is
+                deprecated and `neps.Constant` should be used instead.
+        """
+        import warnings
+
+        warnings.warn(
+            (
+                "Usage of 'neps.ConstantParameter' is deprecated and will be removed in"
+                " future releases. Please use 'neps.Constant' instead."
+            ),
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        super().__init__(value=value)
diff --git a/neps/search_spaces/hyperparameters/float.py b/neps/search_spaces/hyperparameters/float.py
index 7f6e55d4c..f90030249 100644
--- a/neps/search_spaces/hyperparameters/float.py
+++ b/neps/search_spaces/hyperparameters/float.py
@@ -10,13 +10,13 @@
 import numpy as np

 from neps.search_spaces.domain import Domain
-from neps.search_spaces.hyperparameters.numerical import NumericalParameter
+from neps.search_spaces.hyperparameters.numerical import Numerical

 if TYPE_CHECKING:
     from neps.utils.types import Number


-class FloatParameter(NumericalParameter[float]):
+class Float(Numerical[float]):
     """A float value for a parameter.

     This kind of [`Parameter`][neps.search_spaces.parameter] is used
@@ -24,17 +24,17 @@ class FloatParameter(NumericalParameter[float]):
     it exists on a log scale.

     For example, `l2_norm` could be a value in `(0.1)`, while the `learning_rate`
-    hyperparameter in a neural network search space can be a `FloatParameter`
+    hyperparameter in a neural network search space can be a `Float`
     with a range of `(0.0001, 0.1)` but on a log scale.

     ```python
     import neps

-    l2_norm = neps.FloatParameter(0, 1)
-    learning_rate = neps.FloatParameter(1e-4, 1e-1, log=True)
+    l2_norm = neps.Float(0, 1)
+    learning_rate = neps.Float(1e-4, 1e-1, log=True)
     ```

-    Please see the [`NumericalParameter`][neps.search_spaces.numerical.NumericalParameter]
+    Please see the [`Numerical`][neps.search_spaces.numerical.Numerical]
     class for more details on the methods available for this class.
     """

@@ -54,7 +54,7 @@ def __init__(
         default: Number | None = None,
         default_confidence: Literal["low", "medium", "high"] = "low",
     ):
-        """Create a new `FloatParameter`.
+        """Create a new `Float`.

         Args:
             lower: lower bound for the hyperparameter.
@@ -155,3 +155,55 @@ def normalized_to_value(self, normalized_value: float) -> float:
     def __repr__(self) -> str:
         float_repr = f"{self.value:.07f}" if self.value is not None else "None"
         return f"<Float, range: [{self.lower}, {self.upper}], value: {float_repr}>"
+
+
+class FloatParameter(Float):
+    """Deprecated: Use `Float` instead of `FloatParameter`.
+
+    This class remains for backward compatibility and will raise a deprecation
+    warning if used.
+    """
+
+    def __init__(
+        self,
+        lower: Number,
+        upper: Number,
+        *,
+        log: bool = False,
+        is_fidelity: bool = False,
+        default: Number | None = None,
+        default_confidence: Literal["low", "medium", "high"] = "low",
+    ):
+        """Initialize a deprecated `FloatParameter`.
+
+        Args:
+            lower: lower bound for the hyperparameter.
+            upper: upper bound for the hyperparameter.
+            log: whether the hyperparameter is on a log scale.
+            is_fidelity: whether the hyperparameter is fidelity.
+            default: default value for the hyperparameter.
+            default_confidence: confidence score for the default value, used when
+                considering prior-based optimization.
+
+        Raises:
+            DeprecationWarning: A warning indicating that `neps.FloatParameter` is
+                deprecated and `neps.Float` should be used instead.
+        """
+        import warnings
+
+        warnings.warn(
+            (
+                "Usage of 'neps.FloatParameter' is deprecated and will be removed in"
+                " future releases. Please use 'neps.Float' instead."
+            ),
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        super().__init__(
+            lower=lower,
+            upper=upper,
+            log=log,
+            is_fidelity=is_fidelity,
+            default=default,
+            default_confidence=default_confidence,
+        )
diff --git a/neps/search_spaces/hyperparameters/integer.py b/neps/search_spaces/hyperparameters/integer.py
index be60da6de..2644cd94b 100644
--- a/neps/search_spaces/hyperparameters/integer.py
+++ b/neps/search_spaces/hyperparameters/integer.py
@@ -9,28 +9,28 @@
 import numpy as np

 from neps.search_spaces.domain import Domain
-from neps.search_spaces.hyperparameters.float import FloatParameter
-from neps.search_spaces.hyperparameters.numerical import NumericalParameter
+from neps.search_spaces.hyperparameters.float import Float
+from neps.search_spaces.hyperparameters.numerical import Numerical

 if TYPE_CHECKING:
     from neps.utils.types import Number


-class IntegerParameter(NumericalParameter[int]):
+class Integer(Numerical[int]):
     """An integer value for a parameter.

     This kind of [`Parameter`][neps.search_spaces.parameter] is used
     to represent hyperparameters with continuous integer values, optionally specifying
     f it exists on a log scale.

     For example, `batch_size` could be a value in `(32, 128)`, while the `num_layers`
-    hyperparameter in a neural network search space can be a `IntegerParameter`
+    hyperparameter in a neural network search space can be an `Integer`
     with a range of `(1, 1000)` but on a log scale.

     ```python
     import neps

-    batch_size = neps.IntegerParameter(32, 128)
-    num_layers = neps.IntegerParameter(1, 1000, log=True)
+    batch_size = neps.Integer(32, 128)
+    num_layers = neps.Integer(1, 1000, log=True)
     ```
     """

@@ -50,7 +50,7 @@ def __init__(
         default: Number | None = None,
         default_confidence: Literal["low", "medium", "high"] = "low",
     ):
-        """Create a new `IntegerParameter`.
+        """Create a new `Integer`.

         Args:
             lower: lower bound for the hyperparameter.
@@ -66,7 +66,7 @@ def __init__(
         _size = upper - lower + 1
         if _size <= 1:
             raise ValueError(
-                f"IntegerParameter: expected at least 2 possible values in the range,"
+                f"Integer: expected at least 2 possible values in the range,"
                 f" got upper={upper}, lower={lower}."
             )

@@ -83,7 +83,7 @@ def __init__(
         # We subtract/add 0.499999 from lower/upper bounds respectively, such that
         # sampling in the float space gives equal probability for all integer values,
         # i.e. [x - 0.499999, x + 0.499999]
-        self.float_hp = FloatParameter(
+        self.float_hp = Float(
             lower=self.lower - 0.499999,
             upper=self.upper + 0.499999,
             log=self.log,
@@ -148,3 +148,55 @@ def value_to_normalized(self, value: int) -> float:
     @override
     def normalized_to_value(self, normalized_value: float) -> int:
         return int(np.rint(self.float_hp.normalized_to_value(normalized_value)))
+
+
+class IntegerParameter(Integer):
+    """Deprecated: Use `Integer` instead of `IntegerParameter`.
+
+    This class remains for backward compatibility and will raise a deprecation
+    warning if used.
+    """
+
+    def __init__(
+        self,
+        lower: Number,
+        upper: Number,
+        *,
+        log: bool = False,
+        is_fidelity: bool = False,
+        default: Number | None = None,
+        default_confidence: Literal["low", "medium", "high"] = "low",
+    ):
+        """Initialize a deprecated `IntegerParameter`.
+
+        Args:
+            lower: lower bound for the hyperparameter.
+            upper: upper bound for the hyperparameter.
+            log: whether the hyperparameter is on a log scale.
+            is_fidelity: whether the hyperparameter is fidelity.
+            default: default value for the hyperparameter.
+            default_confidence: confidence score for the default value, used when
+                considering prior-based optimization.
+
+        Raises:
+            DeprecationWarning: A warning indicating that `neps.IntegerParameter` is
+                deprecated and `neps.Integer` should be used instead.
+        """
+        import warnings
+
+        warnings.warn(
+            (
+                "Usage of 'neps.IntegerParameter' is deprecated and will be removed in"
+                " future releases. Please use 'neps.Integer' instead."
+            ),
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        super().__init__(
+            lower=lower,
+            upper=upper,
+            log=log,
+            is_fidelity=is_fidelity,
+            default=default,
+            default_confidence=default_confidence,
+        )
diff --git a/neps/search_spaces/hyperparameters/numerical.py b/neps/search_spaces/hyperparameters/numerical.py
index a52405082..a6bd56736 100644
--- a/neps/search_spaces/hyperparameters/numerical.py
+++ b/neps/search_spaces/hyperparameters/numerical.py
@@ -1,15 +1,15 @@
-"""The [`NumericalParameter`][neps.search_spaces.NumericalParameter] is
+"""The [`Numerical`][neps.search_spaces.Numerical] is
 a [`Parameter`][neps.search_spaces.Parameter] that represents
 a numerical range.

 The two primary numerical hyperparameters are:

-* [`FloatParameter`][neps.search_spaces.FloatParameter] for continuous
+* [`Float`][neps.search_spaces.Float] for continuous
     float values.
-* [`IntegerParameter`][neps.search_spaces.IntegerParameter] for discrete
+* [`Integer`][neps.search_spaces.Integer] for discrete
     integer values.

-The [`NumericalParameter`][neps.search_spaces.NumericalParameter] is a
+The [`Numerical`][neps.search_spaces.Numerical] is a
 base class for both of these hyperparameters, and includes methods from both
 [`ParameterWithPrior`][neps.search_spaces.ParameterWithPrior],
 allowing you to set a confidence along with a
@@ -52,7 +52,7 @@ def _get_truncnorm_prior_and_std(
     return scipy.stats.truncnorm(a, b), float(std)


-class NumericalParameter(ParameterWithPrior[T, T]):
+class Numerical(ParameterWithPrior[T, T]):
     """A numerical hyperparameter is bounded by a lower and upper value.

     Attributes:
@@ -180,3 +180,57 @@ def _get_truncnorm_prior_and_std(self) -> tuple[TruncNorm, float]:
             default=default,
             confidence_score=self.default_confidence_score,
         )
+
+
+class NumericalParameter(Numerical):
+    """Deprecated: Use `Numerical` instead of `NumericalParameter`.
+
+    This class remains for backward compatibility and will raise a deprecation
+    warning if used.
+    """
+
+    def __init__(
+        self,
+        lower: T,
+        upper: T,
+        *,
+        log: bool = False,
+        default: T | None,
+        is_fidelity: bool,
+        domain: Domain[T],
+        default_confidence: Literal["low", "medium", "high"] = "low",
+    ):
+        """Initialize a deprecated `NumericalParameter`.
+
+        Args:
+            lower: The lower bound of the numerical hyperparameter.
+            upper: The upper bound of the numerical hyperparameter.
+            log: Whether the hyperparameter is in log space.
+            default: The default value of the hyperparameter.
+            is_fidelity: Whether the hyperparameter is a fidelity parameter.
+            domain: The domain of the hyperparameter.
+            default_confidence: The default confidence choice.
+
+        Raises:
+            DeprecationWarning: A warning indicating that `neps.NumericalParameter` is
+                deprecated and `neps.Numerical` should be used instead.
+        """
+        import warnings
+
+        warnings.warn(
+            (
+                "Usage of 'neps.NumericalParameter' is deprecated and will be removed in"
+                " future releases. Please use 'neps.Numerical' instead."
+ ), + DeprecationWarning, + stacklevel=2, + ) + super().__init__( + lower=lower, + upper=upper, + log=log, + default=default, + is_fidelity=is_fidelity, + domain=domain, + default_confidence=default_confidence, + ) diff --git a/neps/search_spaces/parameter.py b/neps/search_spaces/parameter.py index 8f2c07943..ea94a84d6 100644 --- a/neps/search_spaces/parameter.py +++ b/neps/search_spaces/parameter.py @@ -129,10 +129,10 @@ def value_to_normalized(self, value: ValueT) -> float: but roughly refers to numeric values. * `(0, 1)` scaling in the case of - a [`NumericalParameter`][neps.search_spaces.NumericalParameter], - * `{0.0, 1.0}` for a [`ConstantParameter`][neps.search_spaces.ConstantParameter], + a [`Numerical`][neps.search_spaces.Numerical], + * `{0.0, 1.0}` for a [`Constant`][neps.search_spaces.Constant], * `[0, 1, ..., n]` for a - [`Categorical`][neps.search_spaces.CategoricalParameter]. + [`Categorical`][neps.search_spaces.Categorical]. Args: value: value to convert. diff --git a/neps/search_spaces/search_space.py b/neps/search_spaces/search_space.py index 46c329fa3..2573167e4 100644 --- a/neps/search_spaces/search_space.py +++ b/neps/search_spaces/search_space.py @@ -15,11 +15,11 @@ from neps.search_spaces.architecture.graph_grammar import GraphParameter from neps.search_spaces.hyperparameters import ( - CategoricalParameter, - ConstantParameter, - FloatParameter, - IntegerParameter, - NumericalParameter, + Categorical, + Constant, + Float, + Integer, + Numerical, ) from neps.search_spaces.parameter import Parameter, ParameterWithPrior from neps.search_spaces.yaml_search_space_utils import ( @@ -56,26 +56,26 @@ def pipeline_space_from_configspace( for hyperparameter in configspace.get_hyperparameters(): if isinstance(hyperparameter, CS.Constant): - parameter = ConstantParameter(value=hyperparameter.value) + parameter = Constant(value=hyperparameter.value) elif isinstance(hyperparameter, CS.CategoricalHyperparameter): - parameter = CategoricalParameter( + parameter = Categorical( hyperparameter.choices, default=hyperparameter.default_value, ) elif isinstance(hyperparameter, CS.OrdinalHyperparameter): - parameter = CategoricalParameter( + parameter = Categorical( hyperparameter.sequence, default=hyperparameter.default_value, ) elif isinstance(hyperparameter, CS.UniformIntegerHyperparameter): - parameter = IntegerParameter( + parameter = Integer( lower=hyperparameter.lower, upper=hyperparameter.upper, log=hyperparameter.log, default=hyperparameter.default_value, ) elif isinstance(hyperparameter, CS.UniformFloatHyperparameter): - parameter = FloatParameter( + parameter = Float( lower=hyperparameter.lower, upper=hyperparameter.upper, log=hyperparameter.log, @@ -134,16 +134,16 @@ def pipeline_space_from_yaml( # noqa: C901 if param_type in ("int", "integer"): formatted_details = formatting_int(name, details) - pipeline_space[name] = IntegerParameter(**formatted_details) + pipeline_space[name] = Integer(**formatted_details) elif param_type == "float": formatted_details = formatting_float(name, details) - pipeline_space[name] = FloatParameter(**formatted_details) + pipeline_space[name] = Float(**formatted_details) elif param_type in ("cat", "categorical"): formatted_details = formatting_cat(name, details) - pipeline_space[name] = CategoricalParameter(**formatted_details) + pipeline_space[name] = Categorical(**formatted_details) elif param_type == "const": const_details = formatting_const(details) - pipeline_space[name] = ConstantParameter(const_details) # type: ignore + pipeline_space[name] = 
Constant(const_details) # type: ignore else: # Handle unknown parameter type raise TypeError( @@ -189,7 +189,7 @@ def __init__(self, **hyperparameters: Parameter): """ # Ensure a consistent ordering for uses throughout the lib _hyperparameters = sorted(hyperparameters.items(), key=lambda x: x[0]) - _fidelity_param: NumericalParameter | None = None + _fidelity_param: Numerical | None = None _fidelity_name: str | None = None _has_prior: bool = False @@ -202,7 +202,7 @@ def __init__(self, **hyperparameters: Parameter): "multiple is_fidelity=True)" ) - if not isinstance(hp, NumericalParameter): + if not isinstance(hp, Numerical): raise ValueError( f"Only float and integer fidelities supported, got {hp}" ) @@ -214,28 +214,28 @@ def __init__(self, **hyperparameters: Parameter): _has_prior = True self.hyperparameters: dict[str, Parameter] = dict(_hyperparameters) - self.fidelity: NumericalParameter | None = _fidelity_param + self.fidelity: Numerical | None = _fidelity_param self.fidelity_name: str | None = _fidelity_name self.has_prior: bool = _has_prior - self.categoricals: Mapping[str, CategoricalParameter] = { - k: hp for k, hp in _hyperparameters if isinstance(hp, CategoricalParameter) + self.categoricals: Mapping[str, Categorical] = { + k: hp for k, hp in _hyperparameters if isinstance(hp, Categorical) } - self.numerical: Mapping[str, IntegerParameter | FloatParameter] = { + self.numerical: Mapping[str, Integer | Float] = { k: hp for k, hp in _hyperparameters - if isinstance(hp, IntegerParameter | FloatParameter) and not hp.is_fidelity + if isinstance(hp, Integer | Float) and not hp.is_fidelity } self.graphs: Mapping[str, GraphParameter] = { k: hp for k, hp in _hyperparameters if isinstance(hp, GraphParameter) } self.constants: Mapping[str, Any] = { - k: hp.value for k, hp in _hyperparameters if isinstance(hp, ConstantParameter) + k: hp.value for k, hp in _hyperparameters if isinstance(hp, Constant) } # NOTE: For future of multiple fidelities - self.fidelities: Mapping[str, IntegerParameter | FloatParameter] = {} + self.fidelities: Mapping[str, Integer | Float] = {} if _fidelity_param is not None and _fidelity_name is not None: - assert isinstance(_fidelity_param, IntegerParameter | FloatParameter) + assert isinstance(_fidelity_param, Integer | Float) self.fidelities = {_fidelity_name: _fidelity_param} def sample( diff --git a/neps_examples/basic_usage/architecture.py b/neps_examples/basic_usage/architecture.py index cc73029ae..adca3544f 100644 --- a/neps_examples/basic_usage/architecture.py +++ b/neps_examples/basic_usage/architecture.py @@ -114,7 +114,7 @@ def run_pipeline(architecture): pipeline_space = dict( - architecture=neps.ArchitectureParameter( + architecture=neps.Architecture( set_recursive_attribute=set_recursive_attribute, structure=structure, primitives=primitives, diff --git a/neps_examples/basic_usage/architecture_and_hyperparameters.py b/neps_examples/basic_usage/architecture_and_hyperparameters.py index c83f3eac8..b7f4bd637 100644 --- a/neps_examples/basic_usage/architecture_and_hyperparameters.py +++ b/neps_examples/basic_usage/architecture_and_hyperparameters.py @@ -109,13 +109,13 @@ def run_pipeline(**config): pipeline_space = dict( - architecture=neps.ArchitectureParameter( + architecture=neps.Architecture( set_recursive_attribute=set_recursive_attribute, structure=structure, primitives=primitives, ), - optimizer=neps.CategoricalParameter(choices=["sgd", "adam"]), - learning_rate=neps.FloatParameter(lower=10e-7, upper=10e-3, log=True), + 
optimizer=neps.Categorical(choices=["sgd", "adam"]), + learning_rate=neps.Float(lower=10e-7, upper=10e-3, log=True), ) logging.basicConfig(level=logging.INFO) diff --git a/neps_examples/basic_usage/hyperparameters.py b/neps_examples/basic_usage/hyperparameters.py index 164b49cbd..724974ae0 100644 --- a/neps_examples/basic_usage/hyperparameters.py +++ b/neps_examples/basic_usage/hyperparameters.py @@ -13,11 +13,11 @@ def run_pipeline(float1, float2, categorical, integer1, integer2): pipeline_space = dict( - float1=neps.FloatParameter(lower=0, upper=1), - float2=neps.FloatParameter(lower=-10, upper=10), - categorical=neps.CategoricalParameter(choices=[0, 1]), - integer1=neps.IntegerParameter(lower=0, upper=1), - integer2=neps.IntegerParameter(lower=1, upper=1000, log=True), + float1=neps.Float(lower=0, upper=1), + float2=neps.Float(lower=-10, upper=10), + categorical=neps.Categorical(choices=[0, 1]), + integer1=neps.Integer(lower=0, upper=1), + integer2=neps.Integer(lower=1, upper=1000, log=True), ) logging.basicConfig(level=logging.INFO) diff --git a/neps_examples/convenience/logging_additional_info.py b/neps_examples/convenience/logging_additional_info.py index 5bde13c6c..34470d9f7 100644 --- a/neps_examples/convenience/logging_additional_info.py +++ b/neps_examples/convenience/logging_additional_info.py @@ -19,11 +19,11 @@ def run_pipeline(float1, float2, categorical, integer1, integer2): pipeline_space = dict( - float1=neps.FloatParameter(lower=0, upper=1), - float2=neps.FloatParameter(lower=-10, upper=10), - categorical=neps.CategoricalParameter(choices=[0, 1]), - integer1=neps.IntegerParameter(lower=0, upper=1), - integer2=neps.IntegerParameter(lower=1, upper=1000, log=True), + float1=neps.Float(lower=0, upper=1), + float2=neps.Float(lower=-10, upper=10), + categorical=neps.Categorical(choices=[0, 1]), + integer1=neps.Integer(lower=0, upper=1), + integer2=neps.Integer(lower=1, upper=1000, log=True), ) logging.basicConfig(level=logging.INFO) diff --git a/neps_examples/convenience/neps_tblogger_tutorial.py b/neps_examples/convenience/neps_tblogger_tutorial.py index 724ac4e3e..1a363e60c 100644 --- a/neps_examples/convenience/neps_tblogger_tutorial.py +++ b/neps_examples/convenience/neps_tblogger_tutorial.py @@ -232,9 +232,9 @@ def training( def pipeline_space() -> dict: pipeline = dict( - lr=neps.FloatParameter(lower=1e-5, upper=1e-1, log=True), - optim=neps.CategoricalParameter(choices=["Adam", "SGD"]), - weight_decay=neps.FloatParameter(lower=1e-4, upper=1e-1, log=True), + lr=neps.Float(lower=1e-5, upper=1e-1, log=True), + optim=neps.Categorical(choices=["Adam", "SGD"]), + weight_decay=neps.Float(lower=1e-4, upper=1e-1, log=True), ) return pipeline diff --git a/neps_examples/convenience/neps_x_lightning.py b/neps_examples/convenience/neps_x_lightning.py index de1426e83..a8c873c0a 100644 --- a/neps_examples/convenience/neps_x_lightning.py +++ b/neps_examples/convenience/neps_x_lightning.py @@ -249,14 +249,14 @@ def test_dataloader(self) -> DataLoader: def search_space() -> dict: # Define a dictionary to represent the hyperparameter search space space = dict( - data_dir=neps.ConstantParameter("./data"), - batch_size=neps.ConstantParameter(64), - lr=neps.FloatParameter(lower=1e-5, upper=1e-2, log=True, default=1e-3), - weight_decay=neps.FloatParameter( + data_dir=neps.Constant("./data"), + batch_size=neps.Constant(64), + lr=neps.Float(lower=1e-5, upper=1e-2, log=True, default=1e-3), + weight_decay=neps.Float( lower=1e-5, upper=1e-3, log=True, default=5e-4 ), - 
optimizer=neps.CategoricalParameter(choices=["Adam", "SGD"], default="Adam"), - epochs=neps.IntegerParameter(lower=1, upper=9, log=False, is_fidelity=True), + optimizer=neps.Categorical(choices=["Adam", "SGD"], default="Adam"), + epochs=neps.Integer(lower=1, upper=9, log=False, is_fidelity=True), ) return space diff --git a/neps_examples/convenience/running_on_slurm_scripts.py b/neps_examples/convenience/running_on_slurm_scripts.py index 7ccc5374f..03a73c96f 100644 --- a/neps_examples/convenience/running_on_slurm_scripts.py +++ b/neps_examples/convenience/running_on_slurm_scripts.py @@ -52,8 +52,8 @@ def run_pipeline_via_slurm( pipeline_space = dict( - optimizer=neps.CategoricalParameter(choices=["sgd", "adam"]), - learning_rate=neps.FloatParameter(lower=10e-7, upper=10e-3, log=True), + optimizer=neps.Categorical(choices=["sgd", "adam"]), + learning_rate=neps.Float(lower=10e-7, upper=10e-3, log=True), ) logging.basicConfig(level=logging.INFO) diff --git a/neps_examples/convenience/working_directory_per_pipeline.py b/neps_examples/convenience/working_directory_per_pipeline.py index c79977f16..7aa261978 100644 --- a/neps_examples/convenience/working_directory_per_pipeline.py +++ b/neps_examples/convenience/working_directory_per_pipeline.py @@ -18,9 +18,9 @@ def run_pipeline(pipeline_directory: Path, float1, categorical, integer1): pipeline_space = dict( - float1=neps.FloatParameter(lower=0, upper=1), - categorical=neps.CategoricalParameter(choices=[0, 1]), - integer1=neps.IntegerParameter(lower=0, upper=1), + float1=neps.Float(lower=0, upper=1), + categorical=neps.Categorical(choices=[0, 1]), + integer1=neps.Integer(lower=0, upper=1), ) logging.basicConfig(level=logging.INFO) diff --git a/neps_examples/efficiency/expert_priors_for_hyperparameters.py b/neps_examples/efficiency/expert_priors_for_hyperparameters.py index 11f50c9db..46ed93258 100644 --- a/neps_examples/efficiency/expert_priors_for_hyperparameters.py +++ b/neps_examples/efficiency/expert_priors_for_hyperparameters.py @@ -23,13 +23,13 @@ def run_pipeline(some_float, some_integer, some_cat): # neps uses the default values and a confidence in this default value to construct a prior # that speeds up the search pipeline_space = dict( - some_float=neps.FloatParameter( + some_float=neps.Float( lower=1, upper=1000, log=True, default=900, default_confidence="medium" ), - some_integer=neps.IntegerParameter( + some_integer=neps.Integer( lower=0, upper=50, default=35, default_confidence="low" ), - some_cat=neps.CategoricalParameter( + some_cat=neps.Categorical( choices=["a", "b", "c"], default="a", default_confidence="high" ), ) diff --git a/neps_examples/efficiency/multi_fidelity.py b/neps_examples/efficiency/multi_fidelity.py index bdbcc9650..3e58eefa9 100644 --- a/neps_examples/efficiency/multi_fidelity.py +++ b/neps_examples/efficiency/multi_fidelity.py @@ -73,8 +73,8 @@ def run_pipeline(pipeline_directory, previous_pipeline_directory, learning_rate, pipeline_space = dict( - learning_rate=neps.FloatParameter(lower=1e-4, upper=1e0, log=True), - epoch=neps.IntegerParameter(lower=1, upper=10, is_fidelity=True), + learning_rate=neps.Float(lower=1e-4, upper=1e0, log=True), + epoch=neps.Integer(lower=1, upper=10, is_fidelity=True), ) logging.basicConfig(level=logging.INFO) diff --git a/neps_examples/efficiency/multi_fidelity_and_expert_priors.py b/neps_examples/efficiency/multi_fidelity_and_expert_priors.py index 032b83dfa..1de286170 100644 --- a/neps_examples/efficiency/multi_fidelity_and_expert_priors.py +++ 
b/neps_examples/efficiency/multi_fidelity_and_expert_priors.py @@ -11,16 +11,16 @@ def run_pipeline(float1, float2, integer1, fidelity): pipeline_space = dict( - float1=neps.FloatParameter( + float1=neps.Float( lower=1, upper=1000, log=False, default=600, default_confidence="medium" ), - float2=neps.FloatParameter( + float2=neps.Float( lower=-10, upper=10, default=0, default_confidence="medium" ), - integer1=neps.IntegerParameter( + integer1=neps.Integer( lower=0, upper=50, default=35, default_confidence="low" ), - fidelity=neps.IntegerParameter(lower=1, upper=10, is_fidelity=True), + fidelity=neps.Integer(lower=1, upper=10, is_fidelity=True), ) logging.basicConfig(level=logging.INFO) diff --git a/neps_examples/experimental/cost_aware.py b/neps_examples/experimental/cost_aware.py index e6cd5ebfd..5b08bdccd 100644 --- a/neps_examples/experimental/cost_aware.py +++ b/neps_examples/experimental/cost_aware.py @@ -19,13 +19,13 @@ def run_pipeline( pipeline_space = dict( - float1=neps.FloatParameter(lower=0, upper=1, log=False), - float2=neps.FloatParameter( + float1=neps.Float(lower=0, upper=1, log=False), + float2=neps.Float( lower=0, upper=10, log=False, default=10, default_confidence="medium" ), - categorical=neps.CategoricalParameter(choices=[0, 1]), - integer1=neps.IntegerParameter(lower=0, upper=1, log=False), - integer2=neps.IntegerParameter(lower=0, upper=1, log=False), + categorical=neps.Categorical(choices=[0, 1]), + integer1=neps.Integer(lower=0, upper=1, log=False), + integer2=neps.Integer(lower=0, upper=1, log=False), ) logging.basicConfig(level=logging.INFO) diff --git a/neps_examples/experimental/expert_priors_for_architecture_and_hyperparameters.py b/neps_examples/experimental/expert_priors_for_architecture_and_hyperparameters.py index 5aa3e5235..c0d1f5f6c 100644 --- a/neps_examples/experimental/expert_priors_for_architecture_and_hyperparameters.py +++ b/neps_examples/experimental/expert_priors_for_architecture_and_hyperparameters.py @@ -106,20 +106,20 @@ def run_pipeline(some_architecture, some_float, some_integer, some_cat): pipeline_space = dict( - some_architecture=neps.FunctionParameter( + some_architecture=neps.Function( set_recursive_attribute=set_recursive_attribute, structure=structure, primitives=primitives, name="pibo", prior=prior_distr, ), - some_float=neps.FloatParameter( + some_float=neps.Float( lower=1, upper=1000, log=True, default=900, default_confidence="medium" ), - some_integer=neps.IntegerParameter( + some_integer=neps.Integer( lower=0, upper=50, default=35, default_confidence="low" ), - some_cat=neps.CategoricalParameter( + some_cat=neps.Categorical( choices=["a", "b", "c"], default="a", default_confidence="high" ), ) diff --git a/neps_examples/experimental/fault_tolerance.py b/neps_examples/experimental/fault_tolerance.py index 9c4a9d2c5..8406627ed 100644 --- a/neps_examples/experimental/fault_tolerance.py +++ b/neps_examples/experimental/fault_tolerance.py @@ -78,7 +78,7 @@ def run_pipeline(pipeline_directory, learning_rate): pipeline_space = dict( - learning_rate=neps.FloatParameter(lower=0, upper=1), + learning_rate=neps.Float(lower=0, upper=1), ) logging.basicConfig(level=logging.INFO) diff --git a/neps_examples/experimental/hierarchical_architecture.py b/neps_examples/experimental/hierarchical_architecture.py index 20a912d01..440b116aa 100644 --- a/neps_examples/experimental/hierarchical_architecture.py +++ b/neps_examples/experimental/hierarchical_architecture.py @@ -64,7 +64,7 @@ def set_recursive_attribute(op_name, predecessor_values): return 
diff --git a/neps_examples/experimental/cost_aware.py b/neps_examples/experimental/cost_aware.py
index e6cd5ebfd..5b08bdccd 100644
--- a/neps_examples/experimental/cost_aware.py
+++ b/neps_examples/experimental/cost_aware.py
@@ -19,13 +19,13 @@ def run_pipeline(
 pipeline_space = dict(
-    float1=neps.FloatParameter(lower=0, upper=1, log=False),
-    float2=neps.FloatParameter(
+    float1=neps.Float(lower=0, upper=1, log=False),
+    float2=neps.Float(
         lower=0, upper=10, log=False, default=10, default_confidence="medium"
     ),
-    categorical=neps.CategoricalParameter(choices=[0, 1]),
-    integer1=neps.IntegerParameter(lower=0, upper=1, log=False),
-    integer2=neps.IntegerParameter(lower=0, upper=1, log=False),
+    categorical=neps.Categorical(choices=[0, 1]),
+    integer1=neps.Integer(lower=0, upper=1, log=False),
+    integer2=neps.Integer(lower=0, upper=1, log=False),
 )
 logging.basicConfig(level=logging.INFO)
diff --git a/neps_examples/experimental/expert_priors_for_architecture_and_hyperparameters.py b/neps_examples/experimental/expert_priors_for_architecture_and_hyperparameters.py
index 5aa3e5235..c0d1f5f6c 100644
--- a/neps_examples/experimental/expert_priors_for_architecture_and_hyperparameters.py
+++ b/neps_examples/experimental/expert_priors_for_architecture_and_hyperparameters.py
@@ -106,20 +106,20 @@ def run_pipeline(some_architecture, some_float, some_integer, some_cat):
 pipeline_space = dict(
-    some_architecture=neps.FunctionParameter(
+    some_architecture=neps.Function(
         set_recursive_attribute=set_recursive_attribute,
         structure=structure,
         primitives=primitives,
         name="pibo",
         prior=prior_distr,
     ),
-    some_float=neps.FloatParameter(
+    some_float=neps.Float(
         lower=1, upper=1000, log=True, default=900, default_confidence="medium"
     ),
-    some_integer=neps.IntegerParameter(
+    some_integer=neps.Integer(
         lower=0, upper=50, default=35, default_confidence="low"
     ),
-    some_cat=neps.CategoricalParameter(
+    some_cat=neps.Categorical(
         choices=["a", "b", "c"], default="a", default_confidence="high"
     ),
 )
diff --git a/neps_examples/experimental/fault_tolerance.py b/neps_examples/experimental/fault_tolerance.py
index 9c4a9d2c5..8406627ed 100644
--- a/neps_examples/experimental/fault_tolerance.py
+++ b/neps_examples/experimental/fault_tolerance.py
@@ -78,7 +78,7 @@ def run_pipeline(pipeline_directory, learning_rate):
 pipeline_space = dict(
-    learning_rate=neps.FloatParameter(lower=0, upper=1),
+    learning_rate=neps.Float(lower=0, upper=1),
 )
 logging.basicConfig(level=logging.INFO)
diff --git a/neps_examples/experimental/hierarchical_architecture.py b/neps_examples/experimental/hierarchical_architecture.py
index 20a912d01..440b116aa 100644
--- a/neps_examples/experimental/hierarchical_architecture.py
+++ b/neps_examples/experimental/hierarchical_architecture.py
@@ -64,7 +64,7 @@ def set_recursive_attribute(op_name, predecessor_values):
     return dict(c_in=in_channels, c_out=out_channels)
-def run_pipeline(architecture: neps.FunctionParameter):
+def run_pipeline(architecture: neps.Function):
     in_channels = 3
     n_classes = 20
     base_channels = 64
@@ -86,7 +86,7 @@ def run_pipeline(architecture: neps.FunctionParameter):
 pipeline_space = dict(
-    architecture=neps.FunctionParameter(
+    architecture=neps.Function(
         set_recursive_attribute=set_recursive_attribute,
         structure=structure,
         primitives=primitives,
diff --git a/neps_examples/template/basic_template.py b/neps_examples/template/basic_template.py
index 3717150ae..87e6c3aeb 100644
--- a/neps_examples/template/basic_template.py
+++ b/neps_examples/template/basic_template.py
@@ -36,7 +36,7 @@ def pipeline_space() -> dict:
     # Create the search space based on NEPS parameters and return the dictionary.
     # Example:
     space = dict(
-        lr=neps.FloatParameter(
+        lr=neps.Float(
             lower=1e-5,
             upper=1e-2,
             log=True,  # If True, the search space is sampled in log space
diff --git a/neps_examples/template/lightning_template.py b/neps_examples/template/lightning_template.py
index 9c674fc4a..d284b6929 100644
--- a/neps_examples/template/lightning_template.py
+++ b/neps_examples/template/lightning_template.py
@@ -48,14 +48,14 @@ def pipeline_space() -> dict:
     # Create the search space based on NEPS parameters and return the dictionary.
     # IMPORTANT:
     space = dict(
-        lr=neps.FloatParameter(
+        lr=neps.Float(
             lower=1e-5,
             upper=1e-2,
             log=True,  # If True, the search space is sampled in log space
             default=1e-3,  # a non-None value here acts as the mode of the prior distribution
         ),
-        optimizer=neps.CategoricalParameter(choices=["Adam", "SGD"], default="Adam"),
-        epochs=neps.IntegerParameter(
+        optimizer=neps.Categorical(choices=["Adam", "SGD"], default="Adam"),
+        epochs=neps.Integer(
             lower=1,
             upper=9,
             is_fidelity=True,  # IMPORTANT to set this to True for the fidelity parameter
diff --git a/neps_examples/template/priorband_template.py b/neps_examples/template/priorband_template.py
index 2a0fabe2a..e2d754331 100644
--- a/neps_examples/template/priorband_template.py
+++ b/neps_examples/template/priorband_template.py
@@ -41,19 +41,19 @@ def pipeline_space() -> dict:
     # Create the search space based on NEPS parameters and return the dictionary.
     # IMPORTANT:
     space = dict(
-        lr=neps.FloatParameter(
+        lr=neps.Float(
             lower=1e-5,
             upper=1e-2,
             log=True,  # If True, the search space is sampled in log space
             default=1e-3,  # a non-None value here acts as the mode of the prior distribution
         ),
-        wd=neps.FloatParameter(
+        wd=neps.Float(
             lower=0,
             upper=1e-1,
             log=True,
             default=1e-3,
         ),
-        epoch=neps.IntegerParameter(
+        epoch=neps.Integer(
             lower=1,
             upper=10,
             is_fidelity=True,  # IMPORTANT to set this to True for the fidelity parameter
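The three templates escalate: `basic_template.py` searches a plain space, `lightning_template.py` adds a prior mode via `default=`, and `priorband_template.py` (above) pairs that prior with a fidelity axis, the combination the PriorBand searcher exploits. A condensed sketch follows; the training function and `root_directory` are placeholders, and `searcher="priorband"` is the searcher name as used in the NePS documentation of this version:

```python
import neps

def training_function(lr, wd, epoch) -> float:
    # Placeholder objective standing in for a real training loop.
    return lr * wd * epoch

neps.run(
    run_pipeline=training_function,
    pipeline_space=dict(
        lr=neps.Float(lower=1e-5, upper=1e-2, log=True, default=1e-3),  # prior mode
        wd=neps.Float(lower=1e-6, upper=1e-1, log=True, default=1e-3),  # lower bound kept positive since log=True
        epoch=neps.Integer(lower=1, upper=10, is_fidelity=True),  # budget axis
    ),
    root_directory="results/priorband",  # placeholder
    max_evaluations_total=50,
    searcher="priorband",
)
```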
- "a": FloatParameter(5, 6), - "c": CategoricalParameter(["cat", "mouse", "dog"]), + "b": Integer(5, 6), + "a": Float(5, 6), + "c": Categorical(["cat", "mouse", "dog"]), } x = "raspberry" @@ -184,9 +184,9 @@ def test_config_encoder_removes_constants_in_encoding_and_includes_in_decoding() def test_config_encoder_complains_if_missing_entry_in_config() -> None: parameters = { - "b": IntegerParameter(5, 6), - "a": FloatParameter(5, 6), - "c": CategoricalParameter(["cat", "mouse", "dog"]), + "b": Integer(5, 6), + "a": Float(5, 6), + "c": Categorical(["cat", "mouse", "dog"]), } encoder = ConfigEncoder.from_parameters(parameters) @@ -197,9 +197,9 @@ def test_config_encoder_complains_if_missing_entry_in_config() -> None: def test_config_encoder_sorts_parameters_by_name_for_consistent_ordering() -> None: parameters = { - "a": CategoricalParameter([0, 1]), - "b": IntegerParameter(0, 1), - "c": FloatParameter(0, 1), + "a": Categorical([0, 1]), + "b": Integer(0, 1), + "c": Float(0, 1), } p1 = dict(sorted(parameters.items())) p2 = dict(sorted(parameters.items(), reverse=True)) diff --git a/tests/test_neps_api/testing_scripts/baseoptimizer_neps.py b/tests/test_neps_api/testing_scripts/baseoptimizer_neps.py index 1fe9a2199..63ca66705 100644 --- a/tests/test_neps_api/testing_scripts/baseoptimizer_neps.py +++ b/tests/test_neps_api/testing_scripts/baseoptimizer_neps.py @@ -6,13 +6,13 @@ from neps.search_spaces.search_space import SearchSpace pipeline_space_fidelity = dict( - val1=neps.FloatParameter(lower=-10, upper=10), - val2=neps.IntegerParameter(lower=1, upper=5, is_fidelity=True), + val1=neps.Float(lower=-10, upper=10), + val2=neps.Integer(lower=1, upper=5, is_fidelity=True), ) pipeline_space = dict( - val1=neps.FloatParameter(lower=-10, upper=10), - val2=neps.IntegerParameter(lower=1, upper=5), + val1=neps.Float(lower=-10, upper=10), + val2=neps.Integer(lower=1, upper=5), ) diff --git a/tests/test_neps_api/testing_scripts/default_neps.py b/tests/test_neps_api/testing_scripts/default_neps.py index c6e1ac12d..11b41dca7 100644 --- a/tests/test_neps_api/testing_scripts/default_neps.py +++ b/tests/test_neps_api/testing_scripts/default_neps.py @@ -3,23 +3,23 @@ import neps pipeline_space_fidelity_priors = dict( - val1=neps.FloatParameter(lower=-10, upper=10, default=1), - val2=neps.IntegerParameter(lower=1, upper=5, is_fidelity=True), + val1=neps.Float(lower=-10, upper=10, default=1), + val2=neps.Integer(lower=1, upper=5, is_fidelity=True), ) pipeline_space_not_fidelity_priors = dict( - val1=neps.FloatParameter(lower=-10, upper=10, default=1), - val2=neps.IntegerParameter(lower=1, upper=5, default=1), + val1=neps.Float(lower=-10, upper=10, default=1), + val2=neps.Integer(lower=1, upper=5, default=1), ) pipeline_space_fidelity = dict( - val1=neps.FloatParameter(lower=-10, upper=10), - val2=neps.IntegerParameter(lower=1, upper=5, is_fidelity=True), + val1=neps.Float(lower=-10, upper=10), + val2=neps.Integer(lower=1, upper=5, is_fidelity=True), ) pipeline_space_not_fidelity = dict( - val1=neps.FloatParameter(lower=-10, upper=10), - val2=neps.IntegerParameter(lower=1, upper=5), + val1=neps.Float(lower=-10, upper=10), + val2=neps.Integer(lower=1, upper=5), ) diff --git a/tests/test_neps_api/testing_scripts/user_yaml_neps.py b/tests/test_neps_api/testing_scripts/user_yaml_neps.py index 5d862d9db..d28cbbede 100644 --- a/tests/test_neps_api/testing_scripts/user_yaml_neps.py +++ b/tests/test_neps_api/testing_scripts/user_yaml_neps.py @@ -4,8 +4,8 @@ import neps pipeline_space = dict( - 
val1=neps.FloatParameter(lower=-10, upper=10), - val2=neps.IntegerParameter(lower=1, upper=5), + val1=neps.Float(lower=-10, upper=10), + val2=neps.Integer(lower=1, upper=5), ) diff --git a/tests/test_runtime/test_default_report_values.py b/tests/test_runtime/test_default_report_values.py index 2ebec1c0c..265d4c083 100644 --- a/tests/test_runtime/test_default_report_values.py +++ b/tests/test_runtime/test_default_report_values.py @@ -8,7 +8,7 @@ from neps.state.neps_state import NePSState from neps.state.optimizer import OptimizationState, OptimizerInfo from neps.state.settings import DefaultReportValues, OnErrorPossibilities, WorkerSettings -from neps.search_spaces import FloatParameter +from neps.search_spaces import Float from neps.state.trial import Trial @@ -24,7 +24,7 @@ def neps_state(tmp_path: Path) -> NePSState[Path]: def test_default_values_on_error( neps_state: NePSState, ) -> None: - optimizer = RandomSearch(pipeline_space=SearchSpace(a=FloatParameter(0, 1))) + optimizer = RandomSearch(pipeline_space=SearchSpace(a=Float(0, 1))) settings = WorkerSettings( on_error=OnErrorPossibilities.IGNORE, default_report_values=DefaultReportValues( @@ -75,7 +75,7 @@ def eval_function(*args, **kwargs) -> float: def test_default_values_on_not_specified( neps_state: NePSState, ) -> None: - optimizer = RandomSearch(pipeline_space=SearchSpace(a=FloatParameter(0, 1))) + optimizer = RandomSearch(pipeline_space=SearchSpace(a=Float(0, 1))) settings = WorkerSettings( on_error=OnErrorPossibilities.IGNORE, default_report_values=DefaultReportValues( @@ -124,7 +124,7 @@ def eval_function(*args, **kwargs) -> float: def test_default_value_loss_curve_take_loss_value( neps_state: NePSState, ) -> None: - optimizer = RandomSearch(pipeline_space=SearchSpace(a=FloatParameter(0, 1))) + optimizer = RandomSearch(pipeline_space=SearchSpace(a=Float(0, 1))) settings = WorkerSettings( on_error=OnErrorPossibilities.IGNORE, default_report_values=DefaultReportValues(learning_curve_if_not_provided="loss"), diff --git a/tests/test_runtime/test_error_handling_strategies.py b/tests/test_runtime/test_error_handling_strategies.py index d3180ae92..05cf762a3 100644 --- a/tests/test_runtime/test_error_handling_strategies.py +++ b/tests/test_runtime/test_error_handling_strategies.py @@ -13,7 +13,7 @@ from neps.state.neps_state import NePSState from neps.state.optimizer import OptimizationState, OptimizerInfo from neps.state.settings import DefaultReportValues, OnErrorPossibilities, WorkerSettings -from neps.search_spaces import FloatParameter +from neps.search_spaces import Float from neps.state.trial import Trial @@ -34,7 +34,7 @@ def test_worker_raises_when_error_in_self( neps_state: NePSState, on_error: OnErrorPossibilities, ) -> None: - optimizer = RandomSearch(pipeline_space=SearchSpace(a=FloatParameter(0, 1))) + optimizer = RandomSearch(pipeline_space=SearchSpace(a=Float(0, 1))) settings = WorkerSettings( on_error=on_error, # <- Highlight default_report_values=DefaultReportValues(), @@ -73,7 +73,7 @@ def eval_function(*args, **kwargs) -> float: def test_worker_raises_when_error_in_other_worker(neps_state: NePSState) -> None: - optimizer = RandomSearch(pipeline_space=SearchSpace(a=FloatParameter(0, 1))) + optimizer = RandomSearch(pipeline_space=SearchSpace(a=Float(0, 1))) settings = WorkerSettings( on_error=OnErrorPossibilities.RAISE_ANY_ERROR, # <- Highlight default_report_values=DefaultReportValues(), @@ -133,7 +133,7 @@ def test_worker_does_not_raise_when_error_in_other_worker( neps_state: NePSState, on_error: 
OnErrorPossibilities, ) -> None: - optimizer = RandomSearch(pipeline_space=SearchSpace(a=FloatParameter(0, 1))) + optimizer = RandomSearch(pipeline_space=SearchSpace(a=Float(0, 1))) settings = WorkerSettings( on_error=OnErrorPossibilities.RAISE_WORKER_ERROR, # <- Highlight default_report_values=DefaultReportValues(), diff --git a/tests/test_runtime/test_stopping_criterion.py b/tests/test_runtime/test_stopping_criterion.py index 5b83985d0..c73051a9f 100644 --- a/tests/test_runtime/test_stopping_criterion.py +++ b/tests/test_runtime/test_stopping_criterion.py @@ -9,7 +9,7 @@ from neps.state.neps_state import NePSState from neps.state.optimizer import OptimizationState, OptimizerInfo from neps.state.settings import DefaultReportValues, OnErrorPossibilities, WorkerSettings -from neps.search_spaces import FloatParameter +from neps.search_spaces import Float from neps.state.trial import Trial @@ -25,7 +25,7 @@ def neps_state(tmp_path: Path) -> NePSState[Path]: def test_max_evaluations_total_stopping_criterion( neps_state: NePSState, ) -> None: - optimizer = RandomSearch(pipeline_space=SearchSpace(a=FloatParameter(0, 1))) + optimizer = RandomSearch(pipeline_space=SearchSpace(a=Float(0, 1))) settings = WorkerSettings( on_error=OnErrorPossibilities.IGNORE, default_report_values=DefaultReportValues(), @@ -78,7 +78,7 @@ def eval_function(*args, **kwargs) -> float: def test_worker_evaluations_total_stopping_criterion( neps_state: NePSState, ) -> None: - optimizer = RandomSearch(pipeline_space=SearchSpace(a=FloatParameter(0, 1))) + optimizer = RandomSearch(pipeline_space=SearchSpace(a=Float(0, 1))) settings = WorkerSettings( on_error=OnErrorPossibilities.IGNORE, default_report_values=DefaultReportValues(), @@ -140,7 +140,7 @@ def eval_function(*args, **kwargs) -> float: def test_include_in_progress_evaluations_towards_maximum_with_work_eval_count( neps_state: NePSState, ) -> None: - optimizer = RandomSearch(pipeline_space=SearchSpace(a=FloatParameter(0, 1))) + optimizer = RandomSearch(pipeline_space=SearchSpace(a=Float(0, 1))) settings = WorkerSettings( on_error=OnErrorPossibilities.IGNORE, default_report_values=DefaultReportValues(), @@ -196,7 +196,7 @@ def eval_function(*args, **kwargs) -> float: def test_max_cost_total( neps_state: NePSState, ) -> None: - optimizer = RandomSearch(pipeline_space=SearchSpace(a=FloatParameter(0, 1))) + optimizer = RandomSearch(pipeline_space=SearchSpace(a=Float(0, 1))) settings = WorkerSettings( on_error=OnErrorPossibilities.IGNORE, default_report_values=DefaultReportValues(), @@ -247,7 +247,7 @@ def eval_function(*args, **kwargs) -> dict: def test_worker_cost_total( neps_state: NePSState, ) -> None: - optimizer = RandomSearch(pipeline_space=SearchSpace(a=FloatParameter(0, 1))) + optimizer = RandomSearch(pipeline_space=SearchSpace(a=Float(0, 1))) settings = WorkerSettings( on_error=OnErrorPossibilities.IGNORE, default_report_values=DefaultReportValues(), @@ -306,7 +306,7 @@ def eval_function(*args, **kwargs) -> dict: def test_worker_wallclock_time( neps_state: NePSState, ) -> None: - optimizer = RandomSearch(pipeline_space=SearchSpace(a=FloatParameter(0, 1))) + optimizer = RandomSearch(pipeline_space=SearchSpace(a=Float(0, 1))) settings = WorkerSettings( on_error=OnErrorPossibilities.IGNORE, default_report_values=DefaultReportValues(), @@ -364,7 +364,7 @@ def eval_function(*args, **kwargs) -> float: def test_max_worker_evaluation_time( neps_state: NePSState, ) -> None: - optimizer = RandomSearch(pipeline_space=SearchSpace(a=FloatParameter(0, 1))) + optimizer = 
diff --git a/tests/test_state/test_neps_state.py b/tests/test_state/test_neps_state.py
index 6de39a09d..51773fdb7 100644
--- a/tests/test_state/test_neps_state.py
+++ b/tests/test_state/test_neps_state.py
@@ -9,10 +9,10 @@
 import pytest
 from neps.optimizers.base_optimizer import BaseOptimizer
 from neps.search_spaces.hyperparameters import (
-    FloatParameter,
-    IntegerParameter,
-    ConstantParameter,
-    CategoricalParameter,
+    Float,
+    Integer,
+    Constant,
+    Categorical,
 )
 from neps.search_spaces.search_space import SearchSpace
 from neps.state.filebased import (
@@ -28,42 +28,42 @@
 @case
 def case_search_space_no_fid() -> SearchSpace:
     return SearchSpace(
-        a=FloatParameter(0, 1),
-        b=CategoricalParameter(["a", "b", "c"]),
-        c=ConstantParameter("a"),
-        d=IntegerParameter(0, 10),
+        a=Float(0, 1),
+        b=Categorical(["a", "b", "c"]),
+        c=Constant("a"),
+        d=Integer(0, 10),
     )
 @case
 def case_search_space_with_fid() -> SearchSpace:
     return SearchSpace(
-        a=FloatParameter(0, 1),
-        b=CategoricalParameter(["a", "b", "c"]),
-        c=ConstantParameter("a"),
-        d=IntegerParameter(0, 10),
-        e=IntegerParameter(1, 10, is_fidelity=True),
+        a=Float(0, 1),
+        b=Categorical(["a", "b", "c"]),
+        c=Constant("a"),
+        d=Integer(0, 10),
+        e=Integer(1, 10, is_fidelity=True),
     )
 @case
 def case_search_space_no_fid_with_prior() -> SearchSpace:
     return SearchSpace(
-        a=FloatParameter(0, 1, default=0.5),
-        b=CategoricalParameter(["a", "b", "c"], default="a"),
-        c=ConstantParameter("a"),
-        d=IntegerParameter(0, 10, default=5),
+        a=Float(0, 1, default=0.5),
+        b=Categorical(["a", "b", "c"], default="a"),
+        c=Constant("a"),
+        d=Integer(0, 10, default=5),
     )
 @case
 def case_search_space_fid_with_prior() -> SearchSpace:
     return SearchSpace(
-        a=FloatParameter(0, 1, default=0.5),
-        b=CategoricalParameter(["a", "b", "c"], default="a"),
-        c=ConstantParameter("a"),
-        d=IntegerParameter(0, 10, default=5),
-        e=IntegerParameter(1, 10, is_fidelity=True),
+        a=Float(0, 1, default=0.5),
+        b=Categorical(["a", "b", "c"], default="a"),
+        c=Constant("a"),
+        d=Integer(0, 10, default=5),
+        e=Integer(1, 10, is_fidelity=True),
     )
diff --git a/tests/test_yaml_run_args/test_declarative_usage_docs/pipeline_space.py b/tests/test_yaml_run_args/test_declarative_usage_docs/pipeline_space.py
index 79a034db5..d4ba4028c 100644
--- a/tests/test_yaml_run_args/test_declarative_usage_docs/pipeline_space.py
+++ b/tests/test_yaml_run_args/test_declarative_usage_docs/pipeline_space.py
@@ -1,8 +1,8 @@
 import neps
 pipeline_space = dict(
-    learning_rate=neps.FloatParameter(lower=1e-5, upper=1e-1, log=True),
-    epochs=neps.IntegerParameter(lower=5, upper=20, is_fidelity=True),
-    optimizer=neps.CategoricalParameter(choices=["adam", "sgd", "adamw"]),
-    batch_size=neps.ConstantParameter(value=64)
+    learning_rate=neps.Float(lower=1e-5, upper=1e-1, log=True),
+    epochs=neps.Integer(lower=5, upper=20, is_fidelity=True),
+    optimizer=neps.Categorical(choices=["adam", "sgd", "adamw"]),
+    batch_size=neps.Constant(value=64)
 )
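One detail in the space above deserves a note: `neps.Constant(value=64)` keeps `batch_size` in the space so it is passed to `run_pipeline` on every evaluation, yet it is never sampled or optimized. This is the idiomatic way to carry a fixed setting through the search rather than hard-coding it. A minimal sketch:

```python
import neps

pipeline_space = dict(
    learning_rate=neps.Float(lower=1e-5, upper=1e-1, log=True),
    batch_size=neps.Constant(value=64),  # delivered unchanged to every evaluation
)

def run_pipeline(learning_rate, batch_size) -> float:
    assert batch_size == 64  # always the constant's value
    return learning_rate  # placeholder objective
```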
diff --git a/tests/test_yaml_run_args/test_run_args_by_neps_run/neps_run.py b/tests/test_yaml_run_args/test_run_args_by_neps_run/neps_run.py
index f2ee0c9c6..404fea461 100644
--- a/tests/test_yaml_run_args/test_run_args_by_neps_run/neps_run.py
+++ b/tests/test_yaml_run_args/test_run_args_by_neps_run/neps_run.py
@@ -15,10 +15,10 @@ def run_pipeline(learning_rate, epochs, optimizer, batch_size):
 # For testing the functionality of loading a dictionary from a YAML configuration.
 pipeline_space = dict(
-    learning_rate=neps.FloatParameter(lower=1e-6, upper=1e-1, log=False),
-    epochs=neps.IntegerParameter(lower=1, upper=3, is_fidelity=False),
-    optimizer=neps.CategoricalParameter(choices=["a", "b", "c"]),
-    batch_size=neps.ConstantParameter(64),
+    learning_rate=neps.Float(lower=1e-6, upper=1e-1, log=False),
+    epochs=neps.Integer(lower=1, upper=3, is_fidelity=False),
+    optimizer=neps.Categorical(choices=["a", "b", "c"]),
+    batch_size=neps.Constant(64),
 )
 if __name__ == "__main__":
diff --git a/tests/test_yaml_run_args/test_yaml_run_args.py b/tests/test_yaml_run_args/test_yaml_run_args.py
index 8200b2fde..aebd2a378 100644
--- a/tests/test_yaml_run_args/test_yaml_run_args.py
+++ b/tests/test_yaml_run_args/test_yaml_run_args.py
@@ -6,10 +6,10 @@
 BASE_PATH = "tests/test_yaml_run_args/"
 pipeline_space = dict(
-    lr=neps.FloatParameter(lower=1e-3, upper=0.1),
-    optimizer=neps.CategoricalParameter(choices=["adam", "sgd", "adamw"]),
-    epochs=neps.IntegerParameter(lower=1, upper=10),
-    batch_size=neps.ConstantParameter(value=64),
+    lr=neps.Float(lower=1e-3, upper=0.1),
+    optimizer=neps.Categorical(choices=["adam", "sgd", "adamw"]),
+    epochs=neps.Integer(lower=1, upper=10),
+    batch_size=neps.Constant(value=64),
 )
diff --git a/tests/test_yaml_search_space/test_search_space.py b/tests/test_yaml_search_space/test_search_space.py
index b92289f56..bfc1c84c1 100644
--- a/tests/test_yaml_search_space/test_search_space.py
+++ b/tests/test_yaml_search_space/test_search_space.py
@@ -6,7 +6,7 @@
     pipeline_space_from_yaml,
 )
-from neps import CategoricalParameter, ConstantParameter, FloatParameter, IntegerParameter
+from neps import Categorical, Constant, Float, Integer
 BASE_PATH = "tests/test_yaml_search_space/"
@@ -17,19 +17,19 @@ def test_correct_yaml_file(path):
     """Test the function with a correctly formatted YAML file."""
     pipeline_space = pipeline_space_from_yaml(path)
     assert isinstance(pipeline_space, dict)
-    float1 = FloatParameter(0.00001, 0.1, log=True, is_fidelity=False)
+    float1 = Float(0.00001, 0.1, log=True, is_fidelity=False)
     assert float1.__eq__(pipeline_space["param_float1"]) is True
-    int1 = IntegerParameter(-3, 30, log=False, is_fidelity=True)
+    int1 = Integer(-3, 30, log=False, is_fidelity=True)
     assert int1.__eq__(pipeline_space["param_int1"]) is True
-    int2 = IntegerParameter(100, 30000, log=True, is_fidelity=False)
+    int2 = Integer(100, 30000, log=True, is_fidelity=False)
     assert int2.__eq__(pipeline_space["param_int2"]) is True
-    float2 = FloatParameter(3.3e-5, 0.15, log=False)
+    float2 = Float(3.3e-5, 0.15, log=False)
     assert float2.__eq__(pipeline_space["param_float2"]) is True
-    cat1 = CategoricalParameter([2, "sgd", 10e-3])
+    cat1 = Categorical([2, "sgd", 10e-3])
     assert cat1.__eq__(pipeline_space["param_cat"]) is True
-    const1 = ConstantParameter(0.5)
+    const1 = Constant(0.5)
     assert const1.__eq__(pipeline_space["param_const1"]) is True
-    const2 = ConstantParameter(1e3)
+    const2 = Constant(1e3)
     assert const2.__eq__(pipeline_space["param_const2"]) is True
 test_correct_yaml_file(BASE_PATH + "correct_config.yaml")
@@ -43,13 +43,13 @@ def test_correct_including_priors_yaml_file():
         BASE_PATH + "correct_config_including_priors.yml"
     )
     assert isinstance(pipeline_space, dict)
-    float1 = FloatParameter(0.00001, 0.1, log=True, is_fidelity=False, default=3.3e-2, default_confidence="high")
+    float1 = Float(0.00001, 0.1, log=True, is_fidelity=False, default=3.3e-2, default_confidence="high")
     assert float1.__eq__(pipeline_space["learning_rate"]) is True
-    int1 = IntegerParameter(3, 30, log=False, is_fidelity=True)
+    int1 = Integer(3, 30, log=False, is_fidelity=True)
     assert int1.__eq__(pipeline_space["num_epochs"]) is True
-    cat1 = CategoricalParameter(["adam", 90e-3, "rmsprop"], default=90e-3, default_confidence="medium")
+    cat1 = Categorical(["adam", 90e-3, "rmsprop"], default=90e-3, default_confidence="medium")
     assert cat1.__eq__(pipeline_space["optimizer"]) is True
-    const1 = ConstantParameter(1e3)
+    const1 = Constant(1e3)
     assert const1.__eq__(pipeline_space["dropout_rate"]) is True
@@ -127,7 +127,7 @@ def test_float_log_not_boolean():
 @pytest.mark.neps_api
 def test_float_is_fidelity_not_boolean():
-    """Test if an exception is raised when for FloatParameter the 'is_fidelity'
+    """Test if an exception is raised when for Float the 'is_fidelity'
     attribute is not a boolean."""
     with pytest.raises(SearchSpaceFromYamlFileError) as excinfo:
         pipeline_space_from_yaml(
@@ -139,7 +139,7 @@ def test_float_is_fidelity_not_boolean():
 @pytest.mark.neps_api
 def test_categorical_default_value_not_in_choices():
     """Test if a ValueError is raised when the default value is not in the choices
-    for a CategoricalParameter."""
+    for a Categorical."""
     with pytest.raises(SearchSpaceFromYamlFileError) as excinfo:
         pipeline_space_from_yaml(BASE_PATH + "default_value_not_in_choices_config.yaml")
     assert excinfo.value.exception_type == "ValueError"