Skip to content

Commit

Permalink
Move noise_std, constraint_noise_std, and negate from `ParamBasedTestProblem` to `SyntheticProblemRunner` (facebook#2926)
Browse files Browse the repository at this point in the history

Summary:

Context: Noise and negation are (somewhat surprisingly!) handled by the runner, even though similar attributes exist on BoTorch test problems. This is confusing; it is better to _require_ these to be set on the runner and to raise an exception if they are set on the test problem.

Also, a `ParamBasedTestProblem` should be as minimal as possible, since it is the only benchmark class that needs to be repeatedly subclassed.

Furthermore, this makes the code easier to work with by moving these arguments to a shallower level in the stack, making them easier to access and reducing the need to pass dicts of parameters.

This PR:
* Adds `noise_std`, `constraint_noise_std`, and `negate` to `SyntheticProblemRunner`
* Removes those arguments from `ParamBasedTestProblem` and all its subclasses
* Updates references
* Adds an exception if those arguments are present when creating a runner based on a BoTorch problem

Reviewed By: Balandat

Differential Revision: D64575398
  • Loading branch information
esantorella authored and facebook-github-bot committed Oct 23, 2024
1 parent c05f0c5 commit 34f8e69
Show file tree
Hide file tree
Showing 9 changed files with 80 additions and 35 deletions.
13 changes: 13 additions & 0 deletions ax/benchmark/benchmark_problem.py
Original file line number Diff line number Diff line change
Expand Up @@ -298,6 +298,9 @@ def create_problem_from_botorch(
*,
test_problem_class: type[BaseTestProblem],
test_problem_kwargs: dict[str, Any],
noise_std: float | list[float] | None = None,
constraint_noise_std: float | list[float] | None = None,
negate: bool = False,
num_trials: int,
lower_is_better: bool = True,
observe_noise_sd: bool = False,
Expand All @@ -316,6 +319,13 @@ def create_problem_from_botorch(
to define the `search_space`, `optimization_config`, and `runner`.
test_problem_kwargs: Keyword arguments used to instantiate the
`test_problem_class`.
noise_std: Standard deviation of synthetic noise added to objectives. If
`None`, no noise is added. If a float, the same noise level is used
for all objectives.
constraint_noise_std: Standard deviation of synthetic noise added to
constraints.
negate: Whether the values produced by the test function should be
negated. Does not apply to constraints.
lower_is_better: Whether this is a minimization problem. For MOO, this
applies to all objectives.
num_trials: Simply the `num_trials` of the `BenchmarkProblem` created.
Expand Down Expand Up @@ -382,6 +392,9 @@ def create_problem_from_botorch(
search_space=search_space,
param_names=list(search_space.parameters.keys()),
),
noise_std=noise_std,
negate=negate,
constraint_noise_std=constraint_noise_std,
),
num_trials=num_trials,
observe_noise_stds=observe_noise_sd,
Expand Down
4 changes: 1 addition & 3 deletions ax/benchmark/problems/synthetic/hss/jenatton.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,8 +55,6 @@ def jenatton_test_function(
class Jenatton(ParamBasedTestProblem):
"""Jenatton test function for hierarchical search spaces."""

noise_std: float | None = None
negate: bool = False
num_objectives: int = 1

# pyre-fixme[14]: Inconsistent override
Expand Down Expand Up @@ -125,7 +123,7 @@ def get_jenatton_benchmark_problem(
search_space=search_space,
optimization_config=optimization_config,
runner=ParamBasedTestProblemRunner(
test_problem=Jenatton(noise_std=noise_std), outcome_names=[name]
test_problem=Jenatton(), outcome_names=[name], noise_std=noise_std
),
num_trials=num_trials,
observe_noise_stds=observe_noise_sd,
Expand Down
30 changes: 23 additions & 7 deletions ax/benchmark/runners/botorch_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,9 +28,6 @@ class ParamBasedTestProblem(ABC):
"""

num_objectives: int
constraint_noise_std: float | list[float] | None = None
noise_std: float | list[float] | None = None
negate: bool = False

@abstractmethod
def evaluate_true(self, params: Mapping[str, TParamValue]) -> Tensor:
Expand Down Expand Up @@ -74,19 +71,22 @@ class SyntheticProblemRunner(BenchmarkRunner, ABC):

test_problem: BaseTestProblem | ParamBasedTestProblem
modified_bounds: list[tuple[float, float]] | None = None
constraint_noise_std: float | list[float] | None = None
noise_std: float | list[float] | None = None
negate: bool = False

@property
def _is_constrained(self) -> bool:
return isinstance(self.test_problem, ConstrainedBaseTestProblem)

def get_noise_stds(self) -> None | float | dict[str, float]:
noise_std = self.test_problem.noise_std
noise_std = self.noise_std
noise_std_dict: dict[str, float] = {}
num_obj = self.test_problem.num_objectives

# populate any noise_stds for constraints
if self._is_constrained:
constraint_noise_std = self.test_problem.constraint_noise_std
constraint_noise_std = self.constraint_noise_std
if isinstance(constraint_noise_std, list):
for i, cns in enumerate(constraint_noise_std, start=num_obj):
if cns is not None:
Expand Down Expand Up @@ -141,6 +141,22 @@ class BotorchTestProblemRunner(SyntheticProblemRunner):

def __post_init__(self, search_space_digest: SearchSpaceDigest | None) -> None:
    """Validate runner/problem configuration and prepare the test problem.

    Noise and negation are handled by the runner, not by the wrapped BoTorch
    test problem, so any of those attributes set on the problem itself would
    be silently ignored. Fail loudly instead.

    Raises:
        ValueError: If ``noise_std``, ``constraint_noise_std``, or ``negate``
            was set on the wrapped BoTorch test problem rather than on this
            runner.
    """
    super().__post_init__(search_space_digest=search_space_digest)
    if self.test_problem.noise_std is not None:
        raise ValueError(
            "noise_std should be set on the runner, not the test problem."
        )
    # Only constrained BoTorch problems carry `constraint_noise_std`,
    # hence the hasattr guard.
    if (
        hasattr(self.test_problem, "constraint_noise_std")
        and self.test_problem.constraint_noise_std is not None
    ):
        raise ValueError(
            "constraint_noise_std should be set on the runner, not the test "
            "problem."
        )
    if self.test_problem.negate:
        raise ValueError(
            "negate should be set on the runner, not the test problem."
        )
    # Evaluate the problem in double precision.
    self.test_problem = self.test_problem.to(dtype=torch.double)

def get_Y_true(self, params: Mapping[str, TParamValue]) -> Tensor:
Expand Down Expand Up @@ -174,7 +190,7 @@ def get_Y_true(self, params: Mapping[str, TParamValue]) -> Tensor:

Y_true = self.test_problem.evaluate_true(X).view(-1)
# `BaseTestProblem.evaluate_true()` does not negate the outcome
if self.test_problem.negate:
if self.negate:
Y_true = -Y_true

if self._is_constrained:
Expand Down Expand Up @@ -228,6 +244,6 @@ def get_Y_true(self, params: Mapping[str, TParamValue]) -> Tensor:
"""
Y_true = self.test_problem.evaluate_true(params).view(-1)
# `ParamBasedTestProblem.evaluate_true()` does not negate the outcome
if self.test_problem.negate:
if self.negate:
Y_true = -Y_true
return Y_true
4 changes: 1 addition & 3 deletions ax/benchmark/tests/methods/test_methods.py
Original file line number Diff line number Diff line change
Expand Up @@ -138,9 +138,7 @@ def test_sobol(self) -> None:
def _test_get_best_parameters(
self, use_model_predictions: bool, as_batch: bool
) -> None:
problem = get_problem(
problem_key="ackley4", num_trials=2, test_problem_kwargs={"noise_std": 1.0}
)
problem = get_problem(problem_key="ackley4", num_trials=2, noise_std=1.0)

method = get_sobol_botorch_modular_acquisition(
model_cls=SingleTaskGP,
Expand Down
8 changes: 2 additions & 6 deletions ax/benchmark/tests/problems/synthetic/hss/test_jenatton.py
Original file line number Diff line number Diff line change
Expand Up @@ -107,9 +107,7 @@ def test_create_problem(self) -> None:
self.assertTrue(objective.minimize)
self.assertTrue(metric.lower_is_better)
self.assertEqual(
assert_is_instance(
problem.runner, ParamBasedTestProblemRunner
).test_problem.noise_std,
assert_is_instance(problem.runner, ParamBasedTestProblemRunner).noise_std,
0.0,
)
self.assertFalse(assert_is_instance(metric, BenchmarkMetric).observe_noise_sd)
Expand All @@ -121,9 +119,7 @@ def test_create_problem(self) -> None:
metric = objective.metric
self.assertTrue(metric.lower_is_better)
self.assertEqual(
assert_is_instance(
problem.runner, ParamBasedTestProblemRunner
).test_problem.noise_std,
assert_is_instance(problem.runner, ParamBasedTestProblemRunner).noise_std,
0.1,
)
self.assertTrue(assert_is_instance(metric, BenchmarkMetric).observe_noise_sd)
Expand Down
37 changes: 30 additions & 7 deletions ax/benchmark/tests/runners/test_botorch_test_problem.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,11 +31,35 @@


class TestSyntheticRunner(TestCase):
def test_runner_raises_for_botorch_attrs(self) -> None:
    """Constructing a runner whose BoTorch problem sets noise_std,
    constraint_noise_std, or negate must raise a ValueError, since those
    attributes belong on the runner."""
    # noise_std set on the problem -> rejected.
    with self.assertRaisesRegex(
        ValueError, "noise_std should be set on the runner, not the test problem."
    ):
        BotorchTestProblemRunner(
            test_problem=Hartmann(dim=6, noise_std=0.1),
            outcome_names=["objective"],
        )
    # constraint_noise_std set on a constrained problem -> rejected.
    with self.assertRaisesRegex(
        ValueError,
        "constraint_noise_std should be set on the runner, not the test problem.",
    ):
        BotorchTestProblemRunner(
            test_problem=ConstrainedHartmann(dim=6, constraint_noise_std=0.1),
            outcome_names=["objective", "constraint"],
        )
    # negate set on the problem -> rejected.
    with self.assertRaisesRegex(
        ValueError, "negate should be set on the runner, not the test problem."
    ):
        BotorchTestProblemRunner(
            test_problem=Hartmann(dim=6, negate=True),
            outcome_names=["objective"],
        )

def test_synthetic_runner(self) -> None:
botorch_cases = [
(
BotorchTestProblemRunner,
test_problem_class(dim=6, noise_std=noise_std),
test_problem_class(dim=6),
modified_bounds,
noise_std,
)
Expand All @@ -48,9 +72,7 @@ def test_synthetic_runner(self) -> None:
param_based_cases = [
(
ParamBasedTestProblemRunner,
TestParamBasedTestProblem(
num_objectives=num_objectives, dim=6, noise_std=noise_std
),
TestParamBasedTestProblem(num_objectives=num_objectives, dim=6),
None,
noise_std,
)
Expand All @@ -76,6 +98,7 @@ def test_synthetic_runner(self) -> None:
test_problem=test_problem,
outcome_names=outcome_names,
modified_bounds=modified_bounds,
noise_std=noise_std,
)

test_description: str = (
Expand Down Expand Up @@ -168,9 +191,9 @@ def test_synthetic_runner(self) -> None:

def test_botorch_test_problem_runner_heterogeneous_noise(self) -> None:
runner = BotorchTestProblemRunner(
test_problem=ConstrainedHartmann(
dim=6, noise_std=0.1, constraint_noise_std=0.05
),
test_problem=ConstrainedHartmann(dim=6),
noise_std=0.1,
constraint_noise_std=0.05,
outcome_names=["objective", "constraint"],
)
self.assertDictEqual(
Expand Down
3 changes: 1 addition & 2 deletions ax/benchmark/tests/test_benchmark.py
Original file line number Diff line number Diff line change
Expand Up @@ -202,12 +202,11 @@ def _test_replication_with_inference_value(
num_sobol_trials=3,
)

test_problem_kwargs = {"noise_std": 100.0}
num_trials = 4
problem = get_single_objective_benchmark_problem(
test_problem_kwargs=test_problem_kwargs,
num_trials=num_trials,
report_inference_value_as_trace=report_inference_value_as_trace,
noise_std=100.0,
)
res = benchmark_replication(problem=problem, method=method, seed=seed)
# The inference trace could coincide with the oracle trace, but it won't
Expand Down
14 changes: 7 additions & 7 deletions ax/benchmark/tests/test_benchmark_problem.py
Original file line number Diff line number Diff line change
Expand Up @@ -204,19 +204,18 @@ def _test_constrained_from_botorch(
) -> None:
ax_problem = create_problem_from_botorch(
test_problem_class=test_problem_class,
test_problem_kwargs={
"noise_std": objective_noise_std,
"constraint_noise_std": constraint_noise_std,
},
test_problem_kwargs={},
lower_is_better=True,
num_trials=1,
observe_noise_sd=observe_noise_sd,
noise_std=objective_noise_std,
constraint_noise_std=constraint_noise_std,
)
runner = checked_cast(BotorchTestProblemRunner, ax_problem.runner)
self.assertTrue(runner._is_constrained)
botorch_problem = checked_cast(ConstrainedBaseTestProblem, runner.test_problem)
self.assertEqual(botorch_problem.noise_std, objective_noise_std)
self.assertEqual(botorch_problem.constraint_noise_std, constraint_noise_std)
self.assertEqual(runner.noise_std, objective_noise_std)
self.assertEqual(runner.constraint_noise_std, constraint_noise_std)
opt_config = ax_problem.optimization_config
outcome_constraints = opt_config.outcome_constraints
self.assertEqual(
Expand Down Expand Up @@ -376,8 +375,9 @@ def test_get_oracle_experiment_from_params(self) -> None:
def test_get_oracle_experiment_from_experiment(self) -> None:
problem = create_problem_from_botorch(
test_problem_class=Branin,
test_problem_kwargs={"negate": True},
test_problem_kwargs={},
num_trials=5,
negate=True,
)

# empty experiment
Expand Down
2 changes: 2 additions & 0 deletions ax/utils/testing/benchmark_stubs.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,13 +48,15 @@ def get_single_objective_benchmark_problem(
num_trials: int = 4,
test_problem_kwargs: dict[str, Any] | None = None,
report_inference_value_as_trace: bool = False,
noise_std: float | list[float] | None = None,
) -> BenchmarkProblem:
return create_problem_from_botorch(
test_problem_class=Branin,
test_problem_kwargs=test_problem_kwargs or {},
num_trials=num_trials,
observe_noise_sd=observe_noise_sd,
report_inference_value_as_trace=report_inference_value_as_trace,
noise_std=noise_std,
)


Expand Down

0 comments on commit 34f8e69

Please sign in to comment.