Skip to content

Commit

Permalink
Tabular support + DyHPO - joint commit with @karibbov
Browse files Browse the repository at this point in the history
  • Loading branch information
Neeratyoy committed Nov 2, 2023
1 parent 334ecbc commit e91f0c8
Show file tree
Hide file tree
Showing 17 changed files with 928 additions and 443 deletions.
106 changes: 0 additions & 106 deletions neps_examples/efficiency/multi_fidelity_dyhpo.py

This file was deleted.

22 changes: 19 additions & 3 deletions src/metahyper/api.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
from copy import deepcopy
from dataclasses import dataclass
from pathlib import Path
from typing import Any
from typing import Any, List

from neps.plot.tensorboard_eval import tblogger

Expand Down Expand Up @@ -206,16 +206,29 @@ def _check_max_evaluations(

return evaluation_count >= max_evaluations

from timeit import default_timer as timer

def _sample_config(optimization_dir, sampler, serializer, logger):
def _sample_config(optimization_dir, sampler, serializer, logger, pre_load_hooks):
# First load the results and state of the optimizer
logger.info("Metahyper: Started collecting previous and pending data")
start = timer()
previous_results, pending_configs, pending_configs_free = read(
optimization_dir, serializer, logger, do_lock=False
)
end = timer()
logger.info("Metahyper: Finished collecting previous and pending data")
logger.info(f"took {end - start} seconds")

base_result_directory = optimization_dir / "results"

logger.debug("Sampling a new configuration")

for hook in pre_load_hooks:
# executes operations on the sampler before setting its state
# can be used for setting custom constraints on the optimizer state
# for example, can be used to input custom grid of configs, meta learning
# information for surrogate building, any non-stationary auxiliary information
sampler = hook(sampler)
sampler.load_results(previous_results, pending_configs)
config, config_id, previous_config_id = sampler.get_config_and_ids()

Expand Down Expand Up @@ -338,6 +351,7 @@ def run(
logger=None,
post_evaluation_hook=None,
overwrite_optimization_dir=False,
pre_load_hooks: List=[],
):
serializer = YamlSerializer(sampler.load_config)
if logger is None:
Expand Down Expand Up @@ -392,7 +406,9 @@ def run(
config,
pipeline_directory,
previous_pipeline_directory,
) = _sample_config(optimization_dir, sampler, serializer, logger)
) = _sample_config(
optimization_dir, sampler, serializer, logger, pre_load_hooks
)
if tblogger.logger_init_bool or tblogger.logger_bool:
# This block manages configuration data, potentially for TensorBoard.
# Captures details during sampling; initial config always captured.
Expand Down
5 changes: 4 additions & 1 deletion src/neps/api.py
Original file line number Diff line number Diff line change
Expand Up @@ -109,6 +109,7 @@ def run(
ignore_errors: bool = False,
loss_value_on_error: None | float = None,
cost_value_on_error: None | float = None,
pre_load_hooks: List=[],
searcher: Literal[
"default",
"bayesian_optimization",
Expand Down Expand Up @@ -154,6 +155,7 @@ def run(
suppress any error and will use the given loss value instead. default: None
cost_value_on_error: Setting this and loss_value_on_error to any float will
suppress any error and will use the given cost value instead. default: None
pre_load_hooks: List of functions called on the sampler before load_results(); each hook receives the sampler and returns the (possibly modified) sampler.
searcher: Which optimizer to use. This is usually only needed by neps developers.
**searcher_kwargs: Will be passed to the searcher. This is usually only needed by
neps developers.
Expand Down Expand Up @@ -213,7 +215,7 @@ def run(
else:
new_pipeline_space[key] = value
pipeline_space = new_pipeline_space

# Transform to neps internal representation of the pipeline space
pipeline_space = SearchSpace(**pipeline_space)
except TypeError as e:
Expand Down Expand Up @@ -256,4 +258,5 @@ def run(
post_evaluation_hook=_post_evaluation_hook_function(
loss_value_on_error, ignore_errors
),
pre_load_hooks=pre_load_hooks,
)
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,8 @@

from .ei import ComprehensiveExpectedImprovement
from .mf_ei import MFEI
from .ucb import UpperConfidenceBound, MF_UCB


AcquisitionMapping: dict[str, Callable] = {
"EI": partial(
Expand All @@ -30,4 +32,12 @@
in_fill="best",
augmented_ei=False,
),
"UCB": partial(
UpperConfidenceBound,
maximize=False,
),
"MF-UCB": partial(
MF_UCB,
maximize=False,
),
}

This file was deleted.

Loading

0 comments on commit e91f0c8

Please sign in to comment.