diff --git a/src/neps/optimizers/bayesian_optimization/acquisition_functions/mf_ei.py b/src/neps/optimizers/bayesian_optimization/acquisition_functions/mf_ei.py
index 29d2bcbd..8c9af9e6 100644
--- a/src/neps/optimizers/bayesian_optimization/acquisition_functions/mf_ei.py
+++ b/src/neps/optimizers/bayesian_optimization/acquisition_functions/mf_ei.py
@@ -137,8 +137,8 @@ def preprocess_pfn(self, x: Iterable) -> Tuple[Iterable, Iterable, Iterable]:
         len_partial = len(self.observations.seen_config_ids)
         z_min = x[0].fidelity.lower
         # converting fidelity to the discrete budget level
-        # STRICT ASSUMPTION: fidelity is the first dimension
-        _x_tok[:len_partial, 0] = (_x_tok[:len_partial, 0] + self.b_step - z_min) / self.b_step
+        # STRICT ASSUMPTION: fidelity is the second dimension
+        _x_tok[:len_partial, 1] = (_x_tok[:len_partial, 1] + self.b_step - z_min) / self.b_step
         return _x_tok, _x, inc_list
 
     def eval(
diff --git a/src/neps/optimizers/bayesian_optimization/acquisition_samplers/freeze_thaw_sampler.py b/src/neps/optimizers/bayesian_optimization/acquisition_samplers/freeze_thaw_sampler.py
index c0a61c82..310a9b21 100644
--- a/src/neps/optimizers/bayesian_optimization/acquisition_samplers/freeze_thaw_sampler.py
+++ b/src/neps/optimizers/bayesian_optimization/acquisition_samplers/freeze_thaw_sampler.py
@@ -13,7 +13,7 @@
 
 
 class FreezeThawSampler(AcquisitionSampler):
-    SAMPLES_TO_DRAW = 3  # number of random samples to draw at lowest fidelity
+    SAMPLES_TO_DRAW = 100  # number of random samples to draw at lowest fidelity
 
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
@@ -106,7 +106,7 @@ def sample(
         _partial_ids = set([conf["id"].value for conf in partial_configs])
         _all_ids = set(self.pipeline_space.custom_grid_table.index.values)
         # accounting for unseen configs only
-        _n = max(_n, len(_all_ids - _partial_ids))
+        _n = min(_n, len(_all_ids - _partial_ids))
         _new_configs = np.random.choice(list(_all_ids - _partial_ids), size=_n, replace=False)
         new_configs = [self.pipeline_space.sample(
             patience=self.patience, user_priors=False, ignore_fidelity=False
@@ -115,7 +115,7 @@
             config["id"].value = _new_configs[i]
             config.fidelity.value = self.pipeline_space.fidelity.lower
         new_configs = pd.Series(
-            new_configs, 
+            new_configs,
             index=np.arange(len(partial_configs), len(partial_configs) + len(new_configs))
         )
diff --git a/src/neps/optimizers/multi_fidelity/utils.py b/src/neps/optimizers/multi_fidelity/utils.py
index 030f526e..4c1be7d3 100644
--- a/src/neps/optimizers/multi_fidelity/utils.py
+++ b/src/neps/optimizers/multi_fidelity/utils.py
@@ -267,8 +267,8 @@ def get_training_data_4DyHPO(
 
     def get_tokenized_data(self, df: pd.DataFrame):
         idxs = df.index.values
-        idxs = np.array([list(idx)[::-1] for idx in idxs])
-        idxs[:, 0] += 1  # all fidelity IDs begin with 0 in NePS
+        idxs = np.array([list(idx) for idx in idxs])
+        idxs[:, 1] += 1  # all fidelity IDs begin with 0 in NePS
         performances = df.perf.values
         configs = df.config.values
         configs = np.array([normalize_vectorize_config(c) for c in configs])
@@ -281,7 +281,7 @@ def tokenize(self, df: pd.DataFrame, as_tensor: bool = False):
         fidelity = np.array([c.fidelity.value for c in df]).reshape(-1, 1)
         idx = df.index.values.reshape(-1, 1)
 
-        data = np.hstack([fidelity, idx, configs])
+        data = np.hstack([idx, fidelity, configs])
 
         if as_tensor:
             device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
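
Illustration (not part of the patch): the tokenization changes above move the fidelity token from column 0 to column 1, so the layout becomes [config ID, fidelity, *normalized hyperparameters], and the budget-level conversion in mf_ei.py must index column 1 accordingly. Below is a minimal, self-contained sketch of that layout and conversion; the values of b_step, z_min, and the toy token matrix are hypothetical and only mirror the names used in the diff.

import numpy as np

# Hypothetical fidelity settings; in NePS these would come from the
# pipeline space's fidelity parameter.
b_step = 1.0  # step size between consecutive budget levels
z_min = 1.0   # lowest fidelity value of the space

# Token layout after this patch: column 0 = config ID, column 1 = fidelity,
# remaining columns = normalized hyperparameters (toy values).
tokens = np.array([
    [0.0, 1.0, 0.3, 0.7],
    [1.0, 3.0, 0.9, 0.1],
    [2.0, 5.0, 0.5, 0.5],
])

# The conversion from raw fidelity to a discrete budget level now indexes
# column 1 (fidelity) instead of column 0 (config ID):
tokens[:, 1] = (tokens[:, 1] + b_step - z_min) / b_step
print(tokens[:, 1])  # [1. 3. 5.] -- 1-indexed budget levels

The freeze_thaw_sampler.py hunks are independent of the layout: switching max to min caps the number of freshly drawn configs at the number of configs not yet seen, so np.random.choice(..., replace=False) cannot be asked for more unique IDs than exist.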