Skip to content

Commit

Permalink
Bug fix to MFI + budget-based initial design
Browse files Browse the repository at this point in the history
  • Loading branch information
Neeratyoy committed Sep 1, 2023
1 parent a7f9776 commit 04b10c0
Show file tree
Hide file tree
Showing 3 changed files with 80 additions and 17 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,6 @@ def eval(
self, x: Iterable, asscalar: bool = False
) -> Union[np.ndarray, torch.Tensor, float]:
"""Vanilla-EI modified to preprocess samples and accept list of incumbents."""

x, inc_list = self.preprocess(x) # IMPORTANT change from vanilla-EI

_x = x.copy()
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -12,15 +12,15 @@


class FreezeThawSampler(AcquisitionSampler):
n = 500 # number of random samples to draw at lowest fidelity
SAMPLES_TO_DRAW = 100 # number of random samples to draw at lowest fidelity

def __init__(self, **kwargs):
    """Initialise the sampler; observation state is attached later via ``set_state``."""
    super().__init__(**kwargs)
    # Both fields stay unset until ``set_state`` supplies them before sampling.
    self.b_step = None
    self.observations = None

def _sample_new(self, index_from: int, n: int = None) -> pd.Series:
n = n if n is not None else self.n
n = n if n is not None else self.SAMPLES_TO_DRAW
configs = [
self.pipeline_space.sample(
patience=self.patience, user_priors=False, ignore_fidelity=False
Expand Down Expand Up @@ -69,4 +69,4 @@ def set_state(
self.pipeline_space = pipeline_space
self.observations = observations
self.b_step = b_step
self.n = n if n is not None else self.n
self.n = n if n is not None else self.SAMPLES_TO_DRAW
90 changes: 77 additions & 13 deletions src/neps/optimizers/multi_fidelity/dyhpo.py
Original file line number Diff line number Diff line change
Expand Up @@ -62,6 +62,7 @@ def __init__(
model_policy: Any = MFEIModel,
log_prior_weighted: bool = False,
initial_design_size: int = 10,
initial_design_budget: int = 100,
):
"""Initialise
Expand Down Expand Up @@ -97,6 +98,7 @@ def __init__(
self.max_budget = self.pipeline_space.fidelity.upper

self._initial_design_size = initial_design_size
self._initial_design_budget = initial_design_budget
self._model_update_failed = False
self.sample_default_first = sample_default_first
self.sample_default_at_target = sample_default_at_target
Expand Down Expand Up @@ -173,11 +175,34 @@ def get_budget_value(self, budget_level: int | float) -> int | float:
)
self._budget_list.append(budget_val)
return budget_val

def total_budget_spent(self) -> int | float:
    """Calculates the total budget spent so far.

    This is calculated as a function of the fidelity range provided, that takes into
    account the minimum budget and the step size.

    Returns:
        Sum of the budget values consumed across all observed configurations;
        0 when nothing has been observed yet.
    """
    # no observations yet -> nothing spent
    if len(self.observed_configs.df) == 0:
        return 0
    # budgets are columns in the learning-curve frame; every non-NaN cell is
    # one recorded budget step for that configuration
    _df = self.observed_configs.get_learning_curves()
    # per config: count the budget steps taken excluding the first min_budget
    # step (budget_id starts from 0), then map that count to its budget value
    return sum(
        self.get_budget_value((~_df.loc[idx].isna()).sum() - 1)
        for idx in _df.index
    )

@property
def is_init_phase(self) -> bool:
if self.num_train_configs < self._initial_design_size:
return True
def is_init_phase(self, budget_based: bool=True) -> bool:
    """Whether the optimizer is still within its initial design phase.

    Args:
        budget_based: when True, judge the phase by the total fidelity budget
            consumed so far; otherwise by the number of configurations trained.

    Returns:
        True while the corresponding initial-design threshold is not reached.
    """
    if budget_based:
        return self.total_budget_spent() < self._initial_design_budget
    return self.num_train_configs < self._initial_design_size

@property
Expand Down Expand Up @@ -262,6 +287,45 @@ def _fit_models(self):
self.pipeline_space, self.observed_configs, self.step_size
)

def _sample_init_design(self) -> tuple[SearchSpace, int]:
""" Samples the initial design.
With an unbiased coin toss (p=0.5) it decides whether to sample a new
configuration or continue a partial configuration, until initial_design_size
configurations have been sampled.
"""
_p = np.random.uniform() # random choice
print("*" * 50, "\n", len(self.observed_configs.seen_config_ids), "\n", "*" * 50)
if (
(_p < 0.5 or len(self.observed_configs.df) == 0) and
len(self.observed_configs.seen_config_ids) < self._initial_design_size
):
# sampling a new configuration
config = self.pipeline_space.sample(
patience=self.patience, user_priors=True, ignore_fidelity=False
)
# setting the fidelity to the minimum
config.fidelity.value = self.min_budget
# finding the ID of the new configuration
_config_id = self.observed_configs.next_config_id()
else:
# sampling a configuration ID from the observed ones
_config_ids = np.unique(
self.observed_configs.df.index.get_level_values('config_id').values
)
_config_id = np.random.choice(_config_ids)
# extracting the config
config = self.observed_configs.df.loc[
_config_id, self.observed_configs.config_col
].iloc[0]
# extracting the budget level
budget = self.observed_configs.df.loc[_config_id].index.values[-1]
# calculating fidelity value
new_fidelity = self.get_budget_value(budget + 1)
# settingt the config fidelity
config.fidelity.value = new_fidelity
return config, _config_id

def get_config_and_ids( # pylint: disable=no-self-use
self,
) -> tuple[SearchSpace, str, str | None]:
Expand All @@ -277,22 +341,22 @@ def get_config_and_ids( # pylint: disable=no-self-use
or self.is_init_phase
or self._model_update_failed
):
config = self.pipeline_space.sample(
patience=self.patience, user_priors=True, ignore_fidelity=False
)
config.fidelity.value = config.fidelity.lower
_config_id = self.observed_configs.next_config_id()
# config = self.pipeline_space.sample(
# patience=self.patience, user_priors=True, ignore_fidelity=False
# )
# config.fidelity.value = config.fidelity.lower
# _config_id = self.observed_configs.next_config_id()
config, _config_id = self._sample_init_design()
else:
# main call here

samples = self.acquisition_sampler.sample()
eis = self.acquisition.eval( # type: ignore[attr-defined]
x=samples.to_list(), asscalar=True
)
# TODO: verify
_ids = np.argsort(eis)[0]
# samples should have new configs with fidelities set to minimum
# due to this line, otherwise we have to set them in here
# maximizing EI
_ids = np.argsort(eis)[-1]
# samples should have new configs with fidelities set to as required by
# the acquisition sampler
config = samples.iloc[_ids]
_config_id = samples.index[_ids]

Expand Down

0 comments on commit 04b10c0

Please sign in to comment.