feat(yaml): Enable mixed usage for run_args and neps func arguments + new design for pre-load-hooks + new design for searcher config (yaml) (#102)
danrgll committed Jun 11, 2024
1 parent a2d0fb1 commit baea9c5
Showing 42 changed files with 429 additions and 413 deletions.
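The headline change is mixed usage of `run_args` and direct arguments: in the rewritten `neps/api.py` below, every key found in the `run_args` YAML takes precedence, and any key the YAML omits falls back to the value passed directly to `neps.run(...)`. A minimal sketch of what this enables, with a hypothetical pipeline and config file (`run_config.yaml` is assumed, not part of this commit):

```python
import neps

def run_pipeline(learning_rate: float) -> dict:
    # Toy objective so the sketch stays self-contained.
    return {"loss": learning_rate}

pipeline_space = {
    "learning_rate": neps.FloatParameter(lower=1e-5, upper=1e-1, log=True),
}

# Keys present in run_config.yaml win; anything it omits falls back to
# the values given here, so both sources can now be mixed freely.
neps.run(
    run_pipeline=run_pipeline,
    pipeline_space=pipeline_space,
    run_args="run_config.yaml",   # hypothetical YAML, e.g. setting max_cost_total
    root_directory="results",     # used only if the YAML omits root_directory
    max_evaluations_total=10,     # likewise a fallback value
)
```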
4 changes: 2 additions & 2 deletions docs/doc_yamls/full_configuration_template.yaml
@@ -22,7 +22,7 @@ max_cost_total:
 
 # Debug and Monitoring
 overwrite_working_directory: True
-post_run_summary: True
+post_run_summary: False
 development_stage_id:
 task_id:
 
@@ -36,7 +36,7 @@ cost_value_on_error:
 ignore_errors:
 
 # Customization Options
-searcher: bayesian_optimization # Internal key to select a NePS optimizer.
+searcher: hyperband # Internal key to select a NePS optimizer.
 
 # Hooks
 pre_load_hooks:
23 changes: 11 additions & 12 deletions docs/reference/optimizers.md
Expand Up @@ -59,18 +59,17 @@ The library will then load your custom settings and use them for optimization.
Here's the format of a custom YAML (`custom_bo.yaml`) configuration using `Bayesian Optimization` as an example:

```yaml
searcher_init:
algorithm: bayesian_optimization
searcher_kwargs: # Specific arguments depending on the searcher
initial_design_size: 7
surrogate_model: gp
acquisition: EI
log_prior_weighted: false
acquisition_sampler: random
random_interleave_prob: 0.1
disable_priors: false
prior_confidence: high
sample_default_first: false
algorithm: bayesian_optimization
# Specific arguments depending on the searcher
initial_design_size: 7
surrogate_model: gp
acquisition: EI
log_prior_weighted: false
acquisition_sampler: random
random_interleave_prob: 0.1
disable_priors: false
prior_confidence: high
sample_default_first: false
```

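The Python usage example that follows this YAML in the docs is collapsed in this view. As a hedged stand-in, a minimal sketch of how a custom configuration like `custom_bo.yaml` can be handed to `neps.run`, assuming the `searcher_path`/`searcher` arguments visible in `neps/api.py` select a YAML file from a directory (paths and the toy pipeline are illustrative):

```python
import neps

def run_pipeline(x: float) -> dict:
    return {"loss": (x - 0.5) ** 2}  # toy objective

pipeline_space = {"x": neps.FloatParameter(lower=0.0, upper=1.0)}

neps.run(
    run_pipeline=run_pipeline,
    pipeline_space=pipeline_space,
    root_directory="results/custom_bo",
    max_evaluations_total=25,
    searcher_path="path/to/configs",  # directory assumed to hold custom_bo.yaml
    searcher="custom_bo",             # file name selects the config
)
```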
68 changes: 30 additions & 38 deletions neps/api.py
@@ -10,7 +10,7 @@
 
 import ConfigSpace as CS
 from neps.utils.run_args_from_yaml import check_essential_arguments, get_run_args_from_yaml,\
-    check_arg_defaults
+    check_double_reference
 
 from neps.utils.common import instance_from_map
 from neps.runtime import launch_runtime
@@ -221,46 +221,37 @@ def run(
         del searcher_kwargs["budget"]
     logger = logging.getLogger("neps")
 
-    # if arguments via run_args provided overwrite them
     if run_args:
-        # Check if the user provided other arguments directly to neps.run().
-        # If so, raise an error.
-        check_arg_defaults(run, locals())
-
-        # Warning if the user has specified default values for arguments that differ
-        # from those specified in 'run_args'. These user-defined changes are not applied.
-        warnings.warn(
-            "WARNING: Loading arguments from 'run_args'. Arguments directly provided "
-            "to neps.run(...) will be not used!"
-        )
-
         optim_settings = get_run_args_from_yaml(run_args)
+        check_double_reference(run, locals(), optim_settings)
 
-        # Update each argument based on optim_settings. If not key is not provided in yaml
-        # use default value. Currently strict but will change in the future.
-        run_pipeline = optim_settings.get("run_pipeline", None)
-        root_directory = optim_settings.get("root_directory", None)
-        pipeline_space = optim_settings.get("pipeline_space", None)
+        run_pipeline = optim_settings.get("run_pipeline", run_pipeline)
+        root_directory = optim_settings.get("root_directory", root_directory)
+        pipeline_space = optim_settings.get("pipeline_space", pipeline_space)
         overwrite_working_directory = optim_settings.get(
-            "overwrite_working_directory", False
+            "overwrite_working_directory", overwrite_working_directory
         )
-        post_run_summary = optim_settings.get("post_run_summary", False)
-        development_stage_id = optim_settings.get("development_stage_id", None)
-        task_id = optim_settings.get("task_id", None)
-        max_evaluations_total = optim_settings.get("max_evaluations_total", None)
-        max_evaluations_per_run = optim_settings.get("max_evaluations_per_run", None)
+        post_run_summary = optim_settings.get("post_run_summary", post_run_summary)
+        development_stage_id = optim_settings.get("development_stage_id",
+                                                  development_stage_id)
+        task_id = optim_settings.get("task_id", task_id)
+        max_evaluations_total = optim_settings.get("max_evaluations_total",
+                                                   max_evaluations_total)
+        max_evaluations_per_run = optim_settings.get("max_evaluations_per_run",
+                                                     max_evaluations_per_run)
         continue_until_max_evaluation_completed = optim_settings.get(
             "continue_until_max_evaluation_completed",
-            False,
-        )
-        max_cost_total = optim_settings.get("max_cost_total", None)
-        ignore_errors = optim_settings.get("ignore_errors", False)
-        loss_value_on_error = optim_settings.get("loss_value_on_error", None)
-        cost_value_on_error = optim_settings.get("cost_value_on_error", None)
-        pre_load_hooks = optim_settings.get("pre_load_hooks", None)
-        searcher = optim_settings.get("searcher", "default")
-        searcher_path = optim_settings.get("searcher_path", None)
-        for key, value in optim_settings.get("searcher_kwargs", {}).items():
+            continue_until_max_evaluation_completed)
+        max_cost_total = optim_settings.get("max_cost_total", max_cost_total)
+        ignore_errors = optim_settings.get("ignore_errors", ignore_errors)
+        loss_value_on_error = optim_settings.get("loss_value_on_error",
+                                                 loss_value_on_error)
+        cost_value_on_error = optim_settings.get("cost_value_on_error",
+                                                 cost_value_on_error)
+        pre_load_hooks = optim_settings.get("pre_load_hooks", pre_load_hooks)
+        searcher = optim_settings.get("searcher", searcher)
+        searcher_path = optim_settings.get("searcher_path", searcher_path)
+        for key, value in optim_settings.get("searcher_kwargs", searcher_kwargs).items():
             searcher_kwargs[key] = value
 
     # check if necessary arguments are provided.
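The rewrite above swaps the hard-coded defaults (`None`, `False`, `"default"`) for the argument values already bound in `locals()`, which is exactly what makes mixing possible. A tiny self-contained illustration of the resulting precedence rule (all values made up):

```python
# Stand-ins for the YAML parsed from run_args and for arguments passed
# directly to neps.run(...).
optim_settings = {"max_evaluations_total": 50}  # from the YAML
max_evaluations_total = 10                      # direct argument
post_run_summary = True                         # direct argument, absent from YAML

# The pattern used throughout the hunk: the YAML wins, the direct
# argument is only the fallback.
max_evaluations_total = optim_settings.get(
    "max_evaluations_total", max_evaluations_total
)
post_run_summary = optim_settings.get("post_run_summary", post_run_summary)

assert max_evaluations_total == 50  # YAML value takes precedence
assert post_run_summary is True     # direct argument survives as fallback
```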
@@ -447,10 +438,11 @@ def _run_args(
     # Fetching the searcher data, throws an error when the searcher is not found
     config = get_searcher_data(searcher)
 
-    searcher_alg = config["searcher_init"]["algorithm"]
-    searcher_config = (
-        {} if config["searcher_kwargs"] is None else config["searcher_kwargs"]
-    )
+    if "algorithm" in config:
+        searcher_alg = config.pop("algorithm")
+    else:
+        raise KeyError(f"Missing key algorithm in searcher config:{config}")
+    searcher_config = config
 
     logger.info(f"Running {searcher} as the searcher")
     logger.info(f"Algorithm: {searcher_alg}")
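Under the new searcher-config design, a YAML is flat: `algorithm` names the optimizer and every remaining key is forwarded as a keyword argument. A sketch of the loading step this hunk implements, run against an inline config (PyYAML assumed available; values illustrative):

```python
import yaml

raw = """
algorithm: asha
eta: 3
early_stopping_rate: 0
"""  # illustrative flat config in the new format

config = yaml.safe_load(raw)
if "algorithm" in config:
    searcher_alg = config.pop("algorithm")
else:
    raise KeyError(f"Missing key algorithm in searcher config:{config}")
searcher_config = config

print(searcher_alg)     # -> asha
print(searcher_config)  # -> {'eta': 3, 'early_stopping_rate': 0}
```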
26 changes: 12 additions & 14 deletions neps/optimizers/default_searchers/asha.yaml
@@ -1,15 +1,13 @@
-searcher_init:
-  algorithm: asha
-searcher_kwargs:
-  # Arguments that can be modified by the user
-  eta: 3
-  early_stopping_rate: 0
-  initial_design_type: max_budget
-  use_priors: false
-  random_interleave_prob: 0.0
-  sample_default_first: false
-  sample_default_at_target: false
+algorithm: asha
+# Arguments that can be modified by the user
+eta: 3
+early_stopping_rate: 0
+initial_design_type: max_budget
+use_priors: false
+random_interleave_prob: 0.0
+sample_default_first: false
+sample_default_at_target: false
 
-  # Arguments that can not be modified by the user
-  # sampling_policy: RandomUniformPolicy
-  # promotion_policy: AsyncPromotionPolicy
+# Arguments that can not be modified by the user
+# sampling_policy: RandomUniformPolicy
+# promotion_policy: AsyncPromotionPolicy
26 changes: 12 additions & 14 deletions neps/optimizers/default_searchers/asha_prior.yaml
@@ -1,15 +1,13 @@
-searcher_init:
-  algorithm: asha_prior
-searcher_kwargs:
-  # Arguments that can be modified by the user
-  eta: 3
-  early_stopping_rate: 0
-  initial_design_type: max_budget
-  prior_confidence: medium # or {"low", "high"}
-  random_interleave_prob: 0.0
-  sample_default_first: false
-  sample_default_at_target: false
+algorithm: asha_prior
+# Arguments that can be modified by the user
+eta: 3
+early_stopping_rate: 0
+initial_design_type: max_budget
+prior_confidence: medium # or {"low", "high"}
+random_interleave_prob: 0.0
+sample_default_first: false
+sample_default_at_target: false
 
-  # Arguments that can not be modified by the user
-  # sampling_policy: FixedPriorPolicy
-  # promotion_policy: AsyncPromotionPolicy
+# Arguments that can not be modified by the user
+# sampling_policy: FixedPriorPolicy
+# promotion_policy: AsyncPromotionPolicy
34 changes: 16 additions & 18 deletions neps/optimizers/default_searchers/bayesian_optimization.yaml
@@ -1,19 +1,17 @@
-searcher_init:
-  algorithm: bayesian_optimization
-searcher_kwargs:
-  # Arguments that can be modified by the user
-  initial_design_size: 10
-  surrogate_model: gp # or {"gp_hierarchy"}
-  acquisition: EI # or {"LogEI", "AEI"}
-  log_prior_weighted: false
-  acquisition_sampler: mutation # or {"random", "evolution"}
-  random_interleave_prob: 0.0
-  disable_priors: true
-  sample_default_first: false
+algorithm: bayesian_optimization
+# Arguments that can be modified by the user
+initial_design_size: 10
+surrogate_model: gp # or {"gp_hierarchy"}
+acquisition: EI # or {"LogEI", "AEI"}
+log_prior_weighted: false
+acquisition_sampler: mutation # or {"random", "evolution"}
+random_interleave_prob: 0.0
+disable_priors: true
+sample_default_first: false
 
-  # Other arguments:
-  # surrogate_model_args: None # type: dict
-  # optimal_assignment: false # type: bool
-  # domain_se_kernel: None # type: str
-  # graph_kernels: None # type: list
-  # hp_kernels: None # type: list
+# Other arguments:
+# surrogate_model_args: None # type: dict
+# optimal_assignment: false # type: bool
+# domain_se_kernel: None # type: str
+# graph_kernels: None # type: list
+# hp_kernels: None # type: list
24 changes: 11 additions & 13 deletions neps/optimizers/default_searchers/hyperband.yaml
@@ -1,14 +1,12 @@
-searcher_init:
-  algorithm: hyperband
-searcher_kwargs:
-  # Arguments that can be modified by the user
-  eta: 3
-  initial_design_type: max_budget
-  use_priors: false
-  random_interleave_prob: 0.0
-  sample_default_first: false
-  sample_default_at_target: false
+algorithm: hyperband
+# Arguments that can be modified by the user
+eta: 3
+initial_design_type: max_budget
+use_priors: false
+random_interleave_prob: 0.0
+sample_default_first: false
+sample_default_at_target: false
 
-  # Arguments that can not be modified by the user
-  # sampling_policy: RandomUniformPolicy
-  # promotion_policy: SyncPromotionPolicy
+# Arguments that can not be modified by the user
+# sampling_policy: RandomUniformPolicy
+# promotion_policy: SyncPromotionPolicy
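With `full_configuration_template.yaml` now pointing at `hyperband` by default, here is a sketch of selecting this searcher and overriding one of the defaults above; it assumes extra keyword arguments to `neps.run(...)` land in `searcher_kwargs` and reach the optimizer, as the loop at the end of the `neps/api.py` hunk suggests (toy pipeline, illustrative values):

```python
import neps

def run_pipeline(x: float, epochs: int) -> dict:
    return {"loss": (x - 0.5) ** 2 / epochs}  # toy multi-fidelity objective

pipeline_space = {
    "x": neps.FloatParameter(lower=0.0, upper=1.0),
    "epochs": neps.IntegerParameter(lower=1, upper=27, is_fidelity=True),
}

neps.run(
    run_pipeline=run_pipeline,
    pipeline_space=pipeline_space,
    root_directory="results/hyperband",
    max_evaluations_total=20,
    searcher="hyperband",
    eta=4,  # assumed to override the eta: 3 default from this file
)
```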
46 changes: 22 additions & 24 deletions neps/optimizers/default_searchers/mobster.yaml
@@ -1,27 +1,25 @@
-searcher_init:
-  algorithm: mobster
-searcher_kwargs:
-  # Arguments that can be modified by the user
-  eta: 3
-  initial_design_type: max_budget
-  use_priors: false
-  random_interleave_prob: 0.0
-  sample_default_first: false
-  sample_default_at_target: false
+algorithm: mobster
+# Arguments that can be modified by the user
+eta: 3
+initial_design_type: max_budget
+use_priors: false
+random_interleave_prob: 0.0
+sample_default_first: false
+sample_default_at_target: false
 
-  # arguments for model
-  surrogate_model: gp # or {"gp_hierarchy"}
-  acquisition: EI # or {"LogEI", "AEI"}
-  log_prior_weighted: false
-  acquisition_sampler: random # or {"mutation", "evolution"}
+# arguments for model
+surrogate_model: gp # or {"gp_hierarchy"}
+acquisition: EI # or {"LogEI", "AEI"}
+log_prior_weighted: false
+acquisition_sampler: random # or {"mutation", "evolution"}
 
-  # Arguments that can not be modified by the user
-  # sampling_policy: RandomUniformPolicy
-  # promotion_policy: AsyncPromotionPolicy
-  # model_policy: ModelPolicy
+# Arguments that can not be modified by the user
+# sampling_policy: RandomUniformPolicy
+# promotion_policy: AsyncPromotionPolicy
+# model_policy: ModelPolicy
 
-  # Other arguments
-  # surrogate_model_args: None # type: dict
-  # domain_se_kernel: None # type: str
-  # graph_kernels: None # type: list
-  # hp_kernels: None # type: list
+# Other arguments
+# surrogate_model_args: None # type: dict
+# domain_se_kernel: None # type: str
+# graph_kernels: None # type: list
+# hp_kernels: None # type: list
36 changes: 17 additions & 19 deletions neps/optimizers/default_searchers/pibo.yaml
@@ -1,20 +1,18 @@
-searcher_init:
-  algorithm: bayesian_optimization
-searcher_kwargs:
-  # Arguments that can be modified by the user
-  initial_design_size: 10
-  surrogate_model: gp # or {"gp_hierarchy"}
-  acquisition: EI # or {"LogEI", "AEI"}
-  log_prior_weighted: false
-  acquisition_sampler: mutation # or {"random", "evolution"}
-  random_interleave_prob: 0.0
-  disable_priors: false
-  prior_confidence: medium # or {"low", "high"}
-  sample_default_first: false
+algorithm: bayesian_optimization
+# Arguments that can be modified by the user
+initial_design_size: 10
+surrogate_model: gp # or {"gp_hierarchy"}
+acquisition: EI # or {"LogEI", "AEI"}
+log_prior_weighted: false
+acquisition_sampler: mutation # or {"random", "evolution"}
+random_interleave_prob: 0.0
+disable_priors: false
+prior_confidence: medium # or {"low", "high"}
+sample_default_first: false
 
-  # Other arguments:
-  # surrogate_model_args: None # type: dict
-  # optimal_assignment: false # type: bool
-  # domain_se_kernel: None # type: str
-  # graph_kernels: None # type: list
-  # hp_kernels: None # type: list
+# Other arguments:
+# surrogate_model_args: None # type: dict
+# optimal_assignment: false # type: bool
+# domain_se_kernel: None # type: str
+# graph_kernels: None # type: list
+# hp_kernels: None # type: list
38 changes: 18 additions & 20 deletions neps/optimizers/default_searchers/priorband.yaml
@@ -1,22 +1,20 @@
-searcher_init:
-  algorithm: priorband
-searcher_kwargs:
-  # Arguments that can be modified by the user
-  eta: 3
-  initial_design_type: max_budget
-  prior_confidence: medium # or {"low", "high"}
-  random_interleave_prob: 0.0
-  sample_default_first: true
-  sample_default_at_target: false
-  prior_weight_type: geometric
-  inc_sample_type: mutation
-  inc_mutation_rate: 0.5
-  inc_mutation_std: 0.25
-  inc_style: dynamic
+algorithm: priorband
+# Arguments that can be modified by the user
+eta: 3
+initial_design_type: max_budget
+prior_confidence: medium # or {"low", "high"}
+random_interleave_prob: 0.0
+sample_default_first: true
+sample_default_at_target: false
+prior_weight_type: geometric
+inc_sample_type: mutation
+inc_mutation_rate: 0.5
+inc_mutation_std: 0.25
+inc_style: dynamic
 
-  # arguments for model
-  model_based: false # crucial argument to set to allow model-search
+# arguments for model
+model_based: false # crucial argument to set to allow model-search
 
-  # Arguments that can not be modified by the user
-  # sampling_policy: EnsemblePolicy
-  # promotion_policy: SyncPromotionPolicy
+# Arguments that can not be modified by the user
+# sampling_policy: EnsemblePolicy
+# promotion_policy: SyncPromotionPolicy
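The comments single out `model_based` as the switch that permits model-based search. A hypothetical custom PriorBand config in the new flat format that flips it on, round-tripped through the same pop-the-`algorithm` logic as above (file contents are illustrative, not shipped defaults):

```python
import yaml

custom_priorband = """
algorithm: priorband
eta: 3
prior_confidence: medium
sample_default_first: true
model_based: true  # enable the model-based variant
"""

config = yaml.safe_load(custom_priorband)
algorithm = config.pop("algorithm")  # -> 'priorband'
searcher_kwargs = config             # remaining keys go to the optimizer
print(algorithm, searcher_kwargs)
```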