Skip to content

Commit

Permalink
[temporal] [cont] Fix errors
Browse files Browse the repository at this point in the history
[test] Add the tests for the instantiation of abstract evaluator 1 -- 3
[test] Add the tests for util 1 -- 2
[test] Add the tests for train_evaluator 1 -- 2
[refactor] [test] Clean up the pipeline classes and add tests for it 1 -- 2
[test] Add the tests for tae 1 -- 4
[fix] Fix an error due to the change in extract learning curve
[experimental] Increase the coverage

[test] Add tests for pipeline repr

Since the modifications to the tests removed coverage of the pipeline repr,
I added tests to restore coverage for those parts.
The decrease in coverage was essentially caused by the use of
dummy pipelines.
  • Loading branch information
nabenabe0928 committed Feb 23, 2022
1 parent 1dfa30c commit b32e8be
Show file tree
Hide file tree
Showing 16 changed files with 769 additions and 279 deletions.
2 changes: 1 addition & 1 deletion autoPyTorch/api/base_task.py
Original file line number Diff line number Diff line change
Expand Up @@ -1046,7 +1046,7 @@ def _search(
DisableFileOutputParameters.y_opt in self._disable_file_output
and self.ensemble_size > 1
):
self._logger.warning(f"No ensemble will be created when {DisableFileOutputParameters.y_optimization}"
self._logger.warning(f"No ensemble will be created when {DisableFileOutputParameters.y_opt}"
f" is in disable_file_output")

self._memory_limit = memory_limit
Expand Down
17 changes: 11 additions & 6 deletions autoPyTorch/evaluation/abstract_evaluator.py
Original file line number Diff line number Diff line change
Expand Up @@ -261,6 +261,9 @@ def _init_miscellaneous(self) -> None:
self.predict_function = self._predict_proba

self.X_train, self.y_train = datamanager.train_tensors
self.unique_train_labels = [
list(np.unique(self.y_train[train_indices])) for train_indices, _ in self.splits
]
self.X_valid, self.y_valid, self.X_test, self.y_test = None, None, None, None
if datamanager.val_tensors is not None:
self.X_valid, self.y_valid = datamanager.val_tensors
Expand Down Expand Up @@ -383,7 +386,7 @@ def predict(
self,
X: Optional[np.ndarray],
pipeline: BaseEstimator,
label_examples: Optional[np.ndarray] = None
unique_train_labels: Optional[List[int]] = None
) -> Optional[np.ndarray]:
"""
A wrapper function to handle the prediction of regression or classification tasks.
Expand All @@ -393,7 +396,8 @@ def predict(
A set of features to feed to the pipeline
pipeline (BaseEstimator):
A model that will take the features X return a prediction y
label_examples (Optional[np.ndarray]):
unique_train_labels (Optional[List[int]]):
The unique labels included in the train split.

Returns:
(np.ndarray):
Expand All @@ -417,7 +421,7 @@ def predict(
prediction=pred,
num_classes=self.num_classes,
output_type=self.output_type,
label_examples=label_examples
unique_train_labels=unique_train_labels
)

return pred
Expand All @@ -441,6 +445,10 @@ def _get_pipeline(self) -> BaseEstimator:
A scikit-learn compliant pipeline which is not yet fit to the data.
"""
config = self.evaluator_params.configuration
if not isinstance(config, (int, str, Configuration)):
raise TypeError("The type of configuration must be either (int, str, Configuration), "
f"but got type {type(config)}")

kwargs = dict(
config=config,
random_state=np.random.RandomState(self.fixed_pipeline_params.seed),
Expand All @@ -458,9 +466,6 @@ def _get_pipeline(self) -> BaseEstimator:
exclude=self.fixed_pipeline_params.exclude,
search_space_updates=self.fixed_pipeline_params.search_space_updates,
**kwargs)
else:
raise ValueError("The type of configuration must be either (int, str, Configuration), "
f"but got type {type(config)}")

def _loss(self, labels: np.ndarray, preds: np.ndarray) -> Dict[str, float]:
"""SMAC follows a minimization goal, so the make_scorer
Expand Down
Loading

0 comments on commit b32e8be

Please sign in to comment.