From a213cd32aeb3408b7c672d901cc8dc39925e3f61 Mon Sep 17 00:00:00 2001 From: Nikolaos Perrakis <89025229+nikml@users.noreply.github.com> Date: Tue, 7 May 2024 13:03:49 +0300 Subject: [PATCH] Update Missing Values Handling (#378) * variable code fixes * bc auroc sampling error calculation * refactor auroc sampling error nan handling * auroc realized perf nan handling * realized ap missing value handling * upd bc metrics missing value handling * upd realized perf mc missing value handling * upd realized perf regr missing value handling and fixes * upd dle missing value handling * wip update CBPE missing value handling * update CBPE BC missing value handling * update MC CBPE missing value handling * remove redundant methods/classes * linting updates * linting for DLE * performance calculation linting updates * remove unneeded import * mypy fixes * more mypy fixes * mypy updates * cbpe lingint * sampling error update * ap fix * code fixes wip * nan code updates * Removed some superfluous comments * Remove exception re-raise as it causes the "fallback scenario" to be ignored. * mypy and linting --------- Co-authored-by: Niels Nuyttens --- nannyml/base.py | 30 + .../data_reconstruction/calculator.py | 2 +- nannyml/performance_calculation/calculator.py | 7 +- .../performance_calculation/metrics/base.py | 9 +- .../metrics/binary_classification.py | 427 ++++-- .../metrics/multiclass_classification.py | 375 +++-- .../metrics/regression.py | 240 ++- .../confidence_based/cbpe.py | 7 + .../confidence_based/metrics.py | 1354 ++++++++++++----- .../direct_loss_estimation/dle.py | 12 +- .../direct_loss_estimation/metrics.py | 219 ++- .../direct_loss_estimation/result.py | 8 +- .../sampling_error/binary_classification.py | 21 +- tests/drift/test_multiv_pca.py | 4 +- .../performance_estimation/CBPE/test_cbpe.py | 14 +- ...st_binary_classification_sampling_error.py | 10 + 16 files changed, 1889 insertions(+), 850 deletions(-) diff --git a/nannyml/base.py b/nannyml/base.py index 2fff2d87..f81241ef 100644 --- a/nannyml/base.py +++ b/nannyml/base.py @@ -614,3 +614,33 @@ def _raise_exception_for_negative_values(column: pd.Series): "\tLog-based metrics are not supported for negative target values.\n" f"\tCheck '{column.name}' at rows {str(negative_item_indices)}." ) + + +def common_nan_removal(data: pd.DataFrame, selected_columns: List[str]) -> Tuple[pd.DataFrame, bool]: + """Remove rows of dataframe containing NaN values on selected columns. + + Parameters + ---------- + data: pd.DataFrame + Pandas dataframe containing data. + selected_columns: List[str] + List containing the strings of column names + + Returns + ------- + df: + Dataframe with rows containing NaN's on selected_columns removed. All columns of original + dataframe are being returned. 
+ empty: + Boolean indicating whether the resulting dataframe is empty, i.e. contains no rows (True), or still contains rows (False) + """ + # If we want target and it's not available we get None + if not set(selected_columns) <= set(data.columns): + raise InvalidArgumentsException( + f"Selected columns: {selected_columns} not all present in provided data columns {list(data.columns)}" + ) + df = data.dropna(axis=0, how='any', inplace=False, subset=selected_columns).reset_index(drop=True).infer_objects() + empty: bool = False + if df.shape[0] == 0: + empty = True + return (df, empty) diff --git a/nannyml/drift/multivariate/data_reconstruction/calculator.py b/nannyml/drift/multivariate/data_reconstruction/calculator.py index 0b5fcbb1..07b12405 100644 --- a/nannyml/drift/multivariate/data_reconstruction/calculator.py +++ b/nannyml/drift/multivariate/data_reconstruction/calculator.py @@ -14,7 +14,7 @@ """ -from typing import List, Optional, Tuple, Union, Dict +from typing import Dict, List, Optional, Tuple, Union import numpy as np import pandas as pd diff --git a/nannyml/performance_calculation/calculator.py b/nannyml/performance_calculation/calculator.py index d780f00e..a4c06cf4 100644 --- a/nannyml/performance_calculation/calculator.py +++ b/nannyml/performance_calculation/calculator.py @@ -111,11 +111,9 @@ def __init__( When it is not given, only the ROC AUC and Average Precision metrics are supported. problem_type: Union[str, ProblemType] Determines which method to use. Allowed values are: - - 'regression' - 'classification_binary' - 'classification_multiclass' - y_pred_proba: ModelOutputsType, default=None Name(s) of the column(s) containing your model output. Pass a single string when there is only a single model output column, e.g. in binary classification cases. @@ -124,7 +122,6 @@ def __init__( timestamp_column_name: str, default=None The name of the column containing the timestamp of the model prediction. thresholds: dict - The default values are:: { @@ -158,7 +155,7 @@ def __init__( chunk_period: str, default=None Splits the data according to the given period. Only one of `chunk_size`, `chunk_number` or `chunk_period` should be given. - chunker : Chunker, default=None + chunker: Chunker, default=None The `Chunker` used to split the data sets into a lists of chunks. normalize_confusion_matrix: str, default=None Determines how the confusion matrix will be normalized. Allowed values are None, 'all', 'true' and @@ -311,7 +308,7 @@ def _calculate(self, data: pd.DataFrame, *args, **kwargs) -> Result: data = data.copy(deep=True) # Setup for target completeness rate - data['NML_TARGET_INCOMPLETE'] = data[self.y_true].isna().astype(np.int16) + data[TARGET_COMPLETENESS_RATE_COLUMN_NAME] = data[self.y_true].isna().astype(np.int16) # Generate chunks if self.chunker is None: diff --git a/nannyml/performance_calculation/metrics/base.py b/nannyml/performance_calculation/metrics/base.py index f62f499f..32700054 100644 --- a/nannyml/performance_calculation/metrics/base.py +++ b/nannyml/performance_calculation/metrics/base.py @@ -1,6 +1,7 @@ # Author: Niels Nuyttens # # License: Apache Software License 2.0 +"""Base classes for performance calculation.""" import abc import logging from logging import Logger @@ -134,7 +135,6 @@ def sampling_error(self, data: pd.DataFrame): Returns ------- - sampling_error: float The expected sampling error. @@ -153,6 +153,7 @@ def alert(self, value: float) -> bool: ---------- value: float Value of a calculated metric.
+ Returns ------- bool: bool @@ -206,18 +207,22 @@ def get_chunk_record(self, chunk_data: pd.DataFrame) -> Dict: @property def display_name(self) -> str: + """Get metric display name.""" return self.name @property def column_name(self) -> str: + """Get metric column name.""" return self.components[0][1] @property def display_names(self) -> List[str]: + """Get metric display names.""" return [c[0] for c in self.components] @property def column_names(self) -> List[str]: + """Get metric column names.""" return [c[1] for c in self.components] @@ -256,6 +261,8 @@ def create(cls, key: str, use_case: ProblemType, **kwargs) -> Metric: @classmethod def register(cls, metric: str, use_case: ProblemType) -> Callable: + """Register performance metric class in MetricFactory.""" + def inner_wrapper(wrapped_class: Type[Metric]) -> Type[Metric]: if metric in cls.registry: if use_case in cls.registry[metric]: diff --git a/nannyml/performance_calculation/metrics/binary_classification.py b/nannyml/performance_calculation/metrics/binary_classification.py index 261d61ba..c70033ad 100644 --- a/nannyml/performance_calculation/metrics/binary_classification.py +++ b/nannyml/performance_calculation/metrics/binary_classification.py @@ -8,6 +8,7 @@ import numpy as np import pandas as pd from sklearn.metrics import ( + accuracy_score, average_precision_score, confusion_matrix, f1_score, @@ -17,7 +18,7 @@ ) from nannyml._typing import ProblemType -from nannyml.base import _list_missing, _remove_nans +from nannyml.base import _list_missing, common_nan_removal from nannyml.chunk import Chunk, Chunker from nannyml.exceptions import InvalidArgumentsException from nannyml.performance_calculation.metrics.base import Metric, MetricFactory @@ -54,6 +55,8 @@ class BinaryClassificationAUROC(Metric): """Area under Receiver Operating Curve metric.""" + y_pred_proba: str + def __init__( self, y_true: str, @@ -97,21 +100,27 @@ def __str__(self): def _fit(self, reference_data: pd.DataFrame): """Metric _fit implementation on reference data.""" _list_missing([self.y_true, self.y_pred_proba], list(reference_data.columns)) - # we don't want to count missing rows for sampling error - reference_data = _remove_nans(reference_data, (self.y_true,)) - self._sampling_error_components = auroc_sampling_error_components( - y_true_reference=reference_data[self.y_true], - y_pred_proba_reference=reference_data[self.y_pred_proba], - ) + data = reference_data[[self.y_true, self.y_pred_proba]] + data, empty = common_nan_removal(data, [self.y_true, self.y_pred_proba]) + if empty: + self._sampling_error_components = np.NaN, 0 + else: + self._sampling_error_components = auroc_sampling_error_components( + y_true_reference=data[self.y_true], + y_pred_proba_reference=data[self.y_pred_proba], + ) def _calculate(self, data: pd.DataFrame): """Redefine to handle NaNs and edge cases.""" _list_missing([self.y_true, self.y_pred_proba], list(data.columns)) - data = _remove_nans(data, (self.y_true,)) + data = data[[self.y_true, self.y_pred_proba]] + data, empty = common_nan_removal(data, [self.y_true, self.y_pred_proba]) + if empty: + warnings.warn(f"Too many missing values, cannot calculate {self.display_name}. " f"Returning NaN.") + return np.NaN y_true = data[self.y_true] y_pred_proba = data[self.y_pred_proba] - if y_true.nunique() <= 1: warnings.warn( f"'{self.y_true}' only contains a single class for chunk, cannot calculate {self.display_name}. 
" @@ -122,7 +131,15 @@ def _calculate(self, data: pd.DataFrame): return roc_auc_score(y_true, y_pred_proba) def _sampling_error(self, data: pd.DataFrame) -> float: - return auroc_sampling_error(self._sampling_error_components, data) + data = data[[self.y_true, self.y_pred_proba]] + data, empty = common_nan_removal(data, [self.y_true, self.y_pred_proba]) + if empty: + warnings.warn( + f"Too many missing values, cannot calculate {self.display_name} sampling error. " "Returning NaN." + ) + return np.NaN + else: + return auroc_sampling_error(self._sampling_error_components, data) @MetricFactory.register(metric='average_precision', use_case=ProblemType.CLASSIFICATION_BINARY) @@ -132,6 +149,8 @@ class BinaryClassificationAP(Metric): https://scikit-learn.org/stable/modules/generated/sklearn.metrics.average_precision_score.html """ + y_pred_proba: str + def __init__( self, y_true: str, @@ -175,21 +194,26 @@ def __str__(self): def _fit(self, reference_data: pd.DataFrame): """Metric _fit implementation on reference data.""" _list_missing([self.y_true, self.y_pred_proba], list(reference_data.columns)) - # we don't want to count missing rows for sampling error - reference_data = _remove_nans(reference_data, (self.y_true,)) + data, empty = common_nan_removal( + reference_data[[self.y_true, self.y_pred_proba]], [self.y_true, self.y_pred_proba] + ) - if 1 not in reference_data[self.y_true].unique(): + if empty: self._sampling_error_components = np.NaN, 0 else: self._sampling_error_components = ap_sampling_error_components( - y_true_reference=reference_data[self.y_true], - y_pred_proba_reference=reference_data[self.y_pred_proba], + y_true_reference=data[self.y_true], + y_pred_proba_reference=data[self.y_pred_proba], ) def _calculate(self, data: pd.DataFrame): """Redefine to handle NaNs and edge cases.""" _list_missing([self.y_true, self.y_pred_proba], list(data.columns)) - data = _remove_nans(data, (self.y_true,)) + data = data[[self.y_true, self.y_pred_proba]] + data, empty = common_nan_removal(data, [self.y_true, self.y_pred_proba]) + if empty: + warnings.warn(f"Too many missing values, cannot calculate {self.display_name}. " f"Returning NaN.") + return np.NaN y_true = data[self.y_true] y_pred_proba = data[self.y_pred_proba] @@ -204,13 +228,22 @@ def _calculate(self, data: pd.DataFrame): return average_precision_score(y_true, y_pred_proba) def _sampling_error(self, data: pd.DataFrame) -> float: - return ap_sampling_error(self._sampling_error_components, data) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred_proba]], [self.y_true, self.y_pred_proba]) + if empty: + warnings.warn( + f"Too many missing values, cannot calculate {self.display_name} sampling error. " "Returning NaN." + ) + return np.NaN + else: + return ap_sampling_error(self._sampling_error_components, data) @MetricFactory.register(metric='f1', use_case=ProblemType.CLASSIFICATION_BINARY) class BinaryClassificationF1(Metric): """F1 score metric.""" + y_pred: str + def __init__( self, y_true: str, @@ -248,21 +281,28 @@ def __init__( self._sampling_error_components: Tuple = () def __str__(self): + """Get string representation of metric.""" return "f1" def _fit(self, reference_data: pd.DataFrame): _list_missing([self.y_true, self.y_pred], list(reference_data.columns)) - # TODO: maybe handle data quality issues here and pass clean data to sampling error calculation? 
- self._sampling_error_components = f1_sampling_error_components( - y_true_reference=reference_data[self.y_true], - y_pred_reference=reference_data[self.y_pred], - ) + data, empty = common_nan_removal(reference_data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + + if empty: + self._sampling_error_components = np.NaN, 0 + else: + self._sampling_error_components = f1_sampling_error_components( + y_true_reference=data[self.y_true], + y_pred_reference=data[self.y_pred], + ) def _calculate(self, data: pd.DataFrame): """Redefine to handle NaNs and edge cases.""" _list_missing([self.y_true, self.y_pred], list(data.columns)) - assert self.y_pred - data = _remove_nans(data, (self.y_true, self.y_pred)) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn(f"Too many missing values, cannot calculate {self.display_name}. " f"Returning NaN.") + return np.NaN y_true = data[self.y_true] y_pred = data[self.y_pred] @@ -283,13 +323,22 @@ def _calculate(self, data: pd.DataFrame): return f1_score(y_true, y_pred) def _sampling_error(self, data: pd.DataFrame) -> float: - return f1_sampling_error(self._sampling_error_components, data) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn( + f"Too many missing values, cannot calculate {self.display_name} sampling error. " "Returning NaN." + ) + return np.NaN + else: + return f1_sampling_error(self._sampling_error_components, data) @MetricFactory.register(metric='precision', use_case=ProblemType.CLASSIFICATION_BINARY) class BinaryClassificationPrecision(Metric): """Precision metric.""" + y_pred: str + def __init__( self, y_true: str, @@ -327,19 +376,27 @@ def __init__( self._sampling_error_components: Tuple = () def __str__(self): + """Get string representation of metric.""" return "precision" def _fit(self, reference_data: pd.DataFrame): _list_missing([self.y_true, self.y_pred], list(reference_data.columns)) - self._sampling_error_components = precision_sampling_error_components( - y_true_reference=reference_data[self.y_true], - y_pred_reference=reference_data[self.y_pred], - ) + data, empty = common_nan_removal(reference_data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + + if empty: + self._sampling_error_components = np.NaN, 0 + else: + self._sampling_error_components = precision_sampling_error_components( + y_true_reference=data[self.y_true], + y_pred_reference=data[self.y_pred], + ) def _calculate(self, data: pd.DataFrame): _list_missing([self.y_true, self.y_pred], list(data.columns)) - assert self.y_pred - data = _remove_nans(data, (self.y_true, self.y_pred)) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn(f"Too many missing values, cannot calculate {self.display_name}. " f"Returning NaN.") + return np.NaN y_true = data[self.y_true] y_pred = data[self.y_pred] @@ -360,7 +417,14 @@ def _calculate(self, data: pd.DataFrame): return precision_score(y_true, y_pred) def _sampling_error(self, data: pd.DataFrame): - return precision_sampling_error(self._sampling_error_components, data) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn( + f"Too many missing values, cannot calculate {self.display_name} sampling error. " "Returning NaN." 
+ ) + return np.NaN + else: + return precision_sampling_error(self._sampling_error_components, data) @MetricFactory.register(metric='recall', use_case=ProblemType.CLASSIFICATION_BINARY) @@ -380,6 +444,8 @@ class BinaryClassificationRecall(Metric): refering to the model output column. """ + y_pred: str + def __init__( self, y_true: str, @@ -404,19 +470,26 @@ def __init__( self._sampling_error_components: Tuple = () def __str__(self): + """Get string representation of metric.""" return "recall" def _fit(self, reference_data: pd.DataFrame): _list_missing([self.y_true, self.y_pred], list(reference_data.columns)) - self._sampling_error_components = recall_sampling_error_components( - y_true_reference=reference_data[self.y_true], - y_pred_reference=reference_data[self.y_pred], - ) + data, empty = common_nan_removal(reference_data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + self._sampling_error_components = np.NaN, 0 + else: + self._sampling_error_components = recall_sampling_error_components( + y_true_reference=data[self.y_true], + y_pred_reference=data[self.y_pred], + ) def _calculate(self, data: pd.DataFrame): _list_missing([self.y_true, self.y_pred], list(data.columns)) - assert self.y_pred - data = _remove_nans(data, (self.y_true, self.y_pred)) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn(f"Too many missing values, cannot calculate {self.display_name}. " f"Returning NaN.") + return np.NaN y_true = data[self.y_true] y_pred = data[self.y_pred] @@ -437,13 +510,22 @@ def _calculate(self, data: pd.DataFrame): return recall_score(y_true, y_pred) def _sampling_error(self, data: pd.DataFrame): - return recall_sampling_error(self._sampling_error_components, data) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn( + f"Too many missing values, cannot calculate {self.display_name} sampling error. " "Returning NaN." + ) + return np.NaN + else: + return recall_sampling_error(self._sampling_error_components, data) @MetricFactory.register(metric='specificity', use_case=ProblemType.CLASSIFICATION_BINARY) class BinaryClassificationSpecificity(Metric): """Specificity metric.""" + y_pred: str + def __init__( self, y_true: str, @@ -481,41 +563,46 @@ def __init__( self._sampling_error_components: Tuple = () def __str__(self): + """Get string representation of metric.""" return "specificity" def _fit(self, reference_data: pd.DataFrame): _list_missing([self.y_true, self.y_pred], list(reference_data.columns)) - self._sampling_error_components = specificity_sampling_error_components( - y_true_reference=reference_data[self.y_true], - y_pred_reference=reference_data[self.y_pred], - ) + data, empty = common_nan_removal(reference_data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + self._sampling_error_components = np.NaN, 0 + else: + self._sampling_error_components = specificity_sampling_error_components( + y_true_reference=data[self.y_true], + y_pred_reference=data[self.y_pred], + ) def _calculate(self, data: pd.DataFrame): _list_missing([self.y_true, self.y_pred], list(data.columns)) - assert self.y_pred - data = _remove_nans(data, (self.y_true, self.y_pred)) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn(f"Too many missing values, cannot calculate {self.display_name}. 
" f"Returning NaN.") + return np.NaN y_true = data[self.y_true] y_pred = data[self.y_pred] - if y_true.nunique() <= 1: - warnings.warn( - f"'{self.y_true}' only contains a single class for chunk, cannot calculate {self.display_name}. " - f"Returning NaN." - ) + tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel() + denominator = tn + fp + if denominator == 0: return np.NaN - elif y_pred.nunique() <= 1: + else: + return tn / denominator + + def _sampling_error(self, data: pd.DataFrame): + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: warnings.warn( - f"'{self.y_pred}' only contains a single class for chunk, cannot calculate {self.display_name}. " - f"Returning NaN." + f"Too many missing values, cannot calculate {self.display_name} sampling error. " "Returning NaN." ) return np.NaN else: - tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel() - return tn / (tn + fp) - - def _sampling_error(self, data: pd.DataFrame): - return specificity_sampling_error(self._sampling_error_components, data) + return specificity_sampling_error(self._sampling_error_components, data) @MetricFactory.register(metric='accuracy', use_case=ProblemType.CLASSIFICATION_BINARY) @@ -535,6 +622,8 @@ class BinaryClassificationAccuracy(Metric): refering to the model output column. """ + y_pred: str + def __init__( self, y_true: str, @@ -559,47 +648,49 @@ def __init__( self._sampling_error_components: Tuple = () def __str__(self): + """Get string representation of metric.""" return "accuracy" def _fit(self, reference_data: pd.DataFrame): _list_missing([self.y_true, self.y_pred], list(reference_data.columns)) - self._sampling_error_components = accuracy_sampling_error_components( - y_true_reference=reference_data[self.y_true], - y_pred_reference=reference_data[self.y_pred], - ) + data, empty = common_nan_removal(reference_data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + self._sampling_error_components = np.NaN, 0 + else: + self._sampling_error_components = accuracy_sampling_error_components( + y_true_reference=data[self.y_true], + y_pred_reference=data[self.y_pred], + ) def _calculate(self, data: pd.DataFrame): _list_missing([self.y_true, self.y_pred], list(data.columns)) - assert self.y_pred - data = _remove_nans(data, (self.y_true, self.y_pred)) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn(f"Too many missing values, cannot calculate {self.display_name}. " f"Returning NaN.") + return np.NaN y_true = data[self.y_true] y_pred = data[self.y_pred] - if y_true.nunique() <= 1: - warnings.warn( - f"'{self.y_true}' only contains a single class for chunk, cannot calculate {self.display_name}. " - f"Returning NaN." - ) - return np.NaN - elif y_pred.nunique() <= 1: + return accuracy_score(y_true, y_pred) + + def _sampling_error(self, data: pd.DataFrame): + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: warnings.warn( - f"'{self.y_pred}' only contains a single class for chunk, cannot calculate {self.display_name}. " - f"Returning NaN." + f"Too many missing values, cannot calculate {self.display_name} sampling error. " "Returning NaN." 
) return np.NaN else: - tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel() - return (tp + tn) / (tp + tn + fp + fn) - - def _sampling_error(self, data: pd.DataFrame): - return accuracy_sampling_error(self._sampling_error_components, data) + return accuracy_sampling_error(self._sampling_error_components, data) @MetricFactory.register(metric='business_value', use_case=ProblemType.CLASSIFICATION_BINARY) class BinaryClassificationBusinessValue(Metric): """Business Value metric.""" + y_pred: str + def __init__( self, y_true: str, @@ -668,29 +759,32 @@ def __init__( self._sampling_error_components: Tuple = () def __str__(self): + """Get string representation of metric.""" return "business_value" def _fit(self, reference_data: pd.DataFrame): _list_missing([self.y_true, self.y_pred], list(reference_data.columns)) - self._sampling_error_components = business_value_sampling_error_components( - y_true_reference=reference_data[self.y_true], - y_pred_reference=reference_data[self.y_pred], - business_value_matrix=self.business_value_matrix, - normalize_business_value=self.normalize_business_value, - ) + data, empty = common_nan_removal(reference_data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + self._sampling_error_components = np.NaN, self.normalize_business_value + else: + self._sampling_error_components = business_value_sampling_error_components( + y_true_reference=data[self.y_true], + y_pred_reference=data[self.y_pred], + business_value_matrix=self.business_value_matrix, + normalize_business_value=self.normalize_business_value, + ) def _calculate(self, data: pd.DataFrame): _list_missing([self.y_true, self.y_pred], list(data.columns)) - assert self.y_pred - data = _remove_nans(data, (self.y_true, self.y_pred)) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn(f"'{self.y_true}' contains no data, cannot calculate business value. Returning NaN.") + return np.NaN y_true = data[self.y_true] y_pred = data[self.y_pred] - if y_true.shape[0] == 0: - warnings.warn(f"'{self.y_true}' contains no data, cannot calculate business value. Returning NaN.") - return np.NaN - tp_value = self.business_value_matrix[1, 1] tn_value = self.business_value_matrix[0, 0] fp_value = self.business_value_matrix[0, 1] @@ -706,13 +800,22 @@ def _calculate(self, data: pd.DataFrame): return (bv_array * cm).sum() def _sampling_error(self, data: pd.DataFrame) -> float: - return business_value_sampling_error(self._sampling_error_components, data) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn( + f"Too many missing values, cannot calculate {self.display_name} sampling error. " "Returning NaN." 
+ ) + return np.NaN + else: + return business_value_sampling_error(self._sampling_error_components, data) @MetricFactory.register(metric='confusion_matrix', use_case=ProblemType.CLASSIFICATION_BINARY) class BinaryClassificationConfusionMatrix(Metric): """Confusion Matrix metric.""" + y_pred: str + def __init__( self, y_true: str, @@ -842,40 +945,46 @@ def _calculate_confusion_matrix_alert_thresholds( def _fit(self, reference_data: pd.DataFrame): _list_missing([self.y_true, self.y_pred], list(reference_data.columns)) - - self._true_positive_sampling_error_components = true_positive_sampling_error_components( - y_true_reference=reference_data[self.y_true], - y_pred_reference=reference_data[self.y_pred], - normalize_confusion_matrix=self.normalize_confusion_matrix, - ) - self._true_negative_sampling_error_components = true_negative_sampling_error_components( - y_true_reference=reference_data[self.y_true], - y_pred_reference=reference_data[self.y_pred], - normalize_confusion_matrix=self.normalize_confusion_matrix, - ) - self._false_positive_sampling_error_components = false_positive_sampling_error_components( - y_true_reference=reference_data[self.y_true], - y_pred_reference=reference_data[self.y_pred], - normalize_confusion_matrix=self.normalize_confusion_matrix, - ) - self._false_negative_sampling_error_components = false_negative_sampling_error_components( - y_true_reference=reference_data[self.y_true], - y_pred_reference=reference_data[self.y_pred], - normalize_confusion_matrix=self.normalize_confusion_matrix, + reference_data, empty = common_nan_removal( + reference_data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred] ) + if empty: + self._true_positive_sampling_error_components = (np.NaN, 0.0, self.normalize_confusion_matrix) + self._true_negative_sampling_error_components = (np.NaN, 0.0, self.normalize_confusion_matrix) + self._false_positive_sampling_error_components = (np.NaN, 0.0, self.normalize_confusion_matrix) + self._false_negative_sampling_error_components = (np.NaN, 0.0, self.normalize_confusion_matrix) + else: + self._true_positive_sampling_error_components = true_positive_sampling_error_components( + y_true_reference=reference_data[self.y_true], + y_pred_reference=reference_data[self.y_pred], + normalize_confusion_matrix=self.normalize_confusion_matrix, + ) + self._true_negative_sampling_error_components = true_negative_sampling_error_components( + y_true_reference=reference_data[self.y_true], + y_pred_reference=reference_data[self.y_pred], + normalize_confusion_matrix=self.normalize_confusion_matrix, + ) + self._false_positive_sampling_error_components = false_positive_sampling_error_components( + y_true_reference=reference_data[self.y_true], + y_pred_reference=reference_data[self.y_pred], + normalize_confusion_matrix=self.normalize_confusion_matrix, + ) + self._false_negative_sampling_error_components = false_negative_sampling_error_components( + y_true_reference=reference_data[self.y_true], + y_pred_reference=reference_data[self.y_pred], + normalize_confusion_matrix=self.normalize_confusion_matrix, + ) def _calculate_true_positives(self, data: pd.DataFrame) -> float: _list_missing([self.y_true, self.y_pred], list(data.columns)) - assert self.y_pred - data = _remove_nans(data, (self.y_true, self.y_pred)) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn("Too many missing values, cannot calculate true_positives. 
" "Returning NaN.") + return np.NaN y_true = data[self.y_true] y_pred = data[self.y_pred] - if y_true.empty or y_pred.empty: - warnings.warn("Calculated true_positives contain NaN values.") - return np.nan - num_tp = np.sum(np.logical_and(y_pred, y_true)) num_fn = np.sum(np.logical_and(np.logical_not(y_pred), y_true)) num_fp = np.sum(np.logical_and(y_pred, np.logical_not(y_true))) @@ -891,16 +1000,14 @@ def _calculate_true_positives(self, data: pd.DataFrame) -> float: def _calculate_true_negatives(self, data: pd.DataFrame) -> float: _list_missing([self.y_true, self.y_pred], list(data.columns)) - assert self.y_pred - data = _remove_nans(data, (self.y_true, self.y_pred)) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn("Too many missing values, cannot calculate true_negatives. " "Returning NaN.") + return np.NaN y_true = data[self.y_true] y_pred = data[self.y_pred] - if y_true.empty or y_pred.empty: - warnings.warn("Calculated true_negatives contain NaN values.") - return np.nan - num_tn = np.sum(np.logical_and(np.logical_not(y_pred), np.logical_not(y_true))) num_fn = np.sum(np.logical_and(np.logical_not(y_pred), y_true)) num_fp = np.sum(np.logical_and(y_pred, np.logical_not(y_true))) @@ -916,16 +1023,14 @@ def _calculate_true_negatives(self, data: pd.DataFrame) -> float: def _calculate_false_positives(self, data: pd.DataFrame) -> float: _list_missing([self.y_true, self.y_pred], list(data.columns)) - assert self.y_pred - data = _remove_nans(data, (self.y_true, self.y_pred)) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn("Too many missing values, cannot calculate false_positives. " "Returning NaN.") + return np.NaN y_true = data[self.y_true] y_pred = data[self.y_pred] - if y_true.empty or y_pred.empty: - warnings.warn("Calculated false_positives contain NaN values.") - return np.nan - num_fp = np.sum(np.logical_and(y_pred, np.logical_not(y_true))) num_tn = np.sum(np.logical_and(np.logical_not(y_pred), np.logical_not(y_true))) num_tp = np.sum(np.logical_and(y_pred, y_true)) @@ -941,16 +1046,14 @@ def _calculate_false_positives(self, data: pd.DataFrame) -> float: def _calculate_false_negatives(self, data: pd.DataFrame) -> float: _list_missing([self.y_true, self.y_pred], list(data.columns)) - assert self.y_pred - data = _remove_nans(data, (self.y_true, self.y_pred)) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn("Too many missing values, cannot calculate false_negatives. " "Returning NaN.") + return np.NaN y_true = data[self.y_true] y_pred = data[self.y_pred] - if y_true.empty or y_pred.empty: - warnings.warn(f"'{self.y_true}' contains no data, cannot calculate {self.display_name}. Returning NaN.") - return np.NaN - num_fn = np.sum(np.logical_and(np.logical_not(y_pred), y_true)) num_tn = np.sum(np.logical_and(np.logical_not(y_pred), np.logical_not(y_true))) num_tp = np.sum(np.logical_and(y_pred, y_true)) @@ -977,13 +1080,21 @@ def get_true_pos_info(self, chunk_data: pd.DataFrame) -> Dict: true_pos_info : Dict A dictionary of true positive's information and its value pairs. 
""" - column_name = 'true_positive' true_pos_info: Dict[str, Any] = {} - realized_tp = self._calculate_true_positives(chunk_data) # in this function, check if there are - sampling_error_tp = true_positive_sampling_error(self._true_positive_sampling_error_components, chunk_data) + # we check for nans inside _calculate_true_positives + realized_tp = self._calculate_true_positives(chunk_data) + # we do sampling error nan checks here because we don't have dedicated sampling error function + # TODO: Refactor similarly to multiclass so code can be re-used. + chunk_data, empty = common_nan_removal(chunk_data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn("Too many missing values, cannot calculate true positive sampling error. " "Returning NaN.") + sampling_error_tp = np.NaN + else: + sampling_error_tp = true_positive_sampling_error(self._true_positive_sampling_error_components, chunk_data) + # TODO: NaN removal is duplicated to an extent. Upon refactor consider if we can do it only once true_pos_info[f'{column_name}_sampling_error'] = sampling_error_tp true_pos_info[f'{column_name}'] = realized_tp @@ -1008,13 +1119,21 @@ def get_true_neg_info(self, chunk_data: pd.DataFrame) -> Dict: true_neg_info : Dict A dictionary of true negative's information and its value pairs. """ - column_name = 'true_negative' true_neg_info: Dict[str, Any] = {} + # we check for nans inside _calculate_true_negatives realized_tn = self._calculate_true_negatives(chunk_data) - sampling_error_tn = true_negative_sampling_error(self._true_negative_sampling_error_components, chunk_data) + # we do sampling error nan checks here because we don't have dedicated sampling error function + # TODO: Refactor similarly to multiclass so code can be re-used. + chunk_data, empty = common_nan_removal(chunk_data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn("Too many missing values, cannot calculate true negative sampling error. " "Returning NaN.") + sampling_error_tn = np.NaN + else: + sampling_error_tn = true_negative_sampling_error(self._true_negative_sampling_error_components, chunk_data) + # TODO: NaN removal is duplicated to an extent. Upon refactor consider if we can do it only once true_neg_info[f'{column_name}_sampling_error'] = sampling_error_tn true_neg_info[f'{column_name}'] = realized_tn @@ -1039,13 +1158,23 @@ def get_false_pos_info(self, chunk_data: pd.DataFrame) -> Dict: false_pos_info : Dict A dictionary of false positive's information and its value pairs. """ - column_name = 'false_positive' false_pos_info: Dict[str, Any] = {} + # we check for nans inside _calculate_false_positives realized_fp = self._calculate_false_positives(chunk_data) - sampling_error_fp = false_positive_sampling_error(self._false_positive_sampling_error_components, chunk_data) + # we do sampling error nan checks here because we don't have dedicated sampling error function + # TODO: Refactor similarly to multiclass so code can be re-used. + chunk_data, empty = common_nan_removal(chunk_data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn("Too many missing values, cannot calculate false positive sampling error. " "Returning NaN.") + sampling_error_fp = np.NaN + else: + sampling_error_fp = false_positive_sampling_error( + self._false_positive_sampling_error_components, chunk_data + ) + # TODO: NaN removal is duplicated to an extent. 
Upon refactor consider if we can do it only once false_pos_info[f'{column_name}_sampling_error'] = sampling_error_fp false_pos_info[f'{column_name}'] = realized_fp @@ -1070,13 +1199,23 @@ def get_false_neg_info(self, chunk_data: pd.DataFrame) -> Dict: false_neg_info : Dict A dictionary of false negative's information and its value pairs. """ - column_name = 'false_negative' false_neg_info: Dict[str, Any] = {} + # we check for nans inside _calculate_false_negatives realized_fn = self._calculate_false_negatives(chunk_data) - sampling_error_fn = false_negative_sampling_error(self._false_negative_sampling_error_components, chunk_data) + # we do sampling error nan checks here because we don't have dedicated sampling error function + # TODO: Refactor similarly to multiclass so code can be re-used. + chunk_data, empty = common_nan_removal(chunk_data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn("Too many missing values, cannot calculate false negative sampling error. " "Returning NaN.") + sampling_error_fn = np.NaN + else: + sampling_error_fn = false_negative_sampling_error( + self._false_negative_sampling_error_components, chunk_data + ) + # TODO: NaN removal is duplicated to an extent. Upon refactor consider if we can do it only once false_neg_info[f'{column_name}_sampling_error'] = sampling_error_fn false_neg_info[f'{column_name}'] = realized_fn diff --git a/nannyml/performance_calculation/metrics/multiclass_classification.py b/nannyml/performance_calculation/metrics/multiclass_classification.py index b04b4897..75bceec6 100644 --- a/nannyml/performance_calculation/metrics/multiclass_classification.py +++ b/nannyml/performance_calculation/metrics/multiclass_classification.py @@ -2,10 +2,6 @@ # # # License: Apache Software License 2.0 -# Author: Niels Nuyttens -# -# License: Apache Software License 2.0 - """Module containing metric utilities and implementations.""" import warnings from typing import Dict, List, Optional, Tuple, Union # noqa: TYP001 @@ -24,7 +20,7 @@ from sklearn.preprocessing import LabelBinarizer, label_binarize from nannyml._typing import ProblemType, class_labels, model_output_column_names -from nannyml.base import _list_missing, _remove_nans +from nannyml.base import _list_missing, common_nan_removal from nannyml.chunk import Chunker from nannyml.exceptions import InvalidArgumentsException from nannyml.performance_calculation.metrics.base import Metric, MetricFactory @@ -51,12 +47,14 @@ class MulticlassClassificationAUROC(Metric): """Area under Receiver Operating Curve metric.""" + y_pred_proba: Dict[str, str] + def __init__( self, y_true: str, y_pred: str, threshold: Threshold, - y_pred_proba: Optional[Union[str, Dict[str, str]]] = None, + y_pred_proba: Dict[str, str], **kwargs, ): """Creates a new AUROC instance.
@@ -93,19 +91,25 @@ def __init__( self._sampling_error_components: List[Tuple] = [] def __str__(self): + """Get string representation of metric.""" return "roc_auc" def _fit(self, reference_data: pd.DataFrame): - _list_missing([self.y_true, self.y_pred], list(reference_data.columns)) - - # sampling error classes = class_labels(self.y_pred_proba) - binarized_y_true = list(label_binarize(reference_data[self.y_true], classes=classes).T) - y_pred_proba = [reference_data[self.y_pred_proba[clazz]].T for clazz in classes] - - self._sampling_error_components = auroc_sampling_error_components( - y_true_reference=binarized_y_true, y_pred_proba_reference=y_pred_proba + class_y_pred_proba_columns = model_output_column_names(self.y_pred_proba) + _list_missing([self.y_true] + class_y_pred_proba_columns, list(reference_data.columns)) + reference_data, empty = common_nan_removal( + reference_data[[self.y_true] + class_y_pred_proba_columns], [self.y_true] + class_y_pred_proba_columns ) + if empty: + self._sampling_error_components = [(np.NaN, 0) for class_col in class_y_pred_proba_columns] + else: + # sampling error + binarized_y_true = list(label_binarize(reference_data[self.y_true], classes=classes).T) + y_pred_proba = [reference_data[self.y_pred_proba[clazz]].T for clazz in classes] + self._sampling_error_components = auroc_sampling_error_components( + y_true_reference=binarized_y_true, y_pred_proba_reference=y_pred_proba + ) def _calculate(self, data: pd.DataFrame): if not isinstance(self.y_pred_proba, Dict): @@ -115,8 +119,14 @@ def _calculate(self, data: pd.DataFrame): "be a dictionary mapping classes to columns." ) - _list_missing([self.y_true] + model_output_column_names(self.y_pred_proba), data) - data = _remove_nans(data, (self.y_true, self.y_pred_proba.values())) + class_y_pred_proba_columns = model_output_column_names(self.y_pred_proba) + _list_missing([self.y_true] + class_y_pred_proba_columns, data) + data, empty = common_nan_removal( + data[[self.y_true] + class_y_pred_proba_columns], [self.y_true] + class_y_pred_proba_columns + ) + if empty: + warnings.warn(f"Too many missing values, cannot calculate {self.display_name}. " f"Returning NaN.") + return np.NaN labels, class_probability_columns = [], [] for label in sorted(list(self.y_pred_proba.keys())): @@ -126,11 +136,6 @@ def _calculate(self, data: pd.DataFrame): y_true = data[self.y_true] y_pred_proba = data[class_probability_columns] - if y_pred_proba.isna().all().any(): - raise InvalidArgumentsException( - f"could not calculate metric {self.display_name}: " "prediction column contains no data" - ) - if y_true.nunique() <= 1: warnings.warn( f"'{self.y_true}' only contains a single class for chunk, cannot calculate {self.display_name}. " @@ -141,13 +146,27 @@ def _calculate(self, data: pd.DataFrame): return roc_auc_score(y_true, y_pred_proba, multi_class='ovr', average='macro', labels=labels) def _sampling_error(self, data: pd.DataFrame) -> float: - return auroc_sampling_error(self._sampling_error_components, data) + class_y_pred_proba_columns = model_output_column_names(self.y_pred_proba) + _list_missing([self.y_true] + class_y_pred_proba_columns, data) + data, empty = common_nan_removal( + data[[self.y_true] + class_y_pred_proba_columns], [self.y_true] + class_y_pred_proba_columns + ) + if empty: + warnings.warn( + f"Too many missing values, cannot calculate {self.display_name} sampling error. " f"Returning NaN." 
+ ) + return np.NaN + else: + return auroc_sampling_error(self._sampling_error_components, data) @MetricFactory.register(metric='f1', use_case=ProblemType.CLASSIFICATION_MULTICLASS) class MulticlassClassificationF1(Metric): """F1 score metric.""" + y_pred: str + y_pred_proba: Dict[str, str] + def __init__( self, y_true: str, @@ -188,19 +207,25 @@ def __init__( self._sampling_error_components: List[Tuple] = [] def __str__(self): + """Get string representation of metric.""" return "f1" def _fit(self, reference_data: pd.DataFrame): _list_missing([self.y_true, self.y_pred], reference_data) - - # sampling error - label_binarizer = LabelBinarizer() - binarized_y_true = list(label_binarizer.fit_transform(reference_data[self.y_true]).T) - binarized_y_pred = list(label_binarizer.transform(reference_data[self.y_pred]).T) - - self._sampling_error_components = f1_sampling_error_components( - y_true_reference=binarized_y_true, y_pred_reference=binarized_y_pred + classes = class_labels(self.y_pred_proba) + reference_data, empty = common_nan_removal( + reference_data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred] ) + if empty: + self._sampling_error_components = [(np.NaN, 0) for clazz in classes] + else: + # sampling error + label_binarizer = LabelBinarizer() + binarized_y_true = list(label_binarizer.fit_transform(reference_data[self.y_true]).T) + binarized_y_pred = list(label_binarizer.transform(reference_data[self.y_pred]).T) + self._sampling_error_components = f1_sampling_error_components( + y_true_reference=binarized_y_true, y_pred_reference=binarized_y_pred + ) def _calculate(self, data: pd.DataFrame): if not isinstance(self.y_pred_proba, Dict): @@ -211,18 +236,15 @@ def _calculate(self, data: pd.DataFrame): ) _list_missing([self.y_true, self.y_pred], data) - assert self.y_pred - data = _remove_nans(data, (self.y_true, self.y_pred)) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn(f"Too many missing values, cannot calculate {self.display_name}. " f"Returning NaN.") + return np.NaN labels = sorted(list(self.y_pred_proba.keys())) y_true = data[self.y_true] y_pred = data[self.y_pred] - if y_pred.isna().all().any(): - raise InvalidArgumentsException( - f"could not calculate metric {self.display_name}: " "prediction column contains no data" - ) - if y_true.nunique() <= 1: warnings.warn( f"'{self.y_true}' only contains a single class, cannot calculate {self.display_name}. Returning NaN." @@ -237,13 +259,24 @@ def _calculate(self, data: pd.DataFrame): return f1_score(y_true, y_pred, average='macro', labels=labels) def _sampling_error(self, data: pd.DataFrame) -> float: - return f1_sampling_error(self._sampling_error_components, data) + _list_missing([self.y_true, self.y_pred], data) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn( + f"Too many missing values, cannot calculate {self.display_name} sampling error. " "Returning NaN." 
+ ) + return np.NaN + else: + return f1_sampling_error(self._sampling_error_components, data) @MetricFactory.register(metric='precision', use_case=ProblemType.CLASSIFICATION_MULTICLASS) class MulticlassClassificationPrecision(Metric): """Precision metric.""" + y_pred: str + y_pred_proba: Dict[str, str] + def __init__( self, y_true: str, @@ -284,19 +317,25 @@ def __init__( self._sampling_error_components: List[Tuple] = [] def __str__(self): + """Get string representation of metric.""" return "precision" def _fit(self, reference_data: pd.DataFrame): _list_missing([self.y_true, self.y_pred], reference_data) - - # sampling error - label_binarizer = LabelBinarizer() - binarized_y_true = list(label_binarizer.fit_transform(reference_data[self.y_true]).T) - binarized_y_pred = list(label_binarizer.transform(reference_data[self.y_pred]).T) - - self._sampling_error_components = precision_sampling_error_components( - y_true_reference=binarized_y_true, y_pred_reference=binarized_y_pred + classes = class_labels(self.y_pred_proba) + reference_data, empty = common_nan_removal( + reference_data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred] ) + if empty: + self._sampling_error_components = [(np.NaN, 0) for clazz in classes] + else: + # sampling error + label_binarizer = LabelBinarizer() + binarized_y_true = list(label_binarizer.fit_transform(reference_data[self.y_true]).T) + binarized_y_pred = list(label_binarizer.transform(reference_data[self.y_pred]).T) + self._sampling_error_components = precision_sampling_error_components( + y_true_reference=binarized_y_true, y_pred_reference=binarized_y_pred + ) def _calculate(self, data: pd.DataFrame): if not isinstance(self.y_pred_proba, Dict): @@ -307,18 +346,15 @@ def _calculate(self, data: pd.DataFrame): ) _list_missing([self.y_true, self.y_pred], data) - assert self.y_pred - data = _remove_nans(data, (self.y_true, self.y_pred)) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn(f"Too many missing values, cannot calculate {self.display_name}. " f"Returning NaN.") + return np.NaN labels = sorted(list(self.y_pred_proba.keys())) y_true = data[self.y_true] y_pred = data[self.y_pred] - if y_pred.isna().all().any(): - raise InvalidArgumentsException( - f"could not calculate metric {self.display_name}: " "prediction column contains no data" - ) - if y_true.nunique() <= 1: warnings.warn( f"'{self.y_true}' only contains a single class, cannot calculate {self.display_name}. Returning NaN." @@ -333,13 +369,24 @@ def _calculate(self, data: pd.DataFrame): return precision_score(y_true, y_pred, average='macro', labels=labels) def _sampling_error(self, data: pd.DataFrame) -> float: - return precision_sampling_error(self._sampling_error_components, data) + _list_missing([self.y_true, self.y_pred], data) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn( + f"Too many missing values, cannot calculate {self.display_name} sampling error. " "Returning NaN." 
+ ) + return np.NaN + else: + return precision_sampling_error(self._sampling_error_components, data) @MetricFactory.register(metric='recall', use_case=ProblemType.CLASSIFICATION_MULTICLASS) class MulticlassClassificationRecall(Metric): """Recall metric, also known as 'sensitivity'.""" + y_pred: str + y_pred_proba: Dict[str, str] + def __init__( self, y_true: str, @@ -380,19 +427,25 @@ def __init__( self._sampling_error_components: List[Tuple] = [] def __str__(self): + """Get string representation of metric.""" return "recall" def _fit(self, reference_data: pd.DataFrame): _list_missing([self.y_true, self.y_pred], reference_data) - - # sampling error - label_binarizer = LabelBinarizer() - binarized_y_true = list(label_binarizer.fit_transform(reference_data[self.y_true]).T) - binarized_y_pred = list(label_binarizer.transform(reference_data[self.y_pred]).T) - - self._sampling_error_components = recall_sampling_error_components( - y_true_reference=binarized_y_true, y_pred_reference=binarized_y_pred + classes = class_labels(self.y_pred_proba) + reference_data, empty = common_nan_removal( + reference_data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred] ) + if empty: + self._sampling_error_components = [(np.NaN, 0) for clazz in classes] + else: + # sampling error + label_binarizer = LabelBinarizer() + binarized_y_true = list(label_binarizer.fit_transform(reference_data[self.y_true]).T) + binarized_y_pred = list(label_binarizer.transform(reference_data[self.y_pred]).T) + self._sampling_error_components = recall_sampling_error_components( + y_true_reference=binarized_y_true, y_pred_reference=binarized_y_pred + ) def _calculate(self, data: pd.DataFrame): if not isinstance(self.y_pred_proba, Dict): @@ -403,18 +456,15 @@ def _calculate(self, data: pd.DataFrame): ) _list_missing([self.y_true, self.y_pred], data) - assert self.y_pred - data = _remove_nans(data, (self.y_true, self.y_pred)) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn(f"Too many missing values, cannot calculate {self.display_name}. " f"Returning NaN.") + return np.NaN labels = sorted(list(self.y_pred_proba.keys())) y_true = data[self.y_true] y_pred = data[self.y_pred] - if y_pred.isna().all().any(): - raise InvalidArgumentsException( - f"could not calculate metric {self.display_name}: " "prediction column contains no data" - ) - if y_true.nunique() <= 1: warnings.warn( f"'{self.y_true}' only contains a single class, cannot calculate {self.display_name}. Returning NaN." @@ -429,13 +479,24 @@ def _calculate(self, data: pd.DataFrame): return recall_score(y_true, y_pred, average='macro', labels=labels) def _sampling_error(self, data: pd.DataFrame) -> float: - return recall_sampling_error(self._sampling_error_components, data) + _list_missing([self.y_true, self.y_pred], data) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn( + f"Too many missing values, cannot calculate {self.display_name} sampling error. " "Returning NaN." 
+ ) + return np.NaN + else: + return recall_sampling_error(self._sampling_error_components, data) @MetricFactory.register(metric='specificity', use_case=ProblemType.CLASSIFICATION_MULTICLASS) class MulticlassClassificationSpecificity(Metric): """Specificity metric.""" + y_pred: str + y_pred_proba: Dict[str, str] + def __init__( self, y_true: str, @@ -476,19 +537,25 @@ def __init__( self._sampling_error_components: List[Tuple] = [] def __str__(self): + """Get string representation of metric.""" return "specificity" def _fit(self, reference_data: pd.DataFrame): _list_missing([self.y_true, self.y_pred], reference_data) - - # sampling error - label_binarizer = LabelBinarizer() - binarized_y_true = list(label_binarizer.fit_transform(reference_data[self.y_true]).T) - binarized_y_pred = list(label_binarizer.transform(reference_data[self.y_pred]).T) - - self._sampling_error_components = specificity_sampling_error_components( - y_true_reference=binarized_y_true, y_pred_reference=binarized_y_pred + classes = class_labels(self.y_pred_proba) + reference_data, empty = common_nan_removal( + reference_data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred] ) + if empty: + self._sampling_error_components = [(np.NaN, 0) for clazz in classes] + else: + # sampling error + label_binarizer = LabelBinarizer() + binarized_y_true = list(label_binarizer.fit_transform(reference_data[self.y_true]).T) + binarized_y_pred = list(label_binarizer.transform(reference_data[self.y_pred]).T) + self._sampling_error_components = specificity_sampling_error_components( + y_true_reference=binarized_y_true, y_pred_reference=binarized_y_pred + ) def _calculate(self, data: pd.DataFrame): if not isinstance(self.y_pred_proba, Dict): @@ -499,18 +566,15 @@ def _calculate(self, data: pd.DataFrame): ) _list_missing([self.y_true, self.y_pred], data) - assert self.y_pred - data = _remove_nans(data, (self.y_true, self.y_pred)) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn(f"Too many missing values, cannot calculate {self.display_name}. " f"Returning NaN.") + return np.NaN labels = sorted(list(self.y_pred_proba.keys())) y_true = data[self.y_true] y_pred = data[self.y_pred] - if y_pred.isna().all().any(): - raise InvalidArgumentsException( - f"could not calculate metric {self.display_name}: prediction column contains no data" - ) - if y_true.nunique() <= 1: warnings.warn( f"'{self.y_true}' only contains a single class, cannot calculate {self.display_name}. Returning NaN." @@ -529,13 +593,24 @@ def _calculate(self, data: pd.DataFrame): return np.mean(class_wise_specificity) def _sampling_error(self, data: pd.DataFrame) -> float: - return specificity_sampling_error(self._sampling_error_components, data) + _list_missing([self.y_true, self.y_pred], data) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn( + f"Too many missing values, cannot calculate {self.display_name} sampling error. " "Returning NaN." 
+ ) + return np.NaN + else: + return specificity_sampling_error(self._sampling_error_components, data) @MetricFactory.register(metric='accuracy', use_case=ProblemType.CLASSIFICATION_MULTICLASS) class MulticlassClassificationAccuracy(Metric): """Accuracy metric.""" + y_pred: str + y_pred_proba: Dict[str, str] + def __init__( self, y_true: str, @@ -576,45 +651,58 @@ def __init__( self._sampling_error_components: Tuple = () def __str__(self): + """Get string representation of metric.""" return "accuracy" def _fit(self, reference_data: pd.DataFrame): _list_missing([self.y_true, self.y_pred], reference_data) - - # sampling error - label_binarizer = LabelBinarizer() - binarized_y_true = label_binarizer.fit_transform(reference_data[self.y_true]) - binarized_y_pred = label_binarizer.transform(reference_data[self.y_pred]) - - self._sampling_error_components = accuracy_sampling_error_components( - y_true_reference=binarized_y_true, y_pred_reference=binarized_y_pred + reference_data, empty = common_nan_removal( + reference_data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred] ) + if empty: + self._sampling_error_components = (np.NaN,) + else: + # sampling error + label_binarizer = LabelBinarizer() + binarized_y_true = label_binarizer.fit_transform(reference_data[self.y_true]) + binarized_y_pred = label_binarizer.transform(reference_data[self.y_pred]) + + self._sampling_error_components = accuracy_sampling_error_components( + y_true_reference=binarized_y_true, y_pred_reference=binarized_y_pred + ) def _calculate(self, data: pd.DataFrame): _list_missing([self.y_true, self.y_pred], data) - assert self.y_pred - data = _remove_nans(data, (self.y_true, self.y_pred)) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn(f"Too many missing values, cannot calculate {self.display_name}. " f"Returning NaN.") + return np.NaN y_true = data[self.y_true] y_pred = data[self.y_pred] - if y_pred.isna().all().any(): - raise InvalidArgumentsException( - f"could not calculate metric '{self.display_name}': " "prediction column contains no data" - ) - - if (y_true.nunique() <= 1) or (y_pred.nunique() <= 1): - warnings.warn("Calculated Accuracy score contains NaN values.") - return np.nan - else: - return accuracy_score(y_true, y_pred) + return accuracy_score(y_true, y_pred) def _sampling_error(self, data: pd.DataFrame) -> float: - return accuracy_sampling_error(self._sampling_error_components, data) + _list_missing([self.y_true, self.y_pred], data) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn( + f"Too many missing values, cannot calculate {self.display_name} sampling error. " "Returning NaN." + ) + return np.NaN + else: + return accuracy_sampling_error(self._sampling_error_components, data) @MetricFactory.register('confusion_matrix', ProblemType.CLASSIFICATION_MULTICLASS) class MulticlassClassificationConfusionMatrix(Metric): + """Multiclass Confusion Matrix metric.""" + + y_pred: str + y_pred_proba: Dict[str, str] + # classes: List[str] + def __init__( self, y_true: str, @@ -641,9 +729,22 @@ def __init__( self.classes: Optional[List[str]] = None def __str__(self): + """Get string representation of metric.""" return "confusion_matrix" def fit(self, reference_data: pd.DataFrame, chunker: Chunker): + """Fits a Metric on reference data. + + Parameters + ---------- + reference_data: pd.DataFrame + The reference data used for fitting. Must have target data available. 
+ chunker: Chunker + The :class:`~nannyml.chunk.Chunker` used to split the reference data into chunks. + This value is provided by the calling + :class:`~nannyml.performance_calculation.calculator.PerformanceCalculator`. + + """ # _fit # realized perf on chunks # set thresholds @@ -692,18 +793,23 @@ def _multiclass_confusion_matrix_alert_thresholds( return alert_thresholds def _fit(self, reference_data: pd.DataFrame): - _list_missing([self.y_true, self.y_pred], reference_data) - - self.sampling_error_components = multiclass_confusion_matrix_sampling_error_components( - y_true_reference=reference_data[self.y_true], - y_pred_reference=reference_data[self.y_pred], - normalize_confusion_matrix=self.normalize_confusion_matrix, - ) - self.classes = sorted(reference_data[self.y_true].unique()) - self.components = self._get_components(self.classes) + _list_missing([self.y_true, self.y_pred], reference_data) + reference_data, empty = common_nan_removal( + reference_data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred] + ) + if empty: + self.sampling_error_components = np.full((len(self.classes), len(self.classes)), np.nan), 0 + else: + # sampling error + self.sampling_error_components = multiclass_confusion_matrix_sampling_error_components( + y_true_reference=reference_data[self.y_true], + y_pred_reference=reference_data[self.y_pred], + normalize_confusion_matrix=self.normalize_confusion_matrix, + ) + def _get_components(self, classes: List[str]) -> List[Tuple[str, str]]: components = [] @@ -720,27 +826,52 @@ def _get_components(self, classes: List[str]) -> List[Tuple[str, str]]: def _calculate(self, data: pd.DataFrame) -> Union[np.ndarray, float]: _list_missing([self.y_true, self.y_pred], data) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn(f"Too many missing values, cannot calculate {self.display_name}. " f"Returning NaN.") + return np.NaN y_true = data[self.y_true] y_pred = data[self.y_pred] - if y_pred.isna().all().any(): - raise InvalidArgumentsException( - f"could not calculate metric {self.display_name}: prediction column contains no data" - ) - if (y_true.nunique() <= 1) or (y_pred.nunique() <= 1): return np.nan else: cm = confusion_matrix(y_true, y_pred, labels=self.classes, normalize=self.normalize_confusion_matrix) return cm + def sampling_error(self, data: pd.DataFrame): + """Calculates the sampling error with respect to the reference data for a given chunk of data. + + Parameters + ---------- + data: pd.DataFrame + The data to calculate the sampling error on, with respect to the reference data. + + Returns + ------- + sampling_error: np.ndarray + The expected sampling error per confusion matrix element. + + """ + _list_missing([self.y_true, self.y_pred], data) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn( + f"Too many missing values, cannot calculate {self.display_name} sampling error. " "Returning NaN." 
+ ) + num_classes: int = len(self.classes) # type: ignore + return np.full((num_classes, num_classes), np.nan) + else: + return multiclass_confusion_matrix_sampling_error(self.sampling_error_components, data) + def get_chunk_record(self, chunk_data: pd.DataFrame) -> Dict[str, Union[float, bool]]: + """Create results for provided chunk data.""" if self.classes is None: raise ValueError("classes must be set before calling this method") - sampling_errors = multiclass_confusion_matrix_sampling_error(self.sampling_error_components, chunk_data) realized_cm = self._calculate(chunk_data) + sampling_errors = self.sampling_error(chunk_data) if isinstance(realized_cm, float): realized_cm = np.full((len(self.classes), len(self.classes)), np.nan) diff --git a/nannyml/performance_calculation/metrics/regression.py b/nannyml/performance_calculation/metrics/regression.py index 933259a0..be4140c6 100644 --- a/nannyml/performance_calculation/metrics/regression.py +++ b/nannyml/performance_calculation/metrics/regression.py @@ -1,6 +1,8 @@ # Author: Niels Nuyttens # # License: Apache Software License 2.0 +"""Performance Calculation Regression Metrics Module.""" + import warnings from typing import Optional, Tuple @@ -14,7 +16,7 @@ ) from nannyml._typing import ProblemType -from nannyml.base import _list_missing, _raise_exception_for_negative_values, _remove_nans +from nannyml.base import _list_missing, _raise_exception_for_negative_values, common_nan_removal from nannyml.performance_calculation.metrics.base import Metric, MetricFactory from nannyml.sampling_error.regression import ( mae_sampling_error, @@ -37,6 +39,8 @@ class MAE(Metric): """Mean Absolute Error metric.""" + y_pred: str + def __init__(self, y_true: str, y_pred: str, threshold: Threshold, y_pred_proba: Optional[str] = None, **kwargs): """Creates a new MAE instance. @@ -65,41 +69,54 @@ def __init__(self, y_true: str, y_pred: str, threshold: Threshold, y_pred_proba: self._sampling_error_components: Tuple = () def __str__(self): + """Get string representation of metric.""" return "MAE" def _fit(self, reference_data: pd.DataFrame): _list_missing([self.y_true, self.y_pred], list(reference_data.columns)) - self._sampling_error_components = mae_sampling_error_components( - y_true_reference=reference_data[self.y_true], - y_pred_reference=reference_data[self.y_pred], + reference_data, empty = common_nan_removal( + reference_data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred] ) + if empty: + self._sampling_error_components = (np.NaN,) + else: + self._sampling_error_components = mae_sampling_error_components( + y_true_reference=reference_data[self.y_true], + y_pred_reference=reference_data[self.y_pred], + ) def _calculate(self, data: pd.DataFrame): """Redefine to handle NaNs and edge cases.""" _list_missing([self.y_true, self.y_pred], list(data.columns)) - assert self.y_pred - data = _remove_nans(data, (self.y_true, self.y_pred)) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn( + f"No data or too many missing values, cannot calculate {self.display_name}. " f"Returning NaN." + ) + return np.NaN y_true = data[self.y_true] y_pred = data[self.y_pred] - if y_true.empty: - warnings.warn(f"'{self.y_true}' contains no data, cannot calculate {self.display_name}. Returning NaN.") - return np.NaN - elif y_pred.empty: - warnings.warn(f"'{self.y_pred}' contains no data, cannot calculate {self.display_name}. 
Returning NaN.") - return np.NaN - return mean_absolute_error(y_true, y_pred) def _sampling_error(self, data: pd.DataFrame) -> float: - return mae_sampling_error(self._sampling_error_components, data) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn( + f"Too many missing values, cannot calculate {self.display_name} sampling error. " "Returning NaN." + ) + return np.NaN + else: + return mae_sampling_error(self._sampling_error_components, data) @MetricFactory.register(metric='mape', use_case=ProblemType.REGRESSION) class MAPE(Metric): """Mean Absolute Percentage Error metric.""" + y_pred: str + def __init__(self, y_true: str, y_pred: str, threshold: Threshold, y_pred_proba: Optional[str] = None, **kwargs): """Creates a new MAPE instance. @@ -128,41 +145,54 @@ def __init__(self, y_true: str, y_pred: str, threshold: Threshold, y_pred_proba: self._sampling_error_components: Tuple = () def __str__(self): + """Get string representation of metric.""" return "MAPE" def _fit(self, reference_data: pd.DataFrame): _list_missing([self.y_true, self.y_pred], list(reference_data.columns)) - self._sampling_error_components = mape_sampling_error_components( - y_true_reference=reference_data[self.y_true], - y_pred_reference=reference_data[self.y_pred], + reference_data, empty = common_nan_removal( + reference_data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred] ) + if empty: + self._sampling_error_components = (np.NaN,) + else: + self._sampling_error_components = mape_sampling_error_components( + y_true_reference=reference_data[self.y_true], + y_pred_reference=reference_data[self.y_pred], + ) def _calculate(self, data: pd.DataFrame): """Redefine to handle NaNs and edge cases.""" _list_missing([self.y_true, self.y_pred], list(data.columns)) - assert self.y_pred - data = _remove_nans(data, (self.y_true, self.y_pred)) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn( + f"No data or too many missing values, cannot calculate {self.display_name}. " f"Returning NaN." + ) + return np.NaN y_true = data[self.y_true] y_pred = data[self.y_pred] - if y_true.empty: - warnings.warn(f"'{self.y_true}' contains no data, cannot calculate {self.display_name}. Returning NaN.") - return np.NaN - elif y_pred.empty: - warnings.warn(f"'{self.y_pred}' contains no data, cannot calculate {self.display_name}. Returning NaN.") - return np.NaN - return mean_absolute_percentage_error(y_true, y_pred) def _sampling_error(self, data: pd.DataFrame) -> float: - return mape_sampling_error(self._sampling_error_components, data) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn( + f"Too many missing values, cannot calculate {self.display_name} sampling error. " "Returning NaN." + ) + return np.NaN + else: + return mape_sampling_error(self._sampling_error_components, data) @MetricFactory.register(metric='mse', use_case=ProblemType.REGRESSION) class MSE(Metric): """Mean Squared Error metric.""" + y_pred: str + def __init__(self, y_true: str, y_pred: str, threshold: Threshold, y_pred_proba: Optional[str] = None, **kwargs): """Creates a new MSE instance. 
@@ -191,41 +221,54 @@ def __init__(self, y_true: str, y_pred: str, threshold: Threshold, y_pred_proba: self._sampling_error_components: Tuple = () def __str__(self): + """Get string representation of metric.""" return "MSE" def _fit(self, reference_data: pd.DataFrame): _list_missing([self.y_true, self.y_pred], list(reference_data.columns)) - self._sampling_error_components = mse_sampling_error_components( - y_true_reference=reference_data[self.y_true], - y_pred_reference=reference_data[self.y_pred], + reference_data, empty = common_nan_removal( + reference_data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred] ) + if empty: + self._sampling_error_components = (np.NaN,) + else: + self._sampling_error_components = mse_sampling_error_components( + y_true_reference=reference_data[self.y_true], + y_pred_reference=reference_data[self.y_pred], + ) def _calculate(self, data: pd.DataFrame): """Redefine to handle NaNs and edge cases.""" _list_missing([self.y_true, self.y_pred], list(data.columns)) - assert self.y_pred - data = _remove_nans(data, (self.y_true, self.y_pred)) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn( + f"No data or too many missing values, cannot calculate {self.display_name}. " f"Returning NaN." + ) + return np.NaN y_true = data[self.y_true] y_pred = data[self.y_pred] - if y_true.empty: - warnings.warn(f"'{self.y_true}' contains no data, cannot calculate {self.display_name}. Returning NaN.") - return np.NaN - elif y_pred.empty: - warnings.warn(f"'{self.y_pred}' contains no data, cannot calculate {self.display_name}. Returning NaN.") - return np.NaN - return mean_squared_error(y_true, y_pred) def _sampling_error(self, data: pd.DataFrame) -> float: - return mse_sampling_error(self._sampling_error_components, data) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn( + f"Too many missing values, cannot calculate {self.display_name} sampling error. " "Returning NaN." + ) + return np.NaN + else: + return mse_sampling_error(self._sampling_error_components, data) @MetricFactory.register(metric='msle', use_case=ProblemType.REGRESSION) class MSLE(Metric): """Mean Squared Logarithmic Error metric.""" + y_pred: str + def __init__(self, y_true: str, y_pred: str, threshold: Threshold, y_pred_proba: Optional[str] = None, **kwargs): """Creates a new MSLE instance. 
@@ -254,46 +297,58 @@ def __init__(self, y_true: str, y_pred: str, threshold: Threshold, y_pred_proba: self._sampling_error_components: Tuple = () def __str__(self): + """Get string representation of metric.""" return "MSLE" def _fit(self, reference_data: pd.DataFrame): _list_missing([self.y_true, self.y_pred], list(reference_data.columns)) - self._sampling_error_components = msle_sampling_error_components( - y_true_reference=reference_data[self.y_true], - y_pred_reference=reference_data[self.y_pred], + reference_data, empty = common_nan_removal( + reference_data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred] ) + if empty: + self._sampling_error_components = (np.NaN,) + else: + self._sampling_error_components = msle_sampling_error_components( + y_true_reference=reference_data[self.y_true], + y_pred_reference=reference_data[self.y_pred], + ) def _calculate(self, data: pd.DataFrame): """Redefine to handle NaNs and edge cases.""" _list_missing([self.y_true, self.y_pred], list(data.columns)) - assert self.y_pred - data = _remove_nans(data, (self.y_true, self.y_pred)) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn( + f"No data or too many missing values, cannot calculate {self.display_name}. " f"Returning NaN." + ) + return np.NaN y_true = data[self.y_true] y_pred = data[self.y_pred] - if y_true.empty: - warnings.warn(f"'{self.y_true}' contains no data, cannot calculate {self.display_name}. Returning NaN.") - return np.NaN - elif y_pred.empty: - warnings.warn(f"'{self.y_pred}' contains no data, cannot calculate {self.display_name}. Returning NaN.") - return np.NaN - # TODO: include option to drop negative values as well? - _raise_exception_for_negative_values(y_true) _raise_exception_for_negative_values(y_pred) return mean_squared_log_error(y_true, y_pred) def _sampling_error(self, data: pd.DataFrame) -> float: - return msle_sampling_error(self._sampling_error_components, data) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn( + f"Too many missing values, cannot calculate {self.display_name} sampling error. " "Returning NaN." + ) + return np.NaN + else: + return msle_sampling_error(self._sampling_error_components, data) @MetricFactory.register(metric='rmse', use_case=ProblemType.REGRESSION) class RMSE(Metric): """Root Mean Squared Error metric.""" + y_pred: str + def __init__(self, y_true: str, y_pred: str, threshold: Threshold, y_pred_proba: Optional[str] = None, **kwargs): """Creates a new RMSE instance. 
@@ -322,41 +377,54 @@ def __init__(self, y_true: str, y_pred: str, threshold: Threshold, y_pred_proba: self._sampling_error_components: Tuple = () def __str__(self): + """Get string representation of metric.""" return "RMSE" def _fit(self, reference_data: pd.DataFrame): _list_missing([self.y_true, self.y_pred], list(reference_data.columns)) - self._sampling_error_components = rmse_sampling_error_components( - y_true_reference=reference_data[self.y_true], - y_pred_reference=reference_data[self.y_pred], + reference_data, empty = common_nan_removal( + reference_data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred] ) + if empty: + self._sampling_error_components = (np.NaN,) + else: + self._sampling_error_components = rmse_sampling_error_components( + y_true_reference=reference_data[self.y_true], + y_pred_reference=reference_data[self.y_pred], + ) def _calculate(self, data: pd.DataFrame): """Redefine to handle NaNs and edge cases.""" _list_missing([self.y_true, self.y_pred], list(data.columns)) - assert self.y_pred - data = _remove_nans(data, (self.y_true, self.y_pred)) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn( + f"No data or too many missing values, cannot calculate {self.display_name}. " f"Returning NaN." + ) + return np.NaN y_true = data[self.y_true] y_pred = data[self.y_pred] - if y_true.empty: - warnings.warn(f"'{self.y_true}' contains no data, cannot calculate {self.display_name}. Returning NaN.") - return np.NaN - elif y_pred.empty: - warnings.warn(f"'{self.y_pred}' contains no data, cannot calculate {self.display_name}. Returning NaN.") - return np.NaN - return mean_squared_error(y_true, y_pred, squared=False) def _sampling_error(self, data: pd.DataFrame) -> float: - return rmse_sampling_error(self._sampling_error_components, data) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn( + f"Too many missing values, cannot calculate {self.display_name} sampling error. " "Returning NaN." + ) + return np.NaN + else: + return rmse_sampling_error(self._sampling_error_components, data) @MetricFactory.register(metric='rmsle', use_case=ProblemType.REGRESSION) class RMSLE(Metric): """Root Mean Squared Logarithmic Error metric.""" + y_pred: str + def __init__(self, y_true: str, y_pred: str, threshold: Threshold, y_pred_proba: Optional[str] = None, **kwargs): """Creates a new RMSLE instance. 
@@ -385,37 +453,47 @@ def __init__(self, y_true: str, y_pred: str, threshold: Threshold, y_pred_proba: self._sampling_error_components: Tuple = () def __str__(self): + """Get string representation of metric.""" return "RMSLE" def _fit(self, reference_data: pd.DataFrame): _list_missing([self.y_true, self.y_pred], list(reference_data.columns)) - self._sampling_error_components = rmsle_sampling_error_components( - y_true_reference=reference_data[self.y_true], - y_pred_reference=reference_data[self.y_pred], + reference_data, empty = common_nan_removal( + reference_data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred] ) + if empty: + self._sampling_error_components = (np.NaN,) + else: + self._sampling_error_components = rmsle_sampling_error_components( + y_true_reference=reference_data[self.y_true], + y_pred_reference=reference_data[self.y_pred], + ) def _calculate(self, data: pd.DataFrame): """Redefine to handle NaNs and edge cases.""" _list_missing([self.y_true, self.y_pred], list(data.columns)) - assert self.y_pred - data = _remove_nans(data, (self.y_true, self.y_pred)) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn( + f"No data or too many missing values, cannot calculate {self.display_name}. " f"Returning NaN." + ) + return np.NaN y_true = data[self.y_true] y_pred = data[self.y_pred] - if y_true.empty: - warnings.warn(f"'{self.y_true}' contains no data, cannot calculate {self.display_name}. Returning NaN.") - return np.NaN - elif y_pred.empty: - warnings.warn(f"'{self.y_pred}' contains no data, cannot calculate {self.display_name}. Returning NaN.") - return np.NaN - # TODO: include option to drop negative values as well? - _raise_exception_for_negative_values(y_true) _raise_exception_for_negative_values(y_pred) return mean_squared_log_error(y_true, y_pred, squared=False) def _sampling_error(self, data: pd.DataFrame) -> float: - return rmsle_sampling_error(self._sampling_error_components, data) + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn( + f"Too many missing values, cannot calculate {self.display_name} sampling error. " "Returning NaN." 
+ ) + return np.NaN + else: + return rmsle_sampling_error(self._sampling_error_components, data) diff --git a/nannyml/performance_estimation/confidence_based/cbpe.py b/nannyml/performance_estimation/confidence_based/cbpe.py index aabf2dc5..e8115f4e 100644 --- a/nannyml/performance_estimation/confidence_based/cbpe.py +++ b/nannyml/performance_estimation/confidence_based/cbpe.py @@ -259,6 +259,13 @@ def __init__( if self.problem_type is not ProblemType.CLASSIFICATION_BINARY and y_pred is None: raise InvalidArgumentsException(f"'y_pred' can not be 'None' for problem type {self.problem_type.value}") + if self.problem_type == ProblemType.CLASSIFICATION_BINARY: + if not isinstance(self.y_pred_proba, str): + raise InvalidArgumentsException("y_pred_proba must be a string for binary classification") + elif self.problem_type == ProblemType.CLASSIFICATION_MULTICLASS: + if not isinstance(self.y_pred_proba, dict): + raise InvalidArgumentsException("y_pred_proba must be a dictionary for multiclass classification") + self.thresholds = DEFAULT_THRESHOLDS if thresholds: self.thresholds.update(**thresholds) diff --git a/nannyml/performance_estimation/confidence_based/metrics.py b/nannyml/performance_estimation/confidence_based/metrics.py index eb35c04e..13495d11 100644 --- a/nannyml/performance_estimation/confidence_based/metrics.py +++ b/nannyml/performance_estimation/confidence_based/metrics.py @@ -31,7 +31,8 @@ import nannyml.sampling_error.binary_classification as bse import nannyml.sampling_error.multiclass_classification as mse -from nannyml._typing import ModelOutputsType, ProblemType, class_labels +from nannyml._typing import ModelOutputsType, ProblemType, class_labels, model_output_column_names +from nannyml.base import _list_missing, common_nan_removal from nannyml.chunk import Chunk, Chunker from nannyml.exceptions import CalculatorException, InvalidArgumentsException from nannyml.performance_estimation.confidence_based import SUPPORTED_METRIC_VALUES @@ -225,54 +226,6 @@ def __eq__(self, other): """ return self.components == other.components - # def _common_cleaning( - # self, data: pd.DataFrame, y_pred_proba_column_name: Optional[str] = None - # ) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]: - # if y_pred_proba_column_name is None: - # if not isinstance(self.y_pred_proba, str): - # raise InvalidArgumentsException( - # f"'y_pred_proba' is of type '{type(self.y_pred_proba)}'. " - # f"Binary use cases require 'y_pred_proba' to be a string." - # ) - # y_pred_proba_column_name = self.y_pred_proba - - # data = _remove_nans(data, [self.y_pred, y_pred_proba_column_name]) - - # clean_targets = self.y_true in data.columns and not data[self.y_true].isna().all() - # if clean_targets: - # data = _remove_nans(data, [self.y_true]) - - # return data[y_pred_proba_column_name], data[self.y_pred], (data[self.y_true] if clean_targets else None) - def _common_cleaning(self, data: pd.DataFrame, selected_columns: List[str]) -> Tuple[List[pd.Series], bool]: - """Remove NaN values from rows of selected columns. - - Parameters - ---------- - data: pd.DataFrame - Pandas dataframe containing data. - selected_columns: List[str] - List containing the strings of column names - - Returns - ------- - col_list: - List containing the clean columns specified. Order of columns from selected_columns is - preserved. 
- """ - # If we want target and it's not available we get None - if not set(selected_columns) <= set(data.columns): - raise InvalidArgumentsException( - f"Selected columns: {selected_columns} not all present in provided data columns {list(data.columns)}" - ) - df = data[selected_columns].dropna(axis=0, how='any', inplace=False).reset_index() - empty: bool = False - if df.shape[0] == 0: - empty = True - results = [] - for el in selected_columns: - results.append(df[el]) - return (results, empty) - def get_chunk_record(self, chunk_data: pd.DataFrame) -> Dict: """Returns a dictionary containing the performance metrics for a given chunk. @@ -385,9 +338,11 @@ def inner_wrapper(wrapped_class: Type[Metric]) -> Type[Metric]: class BinaryClassificationAUROC(Metric): """CBPE binary classification AUROC Metric Class.""" + y_pred_proba: str + def __init__( self, - y_pred_proba: ModelOutputsType, + y_pred_proba: str, y_pred: str, y_true: str, chunker: Chunker, @@ -413,57 +368,77 @@ def __init__( self._sampling_error_components: Tuple = () def _fit(self, reference_data: pd.DataFrame): - self._sampling_error_components = bse.auroc_sampling_error_components( - y_true_reference=reference_data[self.y_true], - y_pred_proba_reference=reference_data[self.y_pred_proba], - ) + data = reference_data[[self.y_true, self.y_pred_proba]] + data, empty = common_nan_removal(data, [self.y_true, self.y_pred_proba]) + if empty: + self._sampling_error_components = np.NaN, 0 + else: + self._sampling_error_components = bse.auroc_sampling_error_components( + y_true_reference=reference_data[self.y_true], + y_pred_proba_reference=reference_data[self.y_pred_proba], + ) def _estimate(self, data: pd.DataFrame): try: - assert isinstance(self.y_pred_proba, str) # because of binary classification - _dat, _empty = self._common_cleaning( - data=data, selected_columns=[self.y_pred_proba, self.uncalibrated_y_pred_proba] - ) + _list_missing([self.y_pred_proba, self.uncalibrated_y_pred_proba], list(data.columns)) except InvalidArgumentsException as ex: - if "not all present in provided data columns" in str(ex): + if "missing required columns" in str(ex): self._logger.debug(str(ex)) return np.NaN else: raise ex - if _empty: + data, empty = common_nan_removal( + data[[self.y_pred_proba, self.uncalibrated_y_pred_proba]], + [self.y_pred_proba, self.uncalibrated_y_pred_proba], + ) + if empty: self._logger.debug(f"Not enough data to compute estimated {self.display_name}.") warnings.warn(f"Not enough data to compute estimated {self.display_name}.") return np.NaN - y_pred_proba, uncalibrated_y_pred_proba = _dat + + y_pred_proba = data[self.y_pred_proba] + uncalibrated_y_pred_proba = data[self.uncalibrated_y_pred_proba] return estimate_roc_auc(y_pred_proba, uncalibrated_y_pred_proba) def _realized_performance(self, data: pd.DataFrame) -> float: try: - _dat, _empty = self._common_cleaning( - data=data, selected_columns=[self.uncalibrated_y_pred_proba, self.y_true] - ) + _list_missing([self.uncalibrated_y_pred_proba, self.y_true], list(data.columns)) except InvalidArgumentsException as ex: - if "not all present in provided data columns" in str(ex): + if "missing required columns" in str(ex): self._logger.debug(str(ex)) return np.NaN else: raise ex - if _empty: - self._logger.debug(f"Not enough data to compute estimated {self.display_name}.") - warnings.warn(f"Not enough data to compute estimated {self.display_name}.") + data, empty = common_nan_removal( + data[[self.uncalibrated_y_pred_proba, self.y_true]], [self.uncalibrated_y_pred_proba, 
self.y_true] + ) + if empty: + self._logger.debug(f"Not enough data to compute realized {self.display_name}.") + warnings.warn(f"Not enough data to compute realized {self.display_name}.") return np.NaN - y_pred_proba, y_true = _dat + + y_true = data[self.y_true] + uncalibrated_y_pred_proba = data[self.uncalibrated_y_pred_proba] + if y_true.nunique() <= 1: warnings.warn( f"'{self.y_true}' contains a single class for chunk, " f"cannot compute realized {self.display_name}." ) return np.NaN - return roc_auc_score(y_true, y_pred_proba) + return roc_auc_score(y_true, uncalibrated_y_pred_proba) def _sampling_error(self, data: pd.DataFrame) -> float: - return bse.auroc_sampling_error(self._sampling_error_components, data) + data = data[[self.y_pred_proba, self.uncalibrated_y_pred_proba]] + data, empty = common_nan_removal(data, [self.y_pred_proba, self.uncalibrated_y_pred_proba]) + if empty: + warnings.warn( + f"Too many missing values, cannot calculate {self.display_name} sampling error. " "Returning NaN." + ) + return np.NaN + else: + return bse.auroc_sampling_error(self._sampling_error_components, data) def estimate_roc_auc(true_y_pred_proba: pd.Series, model_y_pred_proba: pd.Series) -> float: @@ -505,9 +480,11 @@ def estimate_roc_auc(true_y_pred_proba: pd.Series, model_y_pred_proba: pd.Series class BinaryClassificationAP(Metric): """CBPE binary classification AP Metric Class.""" + y_pred_proba: str + def __init__( self, - y_pred_proba: ModelOutputsType, + y_pred_proba: str, y_pred: str, y_true: str, chunker: Chunker, @@ -534,11 +511,10 @@ def __init__( def _fit(self, reference_data: pd.DataFrame): """Metric _fit implementation on reference data.""" - # if requested columns are missing we want to raise an error. - _dat, _ = self._common_cleaning( - data=reference_data, selected_columns=[self.y_true, self.y_pred_proba] # type: ignore - ) - y_true, y_pred_proba = _dat + data = reference_data[[self.y_true, self.y_pred_proba]] + data, empty = common_nan_removal(data, [self.y_true, self.y_pred_proba]) + y_true = data[self.y_true] + y_pred_proba = data[self.y_pred_proba] # if empty then positive class won't be part of y_true series if 1 not in y_true.unique(): @@ -552,21 +528,44 @@ def _fit(self, reference_data: pd.DataFrame): ) def _estimate(self, data: pd.DataFrame): + try: + _list_missing([self.y_pred_proba, self.uncalibrated_y_pred_proba], list(data.columns)) + except InvalidArgumentsException as ex: + if "missing required columns" in str(ex): + self._logger.debug(str(ex)) + return np.NaN + else: + raise ex + + data, empty = common_nan_removal( + data[[self.y_pred_proba, self.uncalibrated_y_pred_proba]], + [self.y_pred_proba, self.uncalibrated_y_pred_proba], + ) + if empty: + self._logger.debug(f"Not enough data to compute estimated {self.display_name}.") + warnings.warn(f"Not enough data to compute estimated {self.display_name}.") + return np.NaN + calibrated_y_pred_proba = data[self.y_pred_proba].to_numpy() uncalibrated_y_pred_proba = data[self.uncalibrated_y_pred_proba].to_numpy() - return estimate_ap(calibrated_y_pred_proba, uncalibrated_y_pred_proba) def _realized_performance(self, data: pd.DataFrame) -> float: try: - _dat, _ = self._common_cleaning(data=data, selected_columns=[self.uncalibrated_y_pred_proba, self.y_true]) + _list_missing([self.uncalibrated_y_pred_proba, self.y_true], list(data.columns)) except InvalidArgumentsException as ex: - if "not all present in provided data columns" in str(ex): + if "missing required columns" in str(ex): self._logger.debug(str(ex)) return np.NaN 
else: raise ex - uncalibrated_y_pred_proba, y_true = _dat + + data, _ = common_nan_removal( + data[[self.uncalibrated_y_pred_proba, self.y_true]], [self.uncalibrated_y_pred_proba, self.y_true] + ) + + y_true = data[self.y_true] + uncalibrated_y_pred_proba = data[self.uncalibrated_y_pred_proba] # if empty then positive class won't be part of y_true series if 1 not in y_true.unique(): @@ -579,7 +578,15 @@ def _realized_performance(self, data: pd.DataFrame) -> float: return average_precision_score(y_true, uncalibrated_y_pred_proba) def _sampling_error(self, data: pd.DataFrame) -> float: - return bse.ap_sampling_error(self._sampling_error_components, data) + data = data[[self.y_pred_proba, self.uncalibrated_y_pred_proba]] + data, empty = common_nan_removal(data, [self.y_pred_proba, self.uncalibrated_y_pred_proba]) + if empty: + warnings.warn( + f"Too many missing values, cannot calculate {self.display_name} sampling error. " "Returning NaN." + ) + return np.NaN + else: + return bse.ap_sampling_error(self._sampling_error_components, data) def estimate_ap(calibrated_y_pred_proba: np.ndarray, uncalibrated_y_pred_proba: np.ndarray) -> float: @@ -627,9 +634,11 @@ def estimate_ap(calibrated_y_pred_proba: np.ndarray, uncalibrated_y_pred_proba: class BinaryClassificationF1(Metric): """CBPE binary classification f1 Metric Class.""" + y_pred_proba: str + def __init__( self, - y_pred_proba: ModelOutputsType, + y_pred_proba: str, y_pred: str, y_true: str, chunker: Chunker, @@ -655,47 +664,71 @@ def __init__( self._sampling_error_components: Tuple = () def _fit(self, reference_data: pd.DataFrame): - self._sampling_error_components = bse.f1_sampling_error_components( - y_true_reference=reference_data[self.y_true], - y_pred_reference=reference_data[self.y_pred], - ) + # filter nans + data = reference_data[[self.y_true, self.y_pred]] + data, empty = common_nan_removal(data, [self.y_true, self.y_pred]) + y_true = data[self.y_true] + y_pred = data[self.y_pred] + + if empty: + self._logger.debug(f"Not enough data to compute fit {self.display_name}.") + warnings.warn(f"Not enough data to compute fit {self.display_name}.") + self._sampling_error_components = np.NaN, 0 + else: + self._sampling_error_components = bse.f1_sampling_error_components( + y_true_reference=y_true, + y_pred_reference=y_pred, + ) def _estimate(self, data: pd.DataFrame): try: - assert isinstance(self.y_pred_proba, str) # because of binary classification - _dat, _empty = self._common_cleaning(data=data, selected_columns=[self.y_pred_proba, self.y_pred]) + _list_missing([self.y_pred_proba, self.y_pred], list(data.columns)) except InvalidArgumentsException as ex: - if "not all present in provided data columns" in str(ex): + if "missing required columns" in str(ex): self._logger.debug(str(ex)) return np.NaN else: raise ex - if _empty: + data, empty = common_nan_removal(data[[self.y_pred_proba, self.y_pred]], [self.y_pred_proba, self.y_pred]) + if empty: self._logger.debug(f"Not enough data to compute estimated {self.display_name}.") warnings.warn(f"Not enough data to compute estimated {self.display_name}.") return np.NaN - y_pred_proba, y_pred = _dat + + y_pred = data[self.y_pred] + y_pred_proba = data[self.y_pred_proba] return estimate_f1(y_pred, y_pred_proba) def _sampling_error(self, data: pd.DataFrame) -> float: - return bse.f1_sampling_error(self._sampling_error_components, data) + data = data[[self.y_pred_proba, self.y_pred]] + data, empty = common_nan_removal(data, [self.y_pred_proba, self.y_pred]) + if empty: + warnings.warn( + f"Too many 
missing values, cannot calculate {self.display_name} sampling error. " "Returning NaN." + ) + return np.NaN + else: + return bse.f1_sampling_error(self._sampling_error_components, data) def _realized_performance(self, data: pd.DataFrame) -> float: try: - _dat, _empty = self._common_cleaning(data=data, selected_columns=[self.y_pred, self.y_true]) + _list_missing([self.y_pred, self.y_true], list(data.columns)) except InvalidArgumentsException as ex: - if "not all present in provided data columns" in str(ex): + if "missing required columns" in str(ex): self._logger.debug(str(ex)) return np.NaN else: raise ex - if _empty: - self._logger.debug(f"Not enough data to compute estimated {self.display_name}.") - warnings.warn(f"Not enough data to compute estimated {self.display_name}.") + data, empty = common_nan_removal(data[[self.y_pred, self.y_true]], [self.y_pred, self.y_true]) + if empty: + self._logger.debug(f"Not enough data to compute realized {self.display_name}.") + warnings.warn(f"Not enough data to compute realized {self.display_name}.") return np.NaN - y_pred, y_true = _dat + + y_true = data[self.y_true] + y_pred = data[self.y_pred] if y_true.nunique() <= 1: warnings.warn( @@ -742,9 +775,11 @@ def estimate_f1(y_pred: pd.DataFrame, y_pred_proba: pd.DataFrame) -> float: class BinaryClassificationPrecision(Metric): """CBPE binary classification precision Metric Class.""" + y_pred_proba: str + def __init__( self, - y_pred_proba: ModelOutputsType, + y_pred_proba: str, y_pred: str, y_true: str, chunker: Chunker, @@ -770,48 +805,72 @@ def __init__( self._sampling_error_components: Tuple = () def _fit(self, reference_data: pd.DataFrame): - self._sampling_error_components = bse.precision_sampling_error_components( - y_true_reference=reference_data[self.y_true], - y_pred_reference=reference_data[self.y_pred], - ) - pass + # filter nans + data = reference_data[[self.y_true, self.y_pred]] + data, empty = common_nan_removal(data, [self.y_true, self.y_pred]) + y_true = data[self.y_true] + y_pred = data[self.y_pred] + + if empty: + self._logger.debug(f"Not enough data to compute fit {self.display_name}.") + warnings.warn(f"Not enough data to compute fit {self.display_name}.") + self._sampling_error_components = np.NaN, 0 + else: + self._sampling_error_components = bse.precision_sampling_error_components( + y_true_reference=y_true, + y_pred_reference=y_pred, + ) def _estimate(self, data: pd.DataFrame): try: - assert isinstance(self.y_pred_proba, str) # because of binary classification - _dat, _empty = self._common_cleaning(data=data, selected_columns=[self.y_pred_proba, self.y_pred]) + _list_missing([self.y_pred_proba, self.y_pred], list(data.columns)) except InvalidArgumentsException as ex: - if "not all present in provided data columns" in str(ex): + if "missing required columns" in str(ex): self._logger.debug(str(ex)) return np.NaN else: raise ex - if _empty: + data, empty = common_nan_removal(data[[self.y_pred_proba, self.y_pred]], [self.y_pred_proba, self.y_pred]) + if empty: self._logger.debug(f"Not enough data to compute estimated {self.display_name}.") warnings.warn(f"Not enough data to compute estimated {self.display_name}.") return np.NaN - y_pred_proba, y_pred = _dat + + y_pred = data[self.y_pred] + y_pred_proba = data[self.y_pred_proba] return estimate_precision(y_pred, y_pred_proba) def _sampling_error(self, data: pd.DataFrame) -> float: - return bse.precision_sampling_error(self._sampling_error_components, data) + data = data[[self.y_pred_proba, self.y_pred]] + data, empty = 
common_nan_removal(data, [self.y_pred_proba, self.y_pred]) + if empty: + warnings.warn( + f"Too many missing values, cannot calculate {self.display_name} sampling error. " "Returning NaN." + ) + return np.NaN + else: + return bse.precision_sampling_error(self._sampling_error_components, data) def _realized_performance(self, data: pd.DataFrame) -> float: try: - _dat, _empty = self._common_cleaning(data=data, selected_columns=[self.y_pred, self.y_true]) + _list_missing([self.y_pred, self.y_true], list(data.columns)) except InvalidArgumentsException as ex: - if "not all present in provided data columns" in str(ex): + if "missing required columns" in str(ex): self._logger.debug(str(ex)) return np.NaN else: raise ex - if _empty: - self._logger.debug(f"Not enough data to compute estimated {self.display_name}.") - warnings.warn(f"Not enough data to compute estimated {self.display_name}.") + data, empty = common_nan_removal(data[[self.y_pred, self.y_true]], [self.y_pred, self.y_true]) + if empty: + self._logger.debug(f"Not enough data to compute realized {self.display_name}.") + warnings.warn(f"Not enough data to compute realized {self.display_name}.") return np.NaN - y_pred, y_true = _dat + + y_true = data[self.y_true] + y_pred = data[self.y_pred] + if y_true.nunique() <= 1: warnings.warn( f"Too few unique values present in '{self.y_true}', " @@ -856,9 +915,11 @@ def estimate_precision(y_pred: pd.DataFrame, y_pred_proba: pd.DataFrame) -> floa class BinaryClassificationRecall(Metric): """CBPE binary classification recall Metric Class.""" + y_pred_proba: str + def __init__( self, - y_pred_proba: ModelOutputsType, + y_pred_proba: str, y_pred: str, y_true: str, chunker: Chunker, @@ -884,47 +945,71 @@ def __init__( self._sampling_error_components: Tuple = () def _fit(self, reference_data: pd.DataFrame): - self._sampling_error_components = bse.recall_sampling_error_components( - y_true_reference=reference_data[self.y_true], - y_pred_reference=reference_data[self.y_pred], - ) + # filter nans + data = reference_data[[self.y_true, self.y_pred]] + data, empty = common_nan_removal(data, [self.y_true, self.y_pred]) + y_true = data[self.y_true] + y_pred = data[self.y_pred] + + if empty: + self._logger.debug(f"Not enough data to compute fit {self.display_name}.") + warnings.warn(f"Not enough data to compute fit {self.display_name}.") + self._sampling_error_components = np.NaN, 0 + else: + self._sampling_error_components = bse.recall_sampling_error_components( + y_true_reference=y_true, + y_pred_reference=y_pred, + ) def _estimate(self, data: pd.DataFrame): try: - assert isinstance(self.y_pred_proba, str) # because of binary classification - _dat, _empty = self._common_cleaning(data=data, selected_columns=[self.y_pred_proba, self.y_pred]) + _list_missing([self.y_pred_proba, self.y_pred], list(data.columns)) except InvalidArgumentsException as ex: - if "not all present in provided data columns" in str(ex): + if "missing required columns" in str(ex): self._logger.debug(str(ex)) return np.NaN else: raise ex - if _empty: + data, empty = common_nan_removal(data[[self.y_pred_proba, self.y_pred]], [self.y_pred_proba, self.y_pred]) + if empty: self._logger.debug(f"Not enough data to compute estimated {self.display_name}.") warnings.warn(f"Not enough data to compute estimated {self.display_name}.") return np.NaN - y_pred_proba, y_pred = _dat + + y_pred = data[self.y_pred] + y_pred_proba = data[self.y_pred_proba] return estimate_recall(y_pred, y_pred_proba) def _sampling_error(self, data: pd.DataFrame) -> float: - return 
bse.recall_sampling_error(self._sampling_error_components, data) + data = data[[self.y_pred_proba, self.y_pred]] + data, empty = common_nan_removal(data, [self.y_pred_proba, self.y_pred]) + if empty: + warnings.warn( + f"Too many missing values, cannot calculate {self.display_name} sampling error. " "Returning NaN." + ) + return np.NaN + else: + return bse.recall_sampling_error(self._sampling_error_components, data) def _realized_performance(self, data: pd.DataFrame) -> float: try: - _dat, _empty = self._common_cleaning(data=data, selected_columns=[self.y_pred, self.y_true]) + _list_missing([self.y_pred, self.y_true], list(data.columns)) except InvalidArgumentsException as ex: - if "not all present in provided data columns" in str(ex): + if "missing required columns" in str(ex): self._logger.debug(str(ex)) return np.NaN else: raise ex - if _empty: - self._logger.debug(f"Not enough data to compute estimated {self.display_name}.") - warnings.warn(f"Not enough data to compute estimated {self.display_name}.") + data, empty = common_nan_removal(data[[self.y_pred, self.y_true]], [self.y_pred, self.y_true]) + if empty: + self._logger.debug(f"Not enough data to compute realized {self.display_name}.") + warnings.warn(f"Not enough data to compute realized {self.display_name}.") return np.NaN - y_pred, y_true = _dat + + y_true = data[self.y_true] + y_pred = data[self.y_pred] if y_true.nunique() <= 1: warnings.warn( @@ -970,9 +1055,11 @@ def estimate_recall(y_pred: pd.DataFrame, y_pred_proba: pd.DataFrame) -> float: class BinaryClassificationSpecificity(Metric): """CBPE binary classification specificity Metric Class.""" + y_pred_proba: str + def __init__( self, - y_pred_proba: ModelOutputsType, + y_pred_proba: str, y_pred: str, y_true: str, chunker: Chunker, @@ -1001,47 +1088,71 @@ def __init__( self._labels = [0, 1] def _fit(self, reference_data: pd.DataFrame): - self._sampling_error_components = bse.specificity_sampling_error_components( - y_true_reference=reference_data[self.y_true], - y_pred_reference=reference_data[self.y_pred], - ) + # filter nans + data = reference_data[[self.y_true, self.y_pred]] + data, empty = common_nan_removal(data, [self.y_true, self.y_pred]) + y_true = data[self.y_true] + y_pred = data[self.y_pred] + + if empty: + self._logger.debug(f"Not enough data to compute fit {self.display_name}.") + warnings.warn(f"Not enough data to compute fit {self.display_name}.") + self._sampling_error_components = np.NaN, 0 + else: + self._sampling_error_components = bse.specificity_sampling_error_components( + y_true_reference=y_true, + y_pred_reference=y_pred, + ) def _estimate(self, data: pd.DataFrame): try: - assert isinstance(self.y_pred_proba, str) # because of binary classification - _dat, _empty = self._common_cleaning(data=data, selected_columns=[self.y_pred_proba, self.y_pred]) + _list_missing([self.y_pred_proba, self.y_pred], list(data.columns)) except InvalidArgumentsException as ex: - if "not all present in provided data columns" in str(ex): + if "missing required columns" in str(ex): self._logger.debug(str(ex)) return np.NaN else: raise ex - if _empty: + data, empty = common_nan_removal(data[[self.y_pred_proba, self.y_pred]], [self.y_pred_proba, self.y_pred]) + if empty: self._logger.debug(f"Not enough data to compute estimated {self.display_name}.") warnings.warn(f"Not enough data to compute estimated {self.display_name}.") return np.NaN - y_pred_proba, y_pred = _dat + + y_pred = data[self.y_pred] + y_pred_proba = data[self.y_pred_proba] return estimate_specificity(y_pred, 
y_pred_proba) def _sampling_error(self, data: pd.DataFrame) -> float: - return bse.specificity_sampling_error(self._sampling_error_components, data) + data = data[[self.y_pred_proba, self.y_pred]] + data, empty = common_nan_removal(data, [self.y_pred_proba, self.y_pred]) + if empty: + warnings.warn( + f"Too many missing values, cannot calculate {self.display_name} sampling error. " "Returning NaN." + ) + return np.NaN + else: + return bse.specificity_sampling_error(self._sampling_error_components, data) def _realized_performance(self, data: pd.DataFrame) -> float: try: - _dat, _empty = self._common_cleaning(data=data, selected_columns=[self.y_pred, self.y_true]) + _list_missing([self.y_pred, self.y_true], list(data.columns)) except InvalidArgumentsException as ex: - if "not all present in provided data columns" in str(ex): + if "missing required columns" in str(ex): self._logger.debug(str(ex)) return np.NaN else: raise ex - if _empty: - self._logger.debug(f"Not enough data to compute estimated {self.display_name}.") - warnings.warn(f"Not enough data to compute estimated {self.display_name}.") + data, empty = common_nan_removal(data[[self.y_pred, self.y_true]], [self.y_pred, self.y_true]) + if empty: + self._logger.debug(f"Not enough data to compute realized {self.display_name}.") + warnings.warn(f"Not enough data to compute realized {self.display_name}.") return np.NaN - y_pred, y_true = _dat + + y_true = data[self.y_true] + y_pred = data[self.y_pred] tn, fp, fn, tp = confusion_matrix(y_true, y_pred, labels=self._labels).ravel() denominator = tn + fp if denominator == 0: @@ -1076,9 +1187,11 @@ def estimate_specificity(y_pred: pd.DataFrame, y_pred_proba: pd.DataFrame) -> fl class BinaryClassificationAccuracy(Metric): """CBPE binary classification accuracy Metric Class.""" + y_pred_proba: str + def __init__( self, - y_pred_proba: ModelOutputsType, + y_pred_proba: str, y_pred: str, y_true: str, chunker: Chunker, @@ -1104,47 +1217,71 @@ def __init__( self._sampling_error_components: Tuple = () def _fit(self, reference_data: pd.DataFrame): - self._sampling_error_components = bse.accuracy_sampling_error_components( - y_true_reference=reference_data[self.y_true], - y_pred_reference=reference_data[self.y_pred], - ) + # filter nans + data = reference_data[[self.y_true, self.y_pred]] + data, empty = common_nan_removal(data, [self.y_true, self.y_pred]) + y_true = data[self.y_true] + y_pred = data[self.y_pred] + + if empty: + self._logger.debug(f"Not enough data to compute fit {self.display_name}.") + warnings.warn(f"Not enough data to compute fit {self.display_name}.") + self._sampling_error_components = np.NaN, 0 + else: + self._sampling_error_components = bse.accuracy_sampling_error_components( + y_true_reference=y_true, + y_pred_reference=y_pred, + ) def _estimate(self, data: pd.DataFrame): try: - assert isinstance(self.y_pred_proba, str) # because of binary classification - _dat, _empty = self._common_cleaning(data=data, selected_columns=[self.y_pred_proba, self.y_pred]) + _list_missing([self.y_pred_proba, self.y_pred], list(data.columns)) except InvalidArgumentsException as ex: - if "not all present in provided data columns" in str(ex): + if "missing required columns" in str(ex): self._logger.debug(str(ex)) return np.NaN else: raise ex - if _empty: + data, empty = common_nan_removal(data[[self.y_pred_proba, self.y_pred]], [self.y_pred_proba, self.y_pred]) + if empty: self._logger.debug(f"Not enough data to compute estimated {self.display_name}.") warnings.warn(f"Not enough data to compute 
estimated {self.display_name}.") return np.NaN - y_pred_proba, y_pred = _dat + + y_pred = data[self.y_pred] + y_pred_proba = data[self.y_pred_proba] return estimate_accuracy(y_pred, y_pred_proba) def _sampling_error(self, data: pd.DataFrame) -> float: - return bse.accuracy_sampling_error(self._sampling_error_components, data) + data = data[[self.y_pred_proba, self.y_pred]] + data, empty = common_nan_removal(data, [self.y_pred_proba, self.y_pred]) + if empty: + warnings.warn( + f"Too many missing values, cannot calculate {self.display_name} sampling error. " "Returning NaN." + ) + return np.NaN + else: + return bse.accuracy_sampling_error(self._sampling_error_components, data) def _realized_performance(self, data: pd.DataFrame) -> float: try: - _dat, _empty = self._common_cleaning(data=data, selected_columns=[self.y_pred, self.y_true]) + _list_missing([self.y_pred, self.y_true], list(data.columns)) except InvalidArgumentsException as ex: - if "not all present in provided data columns" in str(ex): + if "missing required columns" in str(ex): self._logger.debug(str(ex)) return np.NaN else: raise ex - if _empty: - self._logger.debug(f"Not enough data to compute estimated {self.display_name}.") - warnings.warn(f"Not enough data to compute estimated {self.display_name}.") + data, empty = common_nan_removal(data[[self.y_pred, self.y_true]], [self.y_pred, self.y_true]) + if empty: + self._logger.debug(f"Not enough data to compute realized {self.display_name}.") + warnings.warn(f"Not enough data to compute realized {self.display_name}.") return np.NaN - y_pred, y_true = _dat + + y_true = data[self.y_true] + y_pred = data[self.y_pred] return accuracy_score(y_true=y_true, y_pred=y_pred) @@ -1175,9 +1312,11 @@ def estimate_accuracy(y_pred: pd.Series, y_pred_proba: pd.Series) -> float: class BinaryClassificationConfusionMatrix(Metric): """CBPE binary classification confusion matrix Metric Class.""" + y_pred_proba: str + def __init__( self, - y_pred_proba: ModelOutputsType, + y_pred_proba: str, y_pred: str, y_true: str, chunker: Chunker, @@ -1253,26 +1392,37 @@ def fit(self, reference_data: pd.DataFrame): # override the superclass fit meth return def _fit(self, reference_data: pd.DataFrame): - self._true_positive_sampling_error_components = bse.true_positive_sampling_error_components( - y_true_reference=reference_data[self.y_true], - y_pred_reference=reference_data[self.y_pred], - normalize_confusion_matrix=self.normalize_confusion_matrix, - ) - self._true_negative_sampling_error_components = bse.true_negative_sampling_error_components( - y_true_reference=reference_data[self.y_true], - y_pred_reference=reference_data[self.y_pred], - normalize_confusion_matrix=self.normalize_confusion_matrix, - ) - self._false_positive_sampling_error_components = bse.false_positive_sampling_error_components( - y_true_reference=reference_data[self.y_true], - y_pred_reference=reference_data[self.y_pred], - normalize_confusion_matrix=self.normalize_confusion_matrix, - ) - self._false_negative_sampling_error_components = bse.false_negative_sampling_error_components( - y_true_reference=reference_data[self.y_true], - y_pred_reference=reference_data[self.y_pred], - normalize_confusion_matrix=self.normalize_confusion_matrix, - ) + _list_missing([self.y_true, self.y_pred], list(reference_data.columns)) + # filter nans here + reference_data, empty = common_nan_removal( + reference_data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred] + ) + if empty: + self._true_positive_sampling_error_components = np.NaN, 0.0, 
self.normalize_confusion_matrix + self._true_negative_sampling_error_components = np.NaN, 0.0, self.normalize_confusion_matrix + self._false_positive_sampling_error_components = np.NaN, 0.0, self.normalize_confusion_matrix + self._false_negative_sampling_error_components = np.NaN, 0.0, self.normalize_confusion_matrix + else: + self._true_positive_sampling_error_components = bse.true_positive_sampling_error_components( + y_true_reference=reference_data[self.y_true], + y_pred_reference=reference_data[self.y_pred], + normalize_confusion_matrix=self.normalize_confusion_matrix, + ) + self._true_negative_sampling_error_components = bse.true_negative_sampling_error_components( + y_true_reference=reference_data[self.y_true], + y_pred_reference=reference_data[self.y_pred], + normalize_confusion_matrix=self.normalize_confusion_matrix, + ) + self._false_positive_sampling_error_components = bse.false_positive_sampling_error_components( + y_true_reference=reference_data[self.y_true], + y_pred_reference=reference_data[self.y_pred], + normalize_confusion_matrix=self.normalize_confusion_matrix, + ) + self._false_negative_sampling_error_components = bse.false_negative_sampling_error_components( + y_true_reference=reference_data[self.y_true], + y_pred_reference=reference_data[self.y_pred], + normalize_confusion_matrix=self.normalize_confusion_matrix, + ) def _true_positive_alert_thresholds(self, reference_chunks: List[Chunk]) -> Tuple[Optional[float], Optional[float]]: realized_chunk_performance = np.asarray( @@ -1340,19 +1490,20 @@ def _false_negative_alert_thresholds( def _true_positive_realized_performance(self, data: pd.DataFrame) -> float: try: - _dat, _empty = self._common_cleaning(data=data, selected_columns=[self.y_pred, self.y_true]) + _list_missing([self.y_pred, self.y_true], list(data.columns)) except InvalidArgumentsException as ex: - if "not all present in provided data columns" in str(ex): + if "missing required columns" in str(ex): self._logger.debug(str(ex)) return np.NaN else: raise ex - - if _empty: - self._logger.debug(f"Not enough data to compute estimated {self.display_name}.") - warnings.warn(f"Not enough data to compute estimated {self.display_name}.") + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn("Too many missing values, cannot calculate true_positives. " "Returning NaN.") return np.NaN - y_pred, y_true = _dat + + y_true = data[self.y_true] + y_pred = data[self.y_pred] _, _, _, tp = confusion_matrix( y_true, y_pred, labels=self._labels, normalize=self.normalize_confusion_matrix @@ -1361,20 +1512,20 @@ def _true_positive_realized_performance(self, data: pd.DataFrame) -> float: def _true_negative_realized_performance(self, data: pd.DataFrame) -> float: try: - _dat, _empty = self._common_cleaning(data=data, selected_columns=[self.y_pred, self.y_true]) + _list_missing([self.y_pred, self.y_true], list(data.columns)) except InvalidArgumentsException as ex: - if "not all present in provided data columns" in str(ex): + if "missing required columns" in str(ex): self._logger.debug(str(ex)) return np.NaN else: raise ex - - if _empty: - self._logger.debug(f"Not enough data to compute estimated {self.display_name}.") - warnings.warn(f"Not enough data to compute estimated {self.display_name}.") + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn("Too many missing values, cannot calculate true_negatives. 
" "Returning NaN.") return np.NaN - y_pred, y_true = _dat + y_true = data[self.y_true] + y_pred = data[self.y_pred] tn, _, _, _ = confusion_matrix( y_true, y_pred, labels=self._labels, normalize=self.normalize_confusion_matrix @@ -1383,19 +1534,20 @@ def _true_negative_realized_performance(self, data: pd.DataFrame) -> float: def _false_positive_realized_performance(self, data: pd.DataFrame) -> float: try: - _dat, _empty = self._common_cleaning(data=data, selected_columns=[self.y_pred, self.y_true]) + _list_missing([self.y_pred, self.y_true], list(data.columns)) except InvalidArgumentsException as ex: - if "not all present in provided data columns" in str(ex): + if "missing required columns" in str(ex): self._logger.debug(str(ex)) return np.NaN else: raise ex - - if _empty: - self._logger.debug(f"Not enough data to compute estimated {self.display_name}.") - warnings.warn(f"Not enough data to compute estimated {self.display_name}.") + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn("Too many missing values, cannot calculate false_positives. " "Returning NaN.") return np.NaN - y_pred, y_true = _dat + + y_true = data[self.y_true] + y_pred = data[self.y_pred] _, fp, _, _ = confusion_matrix( y_true, y_pred, labels=self._labels, normalize=self.normalize_confusion_matrix @@ -1404,19 +1556,20 @@ def _false_positive_realized_performance(self, data: pd.DataFrame) -> float: def _false_negative_realized_performance(self, data: pd.DataFrame) -> float: try: - _dat, _empty = self._common_cleaning(data=data, selected_columns=[self.y_pred, self.y_true]) + _list_missing([self.y_pred, self.y_true], list(data.columns)) except InvalidArgumentsException as ex: - if "not all present in provided data columns" in str(ex): + if "missing required columns" in str(ex): self._logger.debug(str(ex)) return np.NaN else: raise ex - - if _empty: - self._logger.debug(f"Not enough data to compute estimated {self.display_name}.") - warnings.warn(f"Not enough data to compute estimated {self.display_name}.") + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: + warnings.warn("Too many missing values, cannot calculate false_negatives. " "Returning NaN.") return np.NaN - y_pred, y_true = _dat + + y_true = data[self.y_true] + y_pred = data[self.y_pred] _, _, fn, _ = confusion_matrix( y_true, y_pred, labels=self._labels, normalize=self.normalize_confusion_matrix @@ -1437,20 +1590,22 @@ def get_true_positive_estimate(self, chunk_data: pd.DataFrame) -> float: Estimated true positive rate. 
""" try: - assert isinstance(self.y_pred_proba, str) # because of binary classification - _dat, _empty = self._common_cleaning(data=chunk_data, selected_columns=[self.y_pred_proba, self.y_pred]) + _list_missing([self.y_pred_proba, self.y_pred], list(chunk_data.columns)) except InvalidArgumentsException as ex: - if "not all present in provided data columns" in str(ex): + if "missing required columns" in str(ex): self._logger.debug(str(ex)) return np.NaN else: raise ex - if _empty: + data, empty = common_nan_removal(chunk_data[[self.y_pred_proba, self.y_pred]], [self.y_pred_proba, self.y_pred]) + if empty: self._logger.debug(f"Not enough data to compute estimated {self.display_name}.") warnings.warn(f"Not enough data to compute estimated {self.display_name}.") return np.NaN - y_pred_proba, y_pred = _dat + + y_pred = data[self.y_pred] + y_pred_proba = data[self.y_pred_proba] est_tp_ratio = np.mean(np.where(y_pred == 1, y_pred_proba, 0)) est_fp_ratio = np.mean(np.where(y_pred == 1, 1 - y_pred_proba, 0)) @@ -1492,20 +1647,22 @@ def get_true_negative_estimate(self, chunk_data: pd.DataFrame) -> float: Estimated true negative rate. """ try: - assert isinstance(self.y_pred_proba, str) # because of binary classification - _dat, _empty = self._common_cleaning(data=chunk_data, selected_columns=[self.y_pred_proba, self.y_pred]) + _list_missing([self.y_pred_proba, self.y_pred], list(chunk_data.columns)) except InvalidArgumentsException as ex: - if "not all present in provided data columns" in str(ex): + if "missing required columns" in str(ex): self._logger.debug(str(ex)) return np.NaN else: raise ex - if _empty: + data, empty = common_nan_removal(chunk_data[[self.y_pred_proba, self.y_pred]], [self.y_pred_proba, self.y_pred]) + if empty: self._logger.debug(f"Not enough data to compute estimated {self.display_name}.") warnings.warn(f"Not enough data to compute estimated {self.display_name}.") return np.NaN - y_pred_proba, y_pred = _dat + + y_pred = data[self.y_pred] + y_pred_proba = data[self.y_pred_proba] est_tn_ratio = np.mean(np.where(y_pred == 0, 1 - y_pred_proba, 0)) est_fp_ratio = np.mean(np.where(y_pred == 1, 1 - y_pred_proba, 0)) @@ -1547,20 +1704,22 @@ def get_false_positive_estimate(self, chunk_data: pd.DataFrame) -> float: Estimated false positive rate. """ try: - assert isinstance(self.y_pred_proba, str) # because of binary classification - _dat, _empty = self._common_cleaning(data=chunk_data, selected_columns=[self.y_pred_proba, self.y_pred]) + _list_missing([self.y_pred_proba, self.y_pred], list(chunk_data.columns)) except InvalidArgumentsException as ex: - if "not all present in provided data columns" in str(ex): + if "missing required columns" in str(ex): self._logger.debug(str(ex)) return np.NaN else: raise ex - if _empty: + data, empty = common_nan_removal(chunk_data[[self.y_pred_proba, self.y_pred]], [self.y_pred_proba, self.y_pred]) + if empty: self._logger.debug(f"Not enough data to compute estimated {self.display_name}.") warnings.warn(f"Not enough data to compute estimated {self.display_name}.") return np.NaN - y_pred_proba, y_pred = _dat + + y_pred = data[self.y_pred] + y_pred_proba = data[self.y_pred_proba] est_tp_ratio = np.mean(np.where(y_pred == 1, y_pred_proba, 0)) est_fp_ratio = np.mean(np.where(y_pred == 1, 1 - y_pred_proba, 0)) @@ -1602,20 +1761,22 @@ def get_false_negative_estimate(self, chunk_data: pd.DataFrame) -> float: Estimated false negative rate. 
""" try: - assert isinstance(self.y_pred_proba, str) # because of binary classification - _dat, _empty = self._common_cleaning(data=chunk_data, selected_columns=[self.y_pred_proba, self.y_pred]) + _list_missing([self.y_pred_proba, self.y_pred], list(chunk_data.columns)) except InvalidArgumentsException as ex: - if "not all present in provided data columns" in str(ex): + if "missing required columns" in str(ex): self._logger.debug(str(ex)) return np.NaN else: raise ex - if _empty: + data, empty = common_nan_removal(chunk_data[[self.y_pred_proba, self.y_pred]], [self.y_pred_proba, self.y_pred]) + if empty: self._logger.debug(f"Not enough data to compute estimated {self.display_name}.") warnings.warn(f"Not enough data to compute estimated {self.display_name}.") return np.NaN - y_pred_proba, y_pred = _dat + + y_pred = data[self.y_pred] + y_pred_proba = data[self.y_pred_proba] est_tp_ratio = np.mean(np.where(y_pred == 1, y_pred_proba, 0)) est_fn_ratio = np.mean(np.where(y_pred == 0, y_pred_proba, 0)) @@ -1658,15 +1819,27 @@ def get_true_pos_info(self, chunk_data: pd.DataFrame) -> Dict: """ true_pos_info: Dict[str, Any] = {} + # we check for nans inside estimated_true_positives = self.get_true_positive_estimate(chunk_data) - - sampling_error_true_positives = bse.true_positive_sampling_error( - self._true_positive_sampling_error_components, chunk_data - ) + realized_true_positives = self._true_positive_realized_performance(chunk_data) + # we do sampling error nan checks here because we don't have dedicated sampling error function + # TODO: Refactor similarly to multiclass so code can be re-used. + # filter nans here - for realized performance both columns are expected + chunk_data, empty = common_nan_removal( + chunk_data[[self.y_pred_proba, self.y_pred]], [self.y_pred_proba, self.y_pred] + ) + if empty: + warnings.warn("Too many missing values, cannot calculate true positive sampling error. " "Returning NaN.") + sampling_error_true_positives = np.NaN + else: + sampling_error_true_positives = bse.true_positive_sampling_error( + self._true_positive_sampling_error_components, chunk_data + ) + # TODO: NaN removal is duplicated to an extent. Upon refactor consider if we can do it only once true_pos_info['estimated_true_positive'] = estimated_true_positives true_pos_info['sampling_error_true_positive'] = sampling_error_true_positives - true_pos_info['realized_true_positive'] = self._true_positive_realized_performance(chunk_data) + true_pos_info['realized_true_positive'] = realized_true_positives true_pos_info['upper_confidence_boundary_true_positive'] = np.minimum( np.inf if self.upper_threshold_value_limit is None else self.upper_threshold_value_limit, @@ -1706,15 +1879,27 @@ def get_true_neg_info(self, chunk_data: pd.DataFrame) -> Dict: """ true_neg_info: Dict[str, Any] = {} + # we check for nans inside estimated_true_negatives = self.get_true_negative_estimate(chunk_data) - - sampling_error_true_negatives = bse.true_negative_sampling_error( - self._true_negative_sampling_error_components, chunk_data - ) + realized_true_negatives = self._true_negative_realized_performance(chunk_data) + # we do sampling error nan checks here because we don't have dedicated sampling error function + # TODO: Refactor similarly to multiclass so code can be re-used. 
+ # filter nans here - for realized performance both columns are expected + chunk_data, empty = common_nan_removal( + chunk_data[[self.y_pred_proba, self.y_pred]], [self.y_pred_proba, self.y_pred] + ) + if empty: + warnings.warn("Too many missing values, cannot calculate true positive sampling error. " "Returning NaN.") + sampling_error_true_negatives = np.NaN + else: + sampling_error_true_negatives = bse.true_negative_sampling_error( + self._true_negative_sampling_error_components, chunk_data + ) + # TODO: NaN removal is duplicated to an extent. Upon refactor consider if we can do it only once true_neg_info['estimated_true_negative'] = estimated_true_negatives true_neg_info['sampling_error_true_negative'] = sampling_error_true_negatives - true_neg_info['realized_true_negative'] = self._true_negative_realized_performance(chunk_data) + true_neg_info['realized_true_negative'] = realized_true_negatives true_neg_info['upper_confidence_boundary_true_negative'] = np.minimum( np.inf if self.upper_threshold_value_limit is None else self.upper_threshold_value_limit, @@ -1754,15 +1939,27 @@ def get_false_pos_info(self, chunk_data: pd.DataFrame) -> Dict: """ false_pos_info: Dict[str, Any] = {} + # we check for nans inside estimated_false_positives = self.get_false_positive_estimate(chunk_data) - - sampling_error_false_positives = bse.false_positive_sampling_error( - self._false_positive_sampling_error_components, chunk_data - ) + realized_false_positives = self._false_positive_realized_performance(chunk_data) + # we do sampling error nan checks here because we don't have dedicated sampling error function + # TODO: Refactor similarly to multiclass so code can be re-used. + # filter nans here - for realized performance both columns are expected + chunk_data, empty = common_nan_removal( + chunk_data[[self.y_pred_proba, self.y_pred]], [self.y_pred_proba, self.y_pred] + ) + if empty: + warnings.warn("Too many missing values, cannot calculate true positive sampling error. " "Returning NaN.") + sampling_error_false_positives = np.NaN + else: + sampling_error_false_positives = bse.false_positive_sampling_error( + self._false_positive_sampling_error_components, chunk_data + ) + # TODO: NaN removal is duplicated to an extent. Upon refactor consider if we can do it only once false_pos_info['estimated_false_positive'] = estimated_false_positives false_pos_info['sampling_error_false_positive'] = sampling_error_false_positives - false_pos_info['realized_false_positive'] = self._false_positive_realized_performance(chunk_data) + false_pos_info['realized_false_positive'] = realized_false_positives false_pos_info['upper_confidence_boundary_false_positive'] = np.minimum( np.inf if self.upper_threshold_value_limit is None else self.upper_threshold_value_limit, @@ -1802,15 +1999,27 @@ def get_false_neg_info(self, chunk_data: pd.DataFrame) -> Dict: """ false_neg_info: Dict[str, Any] = {} + # we check for nans inside estimated_false_negatives = self.get_false_negative_estimate(chunk_data) - - sampling_error_false_negatives = bse.false_negative_sampling_error( - self._false_negative_sampling_error_components, chunk_data - ) + realized_false_negatives = self._false_negative_realized_performance(chunk_data) + # we do sampling error nan checks here because we don't have dedicated sampling error function + # TODO: Refactor similarly to multiclass so code can be re-used. 
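+        # Note: the estimated and realized values above each handle missing rows
+        # themselves; the removal below is only for the sampling error, which needs
+        # just the prediction columns.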
+ # filter nans here - for realized performance both columns are expected + chunk_data, empty = common_nan_removal( + chunk_data[[self.y_pred_proba, self.y_pred]], [self.y_pred_proba, self.y_pred] + ) + if empty: + warnings.warn("Too many missing values, cannot calculate true positive sampling error. " "Returning NaN.") + sampling_error_false_negatives = np.NaN + else: + sampling_error_false_negatives = bse.false_negative_sampling_error( + self._false_negative_sampling_error_components, chunk_data + ) + # TODO: NaN removal is duplicated to an extent. Upon refactor consider if we can do it only once false_neg_info['estimated_false_negative'] = estimated_false_negatives false_neg_info['sampling_error_false_negative'] = sampling_error_false_negatives - false_neg_info['realized_false_negative'] = self._false_negative_realized_performance(chunk_data) + false_neg_info['realized_false_negative'] = realized_false_negatives false_neg_info['upper_confidence_boundary_false_negative'] = np.minimum( np.inf if self.upper_threshold_value_limit is None else self.upper_threshold_value_limit, @@ -1878,9 +2087,11 @@ def _realized_performance(self, data: pd.DataFrame) -> float: class BinaryClassificationBusinessValue(Metric): """CBPE binary classification business value Metric Class.""" + y_pred_proba: str + def __init__( self, - y_pred_proba: ModelOutputsType, + y_pred_proba: str, y_pred: str, y_true: str, chunker: Chunker, @@ -1928,28 +2139,42 @@ def __init__( # self.upper_threshold: Optional[float] = 1 def _fit(self, reference_data: pd.DataFrame): - self._sampling_error_components = bse.business_value_sampling_error_components( - y_true_reference=reference_data[self.y_true], - y_pred_reference=reference_data[self.y_pred], - business_value_matrix=self.business_value_matrix, - normalize_business_value=self.normalize_business_value, - ) + # filter nans + data = reference_data[[self.y_true, self.y_pred]] + data, empty = common_nan_removal(data, [self.y_true, self.y_pred]) + y_true = data[self.y_true] + y_pred = data[self.y_pred] + + if empty: + self._logger.debug(f"Not enough data to compute fit {self.display_name}.") + warnings.warn(f"Not enough data to compute fit {self.display_name}.") + self._sampling_error_components = np.NaN, self.normalize_business_value + else: + self._sampling_error_components = bse.business_value_sampling_error_components( + y_true_reference=y_true, + y_pred_reference=y_pred, + business_value_matrix=self.business_value_matrix, + normalize_business_value=self.normalize_business_value, + ) def _realized_performance(self, data: pd.DataFrame) -> float: try: - _dat, _empty = self._common_cleaning(data=data, selected_columns=[self.y_pred, self.y_true]) + _list_missing([self.y_pred, self.y_true], list(data.columns)) except InvalidArgumentsException as ex: - if "not all present in provided data columns" in str(ex): + if "missing required columns" in str(ex): self._logger.debug(str(ex)) return np.NaN else: raise ex - if _empty: - self._logger.debug(f"Not enough data to compute estimated {self.display_name}.") - warnings.warn(f"Not enough data to compute estimated {self.display_name}.") + data, empty = common_nan_removal(data[[self.y_pred, self.y_true]], [self.y_pred, self.y_true]) + if empty: + self._logger.debug(f"Not enough data to compute realized {self.display_name}.") + warnings.warn(f"Not enough data to compute realized {self.display_name}.") return np.NaN - y_pred, y_true = _dat + + y_true = data[self.y_true] + y_pred = data[self.y_pred] tp_value = self.business_value_matrix[1, 1] 
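+        # business_value_matrix cells: [1, 1] holds the value of a true positive and
+        # [0, 0] the value of a true negative; the off-diagonal cells hold the false
+        # positive and false negative values.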
tn_value = self.business_value_matrix[0, 0] @@ -1966,20 +2191,22 @@ def _realized_performance(self, data: pd.DataFrame) -> float: def _estimate(self, chunk_data: pd.DataFrame) -> float: try: - assert isinstance(self.y_pred_proba, str) # because of binary classification - _dat, _empty = self._common_cleaning(data=chunk_data, selected_columns=[self.y_pred_proba, self.y_pred]) + _list_missing([self.y_pred_proba, self.y_pred], list(chunk_data.columns)) except InvalidArgumentsException as ex: - if "not all present in provided data columns" in str(ex): + if "missing required columns" in str(ex): self._logger.debug(str(ex)) return np.NaN else: raise ex - if _empty: + data, empty = common_nan_removal(chunk_data[[self.y_pred_proba, self.y_pred]], [self.y_pred_proba, self.y_pred]) + if empty: self._logger.debug(f"Not enough data to compute estimated {self.display_name}.") warnings.warn(f"Not enough data to compute estimated {self.display_name}.") return np.NaN - y_pred_proba, y_pred = _dat + + y_pred = data[self.y_pred] + y_pred_proba = data[self.y_pred_proba] business_value_normalization = self.normalize_business_value business_value_matrix = self.business_value_matrix @@ -1987,10 +2214,15 @@ def _estimate(self, chunk_data: pd.DataFrame) -> float: return estimate_business_value(y_pred, y_pred_proba, business_value_normalization, business_value_matrix) def _sampling_error(self, data: pd.DataFrame) -> float: - return bse.business_value_sampling_error( - self._sampling_error_components, - data, - ) + data = data[[self.y_pred_proba, self.y_pred]] + data, empty = common_nan_removal(data, [self.y_pred_proba, self.y_pred]) + if empty: + warnings.warn( + f"Too many missing values, cannot calculate {self.display_name} sampling error. " "Returning NaN." + ) + return np.NaN + else: + return bse.business_value_sampling_error(self._sampling_error_components, data) def estimate_business_value( @@ -2070,26 +2302,8 @@ def _get_multiclass_uncalibrated_predictions(data: pd.DataFrame, y_pred: str, y_ return data[y_pred], data[class_probability_columns], labels -class _MulticlassClassificationMetric(Metric): - """Base class for multiclass classification metrics.""" - - def _ensure_targets(self, data: pd.DataFrame) -> Optional[pd.DataFrame]: - """Ensures that the data contains the target column and that it doesn't contain all NaNs. - - Any rows in the input where the target is NaN are dropped. 
- """ - if self.y_true not in data.columns: - return None - - na = data[self.y_true].isna() - if na.all(): - return None - else: - return data[~na] - - @MetricFactory.register('roc_auc', ProblemType.CLASSIFICATION_MULTICLASS) -class MulticlassClassificationAUROC(_MulticlassClassificationMetric): +class MulticlassClassificationAUROC(Metric): """CBPE multiclass classification AUROC Metric Class.""" def __init__( @@ -2121,14 +2335,43 @@ def __init__( def _fit(self, reference_data: pd.DataFrame): classes = class_labels(self.y_pred_proba) - binarized_y_true = list(label_binarize(reference_data[self.y_true], classes=classes).T) - y_pred_proba = [reference_data[self.y_pred_proba[clazz]].T for clazz in classes] - - self._sampling_error_components = mse.auroc_sampling_error_components( - y_true_reference=binarized_y_true, y_pred_proba_reference=y_pred_proba - ) + class_y_pred_proba_columns = model_output_column_names(self.y_pred_proba) + class_uncalibrated_y_pred_proba_columns = ['uncalibrated_' + el for el in class_y_pred_proba_columns] + _list_missing([self.y_true] + class_uncalibrated_y_pred_proba_columns, list(reference_data.columns)) + # filter nans here + reference_data, empty = common_nan_removal( + reference_data[[self.y_true] + class_uncalibrated_y_pred_proba_columns], + [self.y_true] + class_uncalibrated_y_pred_proba_columns, + ) + if empty: + self._sampling_error_components = [(np.NaN, 0) for class_col in class_y_pred_proba_columns] + else: + # sampling error + binarized_y_true = list(label_binarize(reference_data[self.y_true], classes=classes).T) + y_pred_proba = [reference_data['uncalibrated_' + self.y_pred_proba[clazz]].T for clazz in classes] + self._sampling_error_components = mse.auroc_sampling_error_components( + y_true_reference=binarized_y_true, y_pred_proba_reference=y_pred_proba + ) def _estimate(self, data: pd.DataFrame): + class_y_pred_proba_columns = model_output_column_names(self.y_pred_proba) + class_uncalibrated_y_pred_proba_columns = ['uncalibrated_' + el for el in class_y_pred_proba_columns] + needed_columns = class_y_pred_proba_columns + class_uncalibrated_y_pred_proba_columns + try: + _list_missing(needed_columns, list(data.columns)) + except InvalidArgumentsException as ex: + if "missing required columns" in str(ex): + self._logger.debug(str(ex)) + return np.NaN + else: + raise ex + + data, empty = common_nan_removal(data, needed_columns) + if empty: + self._logger.debug(f"Not enough data to compute estimated {self.display_name}.") + warnings.warn(f"Not enough data to compute estimated {self.display_name}.") + return np.NaN + _, y_pred_probas, _ = _get_binarized_multiclass_predictions(data, self.y_pred, self.y_pred_proba) _, y_pred_probas_uncalibrated, _ = _get_multiclass_uncalibrated_predictions( data, self.y_pred, self.y_pred_proba @@ -2147,26 +2390,48 @@ def _estimate(self, data: pd.DataFrame): return multiclass_roc_auc def _sampling_error(self, data: pd.DataFrame) -> float: - return mse.auroc_sampling_error(self._sampling_error_components, data) + class_y_pred_proba_columns = model_output_column_names(self.y_pred_proba) + class_uncalibrated_y_pred_proba_columns = ['uncalibrated_' + el for el in class_y_pred_proba_columns] + needed_columns = class_y_pred_proba_columns + class_uncalibrated_y_pred_proba_columns + _list_missing(needed_columns, data) + data, empty = common_nan_removal(data[needed_columns], needed_columns) + if empty: + warnings.warn( + f"Too many missing values, cannot calculate {self.display_name} sampling error. " f"Returning NaN." 
+ ) + return np.NaN + else: + return mse.auroc_sampling_error(self._sampling_error_components, data) def _realized_performance(self, data: pd.DataFrame) -> float: - data = self._ensure_targets(data) + class_y_pred_proba_columns = model_output_column_names(self.y_pred_proba) + class_uncalibrated_y_pred_proba_columns = ['uncalibrated_' + el for el in class_y_pred_proba_columns] + try: + _list_missing([self.y_true] + class_uncalibrated_y_pred_proba_columns, data) + except InvalidArgumentsException as ex: + if "missing required columns" in str(ex): + self._logger.debug(str(ex)) + return np.NaN + else: + raise ex - if data is None: - warnings.warn("No 'y_true' values given for chunk, returning NaN as realized ROC-AUC.") + data, empty = common_nan_removal(data, [self.y_true] + class_uncalibrated_y_pred_proba_columns) + if empty: + warnings.warn(f"Too many missing values, cannot calculate {self.display_name}. " f"Returning NaN.") return np.NaN - if data[self.y_true].nunique() <= 1: + y_true = data[self.y_true] + if y_true.nunique() <= 1: warnings.warn("Too few unique values present in 'y_true', returning NaN as realized ROC-AUC.") return np.NaN _, y_pred_probas, labels = _get_multiclass_uncalibrated_predictions(data, self.y_pred, self.y_pred_proba) - return roc_auc_score(data[self.y_true], y_pred_probas, multi_class='ovr', average='macro', labels=labels) + return roc_auc_score(y_true, y_pred_probas, multi_class='ovr', average='macro', labels=labels) @MetricFactory.register('f1', ProblemType.CLASSIFICATION_MULTICLASS) -class MulticlassClassificationF1(_MulticlassClassificationMetric): +class MulticlassClassificationF1(Metric): """CBPE multiclass classification f1 Metric Class.""" def __init__( @@ -2195,15 +2460,40 @@ def __init__( self._sampling_error_components: List[Tuple] = [] def _fit(self, reference_data: pd.DataFrame): - label_binarizer = LabelBinarizer() - binarized_y_true = list(label_binarizer.fit_transform(reference_data[self.y_true]).T) - binarized_y_pred = list(label_binarizer.transform(reference_data[self.y_pred]).T) - - self._sampling_error_components = mse.f1_sampling_error_components( - y_true_reference=binarized_y_true, y_pred_reference=binarized_y_pred + classes = class_labels(self.y_pred_proba) + _list_missing([self.y_true, self.y_pred], list(reference_data.columns)) + # filter nans here + reference_data, empty = common_nan_removal( + reference_data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred] ) + if empty: + self._sampling_error_components = [(np.NaN, 0) for clazz in classes] + else: + label_binarizer = LabelBinarizer() + binarized_y_true = list(label_binarizer.fit_transform(reference_data[self.y_true]).T) + binarized_y_pred = list(label_binarizer.transform(reference_data[self.y_pred]).T) + self._sampling_error_components = mse.f1_sampling_error_components( + y_true_reference=binarized_y_true, y_pred_reference=binarized_y_pred + ) def _estimate(self, data: pd.DataFrame): + class_y_pred_proba_columns = model_output_column_names(self.y_pred_proba) + needed_columns = class_y_pred_proba_columns + [self.y_pred] + try: + _list_missing(needed_columns, list(data.columns)) + except InvalidArgumentsException as ex: + if "missing required columns" in str(ex): + self._logger.debug(str(ex)) + return np.NaN + else: + raise ex + + data, empty = common_nan_removal(data, needed_columns) + if empty: + self._logger.debug(f"Not enough data to compute estimated {self.display_name}.") + warnings.warn(f"Not enough data to compute estimated {self.display_name}.") + return np.NaN + y_preds, 
y_pred_probas, _ = _get_binarized_multiclass_predictions(data, self.y_pred, self.y_pred_proba) ovr_estimates = [] for y_pred, y_pred_proba in zip(y_preds, y_pred_probas): @@ -2213,17 +2503,36 @@ def _estimate(self, data: pd.DataFrame): return multiclass_metric def _sampling_error(self, data: pd.DataFrame) -> float: - return mse.f1_sampling_error(self._sampling_error_components, data) + class_y_pred_proba_columns = model_output_column_names(self.y_pred_proba) + needed_columns = class_y_pred_proba_columns + [self.y_pred] + _list_missing(needed_columns, data) + data, empty = common_nan_removal(data[needed_columns], needed_columns) + if empty: + warnings.warn( + f"Too many missing values, cannot calculate {self.display_name} sampling error. " f"Returning NaN." + ) + return np.NaN + else: + return mse.f1_sampling_error(self._sampling_error_components, data) def _realized_performance(self, data: pd.DataFrame) -> float: - data = self._ensure_targets(data) + try: + _list_missing([self.y_true, self.y_pred], data) + except InvalidArgumentsException as ex: + if "missing required columns" in str(ex): + self._logger.debug(str(ex)) + return np.NaN + else: + raise ex - if data is None: - warnings.warn("No 'y_true' values given for chunk, returning NaN as realized F1 score.") + data, empty = common_nan_removal(data, [self.y_true, self.y_pred]) + if empty: + warnings.warn(f"Too many missing values, cannot calculate {self.display_name}. " f"Returning NaN.") return np.NaN - if data[self.y_true].nunique() <= 1: - warnings.warn("Too few unique values present in 'y_true', returning NaN as realized F1 score.") + y_true = data[self.y_true] + if y_true.nunique() <= 1: + warnings.warn(f"Too few unique values present in 'y_true', returning NaN as realized {self.display_name}.") return np.NaN if data[self.y_pred].nunique() <= 1: @@ -2232,11 +2541,11 @@ def _realized_performance(self, data: pd.DataFrame) -> float: y_pred, _, labels = _get_multiclass_uncalibrated_predictions(data, self.y_pred, self.y_pred_proba) - return f1_score(y_true=data[self.y_true], y_pred=y_pred, average='macro', labels=labels) + return f1_score(y_true=y_true, y_pred=y_pred, average='macro', labels=labels) @MetricFactory.register('precision', ProblemType.CLASSIFICATION_MULTICLASS) -class MulticlassClassificationPrecision(_MulticlassClassificationMetric): +class MulticlassClassificationPrecision(Metric): """CBPE multiclass classification precision Metric Class.""" def __init__( @@ -2265,15 +2574,40 @@ def __init__( self._sampling_error_components: List[Tuple] = [] def _fit(self, reference_data: pd.DataFrame): - label_binarizer = LabelBinarizer() - binarized_y_true = list(label_binarizer.fit_transform(reference_data[self.y_true]).T) - binarized_y_pred = list(label_binarizer.transform(reference_data[self.y_pred]).T) - - self._sampling_error_components = mse.precision_sampling_error_components( - y_true_reference=binarized_y_true, y_pred_reference=binarized_y_pred + classes = class_labels(self.y_pred_proba) + _list_missing([self.y_true, self.y_pred], list(reference_data.columns)) + # filter nans here + reference_data, empty = common_nan_removal( + reference_data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred] ) + if empty: + self._sampling_error_components = [(np.NaN, 0) for clazz in classes] + else: + label_binarizer = LabelBinarizer() + binarized_y_true = list(label_binarizer.fit_transform(reference_data[self.y_true]).T) + binarized_y_pred = list(label_binarizer.transform(reference_data[self.y_pred]).T) + self._sampling_error_components = 
mse.precision_sampling_error_components( + y_true_reference=binarized_y_true, y_pred_reference=binarized_y_pred + ) def _estimate(self, data: pd.DataFrame): + class_y_pred_proba_columns = model_output_column_names(self.y_pred_proba) + needed_columns = class_y_pred_proba_columns + [self.y_pred] + try: + _list_missing(needed_columns, list(data.columns)) + except InvalidArgumentsException as ex: + if "missing required columns" in str(ex): + self._logger.debug(str(ex)) + return np.NaN + else: + raise ex + + data, empty = common_nan_removal(data, needed_columns) + if empty: + self._logger.debug(f"Not enough data to compute estimated {self.display_name}.") + warnings.warn(f"Not enough data to compute estimated {self.display_name}.") + return np.NaN + y_preds, y_pred_probas, _ = _get_binarized_multiclass_predictions(data, self.y_pred, self.y_pred_proba) ovr_estimates = [] for y_pred, y_pred_proba in zip(y_preds, y_pred_probas): @@ -2283,30 +2617,50 @@ def _estimate(self, data: pd.DataFrame): return multiclass_metric def _sampling_error(self, data: pd.DataFrame) -> float: - return mse.precision_sampling_error(self._sampling_error_components, data) + class_y_pred_proba_columns = model_output_column_names(self.y_pred_proba) + needed_columns = class_y_pred_proba_columns + [self.y_pred] + _list_missing(needed_columns, data) + data, empty = common_nan_removal(data[needed_columns], needed_columns) + if empty: + warnings.warn( + f"Too many missing values, cannot calculate {self.display_name} sampling error. " f"Returning NaN." + ) + return np.NaN + else: + return mse.precision_sampling_error(self._sampling_error_components, data) def _realized_performance(self, data: pd.DataFrame) -> float: - data = self._ensure_targets(data) + try: + _list_missing([self.y_true, self.y_pred], data) + except InvalidArgumentsException as ex: + if "missing required columns" in str(ex): + self._logger.debug(str(ex)) + return np.NaN + else: + raise ex - if data is None: - warnings.warn("No 'y_true' values given for chunk, returning NaN as realized precision.") + data, empty = common_nan_removal(data, [self.y_true, self.y_pred]) + if empty: + warnings.warn(f"Too many missing values, cannot calculate {self.display_name}. " f"Returning NaN.") return np.NaN - if data[self.y_true].nunique() <= 1: - warnings.warn("Too few unique values present in 'y_true', returning NaN as realized precision.") + y_true = data[self.y_true] + if y_true.nunique() <= 1: + warnings.warn(f"Too few unique values present in 'y_true', returning NaN as realized {self.display_name}.") return np.NaN if data[self.y_pred].nunique() <= 1: - warnings.warn("Too few unique values present in 'y_pred', returning NaN as realized precision.") + warnings.warn( + f"Too few unique values present in 'y_pred', returning NaN as realized {self.display_name} score." 
+ ) return np.NaN y_pred, _, labels = _get_multiclass_uncalibrated_predictions(data, self.y_pred, self.y_pred_proba) - - return precision_score(y_true=data[self.y_true], y_pred=y_pred, average='macro', labels=labels) + return precision_score(y_true=y_true, y_pred=y_pred, average='macro', labels=labels) @MetricFactory.register('recall', ProblemType.CLASSIFICATION_MULTICLASS) -class MulticlassClassificationRecall(_MulticlassClassificationMetric): +class MulticlassClassificationRecall(Metric): """CBPE multiclass classification recall Metric Class.""" def __init__( @@ -2335,48 +2689,93 @@ def __init__( self._sampling_error_components: List[Tuple] = [] def _fit(self, reference_data: pd.DataFrame): - label_binarizer = LabelBinarizer() - binarized_y_true = list(label_binarizer.fit_transform(reference_data[self.y_true]).T) - binarized_y_pred = list(label_binarizer.transform(reference_data[self.y_pred]).T) - - self._sampling_error_components = mse.recall_sampling_error_components( - y_true_reference=binarized_y_true, y_pred_reference=binarized_y_pred + classes = class_labels(self.y_pred_proba) + _list_missing([self.y_true, self.y_pred], list(reference_data.columns)) + # filter nans here + reference_data, empty = common_nan_removal( + reference_data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred] ) + if empty: + self._sampling_error_components = [(np.NaN, 0) for clazz in classes] + else: + label_binarizer = LabelBinarizer() + binarized_y_true = list(label_binarizer.fit_transform(reference_data[self.y_true]).T) + binarized_y_pred = list(label_binarizer.transform(reference_data[self.y_pred]).T) + self._sampling_error_components = mse.recall_sampling_error_components( + y_true_reference=binarized_y_true, y_pred_reference=binarized_y_pred + ) def _estimate(self, data: pd.DataFrame): + class_y_pred_proba_columns = model_output_column_names(self.y_pred_proba) + needed_columns = class_y_pred_proba_columns + [self.y_pred] + try: + _list_missing(needed_columns, list(data.columns)) + except InvalidArgumentsException as ex: + if "missing required columns" in str(ex): + self._logger.debug(str(ex)) + return np.NaN + else: + raise ex + + data, empty = common_nan_removal(data, needed_columns) + if empty: + self._logger.debug(f"Not enough data to compute estimated {self.display_name}.") + warnings.warn(f"Not enough data to compute estimated {self.display_name}.") + return np.NaN + y_preds, y_pred_probas, _ = _get_binarized_multiclass_predictions(data, self.y_pred, self.y_pred_proba) ovr_estimates = [] for y_pred, y_pred_proba in zip(y_preds, y_pred_probas): ovr_estimates.append(estimate_recall(y_pred, y_pred_proba)) multiclass_metric = np.mean(ovr_estimates) - return multiclass_metric def _sampling_error(self, data: pd.DataFrame) -> float: - return mse.recall_sampling_error(self._sampling_error_components, data) + class_y_pred_proba_columns = model_output_column_names(self.y_pred_proba) + needed_columns = class_y_pred_proba_columns + [self.y_pred] + _list_missing(needed_columns, data) + data, empty = common_nan_removal(data[needed_columns], needed_columns) + if empty: + warnings.warn( + f"Too many missing values, cannot calculate {self.display_name} sampling error. " f"Returning NaN." 
+ ) + return np.NaN + else: + return mse.recall_sampling_error(self._sampling_error_components, data) def _realized_performance(self, data: pd.DataFrame) -> float: - data = self._ensure_targets(data) + try: + _list_missing([self.y_true, self.y_pred], data) + except InvalidArgumentsException as ex: + if "missing required columns" in str(ex): + self._logger.debug(str(ex)) + return np.NaN + else: + raise ex - if data is None: - warnings.warn("No 'y_true' values given for chunk, returning NaN as realized recall.") + data, empty = common_nan_removal(data, [self.y_true, self.y_pred]) + if empty: + warnings.warn(f"Too many missing values, cannot calculate {self.display_name}. " f"Returning NaN.") return np.NaN - if data[self.y_true].nunique() <= 1: - warnings.warn("Too few unique values present in 'y_true', returning NaN as realized recall.") + y_true = data[self.y_true] + if y_true.nunique() <= 1: + warnings.warn(f"Too few unique values present in 'y_true', returning NaN as realized {self.display_name}.") return np.NaN if data[self.y_pred].nunique() <= 1: - warnings.warn("Too few unique values present in 'y_pred', returning NaN as realized recall.") + warnings.warn( + f"Too few unique values present in 'y_pred', returning NaN as realized {self.display_name} score." + ) return np.NaN y_pred, _, labels = _get_multiclass_uncalibrated_predictions(data, self.y_pred, self.y_pred_proba) - return recall_score(y_true=data[self.y_true], y_pred=y_pred, average='macro', labels=labels) + return recall_score(y_true=y_true, y_pred=y_pred, average='macro', labels=labels) @MetricFactory.register('specificity', ProblemType.CLASSIFICATION_MULTICLASS) -class MulticlassClassificationSpecificity(_MulticlassClassificationMetric): +class MulticlassClassificationSpecificity(Metric): """CBPE multiclass classification specificity Metric Class.""" def __init__( @@ -2405,15 +2804,40 @@ def __init__( self._sampling_error_components: List[Tuple] = [] def _fit(self, reference_data: pd.DataFrame): - label_binarizer = LabelBinarizer() - binarized_y_true = list(label_binarizer.fit_transform(reference_data[self.y_true]).T) - binarized_y_pred = list(label_binarizer.transform(reference_data[self.y_pred]).T) - - self._sampling_error_components = mse.specificity_sampling_error_components( - y_true_reference=binarized_y_true, y_pred_reference=binarized_y_pred + classes = class_labels(self.y_pred_proba) + _list_missing([self.y_true, self.y_pred], list(reference_data.columns)) + # filter nans here + reference_data, empty = common_nan_removal( + reference_data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred] ) + if empty: + self._sampling_error_components = [(np.NaN, 0) for clazz in classes] + else: + label_binarizer = LabelBinarizer() + binarized_y_true = list(label_binarizer.fit_transform(reference_data[self.y_true]).T) + binarized_y_pred = list(label_binarizer.transform(reference_data[self.y_pred]).T) + self._sampling_error_components = mse.specificity_sampling_error_components( + y_true_reference=binarized_y_true, y_pred_reference=binarized_y_pred + ) def _estimate(self, data: pd.DataFrame): + class_y_pred_proba_columns = model_output_column_names(self.y_pred_proba) + needed_columns = class_y_pred_proba_columns + [self.y_pred] + try: + _list_missing(needed_columns, list(data.columns)) + except InvalidArgumentsException as ex: + if "missing required columns" in str(ex): + self._logger.debug(str(ex)) + return np.NaN + else: + raise ex + + data, empty = common_nan_removal(data, needed_columns) + if empty: + 
self._logger.debug(f"Not enough data to compute estimated {self.display_name}.") + warnings.warn(f"Not enough data to compute estimated {self.display_name}.") + return np.NaN + y_preds, y_pred_probas, _ = _get_binarized_multiclass_predictions(data, self.y_pred, self.y_pred_proba) ovr_estimates = [] for y_pred, y_pred_proba in zip(y_preds, y_pred_probas): @@ -2423,26 +2847,47 @@ def _estimate(self, data: pd.DataFrame): return multiclass_metric def _sampling_error(self, data: pd.DataFrame) -> float: - return mse.specificity_sampling_error(self._sampling_error_components, data) + class_y_pred_proba_columns = model_output_column_names(self.y_pred_proba) + needed_columns = class_y_pred_proba_columns + [self.y_pred] + _list_missing(needed_columns, data) + data, empty = common_nan_removal(data[needed_columns], needed_columns) + if empty: + warnings.warn( + f"Too many missing values, cannot calculate {self.display_name} sampling error. " f"Returning NaN." + ) + return np.NaN + else: + return mse.specificity_sampling_error(self._sampling_error_components, data) def _realized_performance(self, data: pd.DataFrame) -> float: - data = self._ensure_targets(data) + try: + _list_missing([self.y_true, self.y_pred], data) + except InvalidArgumentsException as ex: + if "missing required columns" in str(ex): + self._logger.debug(str(ex)) + return np.NaN + else: + raise ex - if data is None: - warnings.warn("No 'y_true' values given for chunk, returning NaN as realized specificity.") + data, empty = common_nan_removal(data, [self.y_true, self.y_pred]) + if empty: + warnings.warn(f"Too many missing values, cannot calculate {self.display_name}. " f"Returning NaN.") return np.NaN - if data[self.y_true].nunique() <= 1: - warnings.warn("Too few unique values present in 'y_true', returning NaN as realized specificity.") + y_true = data[self.y_true] + if y_true.nunique() <= 1: + warnings.warn(f"Too few unique values present in 'y_true', returning NaN as realized {self.display_name}.") return np.NaN if data[self.y_pred].nunique() <= 1: - warnings.warn("Too few unique values present in 'y_pred', returning NaN as realized specificity.") + warnings.warn( + f"Too few unique values present in 'y_pred', returning NaN as realized {self.display_name} score." 
+ ) return np.NaN y_pred, _, labels = _get_multiclass_uncalibrated_predictions(data, self.y_pred, self.y_pred_proba) - mcm = multilabel_confusion_matrix(data[self.y_true], y_pred, labels=labels) + mcm = multilabel_confusion_matrix(y_true, y_pred, labels=labels) tn_sum = mcm[:, 0, 0] fp_sum = mcm[:, 0, 1] class_wise_specificity = tn_sum / (tn_sum + fp_sum) @@ -2450,7 +2895,7 @@ def _realized_performance(self, data: pd.DataFrame) -> float: @MetricFactory.register('accuracy', ProblemType.CLASSIFICATION_MULTICLASS) -class MulticlassClassificationAccuracy(_MulticlassClassificationMetric): +class MulticlassClassificationAccuracy(Metric): """CBPE multiclass classification accuracy Metric Class.""" def __init__( @@ -2479,15 +2924,40 @@ def __init__( self._sampling_error_components: Tuple = () def _fit(self, reference_data: pd.DataFrame): - label_binarizer = LabelBinarizer() - binarized_y_true = label_binarizer.fit_transform(reference_data[self.y_true]) - binarized_y_pred = label_binarizer.transform(reference_data[self.y_pred]) - - self._sampling_error_components = mse.accuracy_sampling_error_components( - y_true_reference=binarized_y_true, y_pred_reference=binarized_y_pred + _list_missing([self.y_true, self.y_pred], list(reference_data.columns)) + # filter nans here + reference_data, empty = common_nan_removal( + reference_data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred] ) + if empty: + self._sampling_error_components = (np.NaN,) + else: + label_binarizer = LabelBinarizer() + binarized_y_true = label_binarizer.fit_transform(reference_data[self.y_true]) + binarized_y_pred = label_binarizer.transform(reference_data[self.y_pred]) + + self._sampling_error_components = mse.accuracy_sampling_error_components( + y_true_reference=binarized_y_true, y_pred_reference=binarized_y_pred + ) def _estimate(self, data: pd.DataFrame): + class_y_pred_proba_columns = model_output_column_names(self.y_pred_proba) + needed_columns = class_y_pred_proba_columns + [self.y_pred] + try: + _list_missing(needed_columns, list(data.columns)) + except InvalidArgumentsException as ex: + if "missing required columns" in str(ex): + self._logger.debug(str(ex)) + return np.NaN + else: + raise ex + + data, empty = common_nan_removal(data, needed_columns) + if empty: + self._logger.debug(f"Not enough data to compute estimated {self.display_name}.") + warnings.warn(f"Not enough data to compute estimated {self.display_name}.") + return np.NaN + y_preds, y_pred_probas, _ = _get_binarized_multiclass_predictions(data, self.y_pred, self.y_pred_proba) y_preds_array = np.asarray(y_preds).T y_pred_probas_array = np.asarray(y_pred_probas).T @@ -2495,34 +2965,57 @@ def _estimate(self, data: pd.DataFrame): return np.mean(probability_of_predicted) def _sampling_error(self, data: pd.DataFrame) -> float: - return mse.accuracy_sampling_error(self._sampling_error_components, data) + class_y_pred_proba_columns = model_output_column_names(self.y_pred_proba) + needed_columns = class_y_pred_proba_columns + [self.y_pred] + _list_missing(needed_columns, data) + data, empty = common_nan_removal(data[needed_columns], needed_columns) + if empty: + warnings.warn( + f"Too many missing values, cannot calculate {self.display_name} sampling error. " f"Returning NaN." 
+ ) + return np.NaN + else: + return mse.accuracy_sampling_error(self._sampling_error_components, data) def _realized_performance(self, data: pd.DataFrame) -> float: - data = self._ensure_targets(data) + try: + _list_missing([self.y_true, self.y_pred], data) + except InvalidArgumentsException as ex: + if "missing required columns" in str(ex): + self._logger.debug(str(ex)) + return np.NaN + else: + raise ex - if data is None: - warnings.warn("No 'y_true' values given for chunk, returning NaN as realized accuracy.") + data, empty = common_nan_removal(data, [self.y_true, self.y_pred]) + if empty: + warnings.warn(f"Too many missing values, cannot calculate {self.display_name}. " f"Returning NaN.") return np.NaN - if data[self.y_true].nunique() <= 1: - warnings.warn("Too few unique values present in 'y_true', returning NaN as realized accuracy.") + y_true = data[self.y_true] + if y_true.nunique() <= 1: + warnings.warn(f"Too few unique values present in 'y_true', returning NaN as realized {self.display_name}.") return np.NaN if data[self.y_pred].nunique() <= 1: - warnings.warn("Too few unique values present in 'y_pred', returning NaN as realized accuracy.") + warnings.warn( + f"Too few unique values present in 'y_pred', returning NaN as realized {self.display_name} score." + ) return np.NaN y_pred, _, _ = _get_multiclass_uncalibrated_predictions(data, self.y_pred, self.y_pred_proba) - return accuracy_score(data[self.y_true], y_pred) + return accuracy_score(y_true, y_pred) @MetricFactory.register('confusion_matrix', ProblemType.CLASSIFICATION_MULTICLASS) class MulticlassClassificationConfusionMatrix(Metric): """CBPE multiclass classification confusion matrix Metric Class.""" + y_pred_proba: Dict[str, str] + def __init__( self, - y_pred_proba: ModelOutputsType, + y_pred_proba: Dict[str, str], y_pred: str, y_true: str, chunker: Chunker, @@ -2537,7 +3030,7 @@ def __init__( "y_pred_proba must be a dictionary with class labels as keys and pred_proba column names as values" ) - self.classes = list(y_pred_proba.keys()) + self.classes: List[str] = sorted(list(y_pred_proba.keys())) super().__init__( name='confusion_matrix', @@ -2593,11 +3086,25 @@ def fit(self, reference_data: pd.DataFrame): # override the superclass fit meth return def _fit(self, reference_data: pd.DataFrame): - self._confusion_matrix_sampling_error_components = mse.multiclass_confusion_matrix_sampling_error_components( - y_true_reference=reference_data[self.y_true], - y_pred_reference=reference_data[self.y_pred], - normalize_confusion_matrix=self.normalize_confusion_matrix, - ) + _list_missing([self.y_true, self.y_pred], reference_data) + # filter nans here + reference_data, empty = common_nan_removal( + reference_data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred] + ) + if empty: + self._confusion_matrix_sampling_error_components = ( + np.full((len(self.classes), len(self.classes)), np.nan), + 0, + ) + else: + # sampling error + self._confusion_matrix_sampling_error_components = ( + mse.multiclass_confusion_matrix_sampling_error_components( # noqa: E501 + y_true_reference=reference_data[self.y_true], + y_pred_reference=reference_data[self.y_pred], + normalize_confusion_matrix=self.normalize_confusion_matrix, + ) + ) def _multiclass_confusion_matrix_alert_thresholds( self, reference_chunks: List[Chunk] @@ -2626,29 +3133,52 @@ def _multiclass_confusion_matrix_alert_thresholds( return alert_thresholds def _multi_class_confusion_matrix_realized_performance(self, data: pd.DataFrame) -> Union[np.ndarray, float]: - if data is None or 
self.y_true not in data.columns: - warnings.warn("No 'y_true' values given for chunk, returning NaN as realized precision.") - return np.NaN + try: + _list_missing([self.y_true, self.y_pred], data) + except InvalidArgumentsException as ex: + if "missing required columns" in str(ex): + self._logger.debug(str(ex)) + return np.NaN + else: + raise ex - if data[self.y_true].nunique() <= 1: - warnings.warn("Too few unique values present in 'y_true', returning NaN as realized precision.") + data, empty = common_nan_removal(data, [self.y_true, self.y_pred]) + if empty: + warnings.warn(f"Too many missing values, cannot calculate {self.display_name}. " f"Returning NaN.") return np.NaN + y_true = data[self.y_true] + if y_true.nunique() <= 1: + warnings.warn(f"Too few unique values present in 'y_true', returning NaN as realized {self.display_name}.") + return np.NaN if data[self.y_pred].nunique() <= 1: - warnings.warn("Too few unique values present in 'y_pred', returning NaN as realized precision.") + warnings.warn( + f"Too few unique values present in 'y_pred', returning NaN as realized {self.display_name} score." + ) return np.NaN cm = confusion_matrix( data[self.y_true], data[self.y_pred], labels=self.classes, normalize=self.normalize_confusion_matrix ) - return cm def _get_multiclass_confusion_matrix_estimate(self, chunk_data: pd.DataFrame) -> np.ndarray: - if isinstance(self.y_pred_proba, str): - raise ValueError( - "y_pred_proba must be a dictionary with class labels as keys and pred_proba column names as values" - ) + class_y_pred_proba_columns = model_output_column_names(self.y_pred_proba) + needed_columns = class_y_pred_proba_columns + [self.y_pred] + try: + _list_missing(needed_columns, list(chunk_data.columns)) + except InvalidArgumentsException as ex: + if "missing required columns" in str(ex): + self._logger.debug(str(ex)) + return np.full((len(self.classes), len(self.classes)), np.nan) + else: + raise ex + + chunk_data, empty = common_nan_removal(chunk_data, needed_columns) + if empty: + self._logger.debug(f"Not enough data to compute estimated {self.display_name}.") + warnings.warn(f"Not enough data to compute estimated {self.display_name}.") + return np.full((len(self.classes), len(self.classes)), np.nan) y_pred_proba = {key: chunk_data[value] for key, value in self.y_pred_proba.items()} @@ -2704,10 +3234,18 @@ def get_chunk_record(self, chunk_data: pd.DataFrame) -> Dict: estimated_cm = self._get_multiclass_confusion_matrix_estimate(chunk_data) realized_cm = self._multi_class_confusion_matrix_realized_performance(chunk_data) - sampling_error = mse.multiclass_confusion_matrix_sampling_error( - self._confusion_matrix_sampling_error_components, - chunk_data, - ) + class_y_pred_proba_columns = model_output_column_names(self.y_pred_proba) + needed_columns = class_y_pred_proba_columns + [self.y_pred] + _list_missing(needed_columns, chunk_data) + # filter nans here + chunk_data, empty = common_nan_removal(chunk_data[needed_columns], needed_columns) + if empty: + sampling_error = np.full((len(self.classes), len(self.classes)), np.nan) + else: + sampling_error = mse.multiclass_confusion_matrix_sampling_error( + self._confusion_matrix_sampling_error_components, + chunk_data, + ) for true_class in self.classes: for pred_class in self.classes: diff --git a/nannyml/performance_estimation/direct_loss_estimation/dle.py b/nannyml/performance_estimation/direct_loss_estimation/dle.py index 68731a68..1290f3c8 100644 --- a/nannyml/performance_estimation/direct_loss_estimation/dle.py +++ 
b/nannyml/performance_estimation/direct_loss_estimation/dle.py @@ -1,6 +1,10 @@ # Author: Niels Nuyttens # # License: Apache Software License 2.0 + +"""Classs implementing Direct Loss Estimation algorithm to estimate performance for regression models.""" + + from collections import defaultdict from typing import Any, Dict, List, Optional, Union @@ -31,7 +35,9 @@ class DLE(AbstractEstimator): - """The Direct Loss Estimator (DLE) estimates the :term:`loss` resulting + """Class implementing the Direct Loss Estimation method. + + The Direct Loss Estimator (DLE) estimates the :term:`loss` resulting from the difference between the prediction and the target before the targets become known. The loss is defined from the regression performance metric specified. For all metrics used the loss function is positive. @@ -81,8 +87,7 @@ def __init__( hyperparameter_tuning_config: Optional[Dict[str, Any]] = None, thresholds: Optional[Dict[str, Threshold]] = None, ): - """ - Creates a new Direct Loss Estimator. + """Creates a new Direct Loss Estimator. Parameters ---------- @@ -268,6 +273,7 @@ def __init__( self.result: Optional[Result] = None def __str__(self): + """Get sting representing instantiated class.""" return ( f"{self.__class__.__name__}[tune_hyperparameters={self.tune_hyperparameters}, " f"metrics={[str(m) for m in self.metrics]}]" diff --git a/nannyml/performance_estimation/direct_loss_estimation/metrics.py b/nannyml/performance_estimation/direct_loss_estimation/metrics.py index 03e35a65..6d041599 100644 --- a/nannyml/performance_estimation/direct_loss_estimation/metrics.py +++ b/nannyml/performance_estimation/direct_loss_estimation/metrics.py @@ -2,8 +2,7 @@ # # License: Apache Software License 2.0 -"""A module containing the implementations of metrics estimated by -:class:`~nannyml.performance_estimation.direct_loss_estimation.dle.DLE`. +"""A module containing the implementations of metrics estimated by DLE class. The :class:`~nannyml.performance_estimation.direct_loss_estimation.dle.DLE` estimator converts a list of metric names into :class:`~nannyml.performance_estimation.direct_loss_estimation.metrics.Metric` @@ -29,9 +28,9 @@ ) from nannyml._typing import ProblemType -from nannyml.base import _raise_exception_for_negative_values, _remove_nans +from nannyml.base import _raise_exception_for_negative_values, common_nan_removal from nannyml.chunk import Chunk, Chunker -from nannyml.exceptions import InvalidArgumentsException +from nannyml.exceptions import InvalidArgumentsException, InvalidReferenceDataException from nannyml.sampling_error.regression import ( mae_sampling_error, mae_sampling_error_components, @@ -154,6 +153,7 @@ def _logger(self) -> logging.Logger: return logging.getLogger(__name__) def __str__(self): + """Get string of class name.""" return self.__class__.__name__ def fit(self, reference_data: pd.DataFrame): @@ -211,10 +211,8 @@ def sampling_error(self, data: pd.DataFrame): Returns ------- - sampling_error: float The expected sampling error. - """ return self._sampling_error(data) @@ -255,7 +253,8 @@ def alert(self, value: float) -> bool: @abc.abstractmethod def realized_performance(self, data: pd.DataFrame) -> float: - """Calculates de realized performance of a model with respect of a given chunk of data. + """Calculates the realized performance of a model with respect of a given chunk of data. + The data needs to have both prediction and real targets. 
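+        Rows with missing values in the prediction or target column are dropped before
+        the metric is calculated; NaN is returned when no complete rows remain.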
Parameters @@ -271,15 +270,6 @@ def __eq__(self, other): """Establishes equality by comparing all properties.""" return self.display_name == other.display_name and self.column_name == other.column_name - def _common_cleaning(self, data: pd.DataFrame) -> Tuple[pd.Series, Optional[pd.Series]]: - data = _remove_nans(data, [self.y_pred]) - - clean_targets = self.y_true in data.columns and not data[self.y_true].isna().all() - if clean_targets: - data = _remove_nans(data, [self.y_pred, self.y_true]) - - return data[self.y_pred], (data[self.y_true] if clean_targets else None) - def _train_direct_error_estimation_model( self, X_train: pd.DataFrame, @@ -334,6 +324,7 @@ def create(cls, key: str, problem_type: ProblemType, **kwargs) -> Metric: Parameters ---------- key: str + string representing metric key of selected metric problem_type: ProblemType Determines which method to use. Use 'regression' for regression tasks. """ @@ -359,6 +350,8 @@ def create(cls, key: str, problem_type: ProblemType, **kwargs) -> Metric: @classmethod def register(cls, metric: str, problem_type: ProblemType) -> Callable: + """Add a metric class to metric registry.""" + def inner_wrapper(wrapped_class: Type[Metric]) -> Type[Metric]: if metric in cls.registry: if problem_type in cls.registry[metric]: @@ -375,6 +368,8 @@ def inner_wrapper(wrapped_class: Type[Metric]) -> Type[Metric]: @MetricFactory.register('mae', ProblemType.REGRESSION) class MAE(Metric): + """Estimate regression performance using Mean Absolute Error metric.""" + def __init__( self, feature_column_names: List[str], @@ -443,6 +438,13 @@ def __init__( ) def _fit(self, reference_data: pd.DataFrame): + # filter nans here + reference_data, empty = common_nan_removal(reference_data, [self.y_true, self.y_pred]) + if empty: + raise InvalidReferenceDataException( + f"Cannot fit DLE for {self.display_name}, too many missing values for predictions and targets." + ) + y_true = reference_data[self.y_true] y_pred = reference_data[self.y_pred] @@ -469,31 +471,43 @@ def _estimate(self, data: pd.DataFrame): return chunk_level_estimate def _sampling_error(self, data: pd.DataFrame) -> float: - return mae_sampling_error(self._sampling_error_components, data) + # we only expect predictions to be present and estimate sampling error based on them + data, empty = common_nan_removal(data[[self.y_pred]], [self.y_pred]) + if empty: + return np.NaN + else: + return mae_sampling_error(self._sampling_error_components, data) def realized_performance(self, data: pd.DataFrame) -> float: - """Calculates de realized performance of a model with respect of a given chunk of data. + """Calculates the realized performance of a model with respect of a given chunk of data. + The data needs to have both prediction and real targets. Parameters ---------- data: pd.DataFrame The data to calculate the realized performance on. 
+ Returns ------- mae: float - Mean Absolute Error + Mean Absolute Error """ - y_pred, y_true = self._common_cleaning(data) - - if y_true is None: + if self.y_true not in data.columns: + return np.NaN + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: return np.NaN + y_true = data[self.y_true] + y_pred = data[self.y_pred] return mean_absolute_error(y_true, y_pred) @MetricFactory.register('mape', ProblemType.REGRESSION) class MAPE(Metric): + """Estimate regression performance using Mean Absolute Percentage Error metric.""" + def __init__( self, feature_column_names: List[str], @@ -562,6 +576,13 @@ def __init__( ) def _fit(self, reference_data: pd.DataFrame): + # filter nans here + reference_data, empty = common_nan_removal(reference_data, [self.y_true, self.y_pred]) + if empty: + raise InvalidReferenceDataException( + f"Cannot fit DLE for {self.display_name}, too many missing values for predictions and targets." + ) + y_true = reference_data[self.y_true] y_pred = reference_data[self.y_pred] @@ -589,31 +610,43 @@ def _estimate(self, data: pd.DataFrame): return chunk_level_estimate def _sampling_error(self, data: pd.DataFrame) -> float: - return mape_sampling_error(self._sampling_error_components, data) + # we only expect predictions to be present and estimate sampling error based on them + data, empty = common_nan_removal(data[[self.y_pred]], [self.y_pred]) + if empty: + return np.NaN + else: + return mape_sampling_error(self._sampling_error_components, data) def realized_performance(self, data: pd.DataFrame) -> float: - """Calculates de realized performance of a model with respect of a given chunk of data. + """Calculates the realized performance of a model with respect of a given chunk of data. + The data needs to have both prediction and real targets. Parameters ---------- data: pd.DataFrame The data to calculate the realized performance on. + Returns ------- - mae: float - Mean Absolute Percentage Error + mape: float + Mean Absolute Percentage Error """ - y_pred, y_true = self._common_cleaning(data) - - if y_true is None: + if self.y_true not in data.columns: + return np.NaN + data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred]) + if empty: return np.NaN + y_true = data[self.y_true] + y_pred = data[self.y_pred] return mean_absolute_percentage_error(y_true, y_pred) @MetricFactory.register('mse', ProblemType.REGRESSION) class MSE(Metric): + """Estimate regression performance using Mean Squared Error metric.""" + def __init__( self, feature_column_names: List[str], @@ -682,6 +715,13 @@ def __init__( ) def _fit(self, reference_data: pd.DataFrame): + # filter nans here + reference_data, empty = common_nan_removal(reference_data, [self.y_true, self.y_pred]) + if empty: + raise InvalidReferenceDataException( + f"Cannot fit DLE for {self.display_name}, too many missing values for predictions and targets." 
+            )
+
         y_true = reference_data[self.y_true]
         y_pred = reference_data[self.y_pred]
 
@@ -708,31 +748,42 @@ def _estimate(self, data: pd.DataFrame):
         return chunk_level_estimate
 
     def _sampling_error(self, data: pd.DataFrame) -> float:
-        return mse_sampling_error(self._sampling_error_components, data)
+        # we only expect predictions to be present and estimate sampling error based on them
+        data, empty = common_nan_removal(data[[self.y_pred]], [self.y_pred])
+        if empty:
+            return np.NaN
+        else:
+            return mse_sampling_error(self._sampling_error_components, data)
 
     def realized_performance(self, data: pd.DataFrame) -> float:
-        """Calculates de realized performance of a model with respect of a given chunk of data.
+        """Calculates the realized performance of a model with respect to a given chunk of data.
+
+        The data needs to have both predictions and real targets.
 
         Parameters
         ----------
         data: pd.DataFrame
             The data to calculate the realized performance on.
+
         Returns
         -------
-        mae: float
-            Mean Squared Error
+        mse: float
+            Mean Squared Error
         """
-        y_pred, y_true = self._common_cleaning(data)
-
-        if y_true is None:
+        if self.y_true not in data.columns:
             return np.NaN
-
+        data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred])
+        if empty:
+            return np.NaN
+        y_true = data[self.y_true]
+        y_pred = data[self.y_pred]
         return mean_squared_error(y_true, y_pred)
 
 
 @MetricFactory.register('msle', ProblemType.REGRESSION)
 class MSLE(Metric):
+    """Estimate regression performance using Mean Squared Logarithmic Error metric."""
+
     def __init__(
         self,
         feature_column_names: List[str],
@@ -801,6 +852,12 @@ def __init__(
         )
 
     def _fit(self, reference_data: pd.DataFrame):
+        # filter nans here
+        reference_data, empty = common_nan_removal(reference_data, [self.y_true, self.y_pred])
+        if empty:
+            raise InvalidReferenceDataException(
+                f"Cannot fit DLE for {self.display_name}, too many missing values for predictions and targets."
+            )
         y_true = reference_data[self.y_true]
         y_pred = reference_data[self.y_pred]
 
@@ -830,10 +887,16 @@ def _estimate(self, data: pd.DataFrame):
         return chunk_level_estimate
 
     def _sampling_error(self, data: pd.DataFrame) -> float:
-        return msle_sampling_error(self._sampling_error_components, data)
+        # we only expect predictions to be present and estimate sampling error based on them
+        data, empty = common_nan_removal(data[[self.y_pred]], [self.y_pred])
+        if empty:
+            return np.NaN
+        else:
+            return msle_sampling_error(self._sampling_error_components, data)
 
     def realized_performance(self, data: pd.DataFrame) -> float:
-        """Calculates de realized performance of a model with respect of a given chunk of data.
+        """Calculates the realized performance of a model with respect to a given chunk of data.
+
+        The data needs to have both predictions and real targets.
         Parameters
@@ -847,22 +910,23 @@ def realized_performance(self, data: pd.DataFrame) -> float:
 
         Returns
         -------
-        mae: float
-            Mean Squared Log Error
+        msle: float
+            Mean Squared Log Error
         """
-        y_pred, y_true = self._common_cleaning(data)
-
-        if y_true is None:
+        if self.y_true not in data.columns:
             return np.NaN
-
-        _raise_exception_for_negative_values(y_true)
-        _raise_exception_for_negative_values(y_pred)
-
+        data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred])
+        if empty:
+            return np.NaN
+        y_true = data[self.y_true]
+        y_pred = data[self.y_pred]
         return mean_squared_log_error(y_true, y_pred)
 
 
 @MetricFactory.register('rmse', ProblemType.REGRESSION)
 class RMSE(Metric):
+    """Estimate regression performance using Root Mean Squared Error metric."""
+
     def __init__(
         self,
         feature_column_names: List[str],
@@ -931,6 +995,13 @@ def __init__(
         )
 
     def _fit(self, reference_data: pd.DataFrame):
+        # filter nans here
+        reference_data, empty = common_nan_removal(reference_data, [self.y_true, self.y_pred])
+        if empty:
+            raise InvalidReferenceDataException(
+                f"Cannot fit DLE for {self.display_name}, too many missing values for predictions and targets."
+            )
+
         y_true = reference_data[self.y_true]
         y_pred = reference_data[self.y_pred]
 
@@ -957,10 +1028,16 @@ def _estimate(self, data: pd.DataFrame):
         return chunk_level_estimate
 
     def _sampling_error(self, data: pd.DataFrame) -> float:
-        return rmse_sampling_error(self._sampling_error_components, data)
+        # we only expect predictions to be present and estimate sampling error based on them
+        data, empty = common_nan_removal(data[[self.y_pred]], [self.y_pred])
+        if empty:
+            return np.NaN
+        else:
+            return rmse_sampling_error(self._sampling_error_components, data)
 
     def realized_performance(self, data: pd.DataFrame) -> float:
-        """Calculates de realized performance of a model with respect of a given chunk of data.
+        """Calculates the realized performance of a model with respect to a given chunk of data.
+
+        The data needs to have both predictions and real targets.
 
         Parameters
@@ -971,18 +1048,22 @@ def realized_performance(self, data: pd.DataFrame) -> float:
         Returns
         -------
         rmse: float
-            Root Mean Squared Error
+            Root Mean Squared Error
         """
-        y_pred, y_true = self._common_cleaning(data)
-
-        if y_true is None:
+        if self.y_true not in data.columns:
             return np.NaN
-
+        data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred])
+        if empty:
+            return np.NaN
+        y_true = data[self.y_true]
+        y_pred = data[self.y_pred]
         return mean_squared_error(y_true, y_pred, squared=False)
 
 
 @MetricFactory.register('rmsle', ProblemType.REGRESSION)
 class RMSLE(Metric):
+    """Estimate regression performance using Root Mean Squared Logarithmic Error metric."""
+
     def __init__(
         self,
         feature_column_names: List[str],
@@ -1051,6 +1132,13 @@ def __init__(
         )
 
     def _fit(self, reference_data: pd.DataFrame):
+        # filter nans here
+        reference_data, empty = common_nan_removal(reference_data, [self.y_true, self.y_pred])
+        if empty:
+            raise InvalidReferenceDataException(
+                f"Cannot fit DLE for {self.display_name}, too many missing values for predictions and targets."
+            )
+
         y_true = reference_data[self.y_true]
         y_pred = reference_data[self.y_pred]
 
@@ -1080,10 +1168,16 @@ def _estimate(self, data: pd.DataFrame):
         return chunk_level_estimate
 
     def _sampling_error(self, data: pd.DataFrame) -> float:
-        return rmsle_sampling_error(self._sampling_error_components, data)
+        # we only expect predictions to be present and estimate sampling error based on them
+        data, empty = common_nan_removal(data[[self.y_pred]], [self.y_pred])
+        if empty:
+            return np.NaN
+        else:
+            return rmsle_sampling_error(self._sampling_error_components, data)
 
     def realized_performance(self, data: pd.DataFrame) -> float:
-        """Calculates de realized performance of a model with respect of a given chunk of data.
+        """Calculates the realized performance of a model with respect to a given chunk of data.
+
+        The data needs to have both predictions and real targets.
 
         Parameters
@@ -1098,12 +1192,15 @@ def realized_performance(self, data: pd.DataFrame) -> float:
         Returns
         -------
         rmsle: float
-            Root Mean Squared Log Error
+            Root Mean Squared Log Error
         """
-        y_pred, y_true = self._common_cleaning(data)
-
-        if y_true is None:
+        if self.y_true not in data.columns:
+            return np.NaN
+        data, empty = common_nan_removal(data[[self.y_true, self.y_pred]], [self.y_true, self.y_pred])
+        if empty:
             return np.NaN
+        y_true = data[self.y_true]
+        y_pred = data[self.y_pred]
 
         _raise_exception_for_negative_values(y_true)
         _raise_exception_for_negative_values(y_pred)
diff --git a/nannyml/performance_estimation/direct_loss_estimation/result.py b/nannyml/performance_estimation/direct_loss_estimation/result.py
index 23c95190..a512e9a1 100644
--- a/nannyml/performance_estimation/direct_loss_estimation/result.py
+++ b/nannyml/performance_estimation/direct_loss_estimation/result.py
@@ -32,7 +32,8 @@ def __init__(
         hyperparameters: Optional[Dict[str, Any]],
         timestamp_column_name: Optional[str] = None,
     ):
-        """
+        """DLE Result Class.
+
         Parameters
         ----------
         results_data: pd.DataFrame
@@ -93,9 +94,7 @@ def __init__(
         self.hyperparameters = hyperparameters
 
     def keys(self) -> List[Key]:
-        """
-        Creates a list of keys where each Key is a `namedtuple('Key', 'properties display_names')`
-        """
+        """Creates a list of keys where each Key is a `namedtuple('Key', 'properties display_names')`."""
         return [
             Key(
                 properties=(metric.column_name,),
@@ -119,6 +118,7 @@ def plot(
         Parameters
         ----------
         kind: str, default='performance'
+            What kind of plot to create. Currently only 'performance' is supported.
Raises ------ diff --git a/nannyml/sampling_error/binary_classification.py b/nannyml/sampling_error/binary_classification.py index bcd4a0f6..c2323177 100644 --- a/nannyml/sampling_error/binary_classification.py +++ b/nannyml/sampling_error/binary_classification.py @@ -49,14 +49,13 @@ def auroc_sampling_error_components(y_true_reference: pd.Series, y_pred_proba_re ------- (std, fraction): Tuple[np.ndarray, float] """ - - y_true = y_true_reference.copy().reset_index(drop=True) - y_pred_proba = y_pred_proba_reference.copy().reset_index(drop=True) + # keep converting to numpy here for now + y_true = y_true_reference.to_numpy() + y_pred_proba = y_pred_proba_reference.to_numpy() if np.mean(y_true) > 0.5: y_true = abs(np.asarray(y_true) - 1) y_pred_proba = 1 - y_pred_proba - sorted_idx = np.argsort(y_pred_proba) y_pred_proba = y_pred_proba[sorted_idx] y_true = y_true[sorted_idx] @@ -145,9 +144,9 @@ def ap_sampling_error(sampling_error_components, data): sampling_error: float """ - reference_std, reference_size = sampling_error_components + reference_std, sample_size = sampling_error_components analysis_size = data.shape[0] - return reference_std * np.sqrt(reference_size / analysis_size) + return reference_std * np.sqrt(sample_size / analysis_size) def f1_sampling_error_components(y_true_reference: pd.Series, y_pred_reference: pd.Series) -> Tuple: @@ -387,7 +386,7 @@ def accuracy_sampling_error(sampling_error_components: Tuple, data) -> float: def true_positive_sampling_error_components( y_true_reference: pd.Series, y_pred_reference: pd.Series, normalize_confusion_matrix: Union[str, None] -) -> Tuple: +) -> Tuple[float, float, Union[str, None]]: """ Estimate sampling error components for true positive rate using reference data. Calculation is based on modified standard error of mean formula. @@ -491,7 +490,7 @@ def true_positive_sampling_error(sampling_error_components: Tuple, data) -> floa def true_negative_sampling_error_components( y_true_reference: pd.Series, y_pred_reference: pd.Series, normalize_confusion_matrix: Union[str, None] -) -> Tuple: +) -> Tuple[float, float, Union[str, None]]: """ Estimate sampling error components for true negative rate using reference data. Calculation is based on modified standard error of mean formula. @@ -595,7 +594,7 @@ def true_negative_sampling_error(sampling_error_components: Tuple, data) -> floa def false_positive_sampling_error_components( y_true_reference: pd.Series, y_pred_reference: pd.Series, normalize_confusion_matrix: Union[str, None] -) -> Tuple: +) -> Tuple[float, float, Union[str, None]]: """ Estimate sampling error components for false positive rate using reference data. Calculation is based on modified standard error of mean formula. @@ -699,7 +698,7 @@ def false_positive_sampling_error(sampling_error_components: Tuple, data) -> flo def false_negative_sampling_error_components( y_true_reference: pd.Series, y_pred_reference: pd.Series, normalize_confusion_matrix: Union[str, None] -) -> Tuple: +) -> Tuple[float, float, Union[str, None]]: """ Estimate sampling error components for false negative rate using reference data. Calculation is based on modified standard error of mean formula. @@ -806,7 +805,7 @@ def business_value_sampling_error_components( y_pred_reference: pd.Series, business_value_matrix: np.ndarray, normalize_business_value: Optional[str], -) -> Tuple: +) -> Tuple[float, Union[str, None]]: """ Estimate sampling error for the false negative rate. 
Parameters diff --git a/tests/drift/test_multiv_pca.py b/tests/drift/test_multiv_pca.py index d0114778..6626585d 100644 --- a/tests/drift/test_multiv_pca.py +++ b/tests/drift/test_multiv_pca.py @@ -569,8 +569,6 @@ def test_data_reconstruction_drift_chunked_by_size(sample_drift_data): # noqa: assert ('reconstruction_error', 'sampling_error') in results.data.columns assert np.array_equal( - np.round( - results.filter(period='analysis').to_df().loc[:, ('reconstruction_error', 'sampling_error')], - 4), + np.round(results.filter(period='analysis').to_df().loc[:, ('reconstruction_error', 'sampling_error')], 4), [0.0118, 0.0115, 0.0118], ) diff --git a/tests/performance_estimation/CBPE/test_cbpe.py b/tests/performance_estimation/CBPE/test_cbpe.py index 1748fe37..1ff7a241 100644 --- a/tests/performance_estimation/CBPE/test_cbpe.py +++ b/tests/performance_estimation/CBPE/test_cbpe.py @@ -725,11 +725,13 @@ def test_cbpe_without_predictions(): @pytest.mark.filterwarnings("ignore:Too few unique values", "ignore:'y_true' contains a single class") def test_cbpe_fitting_does_not_generate_error_when_single_class_present(): - ref_df = pd.DataFrame({ - 'y_true': [0] * 1000, - 'y_pred': [0] * 1000, - 'y_pred_proba': [0.5] * 1000, - }) + ref_df = pd.DataFrame( + { + 'y_true': [0] * 1000, + 'y_pred': [0] * 1000, + 'y_pred_proba': [0.5] * 1000, + } + ) sut = CBPE( y_true='y_true', y_pred='y_pred', @@ -746,6 +748,6 @@ def test_cbpe_fitting_does_not_generate_error_when_single_class_present(): 'business_value', ], chunk_size=100, - business_value_matrix=[[1, -1], [-1, 1]] + business_value_matrix=[[1, -1], [-1, 1]], ) sut.fit(ref_df) diff --git a/tests/sampling_error/test_binary_classification_sampling_error.py b/tests/sampling_error/test_binary_classification_sampling_error.py index 57ff6186..0a73d166 100644 --- a/tests/sampling_error/test_binary_classification_sampling_error.py +++ b/tests/sampling_error/test_binary_classification_sampling_error.py @@ -20,6 +20,16 @@ def test_auroc_sampling_error(): assert np.round(sampling_error, 4) == 0.0575 +def test_auroc_sampling_error_nan(): + np.random.seed(1) + sample_size = 50 + chunk = np.random.random(sample_size) + + components = np.NaN, np.NaN + sampling_error = bse.auroc_sampling_error(components, chunk) + assert np.isnan(sampling_error) + + def test_f1_sampling_error(): np.random.seed(1) sample_size = 50