Skip to content

Commit

Permalink
remove unused code in metrics
Browse files Browse the repository at this point in the history
  • Loading branch information
xuhongzuo committed May 25, 2024
1 parent 6a2118b commit af4db6c
Show file tree
Hide file tree
Showing 9 changed files with 21 additions and 1,421 deletions.
20 changes: 10 additions & 10 deletions deepod/metrics/_anomaly_detection.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
from deepod.metrics.affiliation.generics import convert_vector_to_events
from deepod.metrics.vus.metrics import get_range_vus_roc
from deepod.metrics.affiliation.metrics import pr_from_events
from deepod.metrics._tsad_adjustment import point_adjustment


def auc_roc(y_true, y_score):
Expand Down Expand Up @@ -169,19 +170,19 @@ def ts_metrics_enhanced(y_true, y_score, y_test):
tuple: A tuple containing:
- auroc (float):
The score of the area under the ROC curve.
The score of the area under the ROC curve after point adjustment.
- aupr (float):
The score of the area under the precision-recall curve.
The score of the area under the precision-recall curve after point adjustment.
- best_f1 (float):
The best score of F1-score.
The best score of F1-score after point adjustment.
- best_p (float):
The best score of precision.
The best score of precision after point adjustment.
- best_r (float):
The best score of recall.
The best score of recall after point adjustment.
- affiliation_precision (float):
The score of affiliation precision.
Expand All @@ -202,16 +203,15 @@ def ts_metrics_enhanced(y_true, y_score, y_test):
The score of VUS PR.
"""

best_f1, best_p, best_r = get_best_f1(y_true, y_score)
auroc = auc_roc(y_true, point_adjustment(y_true, y_score))
aupr = auc_pr(y_true, point_adjustment(y_true, y_score))
best_f1, best_p, best_r = get_best_f1(y_true, point_adjustment(y_true, y_score))

events_pred = convert_vector_to_events(y_test)
events_gt = convert_vector_to_events(y_true)
Trange = (0, len(y_test))
affiliation = pr_from_events(events_pred, events_gt, Trange)
vus_results = get_range_vus_roc(y_score, y_true, 100) # default slidingWindow = 100

auroc = auc_roc(y_true, y_score)
aupr = auc_pr(y_true, y_score)
vus_results = get_range_vus_roc(y_score, y_true, slidingWindow=100) # default slidingWindow = 100

affiliation_precision = affiliation['Affiliation_Precision']
affiliation_recall = affiliation['Affiliation_Recall']
Expand Down
Empty file.
323 changes: 0 additions & 323 deletions deepod/metrics/vus/analysis/robustness_eval.py

This file was deleted.

12 changes: 7 additions & 5 deletions deepod/metrics/vus/metrics.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,13 @@
from .utils.metrics import metricor
from .analysis.robustness_eval import generate_curve
from deepod.metrics.vus.utils.metrics import metricor


def get_range_vus_roc(score, labels, slidingWindow):
    """Compute range-based AUC and volume-under-the-surface (VUS) metrics.

    Parameters
    ----------
    score : array-like
        Per-time-step anomaly scores.
    labels : array-like
        Ground-truth binary anomaly labels, aligned with ``score``.
    slidingWindow : int
        Window size for the range-based AUC metrics; the VUS metrics use
        ``2 * slidingWindow`` as their window size (matching the original
        implementation).

    Returns
    -------
    dict
        ``{'R_AUC_ROC', 'R_AUC_PR', 'VUS_ROC', 'VUS_PR'}`` mapping metric
        names to their float values.
    """
    # Range-based ROC/PR AUC; RangeAUC returns extra curve data we discard.
    R_AUC_ROC, R_AUC_PR, _, _, _ = metricor().RangeAUC(labels=labels, score=score,
                                                       window=slidingWindow, plot_ROC=True)
    # VUS metrics are computed on a fresh metricor instance, exactly as in
    # the post-commit code (RangeAUC_volume replaces the deleted
    # generate_curve helper from analysis.robustness_eval).
    VUS_ROC, VUS_PR = metricor().RangeAUC_volume(labels_original=labels,
                                                 score=score,
                                                 windowSize=2 * slidingWindow)

    metrics = {'R_AUC_ROC': R_AUC_ROC, 'R_AUC_PR': R_AUC_PR,
               'VUS_ROC': VUS_ROC, 'VUS_PR': VUS_PR}

    return metrics
Empty file.
Loading

0 comments on commit af4db6c

Please sign in to comment.