How to correctly apply metrics API in binary use case #6356
-
How would one correctly apply the Precision metric from v1.2.0 on, with the revised metrics API? I am currently doing something like this:

```python
import torch
from pytorch_lightning import metrics

# example data
preds = [0] * 200 + [1] * 30 + [0] * 10 + [1] * 20
targets = [0] * 200 + [1] * 30 + [1] * 10 + [0] * 20
preds = torch.tensor(preds)
targets = torch.tensor(targets)

# define method for printing metrics
def _print_some_metrics(preds, targets, num_classes):
    precision = metrics.classification.Precision(
        num_classes=None, is_multiclass=False)
    recall = metrics.classification.Recall(
        num_classes=None, is_multiclass=False)
    f1 = metrics.classification.F1(num_classes=num_classes)
    f1beta = metrics.classification.FBeta(
        num_classes=num_classes,
        beta=2
    )
    accuracy = metrics.classification.Accuracy()
    avg_precision = metrics.classification.AveragePrecision(
        num_classes=None)
    confusion_matrix = metrics.ConfusionMatrix(num_classes=2)

    # print results
    print("Precision:\n{}\n".format(precision(preds, targets)))
    print("Recall:\n{}\n".format(recall(preds, targets)))
    print("F1:\n{}\n".format(f1(preds, targets)))
    print("F1-Beta:\n{}\n".format(f1beta(preds, targets)))
    print("AVG Precision:\n{}\n".format(avg_precision(preds, targets)))
    print("Accuracy:\n{}\n".format(accuracy(preds, targets)))
    print("ConfMat:\n{}\n".format(confusion_matrix(preds, targets)))

_print_some_metrics(preds, targets, num_classes=2)
```

Which gives me these results:
However, when calculating precision by hand (TP / (TP + FP)) with the numbers from the contingency table, I get 30 / 50 = 0.6. Why does applying the precision class result in this (small) deviation? Further, when logging the metrics on epoch_end steps inside my model, I am not able to reproduce the logged precision, recall or accuracy numbers on the validation set with the output from the contingency table, logged on the same steps (I haven't validated the other metrics yet by hand). It would be great to get some help on how to correctly apply the new metrics API for a binary use case.
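As a sanity check, the contingency counts in the question can be reproduced directly from the example tensors with plain tensor arithmetic, independent of the metrics API:

```python
import torch

preds = torch.tensor([0] * 200 + [1] * 30 + [0] * 10 + [1] * 20)
targets = torch.tensor([0] * 200 + [1] * 30 + [1] * 10 + [0] * 20)

# contingency counts for the positive class
tp = ((preds == 1) & (targets == 1)).sum().item()  # 30
fp = ((preds == 1) & (targets == 0)).sum().item()  # 20
fn = ((preds == 0) & (targets == 1)).sum().item()  # 10
tn = ((preds == 0) & (targets == 0)).sum().item()  # 200

print("precision:", tp / (tp + fp))  # 30 / 50 = 0.6
print("recall:", tp / (tp + fn))     # 30 / 40 = 0.75
```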
-
For binary classification, where you are only interested in the positive class, you should pass in `num_classes=1`. Here is your corrected code:

```python
def _print_some_metrics(preds, targets, num_classes):
    precision = metrics.classification.Precision(
        num_classes=num_classes, is_multiclass=False)
    recall = metrics.classification.Recall(
        num_classes=num_classes, is_multiclass=False)
    f1 = metrics.classification.F1(num_classes=num_classes)
    f1beta = metrics.classification.FBeta(
        num_classes=num_classes,
        beta=2
    )
    accuracy = metrics.classification.Accuracy()
    avg_precision = metrics.classification.AveragePrecision(
        num_classes=num_classes)
    confusion_matrix = metrics.ConfusionMatrix(num_classes=2)

    # print results
    print("Precision:\n{}\n".format(precision(preds, targets)))
    print("Recall:\n{}\n".format(recall(preds, targets)))
    print("F1:\n{}\n".format(f1(preds, targets)))
    print("F1-Beta:\n{}\n".format(f1beta(preds, targets)))
    print("AVG Precision:\n{}\n".format(avg_precision(preds, targets)))
    print("Accuracy:\n{}\n".format(accuracy(preds, targets)))
    print("ConfMat:\n{}\n".format(confusion_matrix(preds, targets)))

_print_some_metrics(preds, targets, num_classes=1)
```
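For reference, with `num_classes=1` the precision and recall are computed for the positive class only, so on the example data above the printed values should line up with the hand calculation. The expected numbers below are derived from the contingency counts rather than from an actual run, and the confusion-matrix layout assumes rows are targets and columns are predictions:

```python
# TP = 30, FP = 20, FN = 10, TN = 200 (from the example data)
# Precision = 30 / (30 + 20) = 0.60   -> matches the hand-computed value
# Recall    = 30 / (30 + 10) = 0.75
# Accuracy  = (30 + 200) / 260 ≈ 0.8846
# ConfMat   = [[200,  20],
#              [ 10,  30]]
```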