From bc39a99a6342c88a89cd160d6de6cf167b3d4a0d Mon Sep 17 00:00:00 2001
From: Bryce Kalmbach
Date: Wed, 10 Mar 2021 16:40:18 -0800
Subject: [PATCH 1/5] Adding DonutDetector file and tests.

---
 python/lsst/ts/wep/DonutDetector.py          | 147 ++++++++++++++++++
 .../ts/wep/cwfs/CentroidConvolveTemplate.py  |  67 +++++---
 tests/cwfs/test_centroidConvolveTemplate.py  |  26 +++-
 tests/test_donutDetector.py                  | 109 +++++++++++++
 4 files changed, 328 insertions(+), 21 deletions(-)
 create mode 100644 python/lsst/ts/wep/DonutDetector.py
 create mode 100644 tests/test_donutDetector.py

diff --git a/python/lsst/ts/wep/DonutDetector.py b/python/lsst/ts/wep/DonutDetector.py
new file mode 100644
index 000000000..17d28fd9d
--- /dev/null
+++ b/python/lsst/ts/wep/DonutDetector.py
@@ -0,0 +1,147 @@
+# This file is part of ts_wep.
+#
+# Developed for the LSST Telescope and Site Systems.
+# This product includes software developed by the LSST Project
+# (https://www.lsst.org).
+# See the COPYRIGHT file at the top-level directory of this distribution
+# for details of code ownership.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+import numpy as np
+import pandas as pd
+from copy import copy
+
+
+from lsst.ts.wep.Utility import CentroidFindType
+from lsst.ts.wep.cwfs.CentroidFindFactory import CentroidFindFactory
+from scipy.spatial.distance import cdist
+
+
+class DonutDetector(object):
+
+    """Class to detect donuts directly from an out of focus image."""
+
+    def detectDonuts(
+        self, expArray, template, blendRadius, peakThreshold=0.95, dbscanEps=5
+    ):
+
+        """
+        Detect and categorize donut sources as blended/unblended
+
+        Parameters
+        ----------
+        expArray: numpy ndarray
+            The input image data
+        template: numpy ndarray
+            Donut template appropriate for the image
+        blendRadius: float
+            Minimum distance in pixels two donut centers need to
+            be apart in order to be tagged as unblended
+        peakThreshold: float, optional
+            This value specifies a number between 0 and 1 that is
+            the fraction of the highest pixel value in the convolved image.
+            The code then sets all pixels with a value below this to 0 before
+            running the K-means algorithm to find peaks that represent possible
+            donut locations. (The default is 0.95)
+        dbscanEps: float, optional
+            Maximum distance the scikit-learn DBSCAN algorithm allows "between
+            two samples for one to be considered in the neighborhood of the
+            other". (The default is 5.0)
+
+        Returns
+        -------
+        pandas dataframe
+            Dataframe identifying donut positions and if they
+            are blended with other donuts. If blended also identifies
+            which donuts are blended with which.
+        """
+
+        centroidFinder = CentroidFindFactory.createCentroidFind(
+            CentroidFindType.ConvolveTemplate
+        )
+        binaryExp = centroidFinder.getImgBinary(copy(expArray))
+        centroidX, centroidY, donutRad = centroidFinder.getCenterAndRfromTemplateConv(
+            binaryExp,
+            templateImgBinary=template,
+            nDonuts=-1,
+            peakThreshold=peakThreshold,
+            dbscanEps=dbscanEps,
+        )
+
+        donutDf = pd.DataFrame(
+            np.array([centroidX, centroidY]).T, columns=["x_center", "y_center"]
+        )
+        donutDf = self.labelUnblended(donutDf, blendRadius)
+
+        return donutDf
+
+    def labelUnblended(self, donutDf, blendRadius):
+
+        """
+        Label donuts as blended/unblended if the centroids are within
+        the blendRadius number of pixels.
+
+        Parameters
+        ----------
+        donutDf: pandas dataframe
+            Dataframe identifying donut positions with labels
+            'x_center' and 'y_center'.
+        blendRadius: float
+            Minimum distance in pixels two donut centers need to
+            be apart in order to be tagged as unblended
+
+        Returns
+        -------
+        pandas dataframe
+            Dataframe identifying donut positions and if they
+            are blended with other donuts. If blended also identifies
+            which donuts are blended with which.
+        """
+
+        # Find distances between each pair of objects
+        donutCenters = [donutDf["x_center"].values, donutDf["y_center"].values]
+        donutCenters = np.array(donutCenters).T
+        distMatrix = cdist(donutCenters, donutCenters)
+        # Don't need repeats of each pair
+        distMatrixUpper = np.triu(distMatrix)
+
+        # Identify blended pairs of objects by distance
+        blendedPairs = np.array(
+            np.where((distMatrixUpper > 0.0) & (distMatrixUpper < blendRadius))
+        ).T
+        blendedCenters = np.unique(blendedPairs.flatten())
+
+        # Add blended information into dataframe
+        donutDf["blended"] = False
+        donutDf.loc[blendedCenters, "blended"] = True
+        donutDf["blended_with"] = None
+        for i, j in blendedPairs:
+            if donutDf.loc[i, "blended_with"] is None:
+                donutDf.at[i, "blended_with"] = []
+            if donutDf.loc[j, "blended_with"] is None:
+                donutDf.at[j, "blended_with"] = []
+            donutDf.loc[i, "blended_with"].append(j)
+            donutDf.loc[j, "blended_with"].append(i)
+
+        # Count the number of other donuts overlapping
+        # each donut
+        donutDf["num_blended_neighbors"] = 0
+        for i in range(len(donutDf)):
+            if donutDf["blended_with"].iloc[i] is None:
+                continue
+
+            donutDf.at[i, "num_blended_neighbors"] = len(donutDf["blended_with"].loc[i])
+
+        return donutDf
diff --git a/python/lsst/ts/wep/cwfs/CentroidConvolveTemplate.py b/python/lsst/ts/wep/cwfs/CentroidConvolveTemplate.py
index 5df4e1eaf..028bb2f49 100644
--- a/python/lsst/ts/wep/cwfs/CentroidConvolveTemplate.py
+++ b/python/lsst/ts/wep/cwfs/CentroidConvolveTemplate.py
@@ -24,7 +24,7 @@
 from lsst.ts.wep.cwfs.CentroidDefault import CentroidDefault
 from lsst.ts.wep.cwfs.CentroidRandomWalk import CentroidRandomWalk
 from scipy.signal import correlate
-from sklearn.cluster import KMeans
+from sklearn.cluster import KMeans, DBSCAN
 
 
 class CentroidConvolveTemplate(CentroidDefault):
@@ -128,7 +128,12 @@ def getCenterAndRfromImgBinary(
         return x[0], y[0], radius
 
     def getCenterAndRfromTemplateConv(
-        self, imageBinary, templateImgBinary=None, nDonuts=1, peakThreshold=0.95
+        self,
+        imageBinary,
+        templateImgBinary=None,
+        nDonuts=1,
+        peakThreshold=0.95,
+        dbscanEps=5.0,
     ):
         """
         Get the centers of the donuts by convolving a binary template image
@@ -147,14 +152,22 @@ def getCenterAndRfromTemplateConv(
             Binary image of template donut. If set to None then the image
             is convolved with itself. (The default is None)
         nDonuts: int, optional
-            Number of donuts there should be in the binary image. Needs to
-            be >= 1. (The default is 1)
+            Number of donuts there should be in the binary image. If the number
+            is >= 1 then K-Means clustering will be used to return the
+            specified number of donut centers. However, this can also be set to
+            -1 if the number of donuts is unknown and it will perform DBSCAN
+            clustering to find and return a set of donut centers.
+            (The default is 1)
         peakThreshold: float, optional
             This value specifies a number between 0 and 1 that is
             the fraction of the highest pixel value in the convolved image.
             The code then sets all pixels with a value below this to 0 before
             running the K-means algorithm to find peaks that represent possible
             donut locations. (The default is 0.95)
+        dbscanEps: float, optional
+            Maximum distance the scikit-learn DBSCAN algorithm allows "between
+            two samples for one to be considered in the neighborhood of the
+            other". (The default is 5.0)
 
         Returns
         -------
@@ -169,8 +182,10 @@ def getCenterAndRfromTemplateConv(
         if templateImgBinary is None:
             templateImgBinary = copy(imageBinary)
 
-        nDonutsAssertStr = "nDonuts must be an integer >= 1"
-        assert (nDonuts >= 1) & (type(nDonuts) is int), nDonutsAssertStr
+        nDonutsAssertStr = "nDonuts must be an integer >= 1 or -1"
+        assert ((nDonuts >= 1) | (nDonuts == -1)) & (
+            type(nDonuts) is int
+        ), nDonutsAssertStr
 
         # We set the mode to be "same" because we need to return the same
         # size image to the code.
@@ -185,20 +200,36 @@ def getCenterAndRfromTemplateConv(
         rankedConvolveCutoff = rankedConvolve[:cutoff]
         nx, ny = np.unravel_index(rankedConvolveCutoff, np.shape(imageBinary))
 
-        # Then to find peaks in the image we use K-Means with the
-        # specified number of donuts
-        kmeans = KMeans(n_clusters=nDonuts)
-        labels = kmeans.fit_predict(np.array([nx, ny]).T)
-
-        # Then in each cluster we take the brightest pixel as the centroid
+        # Donut centers lists
         centX = []
         centY = []
-        for labelNum in range(nDonuts):
-            nxLabel, nyLabel = np.unravel_index(
-                rankedConvolveCutoff[labels == labelNum][0], np.shape(imageBinary)
-            )
-            centX.append(nxLabel)
-            centY.append(nyLabel)
+
+        if nDonuts >= 1:
+            # Then to find peaks in the image we use K-Means with the
+            # specified number of donuts
+            kmeans = KMeans(n_clusters=nDonuts)
+            labels = kmeans.fit_predict(np.array([nx, ny]).T)
+
+            # Then in each cluster we take the brightest pixel as the centroid
+            for labelNum in range(nDonuts):
+                nxLabel, nyLabel = np.unravel_index(
+                    rankedConvolveCutoff[labels == labelNum][0], np.shape(imageBinary)
+                )
+                centX.append(nxLabel)
+                centY.append(nyLabel)
+        elif nDonuts == -1:
+            # Use DBSCAN to find clusters of points when the
+            # number of donuts is unknown
+            labels = DBSCAN(eps=dbscanEps).fit_predict(np.array([ny, nx]).T)
+
+            # Save the centroid as the brightest pixel
+            # within each identified cluster
+            for labelNum in np.unique(labels):
+                nxLabel, nyLabel = np.unravel_index(
+                    rankedConvolveCutoff[labels == labelNum][0], np.shape(imageBinary)
+                )
+                centX.append(nxLabel)
+                centY.append(nyLabel)
 
         # Get the radius of the donut from the template image
         radius = np.sqrt(np.sum(templateImgBinary) / np.pi)
diff --git a/tests/cwfs/test_centroidConvolveTemplate.py b/tests/cwfs/test_centroidConvolveTemplate.py
index 469962686..c87cfdadb 100644
--- a/tests/cwfs/test_centroidConvolveTemplate.py
+++ b/tests/cwfs/test_centroidConvolveTemplate.py
@@ -150,7 +150,7 @@ def testNDonutsAssertion(self):
 
         singleDonut, doubleDonut, eff_radius = self._createData(20, 40, 160)
 
-        nDonutsAssertMsg = "nDonuts must be an integer >= 1"
+        nDonutsAssertMsg = "nDonuts must be an integer >= 1 or -1"
         with self.assertRaises(AssertionError, msg=nDonutsAssertMsg):
             cX, cY, rad = self.centroidConv.getCenterAndRfromTemplateConv(
                 singleDonut, nDonuts=0
@@ -158,7 +158,7 @@ def testNDonutsAssertion(self):
 
         with self.assertRaises(AssertionError, msg=nDonutsAssertMsg):
             cX, cY, rad = self.centroidConv.getCenterAndRfromTemplateConv(
-                singleDonut, nDonuts=-1
+                singleDonut, nDonuts=-2
             )
 
         with self.assertRaises(AssertionError, msg=nDonutsAssertMsg):
@@ -166,7 +166,7 @@ def testNDonutsAssertion(self):
                 singleDonut, nDonuts=1.5
             )
 
-    def testGetCenterAndRFromTemplateConv(self):
+    def testGetCenterAndRFromTemplateConvKMeans(self):
 
         singleDonut, doubleDonut, eff_radius = self._createData(20, 40, 160)
 
@@ -186,6 +186,26 @@ def testGetCenterAndRFromTemplateConv(self):
         self.assertEqual(doubleCY, [80.0, 80.0])
         self.assertAlmostEqual(rad, eff_radius, delta=0.1)
 
+    def testGetCenterAndRFromTemplateConvDBSCAN(self):
+
+        singleDonut, doubleDonut, eff_radius = self._createData(20, 40, 160)
+
+        # Test recovery of single donut
+        singleCX, singleCY, rad = self.centroidConv.getCenterAndRfromTemplateConv(
+            singleDonut, nDonuts=-1
+        )
+        self.assertEqual(singleCX, [80.0])
+        self.assertEqual(singleCY, [80.0])
+        self.assertAlmostEqual(rad, eff_radius, delta=0.1)
+
+        # Test recovery of two donuts at once
+        doubleCX, doubleCY, rad = self.centroidConv.getCenterAndRfromTemplateConv(
+            doubleDonut, templateImgBinary=singleDonut, nDonuts=-1
+        )
+        self.assertCountEqual(doubleCX, [50.0, 110.0])
+        self.assertEqual(doubleCY, [80.0, 80.0])
+        self.assertAlmostEqual(rad, eff_radius, delta=0.1)
+
 
 
 if __name__ == "__main__":
diff --git a/tests/test_donutDetector.py b/tests/test_donutDetector.py
new file mode 100644
index 000000000..1f777f2dd
--- /dev/null
+++ b/tests/test_donutDetector.py
@@ -0,0 +1,109 @@
+# This file is part of ts_wep.
+#
+# Developed for the LSST Telescope and Site Systems.
+# This product includes software developed by the LSST Project
+# (https://www.lsst.org).
+# See the COPYRIGHT file at the top-level directory of this distribution
+# for details of code ownership.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+import unittest
+import numpy as np
+import pandas as pd
+
+from lsst.ts.wep.DonutDetector import DonutDetector
+from lsst.ts.wep.cwfs.DonutTemplateFactory import DonutTemplateFactory
+from lsst.ts.wep.Utility import DonutTemplateType, DefocalType
+
+
+class TestDonutDetector(unittest.TestCase):
+    """Test the DonutDetector class."""
+
+    def setUp(self):
+
+        self.donutDetector = DonutDetector()
+
+    def _makeData(self, imgSize, templateSize, donutSep):
+
+        templateMaker = DonutTemplateFactory.createDonutTemplate(
+            DonutTemplateType.Model
+        )
+        template = templateMaker.makeTemplate(
+            "R22_S11", DefocalType.Extra, templateSize
+        )
+        templateHalfWidth = int(templateSize / 2)
+
+        blendedImg = np.zeros((imgSize, imgSize))
+        center = int(imgSize / 2)
+        leftCenter = int(center - donutSep / 2)
+        rightCenter = int(center + donutSep / 2)
+
+        # Place two template images to left and right along center line
+        # separated by donutSep
+        blendedImg[
+            leftCenter - templateHalfWidth : leftCenter + templateHalfWidth,
+            center - templateHalfWidth : center + templateHalfWidth,
+        ] += template
+        blendedImg[
+            rightCenter - templateHalfWidth : rightCenter + templateHalfWidth,
+            center - templateHalfWidth : center + templateHalfWidth,
+        ] += template
+        # Make binary image again after overlapping areas sum
+        blendedImg[blendedImg > 1] = 1
+
+        return template, blendedImg
+
+    def testLabelUnblended(self):
+
+        testDataFrame = pd.DataFrame()
+        testDataFrame["x_center"] = [50.0, 100.0, 120.0]
+        testDataFrame["y_center"] = [100.0, 100.0, 100.0]
+
+        labeledDf = self.donutDetector.labelUnblended(testDataFrame, 30.0)
+
+        self.assertCountEqual(
+            labeledDf.columns,
+            [
+                "x_center",
+                "y_center",
+                "blended",
+                "blended_with",
+                "num_blended_neighbors",
+            ],
+        )
+        np.testing.assert_array_equal(labeledDf["blended"], [False, True, True])
+        np.testing.assert_array_equal(labeledDf["blended_with"], [None, [2], [1]])
+        np.testing.assert_array_equal(labeledDf["num_blended_neighbors"], [0, 1, 1])
+
+    def testDetectDonuts(self):
+
+        template, testImg = self._makeData(480, 160, 60)
+        donutDf = self.donutDetector.detectDonuts(testImg, template, 126)
+
+        self.assertCountEqual(
+            donutDf.columns,
+            [
+                "x_center",
+                "y_center",
+                "blended",
+                "blended_with",
+                "num_blended_neighbors",
+            ],
+        )
+        self.assertCountEqual(donutDf["x_center"], [270, 210])
+        self.assertCountEqual(donutDf["y_center"], [240, 240])
+        self.assertCountEqual(donutDf["blended"], [True, True])
+        np.testing.assert_array_equal(list(donutDf["blended_with"]), [[1], [0]])
+        self.assertCountEqual(donutDf["num_blended_neighbors"], [1, 1])

From 0ff0c8855cee011847cc5a979d0c3315e324932e Mon Sep 17 00:00:00 2001
From: Bryce Kalmbach
Date: Thu, 11 Mar 2021 15:22:53 -0800
Subject: [PATCH 2/5] Update wepClass.uml

---
 doc/uml/wepClass.uml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/doc/uml/wepClass.uml b/doc/uml/wepClass.uml
index 6ea00c439..c70a9b299 100644
--- a/doc/uml/wepClass.uml
+++ b/doc/uml/wepClass.uml
@@ -1,5 +1,6 @@
 @startuml
 class DonutImageCheck
+class DonutDetector
 WepController *-- ButlerWrapper
 WepController *-- CamDataCollector
 WepController *-- CamIsrWrapper

From 1cf9fa0b7cccf41951e71b86a1af798a53758e8d Mon Sep 17 00:00:00 2001
From: Bryce Kalmbach
Date: Thu, 11 Mar 2021 16:26:37 -0800
Subject: [PATCH 3/5] Update versionHistory and docstring in DonutDetector.

---
 doc/versionHistory.rst              | 8 ++++++++
 python/lsst/ts/wep/DonutDetector.py | 5 ++++-
 2 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/doc/versionHistory.rst b/doc/versionHistory.rst
index 7c36b0542..b7c3560c0 100644
--- a/doc/versionHistory.rst
+++ b/doc/versionHistory.rst
@@ -6,6 +6,14 @@
 Version History
 ##################
 
+.. _lsst.ts.wep-1.5.5:
+
+-------------
+1.5.5
+-------------
+
+* Add `DonutDetector` class.
+
 .. _lsst.ts.wep-1.5.4:
 
 -------------
diff --git a/python/lsst/ts/wep/DonutDetector.py b/python/lsst/ts/wep/DonutDetector.py
index 17d28fd9d..9944b4cb3 100644
--- a/python/lsst/ts/wep/DonutDetector.py
+++ b/python/lsst/ts/wep/DonutDetector.py
@@ -31,7 +31,10 @@
 
 class DonutDetector(object):
 
-    """Class to detect donuts directly from an out of focus image."""
+    """
+    Class to detect donuts directly from an out of focus image
+    by convolution with a template image.
+    """
 
     def detectDonuts(
         self, expArray, template, blendRadius, peakThreshold=0.95, dbscanEps=5

From f687571f5ebaaf830c88b2ee40c4417fbb3c2290 Mon Sep 17 00:00:00 2001
From: Bryce Kalmbach
Date: Fri, 12 Mar 2021 11:49:37 -0800
Subject: [PATCH 4/5] Update content.rst with DonutDetector

---
 doc/content.rst | 1 +
 1 file changed, 1 insertion(+)

diff --git a/doc/content.rst b/doc/content.rst
index 9673aa0ed..08d5d9cd1 100644
--- a/doc/content.rst
+++ b/doc/content.rst
@@ -33,6 +33,7 @@ This module is a high-level module to use other modules.
 * **ParamReader**: Parameter reader class to read the yaml configuration files used in the calculation.
 * **DonutImageCheck**: Donut image check class to judge the donut image is effective or not.
 * **CreatePhosimDonutTemplates**: Create donut templates on camera detectors using Phosim. See :doc:`here ` for more information on generating Phosim donut templates.
+* **DonutDetector**: Detect donuts directly from an out of focus image by convolution with a template image.
 
 .. _lsst.ts.wep-modules_wep_bsc:

From 7dd864f40cc69d443de9858b63c9f3bd2cf98c2f Mon Sep 17 00:00:00 2001
From: Bryce Kalmbach
Date: Fri, 12 Mar 2021 11:50:25 -0800
Subject: [PATCH 5/5] Update DonutDetector after review comments.

---
 python/lsst/ts/wep/DonutDetector.py | 47 ++++++++++++++---------------
 tests/test_donutDetector.py         |  4 +--
 2 files changed, 25 insertions(+), 26 deletions(-)

diff --git a/python/lsst/ts/wep/DonutDetector.py b/python/lsst/ts/wep/DonutDetector.py
index 9944b4cb3..4eb274246 100644
--- a/python/lsst/ts/wep/DonutDetector.py
+++ b/python/lsst/ts/wep/DonutDetector.py
@@ -30,7 +30,6 @@
 
 
 class DonutDetector(object):
-
     """
     Class to detect donuts directly from an out of focus image
    by convolution with a template image.
@@ -39,19 +38,18 @@ class DonutDetector(object):
     def detectDonuts(
         self, expArray, template, blendRadius, peakThreshold=0.95, dbscanEps=5
     ):
-
         """
         Detect and categorize donut sources as blended/unblended
 
         Parameters
         ----------
-        expArray: numpy ndarray
-            The input image data
-        template: numpy ndarray
-            Donut template appropriate for the image
+        expArray: numpy.ndarray
+            The input image data.
+        template: numpy.ndarray
+            Donut template appropriate for the image.
         blendRadius: float
             Minimum distance in pixels two donut centers need to
-            be apart in order to be tagged as unblended
+            be apart in order to be tagged as unblended.
         peakThreshold: float, optional
             This value specifies a number between 0 and 1 that is
             the fraction of the highest pixel value in the convolved image.
@@ -65,7 +63,7 @@ def detectDonuts(
 
         Returns
         -------
-        pandas dataframe
+        pandas.DataFrame
             Dataframe identifying donut positions and if they
             are blended with other donuts. If blended also identifies
             which donuts are blended with which.
@@ -86,28 +84,27 @@ def detectDonuts(
         donutDf = pd.DataFrame(
             np.array([centroidX, centroidY]).T, columns=["x_center", "y_center"]
         )
-        donutDf = self.labelUnblended(donutDf, blendRadius)
+        donutDf = self.identifyBlendedDonuts(donutDf, blendRadius)
 
         return donutDf
 
-    def labelUnblended(self, donutDf, blendRadius):
-
+    def identifyBlendedDonuts(self, donutDf, blendRadius):
         """
         Label donuts as blended/unblended if the centroids are within
         the blendRadius number of pixels.
 
         Parameters
         ----------
-        donutDf: pandas dataframe
+        donutDf: pandas.DataFrame
             Dataframe identifying donut positions with labels
             'x_center' and 'y_center'.
         blendRadius: float
             Minimum distance in pixels two donut centers need to
-            be apart in order to be tagged as unblended
+            be apart in order to be tagged as unblended.
 
         Returns
         -------
-        pandas dataframe
+        pandas.DataFrame
             Dataframe identifying donut positions and if they
             are blended with other donuts. If blended also identifies
             which donuts are blended with which.
@@ -130,21 +127,23 @@ def labelUnblended(self, donutDf, blendRadius):
         donutDf["blended"] = False
         donutDf.loc[blendedCenters, "blended"] = True
         donutDf["blended_with"] = None
-        for i, j in blendedPairs:
-            if donutDf.loc[i, "blended_with"] is None:
-                donutDf.at[i, "blended_with"] = []
-            if donutDf.loc[j, "blended_with"] is None:
-                donutDf.at[j, "blended_with"] = []
-            donutDf.loc[i, "blended_with"].append(j)
-            donutDf.loc[j, "blended_with"].append(i)
+        for donutOne, donutTwo in blendedPairs:
+            if donutDf.loc[donutOne, "blended_with"] is None:
+                donutDf.at[donutOne, "blended_with"] = []
+            if donutDf.loc[donutTwo, "blended_with"] is None:
+                donutDf.at[donutTwo, "blended_with"] = []
+            donutDf.loc[donutOne, "blended_with"].append(donutTwo)
+            donutDf.loc[donutTwo, "blended_with"].append(donutOne)
 
         # Count the number of other donuts overlapping
         # each donut
         donutDf["num_blended_neighbors"] = 0
-        for i in range(len(donutDf)):
-            if donutDf["blended_with"].iloc[i] is None:
+        for donutIdx in range(len(donutDf)):
+            if donutDf["blended_with"].iloc[donutIdx] is None:
                 continue
 
-            donutDf.at[i, "num_blended_neighbors"] = len(donutDf["blended_with"].loc[i])
+            donutDf.at[donutIdx, "num_blended_neighbors"] = len(
+                donutDf["blended_with"].loc[donutIdx]
+            )
 
         return donutDf
diff --git a/tests/test_donutDetector.py b/tests/test_donutDetector.py
index 1f777f2dd..9ecffefb8 100644
--- a/tests/test_donutDetector.py
+++ b/tests/test_donutDetector.py
@@ -65,13 +65,13 @@ def _makeData(self, imgSize, templateSize, donutSep):
 
         return template, blendedImg
 
-    def testLabelUnblended(self):
+    def testIdentifyBlendedDonuts(self):
 
         testDataFrame = pd.DataFrame()
         testDataFrame["x_center"] = [50.0, 100.0, 120.0]
         testDataFrame["y_center"] = [100.0, 100.0, 100.0]
 
-        labeledDf = self.donutDetector.labelUnblended(testDataFrame, 30.0)
+        labeledDf = self.donutDetector.identifyBlendedDonuts(testDataFrame, 30.0)
 
         self.assertCountEqual(
             labeledDf.columns,