-"""Traditional image processing functions"""
+"""Traditional image processing functions."""

import cv2
import numpy as np
from PIL import Image

-def get_limits(color:list,
-               h_range:list = [10,10],
-               s_range:list = [100,255],
-               v_range:list = [100,255]):
-    """Get limits for color thresholding
+
+def get_limits(
+    color: list,
+    h_range: list = [10, 10],
+    s_range: list = [100, 255],
+    v_range: list = [100, 255],
+):
+    """Get limits for color thresholding.
+
    Args:
        color: Color in BGR to be thresholded.
        h_range: Range of Hue channels.
@@ -21,16 +25,21 @@ def get_limits(color:list,
    # convert color to HSV space
    bgr = np.uint8([[color]])
    hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)
-
+
    # Calculate the limits
-    lowerLimit = np.array((hsv[0][0][0] - h_range[0], s_range[0], v_range[0]), dtype=np.uint8)
-    upperLimit = np.array((hsv[0][0][0] + h_range[1], s_range[1], v_range[1]), dtype=np.uint8)
-
+    lowerLimit = np.array(
+        (hsv[0][0][0] - h_range[0], s_range[0], v_range[0]), dtype=np.uint8
+    )
+    upperLimit = np.array(
+        (hsv[0][0][0] + h_range[1], s_range[1], v_range[1]), dtype=np.uint8
+    )
+
    return lowerLimit, upperLimit
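
A minimal usage sketch of get_limits paired with cv2.inRange, assuming any BGR frame (the file name and the sampled BGR value below are illustrative only):

    frame = cv2.imread("frame.png")               # any BGR image
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)  # thresholding happens in HSV space
    lower, upper = get_limits([55, 232, 254])     # BGR color to isolate
    mask = cv2.inRange(hsv, lower, upper)         # 255 where pixels fall inside the limits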


def get_threshold(img):
-    """Get threshold for Canny edge detection
+    """Get threshold for Canny edge detection.
+
    Args:
        img: image to be processed
    Returns:
@@ -39,45 +48,47 @@ def get_threshold(img):
    """
    # Calculate median
    med_val = np.median(img)
-
+
    # LOWER THRESHOLD IS EITHER 0 OR 70% OF THE MEDIAN, WHICHEVER IS GREATER
-    thres1 = int(max(0, 0.7 * med_val))
-
+    thres1 = int(max(0, 0.7 * med_val))
+
    # UPPER THRESHOLD IS EITHER 130% OF THE MEDIAN OR 255, WHICHEVER IS SMALLER
-    thres2 = int(min(255, 1.3 * med_val))
-
+    thres2 = int(min(255, 1.3 * med_val))
+
    # increase the upper threshold
    thres2 = thres2 + 50
-
+
    return thres1, thres2
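
A short sketch of how get_threshold feeds cv2.Canny, assuming a grayscale crop (the file name is illustrative):

    gray = cv2.cvtColor(cv2.imread("object.png"), cv2.COLOR_BGR2GRAY)
    t1, t2 = get_threshold(gray)                  # median-based lower/upper thresholds
    edges = cv2.Canny(gray, threshold1=t1, threshold2=t2)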


def remove_background(img):
-    """Isolate the object from the background
+    """Isolate the object from the background.
+
    Args:
        img: image to be processed
    Returns:
        bbox: bounding box coordinate of object
    """
    # blur and convert image to HSV
    background_img = img.copy()
-    blurred = cv2.blur(background_img[:, 185:430], ksize=(5,5))
+    blurred = cv2.blur(background_img[:, 185:430], ksize=(5, 5))
    hsvImg = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
-
+
    # Apply color thresholding to background (conveyor)
-    lower, upper = get_limits([64, 124, 41], v_range=[3,255], h_range=[15, 15])
+    lower, upper = get_limits([64, 124, 41], v_range=[3, 255], h_range=[15, 15])
    mask = cv2.inRange(hsvImg, lower, upper)
    mask_not = cv2.bitwise_not(mask)
-
+
    # Get the bounding box coordinate from mask
    maskImage = Image.fromarray(mask_not)
    bbox = maskImage.getbbox()
-
+
    return bbox
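
Worth noting: Image.getbbox() returns (left, upper, right, lower) measured inside the cropped strip img[:, 185:430], or None when the inverted mask has no non-zero pixels; the detection functions below add the 185 px x-offset back before drawing. A hedged call sketch (the frame source is illustrative):

    frame = cv2.imread("conveyor_frame.png")
    bbox = remove_background(frame)               # None if nothing stands out from the conveyor
    if bbox is not None:
        x1, y1, x2, y2 = bbox                     # coordinates relative to the 185:430 strip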


def color_detection(img, bbox):
-    """Apply Color Detection
+    """Apply Color Detection.
+
    Args:
        img: image to be processed
        bbox: bounding box coordinate of object
@@ -91,60 +102,77 @@ def color_detection(img, bbox):
    detected = False
    object_id = 404
    object_color = (0, 0, 0)
-    object_name = '-'
-
+    object_name = "-"
+
    # If bounding box exists and enough pixels are collected, detect it
-    if (bbox is not None) and (abs(bbox[2] - bbox[0]) >= 110) and (abs(bbox[3] - bbox[1]) >= 80):
+    if (
+        (bbox is not None)
+        and (abs(bbox[2] - bbox[0]) >= 110)
+        and (abs(bbox[3] - bbox[1]) >= 80)
+    ):
        detected = True
-
+
        # Collect bbox coordinate
        x1, y1, x2, y2 = bbox
-
+
        # Isolate object from the rest of the image
        sampled_img = img.copy()
-        duck = sampled_img[y1:y2, (x1 + 185):(x2 + 185)]
-        blurred_duck = cv2.blur(duck, ksize=(7,7))
-
+        duck = sampled_img[y1:y2, (x1 + 185):(x2 + 185)]
+        blurred_duck = cv2.blur(duck, ksize=(7, 7))
+
        # Sample the color
        color_sample = blurred_duck[45:75, 45:75]
        colorHSV = cv2.cvtColor(color_sample, cv2.COLOR_BGR2HSV)

-        # Calculate the mean from each channel
-        h_mean = colorHSV[:, :, 0].mean()
-        s_mean = colorHSV[:, :, 1].mean()
-        v_mean = colorHSV[:, :, 2].mean()
-
-        # Classify
+        # Calculate the mean from hue channel
+        h_mean = colorHSV[:, :, 0].mean()
+
+        # Classify
        if 17 <= h_mean <= 37:
-            object_name = 'yellow_duck'
+            object_name = "yellow_duck"
            object_color = (55, 232, 254)
            object_id = 0
        elif 151 <= h_mean <= 171:
-            object_name = 'pink_duck'
+            object_name = "pink_duck"
            object_color = (211, 130, 255)
            object_id = 1
        elif 88 <= h_mean <= 108:
-            object_name = 'blue_duck'
+            object_name = "blue_duck"
            object_color = (205, 172, 73)
            object_id = 2
        else:
            detected = False
-
+
        # Draw label and bounding box
        if bbox is not None:
            x1 += 185
            x2 += 185
            cv2.rectangle(img, (x1, y1), (x2, y2), object_color, 5)
-            cv2.putText(img, "Class: " + object_name, (x2 + 10, y1 + 15),
-                        cv2.FONT_HERSHEY_COMPLEX, 0.7, object_color, 2)
-            cv2.putText(img, f"Color: {object_color}", (x2 + 10, y1 + 40),
-                        cv2.FONT_HERSHEY_COMPLEX, 0.7, object_color, 2)
-
+            cv2.putText(
+                img,
+                "Class: " + object_name,
+                (x2 + 10, y1 + 15),
+                cv2.FONT_HERSHEY_COMPLEX,
+                0.7,
+                object_color,
+                2,
+            )
+            cv2.putText(
+                img,
+                f"Color: {object_color}",
+                (x2 + 10, y1 + 40),
+                cv2.FONT_HERSHEY_COMPLEX,
+                0.7,
+                object_color,
+                2,
+            )
+
    return detected, object_id, object_color, object_name
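
For orientation, the hue bands above (17-37, 151-171, 88-108) are on OpenCV's 0-179 hue scale; a quick check that the BGR label colors used in this function convert to hues inside their own bands:

    for name, bgr in [("yellow", [55, 232, 254]), ("pink", [211, 130, 255]), ("blue", [205, 172, 73])]:
        hue = cv2.cvtColor(np.uint8([[bgr]]), cv2.COLOR_BGR2HSV)[0][0][0]
        print(name, int(hue))                     # each hue lands inside the band used above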


-def contour_detection(img, bbox, offset:int=17, targetArea=14000):
-    """Apply Contour Detection
+def contour_detection(img, bbox, offset: int = 17, targetArea=14000):
+    """Apply Contour Detection.
+
    Args:
        img: image to be processed
        bbox: bounding box coordinate of the object
@@ -160,78 +188,108 @@ def contour_detection(img, bbox, offset:int=17, targetArea=14000):
    detected = False
    object_id = 404
    object_contour = (0, 0)
-    object_name = '-'
-
+    object_name = "-"
+
    # If bounding box exists
-    if (bbox is not None):
+    if bbox is not None:
        # Collect Bounding Box Coordinate
        x1, y1, x2, y2 = bbox
-        cnt_offset = (x1 + 150 + offset, y1 - offset)
-
+        cnt_offset = (x1 + 150 + offset, y1 - offset)
+
        # Isolate the object from the rest of image
        sampled_img = img.copy()
-        shape = sampled_img[y1 - offset:y2 + offset, (x1 + 185 - offset):(x2 + 185 + offset)]
-
+        shape = sampled_img[
+            (y1 - offset):(y2 + offset), (x1 + 185 - offset):(x2 + 185 + offset)
+        ]
+
        # If the isolated image is not empty, detect contours
        if shape.size != 0:
            # blur it and convert the color to grayscale
-            blurred_shape = cv2.blur(shape, ksize=(5,5))
+            blurred_shape = cv2.blur(shape, ksize=(5, 5))
            gray_shape = cv2.cvtColor(blurred_shape, cv2.COLOR_BGR2GRAY)
-
+
            # Apply Edge Detection
            threshold1, threshold2 = get_threshold(gray_shape)
-            edges = cv2.Canny(image=gray_shape, threshold1=threshold1, threshold2=threshold2)
-
+            edges = cv2.Canny(
+                image=gray_shape, threshold1=threshold1, threshold2=threshold2
+            )
+
            # Apply Dilation
-            kernel = np.ones((3,3))
+            kernel = np.ones((3, 3))
            dilation = cv2.dilate(edges, kernel, iterations=1)
-
+
            # Find contours
-            contours, hierarchy = cv2.findContours(dilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
-
+            contours, hierarchy = cv2.findContours(
+                dilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE
+            )
+
            # draw every detected contour with area larger than the target
            for cnt in contours:
                # area = cv2.contourArea(cnt)
-
+
                # Add offset to the contour coordinate
-                cnt[:, :, 0] = cnt[:, :, 0] + cnt_offset[0]
-                cnt[:, :, 1] = cnt[:, :, 1] + cnt_offset[1]
-
+                cnt[:, :, 0] = cnt[:, :, 0] + cnt_offset[0]
+                cnt[:, :, 1] = cnt[:, :, 1] + cnt_offset[1]
+
                # Approximate number of corner points, box coordinate, and area
                perimeter = cv2.arcLength(cnt, True)
-                approx = cv2.approxPolyDP(cnt, 0.02 * perimeter, True)
+                approx = cv2.approxPolyDP(cnt, 0.02 * perimeter, True)
                x, y, w, h = cv2.boundingRect(approx)
-                boxArea = w * h
-
+                boxArea = w * h
+
                # If target < box area < 30000 px, count as detected
                if boxArea > targetArea and boxArea < 30000:
                    detected = True
-
+
                    # Draw contours on image
                    cv2.drawContours(img, cnt, -1, (255, 0, 255), 3)
-
+
                    # Collect the area and the number of corner points
                    object_contour = (int(boxArea), len(approx))
-
+
                    # Classify object based on area and number of corner points
                    if (object_contour[0] >= 23000) and (object_contour[1] <= 11):
                        object_id = 0
-                        object_name = 'duck'
-                    elif ((object_contour[0] < 23000) and (object_contour[0] > 18000)
-                          and (object_contour[1] >= 9)):
+                        object_name = "duck"
+                    elif (
+                        (object_contour[0] < 23000)
+                        and (object_contour[0] > 18000)
+                        and (object_contour[1] >= 9)
+                    ):
                        object_id = 1
-                        object_name = 'cock'
+                        object_name = "cock"
                    elif (object_contour[0] <= 18000) and (object_contour[1] <= 11):
                        object_id = 2
-                        object_name = 'chick'
-
+                        object_name = "chick"
+
                    # Draw box, draw contour, and add text
-                    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 4)
-                    cv2.putText(img, "Class: " + object_name, (x + w + 10, y + 15),
-                                cv2.FONT_HERSHEY_COMPLEX, 0.7, (0, 0, 255), 2)
-                    cv2.putText(img, f"Points: {object_contour[1]}", (x + w + 10, y + 40),
-                                cv2.FONT_HERSHEY_COMPLEX, 0.7, (0, 0, 255), 2)
-                    cv2.putText(img, f"Area: {object_contour[0]}", (x + w + 10, y + 65),
-                                cv2.FONT_HERSHEY_COMPLEX, 0.7, (0, 0, 255), 2)
-
-    return detected, object_id, object_contour, object_name
+                    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 4)
+                    cv2.putText(
+                        img,
+                        "Class: " + object_name,
+                        (x + w + 10, y + 15),
+                        cv2.FONT_HERSHEY_COMPLEX,
+                        0.7,
+                        (0, 0, 255),
+                        2,
+                    )
+                    cv2.putText(
+                        img,
+                        f"Points: {object_contour[1]}",
+                        (x + w + 10, y + 40),
+                        cv2.FONT_HERSHEY_COMPLEX,
+                        0.7,
+                        (0, 0, 255),
+                        2,
+                    )
+                    cv2.putText(
+                        img,
+                        f"Area: {object_contour[0]}",
+                        (x + w + 10, y + 65),
+                        cv2.FONT_HERSHEY_COMPLEX,
+                        0.7,
+                        (0, 0, 255),
+                        2,
+                    )
+
+    return detected, object_id, object_contour, object_name
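
Taken together, a per-frame pipeline could look like the sketch below; the capture source, the window handling, and the assumption of a camera view wide enough for the 185:430 conveyor crop are all illustrative:

    cap = cv2.VideoCapture(0)                     # or a recorded video of the conveyor
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        bbox = remove_background(frame)
        found, obj_id, obj_color, obj_name = color_detection(frame, bbox)
        # contour_detection(frame, bbox) offers a shape-based alternative
        cv2.imshow("detection", frame)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    cap.release()
    cv2.destroyAllWindows()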