
Commit ab77a68

Clean the code
1 parent 4cd38eb commit ab77a68

File tree: 9 files changed (+462 additions, -291 deletions)

constant.py

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-"""This module defines constants used in the project"""
+"""This module defines constants used in the project."""
 
 # Limit of camera detection time
 LIMIT_CMR_TIME = 8
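
LIMIT_CMR_TIME is documented here only by its inline comment. As a point of reference, a minimal sketch of how a detection-time limit like this might be consumed, assuming the constant is a duration in seconds; the function name, camera index, and capture loop below are hypothetical and not part of this commit:

import time

import cv2

from constant import LIMIT_CMR_TIME


def capture_until_limit(camera_index: int = 0):
    """Hypothetical example: read frames until LIMIT_CMR_TIME seconds elapse."""
    cap = cv2.VideoCapture(camera_index)
    start = time.time()
    frames = []
    # Stop collecting frames once the camera detection time limit is reached
    while time.time() - start < LIMIT_CMR_TIME:
        ok, frame = cap.read()
        if not ok:
            break
        frames.append(frame)
    cap.release()
    return frames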

processing/process.py

Lines changed: 147 additions & 89 deletions
@@ -1,14 +1,18 @@
-"""Traditional image processing functions"""
+"""Traditional image processing functions."""
 
 import cv2
 import numpy as np
 from PIL import Image
 
-def get_limits(color:list,
-               h_range:list=[10,10],
-               s_range:list=[100,255],
-               v_range:list=[100,255]):
-    """Get limits for color thresholding
+
+def get_limits(
+    color: list,
+    h_range: list = [10, 10],
+    s_range: list = [100, 255],
+    v_range: list = [100, 255],
+):
+    """Get limits for color thresholding.
+
     Args:
         color: Color in BGR to be thresholded.
         h_range: Range of Hue channels.
@@ -21,16 +25,21 @@ def get_limits(color:list,
     # convert color to HSV space
     bgr = np.uint8([[color]])
     hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)
-
+
     # Calculate the limits
-    lowerLimit = np.array((hsv[0][0][0] - h_range[0], s_range[0], v_range[0]), dtype=np.uint8)
-    upperLimit = np.array((hsv[0][0][0] + h_range[1], s_range[1], v_range[1]), dtype=np.uint8)
-
+    lowerLimit = np.array(
+        (hsv[0][0][0] - h_range[0], s_range[0], v_range[0]), dtype=np.uint8
+    )
+    upperLimit = np.array(
+        (hsv[0][0][0] + h_range[1], s_range[1], v_range[1]), dtype=np.uint8
+    )
+
     return lowerLimit, upperLimit
 
 
 def get_threshold(img):
-    """Get threshold for Canny edge detection
+    """Get threshold for Canny edge detection.
+
     Args:
         img: image to be processed
     Returns:
@@ -39,45 +48,47 @@ def get_threshold(img):
     """
     # Calculate median
    med_val = np.median(img)
-
+
     # LOWER THRESHOLD IS EITHER 0 OR 70% OF THE MEDIAN, WHICHEVER IS GREATER
-    thres1 = int(max(0, 0.7*med_val))
-
+    thres1 = int(max(0, 0.7 * med_val))
+
     # UPPER THRESHOLD IS EITHER 130% OF THE MEDIAN OR 255, WHICHEVER IS SMALLER
-    thres2 = int(min(255, 1.3*med_val))
-
+    thres2 = int(min(255, 1.3 * med_val))
+
     # increase the upper threshold
     thres2 = thres2 + 50
-
+
     return thres1, thres2
 
 
 def remove_background(img):
-    """Isolate the object from the background
+    """Isolate the object from the background.
+
     Args:
         img: image to be processed
     Returns:
         bbox: bounding box coordinate of object
     """
     # blur and convert image to HSV
     background_img = img.copy()
-    blurred = cv2.blur(background_img[:, 185:430], ksize=(5,5))
+    blurred = cv2.blur(background_img[:, 185:430], ksize=(5, 5))
     hsvImg = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
-
+
     # Apply color thresholding to background (conveyor)
-    lower, upper = get_limits([64, 124, 41], v_range=[3,255], h_range=[15, 15])
+    lower, upper = get_limits([64, 124, 41], v_range=[3, 255], h_range=[15, 15])
     mask = cv2.inRange(hsvImg, lower, upper)
     mask_not = cv2.bitwise_not(mask)
-
+
     # Get the bounding box coordinate from mask
     maskImage = Image.fromarray(mask_not)
     bbox = maskImage.getbbox()
-
+
     return bbox
 
 
 def color_detection(img, bbox):
-    """Apply Color Detection
+    """Apply Color Detection.
+
     Args:
         img: image to be processed
         bbox: bounding box coordinate of object
@@ -91,60 +102,77 @@ def color_detection(img, bbox):
     detected = False
     object_id = 404
     object_color = (0, 0, 0)
-    object_name = '-'
-
+    object_name = "-"
+
     # If bounding box exists and enough pixels are collected, detect it
-    if (bbox is not None) and (abs(bbox[2] - bbox[0]) >= 110) and (abs(bbox[3] - bbox[1]) >= 80):
+    if (
+        (bbox is not None)
+        and (abs(bbox[2] - bbox[0]) >= 110)
+        and (abs(bbox[3] - bbox[1]) >= 80)
+    ):
         detected = True
-
+
         # Collect bbox coordinate
         x1, y1, x2, y2 = bbox
-
+
         # Isolate object from the rest of the image
         sampled_img = img.copy()
-        duck = sampled_img[y1:y2, (x1+185):(x2+185)]
-        blurred_duck = cv2.blur(duck, ksize=(7,7))
-
+        duck = sampled_img[y1:y2, (x1 + 185):(x2 + 185)]
+        blurred_duck = cv2.blur(duck, ksize=(7, 7))
+
         # Sample the color
         color_sample = blurred_duck[45:75, 45:75]
         colorHSV = cv2.cvtColor(color_sample, cv2.COLOR_BGR2HSV)

-        # Calculate the mean from each channel
-        h_mean = colorHSV[:, :,0].mean()
-        s_mean = colorHSV[:, :,1].mean()
-        v_mean = colorHSV[:, :,2].mean()
-
-        # Classify
+        # Calculate the mean from hue channel
+        h_mean = colorHSV[:, :, 0].mean()
+
+        # Classify
         if 17 <= h_mean <= 37:
-            object_name = 'yellow_duck'
+            object_name = "yellow_duck"
             object_color = (55, 232, 254)
             object_id = 0
         elif 151 <= h_mean <= 171:
-            object_name = 'pink_duck'
+            object_name = "pink_duck"
             object_color = (211, 130, 255)
             object_id = 1
         elif 88 <= h_mean <= 108:
-            object_name = 'blue_duck'
+            object_name = "blue_duck"
             object_color = (205, 172, 73)
             object_id = 2
         else:
             detected = False
-
+
     # Draw label and bounding box
     if bbox is not None:
         x1 += 185
         x2 += 185
         cv2.rectangle(img, (x1, y1), (x2, y2), object_color, 5)
-        cv2.putText(img, "Class: " + object_name, (x2+10, y1+15),
-                    cv2.FONT_HERSHEY_COMPLEX, 0.7, object_color, 2)
-        cv2.putText(img, f"Color: {object_color}", (x2+10, y1+40),
-                    cv2.FONT_HERSHEY_COMPLEX, 0.7, object_color, 2)
-
+        cv2.putText(
+            img,
+            "Class: " + object_name,
+            (x2 + 10, y1 + 15),
+            cv2.FONT_HERSHEY_COMPLEX,
+            0.7,
+            object_color,
+            2,
+        )
+        cv2.putText(
+            img,
+            f"Color: {object_color}",
+            (x2 + 10, y1 + 40),
+            cv2.FONT_HERSHEY_COMPLEX,
+            0.7,
+            object_color,
+            2,
+        )
+
     return detected, object_id, object_color, object_name
 
 
-def contour_detection(img, bbox, offset:int=17, targetArea=14000):
-    """Apply Contour Detection
+def contour_detection(img, bbox, offset: int = 17, targetArea=14000):
+    """Apply Contour Detection.
+
     Args:
         img: image to be processed
         bbox: bounding box coordinate of the object
@@ -160,78 +188,108 @@ def contour_detection(img, bbox, offset:int=17, targetArea=14000):
     detected = False
     object_id = 404
     object_contour = (0, 0)
-    object_name = '-'
-
+    object_name = "-"
+
     # If bounding box exists
-    if (bbox is not None):
+    if bbox is not None:
         # Collect Bounding Box Coordinate
         x1, y1, x2, y2 = bbox
-        cnt_offset = (x1+150+offset, y1-offset)
-
+        cnt_offset = (x1 + 150 + offset, y1 - offset)
+
         # Isolate the object from the rest of image
         sampled_img = img.copy()
-        shape = sampled_img[y1-offset:y2+offset, (x1+185-offset):(x2+185+offset)]
-
+        shape = sampled_img[
+            (y1 - offset):(y2 + offset), (x1 + 185 - offset):(x2 + 185 + offset)
+        ]
+
         # If the isolated image is not empty, detect contours
         if shape.size != 0:
             # blur it and convert the color to grayscale
-            blurred_shape = cv2.blur(shape, ksize=(5,5))
+            blurred_shape = cv2.blur(shape, ksize=(5, 5))
             gray_shape = cv2.cvtColor(blurred_shape, cv2.COLOR_BGR2GRAY)
-
+
             # Apply Edge Detection
             threshold1, threshold2 = get_threshold(gray_shape)
-            edges = cv2.Canny(image=gray_shape, threshold1=threshold1, threshold2=threshold2)
-
+            edges = cv2.Canny(
+                image=gray_shape, threshold1=threshold1, threshold2=threshold2
+            )
+
             # Apply Dilation
-            kernel = np.ones((3,3))
+            kernel = np.ones((3, 3))
             dilation = cv2.dilate(edges, kernel, iterations=1)
-
+
             # Find contours
-            contours, hierarchy = cv2.findContours(dilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
-
+            contours, hierarchy = cv2.findContours(
+                dilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE
+            )
+
             # draw every detected contour with area larger than the target
             for cnt in contours:
                 # area = cv2.contourArea(cnt)
-
+
                 # Add offset to the contour coordinate
-                cnt[:,:,0] = cnt[:,:,0] + cnt_offset[0]
-                cnt[:,:,1] = cnt[:,:,1] + cnt_offset[1]
-
+                cnt[:, :, 0] = cnt[:, :, 0] + cnt_offset[0]
+                cnt[:, :, 1] = cnt[:, :, 1] + cnt_offset[1]
+
                 # Approximate number of corner points, box coordinate, and area
                 perimeter = cv2.arcLength(cnt, True)
-                approx = cv2.approxPolyDP(cnt, 0.02*perimeter, True)
+                approx = cv2.approxPolyDP(cnt, 0.02 * perimeter, True)
                 x, y, w, h = cv2.boundingRect(approx)
-                boxArea = w*h
-
+                boxArea = w * h
+
                 # If target < box area < 30000 px, count as detected
                 if boxArea > targetArea and boxArea < 30000:
                     detected = True
-
+
                     # Draw contours on image
                     cv2.drawContours(img, cnt, -1, (255, 0, 255), 3)
-
+
                     # Collect the area and the number of corner points
                    object_contour = (int(boxArea), len(approx))
-
+
                     # Classify object based on area and number of corner points
                     if (object_contour[0] >= 23000) and (object_contour[1] <= 11):
                         object_id = 0
-                        object_name = 'duck'
-                    elif ((object_contour[0] < 23000) and (object_contour[0] > 18000)
-                          and (object_contour[1] >= 9)):
+                        object_name = "duck"
+                    elif (
+                        (object_contour[0] < 23000)
+                        and (object_contour[0] > 18000)
+                        and (object_contour[1] >= 9)
+                    ):
                         object_id = 1
-                        object_name = 'cock'
+                        object_name = "cock"
                     elif (object_contour[0] <= 18000) and (object_contour[1] <= 11):
                         object_id = 2
-                        object_name = 'chick'
-
+                        object_name = "chick"
+
                     # Draw box, draw contour, and add text
-                    cv2.rectangle(img, (x, y), (x+w, y+h), (0, 0, 255), 4)
-                    cv2.putText(img, "Class: " + object_name, (x+w+10, y+15),
-                                cv2.FONT_HERSHEY_COMPLEX, 0.7, (0, 0, 255), 2)
-                    cv2.putText(img, f"Points: {object_contour[1]}", (x+w+10, y+40),
-                                cv2.FONT_HERSHEY_COMPLEX, 0.7, (0, 0, 255), 2)
-                    cv2.putText(img, f"Area: {object_contour[0]}", (x+w+10, y+65),
-                                cv2.FONT_HERSHEY_COMPLEX, 0.7, (0, 0, 255), 2)
-
-    return detected, object_id, object_contour, object_name
+                    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 4)
+                    cv2.putText(
+                        img,
+                        "Class: " + object_name,
+                        (x + w + 10, y + 15),
+                        cv2.FONT_HERSHEY_COMPLEX,
+                        0.7,
+                        (0, 0, 255),
+                        2,
+                    )
+                    cv2.putText(
+                        img,
+                        f"Points: {object_contour[1]}",
+                        (x + w + 10, y + 40),
+                        cv2.FONT_HERSHEY_COMPLEX,
+                        0.7,
+                        (0, 0, 255),
+                        2,
+                    )
+                    cv2.putText(
+                        img,
+                        f"Area: {object_contour[0]}",
+                        (x + w + 10, y + 65),
+                        cv2.FONT_HERSHEY_COMPLEX,
+                        0.7,
+                        (0, 0, 255),
+                        2,
+                    )
+
+    return detected, object_id, object_contour, object_name
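
For orientation, the three helpers reshaped above are independent and each draws its labels directly onto the frame it receives. A minimal usage sketch chaining them on one frame follows; the test image path, import path, and display calls are illustrative assumptions rather than part of this commit, and the frame is assumed to match the camera resolution implied by the hard-coded slice constants (e.g. 185:430).

import cv2

from processing.process import color_detection, contour_detection, remove_background

# Hypothetical BGR test frame; must be wide enough for the hard-coded conveyor slice
frame = cv2.imread("sample_frame.png")

# 1. Threshold out the conveyor background and get the object's bounding box
bbox = remove_background(frame)

# 2. Classify by sampled hue (draws a colored box and label on the frame in place)
detected, object_id, object_color, object_name = color_detection(frame, bbox)

# 3. Classify independently by contour area and corner count
cnt_detected, cnt_id, object_contour, cnt_name = contour_detection(frame, bbox)

if detected:
    print(f"color: {object_name} (id={object_id})")
if cnt_detected:
    print(f"contour: {cnt_name}, area={object_contour[0]}, points={object_contour[1]}")

cv2.imshow("result", frame)
cv2.waitKey(0)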
