Commit 4a7b59a

Updated remaining docstrings | closes #29
1 parent 78fc37c commit 4a7b59a

File tree

4 files changed: +113 −108 lines changed


utils/darknet_sub_process/opencv_direct_darknet.py

Lines changed: 104 additions & 104 deletions (every removed/added pair carries identical content apart from whitespace, i.e. a re-indentation pass, so each hunk below is shown once with the resulting code)
@@ -4,15 +4,15 @@
import time

img = cv.imread(
    "C:/Users/Legos/AppData/Roaming/Blender Foundation/Blender/3.2/scripts/addons/omni_trax/darknet_sub_process/ant3.jpg"
)
cv.imshow("window", img)
cv.waitKey(1)

# Give the configuration and weight files for the model and load the network.
net = cv.dnn.readNetFromDarknet(
    "C:/Users/Legos/Documents/PhD/Blender/OmniTrax/trained_networks/atta_single_class/yolov4-big_and_small_ants_480.cfg",
    "C:/Users/Legos/Documents/PhD/Blender/OmniTrax/trained_networks/atta_single_class/yolov4-big_and_small_ants_HPC_final.weights",
)
net.setPreferableBackend(cv.dnn.DNN_BACKEND_CUDA)
net.setPreferableTarget(cv.dnn.DNN_TARGET_CUDA)
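
A note on the backend selection above: DNN_BACKEND_CUDA only takes effect in an OpenCV build compiled with CUDA support; on a stock opencv-python wheel the forward pass will typically error out or fall back at inference time. A hedged sketch of a guarded fallback (not part of this commit):

# Sketch, not from the commit: fall back to the CPU when no CUDA device
# is available. cv.cuda.getCudaEnabledDeviceCount() returns 0 on builds
# without CUDA support.
if cv.cuda.getCudaEnabledDeviceCount() > 0:
    net.setPreferableBackend(cv.dnn.DNN_BACKEND_CUDA)
    net.setPreferableTarget(cv.dnn.DNN_TARGET_CUDA)
else:
    net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
    net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)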
@@ -21,12 +21,12 @@
# Load names of classes and get random colors
classes = (
    open(
        "C:/Users/Legos/Documents/PhD/Blender/OmniTrax/trained_networks/atta_single_class/obj.names"
    )
    .read()
    .strip()
    .split("\n")
)
np.random.seed(42)
colors = np.random.randint(0, 255, size=(len(classes), 3), dtype="uint8")
@@ -52,37 +52,37 @@
print(len(outputs))
for out in outputs:
    print(out.shape)

boxes = []
confidences = []
classIDs = []
h, w = img.shape[:2]

for output in outputs:
    for detection in output:
        scores = detection[5:]
        classID = np.argmax(scores)
        confidence = scores[classID]
        if confidence > 0.5:
            box = detection[:4] * np.array([w, h, w, h])
            (centerX, centerY, width, height) = box.astype("int")
            x = int(centerX - (width / 2))
            y = int(centerY - (height / 2))
            box = [x, y, int(width), int(height)]
            boxes.append(box)
            confidences.append(float(confidence))
            classIDs.append(classID)

indices = cv.dnn.NMSBoxes(boxes, confidences, 0.2, 0.4)
if len(indices) > 0:
    for i in indices.flatten():
        (x, y) = (boxes[i][0], boxes[i][1])
        (w, h) = (boxes[i][2], boxes[i][3])
        color = [int(c) for c in colors[classIDs[i]]]
        cv.rectangle(img, (x, y), (x + w, y + h), color, 2)
        text = "{}: {:.4f}".format(classes[classIDs[i]], confidences[i])
        cv.putText(img, text, (x, y - 5), cv.FONT_HERSHEY_SIMPLEX, 0.5, color, 1)

cv.imshow("window", img)
cv.waitKey(0)
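
As a side note on the decoding step in this hunk: each YOLO detection vector is (center-x, center-y, width, height, objectness, per-class scores…), with box coordinates normalized to [0, 1], which is why the code multiplies by [w, h, w, h] and then shifts by half the width/height to recover the top-left pixel corner. A small self-contained illustration with made-up numbers (not from the commit):

import numpy as np

# One hypothetical single-class detection from a 640x480 image:
# (cx, cy, w, h, objectness, class score), box values normalized to [0, 1].
detection = np.array([0.5, 0.5, 0.2, 0.1, 0.9, 0.8])
h, w = 480, 640
cx, cy, bw, bh = detection[:4] * np.array([w, h, w, h])
x, y = int(cx - bw / 2), int(cy - bh / 2)
print([x, y, int(bw), int(bh)])  # -> [256, 216, 128, 48]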
@@ -93,89 +93,89 @@
# initialize the video stream, pointer to output video file, and
# frame dimensions
vs = cv.VideoCapture(
    "C:/Users/Legos/Desktop/yolov4/example_recordings/first_half_hour_resized.mp4"
)
writer = None
(W, H) = (None, None)
# try to determine the total number of frames in the video file
try:
    prop = cv.CAP_PROP_FRAME_COUNT
    total = int(vs.get(prop))
    print("[INFO] {} total frames in video".format(total))
# an error occurred while trying to determine the total
# number of frames in the video file
except:
    print("[INFO] could not determine # of frames in video")
    print("[INFO] no approx. completion time can be provided")
    total = -1
# loop over frames from the video file stream
while True:
    # read the next frame from the file
    (grabbed, frame) = vs.read()
    # if the frame was not grabbed, then we have reached the end
    # of the stream
    if not grabbed:
        break
    # if the frame dimensions are empty, grab them
    if W is None or H is None:
        (H, W) = frame.shape[:2]
    # construct a blob from the input frame and then perform a forward
    # pass of the YOLO object detector, giving us our bounding boxes
    # and associated probabilities
    blob = cv.dnn.blobFromImage(frame, 1 / 255.0, (416, 416), swapRB=True, crop=False)
    net.setInput(blob)
    start = time.time()
    layerOutputs = net.forward(ln)
    end = time.time()
    # initialize our lists of detected bounding boxes, confidences,
    # and class IDs, respectively
    boxes = []
    confidences = []
    classIDs = []

    # loop over each of the layer outputs
    for output in layerOutputs:
        # loop over each of the detections
        for detection in output:
            # extract the class ID and confidence (i.e., probability)
            # of the current object detection
            scores = detection[5:]
            classID = np.argmax(scores)
            confidence = scores[classID]
            # filter out weak predictions by ensuring the detected
            # probability is greater than the minimum probability
            if confidence > 0.5:
                # scale the bounding box coordinates back relative to
                # the size of the image, keeping in mind that YOLO
                # actually returns the center (x, y)-coordinates of
                # the bounding box followed by the boxes' width and
                # height
                box = detection[0:4] * np.array([W, H, W, H])
                (centerX, centerY, width, height) = box.astype("int")
                # use the center (x, y)-coordinates to derive the top
                # and left corner of the bounding box
                x = int(centerX - (width / 2))
                y = int(centerY - (height / 2))
                # update our list of bounding box coordinates,
                # confidences, and class IDs
                boxes.append([x, y, int(width), int(height)])
                confidences.append(float(confidence))
                classIDs.append(classID)

    # apply non-maxima suppression to suppress weak, overlapping
    # bounding boxes
    indices = cv.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
    # ensure at least one detection exists
    if len(indices) > 0:
        for i in indices.flatten():
            (x, y) = (boxes[i][0], boxes[i][1])
            (w, h) = (boxes[i][2], boxes[i][3])
            color = [int(c) for c in colors[classIDs[i]]]
            cv.rectangle(frame, (x, y), (x + w, y + h), color, 2)
            text = "{}: {:.4f}".format(classes[classIDs[i]], confidences[i])
            cv.putText(frame, text, (x, y - 5), cv.FONT_HERSHEY_SIMPLEX, 0.5, color, 1)

    cv.imshow("test", frame)
    cv.waitKey(1)

cv.destroyAllWindows()
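
One detail the visible hunks never define is `ln`, which is passed to `net.forward(ln)`; it presumably lives in a part of the file this diff elides. In the standard OpenCV-DNN Darknet recipe it is the list of unconnected output layer names:

# Assumption: ln is defined in an elided part of the file; the usual
# recipe queries the network's unconnected output layers.
ln = net.getUnconnectedOutLayersNames()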

utils/setup/operators.py

Whitespace-only changes.

utils/setup/ui.py

Whitespace-only changes.

utils/track/kalman_filter_new.py

Lines changed: 9 additions & 4 deletions
@@ -62,20 +62,25 @@ def __init__(self, dt, u_x, u_y, std_acc, x_std_meas, y_std_meas, initial_state)
         self.P = np.eye(self.A.shape[1])
 
     def predict(self):
-        # Refer to :Eq.(9) and Eq.(10)
-        # in https://machinelearningspace.com/object-tracking-python/
-        # Update time state
-        # x_k =Ax_(k-1) + Bu_(k-1)   Eq.(9)
+        """
+        Extrapolate the track based on its current position and velocity
+        Refer to :Eq.(9) and Eq.(10)
+        in https://machinelearningspace.com/object-tracking-python/
+        x_k =Ax_(k-1) + Bu_(k-1)   Eq.(9)
+        """
         self.x = np.dot(self.A, self.x) + np.dot(self.B, self.u)
         # Calculate error covariance
         # P= A*P*A' + Q   Eq.(10)
         self.P = np.dot(np.dot(self.A, self.P), self.A.T) + self.Q
         return self.x[0:2]
 
     def update(self, z, flag):
+        """
+        Update time state of the filter
         # Refer to :Eq.(11), Eq.(12) and Eq.(13) in
         # https://machinelearningspace.com/object-tracking-python/
         # S = H*P*H'+R
+        """
         if not flag:
             # use prediction of previous
             z = self.x[0:2]  # + (self.dt * self.x[:2])
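
For reference, the correction step the new update() docstring summarizes (Eq. 11-13 of the cited tutorial) is the standard Kalman update; a minimal numpy sketch, with shapes and variable names as illustrative assumptions rather than the class's actual attributes:

import numpy as np

def kalman_update(x, P, z, H, R):
    # S = H*P*H' + R -- innovation covariance, Eq.(11)
    S = H @ P @ H.T + R
    # K = P*H'*inv(S) -- Kalman gain, Eq.(12)
    K = P @ H.T @ np.linalg.inv(S)
    # x = x + K*(z - H*x) -- corrected state estimate, Eq.(13)
    x = x + K @ (z - H @ x)
    # P = (I - K*H)*P -- corrected error covariance
    P = (np.eye(P.shape[0]) - K @ H) @ P
    return x, P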
