Skip to content

Commit e9cbbfa

Browse files
author
Matevz Morato
committed
Fix up the examples
1 parent a3a7c81 commit e9cbbfa

26 files changed

+111
-149
lines changed

bindings/python/examples/v3/ColorCamera/rgb_video.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,4 +25,4 @@
2525
cv2.imshow("video", videoIn.getCvFrame())
2626

2727
if cv2.waitKey(1) == ord('q'):
28-
break
28+
break
Lines changed: 17 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -1,28 +1,27 @@
11
import depthai as dai
22
import cv2
3-
import time
43

54

6-
class HostDisplay(dai.node.ThreadedHostNode):
7-
def __init__(self):
8-
dai.node.ThreadedHostNode.__init__(self)
9-
self.input = dai.Node.Input(self)
10-
def run(self):
11-
while True:
12-
message : dai.ImgFrame = self.input.get()
13-
cv2.imshow("HostDisplay", message.getCvFrame())
14-
key = cv2.waitKey(1)
15-
if key == ord('q'):
16-
self.stopPipeline()
17-
break
5+
class HostDisplay(dai.node.HostNode):
6+
def build(self, frameOutput: dai.Node.Output):
7+
self.link_args(frameOutput) # Has to match the inputs to the `process` method
8+
9+
# This sends all the processing to the pipeline where it's executed by the `pipeline.runTasks()` or implicitly by `pipeline.run()` method.
10+
# It's needed as the GUI window needs to be updated in the main thread, and the `process` method is by default called in a separate thread.
11+
self.sendProcessingToPipeline(True)
12+
return self
13+
14+
def process(self, message: dai.ImgFrame):
15+
cv2.imshow("HostDisplay", message.getCvFrame())
16+
key = cv2.waitKey(1)
17+
if key == ord('q'):
18+
print("Detected 'q' - stopping the pipeline...")
19+
self.stopPipeline()
1820

1921
with dai.Pipeline() as p:
2022
camera = p.create(dai.node.ColorCamera)
2123
camera.setBoardSocket(dai.CameraBoardSocket.CAM_A)
2224

23-
myHostDisplay = p.create(HostDisplay)
24-
camera.video.link(myHostDisplay.input)
25+
hostDisplay = p.create(HostDisplay).build(camera.video)
2526

26-
p.start()
27-
while p.isRunning():
28-
time.sleep(1)
27+
p.run() # Will block until the pipeline is stopped by someone else (in this case it's the display node)

bindings/python/examples/v3/HostNodes/host_camera.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -23,13 +23,13 @@ def run(self):
2323
imgFrame.setData(frame)
2424
imgFrame.setWidth(frame.shape[1])
2525
imgFrame.setHeight(frame.shape[0])
26-
imgFrame.setType(dai.ImgFrame.Type.RGB888i)
26+
imgFrame.setType(dai.ImgFrame.Type.BGR888i)
2727
# Send the message
2828
self.output.send(imgFrame)
2929
# Wait for the next frame
3030
time.sleep(0.1)
3131

32-
with dai.Pipeline() as p:
32+
with dai.Pipeline(createImplicitDevice=False) as p:
3333
hostCamera = p.create(HostCamera)
3434
camQueue = hostCamera.output.createQueue()
3535

bindings/python/examples/v3/NNArchive/nn_archive.py

Lines changed: 3 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -26,22 +26,17 @@
2626
with dai.Pipeline() as pipeline:
2727

2828
# Define sources and outputs
29-
camRgb = pipeline.create(dai.node.ColorCamera)
30-
detectionNetwork = pipeline.create(dai.node.YoloDetectionNetwork)
31-
29+
camRgb = pipeline.create(dai.node.ColorCamera).build()
3230
# Properties
3331
camRgb.setPreviewSize(640, 640)
3432
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
3533
camRgb.setInterleaved(False)
3634
camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)
37-
camRgb.setFps(30)
38-
35+
camRgb.setFps(15)
3936
nnArchive = dai.NNArchive(nnPath)
40-
detectionNetwork.setNNArchive(nnArchive)
37+
detectionNetwork = pipeline.create(dai.node.DetectionNetwork).build(camRgb.preview, nnArchive)
4138
detectionNetwork.setNumInferenceThreads(2)
4239

43-
# Linking
44-
camRgb.preview.link(detectionNetwork.input)
4540

4641
qRgb = detectionNetwork.passthrough.createQueue()
4742
qDet = detectionNetwork.out.createQueue()

bindings/python/examples/v3/RecordReplay/record_video.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,7 @@ def signal_handler(sig, frame):
1818
cam = pipeline.create(dai.node.ColorCamera)
1919
cam.setBoardSocket(dai.CameraBoardSocket.RGB)
2020
cam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
21+
cam.setVideoSize(320, 320)
2122

2223
videoEncoder = pipeline.create(dai.node.VideoEncoder)
2324
videoEncoder.setProfile(dai.VideoEncoderProperties.Profile.H264_MAIN)

bindings/python/examples/v3/RecordReplay/replay_video.py

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111

1212
# Check if the input video file exists
1313
if not Path(args.inputVideo).exists():
14+
print("First record a video using the record_video.py script")
1415
raise FileNotFoundError(f'Input video file not found: {args.inputVideo}')
1516

1617
with dai.Pipeline() as pipeline:
@@ -20,14 +21,16 @@
2021

2122
imageManip = pipeline.create(dai.node.ImageManip)
2223
imageManip.initialConfig.setResize(300, 300)
23-
imageManip.setFrameType(dai.ImgFrame.Type.BGR888p)
24+
imageManip.initialConfig.setFrameType(dai.ImgFrame.Type.BGR888p)
2425
replay.out.link(imageManip.inputImage)
2526
manipOutQueue = imageManip.out.createQueue()
2627

2728
pipeline.start()
28-
while pipeline.isRunning():
29+
while pipeline.isRunning() and replay.isRunning():
2930
outFrame : dai.ImgFrame = manipOutQueue.get()
3031
outFrameCv = outFrame.getCvFrame()
3132
cv2.imshow("video", outFrameCv)
3233
if cv2.waitKey(1) == ord('q'):
34+
print("Stopping pipeline")
35+
pipeline.stop()
3336
break

bindings/python/examples/v3/SpatialDetection/spatial_tiny_yolo.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@
3131
with dai.Pipeline() as p:
3232
# Define sources and outputs
3333
camRgb = p.create(dai.node.ColorCamera)
34-
spatialDetectionNetwork = p.create(dai.node.YoloSpatialDetectionNetwork)
34+
spatialDetectionNetwork = p.create(dai.node.YoloSpatialDetectionNetwork).build()
3535
monoLeft = p.create(dai.node.MonoCamera)
3636
monoRight = p.create(dai.node.MonoCamera)
3737
stereo = p.create(dai.node.StereoDepth)

bindings/python/examples/v3/SpatialDetection/spatial_with_host_nodes.py

Lines changed: 10 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -29,14 +29,13 @@
2929
class SpatialVisualizer(dai.node.HostNode):
3030
def __init__(self):
3131
dai.node.HostNode.__init__(self)
32-
self.depthIn = self.inputs["depth"]
33-
self.detectionsIn = self.inputs["detections"]
34-
self.rgbIn = self.inputs["rgb"]
35-
36-
def processGroup(self, messages: dai.MessageGroup):
37-
depthPreview = messages["depth"].getCvFrame()
38-
detections = messages["detections"]
39-
rgbPreview = messages["rgb"].getCvFrame()
32+
self.sendProcessingToPipeline(True)
33+
def build(self, depth:dai.Node.Output, detections: dai.Node.Output, rgb: dai.Node.Output):
34+
self.link_args(depth, detections, rgb) # Must match the inputs to the process method
35+
36+
def process(self, depthPreview: dai.ImgFrame, detections: dai.ImgDetections, rgbPreview: dai.ImgFrame):
37+
depthPreview = depthPreview.getCvFrame()
38+
rgbPreview = rgbPreview.getCvFrame()
4039
depthFrameColor = self.processDepthFrame(depthPreview)
4140
self.displayResults(rgbPreview, depthFrameColor, detections.detections)
4241

@@ -90,7 +89,7 @@ def drawDetections(self, frame, detection, frameWidth, frameHeight):
9089
with dai.Pipeline() as p:
9190
# Define sources and outputs
9291
camRgb = p.create(dai.node.ColorCamera)
93-
spatialDetectionNetwork = p.create(dai.node.YoloSpatialDetectionNetwork)
92+
spatialDetectionNetwork = p.create(dai.node.YoloSpatialDetectionNetwork).build()
9493
monoLeft = p.create(dai.node.MonoCamera)
9594
monoRight = p.create(dai.node.MonoCamera)
9695
stereo = p.create(dai.node.StereoDepth)
@@ -127,12 +126,6 @@ def drawDetections(self, frame, detection, frameWidth, frameHeight):
127126
camRgb.preview.link(spatialDetectionNetwork.input)
128127
visualizer.labelMap = spatialDetectionNetwork.getClasses()
129128

129+
visualizer.build(stereo.depth, spatialDetectionNetwork.out, camRgb.preview)
130130

131-
camRgb.preview.link(visualizer.rgbIn)
132-
spatialDetectionNetwork.passthroughDepth.link(visualizer.depthIn)
133-
spatialDetectionNetwork.out.link(visualizer.detectionsIn)
134-
135-
p.start()
136-
137-
while p.isRunning():
138-
time.sleep(0.1)
131+
p.run()

bindings/python/examples/v3/StereoDepth/stereo_autocreate.py

Lines changed: 4 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -5,16 +5,7 @@
55
import time
66

77
class StereoVisualizer(dai.node.HostNode):
8-
def __init__(self):
9-
dai.node.HostNode.__init__(self)
10-
self.inputDepth = self.inputs["disparity"]
11-
12-
def build(self, output: dai.Node.Output):
13-
output.link(self.inputDepth)
14-
return self
15-
16-
def processGroup(self, messages: dai.MessageGroup):
17-
inFrame : dai.ImgFrame = messages["disparity"]
8+
def process(self, inFrame: dai.ImgFrame):
189
outFrame = inFrame.getFrame()
1910

2011
# Colorize the disparity map
@@ -33,8 +24,7 @@ def processGroup(self, messages: dai.MessageGroup):
3324
# Allow stereo inputs to be created automatically
3425
# NOTE: This is a naive implementation, it will not handle correctly the case where cameras have already been created
3526
stereo = pipeline.create(dai.node.StereoDepth).build(autoCreateCameras=True)
36-
visualizer = pipeline.create(StereoVisualizer).build(stereo.disparity)
27+
visualizer = pipeline.create(StereoVisualizer)
28+
visualizer.link_args(stereo.disparity)
3729

38-
pipeline.start()
39-
while pipeline.isRunning():
40-
time.sleep(0.1)
30+
pipeline.run()

bindings/python/src/pipeline/PipelineBindings.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -183,7 +183,7 @@ void PipelineBindings::bind(pybind11::module& m, void* pCallstack){
183183
throw std::invalid_argument(std::string(py::str(class_)) + " is not a subclass of depthai.node");
184184
}
185185
return node;
186-
})
186+
}, py::keep_alive<1,0>())
187187
// TODO(themarpe) DEPRECATE, use pipeline.create([class name])
188188
// templated create<NODE> function
189189
.def("createXLinkIn", &Pipeline::create<node::XLinkIn>)

0 commit comments

Comments (0)