Skip to content

Commit

Permalink
Merge pull request #912 from luxonis/develop
Browse files Browse the repository at this point in the history
1.9.2 release
  • Loading branch information
daniilpastukhov committed Jan 23, 2023
2 parents ec55504 + 4981eea commit 5caf106
Show file tree
Hide file tree
Showing 108 changed files with 1,242 additions and 15,570 deletions.
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,7 @@ resources/*.json
# Virtual environment
virtualenv/
venv/
.venv/

# DepthAI recordings
recordings/
Expand Down
3 changes: 3 additions & 0 deletions .gitmodules
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
[submodule "depthai_sdk/src/depthai_sdk/components/integrations/depthai_pipeline_graph"]
path = depthai_sdk/src/depthai_sdk/components/integrations/depthai_pipeline_graph
url = https://github.com/luxonis/depthai_pipeline_graph
2 changes: 1 addition & 1 deletion depthai_sdk/docs/source/conf.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@
author = 'Luxonis'

# The full version, including alpha/beta/rc tags
release = '1.0.0'
release = '1.9.2'


# -- General configuration ---------------------------------------------------
Expand Down
2 changes: 1 addition & 1 deletion depthai_sdk/docs/source/features/recording.rst
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,7 @@ stream and mono streams. You can open the rosbag with the `RealSense Viewer <htt
-----------------

An alternative to Rosbags are `mcap files <https://github.com/foxglove/mcap>`__ which can be viewed with `Foxglove studio <https://foxglove.dev/>`__.
You can find `MCAP recording example here <https://github.com/luxonis/depthai/blob/main/depthai_sdk/examples/recording/mcap-record.py>`__.
You can find `MCAP recording example here <https://github.com/luxonis/depthai/blob/main/depthai_sdk/examples/recording/mcap_record.py>`__.
Currently supported streams:

- MJPEG encoded color/left/right/disparity. Lossless MJPEG/H264/H265 aren't supported by Foxglove Studio.
Expand Down
10 changes: 10 additions & 0 deletions depthai_sdk/examples/CameraComponent/cam_ffc.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
from depthai_sdk import OakCamera

with OakCamera() as oak:
cama = oak.create_camera('cama,c', resolution='1200p')
camb = oak.create_camera('camb,c', resolution='1200p')
camc = oak.create_camera('camc,c', resolution='1200p')
# stereo = oak.create_stereo(left=left, right=right)

oak.visualize([cama, camb,camc], fps=True, scale=2/3)
oak.start(blocking=True)
2 changes: 1 addition & 1 deletion depthai_sdk/examples/CameraComponent/rgb_mono_preview.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,4 +5,4 @@
left = oak.create_camera('left')
right = oak.create_camera('right')
oak.visualize([color, left, right], fps=True)
oak.start(blocking=True)
oak.start(blocking=True)
30 changes: 3 additions & 27 deletions depthai_sdk/examples/NNComponent/deeplabv3_person.py
Original file line number Diff line number Diff line change
@@ -1,35 +1,11 @@
import cv2
import numpy as np

from depthai_sdk import OakCamera, DetectionPacket, Visualizer

NN_WIDTH, NN_HEIGHT = 513, 513


def process_mask(output_tensor):
class_colors = [[0, 0, 0], [0, 255, 0]]
class_colors = np.asarray(class_colors, dtype=np.uint8)
output = output_tensor.reshape(NN_WIDTH, NN_HEIGHT)
output_colors = np.take(class_colors, output, axis=0)
return output_colors


def callback(packet: DetectionPacket, visualizer: Visualizer):
frame = packet.frame
mask = packet.img_detections.mask

output_colors = process_mask(mask)
output_colors = cv2.resize(output_colors, (frame.shape[1], frame.shape[0]))

frame = cv2.addWeighted(frame, 1, output_colors, 0.2, 0)
cv2.imshow('DeepLabV3 person segmentation', frame)
from depthai_sdk import OakCamera


with OakCamera() as oak:
color = oak.create_camera('color', resolution='1080p')

nn = oak.create_nn('deeplabv3_person', color)
nn.config_nn(resize_mode='stretch')
nn.config_nn(resize_mode='letterbox')

oak.callback(nn, callback=callback)
visualizer = oak.visualize([nn, nn.out.passthrough], fps=True)
oak.start(blocking=True)
7 changes: 7 additions & 0 deletions depthai_sdk/examples/NNComponent/mobilenet_encoded.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
from depthai_sdk import OakCamera

with OakCamera() as oak:
color = oak.create_camera('color', encode='mjpeg', fps=10)
nn = oak.create_nn('mobilenet-ssd', color, spatial=True)
oak.visualize([nn.out.encoded])
oak.start(blocking=True)
4 changes: 2 additions & 2 deletions depthai_sdk/examples/NNComponent/roboflow_integration.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,8 @@
model_config = {
'source': 'roboflow', # Specify that we are downloading the model from Roboflow
'model':'american-sign-language-letters/6',
'key':'d2OP8nbhA9rZcWd6G8p1' # Fake API key, replace with your own!
'key':'181b0f6e43d59ee5ea421cd77f6d9ea2a4b059f8' # Fake API key, replace with your own!
}
nn = oak.create_nn(model_config, color)
oak.visualize(nn, fps=True)
oak.start(blocking=True)
oak.start(blocking=True)
4 changes: 2 additions & 2 deletions depthai_sdk/examples/NNComponent/yolo.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,6 @@
with OakCamera() as oak:
color = oak.create_camera('color')
nn = oak.create_nn('yolo-v3-tf', color)
oak.visualize([nn, color], scale=2 / 3, fps=True) # 1080P -> 720P
oak.visualize([nn, color], scale=2 / 3, fps=True) # 1080P -> 720P
# oak.show_graph()
oak.start(blocking=True)
oak.start(blocking=True)
12 changes: 6 additions & 6 deletions depthai_sdk/examples/StereoComponent/stereo.py
Original file line number Diff line number Diff line change
@@ -1,17 +1,17 @@
import cv2

from depthai_sdk import OakCamera
from depthai_sdk.components.stereo_component import WLSLevel
from depthai_sdk.visualize.configs import StereoColor

with OakCamera() as oak:
stereo = oak.create_stereo('400p', fps=30)
stereo.configure_postprocessing(
colorize=StereoColor.RGB,
stereo = oak.create_stereo('800p', fps=30)
stereo.config_postprocessing(
colorize=StereoColor.GRAY,
colormap=cv2.COLORMAP_BONE,
wls_filter=True,
wls_lambda=8000,
wls_sigma=1.5
wls_level=WLSLevel.HIGH
)

oak.visualize(stereo.out.depth)
oak.visualize(stereo.out.disparity)
oak.start(blocking=True)
9 changes: 9 additions & 0 deletions depthai_sdk/examples/StereoComponent/stereo_encoded.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
import cv2

from depthai_sdk import OakCamera
from depthai_sdk.visualize.configs import StereoColor

with OakCamera() as oak:
stereo = oak.create_stereo('400p', fps=30, encode='h264')
oak.visualize(stereo.out.encoded)
oak.start(blocking=True)
6 changes: 3 additions & 3 deletions depthai_sdk/examples/mixed/sync_multiple_outputs.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,9 +3,9 @@
from depthai_sdk import OakCamera

with OakCamera() as oak:
color = oak.create_camera('color', encode='h264')
nn = oak.create_nn('mobilenet-ssd', color)
nn2 = oak.create_nn('face-detection-retail-0004', color)
color = oak.create_camera('color', encode='h264', name='color')
nn = oak.create_nn('mobilenet-ssd', color, name='mobilenet')
nn2 = oak.create_nn('face-detection-retail-0004', color, name='face-detection')
# oak.visualize([nn.out.main, nn.out.passthrough])
# oak.visualize(nn.out.spatials, scale=1 / 2)
def cb(msgs: Dict):
Expand Down
12 changes: 6 additions & 6 deletions depthai_sdk/examples/recording/encode.py
Original file line number Diff line number Diff line change
@@ -1,15 +1,15 @@
from depthai_sdk import OakCamera, RecordType

with OakCamera() as oak:
color = oak.create_camera('color', resolution='1080P', fps=20, encode='H265')
left = oak.create_camera('left', resolution='800p', fps=20, encode='H265')
right = oak.create_camera('right', resolution='800p', fps=20, encode='H265')
color = oak.create_camera('color', resolution='1080P', fps=10, encode='H265')
left = oak.create_camera('left', resolution='800p', fps=10, encode='H265')
right = oak.create_camera('right', resolution='800p', fps=10, encode='H265')

stereo = oak.create_stereo(left=left, right=right)
nn = oak.create_nn('mobilenet-ssd', color, spatial=stereo)

# Sync & save all (encoded) streams
oak.record([color.out.encoded, left.out.encoded, right.out.encoded], './', RecordType.VIDEO)
oak.visualize([nn])
oak.record([color.out.encoded, left.out.encoded, right.out.encoded], './record', RecordType.VIDEO)
oak.visualize([color.out.encoded], fps=True)

oak.start(blocking=True)
oak.start(blocking=True)
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

with OakCamera() as oak:
color = oak.create_camera('color', resolution='1080P', fps=30, encode='MJPEG')
color.config_color_camera(ispScale=(2,3)) # 720P
color.config_color_camera(isp_scale=(2, 3)) # 720P
left = oak.create_camera('left', resolution='400p', fps=30)
right = oak.create_camera('right', resolution='400p', fps=30)
stereo = oak.create_stereo(left=left, right=right)
Expand Down
11 changes: 5 additions & 6 deletions depthai_sdk/examples/recording/stereo_record.py
Original file line number Diff line number Diff line change
@@ -1,14 +1,13 @@
import cv2
import depthai

from depthai_sdk import OakCamera
from depthai_sdk.visualize.configs import StereoColor

with OakCamera() as oak:
color = oak.create_camera('color', resolution='1080p', fps=5)
stereo = oak.create_stereo('400p', fps=5)
color = oak.create_camera('color', resolution='1080p', fps=30)
stereo = oak.create_stereo('400p', fps=30)

stereo.configure_postprocessing(
stereo.config_postprocessing(
colorize=StereoColor.RGB,
colormap=cv2.COLORMAP_JET,
wls_filter=True,
Expand All @@ -18,9 +17,9 @@

# Record RGB and disparity to records folder
# Record doesn't work with visualize so the config is ignored
oak.record([color.out.main, stereo.out.disparity], 'records')
# oak.record([color.out.main, stereo.out.disparity], 'records')

# Record depth only
# oak.visualize(stereo.out.depth, record='depth.avi')
oak.visualize(stereo.out.disparity, record_path='disparity.avi')

oak.start(blocking=True)
2 changes: 0 additions & 2 deletions depthai_sdk/examples/replay/people-tracker.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,3 @@
import cv2
import depthai as dai
from depthai_sdk import OakCamera, ResizeMode

with OakCamera(replay="people-tracking-above-02") as oak:
Expand Down
4 changes: 3 additions & 1 deletion depthai_sdk/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,9 @@ numpy>=1.21; python_version >= "3.7"
opencv-contrib-python>4
blobconverter>=1.2.8
pytube>=12.1.0
depthai>=2.18.0
--extra-index-url https://artifacts.luxonis.com/artifactory/luxonis-python-snapshot-local/
# This specific commit is needed for the debug mode (oak.show_graph()). TODO: update when depthai has new release
depthai>=2.20.0.0
PyTurboJPEG==1.6.4
marshmallow==3.17.0
distinctipy
Expand Down
9 changes: 6 additions & 3 deletions depthai_sdk/setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,11 +3,14 @@
from setuptools import setup

with open('requirements.txt') as f:
required = f.readlines()
requirements = f.readlines()

install_requires=[requirement for requirement in requirements if '--' not in requirement]


setup(
name='depthai-sdk',
version='1.9.1.1',
version='1.9.2',
description='This package provides an abstraction of the DepthAI API library.',
long_description=io.open("README.md", encoding="utf-8").read(),
long_description_content_type="text/markdown",
Expand All @@ -18,7 +21,7 @@
license='MIT',
packages=['depthai_sdk'],
package_dir={"": "src"}, # https://stackoverflow.com/a/67238346/5494277
install_requires=required,
install_requires=install_requires,
include_package_data=True,
extras_require={
"visualize": ['PySide2',
Expand Down
34 changes: 0 additions & 34 deletions depthai_sdk/src/depthai_sdk/REPLAY.md

This file was deleted.

1 change: 1 addition & 0 deletions depthai_sdk/src/depthai_sdk/classes/nn_config.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
from marshmallow import Schema, fields

from depthai_sdk.classes.yolo_config import YoloConfig


Expand Down
11 changes: 6 additions & 5 deletions depthai_sdk/src/depthai_sdk/classes/nn_results.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ class GenericNNOutput:
def __init__(self, nn_data: NNData):
self.nn_data = nn_data


# First we have Object detection results, which are already standardized with dai.ImgDetections

@dataclass
Expand Down Expand Up @@ -59,7 +60,7 @@ class SemanticSegmentation(GenericNNOutput): # In core, extend from NNData

def __init__(self, nn_data: NNData, mask: List[np.ndarray]):
super().__init__(nn_data)
self.mask: List[np.ndarray] = mask
self.mask = mask


@dataclass
Expand All @@ -69,17 +70,16 @@ class ImgLandmarks(GenericNNOutput): # In core, extend from NNData
Examples: `human-pose-estimation-0001`, `openpose2`, `facial-landmarks-68`, `landmarks-regression-retail-0009`.
"""
landmarks: List[List[Any]]
pairs: List[Tuple[int, int]] # Pairs of landmarks, to draw lines between them
colors: List[Tuple[int, int, int]] # Color for each landmark (eg. both elbows are in the same color)

def __init__(self,
nn_data: NNData,
landmarks: List[List[Any]] = None,
landmarks_indices: List[List[int]] = None,
pairs: List[Tuple[int, int]] = None,
colors: List[Tuple[int, int, int]] = None):
super().__init__(nn_data)
self.landmarks = landmarks
self.landmarks_indices = landmarks_indices
self.pairs = pairs
self.colors = colors

Expand All @@ -89,9 +89,10 @@ class InstanceSegmentation(GenericNNOutput):
"""
Instance segmentation results, with a mask for each instance.
"""
# TODO: Finish this, add example models
masks: List[np.ndarray] # 2D np.array for each instance
labels: List[int] # Class label for each instance

def __init__(self, nn_data: NNData, masks: List[np.ndarray], labels: List[int]):
raise NotImplementedError('Instance segmentation not yet implemented')
super().__init__(nn_data)

Loading

0 comments on commit 5caf106

Please sign in to comment.