
Commit cf2ae36

Author: TheNoobInventor (committed)
Confirm aruco visualizer node tracks marker on robot as it moves
1 parent 36e84a0 commit cf2ae36

File tree

5 files changed: +59 -27 lines changed

lidarbot_aruco/README.md

Lines changed: 5 additions & 0 deletions
@@ -63,6 +63,11 @@ ros2 run usb_cam usb_cam_node_exe --ros-args --params-file ~/dev_ws/src/lidarbot
 ros2 run lidarbot_aruco aruco_trajectory_visualizer_node
 ```
 
+Launch file
+```bash
+ros2 launch lidarbot_aruco trajectory_visualizer_launch.py
+```
+
 TODO: add gif showing node in action
 
 Move this to main README
lidarbot_aruco/launch/trajectory_visualizer_launch.py (new file)

Lines changed: 34 additions & 0 deletions
@@ -0,0 +1,34 @@
+# Launch file to start the webcam usb driver and aruco trajectory visualizer node
+# to track the robot as it moves
+
+import os
+from ament_index_python.packages import get_package_share_directory
+
+from launch import LaunchDescription
+from launch_ros.actions import Node
+
+
+def generate_launch_description():
+
+    usb_cam_params = os.path.join(
+        get_package_share_directory("lidarbot_aruco"), "config", "params_1.yaml"
+    )
+
+    usb_cam_node = Node(
+        package="usb_cam",
+        executable="usb_cam_node_exe",
+        parameters=[usb_cam_params],
+    )
+
+    aruco_visualizer_node = Node(
+        package="lidarbot_aruco",
+        executable="aruco_trajectory_visualizer_node",
+        output="screen",
+    )
+
+    return LaunchDescription(
+        [
+            usb_cam_node,
+            aruco_visualizer_node,
+        ]
+    )
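Note: this launch file bundles the usb_cam driver (loading config/params_1.yaml from the package share directory) with the visualizer node, so both start with the single command documented in the README:

```bash
# Starts the usb_cam driver and the aruco trajectory visualizer node together
ros2 launch lidarbot_aruco trajectory_visualizer_launch.py
```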

lidarbot_aruco/lidarbot_aruco/aruco_trajectory_visualizer.py

Lines changed: 10 additions & 15 deletions
@@ -8,7 +8,6 @@
 # Import Python libraries
 import cv2
 import numpy as np
-import argparse
 
 # Import ROS2 libraries
 import rclpy
@@ -17,14 +16,6 @@
 from cv_bridge import CvBridge  # Package to convert between ROS and OpenCV Images
 from sensor_msgs.msg import Image  # Image is the message type
 
-# Construct argument parser and parse the arguments
-ap = argparse.ArgumentParser()
-ap.add_argument(
-    "-t", "--type", type=str, default="DICT_4X4_50", help="type of ArUco tag to detect"
-)
-
-args = vars(ap.parse_args())
-
 # The different ArUco dictionaries built into the OpenCV library
 ARUCO_DICT = {
     "DICT_4X4_50": cv2.aruco.DICT_4X4_50,
@@ -49,7 +40,7 @@
 
 class ArucoNode(Node):
     def __init__(self):
-        super().__init__("aruco_node")
+        super().__init__("aruco_visualizer_node")
 
         # Declare parameters
         self.declare_parameter("aruco_dictionary_name", "DICT_4X4_50")
@@ -83,7 +74,9 @@ def __init__(self):
         # Check that we have a valid ArUco marker
         if ARUCO_DICT.get(aruco_dictionary_name, None) is None:
             self.get_logger().info(
-                "[INFO] ArUCo tag of '{}' is not supported".format(args["type"])
+                "[INFO] ArUCo tag of '{}' is not supported".format(
+                    aruco_dictionary_name
+                )
             )
 
         # Load the camera parameters from the saved file
@@ -98,7 +91,9 @@ def __init__(self):
         self.get_logger().info(
             "[INFO] detecting '{}' markers...".format(aruco_dictionary_name)
         )
-        self.arucoDict = cv2.aruco.getPredefinedDictionary(ARUCO_DICT[args["type"]])
+        self.arucoDict = cv2.aruco.getPredefinedDictionary(
+            ARUCO_DICT[aruco_dictionary_name]
+        )
         self.arucoParams = cv2.aruco.DetectorParameters()
 
         # Create the subscriber. This subscriber will receive an Image
@@ -184,15 +179,15 @@ def main(args=None):
     rclpy.init(args=args)
 
     # Create the node
-    aruco_node = ArucoNode()
+    aruco_visualizer_node = ArucoNode()
 
     # Spin the node so the callback function is called
-    rclpy.spin(aruco_node)
+    rclpy.spin(aruco_visualizer_node)
 
     # Destroy the node explicitly
     # (optional - otherwise it will be done automatically
     # when the garbage collector destroys the node object)
-    aruco_node.destroy_node()
+    aruco_visualizer_node.destroy_node()
 
     # Shutdown the ROS client library for Python
     rclpy.shutdown()
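Note: with argparse removed, the marker dictionary is now selected through the aruco_dictionary_name ROS 2 parameter declared in the node (default DICT_4X4_50), so it can be overridden at run time instead of via a command-line flag. A minimal sketch, assuming the chosen key exists in the node's ARUCO_DICT map:

```bash
# DICT_5X5_100 is only an illustrative value; use any key present in ARUCO_DICT
ros2 run lidarbot_aruco aruco_trajectory_visualizer_node --ros-args -p aruco_dictionary_name:=DICT_5X5_100
```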

lidarbot_aruco/setup.py

Lines changed: 4 additions & 0 deletions
@@ -11,6 +11,10 @@
     data_files=[
        ("share/ament_index/resource_index/packages", ["resource/" + package_name]),
        ("share/" + package_name, ["package.xml"]),
+        (
+            os.path.join("share", package_name, "launch"),
+            glob(os.path.join("launch", "*launch.[pxy][yma]*")),
+        ),
        (
            os.path.join("share", package_name, "config"),
            glob(os.path.join("config", "*yaml")),
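Note: the added data_files entry installs anything matching launch/*launch.[pxy][yma]* into share/lidarbot_aruco/launch, which is where ros2 launch resolves launch files from. A rebuild is needed before the new launch file becomes visible; a sketch assuming a standard colcon workspace at ~/dev_ws (the path used in the README):

```bash
# Rebuild only this package so the launch file is copied into the install space
cd ~/dev_ws
colcon build --packages-select lidarbot_aruco
source install/setup.bash
ros2 launch lidarbot_aruco trajectory_visualizer_launch.py
```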

lidarbot_navigation/scripts/trajectory_visualizer.py

Lines changed: 6 additions & 12 deletions
@@ -3,6 +3,8 @@
 # Node to visualize the robot trajectory in Rviz.
 # Adapted from https://github.com/RBinsonB/trajectory_visualization
 
+# WIP
+
 import rclpy
 from rclpy.node import Node
 
@@ -22,7 +24,6 @@ def __init__(self, name):
         self.max_poses_param = self.declare_parameter("max_poses", 1000).value
         self.threshold_param = self.declare_parameter("threshold", 0.001).value
         self.frame_id_param = self.declare_parameter("frame_id", "map").value
-        # self.frame_id_param = self.declare_parameter("frame_id", "odom").value
 
         self.trajectory_path_msg = Path()
         self.previous_pose_position = Point()
@@ -34,6 +35,8 @@ def __init__(self, name):
         self.odom_sub = self.create_subscription(
             Path,
             "/transformed_global_plan",
+            # Odometry,
+            # /odometry/filtered,
             self.odom_callback,
             10,
         )
@@ -43,7 +46,7 @@ def odom_callback(self, odom_msg):
 
         # Process message position and add it to path
         # self.publish_trajectory_path(odom_msg.pose.pose.position)
-        self.publish_trajectory_path(odom_msg.pose.position)
+        self.publish_trajectory_path(odom_msg.poses[0].pose.position)
 
     # Add pose and publish trajectory path message
     def publish_trajectory_path(self, position):
@@ -55,21 +58,12 @@ def publish_trajectory_path(self, position):
 
         # Add current pose to path
         self.trajectory_path_msg.header.stamp = self.get_clock().now().to_msg()
-        # self.trajectory_path_msg.header.stamp.sec = (
-        #     self.get_clock().now().to_msg().sec()
-        # )
-        #
-        # self.trajectory_path_msg.header.stamp.nanosec = (
-        #     self.get_clock().now().to_msg().nanosec()
-        # )
-
         self.trajectory_path_msg.header.frame_id = self.frame_id_param
         pose_stamped_msg = PoseStamped()
         pose_stamped_msg.header.stamp = self.get_clock().now().to_msg()
         pose_stamped_msg.pose.position.x = position.x
         pose_stamped_msg.pose.position.y = position.y
-        pose_stamped_msg.pose.orientation.z = position.orientation.z
-        pose_stamped_msg.pose.orientation.w = position.orientation.w
+        pose_stamped_msg.pose.orientation.w = 1.0
 
         # If max number of poses in path has not been reach, just add pose to message
         if len(self.trajectory_path_msg.poses) < self.max_poses_param:
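Note: this script is still marked WIP. The subscription now takes a Path message from /transformed_global_plan (the Odometry source is left commented out), the callback reads the first pose in the path, and the published orientation is set to the identity quaternion since a Point carries no orientation. A quick, hedged sanity check, assuming the navigation stack that publishes this topic is running:

```bash
# Confirm the plan topic the node now subscribes to exists and has a publisher
ros2 topic info /transformed_global_plan
```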
