Attaching a Camera to the End-Effector of a Rigid Franka Arm in Genesis #416
-
Hello, I'm currently using the Franka Emika Panda robot arm within the Genesis framework, configured as a rigid model. I would like to attach a camera to the robot's end-effector so it moves in sync with the robot during simulation. Is there a recommended way in Genesis to rigidly attach a camera to a specific link within an MJCF model so that it follows the robot's motion? Any example code snippets, references, or tips would be greatly appreciated. Thank you!
-
We don't have an API for this yet, though the implementation should be quite straightforward; we are busy with a benchmarking report at the moment. For now, a simple workaround is to manually call `cam.set_pose()`, which should work just as well.
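A minimal sketch of that workaround (assuming a scene and a `franka` entity have already been built and a camera `cam` added, and reusing the Genesis helpers that appear in the full script below): after each `scene.step()`, read the world pose of the link the camera should follow, compose it with a fixed link-to-camera mount transform, and push the result to the camera.

```python
import numpy as np
from genesis.utils.geom import trans_quat_to_T, xyz_to_quat

# fixed link-to-camera mount transform; the offset and Euler angles here are illustrative
mount_T = trans_quat_to_T(np.array([0.03, 0.0, 0.03]), xyz_to_quat(np.array([185.0, 0.0, -90.0])))
end_effector = franka.get_link("hand")

for _ in range(1000):
    scene.step()
    # world pose of the hand link as a 4x4 matrix, composed with the fixed mount offset
    T_link = trans_quat_to_T(end_effector.get_pos(), end_effector.get_quat()).cpu().numpy()
    cam.set_pose(transform=T_link @ mount_T)
    cam.render(rgb=True, depth=True)
```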
-
I tried your workaround by manually calling `cam_0.set_pose()` after every `scene.step()`. Here is the full script:

```python
# BASE franka_cube.py
import numpy as np
import genesis as gs
from genesis.utils.geom import trans_quat_to_T, xyz_to_quat

########################## init ##########################
gs.init(backend=gs.gpu, precision="32")

########################## create a scene ##########################
scene = gs.Scene(
    viewer_options=gs.options.ViewerOptions(
        camera_pos=(3, -1, 1.5),
        camera_lookat=(0.0, 0.0, 0.5),
        camera_fov=30,
        res=(960, 640),
        max_FPS=60,
    ),
    sim_options=gs.options.SimOptions(
        dt=0.01,
    ),
    rigid_options=gs.options.RigidOptions(
        box_box_detection=True,
    ),
    show_viewer=True,
)

########################## entities ##########################
plane = scene.add_entity(
    gs.morphs.Plane(),
)
franka = scene.add_entity(
    gs.morphs.MJCF(file="xml/franka_emika_panda/panda.xml"),
)
cube = scene.add_entity(
    gs.morphs.Box(
        size=(0.04, 0.04, 0.04),
        pos=(0.65, 0.0, 0.02),
    )
)

########################## cameras ##########################
cam_0 = scene.add_camera(
    # res=(1280, 960),
    fov=30,
    GUI=True,
)

########################## build ##########################
scene.build()

# fixed transformation from the hand link to the camera
cam_0_transform = trans_quat_to_T(np.array([0.03, 0, 0.03]), xyz_to_quat(np.array([180 + 5, 0, -90])))

# for control
motors_dof = np.arange(7)
fingers_dof = np.arange(7, 9)
end_effector = franka.get_link("hand")

# initial position
qpos = franka.inverse_kinematics(
    link=end_effector,
    pos=np.array([0.65, 0.0, 0.3]),
    quat=np.array([0, 1, 0, 0]),
)
franka.set_qpos(qpos)
scene.step()
# re-attach the camera to the end-effector's current pose and render
cam_0.set_pose(transform=trans_quat_to_T(end_effector.get_pos(), end_effector.get_quat()).cpu().numpy() @ cam_0_transform)
cam_0.render(rgb=True, depth=True)

franka.control_dofs_position(qpos[:-2], motors_dof)
franka.control_dofs_position([0.04, 0.04], fingers_dof)
for i in range(100):
    print("init", i)
    scene.step()
    cam_0.set_pose(transform=trans_quat_to_T(end_effector.get_pos(), end_effector.get_quat()).cpu().numpy() @ cam_0_transform)
    cam_0.render(rgb=True, depth=True)

# reach
qpos = franka.inverse_kinematics(
    link=end_effector,
    pos=np.array([0.65, 0.0, 0.135]),
    quat=np.array([0, 1, 0, 0]),
)
franka.control_dofs_position(qpos[:-2], motors_dof)
for i in range(100):
    print("reach", i)
    scene.step()
    cam_0.set_pose(transform=trans_quat_to_T(end_effector.get_pos(), end_effector.get_quat()).cpu().numpy() @ cam_0_transform)
    cam_0.render(rgb=True, depth=True)

# grasp
finger_pos = -0.0
franka.control_dofs_position(np.array([finger_pos, finger_pos]), fingers_dof)
for i in range(100):
    print("grasp", i)
    scene.step()
    cam_0.set_pose(transform=trans_quat_to_T(end_effector.get_pos(), end_effector.get_quat()).cpu().numpy() @ cam_0_transform)
    cam_0.render(rgb=True, depth=True)

# lift
qpos = franka.inverse_kinematics(
    link=end_effector,
    pos=np.array([0.65, 0.0, 0.3]),
    quat=np.array([0, 1, 0, 0]),
)
franka.control_dofs_position(qpos[:-2], motors_dof)
franka.control_dofs_position(np.array([finger_pos, finger_pos]), fingers_dof)
for i in range(200):
    print("lift", i)
    scene.step()
    cam_0.set_pose(transform=trans_quat_to_T(end_effector.get_pos(), end_effector.get_quat()).cpu().numpy() @ cam_0_transform)
    cam_0.render(rgb=True, depth=True)

# drop into an interactive shell for inspection
import IPython; IPython.embed()
```
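Since the same two-line pose update is repeated after every step, it could be factored into a small helper. A minimal sketch; `update_wrist_cam` is a hypothetical name, not a Genesis API:

```python
def update_wrist_cam(cam, link, mount_T):
    # compose the link's current world pose with the fixed mount transform
    T_link = trans_quat_to_T(link.get_pos(), link.get_quat()).cpu().numpy()
    cam.set_pose(transform=T_link @ mount_T)

# usage inside any of the control loops above:
#   scene.step()
#   update_wrist_cam(cam_0, end_effector, cam_0_transform)
#   cam_0.render(rgb=True, depth=True)
```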