Skip to content

Commit 6e31751

Browse files
committed
update camera setup.py README
1 parent d209639 commit 6e31751

File tree

13 files changed

+103
-104
lines changed

13 files changed

+103
-104
lines changed

README.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -97,6 +97,8 @@ The Task Name in the following table corresponds to `--task TASK_NAME` in task s
9797

9898
1. If you get the following error: `ImportError: libpython3.8m.so.1.0: cannot open shared object file: No such file or directory`, it is also possible that you need to do `export LD_LIBRARY_PATH=/PATH/TO/LIBPYTHON/DIRECTORY` / `export LD_LIBRARY_PATH=/PATH/TO/CONDA/envs/YOUR_ENV_NAME/lib`. You can also try: `sudo apt install libpython3.8`.
9999

100+
2. If you get the following error: `AttributeError: module 'numpy' has no attribute 'float'.`, it is caused by the installed version of the `numpy` package. First uninstall `numpy` with `pip uninstall numpy`, then install the specific version with `pip install numpy==1.20.3`.
101+
100102
## Citing MQE ##
101103

102104
If our work has been helpful to you. please feel free to cite us:

mqe/envs/base/legged_robot.py

Lines changed: 6 additions & 42 deletions
Original file line numberDiff line numberDiff line change
@@ -868,6 +868,7 @@ def _create_envs(self):
868868
start_pose.p = gymapi.Vec3(*pos)
869869

870870
if colorize_robot:
871+
# TODO: self.gym.set_rigid_body_color()
871872
agent_handle = self.gym.create_actor(env_handle, robot_assets[j], start_pose, self.cfg.asset.name, i, self.cfg.asset.self_collisions, 0)
872873
else:
873874
agent_handle = self.gym.create_actor(env_handle, robot_asset, start_pose, self.cfg.asset.name, i, self.cfg.asset.self_collisions, 0)
@@ -923,10 +924,10 @@ def _create_envs(self):
923924

924925
def _render_headless(self):
925926
if self.record_now and self.complete_video_frames is not None and len(self.complete_video_frames) == 0:
926-
bx, by, bz = self.root_states[self.cfg.env.record_actor_id, 0], self.root_states[self.cfg.env.record_actor_id, 1], self.root_states[self.cfg.env.record_actor_id, 2]
927-
target_loc = [bx, by , bz]
928-
cam_distance = [0, -1.0, 1.0]
929-
self.rendering_camera.set_position(target_loc, cam_distance)
927+
# bx, by, bz = self.root_states[self.cfg.env.record_actor_id, 0], self.root_states[self.cfg.env.record_actor_id, 1], self.root_states[self.cfg.env.record_actor_id, 2]
928+
# target_loc = [bx, by , bz]
929+
# cam_distance = [0, -1.0, 1.0]
930+
self.rendering_camera.set_position(self.cfg.viewer.pos, self.cfg.viewer.lookat)
930931
self.video_frame = self.rendering_camera.get_observation()
931932
self.video_frames.append(self.video_frame)
932933

@@ -936,6 +937,7 @@ def start_recording(self):
936937
self.record_now = True
937938

938939
def pause_recording(self):
940+
print("pause recording")
939941
self.complete_video_frames = []
940942
self.video_frames = []
941943
self.record_now = False
@@ -1058,44 +1060,6 @@ def _init_height_points(self):
10581060
points[:, :, 1] = grid_y.flatten()
10591061
return points
10601062

1061-
# def _get_heights(self, env_ids=None):
1062-
# """ Samples heights of the terrain at required points around each robot.
1063-
# The points are offset by the base's position and rotated by the base's yaw
1064-
1065-
# Args:
1066-
# env_ids (List[int], optional): Subset of environments for which to return the heights. Defaults to None.
1067-
1068-
# Raises:
1069-
# NameError: [description]
1070-
1071-
# Returns:
1072-
# [type]: [description]
1073-
# """
1074-
# if self.cfg.terrain.mesh_type == 'plane':
1075-
# return torch.zeros(self.num_envs, self.num_height_points, device=self.device, requires_grad=False)
1076-
# elif self.cfg.terrain.mesh_type == 'none':
1077-
# raise NameError("Can't measure height with terrain mesh type 'none'")
1078-
1079-
# if env_ids:
1080-
# points = quat_apply_yaw(self.base_quat[env_ids].repeat(1, self.num_height_points), self.height_points[env_ids]) + (self.root_states[env_ids, :3]).unsqueeze(1)
1081-
# else:
1082-
# points = quat_apply_yaw(self.base_quat.repeat(1, self.num_height_points), self.height_points) + (self.root_states[:, :3]).unsqueeze(1)
1083-
1084-
# points += self.terrain.cfg.border_size
1085-
# points = (points/self.terrain.cfg.horizontal_scale).long()
1086-
# px = points[:, :, 0].view(-1)
1087-
# py = points[:, :, 1].view(-1)
1088-
# px = torch.clip(px, 0, self.height_samples.shape[0]-2)
1089-
# py = torch.clip(py, 0, self.height_samples.shape[1]-2)
1090-
1091-
# heights1 = self.height_samples[px, py]
1092-
# heights2 = self.height_samples[px+1, py]
1093-
# heights3 = self.height_samples[px, py+1]
1094-
# heights = torch.min(heights1, heights2)
1095-
# heights = torch.min(heights, heights3)
1096-
1097-
# return heights.view(self.num_envs, -1) * self.terrain.cfg.vertical_scale
1098-
10991063
def _fill_extras(self, env_ids):
11001064
self.extras["episode"] = {}
11011065
for key in self.episode_sums.keys():

mqe/envs/configs/go1_door_config.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@ class Go1DoorCfg(Go1Cfg):
66

77
class env(Go1Cfg.env):
88
env_name = "go1door"
9-
num_envs = 1 # 4096
9+
num_envs = 1
1010
num_agents = 2
1111
num_npcs = 1
1212
num_actions_npc = 1
@@ -22,8 +22,8 @@ class asset(Go1Cfg.asset):
2222

2323
class terrain(Go1Cfg.terrain):
2424

25-
num_rows = 1 # 20
26-
num_cols = 1 # 50
25+
num_rows = 1
26+
num_cols = 1
2727

2828
BarrierTrack_kwargs = merge_dict(Go1Cfg.terrain.BarrierTrack_kwargs, dict(
2929
options = [

mqe/envs/configs/go1_football_config.py

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@ class Go1FootballDefenderCfg(Go1Cfg):
66

77
class env(Go1Cfg.env):
88
env_name = "go1football"
9-
num_envs = 1 # 4096
9+
num_envs = 1
1010
num_agents = 3
1111
num_npcs = 1
1212
episode_length_s = 20
@@ -21,8 +21,8 @@ class asset(Go1Cfg.asset):
2121

2222
class terrain(Go1Cfg.terrain):
2323

24-
num_rows = 1 # 20
25-
num_cols = 1 # 50
24+
num_rows = 1
25+
num_cols = 1
2626

2727
BarrierTrack_kwargs = merge_dict(Go1Cfg.terrain.BarrierTrack_kwargs, dict(
2828
options = [
@@ -135,10 +135,10 @@ class Go1Football1vs1Cfg(Go1Cfg):
135135

136136
class env(Go1Cfg.env):
137137
env_name = "go1football"
138-
num_envs = 1 # 4096
138+
num_envs = 1
139139
num_agents = 2
140140
num_npcs = 1
141-
episode_length_s = 20
141+
episode_length_s = 1
142142

143143
class asset(Go1Cfg.asset):
144144
file_npc = "{LEGGED_GYM_ROOT_DIR}/resources/objects/ball.urdf"
@@ -150,8 +150,8 @@ class asset(Go1Cfg.asset):
150150

151151
class terrain(Go1Cfg.terrain):
152152

153-
num_rows = 1 # 20
154-
num_cols = 1 # 50
153+
num_rows = 1
154+
num_cols = 1
155155

156156
BarrierTrack_kwargs = merge_dict(Go1Cfg.terrain.BarrierTrack_kwargs, dict(
157157
options = [
@@ -248,7 +248,7 @@ class Go1Football2vs2Cfg(Go1Cfg):
248248

249249
class env(Go1Cfg.env):
250250
env_name = "go1football"
251-
num_envs = 1 # 4096
251+
num_envs = 1
252252
num_agents = 4
253253
num_npcs = 1
254254
episode_length_s = 20
@@ -263,8 +263,8 @@ class asset(Go1Cfg.asset):
263263

264264
class terrain(Go1Cfg.terrain):
265265

266-
num_rows = 1 # 20
267-
num_cols = 1 # 50
266+
num_rows = 1
267+
num_cols = 1
268268

269269
BarrierTrack_kwargs = merge_dict(Go1Cfg.terrain.BarrierTrack_kwargs, dict(
270270
options = [

mqe/envs/configs/go1_gate_config.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -6,16 +6,16 @@ class Go1GateCfg(Go1Cfg):
66

77
class env(Go1Cfg.env):
88
env_name = "go1gate"
9-
num_envs = 1 # 4096
9+
num_envs = 1
1010
num_agents = 2
1111
episode_length_s = 10 # episode length in seconds
1212

1313
class terrain(Go1Cfg.terrain):
1414

1515
mesh_type = "trimesh"
1616
selected = "BarrierTrack"
17-
num_rows = 1 # 20
18-
num_cols = 1 # 50
17+
num_rows = 1
18+
num_cols = 1
1919
max_init_terrain_level = 2
2020
border_size = 1
2121
slope_treshold = 20.

mqe/envs/configs/go1_seesaw_config.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -6,11 +6,11 @@ class Go1SeesawCfg(Go1Cfg):
66

77
class env(Go1Cfg.env):
88
env_name = "go1seesaw"
9-
num_envs = 1 # 4096
9+
num_envs = 1
1010
num_agents = 2
1111
num_npcs = 1
1212
num_actions_npc = 1
13-
episode_length_s = 15 # episode length in seconds
13+
episode_length_s = 1 # episode length in seconds
1414

1515
class asset(Go1Cfg.asset):
1616
file_npc = "{LEGGED_GYM_ROOT_DIR}/resources/objects/seesaw.urdf"
@@ -21,8 +21,8 @@ class asset(Go1Cfg.asset):
2121

2222
class terrain(Go1Cfg.terrain):
2323

24-
num_rows = 1 # 20
25-
num_cols = 1 # 50
24+
num_rows = 1
25+
num_cols = 1
2626

2727
BarrierTrack_kwargs = merge_dict(Go1Cfg.terrain.BarrierTrack_kwargs, dict(
2828
options = [
@@ -132,5 +132,5 @@ class scales:
132132
# exceed_torque_limits_i = -2e-1
133133

134134
class viewer(Go1Cfg.viewer):
135-
pos = [0., -2., 4.] # [m]
135+
pos = [-1., -3., 5.] # [m]
136136
lookat = [4., 2., 0.] # [m]

mqe/envs/configs/go1_sheep_config.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@ class SingleSheepCfg(Go1Cfg):
66

77
class env(Go1Cfg.env):
88
env_name = "go1sheep"
9-
num_envs = 1 # 4096
9+
num_envs = 1
1010
num_agents = 2
1111
num_npcs = 1
1212
episode_length_s = 15
@@ -26,8 +26,8 @@ class asset(Go1Cfg.asset):
2626

2727
class terrain(Go1Cfg.terrain):
2828

29-
num_rows = 1 # 20
30-
num_cols = 1 # 50
29+
num_rows = 1
30+
num_cols = 1
3131

3232
BarrierTrack_kwargs = merge_dict(Go1Cfg.terrain.BarrierTrack_kwargs, dict(
3333
options = [
@@ -133,7 +133,7 @@ class NineSheepCfg(Go1Cfg):
133133

134134
class env(Go1Cfg.env):
135135
env_name = "go1sheep"
136-
num_envs = 35 # 4096
136+
num_envs = 35
137137
num_agents = 2
138138
num_npcs = 9
139139
episode_length_s = 15

mqe/envs/utils.py

Lines changed: 19 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,12 @@
11
# environments
2+
from mqe.envs.field.legged_robot_field import LeggedRobotField
23
from mqe.envs.go1.go1 import Go1
34
from mqe.envs.npc.go1_sheep import Go1Sheep
45
from mqe.envs.npc.go1_object import Go1Object
56
from mqe.envs.npc.go1_football_defender import Go1FootballDefender
67

78
# configs
9+
from mqe.envs.field.legged_robot_field_config import LeggedRobotFieldCfg
810
from mqe.envs.configs.go1_plane_config import Go1PlaneCfg
911
from mqe.envs.configs.go1_gate_config import Go1GateCfg
1012
from mqe.envs.configs.go1_sheep_config import SingleSheepCfg, NineSheepCfg
@@ -23,6 +25,8 @@
2325

2426
from mqe.utils import get_args, make_env
2527

28+
from typing import Tuple
29+
2630
ENV_DICT = {
2731
"go1plane": {
2832
"class": Go1,
@@ -76,7 +80,7 @@
7680
# },
7781
}
7882

79-
def make_mqe_env(env_name, args=None, custom_cfg=None):
83+
def make_mqe_env(env_name: str, args=None, custom_cfg=None) -> Tuple[LeggedRobotField, LeggedRobotFieldCfg]:
8084

8185
env_dict = ENV_DICT[env_name]
8286

@@ -86,4 +90,17 @@ def make_mqe_env(env_name, args=None, custom_cfg=None):
8690
env, env_cfg = make_env(env_dict["class"], env_dict["config"], args)
8791
env = env_dict["wrapper"](env)
8892

89-
return env, env_cfg
93+
return env, env_cfg
94+
95+
def custom_cfg(args):
96+
97+
def fn(cfg:LeggedRobotFieldCfg):
98+
99+
if getattr(args, "num_envs", None) is not None:
100+
cfg.env.num_envs = args.num_envs
101+
102+
cfg.env.record_video = args.record_video
103+
104+
return cfg
105+
106+
return fn

mqe/utils/helpers.py

Lines changed: 6 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -282,24 +282,19 @@ def __init__(self, env):
282282
camera_props.width = self.env.cfg.env.recording_width_px
283283
camera_props.height = self.env.cfg.env.recording_height_px
284284
self.rendering_camera = self.env.gym.create_camera_sensor(self.env.envs[0], camera_props)
285-
self.env.gym.set_camera_location(self.rendering_camera, self.env.envs[0], gymapi.Vec3(1.5, 1, 3.0),
286-
gymapi.Vec3(0, 0, 0))
287285

288-
def set_position(self, target_loc=None, cam_distance=None):
289-
if cam_distance is None:
290-
cam_distance = [0, -1.0, 1.0]
291-
if target_loc is None:
286+
def set_position(self, pos=None, lookat=None):
287+
if pos is None:
292288
bx, by, bz = self.env.root_states[0, 0], self.env.root_states[0, 1], self.env.root_states[0, 2]
293-
target_loc = [bx, by, bz]
294-
self.env.gym.set_camera_location(self.rendering_camera, self.env.envs[0], gymapi.Vec3(target_loc[0] + cam_distance[0],
295-
target_loc[1] + cam_distance[1],
296-
target_loc[2] + cam_distance[2]),
297-
gymapi.Vec3(target_loc[0], target_loc[1], target_loc[2]))
289+
lookat = [bx, by, bz]
290+
pos = [bx, by - 1.0, bz + 1.0]
291+
self.env.gym.set_camera_location(self.rendering_camera, self.env.envs[0], gymapi.Vec3(*pos), gymapi.Vec3(*lookat))
298292

299293
def get_observation(self, env_ids = None):
300294
self.env.gym.step_graphics(self.env.sim)
301295
self.env.gym.render_all_camera_sensors(self.env.sim)
302296
img = self.env.gym.get_camera_image(self.env.sim, self.env.envs[0], self.rendering_camera, gymapi.IMAGE_COLOR)
297+
_img = self.env.gym.get_camera_image(self.env.sim, self.env.envs[0], self.rendering_camera, gymapi.IMAGE_COLOR) # DO NOT REMOVE, to fix unknown issue
303298
w, h = img.shape
304299
return img.reshape([w, h // 4, 4])
305300

@@ -309,8 +304,6 @@ def __init__(self, env, attached_robot_asset=None):
309304
self.env = env
310305
self.attached_robot_asset = attached_robot_asset
311306

312-
313-
314307
def initialize(self, camera_label, camera_pose, camera_rpy, env_ids=None):
315308
if env_ids is None: env_ids = range(self.env.num_envs)
316309

@@ -319,7 +312,6 @@ def initialize(self, camera_label, camera_pose, camera_rpy, env_ids=None):
319312
camera_props.height = self.env.cfg.perception.image_height
320313
camera_props.horizontal_fov = self.env.cfg.perception.image_horizontal_fov
321314

322-
323315
self.cams = []
324316

325317
for env_id in env_ids:

openrl_ws/train.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11

2-
from openrl_ws.utils import make_env, get_args, custom_cfg, MATWrapper
2+
from openrl_ws.utils import make_env, get_args, MATWrapper
3+
from mqe.envs.utils import custom_cfg
34
from openrl.utils.logger import Logger
45

56
from datetime import datetime

0 commit comments

Comments
 (0)