From 11464c4887b6ac8d5ad4b82db026404e78eb12f5 Mon Sep 17 00:00:00 2001 From: Sanjay Rijal <37138338+zovelsanj@users.noreply.github.com> Date: Wed, 9 Aug 2023 15:31:37 +0545 Subject: [PATCH 01/12] save poses and images for eqrectangle --- examples/demo_runner.py | 36 ++++++++++++++++++++++++------------ examples/example.py | 41 +++++++++++++++++++++++++++++++++++------ 2 files changed, 59 insertions(+), 18 deletions(-) diff --git a/examples/demo_runner.py b/examples/demo_runner.py index 42219c0cfe..52b623a251 100644 --- a/examples/demo_runner.py +++ b/examples/demo_runner.py @@ -36,16 +36,17 @@ class ABTestGroup(Enum): class DemoRunner: - def __init__(self, sim_settings, simulator_demo_type): + def __init__(self, sim_settings, simulator_demo_type, out_path=None): if simulator_demo_type == DemoRunnerType.EXAMPLE: self.set_sim_settings(sim_settings) self._demo_type = simulator_demo_type + self.out_path = out_path def set_sim_settings(self, sim_settings): self._sim_settings = sim_settings.copy() def save_color_observation(self, obs, total_frames): - color_obs = obs["color_sensor"] + color_obs = obs["equirect_rgba_sensor"] color_img = Image.fromarray(color_obs, mode="RGBA") if self._demo_type == DemoRunnerType.AB_TEST: if self._group_id == ABTestGroup.CONTROL: @@ -53,10 +54,17 @@ def save_color_observation(self, obs, total_frames): else: color_img.save("test.rgba.test.%05d.png" % total_frames) else: - color_img.save("test.rgba.%05d.png" % total_frames) + if self.out_path is None: + color_img.save("test.rgba.%05d.png" % total_frames) + else: + image_path = os.path.join(self.out_path, "images") + if not os.path.exists(image_path): + print(f"{os.path.join(self.out_path, 'images')} doesn't exist, so creating one.") + os.mkdir(image_path) + color_img.save(os.path.join(image_path,"test.rgba.%05d.png" % total_frames)) def save_semantic_observation(self, obs, total_frames): - semantic_obs = obs["semantic_sensor"] + semantic_obs = obs["equirect_semantic_sensor"] semantic_img = Image.new("P", (semantic_obs.shape[1], semantic_obs.shape[0])) semantic_img.putpalette(d3_40_colors_rgb.flatten()) semantic_img.putdata((semantic_obs.flatten() % 40).astype(np.uint8)) @@ -69,7 +77,7 @@ def save_semantic_observation(self, obs, total_frames): semantic_img.save("test.sem.%05d.png" % total_frames) def save_depth_observation(self, obs, total_frames): - depth_obs = obs["depth_sensor"] + depth_obs = obs["equirect_depth_sensor"] depth_img = Image.fromarray((depth_obs / 10 * 255).astype(np.uint8), mode="L") if self._demo_type == DemoRunnerType.AB_TEST: if self._group_id == ABTestGroup.CONTROL: @@ -80,7 +88,7 @@ def save_depth_observation(self, obs, total_frames): depth_img.save("test.depth.%05d.png" % total_frames) def output_semantic_mask_stats(self, obs, total_frames): - semantic_obs = obs["semantic_sensor"] + semantic_obs = obs["equirect_semantic_sensor"] counts = np.bincount(semantic_obs.flatten()) total_count = np.sum(counts) print(f"Pixel statistics for frame {total_frames}") @@ -212,6 +220,7 @@ def do_time_steps(self): # get the rigid object manager, which provides direct # access to objects rigid_obj_mgr = self._sim.get_rigid_object_manager() + poses = [] total_sim_step_time = 0.0 total_frames = 0 @@ -259,15 +268,15 @@ def do_time_steps(self): total_sim_step_time += self._sim._previous_step_time if self._sim_settings["save_png"]: - if self._sim_settings["color_sensor"]: + if self._sim_settings["equirect_rgba_sensor"]: self.save_color_observation(observations, total_frames) - if self._sim_settings["depth_sensor"]: + 
if self._sim_settings["equirect_depth_sensor"]: self.save_depth_observation(observations, total_frames) - if self._sim_settings["semantic_sensor"]: + if self._sim_settings["equirect_semantic_sensor"]: self.save_semantic_observation(observations, total_frames) state = self._sim.last_state() - + poses.append([state.position, state.rotation]) if not self._sim_settings["silent"]: print("position\t", state.position, "\t", "rotation\t", state.rotation) @@ -283,12 +292,13 @@ def do_time_steps(self): print("len(action_path)", len(self._action_path)) if ( - self._sim_settings["semantic_sensor"] + self._sim_settings["equirect_semantic_sensor"] and self._sim_settings["print_semantic_mask_stats"] ): self.output_semantic_mask_stats(observations, total_frames) - total_frames += 1 + print(f"skipping frames by {self._sim_settings['skip']}, total_frames = {total_frames}") + total_frames += self._sim_settings["skip"] end_time = time.time() perf = {"total_time": end_time - start_time} @@ -296,6 +306,7 @@ def do_time_steps(self): perf["fps"] = 1.0 / perf["frame_time"] perf["time_per_step"] = time_per_step perf["avg_sim_step_time"] = total_sim_step_time / total_frames + perf["pose"] = poses return perf @@ -353,6 +364,7 @@ def init_common(self): def _bench_target(self, _idx=0): self.init_common() + print("inside _bench_target") best_perf = None for _ in range(3): diff --git a/examples/example.py b/examples/example.py index 3a86ddac71..be06d63d28 100755 --- a/examples/example.py +++ b/examples/example.py @@ -6,7 +6,9 @@ import argparse - +import os +import numpy as np +from habitat_sim.utils.common import to_campose import demo_runner as dr parser = argparse.ArgumentParser() @@ -28,6 +30,8 @@ parser.add_argument("--silent", action="store_true") parser.add_argument("--test_fps_regression", type=int, default=0) parser.add_argument("--enable_physics", action="store_true") +parser.add_argument("--out_path", default="./data", required=False, help="path to save poses as matrix") +parser.add_argument("--skip", type=int, default=1, required=False, help="num of frames to skip") parser.add_argument( "--physics_config_file", type=str, @@ -45,9 +49,13 @@ def make_settings(): settings["scene"] = args.scene settings["save_png"] = args.save_png settings["sensor_height"] = args.sensor_height - settings["color_sensor"] = not args.disable_color_sensor - settings["semantic_sensor"] = args.semantic_sensor - settings["depth_sensor"] = args.depth_sensor + # settings["color_sensor"] = not args.disable_color_sensor + # settings["semantic_sensor"] = args.semantic_sensor + # settings["depth_sensor"] = args.depth_sensor + settings["equirect_rgba_sensor"] = not args.disable_color_sensor + settings["equirect_semantic_sensor"] = args.semantic_sensor + settings["equirect_depth_sensor"] = args.depth_sensor + settings["print_semantic_scene"] = args.print_semantic_scene settings["print_semantic_mask_stats"] = args.print_semantic_mask_stats settings["compute_shortest_path"] = args.compute_shortest_path @@ -58,18 +66,32 @@ def make_settings(): settings["physics_config_file"] = args.physics_config_file settings["frustum_culling"] = not args.disable_frustum_culling settings["recompute_navmesh"] = args.recompute_navmesh + settings["skip"] = args.skip return settings +def save_poses(perfs, pose_file): + transformed_R = np.array([[0 for _ in range(16)]]).reshape(1, 16) + for pose in perfs[0]["pose"]: + T_cal = to_campose(pose[1], pose[0]) + T_cal = T_cal.reshape(1, 16) + transformed_R = np.vstack((transformed_R, T_cal)) + + transformed_R = 
np.delete(transformed_R, 0, 0) + transformed_R = np.matrix(transformed_R) + with open(pose_file, "wb") as f: + for line in transformed_R: + np.savetxt(f, line, fmt = "%f") + print(f"poses sucessfully saved at {pose_file}") + settings = make_settings() perfs = [] for _i in range(1): - demo_runner = dr.DemoRunner(settings, dr.DemoRunnerType.EXAMPLE) + demo_runner = dr.DemoRunner(settings, dr.DemoRunnerType.EXAMPLE, out_path=args.out_path) perf = demo_runner.example() perfs.append(perf) - print(" ========================= Performance ======================== ") print( " %d x %d, total time %0.2f s," @@ -82,6 +104,13 @@ def make_settings(): # "FPS is below regression threshold: %0.1f < %0.1f" # % (perf["fps"], args.test_fps_regression) # ) + +if not os.path.exists(args.out_path): + print(f"{args.out_path} doesn't exist, so creating one.") + os.mkdir(args.out_path) + +save_poses(perfs, os.path.join(args.out_path, "transformations.txt")) + if len(perfs) > 1: avg_fps = 0 avg_frame_time = 0 From e772a25505245fbd129b5e3b9ea79188f71e80d3 Mon Sep 17 00:00:00 2001 From: Sanjay Rijal <37138338+zovelsanj@users.noreply.github.com> Date: Wed, 9 Aug 2023 15:35:16 +0545 Subject: [PATCH 02/12] added quaternion-to-rotation --- src_python/habitat_sim/utils/common.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src_python/habitat_sim/utils/common.py b/src_python/habitat_sim/utils/common.py index f6f309a0f5..e1ae9393d4 100755 --- a/src_python/habitat_sim/utils/common.py +++ b/src_python/habitat_sim/utils/common.py @@ -167,7 +167,13 @@ def random_quaternion(): ) return mn.Quaternion(qAxis, math.sqrt(1 - u[0]) * math.sin(2 * math.pi * u[1])) - +def to_campose(q: qt.quaternion, t: np.ndarray): + R = qt.as_rotation_matrix(q) + T = np.eye(4) + T[:3, :3] = R + T[:3, 3] = t + return T + def download_and_unzip(file_url, local_directory): response = urlopen(file_url) zipfile = ZipFile(BytesIO(response.read())) From d840646b57d54ba2b66977715410c562c5aae7f7 Mon Sep 17 00:00:00 2001 From: Sanjay Rijal <37138338+zovelsanj@users.noreply.github.com> Date: Wed, 9 Aug 2023 15:44:46 +0545 Subject: [PATCH 03/12] added info on customized method --- README.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/README.md b/README.md index 985fe0824b..17378c7d8f 100644 --- a/README.md +++ b/README.md @@ -9,6 +9,14 @@ [![Supports Bullet](https://img.shields.io/static/v1?label=supports&message=Bullet%20Physics&color=informational&link=https://opensource.google/projects/bullet3)](https://opensource.google/projects/bullet3) [![Twitter Follow](https://img.shields.io/twitter/follow/ai_habitat?style=social)](https://twitter.com/ai_habitat) +## NOTE: +This code is slightly modified from original [Habitat-Sim](https://github.com/facebookresearch/habitat-sim) to obtain camera poses as transformation matrix and images as equirectangular projection. +Moreover, `skip` and `out_path` arguments are added to skip the frames and save outputs respectively. The modifications are made only for `examples/`. 
+You may run this version of code as follows: +``` +python3 examples/example.py --scene /path/to/data/scene_datasets/habitat-test-scenes/skokloster-castle.glb --save_png --max_frames 25 --height 2048 --width 4096 --skip 5 --out_path /path/to/data/scene_datasets/habitat-test-scenes/skokloster-castle/ +``` + # Habitat-Sim A high-performance physics-enabled 3D simulator with support for: From f1f7ceabbcb909fb6a53d85ccddaee61de6900cf Mon Sep 17 00:00:00 2001 From: Sanjay Rijal <37138338+zovelsanj@users.noreply.github.com> Date: Wed, 9 Aug 2023 16:02:16 +0545 Subject: [PATCH 04/12] fixed out_dir code position --- examples/example.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/examples/example.py b/examples/example.py index be06d63d28..f6bbba2782 100755 --- a/examples/example.py +++ b/examples/example.py @@ -87,6 +87,10 @@ def save_poses(perfs, pose_file): settings = make_settings() +if not os.path.exists(args.out_path): + print(f"{args.out_path} doesn't exist, so creating one.") + os.mkdir(args.out_path) + perfs = [] for _i in range(1): demo_runner = dr.DemoRunner(settings, dr.DemoRunnerType.EXAMPLE, out_path=args.out_path) @@ -105,10 +109,6 @@ def save_poses(perfs, pose_file): # % (perf["fps"], args.test_fps_regression) # ) -if not os.path.exists(args.out_path): - print(f"{args.out_path} doesn't exist, so creating one.") - os.mkdir(args.out_path) - save_poses(perfs, os.path.join(args.out_path, "transformations.txt")) if len(perfs) > 1: From b6b0eda46e75c295c08945ec868fe1cf408ad72b Mon Sep 17 00:00:00 2001 From: Sanjay Rijal <37138338+zovelsanj@users.noreply.github.com> Date: Wed, 9 Aug 2023 18:16:54 +0545 Subject: [PATCH 05/12] fixed frame-skip --- examples/demo_runner.py | 60 ++++++++++++++++++++--------------------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/examples/demo_runner.py b/examples/demo_runner.py index 52b623a251..56a7485f9f 100644 --- a/examples/demo_runner.py +++ b/examples/demo_runner.py @@ -246,7 +246,7 @@ def do_time_steps(self): print("action", action) start_step_time = time.time() - + # apply kinematic or dynamic control to all objects based on their MotionType if self._sim_settings["enable_physics"]: obj_names = rigid_obj_mgr.get_object_handles() @@ -266,39 +266,39 @@ def do_time_steps(self): # get simulation step time without sensor observations total_sim_step_time += self._sim._previous_step_time + if total_frames % self._sim_settings["skip"] == 0: + if self._sim_settings["save_png"]: + if self._sim_settings["equirect_rgba_sensor"]: + self.save_color_observation(observations, total_frames) + if self._sim_settings["equirect_depth_sensor"]: + self.save_depth_observation(observations, total_frames) + if self._sim_settings["equirect_semantic_sensor"]: + self.save_semantic_observation(observations, total_frames) + + state = self._sim.last_state() + poses.append([state.position, state.rotation]) + if not self._sim_settings["silent"]: + print("position\t", state.position, "\t", "rotation\t", state.rotation) + + if self._sim_settings["compute_shortest_path"]: + self.compute_shortest_path( + state.position, self._sim_settings["goal_position"] + ) - if self._sim_settings["save_png"]: - if self._sim_settings["equirect_rgba_sensor"]: - self.save_color_observation(observations, total_frames) - if self._sim_settings["equirect_depth_sensor"]: - self.save_depth_observation(observations, total_frames) - if self._sim_settings["equirect_semantic_sensor"]: - self.save_semantic_observation(observations, total_frames) - - state = 
self._sim.last_state() - poses.append([state.position, state.rotation]) - if not self._sim_settings["silent"]: - print("position\t", state.position, "\t", "rotation\t", state.rotation) - - if self._sim_settings["compute_shortest_path"]: - self.compute_shortest_path( - state.position, self._sim_settings["goal_position"] - ) - - if self._sim_settings["compute_action_shortest_path"]: - self._action_path = self.greedy_follower.find_path( - self._sim_settings["goal_position"] - ) - print("len(action_path)", len(self._action_path)) + if self._sim_settings["compute_action_shortest_path"]: + self._action_path = self.greedy_follower.find_path( + self._sim_settings["goal_position"] + ) + print("len(action_path)", len(self._action_path)) - if ( - self._sim_settings["equirect_semantic_sensor"] - and self._sim_settings["print_semantic_mask_stats"] - ): - self.output_semantic_mask_stats(observations, total_frames) + if ( + self._sim_settings["equirect_semantic_sensor"] + and self._sim_settings["print_semantic_mask_stats"] + ): + self.output_semantic_mask_stats(observations, total_frames) print(f"skipping frames by {self._sim_settings['skip']}, total_frames = {total_frames}") - total_frames += self._sim_settings["skip"] + total_frames += 1 end_time = time.time() perf = {"total_time": end_time - start_time} From d82a2351ecd30dcebf211b7146df9e1f392fc47c Mon Sep 17 00:00:00 2001 From: Sanjay Rijal <37138338+zovelsanj@users.noreply.github.com> Date: Thu, 7 Sep 2023 18:44:41 +0545 Subject: [PATCH 06/12] customized to save curr view and pose - customized the code to save the current view of simulator as image and the current state of the agent as transformation matrix. - added the above feature when pressing key 'R'. --- examples/viewer.py | 44 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/examples/viewer.py b/examples/viewer.py index 8c42f70b33..7f117a4870 100644 --- a/examples/viewer.py +++ b/examples/viewer.py @@ -25,6 +25,8 @@ from habitat_sim.utils.common import quat_from_angle_axis from habitat_sim.utils.settings import default_sim_settings, make_cfg +from habitat_sim.utils.common import to_campose +from PIL import Image class HabitatSimInteractiveViewer(Application): # the maximum number of chars displayable in the app window @@ -444,6 +446,7 @@ def move_and_look(self, repetitions: int) -> None: for _ in range(int(repetitions)): [agent.act(x) for x in action_queue] + self.state = agent.get_state() #get current state of agent # update the grabber transform when our agent is moved if self.mouse_grabber is not None: # update location of grabbed object @@ -457,6 +460,35 @@ def invert_gravity(self) -> None: gravity: mn.Vector3 = self.sim.get_gravity() * -1 self.sim.set_gravity(gravity) + def save_pose_observation(self) -> None: + """ + Transforms Rotation quaternion and translation vector to 4x4 transformation matrix, + and saves the flattened matrix as txt + """ + T_mat = to_campose(self.state.rotation, self.state.position) + T_vec = T_mat.reshape(1, 16) + pose_path = os.path.join(self.sim_settings['out_path'], "transformations.txt") + + if os.path.exists(pose_path): + with open(pose_path, "ab") as f: + np.savetxt(f, T_vec, fmt='%5f', delimiter=' ') + + def save_color_observation(self, obs) -> None: + """ + Retrieves current view of simulator and saves it as image" + """ + color_obs = obs["color_sensor"] + # color_obs = obs["equirect_rgba_sensor"] + color_img = Image.fromarray(color_obs, mode="RGBA") + if self.sim_settings['out_path'] is None: + 
color_img.save("test.rgba.%05d.png" % time.time()) + else: + image_path = os.path.join(self.sim_settings['out_path'], "images") + if not os.path.exists(image_path): + print(f"{os.path.join(self.sim_settings['out_path'], 'images')} doesn't exist, so creating one.") + os.mkdir(image_path) + color_img.save(os.path.join(image_path,"test.rgba.%05d.png" % time.time())) + def key_press_event(self, event: Application.KeyEvent) -> None: """ Handles `Application.KeyEvent` on a key press by performing the corresponding functions. @@ -476,6 +508,12 @@ def key_press_event(self, event: Application.KeyEvent) -> None: self.exit_event(Application.ExitEvent) return + elif key == pressed.R: + observations = self.sim.get_sensor_observations(self.agent_id) + if sim_settings["color_sensor"] and sim_settings['save_png']: + self.save_color_observation(observations) + self.save_pose_observation() + elif key == pressed.H: self.print_help_text() @@ -1157,6 +1195,9 @@ def next_frame() -> None: type=int, help="Vertical resolution of the window.", ) + parser.add_argument("--silent", action="store_true") + parser.add_argument("--save_png", action="store_true") + parser.add_argument("--out_path", help="path to save images and transformations") args = parser.parse_args() @@ -1180,6 +1221,9 @@ def next_frame() -> None: sim_settings["window_height"] = args.height sim_settings["pbr_image_based_lighting"] = args.ibl sim_settings["default_agent_navmesh"] = False + sim_settings["silent"] = args.silent + sim_settings["save_png"] = args.save_png + sim_settings["out_path"] = args.out_path # start the application HabitatSimInteractiveViewer(sim_settings).exec() From da098dbb55baa18b25c17ab6450666e51ae08925 Mon Sep 17 00:00:00 2001 From: Sanjay Rijal <37138338+zovelsanj@users.noreply.github.com> Date: Tue, 12 Sep 2023 09:30:04 +0545 Subject: [PATCH 07/12] reverted to previous version --- examples/demo_runner.py | 81 ++++++++++++++++++----------------------- 1 file changed, 35 insertions(+), 46 deletions(-) diff --git a/examples/demo_runner.py b/examples/demo_runner.py index 56a7485f9f..5da46c8829 100644 --- a/examples/demo_runner.py +++ b/examples/demo_runner.py @@ -36,17 +36,16 @@ class ABTestGroup(Enum): class DemoRunner: - def __init__(self, sim_settings, simulator_demo_type, out_path=None): + def __init__(self, sim_settings, simulator_demo_type): if simulator_demo_type == DemoRunnerType.EXAMPLE: self.set_sim_settings(sim_settings) self._demo_type = simulator_demo_type - self.out_path = out_path def set_sim_settings(self, sim_settings): self._sim_settings = sim_settings.copy() def save_color_observation(self, obs, total_frames): - color_obs = obs["equirect_rgba_sensor"] + color_obs = obs["color_sensor"] color_img = Image.fromarray(color_obs, mode="RGBA") if self._demo_type == DemoRunnerType.AB_TEST: if self._group_id == ABTestGroup.CONTROL: @@ -54,17 +53,10 @@ def save_color_observation(self, obs, total_frames): else: color_img.save("test.rgba.test.%05d.png" % total_frames) else: - if self.out_path is None: - color_img.save("test.rgba.%05d.png" % total_frames) - else: - image_path = os.path.join(self.out_path, "images") - if not os.path.exists(image_path): - print(f"{os.path.join(self.out_path, 'images')} doesn't exist, so creating one.") - os.mkdir(image_path) - color_img.save(os.path.join(image_path,"test.rgba.%05d.png" % total_frames)) + color_img.save("test.rgba.%05d.png" % total_frames) def save_semantic_observation(self, obs, total_frames): - semantic_obs = obs["equirect_semantic_sensor"] + semantic_obs = 
obs["semantic_sensor"] semantic_img = Image.new("P", (semantic_obs.shape[1], semantic_obs.shape[0])) semantic_img.putpalette(d3_40_colors_rgb.flatten()) semantic_img.putdata((semantic_obs.flatten() % 40).astype(np.uint8)) @@ -77,7 +69,7 @@ def save_semantic_observation(self, obs, total_frames): semantic_img.save("test.sem.%05d.png" % total_frames) def save_depth_observation(self, obs, total_frames): - depth_obs = obs["equirect_depth_sensor"] + depth_obs = obs["depth_sensor"] depth_img = Image.fromarray((depth_obs / 10 * 255).astype(np.uint8), mode="L") if self._demo_type == DemoRunnerType.AB_TEST: if self._group_id == ABTestGroup.CONTROL: @@ -88,7 +80,7 @@ def save_depth_observation(self, obs, total_frames): depth_img.save("test.depth.%05d.png" % total_frames) def output_semantic_mask_stats(self, obs, total_frames): - semantic_obs = obs["equirect_semantic_sensor"] + semantic_obs = obs["semantic_sensor"] counts = np.bincount(semantic_obs.flatten()) total_count = np.sum(counts) print(f"Pixel statistics for frame {total_frames}") @@ -220,7 +212,6 @@ def do_time_steps(self): # get the rigid object manager, which provides direct # access to objects rigid_obj_mgr = self._sim.get_rigid_object_manager() - poses = [] total_sim_step_time = 0.0 total_frames = 0 @@ -246,7 +237,7 @@ def do_time_steps(self): print("action", action) start_step_time = time.time() - + # apply kinematic or dynamic control to all objects based on their MotionType if self._sim_settings["enable_physics"]: obj_names = rigid_obj_mgr.get_object_handles() @@ -266,38 +257,37 @@ def do_time_steps(self): # get simulation step time without sensor observations total_sim_step_time += self._sim._previous_step_time - if total_frames % self._sim_settings["skip"] == 0: - if self._sim_settings["save_png"]: - if self._sim_settings["equirect_rgba_sensor"]: - self.save_color_observation(observations, total_frames) - if self._sim_settings["equirect_depth_sensor"]: - self.save_depth_observation(observations, total_frames) - if self._sim_settings["equirect_semantic_sensor"]: - self.save_semantic_observation(observations, total_frames) - - state = self._sim.last_state() - poses.append([state.position, state.rotation]) - if not self._sim_settings["silent"]: - print("position\t", state.position, "\t", "rotation\t", state.rotation) - - if self._sim_settings["compute_shortest_path"]: - self.compute_shortest_path( - state.position, self._sim_settings["goal_position"] - ) - if self._sim_settings["compute_action_shortest_path"]: - self._action_path = self.greedy_follower.find_path( - self._sim_settings["goal_position"] - ) - print("len(action_path)", len(self._action_path)) + if self._sim_settings["save_png"]: + if self._sim_settings["color_sensor"]: + self.save_color_observation(observations, total_frames) + if self._sim_settings["depth_sensor"]: + self.save_depth_observation(observations, total_frames) + if self._sim_settings["semantic_sensor"]: + self.save_semantic_observation(observations, total_frames) + + state = self._sim.last_state() + + if not self._sim_settings["silent"]: + print("position\t", state.position, "\t", "rotation\t", state.rotation) + + if self._sim_settings["compute_shortest_path"]: + self.compute_shortest_path( + state.position, self._sim_settings["goal_position"] + ) + + if self._sim_settings["compute_action_shortest_path"]: + self._action_path = self.greedy_follower.find_path( + self._sim_settings["goal_position"] + ) + print("len(action_path)", len(self._action_path)) - if ( - self._sim_settings["equirect_semantic_sensor"] - and 
self._sim_settings["print_semantic_mask_stats"] - ): - self.output_semantic_mask_stats(observations, total_frames) + if ( + self._sim_settings["semantic_sensor"] + and self._sim_settings["print_semantic_mask_stats"] + ): + self.output_semantic_mask_stats(observations, total_frames) - print(f"skipping frames by {self._sim_settings['skip']}, total_frames = {total_frames}") total_frames += 1 end_time = time.time() @@ -306,7 +296,6 @@ def do_time_steps(self): perf["fps"] = 1.0 / perf["frame_time"] perf["time_per_step"] = time_per_step perf["avg_sim_step_time"] = total_sim_step_time / total_frames - perf["pose"] = poses return perf From 05e2cad68e123bc0c75459e4e8bf11d66b887c9c Mon Sep 17 00:00:00 2001 From: Sanjay Rijal <37138338+zovelsanj@users.noreply.github.com> Date: Tue, 12 Sep 2023 09:31:38 +0545 Subject: [PATCH 08/12] reverted to previous version --- examples/example.py | 41 ++++++----------------------------------- 1 file changed, 6 insertions(+), 35 deletions(-) diff --git a/examples/example.py b/examples/example.py index f6bbba2782..3a86ddac71 100755 --- a/examples/example.py +++ b/examples/example.py @@ -6,9 +6,7 @@ import argparse -import os -import numpy as np -from habitat_sim.utils.common import to_campose + import demo_runner as dr parser = argparse.ArgumentParser() @@ -30,8 +28,6 @@ parser.add_argument("--silent", action="store_true") parser.add_argument("--test_fps_regression", type=int, default=0) parser.add_argument("--enable_physics", action="store_true") -parser.add_argument("--out_path", default="./data", required=False, help="path to save poses as matrix") -parser.add_argument("--skip", type=int, default=1, required=False, help="num of frames to skip") parser.add_argument( "--physics_config_file", type=str, @@ -49,13 +45,9 @@ def make_settings(): settings["scene"] = args.scene settings["save_png"] = args.save_png settings["sensor_height"] = args.sensor_height - # settings["color_sensor"] = not args.disable_color_sensor - # settings["semantic_sensor"] = args.semantic_sensor - # settings["depth_sensor"] = args.depth_sensor - settings["equirect_rgba_sensor"] = not args.disable_color_sensor - settings["equirect_semantic_sensor"] = args.semantic_sensor - settings["equirect_depth_sensor"] = args.depth_sensor - + settings["color_sensor"] = not args.disable_color_sensor + settings["semantic_sensor"] = args.semantic_sensor + settings["depth_sensor"] = args.depth_sensor settings["print_semantic_scene"] = args.print_semantic_scene settings["print_semantic_mask_stats"] = args.print_semantic_mask_stats settings["compute_shortest_path"] = args.compute_shortest_path @@ -66,36 +58,18 @@ def make_settings(): settings["physics_config_file"] = args.physics_config_file settings["frustum_culling"] = not args.disable_frustum_culling settings["recompute_navmesh"] = args.recompute_navmesh - settings["skip"] = args.skip return settings -def save_poses(perfs, pose_file): - transformed_R = np.array([[0 for _ in range(16)]]).reshape(1, 16) - for pose in perfs[0]["pose"]: - T_cal = to_campose(pose[1], pose[0]) - T_cal = T_cal.reshape(1, 16) - transformed_R = np.vstack((transformed_R, T_cal)) - - transformed_R = np.delete(transformed_R, 0, 0) - transformed_R = np.matrix(transformed_R) - with open(pose_file, "wb") as f: - for line in transformed_R: - np.savetxt(f, line, fmt = "%f") - print(f"poses sucessfully saved at {pose_file}") - settings = make_settings() -if not os.path.exists(args.out_path): - print(f"{args.out_path} doesn't exist, so creating one.") - os.mkdir(args.out_path) - perfs = [] for 
_i in range(1): - demo_runner = dr.DemoRunner(settings, dr.DemoRunnerType.EXAMPLE, out_path=args.out_path) + demo_runner = dr.DemoRunner(settings, dr.DemoRunnerType.EXAMPLE) perf = demo_runner.example() perfs.append(perf) + print(" ========================= Performance ======================== ") print( " %d x %d, total time %0.2f s," @@ -108,9 +82,6 @@ def save_poses(perfs, pose_file): # "FPS is below regression threshold: %0.1f < %0.1f" # % (perf["fps"], args.test_fps_regression) # ) - -save_poses(perfs, os.path.join(args.out_path, "transformations.txt")) - if len(perfs) > 1: avg_fps = 0 avg_frame_time = 0 From e1bd763d689023a6e373603db7ce6564e0bf6fbe Mon Sep 17 00:00:00 2001 From: Sanjay Rijal <37138338+zovelsanj@users.noreply.github.com> Date: Tue, 12 Sep 2023 09:39:06 +0545 Subject: [PATCH 09/12] added equirectangular mode --- examples/viewer.py | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/examples/viewer.py b/examples/viewer.py index 7f117a4870..12c7980143 100644 --- a/examples/viewer.py +++ b/examples/viewer.py @@ -469,16 +469,14 @@ def save_pose_observation(self) -> None: T_vec = T_mat.reshape(1, 16) pose_path = os.path.join(self.sim_settings['out_path'], "transformations.txt") - if os.path.exists(pose_path): - with open(pose_path, "ab") as f: - np.savetxt(f, T_vec, fmt='%5f', delimiter=' ') + with open(pose_path, "ab") as f: + np.savetxt(f, T_vec, fmt='%5f', delimiter=' ') - def save_color_observation(self, obs) -> None: + def save_color_observation(self, obs, sensor_type="color_sensor") -> None: """ Retrieves current view of simulator and saves it as image" """ - color_obs = obs["color_sensor"] - # color_obs = obs["equirect_rgba_sensor"] + color_obs = obs[sensor_type] color_img = Image.fromarray(color_obs, mode="RGBA") if self.sim_settings['out_path'] is None: color_img.save("test.rgba.%05d.png" % time.time()) @@ -509,10 +507,14 @@ def key_press_event(self, event: Application.KeyEvent) -> None: return elif key == pressed.R: + # Press R to record data (image, pose) observations = self.sim.get_sensor_observations(self.agent_id) - if sim_settings["color_sensor"] and sim_settings['save_png']: - self.save_color_observation(observations) - self.save_pose_observation() + if sim_settings['save_png']: + self.save_pose_observation() + if sim_settings["equirect_rgba_sensor"]: + self.save_color_observation(observations, sensor_type="equirect_rgba_sensor") + else: + self.save_color_observation(observations) elif key == pressed.H: self.print_help_text() @@ -1198,6 +1200,7 @@ def next_frame() -> None: parser.add_argument("--silent", action="store_true") parser.add_argument("--save_png", action="store_true") parser.add_argument("--out_path", help="path to save images and transformations") + parser.add_argument("--disable_color_sensor", action="store_true") args = parser.parse_args() @@ -1224,6 +1227,7 @@ def next_frame() -> None: sim_settings["silent"] = args.silent sim_settings["save_png"] = args.save_png sim_settings["out_path"] = args.out_path + sim_settings["equirect_rgba_sensor"] = args.disable_color_sensor # start the application HabitatSimInteractiveViewer(sim_settings).exec() From 0a9b386d056ccd256041f2740ac5108e38a3110f Mon Sep 17 00:00:00 2001 From: Sanjay Rijal <37138338+zovelsanj@users.noreply.github.com> Date: Thu, 11 Apr 2024 09:59:29 +0545 Subject: [PATCH 10/12] resolve merge conflict with main - accidentally removed `sim_settings["enable_hbao"] = args.hbao` line. Fixed it now. 
--- examples/viewer.py | 1 + 1 file changed, 1 insertion(+) diff --git a/examples/viewer.py b/examples/viewer.py index 12c7980143..0536302990 100644 --- a/examples/viewer.py +++ b/examples/viewer.py @@ -1224,6 +1224,7 @@ def next_frame() -> None: sim_settings["window_height"] = args.height sim_settings["pbr_image_based_lighting"] = args.ibl sim_settings["default_agent_navmesh"] = False + sim_settings["enable_hbao"] = args.hbao sim_settings["silent"] = args.silent sim_settings["save_png"] = args.save_png sim_settings["out_path"] = args.out_path From fab42207f0fb2fe37c2970fe44d3a5c9830ac312 Mon Sep 17 00:00:00 2001 From: Sanjay Rijal <37138338+zovelsanj@users.noreply.github.com> Date: Thu, 11 Apr 2024 10:03:18 +0545 Subject: [PATCH 11/12] resolve merge conflict - newline character showed conflict, so fixed it --- examples/viewer.py | 1 + 1 file changed, 1 insertion(+) diff --git a/examples/viewer.py b/examples/viewer.py index 0536302990..ed90cb25a2 100644 --- a/examples/viewer.py +++ b/examples/viewer.py @@ -1225,6 +1225,7 @@ def next_frame() -> None: sim_settings["pbr_image_based_lighting"] = args.ibl sim_settings["default_agent_navmesh"] = False sim_settings["enable_hbao"] = args.hbao + sim_settings["silent"] = args.silent sim_settings["save_png"] = args.save_png sim_settings["out_path"] = args.out_path From 8543eeeb5b9ecb845184727d3e4637eb401d102c Mon Sep 17 00:00:00 2001 From: Sanjay Rijal <37138338+zovelsanj@users.noreply.github.com> Date: Thu, 11 Apr 2024 10:07:48 +0545 Subject: [PATCH 12/12] resolve merge conflicts removed conflicting newline character --- examples/viewer.py | 1 - 1 file changed, 1 deletion(-) diff --git a/examples/viewer.py b/examples/viewer.py index ed90cb25a2..0536302990 100644 --- a/examples/viewer.py +++ b/examples/viewer.py @@ -1225,7 +1225,6 @@ def next_frame() -> None: sim_settings["pbr_image_based_lighting"] = args.ibl sim_settings["default_agent_navmesh"] = False sim_settings["enable_hbao"] = args.hbao - sim_settings["silent"] = args.silent sim_settings["save_png"] = args.save_png sim_settings["out_path"] = args.out_path
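
Note on the pose format used throughout this series: `to_campose(q, t)` (PATCH 02) packs the agent's rotation quaternion and position into a 4x4 matrix, and both `save_poses` (PATCH 01) and `save_pose_observation` (PATCH 06, fixed in PATCH 09) append that matrix to `transformations.txt` as one row of 16 space-separated floats. The sketch below shows how such a file could be read back. It is a minimal illustration, assuming the same NumPy / numpy-quaternion stack the patches already use; the `load_poses` helper and the hard-coded path are illustrative and not part of the patch set.

```python
import numpy as np
import quaternion as qt  # numpy-quaternion, imported as `qt` in habitat_sim.utils.common


def to_campose(q: qt.quaternion, t: np.ndarray) -> np.ndarray:
    """Quaternion + translation -> 4x4 pose matrix (mirrors the helper added in PATCH 02)."""
    T = np.eye(4)
    T[:3, :3] = qt.as_rotation_matrix(q)
    T[:3, 3] = t
    return T


def load_poses(pose_file: str) -> np.ndarray:
    """Read transformations.txt back into an (N, 4, 4) array.

    Assumes the layout written by save_poses / save_pose_observation:
    one flattened 4x4 matrix (16 space-separated floats) per line.
    """
    flat = np.loadtxt(pose_file)
    return flat.reshape(-1, 4, 4)


# Illustrative usage; "./data" matches the default --out_path added to example.py in PATCH 01.
poses = load_poses("./data/transformations.txt")
print(poses.shape)  # (number_of_saved_frames, 4, 4)
```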