diff --git a/msgq_repo b/msgq_repo
index a16cf1f608538d..518901635d020a 160000
--- a/msgq_repo
+++ b/msgq_repo
@@ -1 +1 @@
-Subproject commit a16cf1f608538d14f66bd6142230d8728f2d0abc
+Subproject commit 518901635d020af7ae392df31a0ce283d3b4936d
diff --git a/selfdrive/ui/mici/onroad/augmented_road_view.py b/selfdrive/ui/mici/onroad/augmented_road_view.py
index 71ca03cccfac94..808493de1a3f4d 100644
--- a/selfdrive/ui/mici/onroad/augmented_road_view.py
+++ b/selfdrive/ui/mici/onroad/augmented_road_view.py
@@ -129,6 +129,9 @@ def _render(self, _):
     rl.draw_texture(self._icon, int(icon_x), int(icon_y), rl.WHITE)
 
 
+times = {}
+
+
 class AugmentedRoadView(CameraView):
   def __init__(self, bookmark_callback=None, stream_type: VisionStreamType = VisionStreamType.VISION_STREAM_ROAD):
     super().__init__("camerad", stream_type)
@@ -183,11 +186,24 @@ def _handle_mouse_release(self, mouse_pos: MousePos):
     super()._handle_mouse_release(mouse_pos)
 
   def _render(self, _):
+    def tlog(uid):
+      nonlocal t
+      now = time.monotonic()
+      if uid not in times:
+        times[uid] = [now - t]
+      else:
+        times[uid].append(now - t)
+      t = now
+
     start_draw = time.monotonic()
+    t = time.monotonic()
     self._switch_stream_if_needed(ui_state.sm)
+    tlog("switch_stream")
+    # print(f"{(time.monotonic() - t) * 1000:.2f} ms - switch stream check")
 
     # Update calibration before rendering
     self._update_calibration()
+    tlog("update_calib")
 
     # Create inner content area with border padding
     self._content_rect = rl.Rectangle(
@@ -208,9 +224,11 @@
 
     # Render the base camera view
     super()._render(self._content_rect)
+    tlog("camera render")
 
     # Draw all UI overlays
     self._model_renderer.render(self._content_rect)
+    tlog("model render")
 
     # Fade out bottom of overlays for looks
     rl.draw_texture_ex(self._fade_texture, rl.Vector2(self._content_rect.x, self._content_rect.y), 0.0, 1.0, rl.WHITE)
@@ -232,6 +250,8 @@
     self._alert_renderer.render(self._content_rect)
     self._hud_renderer.render(self._content_rect)
 
+    tlog("UI overlays")
+
     # Draw fake rounded border
     rl.draw_rectangle_rounded_lines_ex(self._content_rect, 0.2 * 1.02, 10, 50, rl.BLACK)
 
@@ -249,6 +269,14 @@
       rl.draw_rectangle(int(self.rect.x), int(self.rect.y), int(self.rect.width), int(self.rect.height), rl.Color(0, 0, 0, 175))
       self._offroad_label.render(self._content_rect)
 
+    tlog("final overlays")
+
+    print("*** AugmentedRoadView timing ***")
+    for uid, tlist in times.items():
+      max_time = max(tlist) * 1000
+      avg_time = sum(tlist) / len(tlist) * 1000
+      print(f" {uid}: max {max_time:.2f} ms, avg {avg_time:.2f} ms over {len(tlist)} calls")
+
     # publish uiDebug
     msg = messaging.new_message('uiDebug')
     msg.uiDebug.drawTimeMillis = (time.monotonic() - start_draw) * 1000
diff --git a/selfdrive/ui/mici/onroad/cameraview.py b/selfdrive/ui/mici/onroad/cameraview.py
index f3e0ef409e95f6..7b0709959e0e4e 100644
--- a/selfdrive/ui/mici/onroad/cameraview.py
+++ b/selfdrive/ui/mici/onroad/cameraview.py
@@ -1,3 +1,4 @@
+import time
 import platform
 import numpy as np
 import pyray as rl
@@ -103,6 +104,8 @@
 }
 """
 
+times = {}
+
 
 class CameraView(Widget):
   def __init__(self, name: str, stream_type: VisionStreamType):
@@ -225,13 +228,27 @@ def _calc_frame_matrix(self, rect: rl.Rectangle) -> np.ndarray:
     ])
 
   def _render(self, rect: rl.Rectangle):
+    def tlog(uid):
+      nonlocal t
+      now = time.monotonic()
+      if uid not in times:
+        times[uid] = [now - t]
+      else:
+        times[uid].append(now - t)
+      t = now
+
+    t = time.monotonic()
     if self._switching:
       self._handle_switch()
 
+    tlog("switch")
+
     if not self._ensure_connection():
       self._draw_placeholder(rect)
       return
 
+    tlog("connect")
+
     # Try to get a new buffer without blocking
     buffer = self.client.recv(timeout_ms=0)
     if buffer:
@@ -245,6 +262,8 @@ def _render(self, rect: rl.Rectangle):
       self._draw_placeholder(rect)
       return
 
+    tlog("recv")
+
     transform = self._calc_frame_matrix(rect)
     src_rect = rl.Rectangle(0, 0, float(self.frame.width), float(self.frame.height))
     # Flip driver camera horizontally
@@ -264,12 +283,22 @@
 
     dst_rect = rl.Rectangle(x_offset, y_offset, scale_x, scale_y)
 
+    tlog("calc rects")
+
     # Render with appropriate method
     if TICI:
       self._render_egl(src_rect, dst_rect)
     else:
      self._render_textures(src_rect, dst_rect)
 
+    tlog("render")
+
+    print("*** CameraView timings ***")
+    for uid, tlist in times.items():
+      max_time = max(tlist) * 1000
+      avg_time = sum(tlist) / len(tlist) * 1000
+      print(f" {uid}: max {max_time:.2f} ms, avg {avg_time:.2f} ms over {len(tlist)} calls")
+
   def _draw_placeholder(self, rect: rl.Rectangle):
     if self._placeholder_color:
       rl.draw_rectangle_rec(rect, self._placeholder_color)
@@ -340,8 +369,12 @@ def _ensure_connection(self) -> bool:
         return False
       self.last_connection_attempt = current_time
 
-      if not self.client.connect(False) or not self.client.num_buffers:
+      t = time.monotonic()
+      ret = self.client.connect(False)
+      print((time.monotonic() - t) * 1000)
+      if not ret or not self.client.num_buffers:
         return False
+      # return False
 
       cloudlog.debug(f"Connected to {self._name} stream: {self._stream_type}, buffers: {self.client.num_buffers}")
       self._initialize_textures()
diff --git a/selfdrive/ui/onroad/augmented_road_view.py b/selfdrive/ui/onroad/augmented_road_view.py
index 1f202141c3806b..29b34f0cd8b25f 100644
--- a/selfdrive/ui/onroad/augmented_road_view.py
+++ b/selfdrive/ui/onroad/augmented_road_view.py
@@ -30,6 +30,8 @@
 ROAD_CAM_MIN_SPEED = 15.0  # m/s (34 mph)
 INF_POINT = np.array([1000.0, 0.0, 0.0])
 
+times = {}
+
 
 class AugmentedRoadView(CameraView):
   def __init__(self, stream_type: VisionStreamType = VisionStreamType.VISION_STREAM_ROAD):
@@ -53,15 +55,27 @@ def __init__(self, stream_type: VisionStreamType = VisionStreamType.VISION_STREA
     self._pm = messaging.PubMaster(['uiDebug'])
 
   def _render(self, rect):
+    def tlog(uid):
+      nonlocal t
+      now = time.monotonic()
+      if uid not in times:
+        times[uid] = [now - t]
+      else:
+        times[uid].append(now - t)
+      t = now
+
     # Only render when system is started to avoid invalid data access
     start_draw = time.monotonic()
     if not ui_state.started:
       return
 
+    t = time.monotonic()
     self._switch_stream_if_needed(ui_state.sm)
+    tlog("switch_stream")
 
     # Update calibration before rendering
     self._update_calibration()
+    tlog("update_calib")
 
     # Create inner content area with border padding
     self._content_rect = rl.Rectangle(
@@ -82,12 +96,15 @@ def _render(self, rect):
 
     # Render the base camera view
     super()._render(rect)
+    tlog("camera render")
 
     # Draw all UI overlays
     self.model_renderer.render(self._content_rect)
+    tlog("model render")
     self._hud_renderer.render(self._content_rect)
     self.alert_renderer.render(self._content_rect)
     self.driver_state_renderer.render(self._content_rect)
+    tlog("UI overlays")
 
     # Custom UI extension point - add custom overlays here
     # Use self._content_rect for positioning within camera bounds
@@ -97,6 +114,13 @@ def _render(self, rect):
 
     # Draw colored border based on driving state
     self._draw_border(rect)
+    tlog("final overlays")
+
+    print("*** AugmentedRoadView timing ***")
+    for uid, tlist in times.items():
+      max_time = max(tlist) * 1000
+      avg_time = sum(tlist) / len(tlist) * 1000
+      print(f" {uid}: max {max_time:.2f} ms, avg {avg_time:.2f} ms over {len(tlist)} calls")
 
     # publish uiDebug
     msg = messaging.new_message('uiDebug')
diff --git a/selfdrive/ui/onroad/cameraview.py b/selfdrive/ui/onroad/cameraview.py
index 881a916df73aaf..8804ec1818efb9 100644
--- a/selfdrive/ui/onroad/cameraview.py
+++ b/selfdrive/ui/onroad/cameraview.py
@@ -1,3 +1,5 @@
+import time
+import threading
 import platform
 import numpy as np
 import pyray as rl
@@ -12,6 +14,8 @@
 
 CONNECTION_RETRY_INTERVAL = 0.2  # seconds between connection attempts
 
+times = {}
+
 VERSION = """
 #version 300 es
 precision mediump float;
@@ -79,6 +83,9 @@ def __init__(self, name: str, stream_type: VisionStreamType):
     self._target_client: VisionIpcClient | None = None
     self._target_stream_type: VisionStreamType | None = None
     self._switching: bool = False
+    self._connecting: bool = False
+    self._prev_connected: bool = False
+    # self._connect_thread: threading.Thread | None = None
 
     self._texture_needs_update = True
     self.last_connection_attempt: float = 0.0
@@ -181,13 +188,27 @@ def _calc_frame_matrix(self, rect: rl.Rectangle) -> np.ndarray:
     ])
 
   def _render(self, rect: rl.Rectangle):
+    def tlog(uid):
+      nonlocal t
+      now = time.monotonic()
+      if uid not in times:
+        times[uid] = [now - t]
+      else:
+        times[uid].append(now - t)
+      t = now
+
+    t = time.monotonic()
     if self._switching:
       self._handle_switch()
 
+    tlog("switch")
+
     if not self._ensure_connection():
       self._draw_placeholder(rect)
       return
 
+    tlog("connect")
+
     # Try to get a new buffer without blocking
     buffer = self.client.recv(timeout_ms=0)
     if buffer:
@@ -201,6 +222,8 @@ def _render(self, rect: rl.Rectangle):
       self._draw_placeholder(rect)
       return
 
+    tlog("recv")
+
     transform = self._calc_frame_matrix(rect)
     src_rect = rl.Rectangle(0, 0, float(self.frame.width), float(self.frame.height))
     # Flip driver camera horizontally
@@ -220,12 +243,22 @@
 
     dst_rect = rl.Rectangle(x_offset, y_offset, scale_x, scale_y)
 
+    tlog("calc rects")
+
     # Render with appropriate method
     if TICI:
       self._render_egl(src_rect, dst_rect)
     else:
       self._render_textures(src_rect, dst_rect)
 
+    tlog("render")
+
+    print("*** CameraView timings ***")
+    for uid, tlist in times.items():
+      max_time = max(tlist) * 1000
+      avg_time = sum(tlist) / len(tlist) * 1000
+      print(f" {uid}: max {max_time:.2f} ms, avg {avg_time:.2f} ms over {len(tlist)} calls")
+
   def _draw_placeholder(self, rect: rl.Rectangle):
     if self._placeholder_color:
       rl.draw_rectangle_rec(rect, self._placeholder_color)
@@ -279,23 +312,71 @@ def _render_textures(self, src_rect: rl.Rectangle, dst_rect: rl.Rectangle) -> No
     rl.end_shader_mode()
 
   def _ensure_connection(self) -> bool:
-    if not self.client.is_connected():
+    t_start = time.monotonic()
+    t = t_start
+
+    def tlog(uid):
+      nonlocal t
+      now = time.monotonic()
+      if uid not in times:
+        times[uid] = [now - t]
+      else:
+        times[uid].append(now - t)
+      t = now
+
+    if self._connecting:
+      tlog("ensure_conn_check_connecting")
+      return False
+
+    tlog("ensure_conn_check_connecting_done")
+
+    now_connected = self.client.is_connected()
+    tlog("ensure_conn_is_connected")
+
+    if now_connected != self._prev_connected:
+      cloudlog.debug(f"Connected to {self._name} stream: {self._stream_type}, buffers: {self.client.num_buffers}")
+      self._initialize_textures()
+      tlog("ensure_conn_init_textures")
+      self.available_streams = self.client.available_streams(self._name, block=False)
+      tlog("ensure_conn_get_streams")
+
+    self._prev_connected = now_connected
+
+    if not now_connected:
       self.frame = None
       self.available_streams.clear()
+      tlog("ensure_conn_clear_state")
 
       # Throttle connection attempts
       current_time = rl.get_time()
       if current_time - self.last_connection_attempt < CONNECTION_RETRY_INTERVAL:
+        tlog("ensure_conn_throttled")
         return False
       self.last_connection_attempt = current_time
+      tlog("ensure_conn_throttle_check")
 
-      if not self.client.connect(False) or not self.client.num_buffers:
-        return False
+      self._connecting = True
 
-      cloudlog.debug(f"Connected to {self._name} stream: {self._stream_type}, buffers: {self.client.num_buffers}")
-      self._initialize_textures()
-      self.available_streams = self.client.available_streams(self._name, block=False)
+      def do_connect():
+        try:
+          self.client.connect(False)
+        finally:
+          self._connecting = False
+
+      connect_thread = threading.Thread(target=do_connect, daemon=True)
+      connect_thread.start()
+
+      # if not self.client.connect(False) or not self.client.num_buffers:
+      #   return False
+
+      # cloudlog.debug(f"Connected to {self._name} stream: {self._stream_type}, buffers: {self.client.num_buffers}")
+      # self._initialize_textures()
+      # self.available_streams = self.client.available_streams(self._name, block=False)
+
+      tlog("ensure_conn_start_thread")
+      return False
 
+    tlog("ensure_conn_done")
     return True
 
   def _handle_switch(self) -> None:
diff --git a/tools/replay/camera.cc b/tools/replay/camera.cc
index 73243ed20d56df..fd0a4f5f1c97be 100644
--- a/tools/replay/camera.cc
+++ b/tools/replay/camera.cc
@@ -8,7 +8,7 @@
 #include "third_party/linux/include/msm_media_info.h"
 #include "tools/replay/util.h"
 
-const int BUFFER_COUNT = 40;
+const int BUFFER_COUNT = 127;
 
 std::tuple<size_t, size_t, size_t> get_nv12_info(int width, int height) {
   int nv12_width = VENUS_Y_STRIDE(COLOR_FMT_NV12, width);
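Review note (not part of the diff): the `tlog` closure, the module-level `times` dict, and the per-frame max/avg summary print are duplicated across all four instrumented files. If this profiling sticks around, it could be factored into one shared helper. A minimal sketch follows, assuming a hypothetical `FrameTimer` class; the name and API are illustrative, not existing openpilot code.

import time


class FrameTimer:
  """Per-section frame timing, mirroring the tlog/times pattern used in this patch."""

  def __init__(self):
    self.times: dict[str, list[float]] = {}
    self._t = time.monotonic()

  def reset(self) -> None:
    # call once at the top of each _render pass
    self._t = time.monotonic()

  def log(self, uid: str) -> None:
    # record the time elapsed since the previous log()/reset() under the given label
    now = time.monotonic()
    self.times.setdefault(uid, []).append(now - self._t)
    self._t = now

  def report(self, title: str) -> None:
    # same max/avg summary the patch prints every frame
    print(f"*** {title} ***")
    for uid, tlist in self.times.items():
      max_ms = max(tlist) * 1000
      avg_ms = sum(tlist) / len(tlist) * 1000
      print(f" {uid}: max {max_ms:.2f} ms, avg {avg_ms:.2f} ms over {len(tlist)} calls")

Usage would be e.g. `self._timer = FrameTimer()` in `__init__`, `self._timer.reset()` at the top of `_render`, `self._timer.log("recv")` after each stage, and `self._timer.report("CameraView timings")` once per frame or on teardown.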