From ee1b2cba3938e4f772d520e70c05ec6d04e6748b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gytis=20Sto=C5=A1kevi=C4=8Dius?= Date: Wed, 4 Dec 2024 13:16:17 +0200 Subject: [PATCH] apply tcpdump wrapper --- nat-lab/bin/derp-server | 4 + nat-lab/bin/dns-server.sh | 5 ++ nat-lab/docker-compose.yml | 10 +++ nat-lab/tests/conftest.py | 79 +++++++++++++------ nat-lab/tests/helpers.py | 35 ++++---- nat-lab/tests/mesh_api.py | 52 ------------ nat-lab/tests/telio.py | 23 +++--- .../utils/connection/docker_connection.py | 4 +- .../tests/utils/connection/ssh_connection.py | 11 ++- nat-lab/tests/utils/connection_util.py | 10 +++ nat-lab/tests/utils/process/docker_process.py | 21 ++++- nat-lab/tests/utils/process/process.py | 14 +++- nat-lab/tests/utils/process/ssh_process.py | 14 +++- nat-lab/tests/utils/tcpdump.py | 58 +++++++++++--- nat-lab/tests/utils/vm/mac_vm_util.py | 2 +- nat-lab/tests/utils/vm/windows_vm_util.py | 8 +- 16 files changed, 226 insertions(+), 124 deletions(-) diff --git a/nat-lab/bin/derp-server b/nat-lab/bin/derp-server index 851b832b1..11c6b2385 100644 --- a/nat-lab/bin/derp-server +++ b/nat-lab/bin/derp-server @@ -2,6 +2,10 @@ set -e +# Backup IP tables +iptables-save -f iptables_backup +ip6tables-save -f ip6tables_backup + /usr/bin/nordderper & sleep infinity diff --git a/nat-lab/bin/dns-server.sh b/nat-lab/bin/dns-server.sh index 87cc0f674..6e3b20af8 100755 --- a/nat-lab/bin/dns-server.sh +++ b/nat-lab/bin/dns-server.sh @@ -1,2 +1,7 @@ #!/bin/sh + +# Backup IP tables +iptables-save -f iptables_backup +ip6tables-save -f ip6tables_backup + /opt/bin/dns-server 2>&1 | tee /dns-server.log diff --git a/nat-lab/docker-compose.yml b/nat-lab/docker-compose.yml index fd1434075..07378f4e6 100644 --- a/nat-lab/docker-compose.yml +++ b/nat-lab/docker-compose.yml @@ -423,6 +423,8 @@ services: - net.netfilter.nf_conntrack_tcp_timeout_fin_wait=15 cap_drop: - ALL + cap_add: + - NET_ADMIN security_opt: - no-new-privileges:true networks: @@ -549,6 +551,10 @@ services: hostname: dns-server-1 image: nat-lab:base entrypoint: /opt/bin/dns-server.sh + cap_drop: + - ALL + cap_add: + - NET_ADMIN environment: PYTHONUNBUFFERED: 1 networks: @@ -561,6 +567,10 @@ services: hostname: dns-server-2 image: nat-lab:base entrypoint: /opt/bin/dns-server.sh + cap_drop: + - ALL + cap_add: + - NET_ADMIN environment: PYTHONUNBUFFERED: 1 networks: diff --git a/nat-lab/tests/conftest.py b/nat-lab/tests/conftest.py index eaca42468..4ccc45d08 100644 --- a/nat-lab/tests/conftest.py +++ b/nat-lab/tests/conftest.py @@ -4,22 +4,21 @@ import random import shutil import subprocess +from contextlib import AsyncExitStack from datetime import datetime from helpers import SetupParameters from interderp_cli import InterDerpClient from itertools import combinations -from mesh_api import start_tcpdump, stop_tcpdump from utils.bindings import TelioAdapterType -from utils.connection import DockerConnection from utils.connection_util import ( ConnectionTag, - container_id, DOCKER_GW_MAP, LAN_ADDR_MAP, new_connection_raw, ) from utils.process import ProcessExecError from utils.router import IPStack +from utils.tcpdump import make_tcpdump from utils.vm import windows_vm_util, mac_vm_util DERP_SERVER_1_ADDR = "http://10.0.10.1:8765" @@ -31,6 +30,11 @@ SETUP_CHECK_TIMEOUT_S = 30 SETUP_CHECK_RETRIES = 5 +# pylint: disable=unnecessary-dunder-call +TEST_SCOPE_ASYNC_EXIT_STACK = asyncio.run(AsyncExitStack().__aenter__()) +# pylint: disable=unnecessary-dunder-call +SESSION_ASYNC_EXIT_STACK = asyncio.run(AsyncExitStack().__aenter__()) + def 
_cancel_all_tasks(loop: asyncio.AbstractEventLoop): to_cancel = asyncio.tasks.all_tasks(loop) @@ -198,24 +202,25 @@ def pytest_make_parametrize_id(config, val): async def setup_check_interderp(): - async with new_connection_raw(ConnectionTag.DOCKER_CONE_CLIENT_1) as connection: - if not isinstance(connection, DockerConnection): - raise Exception("Not docker connection") - containers = [ - connection.container_name(), - "nat-lab-derp-01-1", - "nat-lab-derp-02-1", - "nat-lab-derp-03-1", + async with AsyncExitStack() as exit_stack: + connections = [ + await exit_stack.enter_async_context(new_connection_raw(conn_tag)) + for conn_tag in [ + ConnectionTag.DOCKER_CONE_CLIENT_1, + ConnectionTag.DOCKER_DERP_1, + ConnectionTag.DOCKER_DERP_2, + ConnectionTag.DOCKER_DERP_3, + ] ] - start_tcpdump(containers) - try: + + async with make_tcpdump(connections): for idx, (server1, server2) in enumerate( combinations( [DERP_SERVER_1_ADDR, DERP_SERVER_2_ADDR, DERP_SERVER_3_ADDR], 2 ) ): derp_test = InterDerpClient( - connection, + connections[0], server1, server2, DERP_SERVER_1_SECRET_KEY, @@ -224,8 +229,6 @@ async def setup_check_interderp(): ) await derp_test.execute() await derp_test.save_logs() - finally: - stop_tcpdump(containers) SETUP_CHECKS = [ @@ -406,13 +409,37 @@ def pytest_runtest_setup(): # pylint: disable=unused-argument def pytest_runtest_call(item): - start_tcpdump([f"nat-lab-dns-server-{i}-1" for i in range(1, 3)]) + if os.environ.get("NATLAB_SAVE_LOGS") is None: + return + + async def async_code(): + connections = [ + await TEST_SCOPE_ASYNC_EXIT_STACK.enter_async_context( + new_connection_raw(conn_tag) + ) + for conn_tag in [ + ConnectionTag.DOCKER_DNS_SERVER_1, + ConnectionTag.DOCKER_DNS_SERVER_2, + ] + ] + await TEST_SCOPE_ASYNC_EXIT_STACK.enter_async_context(make_tcpdump(connections)) + + asyncio.run(async_code()) # pylint: disable=unused-argument def pytest_runtest_makereport(item, call): + if os.environ.get("NATLAB_SAVE_LOGS") is None: + return + + async def async_code(): + global TEST_SCOPE_ASYNC_EXIT_STACK + await TEST_SCOPE_ASYNC_EXIT_STACK.aclose() + # pylint: disable=unnecessary-dunder-call + TEST_SCOPE_ASYNC_EXIT_STACK = await AsyncExitStack().__aenter__() + if call.when == "call": - stop_tcpdump([f"nat-lab-dns-server-{i}-1" for i in range(1, 3)]) + asyncio.run(async_code()) # pylint: disable=unused-argument @@ -420,8 +447,17 @@ def pytest_sessionstart(session): if os.environ.get("NATLAB_SAVE_LOGS") is None: return + async def async_code(): + connections = [ + await SESSION_ASYNC_EXIT_STACK.enter_async_context( + new_connection_raw(conn_tag) + ) + for conn_tag in DOCKER_GW_MAP.values() + ] + await SESSION_ASYNC_EXIT_STACK.enter_async_context(make_tcpdump(connections)) + if not session.config.option.collectonly: - start_tcpdump({container_id(gw_tag) for gw_tag in DOCKER_GW_MAP.values()}) + asyncio.run(async_code()) # pylint: disable=unused-argument @@ -430,10 +466,7 @@ def pytest_sessionfinish(session, exitstatus): return if not session.config.option.collectonly: - stop_tcpdump( - {container_id(gw_tag) for gw_tag in DOCKER_GW_MAP.values()}, "./logs" - ) - + asyncio.run(SESSION_ASYNC_EXIT_STACK.aclose()) collect_nordderper_logs() collect_dns_server_logs() asyncio.run(collect_kernel_logs(session.items, "after_tests")) diff --git a/nat-lab/tests/helpers.py b/nat-lab/tests/helpers.py index 66aeb55ee..7e1f74883 100644 --- a/nat-lab/tests/helpers.py +++ b/nat-lab/tests/helpers.py @@ -2,12 +2,11 @@ import itertools import json import pytest -from config import WG_SERVERS from contextlib 
import AsyncExitStack, asynccontextmanager from dataclasses import dataclass, field from datetime import datetime from itertools import product, zip_longest -from mesh_api import Node, API, stop_tcpdump +from mesh_api import Node, API from telio import Client from typing import AsyncIterator, List, Tuple, Optional, Union from utils.bindings import ( @@ -26,9 +25,11 @@ ConnectionManager, ConnectionTag, new_connection_manager_by_tag, + new_connection_raw, ) from utils.ping import ping from utils.router import IPStack +from utils.tcpdump import make_tcpdump from uuid import UUID @@ -294,6 +295,15 @@ async def setup_environment( ) if prepare_vpn: + connections = [ + await exit_stack.enter_async_context(new_connection_raw(conn_tag)) + for conn_tag in [ + ConnectionTag.DOCKER_NLX_1, + ConnectionTag.DOCKER_VPN_1, + ConnectionTag.DOCKER_VPN_2, + ] + ] + await exit_stack.enter_async_context(make_tcpdump(connections)) api.prepare_all_vpn_servers() clients = await setup_clients( @@ -317,18 +327,15 @@ async def setup_environment( ), ) - try: - yield Environment(api, nodes, connection_managers, clients) - - print(datetime.now(), "Checking connection limits") - for conn_manager in connection_managers: - if conn_manager.tracker: - violations = await conn_manager.tracker.find_conntracker_violations() - assert ( - violations is None - ), f"conntracker reported out of limits {violations}" - finally: - stop_tcpdump([server["container"] for server in WG_SERVERS]) + yield Environment(api, nodes, connection_managers, clients) + + print(datetime.now(), "Checking connection limits") + for conn_manager in connection_managers: + if conn_manager.tracker: + violations = await conn_manager.tracker.find_conntracker_violations() + assert ( + violations is None + ), f"conntracker reported out of limits {violations}" async def setup_mesh_nodes( diff --git a/nat-lab/tests/mesh_api.py b/nat-lab/tests/mesh_api.py index 2e0be517c..f4a941dc3 100644 --- a/nat-lab/tests/mesh_api.py +++ b/nat-lab/tests/mesh_api.py @@ -5,15 +5,12 @@ import random import time import uuid -from collections.abc import Iterable -from concurrent.futures import ThreadPoolExecutor from config import DERP_SERVERS, LIBTELIO_IPV6_WG_SUBNET, WG_SERVERS from datetime import datetime from ipaddress import ip_address from typing import Dict, Any, List, Tuple, Optional from utils.bindings import Config, Server, Peer, PeerBase from utils.router import IPStack, IPProto, get_ip_address_type -from utils.testing import get_current_test_log_path if platform.machine() != "x86_64": import pure_wg as Key @@ -373,7 +370,6 @@ def generate_peer_config(node: Node, allowed_ips: str) -> str: ) for node in node_list: - start_tcpdump([server_config["container"]]) if "type" in server_config and server_config["type"] == "nordlynx": priv_key = server_config["private_key"] commands = [ @@ -445,51 +441,3 @@ def config_dynamic_nodes( def prepare_all_vpn_servers(self): for wg_server in WG_SERVERS: self.setup_vpn_servers(list(self.nodes.values()), wg_server) - - -def start_tcpdump(container_names: Iterable[str]): - if os.environ.get("NATLAB_SAVE_LOGS") is None: - return - - def aux(container_name): - # First make sure that no leftover processes/files will interfere - cmd = f"docker exec --privileged {container_name} killall -w tcpdump" - os.system(cmd) - cmd = f"docker exec --privileged {container_name} rm {PCAP_FILE_PATH}" - os.system(cmd) - cmd = f"docker exec -d --privileged {container_name} tcpdump -i any -U -w {PCAP_FILE_PATH}" - os.system(cmd) - - with ThreadPoolExecutor() as 
executor: - executor.map(aux, container_names) - - -def stop_tcpdump(container_names, store_in=None): - if os.environ.get("NATLAB_SAVE_LOGS") is None: - return - log_dir = get_current_test_log_path() - os.makedirs(log_dir, exist_ok=True) - - def aux(container_name): - cmd = f"docker exec --privileged {container_name} killall -w tcpdump" - os.system(cmd) - path = find_unique_path_for_tcpdump( - store_in if store_in else log_dir, container_name - ) - cmd = f"docker container cp {container_name}:{PCAP_FILE_PATH} {path}" - os.system(cmd) - - with ThreadPoolExecutor() as executor: - executor.map(aux, container_names) - - -def find_unique_path_for_tcpdump(log_dir, container_name): - candidate_path = f"{log_dir}/{container_name}.pcap" - counter = 1 - # NOTE: counter starting from '1' means that the file will either have no suffix or - # will have a suffix starting from '2'. This is to make it clear that it's not the - # first log for that container/client. - while os.path.isfile(candidate_path): - counter += 1 - candidate_path = f"./{log_dir}/{container_name}-{counter}.pcap" - return candidate_path diff --git a/nat-lab/tests/telio.py b/nat-lab/tests/telio.py index 462a4fcd1..8a2489b57 100644 --- a/nat-lab/tests/telio.py +++ b/nat-lab/tests/telio.py @@ -8,9 +8,9 @@ import warnings from collections import Counter from config import DERP_SERVERS -from contextlib import asynccontextmanager +from contextlib import AsyncExitStack, asynccontextmanager from datetime import datetime -from mesh_api import Node, start_tcpdump, stop_tcpdump +from mesh_api import Node from typing import AsyncIterator, List, Optional, Set from uniffi.libtelio_proxy import LibtelioProxy, ProxyConnectionError from utils import asyncio_util @@ -39,6 +39,7 @@ from utils.router import IPStack, Router, new_router from utils.router.linux_router import LinuxRouter, FWMARK_VALUE as LINUX_FWMARK_VALUE from utils.router.windows_router import WindowsRouter +from utils.tcpdump import make_tcpdump from utils.testing import ( get_current_test_log_path, get_current_test_case_and_parameters, @@ -351,10 +352,6 @@ def get_proxy_port(self): async def run( self, meshnet_config: Optional[Config] = None ) -> AsyncIterator["Client"]: - if isinstance(self._connection, DockerConnection): - start_tcpdump([self._connection.container_name()]) - await self.clear_core_dumps() - async def on_stdout(stdout: str) -> None: supress_print_list = [ "- no login.", @@ -399,11 +396,16 @@ async def on_stderr(stderr: str) -> None: container_port, ]) - await self.clear_system_log() + async with AsyncExitStack() as exit_stack: + await exit_stack.enter_async_context(make_tcpdump([self._connection])) + if isinstance(self._connection, DockerConnection): + await self.clear_core_dumps() - async with self._process.run( - stdout_callback=on_stdout, stderr_callback=on_stderr - ): + await self.clear_system_log() + + await exit_stack.enter_async_context( + self._process.run(stdout_callback=on_stdout, stderr_callback=on_stderr) + ) try: await self._process.wait_stdin_ready() @@ -450,7 +452,6 @@ async def on_stderr(stderr: str) -> None: "Test cleanup: Stopping tcpdump and collecting core dumps", ) if isinstance(self._connection, DockerConnection): - stop_tcpdump([self._connection.container_name()]) await self.collect_core_dumps() print( diff --git a/nat-lab/tests/utils/connection/docker_connection.py b/nat-lab/tests/utils/connection/docker_connection.py index fb0712c27..8ac57623c 100644 --- a/nat-lab/tests/utils/connection/docker_connection.py +++ 
b/nat-lab/tests/utils/connection/docker_connection.py @@ -42,7 +42,9 @@ def aux(): await to_thread(aux) def create_process(self, command: List[str], kill_id=None) -> "Process": - process = DockerProcess(self._container, command, kill_id) + process = DockerProcess( + self._container, self.container_name(), command, kill_id + ) print( datetime.now(), "Executing", diff --git a/nat-lab/tests/utils/connection/ssh_connection.py b/nat-lab/tests/utils/connection/ssh_connection.py index 17b8839dc..bfbe5e55c 100644 --- a/nat-lab/tests/utils/connection/ssh_connection.py +++ b/nat-lab/tests/utils/connection/ssh_connection.py @@ -9,10 +9,17 @@ class SshConnection(Connection): _connection: asyncssh.SSHClientConnection + _vm_name: str _target_os: TargetOS - def __init__(self, connection: asyncssh.SSHClientConnection, target_os: TargetOS): + def __init__( + self, + connection: asyncssh.SSHClientConnection, + vm_name: str, + target_os: TargetOS, + ): super().__init__(target_os) + self._vm_name = vm_name self._connection = connection self._target_os = target_os @@ -25,7 +32,7 @@ def create_process(self, command: List[str], kill_id=None) -> "Process": else: assert False, f"not supported target_os '{self._target_os}'" - return SshProcess(self._connection, command, escape_argument) + return SshProcess(self._connection, self._vm_name, command, escape_argument) async def get_ip_address(self) -> tuple[str, str]: ip = self._connection._host # pylint: disable=protected-access diff --git a/nat-lab/tests/utils/connection_util.py b/nat-lab/tests/utils/connection_util.py index d334ecf18..3d07ff296 100644 --- a/nat-lab/tests/utils/connection_util.py +++ b/nat-lab/tests/utils/connection_util.py @@ -55,6 +55,11 @@ class ConnectionTag(Enum): DOCKER_VPN_2 = auto() DOCKER_NLX_1 = auto() DOCKER_INTERNAL_SYMMETRIC_GW = auto() + DOCKER_DERP_1 = auto() + DOCKER_DERP_2 = auto() + DOCKER_DERP_3 = auto() + DOCKER_DNS_SERVER_1 = auto() + DOCKER_DNS_SERVER_2 = auto() DOCKER_SERVICE_IDS: Dict[ConnectionTag, str] = { @@ -91,6 +96,11 @@ class ConnectionTag(Enum): ConnectionTag.DOCKER_VPN_1: "vpn-01", ConnectionTag.DOCKER_VPN_2: "vpn-02", ConnectionTag.DOCKER_INTERNAL_SYMMETRIC_GW: "internal-symmetric-gw-01", + ConnectionTag.DOCKER_DERP_1: "derp-01", + ConnectionTag.DOCKER_DERP_2: "derp-02", + ConnectionTag.DOCKER_DERP_3: "derp-03", + ConnectionTag.DOCKER_DNS_SERVER_1: "dns-server-01", + ConnectionTag.DOCKER_DNS_SERVER_2: "dns-server-02", } diff --git a/nat-lab/tests/utils/process/docker_process.py b/nat-lab/tests/utils/process/docker_process.py index 147d382fc..86ac03020 100644 --- a/nat-lab/tests/utils/process/docker_process.py +++ b/nat-lab/tests/utils/process/docker_process.py @@ -14,6 +14,7 @@ class DockerProcess(Process): _container: DockerContainer + _container_name: str _command: List[str] _stdout: str _stderr: str @@ -27,9 +28,14 @@ class DockerProcess(Process): ) def __init__( - self, container: DockerContainer, command: List[str], kill_id=None + self, + container: DockerContainer, + container_name: str, + command: List[str], + kill_id=None, ) -> None: self._container = container + self._container_name = container_name self._command = command self._stdout = "" self._stderr = "" @@ -43,7 +49,7 @@ async def execute( self, stdout_callback: Optional[StreamCallback] = None, stderr_callback: Optional[StreamCallback] = None, - privileged=False, + privileged: bool = False, ) -> "DockerProcess": self._execute = await self._container.exec( self._command, @@ -88,7 +94,13 @@ async def execute( # 0 success # suppress 137 linux sigkill, 
since we kill those processes if exit_code and exit_code not in [0, 137]: - raise ProcessExecError(exit_code, self._command, self._stdout, self._stderr) + raise ProcessExecError( + exit_code, + self._container_name, + self._command, + self._stdout, + self._stderr, + ) return self @@ -97,10 +109,11 @@ async def run( self, stdout_callback: Optional[StreamCallback] = None, stderr_callback: Optional[StreamCallback] = None, + privileged: bool = False, ) -> AsyncIterator["DockerProcess"]: async def mark_as_done(): try: - await self.execute(stdout_callback, stderr_callback) + await self.execute(stdout_callback, stderr_callback, privileged) finally: self._is_done.set() diff --git a/nat-lab/tests/utils/process/process.py b/nat-lab/tests/utils/process/process.py index cb8aed398..bea4fcdaa 100644 --- a/nat-lab/tests/utils/process/process.py +++ b/nat-lab/tests/utils/process/process.py @@ -7,21 +7,28 @@ class ProcessExecError(Exception): returncode: int + remote_name: str cmd: List[str] stdout: str stderr: str def __init__( - self, returncode: int, cmd: List[str], stdout: str, stderr: str + self, + returncode: int, + remote_name: str, + cmd: List[str], + stdout: str, + stderr: str, ) -> None: self.returncode = returncode + self.remote_name = remote_name self.cmd = cmd self.stdout = stdout self.stderr = stderr def print(self) -> None: print( - f"Executed command {self.cmd} exited with ret code '{self.returncode}'. STDOUT: '{self.stdout}'. STDERR: '{self.stderr}'" + f"Executed command {self.cmd} on {self.remote_name} exited with ret code '{self.returncode}'. STDOUT: '{self.stdout}'. STDERR: '{self.stderr}'" ) @@ -31,7 +38,7 @@ async def execute( self, stdout_callback: Optional[StreamCallback] = None, stderr_callback: Optional[StreamCallback] = None, - privileged=False, + privileged: bool = False, ) -> "Process": pass @@ -41,6 +48,7 @@ async def run( self, stdout_callback: Optional[StreamCallback] = None, stderr_callback: Optional[StreamCallback] = None, + privileged: bool = False, ) -> AsyncIterator["Process"]: yield self diff --git a/nat-lab/tests/utils/process/ssh_process.py b/nat-lab/tests/utils/process/ssh_process.py index 435b39d97..856813292 100644 --- a/nat-lab/tests/utils/process/ssh_process.py +++ b/nat-lab/tests/utils/process/ssh_process.py @@ -8,6 +8,7 @@ class SshProcess(Process): _ssh_connection: asyncssh.SSHClientConnection + _vm_name: str _command: List[str] _stdout: str _stderr: str @@ -20,10 +21,12 @@ class SshProcess(Process): def __init__( self, ssh_connection: asyncssh.SSHClientConnection, + vm_name: str, command: List[str], escape_argument: Callable[[str], str], ) -> None: self._ssh_connection = ssh_connection + self._vm_name = vm_name self._command = command self._stdout = "" self._stderr = "" @@ -38,7 +41,7 @@ async def execute( self, stdout_callback: Optional[StreamCallback] = None, stderr_callback: Optional[StreamCallback] = None, - privileged=False, + privileged: bool = False, ) -> "SshProcess": if privileged: print("'privileged' does nothing for ssh processes") @@ -69,7 +72,11 @@ async def execute( # 0 success if completed_process.returncode and completed_process.returncode != 0: raise ProcessExecError( - completed_process.returncode, self._command, self._stdout, self._stderr + completed_process.returncode, + self._vm_name, + self._command, + self._stdout, + self._stderr, ) return self @@ -79,10 +86,11 @@ async def run( self, stdout_callback: Optional[StreamCallback] = None, stderr_callback: Optional[StreamCallback] = None, + privileged: bool = False, ) -> 
AsyncIterator["SshProcess"]: async def mark_as_done(): try: - await self.execute(stdout_callback, stderr_callback) + await self.execute(stdout_callback, stderr_callback, privileged) finally: self._is_done.set() diff --git a/nat-lab/tests/utils/tcpdump.py b/nat-lab/tests/utils/tcpdump.py index 8ea2399df..9afed7a32 100644 --- a/nat-lab/tests/utils/tcpdump.py +++ b/nat-lab/tests/utils/tcpdump.py @@ -1,8 +1,10 @@ +import os from config import WINDUMP_BINARY_WINDOWS -from contextlib import asynccontextmanager +from contextlib import asynccontextmanager, AsyncExitStack from typing import AsyncIterator, Optional, List from utils.connection import TargetOS, Connection from utils.process import Process +from utils.testing import get_current_test_log_path PCAP_FILE_PATH = "/dump.pcap" @@ -19,19 +21,21 @@ class TcpDump: def __init__( self, connection: Connection, - interface: Optional[str], - output_file: Optional[str], filters: List[str], + interface: str = "any", + output_file: str = PCAP_FILE_PATH, verbose: bool = False, ) -> None: self.connection = connection - self.interface = interface or "any" - self.output_file = output_file or PCAP_FILE_PATH + self.interface = interface + self.output_file = output_file self.process = self.connection.create_process( [ self.get_tcpdump_binary(connection.target_os), - f"-i {self.interface}", - f"-w {self.output_file}", + "-i", + self.interface, + "-w", + self.output_file, ] + filters ) @@ -67,12 +71,48 @@ async def on_stderr(self, output: str) -> None: async def execute(self) -> None: try: - await self.process.execute(self.on_stdout, self.on_stderr) + await self.process.execute(self.on_stdout, self.on_stderr, True) except Exception as e: print(f"Error executing tcpdump: {e}") raise @asynccontextmanager async def run(self) -> AsyncIterator["TcpDump"]: - async with self.process.run(self.on_stdout, self.on_stderr): + async with self.process.run(self.on_stdout, self.on_stderr, True): yield self + + +def find_unique_path_for_tcpdump(log_dir, guest_name): + candidate_path = f"{log_dir}/{guest_name}.pcap" + counter = 1 + # NOTE: counter starting from '1' means that the file will either have no suffix or + # will have a suffix starting from '2'. This is to make it clear that it's not the + # first log for that guest/client. 
+ while os.path.isfile(candidate_path): + counter += 1 + candidate_path = f"./{log_dir}/{guest_name}-{counter}.pcap" + return candidate_path + + +@asynccontextmanager +async def make_tcpdump( + connection_list: list[Connection], + download: bool = True, + store_in: Optional[str] = None, +): + async with AsyncExitStack() as exit_stack: + for conn in connection_list: + await exit_stack.enter_async_context( + TcpDump(conn, ["-U"], verbose=True).run() + ) + try: + yield + finally: + if download: + log_dir = get_current_test_log_path() + os.makedirs(log_dir, exist_ok=True) + for conn in connection_list: + path = find_unique_path_for_tcpdump( + store_in if store_in else log_dir, conn.target_name() + ) + await conn.download(PCAP_FILE_PATH, path) diff --git a/nat-lab/tests/utils/vm/mac_vm_util.py b/nat-lab/tests/utils/vm/mac_vm_util.py index 55cc6f20d..b7dec7eb7 100644 --- a/nat-lab/tests/utils/vm/mac_vm_util.py +++ b/nat-lab/tests/utils/vm/mac_vm_util.py @@ -44,7 +44,7 @@ async def new_connection( known_hosts=None, options=ssh_options, ) as ssh_connection: - connection = SshConnection(ssh_connection, TargetOS.Mac) + connection = SshConnection(ssh_connection, "Mac", TargetOS.Mac) if copy_binaries: await _copy_binaries(ssh_connection, connection) diff --git a/nat-lab/tests/utils/vm/windows_vm_util.py b/nat-lab/tests/utils/vm/windows_vm_util.py index 6ced09a33..a0084acf0 100644 --- a/nat-lab/tests/utils/vm/windows_vm_util.py +++ b/nat-lab/tests/utils/vm/windows_vm_util.py @@ -6,6 +6,7 @@ LIBTELIO_BINARY_PATH_WINDOWS_VM, UNIFFI_PATH_WINDOWS_VM, WINDOWS_1_VM_IP, + WINDOWS_2_VM_IP, ) from contextlib import asynccontextmanager from datetime import datetime @@ -17,6 +18,11 @@ VM_UNIFFI_DIR = UNIFFI_PATH_WINDOWS_VM VM_SYSTEM32 = "C:\\Windows\\System32" +NAME = { + WINDOWS_1_VM_IP: "Windows-1", + WINDOWS_2_VM_IP: "Windows-2", +} + @asynccontextmanager async def new_connection( @@ -46,7 +52,7 @@ async def new_connection( known_hosts=None, options=ssh_options, ) as ssh_connection: - connection = SshConnection(ssh_connection, TargetOS.Windows) + connection = SshConnection(ssh_connection, NAME[ip], TargetOS.Windows) keys = await _get_network_interface_tunnel_keys(connection) for key in keys:
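
Usage sketch: the new make_tcpdump wrapper in utils/tcpdump.py is an async context manager over Connection objects, replacing the old start_tcpdump/stop_tcpdump docker-exec helpers; a test drives it the same way this patch does in conftest.py and helpers.py. The connection tags below are picked purely for illustration.

    from contextlib import AsyncExitStack

    from utils.connection_util import ConnectionTag, new_connection_raw
    from utils.tcpdump import make_tcpdump


    async def capture_example() -> None:
        async with AsyncExitStack() as exit_stack:
            # Open the connections whose traffic should be captured.
            connections = [
                await exit_stack.enter_async_context(new_connection_raw(tag))
                for tag in [
                    ConnectionTag.DOCKER_CONE_CLIENT_1,
                    ConnectionTag.DOCKER_DNS_SERVER_1,
                ]
            ]
            # tcpdump runs for the lifetime of this block; on exit each capture
            # is stopped and its pcap is downloaded next to the other test logs
            # (or to store_in, when make_tcpdump is given one).
            async with make_tcpdump(connections):
                ...  # generate the traffic under test here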