diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 00000000..e6b035dc --- /dev/null +++ b/.coveragerc @@ -0,0 +1,6 @@ +[run] +omit = + snafu/benchmarks/*/* + **/*_wrapper* + **/trigger* + **/__init__.py diff --git a/.github/workflows/doc.yaml b/.github/workflows/doc.yaml new file mode 100644 index 00000000..2e3dd211 --- /dev/null +++ b/.github/workflows/doc.yaml @@ -0,0 +1,24 @@ +name: Documentation Build Test +on: + - push + - pull_request + - workflow_dispatch + +jobs: + build_docs: + runs-on: ubuntu-latest + strategy: + matrix: + pyver: [6, 7, 8, 9] + name: Run Doc Tests -- Python 3.${{ matrix.pyver }} + steps: + - uses: actions/checkout@v2 + - name: Install build dependencies + run: sudo apt-get install -y pandoc + - uses: actions/setup-python@v2 + with: + python-version: 3.${{ matrix.pyver }} + - name: Install tox + run: pip install tox + - name: Run Tox + run: tox -e py3${{ matrix.pyver }}-docs diff --git a/.github/workflows/unit.yaml b/.github/workflows/unit.yaml new file mode 100644 index 00000000..b14e7a37 --- /dev/null +++ b/.github/workflows/unit.yaml @@ -0,0 +1,33 @@ +name: Unit Tests +on: + push: + paths-ignore: + - 'docs/**' + pull_request: + workflow_dispatch: + +jobs: + unit_tests: + runs-on: ubuntu-latest + strategy: + matrix: + pyver: [6, 7, 8, 9] + name: Run Unit Tests -- Python 3.${{ matrix.pyver }} + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-python@v2 + with: + python-version: 3.${{ matrix.pyver }} + - name: Install tox + run: pip install tox + - name: Run Tox + run: tox -e py3${{ matrix.pyver }}-unit + - name: Generate coverage report + if: matrix.pyver == 6 + run: tox -e coverage + - name: Upload coverage report + if: matrix.pyver == 6 + uses: codecov/codecov-action@v1 + with: + files: ./coverage.xml + flags: unit,gha,python-3.${{ matrix.pyver }} diff --git a/.gitignore b/.gitignore index 00aaa5a8..7bf63bed 100644 --- a/.gitignore +++ b/.gitignore @@ -49,6 +49,11 @@ venv.bak/ /.project /.pydevproject +# 
Testing +.tox +.coverage +.coverage.* + # JetBrains settings .idea/ diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 00000000..48715ba5 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1 @@ +include version.txt diff --git a/docs/source/conf.py b/docs/source/conf.py index 6033e58c..148fbc10 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -31,6 +31,7 @@ # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = ["myst_parser", "nbsphinx"] +suppress_warnings = ["myst.mathjax"] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] @@ -55,4 +56,14 @@ # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] +html_static_path = [] + + +# -- nbsphinx configuration -------------------------------------------------- + +nbsphinx_execute = "always" + + +# -- linkcheck configuration -------------------------------------------------- +linkcheck_retries = 5 +linkcheck_timeout = 60 diff --git a/docs/source/contributing.md b/docs/source/contributing.md index f579a6c7..f924e2c4 100644 --- a/docs/source/contributing.md +++ b/docs/source/contributing.md @@ -4,5 +4,6 @@ contributing/setting_up_env contributing/adding_workloads contributing/adding_exports +contributing/unit_testing contributing/documentation ``` diff --git a/docs/source/contributing/adding_workloads.ipynb b/docs/source/contributing/adding_workloads.ipynb index c0f6a07e..0d7e1333 100644 --- a/docs/source/contributing/adding_workloads.ipynb +++ b/docs/source/contributing/adding_workloads.ipynb @@ -59,7 +59,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "id": "2649c485", "metadata": {}, "outputs": [], @@ -143,26 +143,10 @@ }, { "cell_type": "code", - "execution_count": 2, + 
"execution_count": null, "id": "86af872c", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{'cluster_name': None,\n", - " 'config': 'my_config.yaml',\n", - " 'count': 5,\n", - " 'host': ['www.google.com', 'www.bing.com'],\n", - " 'htlhcdtwy': 'no',\n", - " 'labels': {'notebook': 'true'},\n", - " 'samples': 3,\n", - " 'user': 'snafu',\n", - " 'uuid': '1337'}\n" - ] - } - ], + "outputs": [], "source": [ "from snafu.registry import TOOLS\n", "from pprint import pprint\n", @@ -216,7 +200,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "id": "898ea74e", "metadata": {}, "outputs": [], @@ -335,46 +319,10 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "id": "4226479c", "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Temporary file located at /tmp/snafu-pingtest already exists.\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Setup result is: True\n", - "Setup result is: False\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Unable to create temporary file at /tmp/snafu-pingtest: invalid literal for int() with base 10: \"I'm a string\"\n", - "Traceback (most recent call last):\n", - " File \"\", line 81, in setup\n", - " tmp_file = open(self.TMP_FILE_PATH, \"x\")\n", - " File \"\", line 15, in \n", - " open = lambda file, mode: int(\"I'm a string\")\n", - "ValueError: invalid literal for int() with base 10: \"I'm a string\"\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Setup result is: False\n" - ] - } - ], + "outputs": [], "source": [ "from snafu.registry import TOOLS\n", "pingtest = TOOLS[\"pingtest\"]()\n", @@ -411,7 +359,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "id": "87c51692", "metadata": {}, "outputs": [], @@ -553,31 +501,10 @@ }, { "cell_type": "code", - "execution_count": 6, 
+ "execution_count": null, "id": "a704e0f9", "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Unable to remove temporary file at /tmp/snafu-pingtest: [Errno 2] No such file or directory: '/tmp/snafu-pingtest'\n", - "Traceback (most recent call last):\n", - " File \"\", line 112, in cleanup\n", - " os.remove(self.TMP_FILE_PATH)\n", - "FileNotFoundError: [Errno 2] No such file or directory: '/tmp/snafu-pingtest'\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Cleanup result is False\n", - "Setup result is True\n", - "Cleanup result is True\n" - ] - } - ], + "outputs": [], "source": [ "from snafu.registry import TOOLS\n", "pingtest = TOOLS[\"pingtest\"]()\n", @@ -621,7 +548,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "id": "7e2597c1", "metadata": {}, "outputs": [], @@ -639,10 +566,11 @@ "from snafu.benchmarks import BenchmarkResult\n", "from typing import Iterable, Optional\n", "from dataclasses import dataclass, asdict\n", - "# Also shlex for creating our ping command\n", + "# Also shlex and subprocess for creating our ping command\n", "import shlex\n", - "# And finally subprocess to help with running Ping\n", "import subprocess\n", + "# And finally re for regex\n", + "import re\n", "\n", "\n", "@dataclass\n", @@ -709,6 +637,11 @@ " \n", " TMP_FILE_PATH = \"/tmp/snafu-pingtest\"\n", "\n", + " HOST_RE = r\"PING ([a-zA-Z0-9.-]+) \\(([0-9.]+)\\) \\d+\\(([\\d]+)\\) bytes of data\\.\"\n", + " RTT_STATS_RE = r\"rtt min\\/avg\\/max\\/mdev = ([\\d.]+)\\/([\\d.]+)\\/([\\d.]+)\\/([\\d.]+) ms\"\n", + " PACKET_RE = r\"(\\d+) packets transmitted, (\\d+) received, ([\\d.]+)\\% packet loss, \" \\\n", + " r\"time (\\d+)ms\"\n", + " \n", " def setup(self) -> bool:\n", " \"\"\"\n", " Setup the Ping Test Benchmark.\n", @@ -747,8 +680,7 @@ " )\n", " return True\n", " \n", - " @staticmethod\n", - " def parse_host_line(host_line: str, store: PingResult) -> None:\n", + " def 
parse_host_info(self, stdout: str, store: PingResult) -> None:\n", " \"\"\"\n", " Parse the host line of ping stdout.\n", " \n", @@ -756,19 +688,17 @@ " \n", " Parameters\n", " ----------\n", - " host_line : str\n", - " Host line from ping to parse\n", + " stdout : str\n", + " ping stdout to parse\n", " store : PingResult\n", " PingResult instance to store parsed variables into\n", " \"\"\"\n", " \n", - " words = host_line.split(\" \")\n", - " host = words[1]\n", - " ip = words[2].strip(\"()\")\n", - " data_size = words[3]\n", - " \n", - " if \"(\" in data_size:\n", - " data_size = data_size.split(\"(\")[1].strip(\")\")\n", + " result = re.compile(self.HOST_RE).search(stdout)\n", + " if result is None:\n", + " self.logger.warning(f\"Unable to parse host info!\")\n", + " return\n", + " host, ip, data_size = result.groups()\n", " data_size = int(data_size)\n", " \n", " if host == ip:\n", @@ -778,8 +708,7 @@ " store.ip = ip\n", " store.packet_bytes = data_size\n", " \n", - " @staticmethod\n", - " def parse_packet_stats(packet_line: str, store: PingResult) -> None:\n", + " def parse_packet_stats(self, stdout: str, store: PingResult) -> None:\n", " \"\"\"\n", " Parse the packet statistics line of ping stdout.\n", " \n", @@ -788,21 +717,26 @@ " \n", " Parameters\n", " ----------\n", - " packet_line : str\n", - " Packet statistics line to parse from ping\n", + " stdout : str\n", + " ping stdout to parse\n", " store : PingResult\n", " PingResult instance to store parsed variables into\n", " \"\"\"\n", " \n", - " sections = [sec.strip().split(\" \") for sec in packet_line.split(\",\")]\n", + " result = re.compile(self.PACKET_RE).search(stdout)\n", + " if result is None:\n", + " self.logger.warning(\n", + " f\"Unable to parse packet stats!\"\n", + " )\n", + " return\n", + " transmitted, received, packet_loss, time_ms = map(int, result.groups())\n", "\n", - " store.transmitted = int(sections[0][0])\n", - " store.received = int(sections[1][0])\n", - " store.packet_loss = 
float(sections[2][0].strip(\"%\"))\n", - " store.time_ms = int(sections[3][1].strip(\"ms\"))\n", + " store.transmitted = transmitted\n", + " store.received = received\n", + " store.packet_loss = packet_loss\n", + " store.time_ms = time_ms\n", " \n", - " @staticmethod\n", - " def parse_rtt_stats(rtt_line: str, store: PingResult) -> None:\n", + " def parse_rtt_stats(self, stdout: str, store: PingResult) -> None:\n", " \"\"\"\n", " Parse the RTT statistics line of ping stdout.\n", " \n", @@ -810,15 +744,17 @@ " \n", " Parameters\n", " ----------\n", - " rtt_line : str\n", - " RTT statistics line to parse from ping\n", + " stdout : str\n", + " ping stdout to parse\n", " store : PingResult\n", " PingResult instance to store parsed variables into\n", " \"\"\"\n", " \n", - " rtt_min, rtt_avg, rtt_max, rtt_mdev = map(\n", - " float, rtt_line.split(\"=\")[1].strip(\" ms\").split(\"/\")\n", - " )\n", + " result = re.compile(self.RTT_STATS_RE).search(stdout)\n", + " if result is None:\n", + " self.logger.warning(f\"Unable to parse rtt stats!\")\n", + " return\n", + " rtt_min, rtt_avg, rtt_max, rtt_mdev = map(float, result.groups())\n", " store.rtt_min_ms = rtt_min\n", " store.rtt_avg_ms = rtt_avg\n", " store.rtt_max_ms = rtt_max\n", @@ -846,14 +782,11 @@ " success=False\n", " )\n", " \n", - " host_info = lines[0]\n", - " packet_info = lines[-2]\n", - " rtt_info = lines[-1]\n", " \n", " result = PingResult(success=True)\n", - " self.parse_host_line(host_info, result)\n", - " self.parse_packet_stats(packet_info, result)\n", - " self.parse_rtt_stats(rtt_info, result)\n", + " self.parse_host_info(stdout, result)\n", + " self.parse_packet_stats(stdout, result)\n", + " self.parse_rtt_stats(stdout, result)\n", " \n", " return result\n", " \n", @@ -968,62 +901,19 @@ "id": "909223e4", "metadata": {}, "source": [ - "We have finished our new ping test benchmark! Let's try it out!" + "We have finished our new ping test benchmark! Let's try it out! 
We'll ping three hosts:\n", "\n", "* `www.google.com`: Our jupyter notebooks are run on Azure instances, which have ICMP disabled. The output for this test should show 100% packet loss.\n", "* `www.idontexist.heythere`: A domain name which doesn't exist. Ping should exit with a failure saying that the host couldn't be resolved\n", "* `localhost`: We'll ping localhost, as we know regardless of environment we'll be able to ping ourselves." ] }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "id": "50b793c6", "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Starting pingtest wrapper.\n", - "Running setup tasks.\n", - "Successfully created temp file at /tmp/snafu-pingtest\n", - "Collecting results from benchmark.\n", - "Running pings and collecting results.\n", - "Using config: {'config': 'my_config.yaml', 'labels': {'notebook': 'true'}, 'cluster_name': None, 'user': 'snafu', 'uuid': '1337', 'host': ['www.google.com', 'www.bing.com', 'www.idontexist.heythere'], 'count': 5, 'samples': 1, 'htlhcdtwy': 'no'}\n", - "Running ping test against host www.google.com\n", - "Using command: ['ping', '-c', '5', 'www.google.com']\n", - "Collecting sample 0\n", - "b'PING www.google.com (142.250.72.68) 56(84) bytes of data.\\n'\n", - "b'64 bytes from den16s09-in-f4.1e100.net (142.250.72.68): icmp_seq=1 ttl=117 time=12.5 ms\\n'\n", - "b'64 bytes from den16s09-in-f4.1e100.net (142.250.72.68): icmp_seq=2 ttl=117 time=13.8 ms\\n'\n", - "b'64 bytes from den16s09-in-f4.1e100.net (142.250.72.68): icmp_seq=3 ttl=117 time=16.6 ms\\n'\n", - "b'64 bytes from den16s09-in-f4.1e100.net (142.250.72.68): icmp_seq=4 ttl=117 time=28.1 ms\\n'\n", - "b'64 bytes from den16s09-in-f4.1e100.net (142.250.72.68): icmp_seq=5 ttl=117 time=28.0 ms\\n'\n", - "Got process run: {'rc': 0, 'stdout': 'PING www.google.com (142.250.72.68) 56(84) bytes of data.\\n64 bytes from den16s09-in-f4.1e100.net (142.250.72.68): icmp_seq=1 ttl=117 
time=12.5 ms\\n64 bytes from den16s09-in-f4.1e100.net (142.250.72.68): icmp_seq=2 ttl=117 time=13.8 ms\\n64 bytes from den16s09-in-f4.1e100.net (142.250.72.68): icmp_seq=3 ttl=117 time=16.6 ms\\n64 bytes from den16s09-in-f4.1e100.net (142.250.72.68): icmp_seq=4 ttl=117 time=28.1 ms\\n64 bytes from den16s09-in-f4.1e100.net (142.250.72.68): icmp_seq=5 ttl=117 time=28.0 ms\\n\\n--- www.google.com ping statistics ---\\n5 packets transmitted, 5 received, 0% packet loss, time 4005ms\\nrtt min/avg/max/mdev = 12.519/19.813/28.079/6.859 ms\\n', 'stderr': '', 'time_seconds': 4.065928}\n", - "Got sample: {'ip': '142.250.72.68', 'success': True, 'fail_msg': None, 'host': 'www.google.com', 'transmitted': 5, 'received': 5, 'packet_loss': 0.0, 'packet_bytes': 84, 'time_ms': 4005, 'rtt_min_ms': 12.519, 'rtt_avg_ms': 19.813, 'rtt_max_ms': 28.079, 'rtt_mdev_ms': 6.859}\n", - "Finised collecting 1 sample against www.google.com\n", - "Running ping test against host www.bing.com\n", - "Using command: ['ping', '-c', '5', 'www.bing.com']\n", - "Collecting sample 0\n", - "b'PING dual-a-0001.a-msedge.net (13.107.21.200) 56(84) bytes of data.\\n'\n", - "b'64 bytes from 13.107.21.200 (13.107.21.200): icmp_seq=1 ttl=117 time=16.0 ms\\n'\n", - "b'64 bytes from 13.107.21.200 (13.107.21.200): icmp_seq=2 ttl=117 time=88.2 ms\\n'\n", - "b'64 bytes from 13.107.21.200 (13.107.21.200): icmp_seq=3 ttl=117 time=84.8 ms\\n'\n", - "b'64 bytes from 13.107.21.200 (13.107.21.200): icmp_seq=4 ttl=117 time=34.6 ms\\n'\n", - "b'64 bytes from 13.107.21.200 (13.107.21.200): icmp_seq=5 ttl=117 time=23.5 ms\\n'\n", - "Got process run: {'rc': 0, 'stdout': 'PING dual-a-0001.a-msedge.net (13.107.21.200) 56(84) bytes of data.\\n64 bytes from 13.107.21.200 (13.107.21.200): icmp_seq=1 ttl=117 time=16.0 ms\\n64 bytes from 13.107.21.200 (13.107.21.200): icmp_seq=2 ttl=117 time=88.2 ms\\n64 bytes from 13.107.21.200 (13.107.21.200): icmp_seq=3 ttl=117 time=84.8 ms\\n64 bytes from 13.107.21.200 (13.107.21.200): icmp_seq=4 
ttl=117 time=34.6 ms\\n64 bytes from 13.107.21.200 (13.107.21.200): icmp_seq=5 ttl=117 time=23.5 ms\\n\\n--- dual-a-0001.a-msedge.net ping statistics ---\\n5 packets transmitted, 5 received, 0% packet loss, time 4005ms\\nrtt min/avg/max/mdev = 15.964/49.417/88.247/30.872 ms\\n', 'stderr': '', 'time_seconds': 4.212766}\n", - "Got sample: {'ip': '13.107.21.200', 'success': True, 'fail_msg': None, 'host': 'dual-a-0001.a-msedge.net', 'transmitted': 5, 'received': 5, 'packet_loss': 0.0, 'packet_bytes': 84, 'time_ms': 4005, 'rtt_min_ms': 15.964, 'rtt_avg_ms': 49.417, 'rtt_max_ms': 88.247, 'rtt_mdev_ms': 30.872}\n", - "Finised collecting 1 sample against www.bing.com\n", - "Running ping test against host www.idontexist.heythere\n", - "Using command: ['ping', '-c', '5', 'www.idontexist.heythere']\n", - "Collecting sample 0\n", - "b'ping: www.idontexist.heythere: Name or service not known\\n'\n", - "Got process run: {'rc': 2, 'stdout': 'ping: www.idontexist.heythere: Name or service not known\\n', 'stderr': '', 'time_seconds': 0.011917}\n", - "Got sample: {'ip': None, 'success': False, 'fail_msg': 'ping: www.idontexist.heythere: Name or service not known', 'host': 'www.idontexist.heythere', 'transmitted': None, 'received': None, 'packet_loss': None, 'packet_bytes': None, 'time_ms': None, 'rtt_min_ms': None, 'rtt_avg_ms': None, 'rtt_max_ms': None, 'rtt_mdev_ms': None}\n", - "Finised collecting 1 sample against www.idontexist.heythere\n", - "Finished\n", - "Cleaning up\n", - "Successfully removed temp file at /tmp/snafu-pingtest\n" - ] - } - ], + "outputs": [], "source": [ "from snafu.registry import TOOLS\n", "from pprint import pprint\n", @@ -1045,7 +935,7 @@ "!echo \"count: 5\" >> my_config.yaml\n", "# OS ENV\n", "import os\n", - "os.environ[\"HOST\"] = \"[www.google.com,www.bing.com,www.idontexist.heythere]\"\n", + "os.environ[\"HOST\"] = \"[www.google.com,www.idontexist.heythere,localhost]\"\n", "\n", "# Parse arguments and print result\n", "# Since we aren't running 
within the main script (run_snafu.py),\n", @@ -1065,73 +955,10 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": null, "id": "246d6054", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Got 3 results\n", - "{'config': {'count': 5, 'samples': 1},\n", - " 'data': {'fail_msg': None,\n", - " 'host': 'www.google.com',\n", - " 'ip': '142.250.72.68',\n", - " 'packet_bytes': 84,\n", - " 'packet_loss': 0.0,\n", - " 'received': 5,\n", - " 'rtt_avg_ms': 19.813,\n", - " 'rtt_max_ms': 28.079,\n", - " 'rtt_mdev_ms': 6.859,\n", - " 'rtt_min_ms': 12.519,\n", - " 'success': True,\n", - " 'time_ms': 4005,\n", - " 'transmitted': 5},\n", - " 'labels': {'notebook': 'true'},\n", - " 'metadata': {'htlhcdtwy': 'no', 'user': 'snafu', 'uuid': '1337'},\n", - " 'name': 'pingtest',\n", - " 'tag': 'jupyter'}\n", - "{'config': {'count': 5, 'samples': 1},\n", - " 'data': {'fail_msg': None,\n", - " 'host': 'dual-a-0001.a-msedge.net',\n", - " 'ip': '13.107.21.200',\n", - " 'packet_bytes': 84,\n", - " 'packet_loss': 0.0,\n", - " 'received': 5,\n", - " 'rtt_avg_ms': 49.417,\n", - " 'rtt_max_ms': 88.247,\n", - " 'rtt_mdev_ms': 30.872,\n", - " 'rtt_min_ms': 15.964,\n", - " 'success': True,\n", - " 'time_ms': 4005,\n", - " 'transmitted': 5},\n", - " 'labels': {'notebook': 'true'},\n", - " 'metadata': {'htlhcdtwy': 'no', 'user': 'snafu', 'uuid': '1337'},\n", - " 'name': 'pingtest',\n", - " 'tag': 'jupyter'}\n", - "{'config': {'count': 5, 'samples': 1},\n", - " 'data': {'fail_msg': 'ping: www.idontexist.heythere: Name or service not '\n", - " 'known',\n", - " 'host': 'www.idontexist.heythere',\n", - " 'ip': None,\n", - " 'packet_bytes': None,\n", - " 'packet_loss': None,\n", - " 'received': None,\n", - " 'rtt_avg_ms': None,\n", - " 'rtt_max_ms': None,\n", - " 'rtt_mdev_ms': None,\n", - " 'rtt_min_ms': None,\n", - " 'success': False,\n", - " 'time_ms': None,\n", - " 'transmitted': None},\n", - " 'labels': {'notebook': 'true'},\n", - " 
'metadata': {'htlhcdtwy': 'no', 'user': 'snafu', 'uuid': '1337'},\n", - " 'name': 'pingtest',\n", - " 'tag': 'jupyter'}\n" - ] - } - ], + "outputs": [], "source": [ "print(f\"Got {len(results)} results\")\n", "for result in results[:5]:\n", diff --git a/docs/source/contributing/unit_testing.md b/docs/source/contributing/unit_testing.md new file mode 100644 index 00000000..3319a3b5 --- /dev/null +++ b/docs/source/contributing/unit_testing.md @@ -0,0 +1,38 @@ +# Testing + +benchmark-wrapper uses [tox](https://pypi.org/project/tox/) and [pytest](https://docs.pytest.org/en/6.2.x/) for unit testing as well as documentation build testing. + +As a quick reminder, unit testing is defined as follows by [guru99.com](https://www.guru99.com/unit-testing-guide.html): + +> UNIT TESTING is a type of software testing where individual units or components of a software are tested. The purpose is to validate that each unit of the software code performs as expected. + +The goal for unit testing within benchmark-wrapper specifically is to ensure that all shared units (common modules and functionality) behave as expected, in order to create a solid foundation that all benchmarks may be based upon. + +For documentation build testing, the goal is to ensure that the documentation can build without errors and that there are no broken external links. + +Here are the main takeaways: + +* To write unit tests for a module, place them in ``tests/unit/test_.py`` or within docstrings for small, basic functions. +* Use Tox to invoke pytest unit tests and documentation build tests, by choosing from the following environments: ``py{36,37,38,39}-{unit,docs}``. +* Use coverage reports to ensure thorough code testing. + +## Tox Usage + +There are eight distinct environments that Tox is configured for (through the ``tox.ini`` file within the project root), which is a permutation across four versions of Python and our two testing goals. 
These environments can be invoked by picking values from the following sets: + +``tox -e py{36,37,38,39}-{unit,docs}`` + +For instance, using ``py36-unit`` will run unit tests for Python 3.6, while ``py38-docs`` will run a documentation build test for Python 3.8. + + +## Writing Unit Tests + +Unit tests are placed under the ``tests/unit`` directory, contained within individual Python modules. To keep a consistent structure, each module under this directory should correspond to one module within benchmark-wrapper. As an example, to create unit tests for ``snafu.module``, place them within ``tests/unit/test_module.py``. + +Tests can also live within docstrings, which will be invoked using pytest's [doctest functionality](https://docs.pytest.org/en/6.2.x/doctest.html). Docstring unit tests should be reserved for small, simple functions and/or to demonstrate example usage for users. This allows for automated regression testing and breaking change detection, as, if the example fails, then there must have been some change which will impact how the code is used. For numpy-style docstrings, docstring tests should live under the ["Examples"](https://numpydoc.readthedocs.io/en/latest/format.html#examples) section. + +For more information on how pytest can be leveraged to write unit tests, please read the [pytest documentation](https://docs.pytest.org/en/6.2.x/example/index.html). + +## Code Coverage + +When unit tests are invoked using tox, [pytest-cov](https://pytest-cov.readthedocs.io/en/latest/readme.html) will be used to generate a coverage report, showing which lines were covered. Additionally, a [coverage file](https://coverage.readthedocs.io/en/coverage-5.5/) will be placed in the project root with the tox environment used as the file extension (i.e. ``.coverage.py{36,37,38,39}-unit``). Please use these coverage resources to help write unit tests for your PRs as needed. 
Note that code for benchmark wrappers are not included in these coverage reports, as benchmarks will be tested with [functional tests](https://www.guru99.com/functional-testing.html), rather than unit tests. diff --git a/setup.cfg b/setup.cfg index 8b273c25..91daf96d 100644 --- a/setup.cfg +++ b/setup.cfg @@ -22,14 +22,13 @@ packages = find: include_package_data = True # Add here dependencies of your project (semicolon/line-separated), e.g. install_requires = dataclasses; configargparse; configparser; elasticsearch>=7.0.0,<7.14.0; statistics; numpy; pyyaml; requests; redis; python-dateutil>=2.7.3; prometheus_api_client; scipy; openshift==0.11; kubernetes==11; setuptools>=40.3.0; boto3; flent; importlib_metadata; kafka-python; ttp -# tests_require = pytest; pytest-cov # Require a specific Python version, e.g. Python 2.7 or >= 3.4 python_requires = >=3.6 [options.extras_require] # Add here additional requirements for extra features, to install with: docs = sphinx<4; sphinx-rtd-theme; myst-parser; nbsphinx; ipykernel; notebook; IPython; pandoc - +tests = pytest; pytest-cov; tox [options.entry_points] # Add here console scripts like: console_scripts = diff --git a/snafu/benchmarks/uperf/uperf.py b/snafu/benchmarks/uperf/uperf.py index ffa71eca..e8d1b3ee 100644 --- a/snafu/benchmarks/uperf/uperf.py +++ b/snafu/benchmarks/uperf/uperf.py @@ -8,7 +8,7 @@ import shlex from snafu.benchmarks import Benchmark, BenchmarkResult from snafu.config import Config, ConfigArgument, FuncAction, check_file, none_or_type -from snafu.process import sample_process, ProcessSample +from snafu.process import sample_process class ParseRangeAction(FuncAction): @@ -262,41 +262,45 @@ def collect(self) -> Iterable[BenchmarkResult]: """ cmd = shlex.split(f"uperf -v -a -R -i 1 -m {self.config.workload}") + _plural = "s" if self.config.sample > 1 else "" + self.logger.info(f"Collecting {self.config.sample} sample{_plural} of Uperf") + + samples = sample_process( + cmd, + self.logger, + 
num_samples=self.config.sample, + retries=2, + expected_rc=0, + env=self.config.get_env(), + ) - for sample_num in range(1, self.config.sample + 1): - self.logger.info(f"Starting Uperf sample number {sample_num}") - sample: ProcessSample = sample_process( - cmd, self.logger, retries=2, expected_rc=0, env=self.config.get_env(), - ) - + for sample_num, sample in enumerate(samples): if not sample.success: self.logger.critical(f"Uperf failed to run! Got results: {sample}") return - else: - self.logger.info(f"Finished collecting sample {sample_num}") - self.logger.debug(f"Got sample: {sample}") - - if sample.successful.stdout is None: - self.logger.critical( - f"Uperf ran successfully, but didn't get stdout. Got results: {sample}" - ) - return - - stdout: UperfStdout = self.parse_stdout(sample.successful.stdout) - result_data: List[UperfStat] = self.get_results_from_stdout(stdout) - config: UperfConfig = UperfConfig.new(stdout, self.config) - - for result_datapoint in result_data: - result_datapoint.iteration = sample_num - result: BenchmarkResult = self.create_new_result( - data=dataclasses.asdict(result_datapoint), - config=dataclasses.asdict(config), - tag="results", - ) - self.logger.debug(f"Got sample result: {result}") - yield result - - self.logger.info(f"Successfully collected {self.config.sample} samples of Uperf.") + + self.logger.info(f"Finished collecting sample {sample_num}") + self.logger.debug(f"Got sample: {sample}") + + if sample.successful.stdout is None: + self.logger.critical(f"Uperf ran successfully, but didn't get stdout. 
Got results: {sample}") + return + + stdout: UperfStdout = self.parse_stdout(sample.successful.stdout) + result_data: List[UperfStat] = self.get_results_from_stdout(stdout) + config: UperfConfig = UperfConfig.new(stdout, self.config) + + for result_datapoint in result_data: + result_datapoint.iteration = sample_num + result: BenchmarkResult = self.create_new_result( + data=dataclasses.asdict(result_datapoint), + config=dataclasses.asdict(config), + tag="results", + ) + self.logger.debug(f"Got sample result: {result}") + yield result + + self.logger.info(f"Successfully collected {self.config.sample} sample{_plural} of Uperf.") def cleanup(self) -> bool: return True diff --git a/snafu/config.py b/snafu/config.py index 9534e5c9..8380b544 100644 --- a/snafu/config.py +++ b/snafu/config.py @@ -1,13 +1,11 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- """Tools for setting up config arguments.""" -import os -from typing import Any, Callable, Dict, Iterable, List, Mapping, Tuple, TypeVar, Union import argparse -import configargparse - +import os +from typing import Any, Callable, Dict, Iterable, List, Mapping, Sequence, Tuple, Union -_T = TypeVar("T") +import configargparse def check_file(file: str, perms: int = None) -> bool: @@ -37,13 +35,13 @@ def check_file(file: str, perms: int = None) -> bool: return os.access(os.path.abspath(file), perms) -def none_or_type(t: _T) -> Callable[[Any], Union[_T, None]]: +def none_or_type(target_type: type) -> Callable[[Any], Union[type, None]]: """ Return a function which supports allowing an argparse argument to be ``None`` or a specific type. Paramaters ---------- - t : type + target_type : type Type that the returned function will attempt to cast given arguments to. Returns @@ -53,8 +51,8 @@ def none_or_type(t: _T) -> Callable[[Any], Union[_T, None]]: the argument is casted to the given type ``t``. 
""" - def _t_or_none(val: Any) -> Union[_T, None]: - return val if val is None else t(val) + def _t_or_none(val: Any) -> Union[type, None]: + return val if val is None else target_type(val) return _t_or_none @@ -74,7 +72,7 @@ class FuncAction(argparse.Action): ... return str(arg) + "_this_is_my_string" >>> import argparse >>> p = argparse.ArgumentParser() - >>> p.add_argument("value", type=str, action=AppendStr) + >>> _ = p.add_argument("value", type=str, action=AppendStr) >>> p.parse_args(["my_input"]).value 'my_input_this_is_my_string' """ @@ -86,9 +84,10 @@ def __call__( self, parser: configargparse.ArgumentParser, namespace: argparse.Namespace, - values: str, + values: Union[str, Sequence[Any], None], option_string=None, ): + """Set destination attribute in namespace to output of performing func on given values.""" setattr(namespace, self.dest, self.func(values)) @@ -114,10 +113,11 @@ class ConfigArgument: >>> c.args ('one', 2, 'three') >>> c.kwargs - {"a": "b", "c": "d"} + {'a': 'b', 'c': 'd'} """ def __init__(self, *args, **kwargs): + """Set ``args`` to given args, and ``kwargs`` to given kwargs.""" self.args: Tuple[Any] = args self.kwargs: Dict[str, Any] = kwargs @@ -150,12 +150,14 @@ class Config: """ def __init__(self, tool_name: str): + """Create param namespace, pull global parser and create sub-group for arguments.""" self.params: argparse.Namespace = argparse.Namespace() self.parser: configargparse.ArgumentParser = configargparse.get_argument_parser() self.group = self.parser.add_argument_group(tool_name) self.env_to_params: Dict[str, str] = dict() def __getattr__(self, attr): + """If given ``attr`` doesn't already exist in instance, try to pull from ``params``.""" return getattr(self.params, attr, None) def get_env(self) -> Mapping[str, str]: @@ -165,8 +167,14 @@ def get_env(self) -> Mapping[str, str]: Will add in environment variables from the OS environment. 
""" - env = {env_var: str(getattr(self.params, dest)) for env_var, dest in self.env_to_params.items()} + env: Dict[str, str] = dict() env.update(os.environ) + for env_var, dest in self.env_to_params.items(): + try: + env[env_var] = str(getattr(self.params, dest)) + except AttributeError: + pass + return env def add_argument(self, *args, **kwargs) -> None: @@ -186,8 +194,9 @@ def populate_parser(self, args: Iterable[ConfigArgument]) -> None: Populate args into the parser from the given list of config arguments. Parameters - ----------- + ---------- args : list of :py:class:`~snafu.config.ConfigArgument` + Arguments to populate into the parser """ for arg in args: @@ -198,7 +207,7 @@ def parse_args(self, args: List[str] = None) -> None: Parse arguments and store them in the ``params`` attribute. Parameters - ----------- + ---------- args : list of str, optional List of arguments to be passed manually to the parser for parsing. """ diff --git a/snafu/process.py b/snafu/process.py index e5753fb1..5e23b71f 100644 --- a/snafu/process.py +++ b/snafu/process.py @@ -1,50 +1,110 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -"""Tools for running subprocesses""" -from typing import Any, List, Mapping, Optional, Union +"""Tools for running subprocesses.""" import dataclasses import datetime import logging +import queue import subprocess import threading -import queue +from typing import Any, Iterable, List, Mapping, Optional, Union @dataclasses.dataclass class ProcessRun: - rc: Optional[int] = None + """Represent a single run of a subprocess without retries.""" + + rc: Optional[int] = None # pylint: disable=C0103 stdout: Optional[str] = None stderr: Optional[str] = None time_seconds: Optional[float] = None + hit_timeout: Optional[bool] = None @dataclasses.dataclass class ProcessSample: + """Represent a process that will be retried on failure.""" + expected_rc: Optional[int] = None success: Optional[bool] = None attempts: Optional[int] = None - timeout: Optional[bool] = 
None + timeout: Optional[int] = None failed: List[ProcessRun] = dataclasses.field(default_factory=list) successful: ProcessRun = ProcessRun() class LiveProcess: + r""" + Open a subprocess and get live access to stdout and stderr. + + Context manager that runs the given command on entry, cleans up on exit, and creates + a ProcessRun object summarizing the results. The process's stdout and stderr will be captured and + exposed via the ``stdout`` and ``stderr`` :py:class:`queue.Queue` attributes. + + By default a pipe for stdout and stderr are created and given to the created subprocess. If ``stdout``, + ``stderr`` or ``capture_output`` options are given by the user through kwargs, then no automatic pipe + creation will be performed. + + Parameters + ---------- + cmd : str or list of str + Command to run. Can be string or list or string if using :py:mod:`shlex` + timeout : int, optional + When cleaning up the running process, this value specifies time in seconds to wait for + process to finish before killing it. + **kwargs + Additional kwargs given will be passed directly to the :py:class:`subprocess.Popen` call used in the + background to launch the command. + + Attributes + ---------- + cmd : str or list of str + Command to execute + timeout : int or None + Timeout value in seconds, if given + kwargs : mapping + kwargs to pass to :py:class:subprocess.Popen` + attempt : ProcessRun + ProcessRun instance describing the run process + process : subprocess.Popen + Popen object created for the run command + start_time : datetime.datetime + Datetime object representing approximate time that the process was started + end_time : datetime.datetime + Datetime object representing approximate time that the process exited + + Examples + -------- + >>> from snafu.process import LiveProcess + >>> with LiveProcess("echo 'test'; sleep 0.5; echo 'test2'", shell=True) as lp: + ... print(lp.stdout.get()) + ... print(lp.stdout.get()) # will block until another line is ready + ... 
run = lp.attempt + ... + b'test\n' + b'test2\n' + >>> run.stdout # decoded for us + 'test\ntest2\n' + """ + def __init__(self, cmd: Union[str, List[str]], timeout: Optional[int] = None, **kwargs): + """Create instance attributes with None for defaults as needed and check pipe arguments in kwargs.""" self.cmd: Union[str, List[str]] = cmd self.timeout: Optional[int] = timeout self.kwargs: Mapping[str, Any] = kwargs - self.cleaned: bool = False self.stdout: queue.Queue = queue.Queue() self.stderr: queue.Queue = queue.Queue() - self._stdout: bytes = b"" - self._stderr: bytes = b"" self.attempt: Optional[ProcessRun] = ProcessRun() - - # These set later on - self.start_time: Optional[datetime.datetime] = None self.process: Optional[subprocess.Popen] = None + self.start_time: Optional[datetime.datetime] = None self.end_time: Optional[datetime.datetime] = None - self.threads: Optional[List[threading.Thread]] = None + + self._cleaned: bool = False + self._stdout: bytes = b"" + self._stderr: bytes = b"" + self._threads: Optional[List[threading.Thread]] = None + + self._check_pipes(self.kwargs) @staticmethod def _check_pipes(kwargs): @@ -56,18 +116,19 @@ def _check_pipes(kwargs): kwargs["stdout"] = subprocess.PIPE kwargs["stderr"] = subprocess.PIPE - def _enqueue_line_from_fh(self, fh, queue, store): - if fh is not None: - for line in iter(fh.readline, b""): - queue.put(line) + def _enqueue_line_from_fh(self, file_handler, queue_attr, store): + if file_handler is not None: + for line in iter(file_handler.readline, b""): + queue_attr.put(line) # use this method since running in separate thread setattr(self, store, getattr(self, store) + line) def start(self): - self._check_pipes(self.kwargs) + """Start the subprocess and create threads for capturing output.""" + self.start_time = datetime.datetime.utcnow() - self.process = subprocess.Popen(self.cmd, **self.kwargs) - self.threads = [ + self.process = subprocess.Popen(self.cmd, **self.kwargs) # pylint: disable=R1732 + 
self._threads = [ threading.Thread( target=self._enqueue_line_from_fh, args=(self.process.stdout, self.stdout, "_stdout"), @@ -80,37 +141,44 @@ def start(self): ), ] - [t.start() for t in self.threads] + for thread in self._threads: + thread.start() def __enter__(self): + """Call start method and return self.""" self.start() return self def cleanup(self): - if not self.cleaned: + """Cleanup the subprocess with a timeout and populate the ProcessRun instance at ``attempt``.""" + if not self._cleaned: if self.timeout is not None: try: self.process.wait(timeout=self.timeout) + self.attempt.hit_timeout = False except subprocess.TimeoutExpired: + self.attempt.hit_timeout = True self.process.kill() self.process.wait() else: self.process.wait() self.end_time = datetime.datetime.utcnow() - [t.join() for t in self.threads] + for thread in self._threads: + thread.join() - self.cleaned = True + self._cleaned = True self.attempt.stdout = self._stdout.decode("utf-8") self.attempt.stderr = self._stderr.decode("utf-8") self.attempt.rc = self.process.returncode self.attempt.time_seconds = (self.end_time - self.start_time).total_seconds() def __exit__(self, *args, **kwargs): + """Call cleanup method on exit of context.""" self.cleanup() -def sample_process( +def get_process_sample( cmd: Union[str, List[str]], logger: logging.Logger, retries: int = 0, @@ -118,12 +186,40 @@ def sample_process( timeout: Optional[int] = None, **kwargs, ) -> ProcessSample: - """Run the given command as a subprocess within a shell""" + """ + Run the given command as a subprocess, retrying if the command fails. - logger.debug(f"Running command with timeout of {timeout}: {cmd}") + Essentially just a wrapper around :py:class:`~snafu.process.LiveProcess` that will retry running a + subprocess if it fails, returning a :py:class:`~snafu.process.ProcessSample` detailing the results. 
+ + This function expects a logger because it is expected that it will be used by benchmarks, which should + be logging their progress anyways. + + Parameters + ---------- + cmd : str or list of str + Command to run. Can be string or list of strings if using :py:mod:`shlex` + logger : logging.Logger + Logger to use in order to log progress. + retries : int + Number of retries to perform. Defaults to zero, which means that the function will run the process + once, not retrying on failure. + expected_rc : int + Expected return code of the process. Will be used to determine if the process ran successfully or not. + timeout : int + Time in seconds to wait for process to complete before killing it. + kwargs + Extra kwargs will be passed to :py:class:`~snafu.process.LiveProcess` + + Returns + ------- + ProcessSample + """ + + logger.debug(f"Running command: {cmd}") logger.debug(f"Using args: {kwargs}") - result = ProcessSample(expected_rc=expected_rc) + result = ProcessSample(expected_rc=expected_rc, timeout=timeout) tries: int = 0 tries_plural: str = "" @@ -131,21 +227,20 @@ def sample_process( tries += 1 logger.debug(f"On try {tries}") - with LiveProcess(cmd, timeout, **kwargs) as lp: - lp.cleanup() - attempt: ProcessRun = lp.attempt + with LiveProcess(cmd, timeout=timeout, **kwargs) as proc: + proc.cleanup() + attempt: ProcessRun = proc.attempt logger.debug(f"Finished running. 
Got attempt: {attempt}") logger.debug(f"Got return code {attempt.rc}, expected {expected_rc}") - if attempt.rc == expected_rc: + if attempt.rc != expected_rc: + logger.warning(f"Got bad return code from command: {cmd}.") + result.failed.append(attempt) + else: logger.debug(f"Command finished with {tries} attempt{tries_plural}: {cmd}") result.successful = attempt result.success = True break - else: - logger.warning(f"Got bad return code from command: {cmd}.") - result.failed.append(attempt) - tries_plural = "s" else: # If we hit retry limit, we go here @@ -156,4 +251,32 @@ def sample_process( return result -# TODO: Add a sampler +def sample_process( + cmd: Union[str, List[str]], + logger: logging.Logger, + retries: int = 0, + expected_rc: int = 0, + timeout: Optional[int] = None, + num_samples: int = 1, + **kwargs, +) -> Iterable[ProcessSample]: + """Yield multiple samples of the given command.""" + + _plural = "s" if num_samples > 1 else "" + logger.info(f"Collecting {num_samples} sample{_plural} of command {cmd}") + for sample_num in range(1, num_samples + 1): + logger.debug(f"Starting sample {sample_num}") + sample: ProcessSample = get_process_sample( + cmd, logger, retries=retries, expected_rc=expected_rc, timeout=timeout, **kwargs + ) + logger.debug(f"Got sample for command {cmd}: {sample}") + + if not sample.success: + logger.warning(f"Sample {sample_num} has failed state for command {cmd}") + else: + logger.debug(f"Sample {sample_num} has success state for command {cmd}") + + yield sample + logger.debug(f"Collected sample {sample_num} for command {cmd}") + + logger.info(f"Finished collecting {num_samples} sample{_plural} for command {cmd}") diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py new file mode 100644 index 00000000..b21907ee --- /dev/null +++ b/tests/unit/test_config.py @@ -0,0 +1,133 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +"""Test functionality in the config module.""" +import os +import stat + +import configargparse + 
+import snafu.config + + +def test_check_file_returns_bool_as_expected(tmpdir): + """Test that the check_file function will return ``True`` and ``False`` when it is supposed to.""" + + tmpdir.chmod(stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC) + test_file = tmpdir.join("testfile.txt") + test_file.write("some content") + test_file_path = test_file.realpath() + assert test_file.check() + + test_perms = ( + (os.R_OK, stat.S_IREAD), + (os.R_OK | os.W_OK, stat.S_IREAD | stat.S_IWRITE), + (os.R_OK | os.W_OK | os.EX_OK, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC), + ) + + with tmpdir.as_cwd(): + for check, perm in test_perms: + test_file.chmod(0) + assert snafu.config.check_file(test_file_path, perms=check) is False + test_file.chmod(perm) + assert snafu.config.check_file(test_file_path, perms=check) is True + + +def test_none_or_type_function(): + """Test that the none_or_type function returns function that casts values only when they aren't None.""" + + tests = ( + (int, 1), + (int, "1"), + (str, "hey"), + (str, 2), + (dict, (("a", 1), ("b", 2))), + ) + + for expected_type, value in tests: + wrapped = snafu.config.none_or_type(expected_type) + assert wrapped(None) is None + assert wrapped(value) == expected_type(value) + assert isinstance(wrapped(value), expected_type) + + +class TestConfig: + """Test functionality of Config class.""" + + test_args = ( + snafu.config.ConfigArgument("param1", type=int), + snafu.config.ConfigArgument("param2", type=str), + snafu.config.ConfigArgument("--param3", type=str, env_var="param3"), + snafu.config.ConfigArgument("--param4", type=int, env_var="p4"), + ) + test_input = ["1234", "my_string", "--param3", "1234", "--param4", "4321"] + + @staticmethod + def verify_args(config: snafu.config.Config): + """Verify that config contains expected args from cls.test_args and cls.test_input.""" + + assert config.param1 == 1234 + assert config.param2 == "my_string" + assert config.param3 == "1234" + assert config.param4 == 4321 + + @staticmethod 
+ def get_config_instance() -> snafu.config.Config: + """Return a new Config instance with tool_name set to 'TEST'.""" + try: + configargparse.init_argument_parser() + except ValueError: + del configargparse._parsers["default"] # pylint: disable=W0212 + configargparse.init_argument_parser() + return snafu.config.Config("TEST") + + def test_init_creates_argparser(self): + """ + Test that init creates an instance of ``configargparse.ArgParser``. + + If this changes, then this test suite and benchmark options will need to be updated, as it's + expected that arguments to ``add_argument`` will change. + """ + + config = self.get_config_instance() + assert isinstance(config.parser, configargparse.ArgumentParser) + + def test_can_add_args_and_parse_using_add_argument_method(self): + """Test that we can populate a parser and parse args using the ``add_argument`` method.""" + + config = self.get_config_instance() + for arg in self.test_args: + config.add_argument(*arg.args, **arg.kwargs) + config.parse_args(self.test_input) + self.verify_args(config) + + def test_can_add_and_parse_using_populate_parser_method(self): + """Test that we can populate a parser and parse args using the ``populate_parser`` method.""" + + config = self.get_config_instance() + config.populate_parser(self.test_args) + config.parse_args(self.test_input) + self.verify_args(config) + + def test_can_get_env_param_mapping(self): + """Test that env_to_params attribute becomes populated appropriately as we add arguments.""" + + config = self.get_config_instance() + assert config.env_to_params == {} + config.populate_parser(self.test_args) + assert config.env_to_params == {"param3": "param3", "p4": "param4"} + + def test_get_env_returns_correct_env_mappings(self): + """Tests that the env_to_params attribute will return the correct env variable mappings.""" + + config = self.get_config_instance() + config.populate_parser(self.test_args) + assert config.get_env() == dict(os.environ) + + 
config.parse_args(self.test_input) + self.verify_args(config) + + env = config.get_env() + assert env["param3"] == str(config.param3) + assert env["p4"] == str(config.param4) + for key, val in os.environ.items(): + assert env[key] == val diff --git a/tests/unit/test_process.py b/tests/unit/test_process.py new file mode 100644 index 00000000..72885314 --- /dev/null +++ b/tests/unit/test_process.py @@ -0,0 +1,197 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +"""Test functionality in the process module.""" +import logging +import shlex +import subprocess + +import pytest + +import snafu.process + + +class TestLiveProcess: + """Test the LiveProcess context manager.""" + + @staticmethod + def test_check_pipes_method_only_modifies_if_no_user_given_values(): + """Test that LiveProcess._check_pipes only modifies pipes in kwargs if user didn't specify pipes.""" + + for key in ("stdout", "stderr", "capture_output"): + kwargs = {key: True} + proc = snafu.process.LiveProcess("", **kwargs) + assert proc.kwargs == {key: True} + + kwargs = {} + proc = snafu.process.LiveProcess("") + assert proc.kwargs.get("stdout", None) is not None and proc.kwargs.get("stderr", None) is not None + + @staticmethod + def test_live_process_calls_start_on_enter_and_cleanup_on_exit(monkeypatch): + """Test that when we enter the LiveProcess CM we call start, and on exit we call cleanup.""" + + def monkey_start(self): + self.monkey_start = True + + def monkey_cleanup(self): + self.monkey_cleanup = True + + monkeypatch.setattr("snafu.process.LiveProcess.start", monkey_start) + monkeypatch.setattr("snafu.process.LiveProcess.cleanup", monkey_cleanup) + + with snafu.process.LiveProcess("") as proc: + assert proc.monkey_start is True + assert proc.monkey_cleanup is True + + @staticmethod + def test_live_process_runs_a_command_and_gives_output(): + """Test that LiveProcess can run a process and give us the expected process information.""" + + tests = ( + ( + {"cmd": shlex.split("echo test")}, + 
{"stdout": "test\n", "stderr": "", "rc": 0, "time_seconds": [0, 0.2]}, + ), + ( + {"cmd": "echo 'test'; sleep 0.5; echo 'test2'", "shell": True}, + {"stdout": "test\ntest2\n", "stderr": "", "rc": 0, "time_seconds": [0.5, 1]}, + ), + ( + {"cmd": "echo 'test' >&2 | grep 'not here'", "shell": True}, + {"stdout": "", "stderr": "test\n", "rc": 1, "time_seconds": [0, 0.2]}, + ), + ( + {"cmd": "echo 'test' >&2", "shell": True, "stderr": subprocess.STDOUT}, + {"stdout": "", "stderr": "", "rc": 0, "time_seconds": [0, 0.2]}, + ), + ( + { + "cmd": "echo 'test' >&2", + "shell": True, + "stdout": subprocess.PIPE, + "stderr": subprocess.STDOUT, + }, + {"stdout": "test\n", "stderr": "", "rc": 0, "time_seconds": [0, 0.2]}, + ), + ) + + for kwargs, results in tests: + with snafu.process.LiveProcess(**kwargs) as proc: + pass + + attempt = proc.attempt + for key, val in results.items(): + if key == "time_seconds": + assert val[0] < attempt.time_seconds < val[1] + else: + assert getattr(attempt, key) == val + + @staticmethod + def test_live_process_kills_and_does_cleanup_after_timeout(): + """Test that LiveProcess will only kill a process after a timeout.""" + + with snafu.process.LiveProcess(shlex.split("sleep 0.5"), timeout=1) as proc: + pass + assert 0 < proc.attempt.time_seconds < 1 + assert proc.attempt.hit_timeout is False + + with snafu.process.LiveProcess(shlex.split("sleep 2"), timeout=0.5) as proc: + pass + assert 0 < proc.attempt.time_seconds < 1 + assert proc.attempt.hit_timeout is True + + +def test_get_process_sample_will_use_live_process(monkeypatch): + """Assert that get_process_sample will use LiveProcess in the background.""" + + class MyError(Exception): # pylint: disable=C0115 + pass + + def live_process_monkey(*args, **kwargs): + raise MyError + + monkeypatch.setattr("snafu.process.LiveProcess", live_process_monkey) + with pytest.raises(MyError): + snafu.process.get_process_sample("TEST_USES_LIVE_PROCESS", logging.getLogger()) + + +def 
test_get_process_sample_will_rerun_failed_process(tmpdir): + """ + Test that get_process_sample will rerun a failed process successfully. + + For this test, we'll run the following command three times, expecting it to succeed on the last run: + ``echo -n "a" >> testfile.txt ; grep "aaa" testfile.txt``. + """ + + test_file = tmpdir.join("testfile.txt") + test_file_path = test_file.realpath() + cmd = f'echo -n "a" >> {test_file_path} ; grep "aaa" {test_file_path}' + + result: snafu.process.ProcessSample = snafu.process.get_process_sample( + cmd, logging.getLogger(), shell=True, retries=2, expected_rc=0 + ) + + assert result.success is True + assert result.expected_rc == 0 + assert result.attempts == 3 + assert result.timeout is None + assert not any(failed.rc == 0 for failed in result.failed) + assert result.successful.rc == 0 + assert result.successful.stdout == "aaa\n" + + +def test_get_process_sample_sets_failed_if_no_tries_succeed(): + """Test that get_process_sample will set the "success" attribute to False if no tries are successful.""" + + result: snafu.process.ProcessSample = snafu.process.get_process_sample( + shlex.split("test 1 == 0"), logging.getLogger(), retries=0, expected_rc=0 + ) + assert result.success is False + + +def test_sample_process_uses_get_process_sample(monkeypatch): + """Test that the sample_process function uses get_process_sample in the background.""" + + class MyError(Exception): # pylint: disable=C0115 + pass + + def monkeypatch_get_process_sample(*args, **kwargs): + raise MyError + + monkeypatch.setattr("snafu.process.get_process_sample", monkeypatch_get_process_sample) + with pytest.raises(MyError): + # need to convert to list since sample_process yields + list(snafu.process.sample_process("TEST_SAMPLE_PROCESS", logging.getLogger())) + + +def test_sample_process_yields_appropriate_number_of_samples(tmpdir): + """ + Test that sample_process will yield the expected number of ProcessSample instances. 
+ + Will use the same test methodology as test_get_process_sample_will_rerun_failed_process. + """ + + test_file = tmpdir.join("testfile.txt") + test_file_path = test_file.realpath() + cmd = f'echo -n "a" >> {test_file_path} ; grep "aaa" {test_file_path}' + + samples = snafu.process.sample_process( + cmd, logging.getLogger(), shell=True, retries=0, expected_rc=0, num_samples=3, timeout=10 + ) + for i, sample in enumerate(samples): + if i == 2: + assert sample.success is True + assert sample.expected_rc == 0 + assert sample.attempts == 1 + assert sample.timeout == 10 + assert len(sample.failed) == 0 + assert sample.successful.hit_timeout is False + assert sample.successful.rc == 0 + assert sample.successful.stdout == "aaa\n" + else: + assert sample.success is False + assert sample.expected_rc == 0 + assert sample.attempts == 1 + assert sample.timeout == 10 + assert len(sample.failed) == 1 + assert sample.failed[0].rc == 1 diff --git a/tox.ini b/tox.ini new file mode 100644 index 00000000..d3889347 --- /dev/null +++ b/tox.ini @@ -0,0 +1,28 @@ +[tox] +envlist = + py{36,37,38,39}{-unit,-docs} +skip_missing_interpreters = true + +[testenv:py{36,37,38,39}-unit] +extras = + tests +setenv = + py{36,37,38,39}-unit: COVERAGE_FILE = .coverage.{envname} +commands = + pytest --ignore=tests/functional --cov-config=.coveragerc --cov=snafu --cov-report=term-missing:skip-covered --doctest-modules + +[testenv:coverage] +skip_install = true +deps = coverage +commands = + coverage combine + coverage xml + +[testenv:py{36,37,38,39}-docs] +extras = + docs +allowlist_externals = + sphinx-build +commands = + sphinx-build -d "{toxworkdir}/docs_doctree" docs/source "{toxworkdir}/docs_out" --color -W -bhtml + sphinx-build -d "{toxworkdir}/docs_doctree" docs/source "{toxworkdir}/docs_out/linkchecks" --color -W -blinkcheck