diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml index 08dc078..46e8490 100644 --- a/.github/workflows/run-tests.yml +++ b/.github/workflows/run-tests.yml @@ -112,11 +112,11 @@ jobs: shell: bash command: | . ~/.profile - pytest spyder_unittest -x -vv + pytest spyder_unittest -vv - name: Run tests (Windows) if: matrix.OS == 'windows' uses: nick-fields/retry@v2 with: timeout_minutes: 10 max_attempts: 3 - command: pytest spyder_unittest -x -vv + command: pytest spyder_unittest -vv diff --git a/spyder_unittest/backend/abbreviator.py b/spyder_unittest/backend/abbreviator.py index d92f4d7..e2f3784 100644 --- a/spyder_unittest/backend/abbreviator.py +++ b/spyder_unittest/backend/abbreviator.py @@ -5,6 +5,23 @@ # (see LICENSE.txt for details) """Class for abbreviating test names.""" +from __future__ import annotations + +# Standard imports +from dataclasses import dataclass + +@dataclass +class Abbreviation: + """ + Abbreviation for one component of a test name. + + Abbreviations are defined recursively, so `.head` is the abbreviation + for the first component and `.tail` specifies the abbreviations for the + second and later components. + """ + head: str + tail: Abbreviator + class Abbreviator: """ @@ -26,7 +43,7 @@ class Abbreviator: the higher-level components as its second element. """ - def __init__(self, names=[]): + def __init__(self, names: list[str]=[]) -> None: """ Constructor. @@ -35,11 +52,11 @@ def __init__(self, names=[]): names : list of str list of words which needs to be abbreviated. """ - self.dic = {} + self.dic: dict[str, Abbreviation] = {} for name in names: self.add(name) - def add(self, name): + def add(self, name: str) -> None: """ Add name to list of names to be abbreviated. 
@@ -61,18 +78,18 @@ def add(self, name): and len_abbrev < len(other)): len_abbrev += 1 if len_abbrev == len(start): - self.dic[other][0] = other[:len_abbrev + 1] + self.dic[other].head = other[:len_abbrev + 1] elif len_abbrev == len(other): - self.dic[other][0] = other + self.dic[other].head = other len_abbrev += 1 else: - if len(self.dic[other][0]) < len_abbrev: - self.dic[other][0] = other[:len_abbrev] + if len(self.dic[other].head) < len_abbrev: + self.dic[other].head = other[:len_abbrev] else: - self.dic[start] = [start[:len_abbrev], Abbreviator()] - self.dic[start][1].add(rest) + self.dic[start] = Abbreviation(start[:len_abbrev], Abbreviator()) + self.dic[start].tail.add(rest) - def abbreviate(self, name): + def abbreviate(self, name: str) -> str: """Return abbreviation of name.""" if '[' in name: name, parameters = name.split('[', 1) @@ -81,8 +98,8 @@ def abbreviate(self, name): parameters = '' if '.' in name: start, rest = name.split('.', 1) - res = (self.dic[start][0] - + '.' + self.dic[start][1].abbreviate(rest)) + res = (self.dic[start].head + + '.' + self.dic[start].tail.abbreviate(rest)) else: res = name return res + parameters diff --git a/spyder_unittest/backend/frameworkregistry.py b/spyder_unittest/backend/frameworkregistry.py index bb2f5d3..b9b0e0a 100644 --- a/spyder_unittest/backend/frameworkregistry.py +++ b/spyder_unittest/backend/frameworkregistry.py @@ -5,6 +5,16 @@ # (see LICENSE.txt for details) """Keep track of testing frameworks and create test runners when requested.""" +from __future__ import annotations + +# Standard imports +from typing import Optional, TYPE_CHECKING + +# Local imports +if TYPE_CHECKING: + from spyder_unittest.backend.runnerbase import RunnerBase + from spyder_unittest.widgets.unittestgui import UnitTestWidget + class FrameworkRegistry(): """ @@ -24,21 +34,22 @@ class FrameworkRegistry(): associated runners. 
""" - def __init__(self): + def __init__(self) -> None: """Initialize self.""" - self.frameworks = {} + self.frameworks: dict[str, type[RunnerBase]] = {} - def register(self, runner_class): + def register(self, runner_class: type[RunnerBase]) -> None: """Register runner class for a testing framework. Parameters ---------- - runner_class : type + runner_class Class used for creating tests runners for the framework. """ self.frameworks[runner_class.name] = runner_class - def create_runner(self, framework, widget, tempfilename): + def create_runner(self, framework: str, widget: UnitTestWidget, + tempfilename: Optional[str]) -> RunnerBase: """Create test runner associated to some testing framework. This creates an instance of the runner class whose `name` attribute @@ -46,11 +57,11 @@ def create_runner(self, framework, widget, tempfilename): Parameters ---------- - framework : str + framework Name of testing framework. - widget : UnitTestWidget + widget Unit test widget which constructs the test runner. - resultfilename : str or None + resultfilename Name of file in which to store test results. If None, use default. 
Returns diff --git a/spyder_unittest/backend/nose2runner.py b/spyder_unittest/backend/nose2runner.py index 81efb8f..87ff2dc 100644 --- a/spyder_unittest/backend/nose2runner.py +++ b/spyder_unittest/backend/nose2runner.py @@ -5,12 +5,19 @@ # (see LICENSE.txt for details) """Support for Nose framework.""" +from __future__ import annotations + +# Standard library imports +from typing import Optional, TYPE_CHECKING + # Third party imports from lxml import etree from spyder.config.base import get_translation # Local imports from spyder_unittest.backend.runnerbase import Category, RunnerBase, TestResult +if TYPE_CHECKING: + from spyder_unittest.widgets.configdialog import Config try: _ = get_translation('spyder_unittest') @@ -25,22 +32,26 @@ class Nose2Runner(RunnerBase): module = 'nose2' name = 'nose2' - def create_argument_list(self, config, cov_path): + def create_argument_list(self, config: Config, + cov_path: Optional[str], + single_test: Optional[str]) -> list[str]: """Create argument list for testing process.""" arguments = [ '-m', self.module, '--plugin=nose2.plugins.junitxml', '--junit-xml', '--junit-xml-path={}'.format(self.resultfilename) ] + if single_test: + arguments.append(single_test) arguments += config.args return arguments - def finished(self): + def finished(self, exitcode: int) -> None: """Called when the unit test process has finished.""" output = self.read_all_process_output() testresults = self.load_data() self.sig_finished.emit(testresults, output, True) - def load_data(self): + def load_data(self) -> list[TestResult]: """ Read and parse unit test results. 
@@ -56,7 +67,7 @@ def load_data(self): try: data = etree.parse(self.resultfilename).getroot() except OSError: - data = [] + return [] testresults = [] for testcase in data: diff --git a/spyder_unittest/backend/pytestrunner.py b/spyder_unittest/backend/pytestrunner.py index 641ce2a..b15128d 100644 --- a/spyder_unittest/backend/pytestrunner.py +++ b/spyder_unittest/backend/pytestrunner.py @@ -5,16 +5,21 @@ # (see LICENSE.txt for details) """Support for pytest framework.""" +from __future__ import annotations + # Standard library imports import os import os.path as osp import re +from typing import Any, Optional, TYPE_CHECKING # Local imports from spyder.config.base import get_translation from spyder_unittest.backend.runnerbase import (Category, RunnerBase, TestResult, COV_TEST_NAME) from spyder_unittest.backend.zmqreader import ZmqStreamReader +if TYPE_CHECKING: + from spyder_unittest.widgets.configdialog import Config try: _ = get_translation('spyder_unittest') @@ -29,30 +34,36 @@ class PyTestRunner(RunnerBase): module = 'pytest' name = 'pytest' - def create_argument_list(self, config, cov_path): + def create_argument_list(self, config: Config, + cov_path: Optional[str], + single_test: Optional[str]) -> list[str]: """Create argument list for testing process.""" dirname = os.path.dirname(__file__) pyfile = os.path.join(dirname, 'workers', 'pytestworker.py') arguments = [pyfile, str(self.reader.port)] if config.coverage: arguments += [f'--cov={cov_path}', '--cov-report=term-missing'] + if single_test: + arguments.append(self.convert_testname_to_nodeid(single_test)) arguments += config.args return arguments - def start(self, config, cov_path, executable, pythonpath): + def start(self, config: Config, cov_path: Optional[str], + executable: str, pythonpath: list[str], + single_test: Optional[str]) -> None: """Start process which will run the unit test suite.""" self.config = config self.reader = ZmqStreamReader() self.reader.sig_received.connect(self.process_output) - 
RunnerBase.start(self, config, cov_path, executable, pythonpath) + super().start(config, cov_path, executable, pythonpath, single_test) - def process_output(self, output): + def process_output(self, output: list[dict[str, Any]]) -> None: """ Process output of test process. Parameters ---------- - output : list + output list of decoded Python object sent by test process. """ collected_list = [] @@ -63,15 +74,16 @@ def process_output(self, output): if result_item['event'] == 'config': self.rootdir = result_item['rootdir'] elif result_item['event'] == 'collected': - testname = convert_nodeid_to_testname(result_item['nodeid']) - collected_list.append(testname) + name = self.convert_nodeid_to_testname(result_item['nodeid']) + collected_list.append(name) elif result_item['event'] == 'collecterror': - tupl = logreport_collecterror_to_tuple(result_item) + tupl = self.logreport_collecterror_to_tuple(result_item) collecterror_list.append(tupl) elif result_item['event'] == 'starttest': - starttest_list.append(logreport_starttest_to_str(result_item)) + name = self.logreport_starttest_to_str(result_item) + starttest_list.append(name) elif result_item['event'] == 'logreport': - testresult = logreport_to_testresult(result_item, self.rootdir) + testresult = self.logreport_to_testresult(result_item) result_list.append(testresult) if collected_list: @@ -83,7 +95,7 @@ def process_output(self, output): if result_list: self.sig_testresult.emit(result_list) - def process_coverage(self, output): + def process_coverage(self, output: str) -> None: """Search the output text for coverage details. Called by the function 'finished' at the very end. 
@@ -107,8 +119,11 @@ def process_coverage(self, output): for row in re.findall( r'^((.*?\.py) .*?(\d+%).*?(\d[\d\,\-\ ]*)?)$', cov_results.group(0), flags=re.M): - lineno = (int(re.search(r'^(\d*)', row[3]).group(1)) - 1 - if row[3] else None) + lineno: Optional[int] = None + if row[3]: + match = re.search(r'^(\d*)', row[3]) + if match: + lineno = int(match.group(1)) - 1 file_cov = TestResult( Category.COVERAGE, row[2], row[1], message=_('Missing: {}').format(row[3] if row[3] else _("(none)")), @@ -117,7 +132,7 @@ def process_coverage(self, output): self.sig_collected.emit([row[1]]) self.sig_testresult.emit([file_cov]) - def finished(self, exitcode): + def finished(self, exitcode: int) -> None: """ Called when the unit test process has finished. @@ -125,7 +140,7 @@ def finished(self, exitcode): Parameters ---------- - exitcode : int + exitcode Exit code of the test process. """ self.reader.close() @@ -137,56 +152,78 @@ def finished(self, exitcode): # 2 = interrupted, 5 = no tests collected self.sig_finished.emit([], output, normal_exit) + def normalize_module_name(self, name: str) -> str: + """ + Convert module name reported by pytest to Python conventions. + + This function strips the .py suffix and replaces '/' by '.', so that + 'ham/spam.py' becomes 'ham.spam'. -def normalize_module_name(name): - """ - Convert module name reported by pytest to Python conventions. - - This function strips the .py suffix and replaces '/' by '.', so that - 'ham/spam.py' becomes 'ham.spam'. 
- """ - if name.endswith('.py'): - name = name[:-3] - return name.replace('/', '.') - - -def convert_nodeid_to_testname(nodeid): - """Convert a nodeid to a test name.""" - module, name = nodeid.split('::', 1) - module = normalize_module_name(module) - return '{}.{}'.format(module, name) - - -def logreport_collecterror_to_tuple(report): - """Convert a 'collecterror' logreport to a (str, str) tuple.""" - module = normalize_module_name(report['nodeid']) - return (module, report['longrepr']) - - -def logreport_starttest_to_str(report): - """Convert a 'starttest' logreport to a str.""" - return convert_nodeid_to_testname(report['nodeid']) - - -def logreport_to_testresult(report, rootdir): - """Convert a logreport sent by test process to a TestResult.""" - status = report['outcome'] - if report['outcome'] in ('failed', 'xpassed') or report['witherror']: - cat = Category.FAIL - elif report['outcome'] in ('passed', 'xfailed'): - cat = Category.OK - else: - cat = Category.SKIP - testname = convert_nodeid_to_testname(report['nodeid']) - message = report.get('message', '') - extra_text = report.get('longrepr', '') - if 'sections' in report: - if extra_text: - extra_text += '\n' - for (heading, text) in report['sections']: - extra_text += '----- {} -----\n{}'.format(heading, text) - filename = osp.join(rootdir, report['filename']) - result = TestResult(cat, status, testname, message=message, - time=report['duration'], extra_text=extra_text, - filename=filename, lineno=report['lineno']) - return result + The result is relative to the directory from which tests are run and + not the pytest root dir. 
+ """ + wdir = osp.realpath(self.config.wdir) + if wdir != self.rootdir: + abspath = osp.join(self.rootdir, name) + try: + name = osp.relpath(abspath, start=wdir) + except ValueError: + # Happens on Windows if paths are on different drives + pass + + if name.endswith('.py'): + name = name[:-3] + return name.replace(osp.sep, '.') + + def convert_nodeid_to_testname(self, nodeid: str) -> str: + """Convert a nodeid to a test name.""" + module, name = nodeid.split('::', 1) + module = self.normalize_module_name(module) + return '{}.{}'.format(module, name) + + def convert_testname_to_nodeid(self, testname: str) -> str: + """ + Convert a test name to a nodeid relative to wdir. + + A true nodeid is relative to the pytest root dir. The return value of + this function is like a nodeid but relative to the wdir (i.e., the + directory from which test are run). This is the format that pytest + expects when running single tests. + """ + *path_parts, last_part = testname.split('.') + path_parts[-1] += '.py' + nodeid = osp.join(*path_parts) + '::' + last_part + return nodeid + + def logreport_collecterror_to_tuple( + self, report: dict[str, Any]) -> tuple[str, str]: + """Convert a 'collecterror' logreport to a (str, str) tuple.""" + module = self.normalize_module_name(report['nodeid']) + return (module, report['longrepr']) + + def logreport_starttest_to_str(self, report: dict[str, Any]) -> str: + """Convert a 'starttest' logreport to a str.""" + return self.convert_nodeid_to_testname(report['nodeid']) + + def logreport_to_testresult(self, report: dict[str, Any]) -> TestResult: + """Convert a logreport sent by test process to a TestResult.""" + status = report['outcome'] + if report['outcome'] in ('failed', 'xpassed') or report['witherror']: + cat = Category.FAIL + elif report['outcome'] in ('passed', 'xfailed'): + cat = Category.OK + else: + cat = Category.SKIP + testname = self.convert_nodeid_to_testname(report['nodeid']) + message = report.get('message', '') + extra_text = 
report.get('longrepr', '') + if 'sections' in report: + if extra_text: + extra_text += '\n' + for (heading, text) in report['sections']: + extra_text += '----- {} -----\n{}'.format(heading, text) + filename = osp.join(self.rootdir, report['filename']) + result = TestResult(cat, status, testname, message=message, + time=report['duration'], extra_text=extra_text, + filename=filename, lineno=report['lineno']) + return result diff --git a/spyder_unittest/backend/runnerbase.py b/spyder_unittest/backend/runnerbase.py index 58267ed..38947bd 100644 --- a/spyder_unittest/backend/runnerbase.py +++ b/spyder_unittest/backend/runnerbase.py @@ -5,23 +5,34 @@ # (see LICENSE.txt for details) """Classes for running tests within various frameworks.""" +from __future__ import annotations + # Standard library imports +from enum import IntEnum +import logging import os -import sys import tempfile +from typing import ClassVar, Optional, TYPE_CHECKING # Third party imports -from importlib.util import find_spec as find_spec_or_loader -from qtpy.QtCore import (QObject, QProcess, QProcessEnvironment, QTextCodec, - Signal) +from qtpy.QtCore import ( + QObject, QProcess, QProcessEnvironment, QTextCodec, Signal) + +# Local imports +if TYPE_CHECKING: + from spyder_unittest.widgets.configdialog import Config + from spyder_unittest.widgets.unittestgui import UnitTestWidget +# Logging +logger = logging.getLogger(__name__) + # if generating coverage report, use this name for the TestResult # it's here in case we can get coverage results from unittest too COV_TEST_NAME = 'Total Test Coverage' -class Category: +class Category(IntEnum): """Enum type representing category of test result.""" FAIL = 1 @@ -36,21 +47,12 @@ class TestResult: __test__ = False # this is not a pytest test class - def __init__(self, category, status, name, message='', time=None, - extra_text='', filename=None, lineno=None): + def __init__(self, category: Category, status: str, name: str, + message: str = '', time: 
Optional[float] = None, + extra_text: str = '', filename: Optional[str] = None, + lineno: Optional[int] = None): """ Construct a test result. - - Parameters - ---------- - category : Category - status : str - name : str - message : str - time : float or None - extra_text : str - filename : str or None - lineno : int or None """ self.category = category self.status = status @@ -65,8 +67,10 @@ def __init__(self, category, status, name, message='', time=None, self.filename = filename self.lineno = lineno - def __eq__(self, other): + def __eq__(self, other: object) -> bool: """Test for equality.""" + if not isinstance(other, TestResult): + return NotImplemented return self.__dict__ == other.__dict__ @@ -75,7 +79,7 @@ class RunnerBase(QObject): Base class for running tests with a framework that uses JUnit XML. This is an abstract class, meant to be subclassed before being used. - Concrete subclasses should define executable and create_argument_list(), + Concrete subclasses should define create_argument_list() and finished(). All communication back to the caller is done via signals. @@ -90,9 +94,6 @@ class RunnerBase(QObject): Process running the unit test suite. resultfilename : str Name of file in which test results are stored. - executable : str - Path to Python executable used for test. This is required - by the UnittestRunner subclass. Signals ------- @@ -113,6 +114,9 @@ class RunnerBase(QObject): Emitted when test process is being stopped. """ + module: ClassVar[str] + name: ClassVar[str] + sig_collected = Signal(object) sig_collecterror = Signal(object) sig_starttest = Signal(object) @@ -120,7 +124,8 @@ class RunnerBase(QObject): sig_finished = Signal(object, str, bool) sig_stop = Signal() - def __init__(self, widget, resultfilename=None): + def __init__(self, widget: UnitTestWidget, + resultfilename: Optional[str] = None): """ Construct test runner. @@ -132,16 +137,16 @@ def __init__(self, widget, resultfilename=None): Name of file in which to store test results. 
If None, use default. """ QObject.__init__(self, widget) - self.process = None + self.process: Optional[QProcess] = None if resultfilename is None: self.resultfilename = os.path.join(tempfile.gettempdir(), 'unittest.results') else: self.resultfilename = resultfilename - # Set a sensible default - self.executable = sys.executable - def create_argument_list(self, config, cov_path): + def create_argument_list(self, config: Config, + cov_path: Optional[str], + single_test: Optional[str]) -> list[str]: """ Create argument list for testing process (dummy). @@ -149,7 +154,8 @@ def create_argument_list(self, config, cov_path): """ raise NotImplementedError - def _prepare_process(self, config, pythonpath): + def _prepare_process(self, config: Config, + pythonpath: list[str]) -> QProcess: """ Prepare and return process for running the unit test suite. @@ -161,7 +167,7 @@ def _prepare_process(self, config, pythonpath): process.finished.connect(self.finished) if pythonpath: env = QProcessEnvironment.systemEnvironment() - old_python_path = env.value('PYTHONPATH', None) + old_python_path = env.value('PYTHONPATH', '') python_path_str = os.pathsep.join(pythonpath) if old_python_path: python_path_str += os.pathsep + old_python_path @@ -169,7 +175,9 @@ def _prepare_process(self, config, pythonpath): process.setProcessEnvironment(env) return process - def start(self, config, cov_path, executable, pythonpath): + def start(self, config: Config, cov_path: Optional[str], + executable: str, pythonpath: list[str], + single_test: Optional[str]) -> None: """ Start process which will run the unit test suite. @@ -181,33 +189,36 @@ def start(self, config, cov_path, executable, pythonpath): Parameters ---------- - config : TestConfig + config Unit test configuration. 
- cov_path : str or None + cov_path Path to filter source for coverage report - executable : str + executable Path to Python executable - pythonpath : list of str + pythonpath List of directories to be added to the Python path + single_test + If None, run all tests; otherwise, it is the name of the only test + to be run. Raises ------ RuntimeError If process failed to start. """ - self.executable = executable self.process = self._prepare_process(config, pythonpath) - p_args = self.create_argument_list(config, cov_path) + p_args = self.create_argument_list(config, cov_path, single_test) try: os.remove(self.resultfilename) except OSError: pass + logger.debug(f'Starting Python process with arguments {p_args}') self.process.start(executable, p_args) running = self.process.waitForStarted() if not running: raise RuntimeError - def finished(self): + def finished(self, exitcode: int) -> None: """ Called when the unit test process has finished. @@ -216,13 +227,14 @@ def finished(self): """ raise NotImplementedError - def read_all_process_output(self): + def read_all_process_output(self) -> str: """Read and return all output from `self.process` as unicode.""" + assert self.process is not None qbytearray = self.process.readAllStandardOutput() locale_codec = QTextCodec.codecForLocale() return locale_codec.toUnicode(qbytearray.data()) - def stop_if_running(self): + def stop_if_running(self) -> None: """Stop testing process if it is running.""" if self.process and self.process.state() == QProcess.Running: self.process.kill() diff --git a/spyder_unittest/backend/tests/test_pytestrunner.py b/spyder_unittest/backend/tests/test_pytestrunner.py index 95862dd..140ffea 100644 --- a/spyder_unittest/backend/tests/test_pytestrunner.py +++ b/spyder_unittest/backend/tests/test_pytestrunner.py @@ -8,20 +8,27 @@ # Standard library imports import os.path as osp import sys -from unittest.mock import Mock +from unittest.mock import Mock, patch # Third party imports import pytest # Local imports 
-from spyder_unittest.backend.pytestrunner import (PyTestRunner, - logreport_to_testresult) +from spyder_unittest.backend.pytestrunner import PyTestRunner from spyder_unittest.backend.runnerbase import (Category, TestResult, COV_TEST_NAME) from spyder_unittest.widgets.configdialog import Config -def test_pytestrunner_create_argument_list(monkeypatch): +@pytest.fixture +def runner(): + res = PyTestRunner(None) + res.rootdir = 'ham' + res.config = Config(wdir='ham') + return res + + +def test_pytestrunner_create_argument_list(monkeypatch, runner): config = Config(args=['--extra-arg']) cov_path = None MockZMQStreamReader = Mock() @@ -34,7 +41,7 @@ def test_pytestrunner_create_argument_list(monkeypatch): runner.reader = mock_reader monkeypatch.setattr('spyder_unittest.backend.pytestrunner.os.path.dirname', lambda _: 'dir') - arg_list = runner.create_argument_list(config, cov_path) + arg_list = runner.create_argument_list(config, cov_path, None) pyfile, port, *coverage, last = arg_list assert pyfile == osp.join('dir', 'workers', 'pytestworker.py') assert port == '42' @@ -48,24 +55,23 @@ def test_pytestrunner_start(monkeypatch): MockZMQStreamReader) mock_reader = MockZMQStreamReader() - MockRunnerBase = Mock(name='RunnerBase') - monkeypatch.setattr('spyder_unittest.backend.pytestrunner.RunnerBase', - MockRunnerBase) + mock_base_start = Mock() + monkeypatch.setattr('spyder_unittest.backend.unittestrunner.RunnerBase.start', + mock_base_start) runner = PyTestRunner(None, 'results') config = Config() cov_path = None - runner.start(config, cov_path, sys.executable, ['pythondir']) + runner.start(config, cov_path, sys.executable, ['pythondir'], None) assert runner.config is config assert runner.reader is mock_reader runner.reader.sig_received.connect.assert_called_once_with( runner.process_output) - MockRunnerBase.start.assert_called_once_with( - runner, config, cov_path, sys.executable, ['pythondir']) + mock_base_start.assert_called_once_with( + config, cov_path, 
sys.executable, ['pythondir'], None) -def test_pytestrunner_process_output_with_collected(qtbot): - runner = PyTestRunner(None) +def test_pytestrunner_process_output_with_collected(qtbot, runner): output = [{'event': 'collected', 'nodeid': 'spam.py::ham'}, {'event': 'collected', 'nodeid': 'eggs.py::bacon'}] with qtbot.waitSignal(runner.sig_collected) as blocker: @@ -74,8 +80,7 @@ def test_pytestrunner_process_output_with_collected(qtbot): assert blocker.args == [expected] -def test_pytestrunner_process_output_with_collecterror(qtbot): - runner = PyTestRunner(None) +def test_pytestrunner_process_output_with_collecterror(qtbot, runner): output = [{ 'event': 'collecterror', 'nodeid': 'ham/spam.py', @@ -87,8 +92,7 @@ def test_pytestrunner_process_output_with_collecterror(qtbot): assert blocker.args == [expected] -def test_pytestrunner_process_output_with_starttest(qtbot): - runner = PyTestRunner(None) +def test_pytestrunner_process_output_with_starttest(qtbot, runner): output = [{'event': 'starttest', 'nodeid': 'ham/spam.py::ham'}, {'event': 'starttest', 'nodeid': 'ham/eggs.py::bacon'}] with qtbot.waitSignal(runner.sig_starttest) as blocker: @@ -97,7 +101,7 @@ def test_pytestrunner_process_output_with_starttest(qtbot): assert blocker.args == [expected] -@pytest.mark.parametrize('exitcode, normal_exit', +@pytest.mark.parametrize('exitcode, normal_exit', [(0, True), (1, True), (2, True), (3, False), (4, False), (5, True)]) def test_pytestrunner_finished(qtbot, exitcode, normal_exit): @@ -114,6 +118,39 @@ def test_pytestrunner_finished(qtbot, exitcode, normal_exit): assert blocker.args == [results, output, normal_exit] +@pytest.mark.parametrize('wdir, expected', [ + ('ham', 'spam.eggs'), + (osp.join('ham', 'spam'), 'eggs'), + (osp.join('link-to-ham', 'spam'), 'eggs')]) +def test_normalize_module_name(runner, wdir, expected): + def new_realpath(name): + """Simulate link from `link-to-ham` to `ham`""" + if name.startswith('link-to-ham'): + return name[len('link-to-'):] + 
else: + return name + + with patch('spyder_unittest.backend.pytestrunner.osp.realpath', + side_effect=new_realpath): + runner.config = Config(wdir=wdir) + result = runner.normalize_module_name(osp.join('spam', 'eggs.py')) + assert result == expected + + +def test_convert_nodeid_to_testname(runner): + nodeid = osp.join('spam', 'eggs.py') + '::test_foo' + testname = 'spam.eggs.test_foo' + result = runner.convert_nodeid_to_testname(nodeid) + assert result == testname + + +def test_convert_testname_to_nodeid(runner): + nodeid = osp.join('spam', 'eggs.py') + '::test_foo' + testname = 'spam.eggs.test_foo' + result = runner.convert_testname_to_nodeid(testname) + assert result == nodeid + + def standard_logreport_output(): return { 'event': 'logreport', @@ -125,9 +162,8 @@ def standard_logreport_output(): 'duration': 42 } -def test_pytestrunner_process_output_with_logreport_passed(qtbot): - runner = PyTestRunner(None) - runner.rootdir = 'ham' + +def test_pytestrunner_process_output_with_logreport_passed(qtbot, runner): output = [standard_logreport_output()] with qtbot.waitSignal(runner.sig_testresult) as blocker: runner.process_output(output) @@ -224,40 +260,39 @@ def test_pytestrunner_process_coverage(qtbot): ('---', True, Category.FAIL) # ('---', False, this is not possible) ]) -def test_logreport_to_testresult_with_outcome_and_possible_error(outcome, - witherror, - category): +def test_logreport_to_testresult_with_outcome_and_possible_error( + runner, outcome, witherror, category): report = standard_logreport_output() report['outcome'] = outcome report['witherror'] = witherror expected = TestResult(category, outcome, 'foo.bar', time=42, filename=osp.join('ham', 'foo.py'), lineno=24) - assert logreport_to_testresult(report, 'ham') == expected + assert runner.logreport_to_testresult(report) == expected -def test_logreport_to_testresult_with_message(): +def test_logreport_to_testresult_with_message(runner): report = standard_logreport_output() report['message'] = 'msg' 
expected = TestResult(Category.OK, 'passed', 'foo.bar', message='msg', time=42, filename=osp.join('ham', 'foo.py'), lineno=24) - assert logreport_to_testresult(report, 'ham') == expected + assert runner.logreport_to_testresult(report) == expected -def test_logreport_to_testresult_with_extratext(): +def test_logreport_to_testresult_with_extratext(runner): report = standard_logreport_output() report['longrepr'] = 'long msg' expected = TestResult(Category.OK, 'passed', 'foo.bar', time=42, extra_text='long msg', filename=osp.join('ham', 'foo.py'), lineno=24) - assert logreport_to_testresult(report, 'ham') == expected + assert runner.logreport_to_testresult(report) == expected @pytest.mark.parametrize('longrepr,prefix', [ ('', ''), ('msg', '\n') ]) -def test_logreport_to_testresult_with_output(longrepr, prefix): +def test_logreport_to_testresult_with_output(runner, longrepr, prefix): report = standard_logreport_output() report['longrepr'] = longrepr report['sections'] = [['Captured stdout call', 'ham\n'], @@ -268,5 +303,4 @@ def test_logreport_to_testresult_with_output(longrepr, prefix): expected = TestResult(Category.OK, 'passed', 'foo.bar', time=42, extra_text=txt, filename=osp.join('ham', 'foo.py'), lineno=24) - assert logreport_to_testresult(report, 'ham') == expected - + assert runner.logreport_to_testresult(report) == expected diff --git a/spyder_unittest/backend/tests/test_runnerbase.py b/spyder_unittest/backend/tests/test_runnerbase.py index 1f4e547..3e7ee0b 100644 --- a/spyder_unittest/backend/tests/test_runnerbase.py +++ b/spyder_unittest/backend/tests/test_runnerbase.py @@ -25,10 +25,10 @@ class FooRunner(RunnerBase): config = Config(foo_runner.module, 'wdir', True) with pytest.raises(NotImplementedError): - foo_runner.create_argument_list(config, 'cov_path') + foo_runner.create_argument_list(config, 'cov_path', None) with pytest.raises(NotImplementedError): - foo_runner.finished() + foo_runner.finished(0) @pytest.mark.parametrize('pythonpath,env_pythonpath', 
[ @@ -81,12 +81,12 @@ def test_runnerbase_start(monkeypatch): runner = RunnerBase(None, 'results') runner._prepare_process = lambda c, p: mock_process - runner.create_argument_list = lambda c, cp: ['arg1', 'arg2'] + runner.create_argument_list = lambda c, cp, st: ['arg1', 'arg2'] config = Config('pytest', 'wdir', False) cov_path = None mock_process.waitForStarted = lambda: False with pytest.raises(RuntimeError): - runner.start(config, cov_path, 'python_exec', ['pythondir']) + runner.start(config, cov_path, 'python_exec', ['pythondir'], None) mock_process.start.assert_called_once_with('python_exec', ['arg1', 'arg2']) mock_remove.assert_called_once_with('results') diff --git a/spyder_unittest/backend/tests/test_unittestrunner.py b/spyder_unittest/backend/tests/test_unittestrunner.py index 2cc5fea..3be1323 100644 --- a/spyder_unittest/backend/tests/test_unittestrunner.py +++ b/spyder_unittest/backend/tests/test_unittestrunner.py @@ -34,7 +34,7 @@ def test_unittestrunner_create_argument_list(monkeypatch): 'spyder_unittest.backend.unittestrunner.osp.dirname', lambda _: 'dir') - result = runner.create_argument_list(config, cov_path) + result = runner.create_argument_list(config, cov_path, None) pyfile = osp.join('dir', 'workers', 'unittestworker.py') assert result == [pyfile, '42', '--extra-arg'] @@ -58,14 +58,14 @@ def test_unittestrunner_start(monkeypatch): config = Config() cov_path = None - runner.start(config, cov_path, sys.executable, ['pythondir']) + runner.start(config, cov_path, sys.executable, ['pythondir'], None) assert runner.config is config assert runner.reader is mock_reader runner.reader.sig_received.connect.assert_called_once_with( runner.process_output) mock_base_start.assert_called_once_with( - config, cov_path, sys.executable, ['pythondir']) + config, cov_path, sys.executable, ['pythondir'], None) def test_unittestrunner_process_output_with_collected(qtbot): diff --git a/spyder_unittest/backend/unittestrunner.py 
b/spyder_unittest/backend/unittestrunner.py index e47d86f..ca8f1c2 100644 --- a/spyder_unittest/backend/unittestrunner.py +++ b/spyder_unittest/backend/unittestrunner.py @@ -9,6 +9,7 @@ # Standard library imports import os.path as osp +from typing import Any, Optional # Local imports from spyder_unittest.widgets.configdialog import Config @@ -22,23 +23,28 @@ class UnittestRunner(RunnerBase): module = 'unittest' name = 'unittest' - def create_argument_list(self, config: Config, cov_path: str) -> list[str]: + def create_argument_list(self, config: Config, + cov_path: Optional[str], + single_test: Optional[str]) -> list[str]: """Create argument list for testing process.""" dirname = osp.dirname(__file__) pyfile = osp.join(dirname, 'workers', 'unittestworker.py') arguments = [pyfile, str(self.reader.port)] + if single_test: + arguments.append(single_test) arguments += config.args return arguments - def start(self, config: Config, cov_path: str, executable: str, - pythonpath: str) -> None: + def start(self, config: Config, cov_path: Optional[str], + executable: str, pythonpath: list[str], + single_test: Optional[str]) -> None: """Start process which will run the unit test suite.""" self.config = config self.reader = ZmqStreamReader() self.reader.sig_received.connect(self.process_output) - super().start(config, cov_path, executable, pythonpath) + super().start(config, cov_path, executable, pythonpath, single_test) - def finished(self) -> None: + def finished(self, exitcode: int) -> None: """ Called when the unit test process has finished. @@ -48,7 +54,7 @@ def finished(self) -> None: output = self.read_all_process_output() self.sig_finished.emit([], output, True) - def process_output(self, output: list[dict]) -> None: + def process_output(self, output: list[dict[str, Any]]) -> None: """ Process output of test process. 
@@ -78,7 +84,7 @@ def process_output(self, output: list[dict]) -> None: self.sig_testresult.emit(result_list) -def add_event_to_testresult(event: dict) -> TestResult: +def add_event_to_testresult(event: dict[str, Any]) -> TestResult: """Convert an addXXX event sent by test process to a TestResult.""" status = event['event'][3].lower() + event['event'][4:] if status in ('error', 'failure', 'unexpectedSuccess'): diff --git a/spyder_unittest/backend/workers/tests/test_pytestworker.py b/spyder_unittest/backend/workers/tests/test_pytestworker.py index d0ea79a..143747e 100644 --- a/spyder_unittest/backend/workers/tests/test_pytestworker.py +++ b/spyder_unittest/backend/workers/tests/test_pytestworker.py @@ -313,49 +313,66 @@ def test_pytest_runtest_logfinish_handles_longrepr(plugin_ini, self_longrepr, }) -def test_pytestworker_integration(monkeypatch, tmpdir): - os.chdir(tmpdir.strpath) - testfilename = tmpdir.join('test_foo.py').strpath - with open(testfilename, 'w') as f: - f.write("def test_ok(): assert 1+1 == 2\n" - "def test_fail(): assert 1+1 == 3\n") +@pytest.fixture(scope='module') +def testfile_path(tmp_path_factory): + tmp_path = tmp_path_factory.mktemp('pytestworker') + res = tmp_path / 'test_pytestworker_foo.py' + res.write_text('def test_ok(): assert 1+1 == 2\n' + 'def test_fail(): assert 1+1 == 3\n') + return res + +@pytest.mark.parametrize('alltests', [True, False]) +def test_pytestworker_integration(monkeypatch, testfile_path, alltests): mock_writer = create_autospec(ZmqStreamWriter) MockZmqStreamWriter = Mock(return_value=mock_writer) monkeypatch.setattr( 'spyder_unittest.backend.workers.pytestworker.ZmqStreamWriter', MockZmqStreamWriter) - main(['mockscriptname', '42', testfilename]) - - args = mock_writer.write.call_args_list - - assert args[0][0][0]['event'] == 'config' - assert 'rootdir' in args[0][0][0] - - assert args[1][0][0]['event'] == 'collected' - assert args[1][0][0]['nodeid'] == 'test_foo.py::test_ok' - assert args[2][0][0]['event'] == 
'collected' - assert args[2][0][0]['nodeid'] == 'test_foo.py::test_fail' + os.chdir(testfile_path.parent) + testfilename = testfile_path.name + pytest_args = ['mockscriptname', '42'] + if not alltests: + pytest_args.append(f'{testfilename}::test_ok') + main(pytest_args) - assert args[3][0][0]['event'] == 'starttest' - assert args[3][0][0]['nodeid'] == 'test_foo.py::test_ok' - - assert args[4][0][0]['event'] == 'logreport' - assert args[4][0][0]['outcome'] == 'passed' - assert args[4][0][0]['nodeid'] == 'test_foo.py::test_ok' - assert args[4][0][0]['sections'] == [] - assert args[4][0][0]['filename'] == 'test_foo.py' - assert args[4][0][0]['lineno'] == 0 - assert 'duration' in args[4][0][0] - - assert args[5][0][0]['event'] == 'starttest' - assert args[5][0][0]['nodeid'] == 'test_foo.py::test_fail' - - assert args[6][0][0]['event'] == 'logreport' - assert args[6][0][0]['outcome'] == 'failed' - assert args[6][0][0]['nodeid'] == 'test_foo.py::test_fail' - assert args[6][0][0]['sections'] == [] - assert args[6][0][0]['filename'] == 'test_foo.py' - assert args[6][0][0]['lineno'] == 1 - assert 'duration' in args[6][0][0] + args = mock_writer.write.call_args_list + messages = [arg[0][0] for arg in args] + assert len(messages) == (7 if alltests else 4) + + assert messages[0]['event'] == 'config' + assert 'rootdir' in messages[0] + + assert messages[1]['event'] == 'collected' + assert messages[1]['nodeid'] == f'{testfilename}::test_ok' + + if alltests: + n = 3 + assert messages[2]['event'] == 'collected' + assert messages[2]['nodeid'] == f'{testfilename}::test_fail' + else: + n = 2 + + assert messages[n]['event'] == 'starttest' + assert messages[n]['nodeid'] == f'{testfilename}::test_ok' + + assert messages[n+1]['event'] == 'logreport' + assert messages[n+1]['outcome'] == 'passed' + assert messages[n+1]['nodeid'] == f'{testfilename}::test_ok' + assert messages[n+1]['sections'] == [] + assert messages[n+1]['filename'] == testfilename + assert messages[n+1]['lineno'] == 0 + 
assert 'duration' in messages[n+1] + + if alltests: + assert messages[n+2]['event'] == 'starttest' + assert messages[n+2]['nodeid'] == f'{testfilename}::test_fail' + + assert messages[n+3]['event'] == 'logreport' + assert messages[n+3]['outcome'] == 'failed' + assert messages[n+3]['nodeid'] == f'{testfilename}::test_fail' + assert messages[n+3]['sections'] == [] + assert messages[n+3]['filename'] == testfilename + assert messages[n+3]['lineno'] == 1 + assert 'duration' in messages[n+3] diff --git a/spyder_unittest/backend/workers/tests/test_unittestworker.py b/spyder_unittest/backend/workers/tests/test_unittestworker.py index 6dc1ec5..de43a78 100644 --- a/spyder_unittest/backend/workers/tests/test_unittestworker.py +++ b/spyder_unittest/backend/workers/tests/test_unittestworker.py @@ -131,46 +131,61 @@ def test_unittestworker_report_collected(): assert mock_writer.write.mock_calls == expected -def test_unittestworker_main(monkeypatch, tmpdir): +@pytest.fixture(scope='module') +def testfile_path(tmp_path_factory): + tmp_path = tmp_path_factory.mktemp('unittestworker') + res = tmp_path / 'test_unittestworker_foo.py' + res.write_text('import unittest\n' + 'class MyTest(unittest.TestCase):\n' + ' def test_ok(self): self.assertEqual(1+1, 2)\n' + ' def test_fail(self): self.assertEqual(1+1, 3)\n') + return res + + +@pytest.mark.parametrize('alltests', [True, False]) +def test_unittestworker_main(monkeypatch, testfile_path, alltests): """ Test that the main function with some tests writes the expected output to the ZMQ stream. 
""" - os.chdir(tmpdir.strpath) - testfilename = tmpdir.join('test_foo_unittestworker.py').strpath - with open(testfilename, 'w') as f: - f.write("import unittest\n" - "class MyTest(unittest.TestCase):\n" - " def test_ok(self): self.assertEqual(1+1, 2)\n" - " def test_fail(self): self.assertEqual(1+1, 3)\n") - mock_writer = create_autospec(ZmqStreamWriter) MockZmqStreamWriter = Mock(return_value=mock_writer) monkeypatch.setattr( 'spyder_unittest.backend.workers.unittestworker.ZmqStreamWriter', MockZmqStreamWriter) - main(['mockscriptname', '42']) + os.chdir(testfile_path.parent) + testfilename = testfile_path.stem # `stem` removes the .py suffix + main_args = ['mockscriptname', '42'] + if not alltests: + main_args.append(f'{testfilename}.MyTest.test_fail') + main(main_args) args = mock_writer.write.call_args_list - # args[N][0][0] is dict sent over ZMQ stream in function call N + messages = [arg[0][0] for arg in args] + assert len(messages) == (6 if alltests else 3) - assert args[0][0][0]['event'] == 'collected' - assert args[0][0][0]['id'] == 'test_foo_unittestworker.MyTest.test_fail' + assert messages[0]['event'] == 'collected' + assert messages[0]['id'] == f'{testfilename}.MyTest.test_fail' - assert args[1][0][0]['event'] == 'collected' - assert args[1][0][0]['id'] == 'test_foo_unittestworker.MyTest.test_ok' + if alltests: + n = 2 + assert messages[1]['event'] == 'collected' + assert messages[1]['id'] == f'{testfilename}.MyTest.test_ok' + else: + n = 1 - assert args[2][0][0]['event'] == 'startTest' - assert args[2][0][0]['id'] == 'test_foo_unittestworker.MyTest.test_fail' + assert messages[n]['event'] == 'startTest' + assert messages[n]['id'] == f'{testfilename}.MyTest.test_fail' - assert args[3][0][0]['event'] == 'addFailure' - assert args[3][0][0]['id'] == 'test_foo_unittestworker.MyTest.test_fail' - assert 'AssertionError' in args[3][0][0]['reason'] - assert 'assertEqual(1+1, 3)' in args[3][0][0]['err'] + assert messages[n+1]['event'] == 'addFailure' + assert 
messages[n+1]['id'] == f'{testfilename}.MyTest.test_fail' + assert 'AssertionError' in messages[n+1]['reason'] + assert 'assertEqual(1+1, 3)' in messages[n+1]['err'] - assert args[4][0][0]['event'] == 'startTest' - assert args[4][0][0]['id'] == 'test_foo_unittestworker.MyTest.test_ok' + if alltests: + assert messages[n+2]['event'] == 'startTest' + assert messages[n+2]['id'] == f'{testfilename}.MyTest.test_ok' - assert args[5][0][0]['event'] == 'addSuccess' - assert args[5][0][0]['id'] == 'test_foo_unittestworker.MyTest.test_ok' + assert messages[n+3]['event'] == 'addSuccess' + assert messages[n+3]['id'] == f'{testfilename}.MyTest.test_ok' diff --git a/spyder_unittest/backend/workers/unittestworker.py b/spyder_unittest/backend/workers/unittestworker.py index f5d94e1..7f2ccb4 100644 --- a/spyder_unittest/backend/workers/unittestworker.py +++ b/spyder_unittest/backend/workers/unittestworker.py @@ -10,15 +10,17 @@ It runs tests via the unittest framework and transmits the results over a ZMQ socket so that the UnittestRunner can read them. -Usage: python unittestworker.py port +Usage: python unittestworker.py port [testname] -Here, port is the port number of the ZMQ socket. Use `file` to store the -results in the file `unittestworker.json`. +Here, `port` is the port number of the ZMQ socket. Use `file` to store the +results in the file `unittestworker.json`. The optional argument `testname` +is the test to run; if omitted, run all tests. 
""" from __future__ import annotations # Standard library imports +import os import sys from typing import ClassVar from unittest import ( @@ -116,7 +118,7 @@ def report_collected(writer: ZmqStreamWriter, test_suite: TestSuite) -> None: def main(args: list[str]) -> None: """Run unittest tests.""" - # Parse command line arguments and create writer + # Parse first command line argument and create writer if args[1] != 'file': writer = ZmqStreamWriter(args[1]) else: @@ -124,7 +126,12 @@ def main(args: list[str]) -> None: SpyderTestResult.writer = writer # Gather tests - test_suite = defaultTestLoader.discover('.') + if args[2:]: + # Add cwd to path so that modules can be found + sys.path = [os.getcwd()] + sys.path + test_suite = defaultTestLoader.loadTestsFromNames(args[2:]) + else: + test_suite = defaultTestLoader.discover('.') report_collected(writer, test_suite) # Run tests diff --git a/spyder_unittest/backend/zmqreader.py b/spyder_unittest/backend/zmqreader.py index 04d6e84..64b91a8 100644 --- a/spyder_unittest/backend/zmqreader.py +++ b/spyder_unittest/backend/zmqreader.py @@ -36,7 +36,7 @@ class ZmqStreamReader(QObject): sig_received = Signal(object) - def __init__(self): + def __init__(self) -> None: """Constructor; also constructs ZMQ stream.""" super().__init__() self.context = zmq.Context() @@ -46,7 +46,7 @@ def __init__(self): self.notifier = QSocketNotifier(fid, QSocketNotifier.Read, self) self.notifier.activated.connect(self.received_message) - def received_message(self): + def received_message(self) -> None: """Called when a message is received.""" self.notifier.setEnabled(False) messages = [] @@ -61,7 +61,7 @@ def received_message(self): if messages: self.sig_received.emit(messages) - def close(self): + def close(self) -> None: """Read any remaining messages and close stream.""" self.received_message() # Flush remaining messages self.notifier.setEnabled(False) diff --git a/spyder_unittest/unittestplugin.py b/spyder_unittest/unittestplugin.py index 
e308ba4..236dce3 100644 --- a/spyder_unittest/unittestplugin.py +++ b/spyder_unittest/unittestplugin.py @@ -18,7 +18,6 @@ from spyder.api.plugin_registration.decorators import ( on_plugin_available, on_plugin_teardown) from spyder.config.base import get_translation -from spyder.config.gui import is_dark_interface from spyder.plugins.mainmenu.api import ApplicationMenus from spyder.utils.palette import SpyderPalette @@ -166,7 +165,6 @@ def on_preferences_available(self): """ preferences = self.get_plugin(Plugins.Preferences) preferences.register_plugin_preferences(self) - self.get_widget().use_dark_interface(is_dark_interface()) @on_plugin_teardown(plugin=Plugins.Preferences) def on_preferences_teardown(self): diff --git a/spyder_unittest/widgets/datatree.py b/spyder_unittest/widgets/datatree.py index 2f89810..427fbaa 100644 --- a/spyder_unittest/widgets/datatree.py +++ b/spyder_unittest/widgets/datatree.py @@ -55,9 +55,13 @@ class TestDataView(QTreeView): ------- sig_edit_goto(str, int): Emitted if editor should go to some position. Arguments are file name and line number (zero-based). + sig_single_test_run_requested(str): Emitted to request a single test + to be run. Argument is the name of the test. """ sig_edit_goto = Signal(str, int) + sig_single_test_run_requested = Signal(str) + __test__ = False # this is not a pytest test class def __init__(self, parent=None): @@ -120,6 +124,13 @@ def go_to_test_definition(self, index): lineno = 0 self.sig_edit_goto.emit(filename, lineno) + def run_single_test(self, index): + """Ask plugin to run only the test corresponding to index.""" + index = self.make_index_canonical(index) + testresult = self.model().testresults[index.row()] + testname = testresult.name + self.sig_single_test_run_requested.emit(testname) + def make_index_canonical(self, index): """ Convert given index to canonical index for the same test. 
@@ -146,12 +157,21 @@ def build_context_menu(self, index): triggered=lambda: self.expand(index)) menuItem.setEnabled(self.model().hasChildren(index)) contextMenu.addAction(menuItem) + menuItem = create_action( self, _('Go to definition'), triggered=lambda: self.go_to_test_definition(index)) test_location = self.model().data(index, Qt.UserRole) menuItem.setEnabled(test_location[0] is not None) contextMenu.addAction(menuItem) + + menuItem = create_action( + self, _('Run only this test'), + triggered=lambda: self.run_single_test(index)) + result_category = self.model().testresults[index.row()].category + menuItem.setEnabled(result_category != Category.COVERAGE) + contextMenu.addAction(menuItem) + return contextMenu def resizeColumns(self): @@ -192,11 +212,6 @@ class TestDataModel(QAbstractItemModel, SpyderConfigurationAccessor): a tuple (row, column, id). The id is TOPLEVEL_ID for top-level items. For level-2 items, the id is the index of the test in `self.testresults`. - Attributes - ---------- - is_dark_interface : bool - Whether to use colours appropriate for a dark user interface. 
- Signals ------- sig_summary(str) @@ -212,7 +227,6 @@ def __init__(self, parent=None): """Constructor.""" QAbstractItemModel.__init__(self, parent) self.abbreviator = Abbreviator() - self.is_dark_interface = False self.testresults = [] try: self.monospace_font = parent.window().editor.get_plugin_font() diff --git a/spyder_unittest/widgets/tests/test_datatree.py b/spyder_unittest/widgets/tests/test_datatree.py index 5f9225d..17422a2 100644 --- a/spyder_unittest/widgets/tests/test_datatree.py +++ b/spyder_unittest/widgets/tests/test_datatree.py @@ -67,6 +67,12 @@ def test_go_to_test_definition_with_lineno_none(view_and_model, qtbot): view.go_to_test_definition(model.index(1, 0)) assert blocker.args == ['ham.py', 0] +def test_run_single_test(view_and_model, qtbot): + view, model = view_and_model + with qtbot.waitSignal(view.sig_single_test_run_requested) as blocker: + view.run_single_test(model.index(1, 0)) + assert blocker.args == ['foo.bar'] + def test_make_index_canonical_with_index_in_column2(view_and_model): view, model = view_and_model index = model.index(1, 2) @@ -88,20 +94,33 @@ def test_make_index_canonical_with_invalid_index(view_and_model): def test_build_context_menu(view_and_model): view, model = view_and_model menu = view.build_context_menu(model.index(0, 0)) + assert len(menu.actions()) == 3 assert menu.actions()[0].text() == 'Expand' assert menu.actions()[1].text() == 'Go to definition' + assert menu.actions()[2].text() == 'Run only this test' def test_build_context_menu_with_disabled_entries(view_and_model): view, model = view_and_model menu = view.build_context_menu(model.index(0, 0)) assert menu.actions()[0].isEnabled() == False assert menu.actions()[1].isEnabled() == False + assert menu.actions()[2].isEnabled() == True def test_build_context_menu_with_enabled_entries(view_and_model): view, model = view_and_model menu = view.build_context_menu(model.index(1, 0)) assert menu.actions()[0].isEnabled() == True assert menu.actions()[1].isEnabled() == 
True + assert menu.actions()[2].isEnabled() == True + +def test_build_context_menu_with_coverage_entry(view_and_model): + view, model = view_and_model + testresult = TestResult(Category.COVERAGE, 'coverage', 'foo') + model.testresults.append(testresult) + menu = view.build_context_menu(model.index(2, 0)) + assert menu.actions()[0].isEnabled() == False + assert menu.actions()[1].isEnabled() == False + assert menu.actions()[2].isEnabled() == False def test_build_context_menu_with_expanded_entry(view_and_model): view, model = view_and_model diff --git a/spyder_unittest/widgets/tests/test_unittestgui.py b/spyder_unittest/widgets/tests/test_unittestgui.py index 866c913..941b51f 100644 --- a/spyder_unittest/widgets/tests/test_unittestgui.py +++ b/spyder_unittest/widgets/tests/test_unittestgui.py @@ -8,7 +8,7 @@ # Standard library imports import os import sys -from unittest.mock import Mock +from unittest.mock import Mock, patch # Third party imports from qtpy.QtCore import Qt, QProcess @@ -151,8 +151,15 @@ def test_unittestwidget_process_finished_abnormally_status_label(widget): expected_text = '{}'.format('Test process exited abnormally') assert widget.status_label.text() == expected_text +def test_unittestwidget_handles_sig_single_test_run_requested(widget): + with patch.object(widget, 'run_tests') as mock_run_tests: + widget.testdataview.sig_single_test_run_requested.emit('testname') + mock_run_tests.assert_called_once_with(single_test='testname') + @pytest.mark.parametrize('framework', ['pytest', 'nose2']) -def test_run_tests_and_display_results(qtbot, widget, tmpdir, monkeypatch, framework): +@pytest.mark.parametrize('alltests', [True, False]) +def test_run_tests_and_display_results(qtbot, widget, tmpdir, monkeypatch, + framework, alltests): """Basic integration test.""" os.chdir(tmpdir.strpath) testfilename = tmpdir.join('test_foo.py').strpath @@ -167,24 +174,29 @@ def test_run_tests_and_display_results(qtbot, widget, tmpdir, monkeypatch, frame config = 
Config(wdir=tmpdir.strpath, framework=framework, coverage=False) with qtbot.waitSignal(widget.sig_finished, timeout=10000, raising=True): - widget.run_tests(config) + if alltests: + widget.run_tests(config) + else: + widget.run_tests(config, single_test='test_foo.test_fail') MockQMessageBox.assert_not_called() model = widget.testdatamodel - assert model.rowCount() == 2 + assert model.rowCount() == (2 if alltests else 1) assert model.index(0, 0).data( Qt.DisplayRole) == 'failure' if framework == 'nose2' else 'failed' assert model.index(0, 1).data(Qt.DisplayRole) == 'test_foo.test_fail' assert model.index(0, 1).data(Qt.ToolTipRole) == 'test_foo.test_fail' - assert model.index(1, 0).data( - Qt.DisplayRole) == 'ok' if framework == 'nose2' else 'passed' - assert model.index(1, 1).data(Qt.DisplayRole) == 'test_foo.test_ok' - assert model.index(1, 1).data(Qt.ToolTipRole) == 'test_foo.test_ok' - assert model.index(1, 2).data(Qt.DisplayRole) == '' + if alltests: + assert model.index(1, 0).data( + Qt.DisplayRole) == ('ok' if framework == 'nose2' else 'passed') + assert model.index(1, 1).data(Qt.DisplayRole) == 'test_foo.test_ok' + assert model.index(1, 1).data(Qt.ToolTipRole) == 'test_foo.test_ok' + assert model.index(1, 2).data(Qt.DisplayRole) == '' +@pytest.mark.parametrize('alltests', [True, False]) def test_run_tests_using_unittest_and_display_results( - qtbot, widget, tmpdir, monkeypatch): + qtbot, widget, tmpdir, monkeypatch, alltests): """Basic check.""" os.chdir(tmpdir.strpath) testfilename = tmpdir.join('test_foo.py').strpath @@ -201,18 +213,22 @@ def test_run_tests_using_unittest_and_display_results( config = Config(wdir=tmpdir.strpath, framework='unittest', coverage=False) with qtbot.waitSignal(widget.sig_finished, timeout=10000, raising=True): - widget.run_tests(config) + if alltests: + widget.run_tests(config) + else: + widget.run_tests(config, single_test='test_foo.MyTest.test_fail') MockQMessageBox.assert_not_called() model = widget.testdatamodel - assert 
model.rowCount() == 2 + assert model.rowCount() == (2 if alltests else 1) assert model.index(0, 0).data(Qt.DisplayRole) == 'failure' assert model.index(0, 1).data(Qt.DisplayRole) == 'test_foo.MyTest.test_fail' assert model.index(0, 1).data(Qt.ToolTipRole) == 'test_foo.MyTest.test_fail' - assert model.index(1, 0).data(Qt.DisplayRole) == 'success' - assert model.index(1, 1).data(Qt.DisplayRole) == 'test_foo.MyTest.test_ok' - assert model.index(1, 1).data(Qt.ToolTipRole) == 'test_foo.MyTest.test_ok' - assert model.index(1, 2).data(Qt.DisplayRole) == '' + if alltests: + assert model.index(1, 0).data(Qt.DisplayRole) == 'success' + assert model.index(1, 1).data(Qt.DisplayRole) == 'test_foo.MyTest.test_ok' + assert model.index(1, 1).data(Qt.ToolTipRole) == 'test_foo.MyTest.test_ok' + assert model.index(1, 2).data(Qt.DisplayRole) == '' def test_run_tests_with_print_using_unittest_and_display_results( qtbot, widget, tmpdir, monkeypatch): diff --git a/spyder_unittest/widgets/unittestgui.py b/spyder_unittest/widgets/unittestgui.py index 3125b8e..478a530 100644 --- a/spyder_unittest/widgets/unittestgui.py +++ b/spyder_unittest/widgets/unittestgui.py @@ -117,6 +117,8 @@ def __init__(self, name, plugin, parent): self.testdatamodel = TestDataModel(self) self.testdataview.setModel(self.testdatamodel) self.testdataview.sig_edit_goto.connect(self.sig_edit_goto) + self.testdataview.sig_single_test_run_requested.connect( + self.run_single_test) self.testdatamodel.sig_summary.connect(self.set_status_label) self.framework_registry = FrameworkRegistry() @@ -238,10 +240,6 @@ def set_config_without_emit(self, new_config): """Set test configuration but do not emit any signal.""" self._config = new_config - def use_dark_interface(self, flag): - """Set whether widget should use colours appropriate for dark UI.""" - self.testdatamodel.is_dark_interface = flag - def show_log(self): """Show output of testing process.""" if self.output: @@ -341,14 +339,15 @@ def maybe_configure_and_start(self): 
if self.config_is_valid(): self.run_tests() - def run_tests(self, config=None): + def run_tests(self, config=None, single_test=None): """ Run unit tests. First, run `self.pre_test_hook` if it is set, and abort if its return value is `False`. - Then, run the unit tests. + Then, run the unit tests. If `single_test` is not None, then only run + that test. The process's output is consumed by `read_output()`. When the process finishes, the `finish` signal is emitted. @@ -358,6 +357,9 @@ def run_tests(self, config=None): config : Config or None configuration for unit tests. If None, use `self.config`. In either case, configuration should be valid. + single_test : str or None + If None, run all tests; otherwise, it is the name of the only test + to be run. """ if self.pre_test_hook: if self.pre_test_hook() is False: @@ -384,7 +386,8 @@ def run_tests(self, config=None): cov_path = config.wdir if cov_path == 'None' else cov_path executable = self.get_conf('executable', section='main_interpreter') try: - self.testrunner.start(config, cov_path, executable, pythonpath) + self.testrunner.start( + config, cov_path, executable, pythonpath, single_test) except RuntimeError: QMessageBox.critical(self, _("Error"), _("Process failed to start")) @@ -498,6 +501,12 @@ def set_status_label(self, msg): """ self.status_label.setText('{}'.format(msg)) + def run_single_test(self, test_name: str) -> None: + """ + Run a single test with the given name. + """ + self.run_tests(single_test=test_name) + def test(): """