Skip to content

Commit

Permalink
adds option to show ignored targets
Browse files Browse the repository at this point in the history
  • Loading branch information
salotz committed Mar 4, 2024
1 parent 8723099 commit 60a9959
Show file tree
Hide file tree
Showing 8 changed files with 161 additions and 91 deletions.
4 changes: 2 additions & 2 deletions pytest.ini
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@

[pytest]
addopts = --color=yes --verbose --import-mode=importlib --capture=no --tb=native --test-data=test_data --checklist-collect src/pytest_checklist --checklist-report --checklist-func-min-pass=1 --checklist-fail-under=100
addopts = --color=yes --verbose --import-mode=importlib --capture=no --tb=native --test-data=test_data --checklist-collect src/pytest_checklist --checklist-report
log_cli = 1
markers =
unit: test for the smallest piece of code that can be logically isolated in a system
pointer: for unit coverage
pointer: for unit coverage checklists
28 changes: 19 additions & 9 deletions src/pytest_checklist/app.py
Original file line number Diff line number Diff line change
@@ -1,28 +1,38 @@
"""Stuff for dealing with configuration, inputs, etc."""

from pytest_checklist.collector import FuncResult
from dataclasses import dataclass

from pytest_checklist.collector import TargetResult

@dataclass
class TargetReport:
    """Pair a collected TargetResult with the pass verdict used for reporting.

    Fields:
      result -- the TargetResult produced during collection
      passes -- True when the target met the configured minimum number of
                pointer marks
    """

    result: TargetResult
    passes: bool

def resolve_exclude_patterns(exclude_str: str) -> set[str]:
    """Parse the comma-separated ``--checklist-exclude`` option value.

    An empty string means "exclude nothing" and yields an empty set.
    """
    if not exclude_str:
        return set()
    return set(exclude_str.split(","))


def is_passing(
results: list[FuncResult], percent_pass_threshold: float
reports: list[TargetReport],
percent_pass_threshold: float,
) -> tuple[float, bool]:

num_funcs = len(results)
num_targets = sum([1 for report in reports if not report.result.target.ignored])

total_passes = sum([1 if res.is_pass else 0 for res in results])
total_passes = sum([1 if report.passes else 0 for report in reports])

if total_passes == num_funcs:
if total_passes == num_targets:
percent_passes = 100.0
elif total_passes > 0:
percent_passes = (total_passes / num_funcs) * 100
percent_passes = (total_passes / num_targets) * 100
else:
percent_passes = 0.0

Expand Down
54 changes: 27 additions & 27 deletions src/pytest_checklist/collector.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,30 +15,34 @@ class MethodQualNamesCollector(cst.CSTVisitor):
METADATA_DEPENDENCIES = (QualifiedNameProvider, ParentNodeProvider)

def __init__(self):  # nochecklist:
    """Initialize the collector's accumulators.

    ``found``   -- qualified names of every collected target
    ``ignored`` -- subset of ``found`` whose definition line carried the
                   no-cover token
    """
    self.found = set()
    self.ignored = set()
    super().__init__()

def visit_FunctionDef(self, node: cst.FunctionDef):  # nochecklist: TODO
    """Record each function's qualified name, marking it ignored when its
    definition line carries the no-cover token.

    The token is only recognized in a trailing comment on the block header
    line (e.g. ``def f():  # nochecklist:``).
    """
    header = getattr(node.body, "header", None)
    ignored = (
        header is not None
        and header.comment
        and header.comment.value.find(DEFAULT_NO_COVER_TOKEN) > -1
    )

    # TODO: Find better way to remove locals
    qual_names = self.get_metadata(QualifiedNameProvider, node)
    for qn in qual_names:
        from_local = qn.name.find("<locals>") > -1
        if not from_local:
            self.found.add(qn.name)
            if ignored:
                self.ignored.add(qn.name)


def detect_files(
start_dir: Path,
ignore_patterns: Union[list[str], None] = None,
) -> list[Path]:
) -> tuple[list[Path], list[Path]]:
"""Given the path and ignores return the set of files to parse."""

# first identify all the paths to ignore
Expand All @@ -53,7 +57,7 @@ def detect_files(
)

# return them in a sorted order so the output later on is stable
return sorted(paths)
return sorted(paths), sorted(list(ignore_paths))


@dataclass(eq=True, frozen=True)
Expand All @@ -68,6 +72,7 @@ class Target:

module: Module
name: str
ignored: bool = False

def fq_name(self) -> str:
    """Return the fully-qualified target name: ``<module>.<name>``."""
    return f"{self.module.fq_module_name}.{self.name}"
Expand Down Expand Up @@ -106,11 +111,11 @@ def resolve_fq_targets(
# with the tree use the collector to retrieve the method names
collector = MethodQualNamesCollector()
cst.MetadataWrapper(module_cst).visit(collector)

for method_name in collector.found:

target = Target(
module,
method_name,
module, method_name, ignored=(method_name in collector.ignored)
)

targets[module.fq_module_name].add(target)
Expand All @@ -119,30 +124,25 @@ def resolve_fq_targets(


@dataclass
class TargetResult:
    """Per-target collection outcome.

    Fields:
      target       -- the collected Target (module, name, ignored flag)
      num_pointers -- number of pointer marks found for this target
      is_pass      -- True when num_pointers met the configured minimum
    """

    target: Target
    num_pointers: int
    is_pass: bool


def collect_case_passes(
    target_pointers: dict[str, set[str]],
    targets: Iterable[Target],
    num_min_pass: int,
) -> list[TargetResult]:
    """Score every target against the pointer index.

    ``target_pointers`` maps a fully-qualified target name to the set of
    tests pointing at it.  A target passes when it has at least
    ``num_min_pass`` pointers; targets absent from the index count as zero.
    """
    target_results = []
    for target in targets:
        test_count: int = len(target_pointers.get(target.fq_name(), {}))

        target_results.append(
            TargetResult(
                target=target,
                num_pointers=test_count,
                is_pass=test_count >= num_min_pass,
            )
        )

    return target_results
2 changes: 1 addition & 1 deletion src/pytest_checklist/defaults.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
DEFAULT_MIN_NUM_POINTERS = 2
DEFAULT_MIN_NUM_POINTERS = 1
DEFAULT_PASS_THRESHOLD = 100.0

DEFAULT_NO_COVER_TOKEN = "nochecklist:" # noqa: S105
58 changes: 37 additions & 21 deletions src/pytest_checklist/plugin.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
from rich.console import Console

from pytest_checklist.pointer import resolve_pointer_mark_target
from pytest_checklist.app import is_passing, resolve_ignore_patterns
from pytest_checklist.app import is_passing, resolve_exclude_patterns, TargetReport
from pytest_checklist.defaults import DEFAULT_MIN_NUM_POINTERS, DEFAULT_PASS_THRESHOLD
from pytest_checklist.collector import (
collect_case_passes,
Expand All @@ -30,9 +30,9 @@ def pytest_addoption(parser) -> None: # nochecklist:
help="Show report in console",
)
group.addoption(
"--checklist-func-min-pass",
"--checklist-target-min-pass",
action="store",
dest="checklist_func_min_pass",
dest="checklist_target_min_pass",
default=DEFAULT_MIN_NUM_POINTERS,
type=int,
help="Minimum number of pointer marks for a unit to pass.",
Expand All @@ -41,7 +41,7 @@ def pytest_addoption(parser) -> None: # nochecklist:
"--checklist-fail-under",
action="store",
dest="checklist_fail_under",
default=0.0,
default=DEFAULT_PASS_THRESHOLD,
type=float,
help="Minimum percentage of units to pass (exit 0), if greater than exit 1.",
)
Expand All @@ -52,10 +52,17 @@ def pytest_addoption(parser) -> None: # nochecklist:
help="Gather targets and tests for them",
)
group.addoption(
"--checklist-ignore",
dest="checklist_ignore",
"--checklist-exclude",
dest="checklist_exclude",
default="",
help="Source files to ignore in collection, comma separated.",
help="Source files to exclude from collection, comma separated. Excluded files will not be collected and cannot be reported as ignored.",
)
group.addoption(
"--checklist-report-ignored",
action="store_true",
dest="checklist_report_ignored",
default=False,
help="Show ignored units in checklist report.",
)


Expand Down Expand Up @@ -140,13 +147,13 @@ def pytest_runtestloop(session) -> None: # nochecklist:
# otherwise it will collect a lot of wrong paths in virtualenvs etc.
source_dir = start_dir / session.config.option.checklist_collect

# parse the ignore paths
ignore_patterns = resolve_ignore_patterns(session.config.option.checklist_ignore)
# parse the exclude paths
exclude_patterns = resolve_exclude_patterns(session.config.option.checklist_exclude)

# collect all the functions by scanning the source code

# first collect all files to look in
check_paths = detect_files(source_dir, list(ignore_patterns))
check_paths, _ = detect_files(source_dir, list(exclude_patterns))

check_modules = resolve_fq_modules(
check_paths,
Expand All @@ -156,19 +163,23 @@ def pytest_runtestloop(session) -> None: # nochecklist:
targets = resolve_fq_targets(check_modules)

# collect the pass/fails for all the units
func_results = collect_case_passes(
target_results = collect_case_passes(
target_pointers,
it.chain(*targets.values()),
session.config.option.checklist_func_min_pass,
)

# test whether the whole thing passed
if session.config.option.checklist_fail_under is None:
percent_pass_threshold = DEFAULT_PASS_THRESHOLD
else:
percent_pass_threshold = session.config.option.checklist_fail_under
target_min_pass = session.config.option.checklist_target_min_pass
fail_under = session.config.option.checklist_fail_under

percent_passes, passes = is_passing(func_results, percent_pass_threshold)
target_reports = []
for result in target_results:

target_reports.append(
TargetReport(result, passes=result.num_pointers >= target_min_pass)
)

# test whether the whole thing passed
percent_passes, passes = is_passing(target_reports, fail_under)

console = Console()

Expand All @@ -178,8 +189,13 @@ def pytest_runtestloop(session) -> None: # nochecklist:
console.print("Checklist unit coverage")
console.print("========================================")

console.print(f"Minimum number of pointers per target: {target_min_pass}")

if session.config.option.checklist_report:
report_padding = make_report(func_results)
report_padding = make_report(
target_reports,
show_ignored=session.config.option.checklist_report_ignored,
)

console.print(report_padding)

Expand All @@ -188,14 +204,14 @@ def pytest_runtestloop(session) -> None: # nochecklist:
session.testsfailed = 1

console.print(
f"[bold red]Checklist unit coverage failed. Target was {percent_pass_threshold}, achieved {percent_passes}.[/bold red]"
f"[bold red]Checklist unit coverage failed. Target was {fail_under}, achieved {percent_passes}.[/bold red]"
)
console.print("")

else:

console.print(
f"[bold green]Checklist unit coverage passed! Target was {percent_pass_threshold}, achieved {percent_passes}.[/bold green]"
f"[bold green]Checklist unit coverage passed! Target was {fail_under}, achieved {percent_passes}.[/bold green]"
)
console.print("")

Expand Down
43 changes: 35 additions & 8 deletions src/pytest_checklist/report.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,24 +3,51 @@
from textwrap import dedent
from rich.padding import Padding

from pytest_checklist.collector import FuncResult
from pytest_checklist.app import TargetReport


def make_report(func_results: list[FuncResult]) -> Padding: # nochecklist:
def make_report(
target_reports: list[TargetReport], show_ignored: bool = False
) -> Padding: # nochecklist:

def report_line(target_report: TargetReport):
    """Format one colored report line for a target.

    Classification precedence: ignored targets are always labeled IGNORE
    (cyan when they also meet the pointer minimum, yellow otherwise);
    non-ignored targets are PASS (green) or FAIL (blue when some pointers
    exist, red when none do).
    """
    # BUGFIX: the ignored checks previously sat below the `passes` check,
    # which made the cyan (ignored-and-passing) branch unreachable and let
    # ignored targets surface as PASS/FAIL.  Classify ignored first.
    if target_report.result.target.ignored:
        color = "cyan" if target_report.passes else "yellow"
        test_message_str = "IGNORE"

    elif target_report.passes:
        color = "green"
        test_message_str = "PASS"

    # doesn't pass, but there are tests for it
    elif target_report.result.num_pointers > 0:
        color = "blue"
        test_message_str = "FAIL"

    else:
        color = "red"
        test_message_str = "FAIL"

    test_result_str = (
        f"{test_message_str: <7}{target_report.result.num_pointers: <2}"
    )

    return f"[{color}]{test_result_str:-<5}[/{color}] {target_report.result.target.fq_name()}"

report_lines = "\n".join([report_line(func_result) for func_result in func_results])
report_lines = "\n".join(
[
report_line(target_report)
for target_report in target_reports
if not (not show_ignored and target_report.result.target.ignored)
]
)

report = dedent(
f"""
Expand Down
Loading

0 comments on commit 60a9959

Please sign in to comment.