diff --git a/.github/workflows/ci-tests.yaml b/.github/workflows/ci-tests.yaml
index 66b953b..326fb57 100644
--- a/.github/workflows/ci-tests.yaml
+++ b/.github/workflows/ci-tests.yaml
@@ -14,15 +14,15 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        python-version: ['3.6']
+        python-versions: ['3.6']
 
     steps:
       - uses: actions/checkout@v4 # https://github.com/actions/cache/releases
 
-      - name: Set up Python ${{ matrix.python-version }}
+      - name: Set up Python ${{ matrix.python-versions }}
         uses: actions/setup-python@v5 # https://github.com/actions/setup-python/releases
         with:
-          python-version: ${{ matrix.python-version }}
+          python-version: ${{ matrix.python-versions }}
 
       - name: Install Pipenv
         run: |
@@ -33,15 +33,17 @@
         run: |
           pipenv install --deploy --dev
 
+      - name: Check minimum required Python version
+        run: |
+          vermin -q -t=${{ matrix.python-versions }}- --violations cis_audit.py
+
       - name: Lint with flake8
         run: |
-          pipenv run flake8 *.py
-          pipenv run flake8 tests/*/*.py
+          pipenv run flake8 *.py tests/*/*.py
 
       - name: Format with black
         run: |
-          pipenv run black --check cis_audit.py
-          pipenv run black --check tests/*/*.py
+          pipenv run black --check cis_audit.py tests/*/*.py
 
       - name: Test with pytest
         run: |
@@ -49,7 +51,7 @@
           pipenv run coverage xml
 
       #- name: Codecov
-      #  uses: codecov/codecov-action@v3 # https://github.com/codecov/codecov-action/releases
+      #  uses: codecov/codecov-action@v4 # https://github.com/codecov/codecov-action/releases
       #
       #  with:
       #    files: coverage.xml
@@ -64,7 +66,7 @@
     if: ${{ false }} # disable for now
 
     steps:
-      - uses: actions/checkout@v4 # https://github.com/actions/cache/releases
+      - uses: actions/checkout@v4 # https://github.com/actions/checkout/releases
 
       - name: Cache Vagrant boxes
         uses: actions/cache@v4 # https://github.com/actions/cache/releases
diff --git a/cis_audit.py b/cis_audit.py
index 99c6e7f..afacd72 100755
--- a/cis_audit.py
+++ b/cis_audit.py
@@ -2377,7 +2377,7 @@ def output_text(results, host_os, benchmark_version, stats):
     print(f'Passed {passed} of {total} tests in {duration} seconds ({skipped} Skipped, {errors} Errors)')
 
 
-def result_stats(results: dict, start_time, end_time) -> dict:
+def result_stats(results: "list[dict]", start_time, end_time) -> dict:
     passed = 0
     failed = 0
     skipped = 0
@@ -2385,7 +2385,7 @@
 
     time_delta = (end_time - start_time).total_seconds()
     if time_delta >= 10:
-        duration = round(time_delta, 0)
+        duration = round(time_delta, None)
     else:
         duration = round(time_delta, 2)
 
diff --git a/tests/unit/test_result_stats.py b/tests/unit/test_result_stats.py
new file mode 100644
index 0000000..4774874
--- /dev/null
+++ b/tests/unit/test_result_stats.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python3
+
+from datetime import datetime
+
+import pytest
+
+from cis_audit import result_stats
+
+
+@pytest.mark.parametrize(
+    "end_time_str,expected_duration",
+    [
+        pytest.param("1970-01-01T00:01:00+0000", "60", id="slow"),
+        pytest.param("1970-01-01T00:00:05+0000", "5.0", id="fast"),
+    ],
+)
+def test_result_stats(end_time_str, expected_duration):
+    results = [
+        {'result': "Error"},
+        {'result': "Fail"},
+        {'result': "Fail"},
+        {'result': "Not Implemented"},
+        {'result': "Pass"},
+        {'result': "Pass"},
+        {'result': "Pass"},
+        {'result': "Skipped"},
+    ]
+    datefmt = "%Y-%m-%dT%H:%M:%S%z"
+    start_time = datetime.strptime("1970-01-01T00:00:00+0000", datefmt)
+    end_time = datetime.strptime(end_time_str, datefmt)
+
+    stats = result_stats(results=results, start_time=start_time, end_time=end_time)
+
+    assert str(stats['duration']) == expected_duration
+    assert stats['errors'] == 1
+    assert stats['failed'] == 2
+    assert stats['passed'] == 3
+    assert stats['skipped'] == 1
+
+    # assert stats is None
+
+
+if __name__ == "__main__":
+    pytest.main([__file__, "--no-cov"])
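
Reviewer note, not part of the patch: the change from `round(time_delta, 0)` to `round(time_delta, None)` is what the new test's expected values depend on, because `round()` returns an `int` when `ndigits` is `None` but a `float` when `ndigits` is `0`. A minimal sketch of that behaviour (variable names here are illustrative only):

```python
# Standard-library rounding behaviour the "slow"/"fast" expectations rely on.
slow_delta = 60.0  # >= 10 seconds, rounded to a whole number
fast_delta = 5.0   # < 10 seconds, rounded to two decimal places

print(str(round(slow_delta, None)))  # "60"   -> int, matches the "slow" case
print(str(round(slow_delta, 0)))     # "60.0" -> float, what the old code produced
print(str(round(fast_delta, 2)))     # "5.0"  -> matches the "fast" case
```

Similarly, the quoted `"list[dict]"` annotation keeps the hint valid on the Python 3.6 target, where subscripting the built-in `list` in annotations is not supported at runtime.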