diff --git a/.github/workflows/ci-tests.yaml b/.github/workflows/ci-tests.yaml index c3297a7..dab8582 100644 --- a/.github/workflows/ci-tests.yaml +++ b/.github/workflows/ci-tests.yaml @@ -3,11 +3,11 @@ name: CI Tests -on: [push] +on: [push, pull_request, workflow_dispatch] jobs: unit-tests: - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 strategy: fail-fast: false matrix: diff --git a/MD5SUMS b/MD5SUMS index a9d8eb7..ea9b715 100644 --- a/MD5SUMS +++ b/MD5SUMS @@ -1 +1 @@ -98e981f4bd21566fae7ca964797aa22a cis_audit.py +0d513e515a3f7e65047cb00a4f928471 cis_audit.py diff --git a/Pipfile b/Pipfile index d21fd37..00975c3 100644 --- a/Pipfile +++ b/Pipfile @@ -12,8 +12,8 @@ flake8 = "*" isort = "*" mock = "*" pyfakefs = "<4.6.0" +pytest = "*" pytest-cov = "*" -vermin = "*" [requires] python_version = "3.6" diff --git a/Pipfile.lock b/Pipfile.lock index 01629bb..6a3dd81 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "e155aac900bbd7398d15974582057eacc791d5664f6ea5e6c322c2d6f3a863ed" + "sha256": "2f7eb8db6509cb8fedd8a15554b8739c9151fa2819ce0c2059bd2720520ab4b3" }, "pipfile-spec": 6, "requires": { @@ -19,11 +19,11 @@ "develop": { "attrs": { "hashes": [ - "sha256:29adc2665447e5191d0e7c568fde78b21f9672d344281d0c6e1ab085429b22b6", - "sha256:86efa402f67bf2df34f51a335487cf46b1ec130d02b8d39fd248abfd30da551c" + "sha256:29e95c7f6778868dbd49170f98f8818f78f3dc5e0e37c0b1f474e3561b240836", + "sha256:c9227bfc2f01993c03f68db37d1d15c9690188323c067c641f1a35ca58185f99" ], - "markers": "python_version >= '3.5'", - "version": "==22.1.0" + "markers": "python_version >= '3.6'", + "version": "==22.2.0" }, "black": { "hashes": [ @@ -171,18 +171,19 @@ }, "mock": { "hashes": [ - "sha256:122fcb64ee37cfad5b3f48d7a7d51875d7031aaf3d8be7c42e2bee25044eee62", - "sha256:7d3fbbde18228f4ff2f1f119a45cdffa458b4c0dee32eb4d2bb2f82554bac7bc" + "sha256:c41cfb1e99ba5d341fbcc5308836e7d7c9786d302f995b2c271ce2144dece9eb", + "sha256:e3ea505c03babf7977fd21674a69ad328053d414f05e6433c30d8fa14a534a6b" ], "index": "pypi", - "version": "==4.0.3" + "version": "==5.0.1" }, "mypy-extensions": { "hashes": [ - "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d", - "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8" + "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d", + "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782" ], - "version": "==0.4.3" + "markers": "python_version >= '3.5'", + "version": "==1.0.0" }, "packaging": { "hashes": [ @@ -260,7 +261,7 @@ "sha256:9ce3ff477af913ecf6321fe337b93a2c0dcf2a0a1439c43f5452112c1e4280db", "sha256:e30905a0c131d3d94b89624a1cc5afec3e0ba2fbdb151867d8e0ebd49850f171" ], - "markers": "python_version >= '3.6'", + "index": "pypi", "version": "==7.0.1" }, "pytest-cov": { @@ -276,7 +277,7 @@ "sha256:05b6166bff487dc068d322585c7ea4ef78deed501cc124060e0f238e89a9231f", "sha256:e3069e4be3ead9668e21cb9b074cd948f7b3113fd9c8bba083f48247aab8b11c" ], - "markers": "python_full_version < '3.11.0a7'", + "markers": "python_version >= '3.6'", "version": "==1.2.3" }, "typed-ast": { @@ -317,13 +318,6 @@ "markers": "python_version < '3.10'", "version": "==4.1.1" }, - "vermin": { - "hashes": [ - "sha256:0e5e712686d47e6529c365748771ae0db2c8df22835134c10ad4c7f1fa62533c" - ], - "index": "pypi", - "version": "==1.4.2" - }, "zipp": { "hashes": [ "sha256:71c644c5369f4a6e07636f0aa966270449561fcea2e3d6747b8d23efaa9d7832", diff --git a/README.md b/README.md index 68ffb35..7aaa568 100644 --- a/README.md +++ 
b/README.md @@ -3,8 +3,8 @@ [badge images: "Latest version" (unchanged context), "GitHub Actions" badge lines replaced; original HTML markup not recoverable] @@ -21,7 +21,7 @@

-This repo provides an unofficial, standalone, zero-install, zero-dependency, Python 3 application which can check your system against published CIS Hardening Benchmarks to offer an indication of your system's preparedness for compliance to the official standard. +This repo provides an unofficial, standalone, zero-install, zero-dependency, Python 3 script which can check your system against published CIS Hardening Benchmarks to offer an indication of your system's preparedness for compliance to the official standard. ### How do I use this? diff --git a/SHA256SUMS b/SHA256SUMS index 980bb87..d3cd233 100644 --- a/SHA256SUMS +++ b/SHA256SUMS @@ -1 +1 @@ -529b9f29474d5d29d948771706d50a073377fec1924aff075c9358ddcccf7cbd cis_audit.py +ef01e0d2cb9f2d347e8bb4d8a5a86db2f0446c77140ba3273c36553f57969158 cis_audit.py diff --git a/cis_audit.py b/cis_audit.py index a41b6be..98161cb 100755 --- a/cis_audit.py +++ b/cis_audit.py @@ -9,7 +9,7 @@ # You can obtain a copy of the CIS Benchmarks from https://www.cisecurity.org/cis-benchmarks/ # Use of the CIS Benchmarks are subject to the Terms of Use for Non-Member CIS Products - https://www.cisecurity.org/terms-of-use-for-non-member-cis-products -__version__ = '0.20.0-alpha.3' +__version__ = '0.20.0-rc.1' ### Imports ### import json # https://docs.python.org/3/library/json.html @@ -860,7 +860,7 @@ def audit_file_permissions(self, file: str, expected_mode: str, expected_user: s """ """ When looping over each of the permission bits. If the bits do not match or are not more restrictive, increment the failure state value by a unique amount, per below. This allows us to determine from the return value, which permissions did not match: - + index | penalty | description -------|---------|------------- - | 1 | User did not match @@ -893,7 +893,7 @@ def audit_file_permissions(self, file: str, expected_mode: str, expected_user: s try: file_stat = os.stat(file) except Exception as e: - self.log.warning(f'Error trying to stat file {file}: "{e}"') + self.log.debug(f'Error trying to stat file {file}: "{e}"') return -1 file_user = getpwuid(file_stat.st_uid).pw_name @@ -2052,59 +2052,73 @@ def audit_xdmcp_not_enabled(self) -> int: return state - def output(self, format: str, data: list) -> None: + def output(self, format: str, results: list, host_os: str, benchmark_version: str, stats: dict) -> None: if format in ['csv', 'psv', 'tsv']: if format == 'csv': - sep = ',' + separator = ',' elif format == 'psv': - sep = '|' + separator = '|' elif format == 'tsv': - sep = '\t' + separator = '\t' - self.output_csv(data, separator=sep) + self.output_csv(results=results, separator=separator, host_os=host_os, benchmark_version=benchmark_version) elif format == 'json': - self.output_json(data) + self.output_json(results=results, host_os=host_os, benchmark_version=benchmark_version, stats=stats) elif format == 'text': - self.output_text(data) + self.output_text(results=results, host_os=host_os, benchmark_version=benchmark_version, stats=stats) - def output_csv(self, data: list, separator: str): + def output_csv(self, results: list, separator: str, host_os, benchmark_version): ## Shorten the variable name so that it's easier to construct the print's below sep = separator ## Print Header + print(f'CIS {host_os} Benchmark v{benchmark_version} Results') print(f'ID{sep}Description{sep}Level{sep}Result{sep}Duration') ## Print Data - for record in data: - if len(record) == 2: - print(f'{record[0]}{sep}"{record[1]}"{sep}{sep}{sep}') - elif len(record) == 4: - 
print(f'{record[0]}{sep}"{record[1]}"{sep}{record[2]}{sep}{record[3]}{sep}') - elif len(record) == 5: - print(f'{record[0]}{sep}"{record[1]}"{sep}{record[2]}{sep}{record[3]}{sep}{record[4]}') - - def output_json(self, data): + for entry in results: + id = entry['_id'] + description = entry['description'] + if 'level' in entry: + level = entry['level'] + if 'result' in entry: + result = entry['result'] + if 'duration' in entry: + duration = entry['duration'] + + if len(entry) == 2: + print(f'{id}{sep}"{description}"{sep}{sep}{sep}') + elif len(entry) == 4: + print(f'{id}{sep}"{description}"{sep}{level}{sep}{result}{sep}') + elif len(entry) == 5: + print(f'{id}{sep}"{description}"{sep}{level}{sep}{result}{sep}{duration}') + + def output_json(self, results, host_os, benchmark_version, stats): output = {} + output['metadata'] = stats + output['metadata']['host_os'] = host_os + output['metadata']['benchmark_version'] = benchmark_version + # output['results'] = {} + output['results'] = results + # for result in results: + # id = result[0] + # output['results'][id] = {} + # output['results'][id]['description'] = result[1] - for record in data: - id = record[0] - output[id] = {} - output[id]['description'] = record[1] - - if len(record) >= 3: - output[id]['level'] = record[2] + # if len(result) >= 3: + # output['results'][id]['level'] = result[2] - if len(record) >= 4: - output[id]['result'] = record[3] + # if len(result) >= 4: + # output['results'][id]['result'] = result[3] - if len(record) >= 5: - output[id]['duration'] = record[4] + # if len(result) >= 5: + # output['results'][id]['duration'] = result[4] print(json.dumps(output)) - def output_text(self, data): + def output_text(self, results, host_os, benchmark_version, stats): ## Set starting/minimum width of columns to fit the column headers width_id = len("ID") width_description = len("Description") @@ -2113,18 +2127,18 @@ def output_text(self, data): width_duration = len("Duration") ## Find the max width of each column - for row in data: - row_length = len(row) + for entry in results: + row_length = len(entry) ## In the following section, len_level and len_duration are commented out because the ## headers are wider than the data in the rows, so they currently don't need expanding. ## If I leave them uncommented, then codecov complains about the tests not covering them. 
- len_id = len(str(row[0])) if row_length >= 1 else None - len_description = len(str(row[1])) if row_length >= 2 else None - # len_level = len(str(row[2])) if row_length >= 3 else None - len_result = len(str(row[3])) if row_length >= 4 else None - # len_duration = len(str(row[4])) if row_length >= 5 else None + len_id = len(str(entry['_id'])) if row_length >= 1 else None + len_description = len(str(entry['description'])) if row_length >= 2 else None + # len_level = len(str(entry['level'])) if row_length >= 3 else None + len_result = len(str(entry['result'])) if row_length >= 4 else None + # len_duration = len(str(entry['duration'])) if row_length >= 5 else None if len_id and len_id > width_id: width_id = len_id @@ -2142,17 +2156,22 @@ # if len_duration and len_duration > width_duration: # width_duration = len_duration - ## Print column headers + ## Print title + title = f'CIS {host_os} Benchmark v{benchmark_version} Results' + print(title) + print(f'{"-" :-<{len(title)}}') + + ## Print headers print(f'{"ID" : <{width_id}} {"Description" : <{width_description}} {"Level" : ^{width_level}} {"Result" : ^{width_result}} {"Duration" : >{width_duration}}') print(f'{"--" :-<{width_id}} {"-----------" :-<{width_description}} {"-----" :-^{width_level}} {"------" :-^{width_result}} {"--------" :->{width_duration}}') ## Print Data - for row in data: - id = row[0] if len(row) >= 1 else "" - description = row[1] if len(row) >= 2 else "" - level = row[2] if len(row) >= 3 else "" - result = row[3] if len(row) >= 4 else "" - duration = row[4] if len(row) >= 5 else "" + for entry in results: + id = entry['_id'] + description = entry['description'] + level = entry['level'] if 'level' in entry else "" + result = entry['result'] if 'result' in entry else "" + duration = entry['duration'] if 'duration' in entry else "" ## Print blank row before new major sections if len(id) == 1:
'Manual')) + results.append({'_id': test_id, 'description': test_description, 'level': test_level, 'result': 'Manual'}) elif test_type == 'skip': - results.append((test_id, test_description, test_level, 'Skipped')) + results.append({'_id': test_id, 'description': test_description, 'level': test_level, 'result': 'Skipped'}) elif test_type == 'notimplemented': - results.append((test_id, test_description, test_level, 'Not Implemented')) + results.append({'_id': test_id, 'description': test_description, 'level': test_level, 'result': 'Not Implemented'}) elif test_type == 'test': start_time = self._get_utcnow() @@ -2232,7 +2290,7 @@ def run_tests(self, tests: "list[dict]") -> dict: state = -1 end_time = self._get_utcnow() - duration = f'{int((end_time.microsecond - start_time.microsecond) / 1000)}ms' + test_duration = f'{int((end_time.microsecond - start_time.microsecond) / 1000)}ms' if state == 0: self.log.debug(f'Test {test_id} passed') @@ -2245,14 +2303,14 @@ def run_tests(self, tests: "list[dict]") -> dict: self.log.debug(f'Test {test_id} failed with state {state}') result = "Fail" - results.append((test_id, test_description, test_level, result, duration)) + results.append({'_id': test_id, 'description': test_description, 'level': test_level, 'result': result, 'duration': test_duration}) return results ### Benchmarks ### benchmarks = { - 'centos7': { + 'CentOS 7': { '3.1.2': [ {'_id': "1", 'description': "Initial Setup", 'type': "header"}, {'_id': "1.1", 'description': "Filesystem Configuration", 'type': "header"}, @@ -2535,7 +2593,7 @@ def run_tests(self, tests: "list[dict]") -> dict: {'_id': "6.2.3", 'description': "Ensure all groups in /etc/passwd exist in /etc/group", 'function': CISAudit.audit_etc_passwd_gids_exist_in_etc_group, 'levels': {'server': 1, 'workstation': 1}}, {'_id': "6.2.4", 'description': "Ensure shadow group is empty", 'function': CISAudit.audit_shadow_group_is_empty, 'levels': {'server': 1, 'workstation': 1}}, {'_id': "6.2.5", 'description': "Ensure no duplicate user names exist", 'function': CISAudit.audit_duplicate_user_names, 'levels': {'server': 1, 'workstation': 1}}, - {'_id': "6.2.6", 'description': "Ensure no duplicate user names exist", 'function': CISAudit.audit_duplicate_user_names, 'levels': {'server': 1, 'workstation': 1}}, + {'_id': "6.2.6", 'description': "Ensure no duplicate group names exist", 'function': CISAudit.audit_duplicate_group_names, 'levels': {'server': 1, 'workstation': 1}}, {'_id': "6.2.7", 'description': "Ensure no duplicate UIDs exist", 'function': CISAudit.audit_duplicate_uids, 'levels': {'server': 1, 'workstation': 1}}, {'_id': "6.2.8", 'description': "Ensure no duplicate GIDs exist", 'function': CISAudit.audit_duplicate_gids, 'levels': {'server': 1, 'workstation': 1}}, {'_id': "6.2.9", 'description': "Ensure root is the only UID 0 account", 'function': CISAudit.audit_root_is_only_uid_0_account, 'levels': {'server': 1, 'workstation': 1}}, @@ -2557,32 +2615,38 @@ def main(): # pragma: no cover config = parse_arguments() audit = CISAudit(config=config) - host_os = 'centos7' + host_os = 'CentOS 7' benchmark_version = '3.1.2' # test_list = audit.get_tests_list(host_os, benchmarks_version) test_list = benchmarks[host_os][benchmark_version] + + start_time = datetime.utcnow() results = audit.run_tests(test_list) - audit.output(config.outformat, results) + end_time = datetime.utcnow() + + stats = audit.result_stats(results, start_time, end_time) + + audit.output(config.outformat, results, host_os, benchmark_version, stats) def 
parse_arguments(argv=sys.argv): description = "This script runs tests on the system to check for compliance against the CIS Benchmarks. No changes are made to system files by this script." epilog = f""" Examples: - + Run with debug enabled: {__file__} --debug - + Exclude tests from section 1.1 and 1.3.2: {__file__} --exclude 1.1 1.3.2 - + Include tests only from section 4.1 but exclude tests from section 4.1.1: {__file__} --include 4.1 --exclude 4.1.1 - + Run only level 1 tests {__file__} --level 1 - + Run level 1 tests and include some but not all SELinux questions {__file__} --level 1 --include 1.6 --exclude 1.6.1.2 """ diff --git a/pyproject.toml b/pyproject.toml index 80cd3b6..ffd2f59 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,3 +5,11 @@ target-version = ['py36'] [tool.isort] profile = "black" + +## https://docs.pytest.org/en/7.1.x/reference/customize.html#pyproject-toml +[tool.pytest.ini_options] +minversion = "6.0" +addopts = "-q --cov=cis_audit --cov-fail-under=100 --cov-report term-missing" +testpaths = [ + "tests/unit/" +] diff --git a/setup.py b/setup.py index 3ec21de..31eaa7f 100644 --- a/setup.py +++ b/setup.py @@ -15,7 +15,6 @@ classifiers=[ "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.6", - "License :: OSI Approved :: Apache Software License", ], python_requires='==3.6.*', ) diff --git a/tests/integration/test_integration_output.py b/tests/integration/test_integration_output.py index 7e73263..8b713d7 100644 --- a/tests/integration/test_integration_output.py +++ b/tests/integration/test_integration_output.py @@ -19,7 +19,7 @@ def test_integration_output_csv(capsys): - CISAudit().output(data=data, format='csv') + CISAudit().output(results=data, format='csv') output, error = capsys.readouterr() assert error == '' @@ -34,7 +34,7 @@ def test_integration_output_csv(capsys): def test_integration_output_json(capsys): - CISAudit().output(data=data, format='json') + CISAudit().output(results=data, format='json') output, error = capsys.readouterr() assert error == '' @@ -42,7 +42,7 @@ def test_integration_output_json(capsys): def test_integration_output_psv(capsys): - CISAudit().output(data=data, format='psv') + CISAudit().output(results=data, format='psv') output, error = capsys.readouterr() assert error == '' @@ -57,7 +57,7 @@ def test_integration_output_psv(capsys): def test_integration_output_text(capsys): - CISAudit().output(data=data, format='text') + CISAudit().output(results=data, format='text') output, error = capsys.readouterr() print(output) @@ -77,7 +77,7 @@ def test_integration_output_text(capsys): def test_integration_output_tsv(capsys): - CISAudit().output(data=data, format='tsv') + CISAudit().output(results=data, format='tsv') output, error = capsys.readouterr() assert error == '' diff --git a/tests/unit/test_output.py b/tests/unit/test_output.py index a4f4601..6bb7587 100644 --- a/tests/unit/test_output.py +++ b/tests/unit/test_output.py @@ -6,21 +6,32 @@ from cis_audit import CISAudit mock_data = [ - ('1', 'section header'), + {'_id': '1', 'description': 'section header'}, ] -def mock_output_function(self, data, separator=None): +def mock_output_function(self, results, separator=None, host_os=None, benchmark_version=None, stats=None): print(separator) - print(data) + print(results) +host_os = 'CentOS 7' +benchmark_version = '3.1.2' +stats = { + 'passed': 5, + 'failed': 3, + 'skipped': 2, + 'errors': 1, + 'duration': 20, + 'total': 9, +} + test = CISAudit() @patch.object(CISAudit, 'output_csv', mock_output_function) def 
test_output_calls_csv_function(capfd): - test.output(format='csv', data=mock_data) + test.output(format='csv', results=mock_data, host_os=host_os, benchmark_version=benchmark_version, stats=stats) stdout, stderr = capfd.readouterr() output = stdout.split('\n') @@ -31,7 +42,7 @@ def test_output_calls_csv_function(capfd): @patch.object(CISAudit, 'output_csv', mock_output_function) def test_output_calls_psv_function(capfd): - test.output(format='psv', data=mock_data) + test.output(format='psv', results=mock_data, host_os=host_os, benchmark_version=benchmark_version, stats=stats) stdout, stderr = capfd.readouterr() output = stdout.split('\n') @@ -42,7 +53,7 @@ def test_output_calls_psv_function(capfd): @patch.object(CISAudit, 'output_csv', mock_output_function) def test_output_calls_tsv_function(capfd): - test.output(format='tsv', data=mock_data) + test.output(format='tsv', results=mock_data, host_os=host_os, benchmark_version=benchmark_version, stats=stats) stdout, stderr = capfd.readouterr() output = stdout.split('\n') @@ -53,7 +64,7 @@ def test_output_calls_tsv_function(capfd): @patch.object(CISAudit, 'output_json', mock_output_function) def test_output_calls_json_function(capfd): - test.output(format='json', data=mock_data) + test.output(format='json', results=mock_data, host_os=host_os, benchmark_version=benchmark_version, stats=stats) stdout, stderr = capfd.readouterr() output = stdout.split('\n') @@ -64,7 +75,7 @@ def test_output_calls_json_function(capfd): @patch.object(CISAudit, 'output_text', mock_output_function) def test_output_calls_text_function(capfd): - test.output(format='text', data=mock_data) + test.output(format='text', results=mock_data, host_os=host_os, benchmark_version=benchmark_version, stats=stats) stdout, stderr = capfd.readouterr() output = stdout.split('\n') diff --git a/tests/unit/test_output_csv.py b/tests/unit/test_output_csv.py index 62c2c89..6137de2 100644 --- a/tests/unit/test_output_csv.py +++ b/tests/unit/test_output_csv.py @@ -5,59 +5,65 @@ from cis_audit import CISAudit results = [ - ('1', 'section header'), - ('1.1', 'subsection header'), - ('1.1.1', 'test 1.1.1', 1, 'Pass', '1ms'), - ('2', 'section header'), - ('2.1', 'test 2.1', 1, 'Fail', '10ms'), - ('2.2', 'test 2.2', 2, 'Pass', '100ms'), - ('2.3', 'test 2.3', 1, 'Not Implemented'), + {'_id': '1', 'description': 'section header'}, + {'_id': '1.1', 'description': 'subsection header'}, + {'_id': '1.1.1', 'description': 'test 1.1.1', 'level': 1, 'result': 'Pass', 'duration': '1ms'}, + {'_id': '2', 'description': 'section header'}, + {'_id': '2.1', 'description': 'test 2.1', 'level': 1, 'result': 'Fail', 'duration': '10ms'}, + {'_id': '2.2', 'description': 'test 2.2', 'level': 2, 'result': 'Pass', 'duration': '100ms'}, + {'_id': '2.3', 'description': 'test 2.3', 'level': 1, 'result': 'Not Implemented'}, ] +host_os = 'CentOS 7' +benchmark_version = '3.1.2' + def test_output_csv(capsys): - CISAudit().output_csv(data=results, separator=',') + CISAudit().output_csv(results=results, separator=',', host_os=host_os, benchmark_version=benchmark_version) output, error = capsys.readouterr() assert error == '' - assert output.split('\n')[0] == 'ID,Description,Level,Result,Duration' - assert output.split('\n')[1] == '1,"section header",,,' - assert output.split('\n')[2] == '1.1,"subsection header",,,' - assert output.split('\n')[3] == '1.1.1,"test 1.1.1",1,Pass,1ms' - assert output.split('\n')[4] == '2,"section header",,,' - assert output.split('\n')[5] == '2.1,"test 2.1",1,Fail,10ms' - assert output.split('\n')[6] 
== '2.2,"test 2.2",2,Pass,100ms' - assert output.split('\n')[7] == '2.3,"test 2.3",1,Not Implemented,' + assert output.split('\n')[0] == 'CIS CentOS 7 Benchmark v3.1.2 Results' + assert output.split('\n')[1] == 'ID,Description,Level,Result,Duration' + assert output.split('\n')[2] == '1,"section header",,,' + assert output.split('\n')[3] == '1.1,"subsection header",,,' + assert output.split('\n')[4] == '1.1.1,"test 1.1.1",1,Pass,1ms' + assert output.split('\n')[5] == '2,"section header",,,' + assert output.split('\n')[6] == '2.1,"test 2.1",1,Fail,10ms' + assert output.split('\n')[7] == '2.2,"test 2.2",2,Pass,100ms' + assert output.split('\n')[8] == '2.3,"test 2.3",1,Not Implemented,' def test_output_psv(capsys): - CISAudit().output_csv(data=results, separator='|') + CISAudit().output_csv(results=results, separator='|', host_os=host_os, benchmark_version=benchmark_version) output, error = capsys.readouterr() assert error == '' - assert output.split('\n')[0] == 'ID|Description|Level|Result|Duration' - assert output.split('\n')[1] == '1|"section header"|||' - assert output.split('\n')[2] == '1.1|"subsection header"|||' - assert output.split('\n')[3] == '1.1.1|"test 1.1.1"|1|Pass|1ms' - assert output.split('\n')[4] == '2|"section header"|||' - assert output.split('\n')[5] == '2.1|"test 2.1"|1|Fail|10ms' - assert output.split('\n')[6] == '2.2|"test 2.2"|2|Pass|100ms' - assert output.split('\n')[7] == '2.3|"test 2.3"|1|Not Implemented|' + assert output.split('\n')[0] == 'CIS CentOS 7 Benchmark v3.1.2 Results' + assert output.split('\n')[1] == 'ID|Description|Level|Result|Duration' + assert output.split('\n')[2] == '1|"section header"|||' + assert output.split('\n')[3] == '1.1|"subsection header"|||' + assert output.split('\n')[4] == '1.1.1|"test 1.1.1"|1|Pass|1ms' + assert output.split('\n')[5] == '2|"section header"|||' + assert output.split('\n')[6] == '2.1|"test 2.1"|1|Fail|10ms' + assert output.split('\n')[7] == '2.2|"test 2.2"|2|Pass|100ms' + assert output.split('\n')[8] == '2.3|"test 2.3"|1|Not Implemented|' def test_output_tsv(capsys): - CISAudit().output_csv(data=results, separator='\t') + CISAudit().output_csv(results=results, separator='\t', host_os=host_os, benchmark_version=benchmark_version) output, error = capsys.readouterr() assert error == '' - assert output.split('\n')[0] == 'ID Description Level Result Duration' - assert output.split('\n')[1] == '1 "section header" ' - assert output.split('\n')[2] == '1.1 "subsection header" ' - assert output.split('\n')[3] == '1.1.1 "test 1.1.1" 1 Pass 1ms' - assert output.split('\n')[4] == '2 "section header" ' - assert output.split('\n')[5] == '2.1 "test 2.1" 1 Fail 10ms' - assert output.split('\n')[6] == '2.2 "test 2.2" 2 Pass 100ms' - assert output.split('\n')[7] == '2.3 "test 2.3" 1 Not Implemented ' + assert output.split('\n')[0] == 'CIS CentOS 7 Benchmark v3.1.2 Results' + assert output.split('\n')[1] == 'ID Description Level Result Duration' + assert output.split('\n')[2] == '1 "section header" ' + assert output.split('\n')[3] == '1.1 "subsection header" ' + assert output.split('\n')[4] == '1.1.1 "test 1.1.1" 1 Pass 1ms' + assert output.split('\n')[5] == '2 "section header" ' + assert output.split('\n')[6] == '2.1 "test 2.1" 1 Fail 10ms' + assert output.split('\n')[7] == '2.2 "test 2.2" 2 Pass 100ms' + assert output.split('\n')[8] == '2.3 "test 2.3" 1 Not Implemented ' if __name__ == '__main__': diff --git a/tests/unit/test_output_json.py b/tests/unit/test_output_json.py index 0d1241f..cd3bc75 100644 --- a/tests/unit/test_output_json.py 
+++ b/tests/unit/test_output_json.py @@ -5,22 +5,34 @@ from cis_audit import CISAudit results = [ - ('1', 'section header'), - ('1.1', 'subsection header'), - ('1.1.1', 'test 1.1.1', 1, 'Pass', '1ms'), - ('2', 'section header'), - ('2.1', 'test 2.1', 1, 'Fail', '10ms'), - ('2.2', 'test 2.2', 2, 'Pass', '100ms'), - ('2.3', 'test 2.3', 1, 'Not Implemented'), + {'_id': '1', 'description': 'section header'}, + {'_id': '1.1', 'description': 'subsection header'}, + {'_id': '1.1.1', 'description': 'test 1.1.1', 'level': 1, 'result': 'Pass', 'duration': '1ms'}, + {'_id': '2', 'description': 'section header'}, + {'_id': '2.1', 'description': 'test 2.1', 'level': 1, 'result': 'Fail', 'duration': '10ms'}, + {'_id': '2.2', 'description': 'test 2.2', 'level': 2, 'result': 'Pass', 'duration': '100ms'}, + {'_id': '2.3', 'description': 'test 2.3', 'level': 1, 'result': 'Not Implemented'}, ] +host_os = 'CentOS 7' +benchmark_version = '3.1.2' +stats = { + 'passed': 5, + 'failed': 3, + 'skipped': 2, + 'errors': 1, + 'total': 9, + 'duration': 20, +} + def test_output_json(capsys): - CISAudit().output_json(data=results) + CISAudit().output_json(results=results, host_os=host_os, benchmark_version=benchmark_version, stats=stats) output, error = capsys.readouterr() assert error == '' - assert output == '{"1": {"description": "section header"}, "1.1": {"description": "subsection header"}, "1.1.1": {"description": "test 1.1.1", "level": 1, "result": "Pass", "duration": "1ms"}, "2": {"description": "section header"}, "2.1": {"description": "test 2.1", "level": 1, "result": "Fail", "duration": "10ms"}, "2.2": {"description": "test 2.2", "level": 2, "result": "Pass", "duration": "100ms"}, "2.3": {"description": "test 2.3", "level": 1, "result": "Not Implemented"}}\n' + print(output) + assert output == '{"metadata": {"passed": 5, "failed": 3, "skipped": 2, "errors": 1, "total": 9, "duration": 20, "host_os": "CentOS 7", "benchmark_version": "3.1.2"}, "results": [{"_id": "1", "description": "section header"}, {"_id": "1.1", "description": "subsection header"}, {"_id": "1.1.1", "description": "test 1.1.1", "level": 1, "result": "Pass", "duration": "1ms"}, {"_id": "2", "description": "section header"}, {"_id": "2.1", "description": "test 2.1", "level": 1, "result": "Fail", "duration": "10ms"}, {"_id": "2.2", "description": "test 2.2", "level": 2, "result": "Pass", "duration": "100ms"}, {"_id": "2.3", "description": "test 2.3", "level": 1, "result": "Not Implemented"}]}\n' if __name__ == '__main__': diff --git a/tests/unit/test_output_text.py b/tests/unit/test_output_text.py index 761df2c..f2d353b 100644 --- a/tests/unit/test_output_text.py +++ b/tests/unit/test_output_text.py @@ -5,34 +5,49 @@ from cis_audit import CISAudit results = [ - ('1', 'section header'), - ('1.1', 'subsection header'), - ('1.1.1', 'test 1.1.1', 1, 'Pass', '1ms'), - ('2', 'section header'), - ('2.1', 'test 2.1', 1, 'Fail', '10ms'), - ('2.2', 'test 2.2', 2, 'Pass', '100ms'), - ('2.3', 'test 2.3', 1, 'Not Implemented'), + {'_id': '1', 'description': 'section header'}, + {'_id': '1.1', 'description': 'subsection header'}, + {'_id': '1.1.1', 'description': 'test 1.1.1', 'level': 1, 'result': 'Pass', 'duration': '1ms'}, + {'_id': '2', 'description': 'section header'}, + {'_id': '2.1', 'description': 'test 2.1', 'level': 1, 'result': 'Fail', 'duration': '10ms'}, + {'_id': '2.2', 'description': 'test 2.2', 'level': 2, 'result': 'Pass', 'duration': '100ms'}, + {'_id': '2.3', 'description': 'test 2.3', 'level': 1, 'result': 'Not Implemented'}, ] +host_os = 
'CentOS 7' +benchmark_version = '3.1.2' +stats = { + 'passed': 5, + 'failed': 3, + 'skipped': 2, + 'errors': 1, + 'duration': 20, + 'total': 9, +} + def test_output_text(capsys): - CISAudit().output_text(data=results) + CISAudit().output_text(results=results, host_os=host_os, benchmark_version=benchmark_version, stats=stats) output, error = capsys.readouterr() print(output) assert error == '' - assert output.split('\n')[0] == "ID Description Level Result Duration" - assert output.split('\n')[1] == "----- ----------------- ----- --------------- --------" - assert output.split('\n')[2] == "" - assert output.split('\n')[3] == "1 section header " - assert output.split('\n')[4] == "1.1 subsection header " - assert output.split('\n')[5] == "1.1.1 test 1.1.1 1 Pass 1ms" - assert output.split('\n')[6] == "" - assert output.split('\n')[7] == "2 section header " - assert output.split('\n')[8] == "2.1 test 2.1 1 Fail 10ms" - assert output.split('\n')[9] == "2.2 test 2.2 2 Pass 100ms" - assert output.split('\n')[10] == "2.3 test 2.3 1 Not Implemented " + assert output.split('\n')[0] == "CIS CentOS 7 Benchmark v3.1.2 Results" + assert output.split('\n')[1] == "-------------------------------------" + assert output.split('\n')[2] == "ID Description Level Result Duration" + assert output.split('\n')[3] == "----- ----------------- ----- --------------- --------" + assert output.split('\n')[4] == "" + assert output.split('\n')[5] == "1 section header " + assert output.split('\n')[6] == "1.1 subsection header " + assert output.split('\n')[7] == "1.1.1 test 1.1.1 1 Pass 1ms" + assert output.split('\n')[8] == "" + assert output.split('\n')[9] == "2 section header " + assert output.split('\n')[10] == "2.1 test 2.1 1 Fail 10ms" + assert output.split('\n')[11] == "2.2 test 2.2 2 Pass 100ms" + assert output.split('\n')[12] == "2.3 test 2.3 1 Not Implemented " + assert output.split('\n')[13] == "" + assert output.split('\n')[14] == "Passed 5 of 9 tests in 20 seconds (2 Skipped, 1 Errors)" if __name__ == '__main__': diff --git a/tests/unit/test_run_tests.py b/tests/unit/test_run_tests.py index 8d0cd33..06e10f1 100644 --- a/tests/unit/test_run_tests.py +++ b/tests/unit/test_run_tests.py @@ -51,35 +51,35 @@ def test_run_tests_pass(self): test_args['function'] = mock_run_tests_pass result = self.test.run_tests([test_args]) - assert result == [(test_args['_id'], test_args['description'], test_args['levels']['server'], 'Pass', '0ms')] + assert result == [{'_id': test_args['_id'], 'description': test_args['description'], 'level': test_args['levels']['server'], 'result': 'Pass', 'duration': '0ms'}] def test_run_tests_fail(self): test_args = self.test_args.copy() test_args['function'] = mock_run_tests_fail result = self.test.run_tests([test_args]) - assert result == [(test_args['_id'], test_args['description'], test_args['levels']['server'], 'Fail', '0ms')] + assert result == [{'_id': test_args['_id'], 'description': test_args['description'], 'level': test_args['levels']['server'], 'result': 'Fail', 'duration': '0ms'}] def test_run_tests_error(self): test_args = self.test_args.copy() test_args['function'] = mock_run_tests_error result = self.test.run_tests([test_args]) - assert result == [(test_args['_id'], test_args['description'], test_args['levels']['server'], 'Error', '0ms')] + assert result == [{'_id': test_args['_id'], 'description': test_args['description'], 'level': test_args['levels']['server'], 'result': 'Error', 'duration': '0ms'}] def test_run_tests_exception(self): test_args = self.test_args.copy() 
test_args['function'] = mock_run_tests_exception result = self.test.run_tests([test_args]) - assert result == [(test_args['_id'], test_args['description'], test_args['levels']['server'], 'Error', '0ms')] + assert result == [{'_id': test_args['_id'], 'description': test_args['description'], 'level': test_args['levels']['server'], 'result': 'Error', 'duration': '0ms'}] def test_run_tests_skipped(self): test_args = self.test_args.copy() test_args['function'] = mock_run_tests_skipped result = self.test.run_tests([test_args]) - assert result == [(test_args['_id'], test_args['description'], test_args['levels']['server'], 'Skipped', '0ms')] + assert result == [{'_id': test_args['_id'], 'description': test_args['description'], 'level': test_args['levels']['server'], 'result': 'Skipped', 'duration': '0ms'}] def test_run_tests_kwargs(self): test_args = self.test_args.copy() @@ -88,28 +88,28 @@ def test_run_tests_kwargs(self): test_args.pop('levels') result = self.test.run_tests([test_args]) - assert result == [(test_args['_id'], test_args['description'], None, 'Pass', '0ms')] + assert result == [{'_id': test_args['_id'], 'description': test_args['description'], 'level': None, 'result': 'Pass', 'duration': '0ms'}] def test_run_tests_type_header(self): test_args = self.test_args.copy() test_args['type'] = 'header' result = self.test.run_tests([test_args]) - assert result == [(test_args['_id'], test_args["description"])] + assert result == [{'_id': test_args['_id'], 'description': test_args['description']}] def test_run_tests_type_manual(self): test_args = self.test_args.copy() test_args['type'] = 'manual' result = self.test.run_tests([test_args]) - assert result == [(test_args['_id'], test_args["description"], test_args['levels']['server'], 'Manual')] + assert result == [{'_id': test_args['_id'], 'description': test_args['description'], 'level': test_args['levels']['server'], 'result': 'Manual'}] def test_run_tests_type_none(self, caplog): test_args = self.test_args.copy() test_args.pop('type', None) result = self.test.run_tests([test_args]) - assert result == [('1.1', 'pytest', 1, 'Not Implemented')] + assert result == [{'_id': test_args['_id'], 'description': test_args['description'], 'level': test_args['levels']['server'], 'result': 'Not Implemented'}] assert caplog.records[0].msg == "Test 1.1 does not explicitly define a type, so assuming it is a test" assert caplog.records[1].msg == "Checking whether to run test 1.1" assert caplog.records[2].msg == "Including test 1.1" @@ -119,14 +119,14 @@ def test_run_tests_type_skip(self, caplog): test_args['type'] = 'skip' result = self.test.run_tests([test_args]) - assert result == [(test_args['_id'], test_args["description"], test_args['levels']['server'], 'Skipped')] + assert result == [{'_id': test_args['_id'], 'description': test_args['description'], 'level': test_args['levels']['server'], 'result': 'Skipped'}] def test_run_tests_error_not_implemented(self, caplog): test_args = self.test_args.copy() test_args.pop('type') result = self.test.run_tests([test_args]) - assert result == [(test_args['_id'], test_args["description"], test_args['levels']['server'], 'Not Implemented')] + assert result == [{'_id': test_args['_id'], 'description': test_args['description'], 'level': test_args['levels']['server'], 'result': 'Not Implemented'}] if __name__ == '__main__':
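
Usage note: a minimal sketch of how the refactored pieces in this diff fit together, i.e. dict-based results, result_stats(), and the extended output() signature. It mirrors the unit tests above (CISAudit constructed with no arguments); the sample result entries are illustrative only, not taken from a real audit run.

from datetime import datetime

from cis_audit import CISAudit

audit = CISAudit()  # the unit tests above construct CISAudit() without a config object

# Results are now a list of dicts keyed by '_id' and 'description', with optional
# 'level', 'result' and 'duration' keys (these two entries are illustrative only).
results = [
    {'_id': '1', 'description': 'Initial Setup'},
    {'_id': '1.1.1', 'description': 'Example test', 'level': 1, 'result': 'Pass', 'duration': '1ms'},
]

start_time = datetime.utcnow()
end_time = datetime.utcnow()

# result_stats() derives pass/fail/skip/error counts, a total, and a rounded duration.
stats = audit.result_stats(results, start_time, end_time)

# output() now also requires host_os, benchmark_version and stats for the title and trailer lines.
audit.output(format='text', results=results, host_os='CentOS 7', benchmark_version='3.1.2', stats=stats)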