Feature/fix typos in readme (#7)
- Fixed wrong characters in README and in code: `_` was used where `-` was intended.
- README improvements.
miroslavpojer authored Jun 20, 2024
1 parent 6016e61 commit 61c9cc9
Showing 4 changed files with 61 additions and 49 deletions.
28 changes: 16 additions & 12 deletions README.md
@@ -21,7 +21,7 @@ The **Filename Inspector** GitHub Action is designed to ensure naming convention
This action scans the specified `paths` for project files and checks if their file names fit the `name_patterns`. It reports the count of files not meeting the naming conventions, with options to fail the action if violations are found.

## Inputs
-### `name_patterns`
+### `name-patterns`
- **Description**: Comma-separated list of file name [glob](https://en.wikipedia.org/wiki/Glob_(programming)) patterns that project file names must match.
- **Required**: Yes
- **Example**: `*UnitTest.*,*IntegrationTest.*`
@@ -36,27 +36,31 @@ This action scans the specified `paths` for project files and checks if their fi
- **Required**: No
- **Default**: ``

-### `report_format`
+### `report-format`
- **Description**: Specifies the format of the output report. Options include `console`, `csv`, and `json`.
- **Required**: No
- **Default**: `console`
- **Options**:
  - `console`: Prints the list of violating files to the console.
  - `csv`: Generates a CSV file with the report. Violating files are not printed to the console unless verbose logging is enabled; the path to the report file is provided in the `report-path` output.
  - `json`: Generates a JSON file with the report. Violating files are not printed to the console unless verbose logging is enabled; the path to the report file is provided in the `report-path` output.

-### `verbose_logging`
+### `verbose-logging`
- **Description**: Enable verbose logging to provide detailed output during the action’s execution, aiding in troubleshooting and setup.
- **Required**: No
- **Default**: `false`

-### `fail_on_violation`
+### `fail-on-violation`
- **Description**: Set to `true` to fail the action if any convention violations are detected; set to `false` to continue without failing.
- **Required**: No
- **Default**: `false`

## Outputs
-### `violation_count`
+### `violation-count`
- **Description**: Count of files not complying with the specified file name patterns.

-### `report_path`
-- **Description**: Path to the generated report file.
+### `report-path`
+- **Description**: Path to the generated report file. **Not used** if the `report-format` is set to `console`.

## Usage Example
### Default
@@ -72,7 +76,7 @@ jobs:
        id: scan-test-files
        uses: AbsaOSS/filename-inspector@<version>
        with:
-          name_patterns: '*UnitTest.*,*IntegrationTest.*'
+          name-patterns: '*UnitTest.*,*IntegrationTest.*'
          paths: '**/src/test/java/**,**/src/test/scala/**'
```
@@ -90,12 +94,12 @@ jobs:
        id: scan-test-files
        uses: AbsaOSS/filename-inspector@<version>
        with:
-          name_patterns: '*UnitTest.*,*IntegrationTest.*'
+          name-patterns: '*UnitTest.*,*IntegrationTest.*'
          paths: '**/src/test/java/**,**/src/test/scala/**'
          excludes: 'src/exclude_dir/*.py,tests/exclude_file.py'
-          report_format: 'console'
-          verbose_logging: 'false'
-          fail_on_violation: 'false'
+          report-format: 'console'
+          verbose-logging: 'false'
+          fail-on-violation: 'false'
```
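A note on consuming the outputs: when `report-format` is `csv` or `json`, a follow-up step can read the file named by the `report-path` output and post-process it. Below is a minimal sketch in Python, assuming the `{'violations': [...]}` JSON shape produced in `src/filename_inspector.py` further down; the hard-coded path stands in for `${{ steps.scan-test-files.outputs.report-path }}`.

```python
import json
import sys

# Stand-in for the `report-path` output of the scan step
# (e.g. ${{ steps.scan-test-files.outputs.report-path }}).
report_path = 'violations.json'

with open(report_path) as f:
    report = json.load(f)

# The action writes {'violations': [<offending file paths>]}.
violations = report['violations']
print(f'{len(violations)} file(s) violate the naming conventions:')
for path in violations:
    print(f'  - {path}')

# Mirror fail-on-violation in a custom gate: a non-zero exit fails the job.
sys.exit(1 if violations else 0)
```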
## How to build
2 changes: 1 addition & 1 deletion action.yml
@@ -28,7 +28,7 @@ outputs:
    description: 'Count of files that do not comply with the specified file name conventions.'
    value: ${{ steps.scan-test-files.outputs.violation_count }}
  report-path:
-    description: 'Path to the generated report file.'
+    description: "Path to the generated report file. Not used when 'console' is selected as the report format."
    value: ${{ steps.scan-test-files.outputs.report_path }}
runs:
  using: 'composite'
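The `value:` mappings above work through the `GITHUB_OUTPUT` protocol: the Python step appends `name=value` lines to the file that the runner exposes via the `GITHUB_OUTPUT` environment variable, and the composite action republishes those names as its own outputs, so the key a step writes must match the name referenced in `steps.<id>.outputs.<name>`. A minimal local sketch of that round trip, reusing the `set_action_output` helper from the diff below with a temporary file in place of the runner-provided one:

```python
import os
import tempfile


def set_action_output(name: str, value: str, default_output_path: str = "default_output.txt"):
    # Same helper as in src/filename_inspector.py: append a name=value line
    # to the file that GITHUB_OUTPUT points at.
    output_file = os.getenv('GITHUB_OUTPUT', default_output_path)
    with open(output_file, 'a') as f:
        f.write(f'{name}={value}\n')


with tempfile.NamedTemporaryFile(mode='r', suffix='.txt') as tmp:
    os.environ['GITHUB_OUTPUT'] = tmp.name  # stand-in for the runner's file
    set_action_output('violation-count', '3')
    set_action_output('report-path', 'violations.json')
    print(tmp.read(), end='')
    # violation-count=3
    # report-path=violations.json
```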
44 changes: 26 additions & 18 deletions src/filename_inspector.py
@@ -9,18 +9,25 @@
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


-def get_input(name: str) -> str:
-    value = os.getenv(f'INPUT_{name.upper()}')
-    return value
+INPUT_NAME_PATTERNS = "INPUT_NAME_PATTERNS"
+INPUT_PATHS = "INPUT_PATHS"
+INPUT_EXCLUDES = "INPUT_EXCLUDES"
+INPUT_REPORT_FORMAT = "INPUT_REPORT_FORMAT"
+INPUT_VERBOSE_LOGGING = "INPUT_VERBOSE_LOGGING"
+INPUT_FAIL_ON_VIOLATION = "INPUT_FAIL_ON_VIOLATION"


-def set_output(name: str, value: str, default_output_path: str = "default_output.txt"):
+def get_action_input(name: str) -> str:
+    return os.getenv(name)
+
+
+def set_action_output(name: str, value: str, default_output_path: str = "default_output.txt"):
    output_file = os.getenv('GITHUB_OUTPUT', default_output_path)
    with open(output_file, 'a') as f:
        f.write(f'{name}={value}\n')


-def set_failed(message: str):
+def set_action_failed(message: str):
    logging.error(message)
    exit(1)

@@ -47,15 +54,15 @@ def find_non_matching_files(name_patterns, paths, excludes):

def run():
    try:
-        name_patterns_raw = get_input('name_patterns')
+        name_patterns_raw = get_action_input(INPUT_NAME_PATTERNS)
        name_patterns = name_patterns_raw.split(',') if name_patterns_raw else []
-        paths_raw = get_input('paths')
+        paths_raw = get_action_input(INPUT_PATHS)
        paths = paths_raw.split(',')
-        excludes_raw = get_input('excludes')
+        excludes_raw = get_action_input(INPUT_EXCLUDES)
        excludes = excludes_raw.split(',')
-        report_format = get_input('report_format').lower()
-        verbose_logging = get_input('verbose_logging').lower() == 'true'
-        fail_on_violation = get_input('fail_on_violation').lower() == 'true'
+        report_format = get_action_input(INPUT_REPORT_FORMAT).lower()
+        verbose_logging = get_action_input(INPUT_VERBOSE_LOGGING).lower() == 'true'
+        fail_on_violation = get_action_input(INPUT_FAIL_ON_VIOLATION).lower() == 'true'

        if verbose_logging:
            logging.getLogger().setLevel(logging.DEBUG)
@@ -69,28 +76,29 @@ def run():
        violations = find_non_matching_files(name_patterns, paths, excludes)
        violation_count = len(violations)

-        set_output('violation_count', str(violation_count))
+        set_action_output('violation-count', str(violation_count))

        logging.info(f'Total violations: {violation_count}')

        if report_format == 'console' or verbose_logging:
-            logging.warning(f'Total violations: {violation_count}')
-            logging.warning(f'Violating files: {violations}')
+            logging.info(f'Violating files: {violations}')

        if report_format == 'csv':
            with open('violations.csv', mode='w', newline='') as file:
                writer = csv.writer(file)
                writer.writerows([[violation] for violation in violations])
-            set_output('report_path', 'violations.csv')
+            set_action_output('report-path', 'violations.csv')
        elif report_format == 'json':
            with open('violations.json', mode='w') as file:
                json.dump({'violations': violations}, file)
-            set_output('report_path', 'violations.json')
+            set_action_output('report-path', 'violations.json')

        if fail_on_violation and violation_count > 0:
-            set_failed(f'There are {violation_count} test file naming convention violations.')
+            set_action_failed(f'There are {violation_count} test file naming convention violations.')

    except Exception as error:
        logging.error(f'Action failed with error: {error}')
-        set_failed(f'Action failed with error: {error}')
+        set_action_failed(f'Action failed with error: {error}')


if __name__ == '__main__':
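The body of `find_non_matching_files` is collapsed in this diff. From its signature and the calls around it, it resolves the `paths` globs, drops anything matching `excludes`, and collects files whose names match none of the `name_patterns`. A rough sketch of that logic with `glob` and `fnmatch` follows; this is an assumption about the elided body, not the repository's actual implementation.

```python
import glob
import os
from fnmatch import fnmatch


def find_non_matching_files_sketch(name_patterns, paths, excludes):
    violations = []
    for path_glob in paths:
        # `paths` entries are globs such as '**/src/test/java/**'.
        for file_path in glob.glob(path_glob, recursive=True):
            if not os.path.isfile(file_path):
                continue
            if any(fnmatch(file_path, exclude) for exclude in excludes):
                continue  # explicitly excluded
            file_name = os.path.basename(file_path)
            if not any(fnmatch(file_name, pattern) for pattern in name_patterns):
                violations.append(file_path)  # name fits no allowed pattern
    return violations


# Example: with name_patterns=['*UnitTest.*'], 'FooUnitTest.java' passes
# while 'FooTest.java' would be reported as a violation.
```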
36 changes: 18 additions & 18 deletions tests/test_filename_inspector.py
@@ -7,7 +7,7 @@
import os

from unittest.mock import patch, mock_open
-from src.filename_inspector import get_input, set_output, set_failed, run
+from src.filename_inspector import get_action_input, set_action_output, set_action_failed, run

# Constants
DEFAULT_NAME_PATTERNS = '*UnitTest.*,*IntegrationTest.*'
@@ -55,30 +55,30 @@ def getenv_mock(key, default=''):
    monkeypatch.setattr(os, 'getenv', getenv_mock)


-def mock_set_output(name, value):
+def mock_set_action_output(name, value):
    output_values[name] = value


-def mock_set_failed(message):
+def mock_set_action_failed(message):
    global failed_message
    failed_message = message


# Tests
-def test_set_output_env_var_set():
+def test_set_action_output_env_var_set():
    with tempfile.NamedTemporaryFile() as tmpfile:
        with patch.dict(os.environ, {'GITHUB_OUTPUT': tmpfile.name}):
-            set_output('test_name', 'test_value')
+            set_action_output('test_name', 'test_value')
            tmpfile.seek(0)
            assert tmpfile.read().decode() == 'test_name=test_value\n'


-def test_set_output_env_var_not_set():
+def test_set_action_output_env_var_not_set():
    with patch.dict(os.environ, {}, clear=True):
        # Using mock_open with an in-memory stream to simulate file writing
        mock_file = mock_open()
        with patch('builtins.open', mock_file):
-            set_output('test_name', 'test_value')
+            set_action_output('test_name', 'test_value')
            mock_file().write.assert_called_with('test_name=test_value\n')


@@ -87,17 +87,17 @@ def test_set_output_env_var_not_set():
    ('name2', 'value2', 'name2=value2\n'),
    ('foo', 'bar', 'foo=bar\n')
])
-def test_set_output_parametrized(name, value, expected):
+def test_set_action_output_parametrized(name, value, expected):
    with tempfile.NamedTemporaryFile() as tmpfile:
        with patch.dict(os.environ, {'GITHUB_OUTPUT': tmpfile.name}):
-            set_output(name, value)
+            set_action_output(name, value)
            tmpfile.seek(0)
            assert tmpfile.read().decode() == expected


-def test_get_input(mock_getenv):
+def test_get_action_input(mock_getenv):
    with patch('os.getenv', return_value='test_value') as mock_getenv_func:
-        assert get_input('test') == 'test_value'
+        assert get_action_input('INPUT_TEST') == 'test_value'
        mock_getenv_func.assert_called_with('INPUT_TEST')


@@ -108,7 +108,7 @@ def test_set_failed():
    with mock.patch('logging.error') as mock_log_error:
        # Test the SystemExit exception
        with pytest.raises(SystemExit) as pytest_wrapped_e:
-            set_failed(test_message)
+            set_action_failed(test_message)
        assert pytest_wrapped_e.type == SystemExit
        assert pytest_wrapped_e.value.code == 1

@@ -138,19 +138,19 @@ def getenv_mock(key, default=''):
        return env.get(key, 'test_value')

    monkeypatch.setattr(os, 'getenv', getenv_mock)
-    with (patch('src.filename_inspector.set_output', new=mock_set_output),
-          patch('src.filename_inspector.set_failed', new=mock_set_failed)):
+    with (patch('src.filename_inspector.set_action_output', new=mock_set_action_output),
+          patch('src.filename_inspector.set_action_failed', new=mock_set_action_failed)):
        run()
-        assert output_values['violation_count'] == str(expected_violation_count)
+        assert output_values['violation-count'] == str(expected_violation_count)
        if expected_report:
-            assert output_values['report_path'] == expected_report
+            assert output_values['report-path'] == expected_report
        if expected_failed_message:
            assert failed_message == expected_failed_message


def test_run_exception_handling():
-    with patch('src.filename_inspector.get_input', side_effect=Exception('Test exception')), \
-            patch('src.filename_inspector.set_failed', new=mock_set_failed):
+    with patch('src.filename_inspector.get_action_input', side_effect=Exception('Test exception')), \
+            patch('src.filename_inspector.set_action_failed', new=mock_set_action_failed):
        run()
        assert failed_message == 'Action failed with error: Test exception'

