Merge branch 'trunk' into KAFKA-17285
bboyleonp666 committed Sep 4, 2024
2 parents 967b601 + 59f5d91 commit e13c873
Showing 663 changed files with 13,623 additions and 9,823 deletions.
3 changes: 2 additions & 1 deletion .github/actions/setup-gradle/action.yml
@@ -37,12 +37,13 @@ runs:
distribution: temurin
java-version: ${{ inputs.java-version }}
- name: Setup Gradle
- uses: gradle/actions/setup-gradle@v4
+ uses: gradle/actions/setup-gradle@af1da67850ed9a4cedd57bfd976089dd991e2582 # v4.0.0
env:
GRADLE_BUILD_ACTION_CACHE_DEBUG_ENABLED: true
with:
gradle-version: wrapper
develocity-access-key: ${{ inputs.develocity-access-key }}
+ develocity-token-expiry: 4
cache-read-only: ${{ inputs.gradle-cache-read-only }}
# Cache downloaded JDKs in addition to the default directories.
gradle-home-cache-includes: |
98 changes: 98 additions & 0 deletions .github/scripts/checkstyle.py
@@ -0,0 +1,98 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from glob import glob
import logging
import os
import os.path
import sys
from typing import Tuple, Optional
import xml.etree.ElementTree


logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stderr)
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)


def get_env(key: str) -> str:
value = os.getenv(key)
logger.debug(f"Read env {key}: {value}")
return value


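# Stream-parses one checkstyle XML report and prints a GitHub annotation for every
# <error> element, with file paths made relative to the workspace.
# Returns a (file_count, error_count) tuple for the report.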
def parse_report(workspace_path, fp) -> Tuple[int, int]:
stack = []
errors = []
file_count = 0
error_count = 0
for (event, elem) in xml.etree.ElementTree.iterparse(fp, events=["start", "end"]):
if event == "start":
stack.append(elem)
if elem.tag == "file":
file_count += 1
errors.clear()
if elem.tag == "error":
logger.debug(f"Found checkstyle error: {elem.attrib}")
errors.append(elem)
error_count += 1
elif event == "end":
if elem.tag == "file" and len(errors) > 0:
filename = elem.get("name")
rel_path = os.path.relpath(filename, workspace_path)
logger.debug(f"Outputting errors for file: {elem.attrib}")
for error in errors:
line = error.get("line")
col = error.get("column")
severity = error.get("severity")
message = error.get('message')
title = f"Checkstyle {severity}"
print(f"::notice file={rel_path},line={line},col={col},title={title}::{message}")
stack.pop()
else:
logger.error(f"Unhandled xml event {event}: {elem}")
return file_count, error_count


if __name__ == "__main__":
"""
Parse checkstyle XML reports and generate GitHub annotations.
See: https://docs.github.com/en/actions/writing-workflows/choosing-what-your-workflow-does/workflow-commands-for-github-actions#setting-a-notice-message
"""
if not os.getenv("GITHUB_WORKSPACE"):
print("This script is intended to by run by GitHub Actions.")
exit(1)

reports = glob(pathname="**/checkstyle/*.xml", recursive=True)
logger.debug(f"Found {len(reports)} checkstyle reports")
total_file_count = 0
total_error_count = 0

workspace_path = get_env("GITHUB_WORKSPACE") # e.g., /home/runner/work/apache/kafka

for report in reports:
with open(report, "r") as fp:
logger.debug(f"Parsing report file: {report}")
file_count, error_count = parse_report(workspace_path, fp)
if error_count == 1:
logger.debug(f"Checked {file_count} files from {report} and found 1 error")
else:
logger.debug(f"Checked {file_count} files from {report} and found {error_count} errors")
total_file_count += file_count
total_error_count += error_count
exit(0)
256 changes: 256 additions & 0 deletions .github/scripts/junit.py
@@ -0,0 +1,256 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import dataclasses
from functools import partial
from glob import glob
import logging
import os
import os.path
import sys
from typing import Tuple, Optional, List, Iterable
import xml.etree.ElementTree


logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stderr)
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)

PASSED = "PASSED ✅"
FAILED = "FAILED ❌"
FLAKY = "FLAKY ⚠️ "
SKIPPED = "SKIPPED 🙈"


def get_env(key: str) -> str:
value = os.getenv(key)
logger.debug(f"Read env {key}: {value}")
return value


@dataclasses.dataclass
class TestCase:
test_name: str
class_name: str
time: float
failure_message: Optional[str]
failure_class: Optional[str]
failure_stack_trace: Optional[str]

def key(self) -> Tuple[str, str]:
return self.class_name, self.test_name


@dataclasses.dataclass
class TestSuite:
name: str
path: str
tests: int
skipped: int
failures: int
errors: int
time: float
failed_tests: List[TestCase]
skipped_tests: List[TestCase]
passed_tests: List[TestCase]


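# Stream-parses one JUnit XML report and yields a TestSuite for each <testsuite> element,
# sorting its test cases into passed, failed, and skipped lists as they are encountered.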
def parse_report(workspace_path, report_path, fp) -> Iterable[TestSuite]:
cur_suite: Optional[TestSuite] = None
partial_test_case = None
test_case_failed = False
for (event, elem) in xml.etree.ElementTree.iterparse(fp, events=["start", "end"]):
if event == "start":
if elem.tag == "testsuite":
name = elem.get("name")
tests = int(elem.get("tests", 0))
skipped = int(elem.get("skipped", 0))
failures = int(elem.get("failures", 0))
errors = int(elem.get("errors", 0))
suite_time = float(elem.get("time", 0.0))
cur_suite = TestSuite(name, report_path, tests, skipped, failures, errors, suite_time, [], [], [])
elif elem.tag == "testcase":
test_name = elem.get("name")
class_name = elem.get("classname")
test_time = float(elem.get("time", 0.0))
partial_test_case = partial(TestCase, test_name, class_name, test_time)
test_case_failed = False
elif elem.tag == "failure":
failure_message = elem.get("message")
failure_class = elem.get("type")
failure_stack_trace = elem.text
failure = partial_test_case(failure_message, failure_class, failure_stack_trace)
cur_suite.failed_tests.append(failure)
test_case_failed = True
elif elem.tag == "skipped":
skipped = partial_test_case(None, None, None)
cur_suite.skipped_tests.append(skipped)
else:
pass
elif event == "end":
if elem.tag == "testcase":
if not test_case_failed:
passed = partial_test_case(None, None, None)
cur_suite.passed_tests.append(passed)
partial_test_case = None
elif elem.tag == "testsuite":
yield cur_suite
cur_suite = None
else:
logger.error(f"Unhandled xml event {event}: {elem}")


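# Formats a duration given in seconds as a compact string, e.g. 3725 seconds -> "1h2m5s".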
def pretty_time_duration(seconds: float) -> str:
time_min, time_sec = divmod(int(seconds), 60)
time_hour, time_min = divmod(time_min, 60)
time_fmt = ""
if time_hour > 0:
time_fmt += f"{time_hour}h"
if time_min > 0:
time_fmt += f"{time_min}m"
time_fmt += f"{time_sec}s"
return time_fmt


if __name__ == "__main__":
"""
Parse JUnit XML reports and generate GitHub job summary in Markdown format.
A Markdown summary of the test results is written to stdout. This should be redirected to $GITHUB_STEP_SUMMARY
within the action. Additional debug logs are written to stderr.
Exits with status code 0 if no tests failed or errored, 1 otherwise.
"""
parser = argparse.ArgumentParser(description="Parse JUnit XML results.")
parser.add_argument("--path",
required=False,
default="**/test-results/**/*.xml",
help="Path to XML files. Glob patterns are supported.")

if not os.getenv("GITHUB_WORKSPACE"):
print("This script is intended to by run by GitHub Actions.")
exit(1)

args = parser.parse_args()

reports = glob(pathname=args.path, recursive=True)
logger.debug(f"Found {len(reports)} JUnit results")
workspace_path = get_env("GITHUB_WORKSPACE") # e.g., /home/runner/work/apache/kafka

total_file_count = 0
total_run = 0 # All test runs according to <testsuite tests="N"/>
total_skipped = 0 # All skipped tests according to <testsuite skipped="N"/>
total_errors = 0 # All test errors according to <testsuite errors="N"/>
total_time = 0 # All test time according to <testsuite time="N"/>
total_failures = 0 # All unique test names that only failed. Re-run tests not counted
total_flaky = 0 # All unique test names that failed and succeeded
total_success = 0 # All unique test names that only succeeded. Re-runs not counted
total_tests = 0 # All unique test names that were run. Re-runs not counted

failed_table = []
flaky_table = []
skipped_table = []

for report in reports:
with open(report, "r") as fp:
logger.debug(f"Parsing {report}")
for suite in parse_report(workspace_path, report, fp):
total_skipped += suite.skipped
total_errors += suite.errors
total_time += suite.time
total_run += suite.tests

# Due to how the Develocity Test Retry plugin interacts with our generated ClusterTests, we can see
# tests pass and then fail in the same run. Because of this, we need to capture all passed and all
# failed for each suite. Then we can find flakes by taking the intersection of those two.
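# For example, a test that shows up in both all_suite_passed and all_suite_failed for this
# suite is counted as flaky rather than as a failure or a success.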
all_suite_passed = {test.key() for test in suite.passed_tests}
all_suite_failed = {test.key(): test for test in suite.failed_tests}
flaky = all_suite_passed & all_suite_failed.keys()
all_tests = all_suite_passed | all_suite_failed.keys()
total_tests += len(all_tests)
total_flaky += len(flaky)
total_failures += len(all_suite_failed) - len(flaky)
total_success += len(all_suite_passed) - len(flaky)

# Display failures first. Iterate across the unique failed tests to avoid duplicates in table.
for test_failure in all_suite_failed.values():
if test_failure.key() in flaky:
continue
logger.debug(f"Found test failure: {test_failure}")
simple_class_name = test_failure.class_name.split(".")[-1]
failed_table.append((simple_class_name, test_failure.test_name, test_failure.failure_message, f"{test_failure.time:0.2f}s"))
for test_failure in all_suite_failed.values():
if test_failure.key() not in flaky:
continue
logger.debug(f"Found flaky test: {test_failure}")
simple_class_name = test_failure.class_name.split(".")[-1]
flaky_table.append((simple_class_name, test_failure.test_name, test_failure.failure_message, f"{test_failure.time:0.2f}s"))
for skipped_test in suite.skipped_tests:
simple_class_name = skipped_test.class_name.split(".")[-1]
logger.debug(f"Found skipped test: {skipped_test}")
skipped_table.append((simple_class_name, skipped_test.test_name))
duration = pretty_time_duration(total_time)
logger.info(f"Finished processing {len(reports)} reports")

# Print summary
report_url = get_env("REPORT_URL")
report_md = f"Download [HTML report]({report_url})."
summary = (f"{total_run} tests cases run in {duration}. "
f"{total_success} {PASSED}, {total_failures} {FAILED}, "
f"{total_flaky} {FLAKY}, {total_skipped} {SKIPPED}, and {total_errors} errors.")
print("## Test Summary\n")
print(f"{summary} {report_md}\n")
if len(failed_table) > 0:
logger.info(f"Found {len(failed_table)} test failures:")
print("### Failed Tests\n")
print(f"| Module | Test | Message | Time |")
print(f"| ------ | ---- | ------- | ---- |")
for row in failed_table:
logger.info(f"{FAILED} {row[0]} > {row[1]}")
row_joined = " | ".join(row)
print(f"| {row_joined} |")
print("\n")
if len(flaky_table) > 0:
logger.info(f"Found {len(flaky_table)} flaky test failures:")
print("### Flaky Tests\n")
print(f"| Module | Test | Message | Time |")
print(f"| ------ | ---- | ------- | ---- |")
for row in flaky_table:
logger.info(f"{FLAKY} {row[0]} > {row[1]}")
row_joined = " | ".join(row)
print(f"| {row_joined} |")
print("\n")
if len(skipped_table) > 0:
print("<details>")
print(f"<summary>{len(skipped_table)} Skipped Tests</summary>\n")
print(f"| Module | Test |")
print(f"| ------ | ---- |")
for row in skipped_table:
row_joined = " | ".join(row)
print(f"| {row_joined} |")
print("\n</details>")

logger.debug(summary)
if total_failures > 0:
logger.debug(f"Failing this step due to {total_failures} test failures")
exit(1)
elif total_errors > 0:
logger.debug(f"Failing this step due to {total_errors} test errors")
exit(1)
else:
exit(0)