24.3 Universal config for crossing out failed tests #514

Open · wants to merge 8 commits into base: 24.3.5-snapshot_runners
10 changes: 10 additions & 0 deletions docker/test/util/process_functional_tests_result.py
@@ -4,6 +4,7 @@
import logging
import argparse
import csv
import json

OK_SIGN = "[ OK "
FAIL_SIGN = "[ FAIL "
@@ -206,13 +207,22 @@ def write_results(results_file, status_file, results, status):
parser.add_argument("--out-results-file", default="/test_output/test_results.tsv")
parser.add_argument("--out-status-file", default="/test_output/check_status.tsv")
parser.add_argument("--broken-tests", default="/analyzer_tech_debt.txt")
parser.add_argument("--broken-tests-json", default="/broken_tests.json")
args = parser.parse_args()

broken_tests = list()
if os.path.exists(args.broken_tests):
logging.info(f"File {args.broken_tests} with broken tests found")
with open(args.broken_tests) as f:
broken_tests = f.read().splitlines()

if os.path.exists(args.broken_tests_json):
logging.info(f"File {args.broken_tests_json} with broken tests found")

with open(args.broken_tests_json) as f:
broken_tests.extend(json.load(f).keys())

if broken_tests:
logging.info(f"Broken tests in the list: {len(broken_tests)}")

state, description, test_results = process_result(args.in_results_dir, broken_tests)
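
For context (not part of the diff): with only the new JSON config mounted, the merge above reduces to taking the keys of the file, roughly as in the sketch below. The /broken_tests.json path is the --broken-tests-json default, and the printed names assume the sample tests/broken_tests.json added in this PR.

import json

# A sketch of the merge result when only the JSON config is present.
with open("/broken_tests.json") as f:
    broken_tests = list(json.load(f).keys())

# Functional-test names and integration-test IDs land in the same flat list;
# this script only ever compares against functional test names, so the
# integration-style keys simply never match here.
print(len(broken_tests))  # 6 entries in the sample file
print(broken_tests[0])    # test_postgresql_replica_database_engine_2/test.py::test_quoting_publication
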
22 changes: 22 additions & 0 deletions tests/broken_tests.json
@@ -0,0 +1,22 @@
{
    "test_postgresql_replica_database_engine_2/test.py::test_quoting_publication": {
        "message": "DB::Exception: Syntax error:",
        "reason": "syntax error"
    },
    "test_distributed_inter_server_secret/test.py::test_secure_cluster_distributed_over_distributed_different_users": {
        "message": "DB::NetException: Connection reset by peer, while reading from socket",
        "reason": "network issue"
    },
    "02920_alter_column_of_projections": {
        "reason": "requires different settings"
    },
    "02888_system_tables_with_inaccsessible_table_function": {
        "reason": "todo investigate"
    },
    "03094_grouparraysorted_memory": {
        "reason": "flaky"
    },
    "02700_s3_part_INT_MAX": {
        "reason": "fails with asan"
    }
}
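
A note on the schema as consumed by the diffs in this PR: keys are either integration test IDs ("<module>/test.py::<test>") or functional test names; "message" is an optional log substring that only the integration runner checks before crossing a test out, and "reason" is purely informational. The sketch below is not part of the PR; it just prints how each entry will be treated, assuming it is run from the repository root.

import json

with open("tests/broken_tests.json", encoding="utf-8") as f:
    config = json.load(f)

for test_name, entry in config.items():
    kind = "integration" if "::" in test_name else "functional"
    needs_log_match = "message" in entry
    note = " [crossed out only if the message is found in the run log]" if needs_log_match else ""
    print(f"{kind:11} {test_name}: {entry.get('reason', 'no reason given')}{note}")
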
3 changes: 3 additions & 0 deletions tests/ci/functional_test_check.py
@@ -97,6 +97,9 @@ def get_run_command(
if "analyzer" not in check_name
else ""
)
volume_with_broken_test += (
f"--volume={repo_path}/tests/broken_tests.json:/broken_tests.json "
)

return (
f"docker run --volume={builds_path}:/package_folder "
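
For illustration only, roughly what the appended flag contributes to the docker run command assembled by get_run_command; repo_path and check_name are made-up values, and the container-side /broken_tests.json path lines up with the --broken-tests-json default in process_functional_tests_result.py.

# Sketch of the volume flags produced above (illustrative values, not a real run).
repo_path = "/home/runner/ClickHouse"
check_name = "Stateless tests (release)"

volume_with_broken_test = (
    f"--volume={repo_path}/tests/analyzer_tech_debt.txt:/analyzer_tech_debt.txt "
    if "analyzer" not in check_name
    else ""
)
volume_with_broken_test += (
    f"--volume={repo_path}/tests/broken_tests.json:/broken_tests.json "
)
# Both --volume flags are emitted as a single string appended to the docker run command.
print(volume_with_broken_test)
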
35 changes: 35 additions & 0 deletions tests/ci/integration_tests_runner.py
@@ -474,6 +474,19 @@ def _get_parallel_tests_skip_list(repo_path):
            skip_list_tests = json.load(skip_list_file)
        return list(sorted(skip_list_tests))

    @staticmethod
    def _get_broken_tests_list(repo_path: str) -> dict:
        skip_list_file_path = f"{repo_path}/tests/broken_tests.json"
        if (
            not os.path.isfile(skip_list_file_path)
            or os.path.getsize(skip_list_file_path) == 0
        ):
            return {}

        with open(skip_list_file_path, "r", encoding="utf-8") as skip_list_file:
            skip_list_tests = json.load(skip_list_file)
        return skip_list_tests

    @staticmethod
    def group_test_by_file(tests):
        result = {}  # type: Dict
@@ -891,6 +904,8 @@ def run_impl(self, repo_path, build_path):
" ".join(not_found_tests[:3]),
)

known_broken_tests = self._get_broken_tests_list(repo_path)

grouped_tests = self.group_test_by_file(filtered_sequential_tests)
i = 0
for par_group in chunks(filtered_parallel_tests, PARALLEL_GROUP_SIZE):
@@ -921,6 +936,26 @@
            group_counters, group_test_times, log_paths = self.try_run_test_group(
                repo_path, group, tests, MAX_RETRY, NUM_WORKERS
            )

            for fail_status in ("ERROR", "FAILED"):
                # Iterate over a copy: matching tests are removed from the list below.
                for failed_test in list(group_counters[fail_status]):
                    if failed_test in known_broken_tests:
                        fail_message = known_broken_tests[failed_test].get("message")
                        if not fail_message:
                            mark_as_broken = True
                        else:
                            mark_as_broken = False
                            for log_path in log_paths:
                                if log_path.endswith(".log"):
                                    with open(log_path) as log_file:
                                        if fail_message in log_file.read():
                                            mark_as_broken = True
                                            break

                        if mark_as_broken:
                            group_counters[fail_status].remove(failed_test)
                            group_counters["BROKEN"].append(failed_test)

            total_tests = 0
            for counter, value in group_counters.items():
                logging.info(
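
The crossing-out logic above can be exercised on its own; the sketch below reproduces it with a fake counter set and a fake log file (the test name, message, and log content are illustrative, not taken from a real run).

import tempfile

# Illustrative config entry and counters, mimicking the structures used in run_impl.
known_broken_tests = {
    "test_example/test.py::test_flaky": {
        "message": "Connection reset by peer",
        "reason": "network issue",
    },
}
group_counters = {"ERROR": [], "FAILED": ["test_example/test.py::test_flaky"], "BROKEN": []}

# Fake run log that happens to contain the expected failure message.
log = tempfile.NamedTemporaryFile(mode="w", suffix=".log", delete=False)
log.write("... DB::NetException: Connection reset by peer, while reading from socket ...")
log.close()
log_paths = [log.name]

for fail_status in ("ERROR", "FAILED"):
    # Iterate over a copy because matching tests are removed from the list.
    for failed_test in list(group_counters[fail_status]):
        if failed_test not in known_broken_tests:
            continue
        fail_message = known_broken_tests[failed_test].get("message")
        mark_as_broken = not fail_message
        if fail_message:
            for log_path in log_paths:
                if log_path.endswith(".log"):
                    with open(log_path) as log_file:
                        if fail_message in log_file.read():
                            mark_as_broken = True
                            break
        if mark_as_broken:
            group_counters[fail_status].remove(failed_test)
            group_counters["BROKEN"].append(failed_test)

print(group_counters)
# {'ERROR': [], 'FAILED': [], 'BROKEN': ['test_example/test.py::test_flaky']}
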
2 changes: 1 addition & 1 deletion tests/integration/compose/docker_compose_hdfs.yml
@@ -1,7 +1,7 @@
version: '2.3'
services:
  hdfs1:
-    image: sequenceiq/hadoop-docker:2.7.0
+    image: prasanthj/docker-hadoop:2.6.0
    hostname: hdfs1
    restart: always
    expose: