
wip
chaws committed Oct 23, 2020
1 parent 5f4ab77 commit 1550d64
Showing 3 changed files with 101 additions and 20 deletions.
97 changes: 81 additions & 16 deletions squad/core/comparison.py
@@ -1,8 +1,9 @@
from collections import OrderedDict
from collections import OrderedDict, defaultdict
from django.db.models import F
from itertools import groupby
from functools import reduce
import statistics
import time


from squad.core.utils import parse_name, join_name, split_dict
@@ -43,11 +44,14 @@ class BaseComparison(object):
(name, env)
"""

def __init__(self, *builds):
def __init__(self, *builds, suites=None, offset=0, per_page=50):
self.builds = list(builds)
self.environments = OrderedDict()
self.all_environments = set()
self.results = OrderedDict()
self.suites = suites
self.offset = offset
self.per_page = per_page

for build in self.builds:
self.environments[build] = set()
@@ -164,22 +168,75 @@ class TestComparison(BaseComparison):

__test__ = False

def __init__(self, *builds):
def __init__(self, *builds, suites=None, offset=0, per_page=50):
self.__intermittent__ = {}
self.tests_with_issues = {}
self.__failures__ = OrderedDict()
BaseComparison.__init__(self, *builds)
BaseComparison.__init__(self, *builds, suites=suites, offset=offset, per_page=per_page)

def __extract_results__(self):

"""
problem:
- comparing takes a huge amount of memory and time
- it does this because it blindly takes all tests from
the first build and try to find a matching test in the
second/target build
- after tests match, transitions are applied (pass to fail, fail to pass, n/a to pass, etc)
- after transitions are applied we paginate the whole thing just
to display a really small portion of it
idea:
- i think i can improve it by running smaller comparisons by
comparing by suite, and then paginate tests in database
- compare results by suite means reducing number of testruns
and also reducing number of returned tests since we'd be using
suite.id
- if filtering tests by suite.id and a very small number of testruns
we'll be dealing with only a few million tests, which DB handles just fine
so I think it's ok to apply order by, offset and limit :)
- results look weird, but promising :)
- keep trying tomorrow!
"""

start = time.time()
print('fetching testruns')

test_runs = models.TestRun.objects.filter(
build__in=self.builds,
).prefetch_related(
'build',
'environment',
).only('build', 'environment')

statuses = models.Status.objects.filter(test_run__in=test_runs, suite__isnull=False)
if self.suites:
statuses = statuses.filter(suite__slug__in=self.suites)

# Group testruns by suite.slug
partial_results = defaultdict(lambda: [])
test_runs = []
for status in statuses.all():
partial_results[status.suite.slug].append((status.suite, status.test_run))
test_runs.append(status.test_run)

results = defaultdict(lambda: [])
for suite_slug in partial_results.keys():
results[suite_slug] = self.__extract_suite_results__(suite_slug, partial_results[suite_slug])

self.__resolve_intermittent_tests__()

self.results = OrderedDict(sorted(self.results.items()))
for build in self.builds:
self.environments[build] = sorted(self.environments[build])

def __extract_suite_results__(self, suite_slug, suites_and_testruns):

test_runs_ids = {}
for test_run in test_runs:
for suite, test_run in suites_and_testruns:
build = test_run.build
env = test_run.environment.slug

@@ -189,24 +246,29 @@ def __extract_results__(self):
if test_runs_ids.get(test_run.id, None) is None:
test_runs_ids[test_run.id] = (build, env)

for ids in split_dict(test_runs_ids, chunk_size=100):
self.__extract_test_results__(ids)
print('splitting tests')
start = time.time()

self.__resolve_intermittent_tests__()
self.__extract_test_results__(test_runs_ids, suite)

self.results = OrderedDict(sorted(self.results.items()))
for build in self.builds:
self.environments[build] = sorted(self.environments[build])
duration = time.time() - start
print('finish splitting tests! took %f' % duration)

def __extract_test_results__(self, test_runs_ids):
tests = models.Test.objects.filter(test_run_id__in=test_runs_ids.keys()).annotate(
suite_slug=F('suite__slug'),
).prefetch_related('metadata').defer('log')
def __extract_test_results__(self, test_runs_ids, suite):
print('\tfetching tests')
start = time.time()

tests = models.Test.objects.filter(test_run_id__in=test_runs_ids.keys(), suite=suite) \
.prefetch_related('metadata') \
.defer('log') \
.order_by('metadata__name') \
.all()[self.offset:self.offset+self.per_page]

for test in tests:
print('.', end='', flush=True)
build, env = test_runs_ids.get(test.test_run_id)

full_name = join_name(test.suite_slug, test.name)
full_name = join_name(suite.slug, test.name)
if full_name not in self.results:
self.results[full_name] = OrderedDict()

@@ -221,6 +283,9 @@ def __extract_test_results__(self, test_runs_ids):
self.__failures__[env] = []
self.__failures__[env].append(test)

duration = time.time() - start
print('\tfinish fetching tests! took %f' % duration)

def __resolve_intermittent_tests__(self):
if len(self.tests_with_issues) == 0:
return
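The docstring in __extract_results__ above describes the approach: group test runs by suite, then let the database order and paginate the tests instead of loading everything into memory. The following is a minimal standalone sketch of that idea, not part of this commit; it reuses model and field names visible in this diff (squad.core.models.TestRun, Status and Test), while the function name and its defaults are illustrative only.

from collections import defaultdict

from squad.core import models


def paginated_suite_comparison(builds, suites=None, offset=0, per_page=50):
    # Narrow the candidate test runs down to the builds being compared.
    test_runs = models.TestRun.objects.filter(
        build__in=builds,
    ).only('build', 'environment')

    # Statuses carry the suite reference, so use them to group test runs by suite slug.
    statuses = models.Status.objects.filter(test_run__in=test_runs, suite__isnull=False)
    if suites:
        statuses = statuses.filter(suite__slug__in=suites)

    runs_by_suite = defaultdict(list)
    for status in statuses:
        runs_by_suite[status.suite.slug].append(status.test_run_id)

    # For each suite, let the database do the ordering and the pagination:
    # the slice below becomes ORDER BY ... LIMIT ... OFFSET in SQL, so only
    # one page of tests per suite is ever materialized in Python.
    results = {}
    for slug, run_ids in runs_by_suite.items():
        results[slug] = list(
            models.Test.objects.filter(test_run_id__in=run_ids)
            .order_by('metadata__name')[offset:offset + per_page]
        )
    return results

Compared to the previous code path, which fetched all tests for all test runs and only paginated after matching, the slice keeps memory usage proportional to per_page rather than to the total number of tests.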
22 changes: 19 additions & 3 deletions squad/frontend/comparison.py
@@ -101,6 +101,9 @@ def compare_test(request):


def compare_builds(request):
import time
outer_start = time.time()
print('comparing builds')
project_slug = request.GET.get('project')
comparison_type = request.GET.get('comparison_type', 'test')
transitions = __get_transitions(request)
@@ -112,15 +115,24 @@

baseline_build = request.GET.get('baseline')
target_build = request.GET.get('target')
print('comparing %s against %s' % (baseline_build, target_build))
if baseline_build and target_build:
baseline = get_object_or_404(project.builds, version=baseline_build)
target = get_object_or_404(project.builds, version=target_build)

comparison_class = __get_comparison_class(comparison_type)
comparison = comparison_class.compare_builds(baseline, target)

start = time.time()
print('starting comparison')
comparison = comparison_class(baseline, target, suites=['cts-lkft/arm64-v8a.CtsDeqpTestCases'])
duration = time.time() - start
print('finished comparison! took %f' % duration, flush=True)

start = time.time()
print('applying transitions')
if comparison_type == 'test' and len(transitions):
comparison.apply_transitions([t for t, checked in transitions.items() if checked])
duration = time.time() - start
print('finished applying transitions! took %f' % duration, flush=True)

comparison.results = __paginate(comparison.results, request)

@@ -131,4 +143,8 @@
'transitions': transitions,
}

return render(request, 'squad/compare_builds.jinja2', context)
response = render(request, 'squad/compare_builds.jinja2', context)

duration = time.time() - outer_start
print('finished comparing builds! took %f' % duration, flush=True)
return response
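The compare_builds view above wraps each step in ad-hoc start = time.time() / print(...) pairs. A small context manager could express the same instrumentation once; this is only a sketch of an alternative, not something this commit adds.

import time
from contextlib import contextmanager


@contextmanager
def timed(label):
    # Print a label, run the wrapped block, then report the elapsed time.
    print('%s...' % label, flush=True)
    start = time.time()
    try:
        yield
    finally:
        print('%s took %f seconds' % (label, time.time() - start), flush=True)

Used in the view, the comparison step would then read:

with timed('comparison'):
    comparison = comparison_class(baseline, target, suites=['cts-lkft/arm64-v8a.CtsDeqpTestCases'])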
2 changes: 1 addition & 1 deletion squad/settings.py
@@ -64,7 +64,7 @@

django_toolbar = None
django_toolbar_middleware = None
if DEBUG:
if DEBUG and False:
try:

DEBUG_TOOLBAR_CONFIG = {
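The settings.py change above switches the Django Debug Toolbar off by hard-coding "and False" onto the DEBUG check. An explicit opt-in flag is a common alternative; the sketch below is illustrative only (SQUAD_DEBUG_TOOLBAR is a hypothetical environment variable, not something SQUAD reads) and assumes it sits in settings.py, where DEBUG is already defined.

import os

# Hypothetical opt-in switch for the debug toolbar.
ENABLE_DEBUG_TOOLBAR = os.getenv('SQUAD_DEBUG_TOOLBAR', 'false').lower() == 'true'

if DEBUG and ENABLE_DEBUG_TOOLBAR:
    # register the debug_toolbar app and middleware, as the original block does
    ...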