
Commit

Separation of test running and result printing
Sir-NoChill committed Sep 8, 2024
1 parent aca76f6 commit 4894575
Showing 2 changed files with 117 additions and 1 deletion.
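
At a high level, the change pairs two new private members: threadRunTestsForToolChain() runs the tests for a toolchain on a worker thread and stores each result in that test's std::optional<TestResult> slot, while aggregateTestResultsForToolChain() stays on the main thread, polls those slots, and does all of the printing and stat tracking. The sketch below is a minimal, self-contained illustration of that handoff pattern, not code from the repository; the names Result, runOne, and slots are made up, and unlike the commit it guards the shared slots with a mutex.

#include <chrono>
#include <iostream>
#include <mutex>
#include <optional>
#include <thread>
#include <vector>

struct Result { bool pass; };                  // stand-in for TestResult

static Result runOne(int id) {                 // stand-in for running one test through a toolchain
  std::this_thread::sleep_for(std::chrono::milliseconds(50));
  return Result{id % 2 == 0};
}

int main() {
  std::vector<std::optional<Result>> slots(4); // one empty slot per test
  std::mutex m;                                // guards the slots (the commit itself polls has_value() directly)

  // Worker thread: run every test and publish its result (the threadRunTestsForToolChain role).
  std::thread worker([&] {
    for (int i = 0; i < 4; ++i) {
      Result r = runOne(i);
      std::lock_guard<std::mutex> lock(m);
      slots[i] = r;
    }
  });

  // Main thread: wait for each result in order and print it (the aggregateTestResultsForToolChain role).
  for (int i = 0; i < 4; ++i) {
    std::optional<Result> r;
    while (!r) {
      {
        std::lock_guard<std::mutex> lock(m);
        r = slots[i];                          // copies the result once the worker has filled the slot
      }
      if (!r)
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
    }
    std::cout << "test " << i << (r->pass ? " [PASS]" : " [FAIL]") << '\n';
  }

  worker.join();
  return 0;
}

The real runTests() loop in the diff below does the same dance once per (executable, toolchain) pair: spawn the worker, call the aggregator, then join.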
include/testharness/TestHarness.h: 2 changes (2 additions, 0 deletions)
@@ -64,9 +64,11 @@ class TestHarness {
private:
// test running
bool runTestsForToolChain(std::string tcId, std::string exeName);
void threadRunTestsForToolChain(std::string tcId, std::string exeName);

// helper for formatting tester output
void printTestResult(const TestFile *test, TestResult result);
bool aggregateTestResultsForToolChain(std::string tcName, std::string exeName);

// test finding and filling methods
void addTestFileToSubPackage(SubPackage& subPackage, const fs::path& file);
src/testharness/TestHarness.cpp: 116 changes (115 additions, 1 deletion)
@@ -4,10 +4,12 @@
#include "tests/TestRunning.h"
#include "util.h"

#include <chrono>
#include <filesystem>
#include <iostream>
#include <optional>
#include <sstream>
#include <thread>
#include <utility>

namespace tester {
@@ -23,8 +25,11 @@ bool TestHarness::runTests() {
for (auto exePair : cfg.getExecutables()) {
// Iterate over toolchains.
for (auto& tcPair : cfg.getToolChains()) {
-  if (runTestsForToolChain(exePair.first, tcPair.first) == 1)
+  std::thread t(&TestHarness::threadRunTestsForToolChain, this, tcPair.first, exePair.first);
+  if (aggregateTestResultsForToolChain(tcPair.first, exePair.first) == 1)
     failed = true;
+
+  t.join();
}
}
return failed;
@@ -59,6 +64,115 @@ void TestHarness::printTestResult(const TestFile *test, TestResult result) {
std::cout << "\n";
}

bool TestHarness::aggregateTestResultsForToolChain(std::string tcName, std::string exeName) {
bool failed = false;

ToolChain toolChain = cfg.getToolChain(tcName); // Get the toolchain to use.
const fs::path& exe = cfg.getExecutablePath(exeName); // Set the toolchain's exe to be tested.
toolChain.setTestedExecutable(exe);

if (cfg.hasRuntime(exeName)) // If we have a runtime, set that as well.
toolChain.setTestedRuntime(cfg.getRuntimePath(exeName));
else
toolChain.setTestedRuntime("");

std::cout << "\nTesting executable: " << exeName << " -> " << exe << '\n';
std::cout << "With toolchain: " << tcName << " -> " << toolChain.getBriefDescription() << '\n';

unsigned int toolChainCount = 0, toolChainPasses = 0; // Stat tracking for toolchain tests.

// Iterate over each package.
for (auto& [packageName, package] : testSet) {
std::cout << "Entering package: " << packageName << '\n';
unsigned int packageCount = 0, packagePasses = 0;

// Iterate over each subpackage
for (auto& [subPackageName, subPackage] : package) {
std::cout << " Entering subpackage: " << subPackageName << '\n';
unsigned int subPackagePasses = 0, subPackageSize = subPackage.size();

// Iterate over each test in the subpackage
for (size_t i = 0; i < subPackage.size(); ++i) {
TestPair& pair = subPackage[i];
std::unique_ptr<TestFile>& test = pair.first;
if (test->getParseError() == ParseError::NoError) {

// Poll while we wait for the result
// TODO this could probably be replaced with some sort of interrupt,
// (and probably should be), but better this than no threads
while (!pair.second.has_value())
std::this_thread::sleep_for(std::chrono::milliseconds(100));

TestResult result = pair.second.value();

// keep the result with the test for pretty printing
std::optional<TestResult> res_clone = std::make_optional(result.clone());
subPackage[i].second.swap(res_clone);

results.addResult(exeName, tcName, subPackageName, result);
printTestResult(test.get(), result);

if (result.pass) {
++packagePasses;
++subPackagePasses;
} else {
failed = true;
}
} else {
std::cout << " " << (Colors::YELLOW + "[INVALID]" + Colors::RESET) << " "
<< test->getTestPath().stem().string() << '\n';
--subPackageSize;
}
}
std::cout << " Subpackage passed " << subPackagePasses << " / " << subPackageSize << '\n';
// Track how many tests we run.
packageCount += subPackageSize;
}

// Update the toolchain stats from the package stats.
toolChainPasses += packagePasses;
toolChainCount += packageCount;

std::cout << " Package passed " << packagePasses << " / " << packageCount << '\n';
}

std::cout << "Toolchain passed " << toolChainPasses << " / " << toolChainCount << "\n\n";
std::cout << "Invalid " << invalidTests.size() << " / " << toolChainCount + invalidTests.size()
<< "\n";

for (auto& test : invalidTests) {
std::cout << " Skipped: " << test.first->getTestPath().filename().stem() << std::endl
<< " Error: " << Colors::YELLOW << test.first->getParseErrorMsg() << Colors::RESET << "\n";
}
std::cout << "\n";

return failed;
}

void TestHarness::threadRunTestsForToolChain(std::string tcName, std::string exeName) {
ToolChain toolChain = cfg.getToolChain(tcName); // Get the toolchain to use.
const fs::path& exe = cfg.getExecutablePath(exeName); // Set the toolchain's exe to be tested.
toolChain.setTestedExecutable(exe);

if (cfg.hasRuntime(exeName)) // If we have a runtime, set that as well (mirrors the setup in aggregateTestResultsForToolChain).
  toolChain.setTestedRuntime(cfg.getRuntimePath(exeName));
else
  toolChain.setTestedRuntime("");

// Iterate over each package.
for (auto& [packageName, package] : testSet) {
// Iterate over each subpackage
for (auto& [subPackageName, subPackage] : package) {
// Iterate over each test in the subpackage
for (size_t i = 0; i < subPackage.size(); ++i) {
std::unique_ptr<TestFile>& test = subPackage[i].first;
if (test->getParseError() == ParseError::NoError) {

TestResult result = runTest(test.get(), toolChain, cfg);
// keep the result with the test for pretty printing
std::optional<TestResult> res_clone = std::make_optional(result.clone());
subPackage[i].second.swap(res_clone);
}
}
}
}
}

bool TestHarness::runTestsForToolChain(std::string exeName, std::string tcName) {
bool failed = false;

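The TODO inside aggregateTestResultsForToolChain() notes that the 100 ms polling loop could be replaced with a real wakeup. One way that could look is a std::condition_variable that the worker notifies after filling a slot; this is a sketch of the alternative, not part of the commit, and the names are again illustrative.

#include <chrono>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <optional>
#include <thread>
#include <vector>

struct Result { bool pass; };

int main() {
  std::vector<std::optional<Result>> slots(4);
  std::mutex m;
  std::condition_variable cv;

  // Worker: publish each result, then wake the aggregator instead of relying on a timed poll.
  std::thread worker([&] {
    for (int i = 0; i < 4; ++i) {
      std::this_thread::sleep_for(std::chrono::milliseconds(50)); // pretend to run a test
      {
        std::lock_guard<std::mutex> lock(m);
        slots[i] = Result{true};
      }
      cv.notify_one();
    }
  });

  // Aggregator: block until the slot it needs is filled; no sleep_for() loop.
  for (int i = 0; i < 4; ++i) {
    std::unique_lock<std::mutex> lock(m);
    cv.wait(lock, [&] { return slots[i].has_value(); });
    std::cout << "test " << i << (slots[i]->pass ? " [PASS]" : " [FAIL]") << '\n';
  }

  worker.join();
  return 0;
}

The wait-with-predicate form handles spurious wakeups and results that arrive before the aggregator starts waiting, so no notification is ever missed.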
