From 450ec9365e17b1f4c220a9ddbdc3f1f764424b2a Mon Sep 17 00:00:00 2001 From: Veronica Melesse Vergara Date: Wed, 24 Feb 2021 09:46:29 -0500 Subject: [PATCH] Updated the machine registration functionality. (1) Added Summit and Peak to the registered_machines.ini file and updated the Python regular expression for these machines to match the fully qualified domain name. (2) Updated get_machine_name.py to get the fully qualified domain name. --- .gitignore | 17 + .gitlab-ci.yml | 123 +++ SoftwareLicense.txt => LICENSE | 2 +- ci_testing_utilities/__init__.py | 4 + .../bin/run_generic_unit_tests.py | 93 ++ .../bin/run_machine_specific_unit_tests.py | 116 +++ .../bin/test_driver_program.sh | 221 +++++ .../harness_unit_tests/Ascent/__init__.py | 4 + .../Ascent/test_olcf5_acceptance.py | 25 + .../Repository_Tests/__init__.py | 0 .../git_test_incremental_repository.py | 0 .../Repository_Tests/git_test_repository.py | 0 .../repository_tests_utility_functions.py | 0 .../svn_test_incremental_repository.py | 0 .../Repository_Tests/svn_test_repository.py | 0 .../harness_unit_tests/__init__.py | 8 + .../harness_unittests_exceptions.py | 73 ++ .../harness_unittests_logging.py | 76 ++ .../harness_unit_tests}/test_concurrency.py | 0 .../test_machine_specific_tests.py | 251 ++++++ .../harness_unit_tests/test_runtests.py | 227 +++++ .../input_files/Crest/rgt.input | 0 .../Crest/rgt_environmental_variables.bash.x | 0 .../input_files/Crest/unit_testing/1.0 | 0 .../ANY_MACHINE/Foo/file_1.txt | 0 .../ANY_MACHINE/file_0.txt | 0 .../ANY_MACHINE/HelloWorld/Source/Makefile | 0 .../ANY_MACHINE/HelloWorld/Source/main.cpp | 0 .../Test_16cores/Scripts/build_executable.x | 0 .../Test_16cores/Scripts/check_executable.x | 0 .../Test_16cores/Scripts/pbs.template.x | 0 .../Test_16cores/Scripts/submit_executable.x | 0 .../HelloWorld/Test_16cores/test_info.txt | 0 .../Test_32cores/Scripts/build_executable.x | 0 .../Test_32cores/Scripts/check_executable.x | 0 .../Test_32cores/Scripts/pbs.template.x | 0 .../Test_32cores/Scripts/submit_executable.x | 0 .../HelloWorld/Test_32cores/test_info.txt | 0 .../HelloWorld/application_info.txt | 0 .../rgt_environmental_variables.bash.x | 0 .../input_files/Summitdev/unit_testing/1.0 | 0 .../input_files/Titan/rgt.input | 0 .../Titan/rgt_environmental_variables.bash.x | 0 .../input_files/Titan/unit_testing/1.0 | 0 .../lyra/MPI_Hello_World/lyra.1_node.txt | 16 + .../MPI_Hello_World/pircdefense.1_node.txt | 16 + .../rhea/MPI_Hello_World/rhea.1_node.txt | 16 + .../summit/MPI_Hello_World/summit.1_node.txt | 16 + .../Ascent-olcf5_acceptance.unit_tests.lua | 3 + ...ricMachine-GenericConfigTag.unit_tests.lua | 56 ++ .../lyra-olcf5_acceptance.unit_tests.lua | 15 + ...ircdefense-olcf5_acceptance.unit_tests.lua | 15 + .../rhea-olcf5_acceptance.unit_tests.lua | 15 + .../summit-olcf5_acceptance.unit_tests.lua | 15 + configs/example_machine.ini | 24 + configs/lyra.ini | 23 - configs/master.ini | 1 - configs/rhea.ini | 23 - configs/summit.ini | 23 - doc-sphinx/build_documentation.sh | 7 +- .../source/_static/css/theme_overrides.css | 41 + doc-sphinx/source/_static/js/custom.js | 49 ++ doc-sphinx/source/conf.py | 35 +- doc-sphinx/source/developer_guide/intro.rst | 59 ++ doc-sphinx/source/developer_guide/modules.rst | 7 + .../modules/create_alt_config_file.rst | 12 + .../modules/layout_of_apps_directory.rst | 12 + .../modules/repositories/git_repository.rst | 6 +- .../developer_guide/modules/runtests.rst | 12 + .../notational_conventions.rst | 42 + .../source/{ => developer_guide}/packages.rst | 3 +-
.../source/developer_guide/packages/bin.rst | 19 + .../packages/repositories.rst | 0 .../source/developer_guide/references.rst | 5 + .../developer_guide/unit_test_framework.rst | 112 +++ .../git_ci_test_framework.rst | 10 + .../pytest_test_framework.rst | 10 + doc-sphinx/source/images/olcf_logo.png | Bin 0 -> 21616 bytes doc-sphinx/source/index.rst | 23 +- doc-sphinx/source/intro.rst | 16 - doc-sphinx/source/modules.rst | 8 - .../source/user_guide/adding_new_machine.rst | 9 + .../source/user_guide/adding_new_test.rst | 249 ++++++ doc-sphinx/source/user_guide/launching.rst | 99 +++ doc-sphinx/source/user_guide/overview.rst | 49 ++ harness/bin/__init__.py | 3 +- harness/bin/check_executable_driver.py | 49 +- harness/bin/convert_test_input.bash | 48 ++ harness/bin/create_alt_config_file.py | 565 ++++++++++++ harness/bin/filelock.py | 14 +- harness/bin/log_binary_execution_time.py | 89 +- harness/bin/parse_test_status.py | 82 +- harness/bin/recheck_tests.py | 4 +- harness/bin/rgt_calculate_percentages.py | 14 +- harness/bin/runtests.py | 417 +++++++-- harness/bin/test_harness_driver.py | 524 ++++++------ harness/libraries/.gitignore | 2 + harness/libraries/__init__.py | 13 +- harness/libraries/apptest.py | 337 +++++--- harness/libraries/aprun_2.py | 117 --- harness/libraries/aprun_3.py | 433 ---------- harness/libraries/aprun_eos.py | 242 ------ harness/libraries/aprun_titan.py | 306 ------- harness/libraries/aprun_utility.py | 265 ------ harness/libraries/base_apptest.py | 137 ++- harness/libraries/command_line.py | 97 +++ harness/libraries/computers.py | 805 ------------------ harness/libraries/computers_1.py | 172 ---- harness/libraries/config_file.py | 82 ++ harness/libraries/get_machine_name.py | 232 +++++ harness/libraries/input_files.py | 66 +- harness/libraries/job_info.py | 8 +- harness/libraries/layout_of_apps_directory.py | 179 +++- harness/libraries/regression_test.py | 324 +++++-- harness/libraries/rgt_loggers/__init__.py | 1 + .../rgt_loggers/rgt_logger_factory.py | 44 + harness/libraries/rgt_loggers/rgt_logging.py | 111 +++ harness/libraries/rgt_logging.py | 58 -- harness/libraries/rgt_utilities.py | 29 + harness/libraries/schedulers.py | 10 +- harness/libraries/status_file.py | 493 ++++++++--- harness/libraries/status_file_factory.py | 53 ++ harness/libraries/subtest_factory.py | 80 ++ harness/libraries/threadedDecorator.py | 334 -------- harness/machine_types/__init__.py | 4 + harness/machine_types/base_machine.py | 496 +++++++++-- harness/machine_types/base_scheduler.py | 13 +- harness/machine_types/cray_xk7.py | 18 - harness/machine_types/ibm_power8.py | 110 --- harness/machine_types/ibm_power9.py | 153 ++-- harness/machine_types/linux_utilities.py | 466 ++++++++++ harness/machine_types/linux_x86_64.py | 79 ++ harness/machine_types/lsf.py | 38 +- harness/machine_types/machine_factory.py | 106 +-- .../machine_factory_exceptions.py | 10 +- harness/machine_types/pbs.py | 72 +- harness/machine_types/rgt_test.py | 608 ++++++++++--- harness/machine_types/rhel_x86.py | 112 --- harness/machine_types/scheduler_factory.py | 1 - harness/machine_types/slurm.py | 35 +- harness/machine_types/tests/__init__.py | 1 + modulefiles/olcf_harness | 2 +- modulefiles/olcf_harness_unit.lua | 112 +++ modulefiles/runtime_environment | 1 + test/__init__.py | 5 - test/src/__init__.py | 8 - test/src/test_runtests.py | 412 --------- 147 files changed, 7327 insertions(+), 4785 deletions(-) create mode 100644 .gitlab-ci.yml rename SoftwareLicense.txt => LICENSE (97%) create mode 100644 
ci_testing_utilities/__init__.py create mode 100755 ci_testing_utilities/bin/run_generic_unit_tests.py create mode 100755 ci_testing_utilities/bin/run_machine_specific_unit_tests.py create mode 100755 ci_testing_utilities/bin/test_driver_program.sh create mode 100644 ci_testing_utilities/harness_unit_tests/Ascent/__init__.py create mode 100644 ci_testing_utilities/harness_unit_tests/Ascent/test_olcf5_acceptance.py rename {test/src => ci_testing_utilities/harness_unit_tests}/Repository_Tests/__init__.py (100%) rename {test/src => ci_testing_utilities/harness_unit_tests}/Repository_Tests/git_test_incremental_repository.py (100%) rename {test/src => ci_testing_utilities/harness_unit_tests}/Repository_Tests/git_test_repository.py (100%) rename {test/src => ci_testing_utilities/harness_unit_tests}/Repository_Tests/repository_tests_utility_functions.py (100%) rename {test/src => ci_testing_utilities/harness_unit_tests}/Repository_Tests/svn_test_incremental_repository.py (100%) rename {test/src => ci_testing_utilities/harness_unit_tests}/Repository_Tests/svn_test_repository.py (100%) create mode 100644 ci_testing_utilities/harness_unit_tests/__init__.py create mode 100644 ci_testing_utilities/harness_unit_tests/harness_unittests_exceptions.py create mode 100644 ci_testing_utilities/harness_unit_tests/harness_unittests_logging.py rename {test/src => ci_testing_utilities/harness_unit_tests}/test_concurrency.py (100%) create mode 100644 ci_testing_utilities/harness_unit_tests/test_machine_specific_tests.py create mode 100644 ci_testing_utilities/harness_unit_tests/test_runtests.py rename {test => ci_testing_utilities}/input_files/Crest/rgt.input (100%) rename {test => ci_testing_utilities}/input_files/Crest/rgt_environmental_variables.bash.x (100%) rename {test => ci_testing_utilities}/input_files/Crest/unit_testing/1.0 (100%) rename {test => ci_testing_utilities}/input_files/Sample_Directory_For_Repository_Testing/ANY_MACHINE/Foo/file_1.txt (100%) rename {test => ci_testing_utilities}/input_files/Sample_Directory_For_Repository_Testing/ANY_MACHINE/file_0.txt (100%) rename {test => ci_testing_utilities}/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Source/Makefile (100%) rename {test => ci_testing_utilities}/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Source/main.cpp (100%) rename {test => ci_testing_utilities}/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_16cores/Scripts/build_executable.x (100%) rename {test => ci_testing_utilities}/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_16cores/Scripts/check_executable.x (100%) rename {test => ci_testing_utilities}/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_16cores/Scripts/pbs.template.x (100%) rename {test => ci_testing_utilities}/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_16cores/Scripts/submit_executable.x (100%) rename {test => ci_testing_utilities}/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_16cores/test_info.txt (100%) rename {test => ci_testing_utilities}/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_32cores/Scripts/build_executable.x (100%) rename {test => ci_testing_utilities}/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_32cores/Scripts/check_executable.x (100%) rename {test => 
ci_testing_utilities}/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_32cores/Scripts/pbs.template.x (100%) rename {test => ci_testing_utilities}/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_32cores/Scripts/submit_executable.x (100%) rename {test => ci_testing_utilities}/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_32cores/test_info.txt (100%) rename {test => ci_testing_utilities}/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/application_info.txt (100%) rename {test => ci_testing_utilities}/input_files/Summitdev/rgt_environmental_variables.bash.x (100%) rename {test => ci_testing_utilities}/input_files/Summitdev/unit_testing/1.0 (100%) rename {test => ci_testing_utilities}/input_files/Titan/rgt.input (100%) rename {test => ci_testing_utilities}/input_files/Titan/rgt_environmental_variables.bash.x (100%) rename {test => ci_testing_utilities}/input_files/Titan/unit_testing/1.0 (100%) create mode 100644 ci_testing_utilities/input_files/lyra/MPI_Hello_World/lyra.1_node.txt create mode 100644 ci_testing_utilities/input_files/pircdefense/MPI_Hello_World/pircdefense.1_node.txt create mode 100644 ci_testing_utilities/input_files/rhea/MPI_Hello_World/rhea.1_node.txt create mode 100644 ci_testing_utilities/input_files/summit/MPI_Hello_World/summit.1_node.txt create mode 100644 ci_testing_utilities/runtime_environment/Ascent-olcf5_acceptance.unit_tests.lua create mode 100644 ci_testing_utilities/runtime_environment/GenericMachine-GenericConfigTag.unit_tests.lua create mode 100644 ci_testing_utilities/runtime_environment/lyra-olcf5_acceptance.unit_tests.lua create mode 100644 ci_testing_utilities/runtime_environment/pircdefense-olcf5_acceptance.unit_tests.lua create mode 100644 ci_testing_utilities/runtime_environment/rhea-olcf5_acceptance.unit_tests.lua create mode 100644 ci_testing_utilities/runtime_environment/summit-olcf5_acceptance.unit_tests.lua create mode 100644 configs/example_machine.ini delete mode 100644 configs/lyra.ini delete mode 120000 configs/master.ini delete mode 100644 configs/rhea.ini delete mode 100644 configs/summit.ini create mode 100644 doc-sphinx/source/_static/css/theme_overrides.css create mode 100644 doc-sphinx/source/_static/js/custom.js create mode 100644 doc-sphinx/source/developer_guide/intro.rst create mode 100644 doc-sphinx/source/developer_guide/modules.rst create mode 100644 doc-sphinx/source/developer_guide/modules/create_alt_config_file.rst create mode 100644 doc-sphinx/source/developer_guide/modules/layout_of_apps_directory.rst rename doc-sphinx/source/{ => developer_guide}/modules/repositories/git_repository.rst (95%) create mode 100644 doc-sphinx/source/developer_guide/modules/runtests.rst create mode 100644 doc-sphinx/source/developer_guide/notational_conventions.rst rename doc-sphinx/source/{ => developer_guide}/packages.rst (74%) create mode 100644 doc-sphinx/source/developer_guide/packages/bin.rst rename doc-sphinx/source/{ => developer_guide}/packages/repositories.rst (100%) create mode 100644 doc-sphinx/source/developer_guide/references.rst create mode 100644 doc-sphinx/source/developer_guide/unit_test_framework.rst create mode 100644 doc-sphinx/source/developer_guide/unit_test_framework/git_ci_test_framework.rst create mode 100644 doc-sphinx/source/developer_guide/unit_test_framework/pytest_test_framework.rst create mode 100644 doc-sphinx/source/images/olcf_logo.png delete mode 100644 
doc-sphinx/source/intro.rst delete mode 100644 doc-sphinx/source/modules.rst create mode 100644 doc-sphinx/source/user_guide/adding_new_machine.rst create mode 100644 doc-sphinx/source/user_guide/adding_new_test.rst create mode 100644 doc-sphinx/source/user_guide/launching.rst create mode 100644 doc-sphinx/source/user_guide/overview.rst create mode 100755 harness/bin/convert_test_input.bash create mode 100755 harness/bin/create_alt_config_file.py create mode 100644 harness/libraries/.gitignore delete mode 100644 harness/libraries/aprun_2.py delete mode 100644 harness/libraries/aprun_3.py delete mode 100644 harness/libraries/aprun_eos.py delete mode 100644 harness/libraries/aprun_titan.py delete mode 100644 harness/libraries/aprun_utility.py create mode 100644 harness/libraries/command_line.py delete mode 100644 harness/libraries/computers.py delete mode 100644 harness/libraries/computers_1.py create mode 100644 harness/libraries/config_file.py create mode 100755 harness/libraries/get_machine_name.py create mode 100644 harness/libraries/rgt_loggers/__init__.py create mode 100644 harness/libraries/rgt_loggers/rgt_logger_factory.py create mode 100644 harness/libraries/rgt_loggers/rgt_logging.py delete mode 100644 harness/libraries/rgt_logging.py create mode 100644 harness/libraries/status_file_factory.py create mode 100644 harness/libraries/subtest_factory.py delete mode 100644 harness/libraries/threadedDecorator.py delete mode 100644 harness/machine_types/cray_xk7.py delete mode 100644 harness/machine_types/ibm_power8.py create mode 100644 harness/machine_types/linux_utilities.py create mode 100644 harness/machine_types/linux_x86_64.py delete mode 100644 harness/machine_types/rhel_x86.py create mode 100755 modulefiles/olcf_harness_unit.lua create mode 120000 modulefiles/runtime_environment delete mode 100644 test/__init__.py delete mode 100644 test/src/__init__.py delete mode 100644 test/src/test_runtests.py diff --git a/.gitignore b/.gitignore index 25f81d5..c970ed6 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,20 @@ # Ignore python bytecode file. # *.pyc **/__pycache__ + +# Ignore all HTML files. +*.html + +# Ignore all *.doctree files. These files are +# generated when sphinx-doc creates python documentation. +*.doctree + +# Ignore all files in the sphinx documentation build directory. +doc-sphinx/build + +# Ignore all files with the *.orig suffix. +*.orig + +# Ignore all vim temporary files. +*.swp +*.swo diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml new file mode 100644 index 0000000..86a75a0 --- /dev/null +++ b/.gitlab-ci.yml @@ -0,0 +1,123 @@ +#----------------------------------------------------- +# GIT_STRATEGY : - +# Set the GIT_STRATEGY used for getting recent - +# application code. See - +# https://docs.gitlab.com/ee/ci/yaml/README.html - +# for more details on GIT_STRATEGY. - +# - +# HUT_MACHINE_NAME : - +# The name of the machine running the unit tests. - +# We use this variable to select the correct - +# runtime environment. - +# - +# HUT_CONFIG_TAG : - +# A unique identifier that corresponds to the - +# HARNESS test configuration for a particular - +# machine. - +# - +# HUT_PATH_TO_SSPACE: - +# The path to the scratch space directory - +# where the machine specific unit tests - +# are run. - +# - +# HUT_RTE_ENV_FILE: - +# A Lua module file (Lmod) that sets up the - +# Harness unit test runtime environment.
- +# The Lmod filename is formed by combining the - +# HUT_MACHINE_NAME and HUT_CONFIG_TAG - +# variables as follows: - +# "${HUT_MACHINE_NAME}-${HUT_CONFIG_TAG}.environment.sh" - +# - +# HUT_SCHED_ACCT_ID: - +# The account to be used with the job resource - +# manager and scheduler. - +# - +#----------------------------------------------------- +variables: + GIT_STRATEGY : fetch + HUT_MACHINE_NAME : 'GenericMachine' + HUT_CONFIG_TAG: 'GenericConfigTag' + HUT_RTE_ENV_FILE: "${HUT_MACHINE_NAME}-${HUT_CONFIG_TAG}.environment.sh" + HUT_PATH_TO_SSPACE: 'NOT_SET' + HUT_SCHED_ACCT_ID: 'NOT_SET' + +#----------------------------------------------------- +# This hidden job defines a template for a set of - +# common commands that set up the Harness unit test - +# runtime environment. - +# - +#----------------------------------------------------- +.core_tests_template: + timeout : 15 minutes + script : + - bash # Change to the bash shell. + - export OLCF_HARNESS_DIR=${CI_PROJECT_DIR} # Set the HARNESS top level directory. + - export HUT_MACHINE_NAME + - export HUT_CONFIG_TAG + - export HUT_RTE_ENV_FILE + - module --ignore-cache use ${OLCF_HARNESS_DIR}/modulefiles + - module load olcf_harness + - ci_testing_utilities/bin/run_generic_unit_tests.py + - ci_testing_utilities/bin/run_machine_specific_unit_tests.py --machine ${HUT_MACHINE_NAME} --harness-config-tag ${HUT_CONFIG_TAG} + +#----------------------------------------------------- +# This section defines jobs for machine Ascent. - +# - +# Machine: Ascent (OLCF training cluster) - +# Description: - +# LSB Version: core-4.1-noarch:core-4.1-ppc64le- +# Distributor ID: RedHatEnterpriseServer - +# Description: Red Hat Enterprise Linux - +# Server release 7.6 (Maipo) - +# Release: 7.6 - +# Codename: Maipo - +# - +# Architecture: ppc64le - +# Byte Order: Little Endian - +# CPU(s): 128 - +# On-line CPU(s) list: 0-127 - +# Thread(s) per core: 4 - +# Core(s) per socket: 16 - +# Socket(s): 2 - +# NUMA node(s): 6 - +# Model: 2.1 (pvr 004e 1201) - +# Model name: POWER9, altivec supported - +# - +# CUDA Version: 10.1 - +# Driver Version: 418.40.04 - +# 3 GPUs - +# Product Name: Tesla V100-SXM2-16GB - +#----------------------------------------------------- + +# This job performs basic tests on machine Ascent +.Ascent_Basic_Tests : + extends : .core_tests_template + variables : + HUT_MACHINE_NAME : 'Ascent' + HUT_CONFIG_TAG : 'olcf5_acceptance' + HUT_SCHED_ACCT_ID: 'stf006' + tags : + - olcf_harness_unit_tests + +#----------------------------------------------------- +# This section defines jobs for machine Lyra. - +# - +# Machine: Lyra - +# Description: - +# Intentionally left blank - +# - +# This runner runs under user arnoldt on Lyra. - +# Hence we must set the scratch space to a directory - +# user arnoldt can write to, and the account is set - +# to stf006. - +#----------------------------------------------------- +Lyra_Basic_Tests : + extends : .core_tests_template + variables : + HUT_MACHINE_NAME : 'lyra' + HUT_CONFIG_TAG : 'olcf5_acceptance' + HUT_PATH_TO_SSPACE : '/ccs/home/arnoldt/scratch_hut' + HUT_SCHED_ACCT_ID: 'stf006' + tags : + - Lyra_OLCF_Harness + diff --git a/SoftwareLicense.txt b/LICENSE similarity index 97% rename from SoftwareLicense.txt rename to LICENSE index 4b8568d..4627ed4 100755 --- a/SoftwareLicense.txt +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2016 UT-Battelle, LLC +Copyright (c) 2021 UT-Battelle, LLC All rights reserved.
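A quick note on the variable plumbing above: the banner comment and the variables: block agree that the runtime-environment file name is derived from HUT_MACHINE_NAME and HUT_CONFIG_TAG. A minimal sketch of that composition in Python (the helper name compose_rte_env_filename is illustrative and not part of this patch):

    import os

    def compose_rte_env_filename():
        # Mirrors HUT_RTE_ENV_FILE in .gitlab-ci.yml:
        #   "${HUT_MACHINE_NAME}-${HUT_CONFIG_TAG}.environment.sh"
        machine = os.environ.get("HUT_MACHINE_NAME", "GenericMachine")
        config_tag = os.environ.get("HUT_CONFIG_TAG", "GenericConfigTag")
        return "{}-{}.environment.sh".format(machine, config_tag)

    # With the CI defaults this yields
    # "GenericMachine-GenericConfigTag.environment.sh".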
diff --git a/ci_testing_utilities/__init__.py b/ci_testing_utilities/__init__.py new file mode 100644 index 0000000..3cab468 --- /dev/null +++ b/ci_testing_utilities/__init__.py @@ -0,0 +1,4 @@ +__all__ = [ + "harness_unit_tests", + "Ascent" + ] diff --git a/ci_testing_utilities/bin/run_generic_unit_tests.py b/ci_testing_utilities/bin/run_generic_unit_tests.py new file mode 100755 index 0000000..688896b --- /dev/null +++ b/ci_testing_utilities/bin/run_generic_unit_tests.py @@ -0,0 +1,93 @@ +#! /usr/bin/env python3 +## @package run_generic_unit_tests +# This module contains the main function to run the generic Harness unit tests. + +# System imports +import sys +import string +import argparse # Needed for parsing command line arguments. +import logging # Needed for logging events. +import shlex,subprocess # Needed for launching command line jobs +from collections import OrderedDict + +# Local imports +import harness_unit_tests.test_runtests +from harness_unit_tests.harness_unittests_logging import create_logger_description +from harness_unit_tests.harness_unittests_logging import create_logger + +## @fn parse_arguments( ) +## @brief Parses the command line arguments. +## +## @details Parses the command line arguments and +## returns a namespace. +## +## @return A namespace. The namespace contains attributes +## that are the command line arguments. +def parse_arguments(): + + # Create a string of the description of the + # program. + program_description = "This program runs the generic, machine-independent Harness unit tests." + + # Create an argument parser. + my_parser = argparse.ArgumentParser( + description=program_description, + formatter_class=argparse.RawTextHelpFormatter, + add_help=True) + + # Add an optional argument for the logging level. + my_parser.add_argument("--log-level", + type=int, + default=logging.WARNING, + help=create_logger_description() ) + + my_args = my_parser.parse_args() + + return my_args + + + +def _do_generic_tests(): + my_unittests = OrderedDict() + my_unittests_return_code = OrderedDict() + + # Add test for runtests.py module. + my_unittests["runtests.py"] = "python3 -m unittest -v harness_unit_tests.test_runtests" + my_unittests_return_code["runtests.py"] = 0 + + for module_name,test_command_line in my_unittests.items(): + args = shlex.split(test_command_line) + my_test_process = subprocess.run(args) + my_unittests_return_code[module_name] = my_test_process.returncode + return my_unittests_return_code + +## @fn main () +## @brief The main function. +def main(): + args = parse_arguments() + + logger = create_logger(log_id='generic_unit_tests.log', + log_level=args.log_level) + + logger.info("Start of main program") + + retcode = 0 + retcode = _do_generic_tests() + + logger.info("End of main program") + + return retcode + +if __name__ == "__main__": + my_unittests_return_code = main() + + if my_unittests_return_code: + nm_failed_tests = 0 + for module_name,test_return_code in my_unittests_return_code.items(): + if test_return_code != 0: + nm_failed_tests += 1 + retcode = 0 if nm_failed_tests == 0 else 1 + else: + retcode = 1 + + sys.exit(retcode) diff --git a/ci_testing_utilities/bin/run_machine_specific_unit_tests.py b/ci_testing_utilities/bin/run_machine_specific_unit_tests.py new file mode 100755 index 0000000..91d254f --- /dev/null +++ b/ci_testing_utilities/bin/run_machine_specific_unit_tests.py @@ -0,0 +1,116 @@ +#! /usr/bin/env python3 +## @package run_machine_specific_unit_tests +# This module contains the main function to run the machine-specific Harness unit tests.
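The _do_generic_tests pattern just above generalizes nicely: each OrderedDict entry maps a module name to a python3 -m unittest command line, the commands run through subprocess, and the collected return codes drive the process exit status. A hedged sketch of registering one more generic test module (test_concurrency does exist in this patch, but wiring it into the dictionary here is only an illustration, not something the patch does):

    # Inside _do_generic_tests(), alongside the runtests.py entry:
    my_unittests["test_concurrency.py"] = \
        "python3 -m unittest -v harness_unit_tests.test_concurrency"
    my_unittests_return_code["test_concurrency.py"] = 0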
+ +# System imports +import os +import sys +import string +import argparse # Needed for parsing command line arguments. +import logging # Needed for logging events. +import shlex,subprocess # Needed for launching command line jobs +from collections import OrderedDict + +# Local imports +from harness_unit_tests import harness_unittests_exceptions +from harness_unit_tests.harness_unittests_logging import create_logger_description +from harness_unit_tests.harness_unittests_logging import create_logger + +## @fn parse_arguments( ) +## @brief Parses the command line arguments. +## +## @details Parses the command line arguments and +## returns a namespace. +## +## @return A namespace. The namespace contains attributes +## that are the command line arguments. +def parse_arguments(): + + # Create a string of the description of the + # program. + program_description = "This program runs the machine-specific Harness unit tests." + + # Create an argument parser. + my_parser = argparse.ArgumentParser( + description=program_description, + formatter_class=argparse.RawTextHelpFormatter, + add_help=True) + + # Add an optional argument for the logging level. + my_parser.add_argument("--log-level", + type=int, + default=logging.WARNING, + help=create_logger_description() ) + + my_parser.add_argument("--machine", + type=str, + help="The name of the machine", + required=True) + + my_parser.add_argument("--harness-config-tag", + type=str, + help="The harness config tag", + required=True) + + my_args = my_parser.parse_args() + + return my_args + +def _do_machine_specific_tests(): + my_machine_name = os.getenv('HUT_MACHINE_NAME') + if my_machine_name is None: + message = "The environmental variable HUT_MACHINE_NAME is not set." + raise harness_unittests_exceptions.EnvironmentalVariableNotSet('HUT_MACHINE_NAME',message) + + my_config_tag = os.getenv('HUT_CONFIG_TAG') + if my_config_tag is None: + message = "The environmental variable HUT_CONFIG_TAG is not set." + raise harness_unittests_exceptions.EnvironmentalVariableNotSet('HUT_CONFIG_TAG',message) + + my_unittests = OrderedDict() + my_unittests_return_code = OrderedDict() + + # Add test for machine specific tests. + my_unittests["test_machine_specific_tests.py"] = "python3 -m unittest -v harness_unit_tests.test_machine_specific_tests" + my_unittests_return_code["test_machine_specific_tests.py"] = 0 + + for module_name,test_command_line in my_unittests.items(): + args = shlex.split(test_command_line) + my_test_process = subprocess.run(args) + my_unittests_return_code[module_name] = my_test_process.returncode + + return my_unittests_return_code + +## @fn main () +## @brief The main function.
+def main(): + args = parse_arguments() + + logger = create_logger(log_id='machine_unit_tests.log', + log_level=args.log_level) + + logger.info("Start of main program") + + retcode = None + try: + retcode = _do_machine_specific_tests() + except harness_unittests_exceptions.EnvironmentalVariableNotSet as err: + print(err.message) + + logger.info("End of main program") + + return retcode + +if __name__ == "__main__": + my_unittests_return_code = main() + + if my_unittests_return_code: + nm_failed_tests = 0 + for module_name,test_return_code in my_unittests_return_code.items(): + if test_return_code != 0: + nm_failed_tests += 1 + retcode = 0 if nm_failed_tests == 0 else 1 + else: + retcode = 1 + + sys.exit(retcode) diff --git a/ci_testing_utilities/bin/test_driver_program.sh b/ci_testing_utilities/bin/test_driver_program.sh new file mode 100755 index 0000000..2b10cc7 --- /dev/null +++ b/ci_testing_utilities/bin/test_driver_program.sh @@ -0,0 +1,221 @@ +#!/usr/bin/env bash +# +# +# +# + +#----------------------------------------------------- +# This program runs the harness unit tests. - +# - +# Prerequisites: - +# The OLCF test harness must be properly - +# initialized or this program will fail - +# in an indeterminate manner. - +# - +#----------------------------------------------------- + +#----------------------------------------------------- +# Function: - +# declare_global_variables - +# - +# Synopsis: - +# Declares global variables used in this bash - +# script. - +# - +# Positional parameters: - +# - +#----------------------------------------------------- +function declare_global_variables () { + # Define some global variables. + declare -gr PROGNAME=$(basename ${0}) + + # Boolean variable for flagging whether to perform + # generic harness unit tests. + declare -g do_generic_tests=false + + # Boolean variable for flagging whether to perform + # machine specific harness unit tests. + declare -g do_machine_specific_tests=false +} + +#----------------------------------------------------- +# Function: - +# error_exit - +# - +# Synopsis: - +# An error handling function. - +# - +# Positional parameters: - +# ${1} A string containing descriptive error - +# message - +# - +#----------------------------------------------------- +function error_exit { + echo "${PROGNAME}: ${1:-"Unknown Error"}" 1>&2 + exit 1 +} + +#----------------------------------------------------- +# function usage() : - +# - +# Synopsis: - +# Prints the usage of this bash script.
- +# - +#----------------------------------------------------- +function usage () { + column_width=50 + let separator_line_width=2*column_width+1 + help_frmt1="%-${column_width}s %-${column_width}s\n" + printf "Description:\n" + printf "\tThis program performs the harness unit tests.\n" + printf "\tThere are two types of tests:\n\n" + printf "\t\t- Tests that are independent of the machine you are on\n" + printf "\t\t- Tests that are dependent on the machine you are on\n" + printf "\n" + printf "\tTo run this program the OLCF harness runtime environment must be properly initialized.\n" + printf "\tNote that the environment variables:\n\n" + printf "\t\t - HUT_CONFIG_TAG\n" + printf "\t\t - HUT_MACHINE_NAME\n" + printf "\t\t - HUT_PATH_TO_SSPACE\n" + printf "\t\t - HUT_SCHED_ACCT_ID\n\n" + printf "\tmust be defined.\n" + + printf "\n" + printf "Usage:\n" + printf "\ttest_driver_program.sh [ -h | --help ] [--generic-tests] [ --machine-specific-tests ] \n\n" + printf "${help_frmt1}" "Option" "Description" + for ((ip=0; ip < ${separator_line_width}; ip++));do + printf "%s" "-" + done + printf "\n" + printf "${help_frmt1}" "-h | --help" "Prints the help message" + printf "${help_frmt1}" "--generic-tests" "Performs generic harness unit tests." + printf "${help_frmt1}" "--machine-specific-tests" "Performs machine specific harness unit tests." +} + + +#----------------------------------------------------- +# Function: - +# check_hut_variables - +# - +# Synopsis: - +# Checks that the HUT environmental variables are - +# set. - +# - +# Positional parameters: - +# - +#----------------------------------------------------- +function check_hut_variables { + if [ -z ${HUT_CONFIG_TAG} ];then + error_exit "The environmental variable HUT_CONFIG_TAG is not set." + fi + + if [ -z ${HUT_MACHINE_NAME} ];then + error_exit "The environmental variable HUT_MACHINE_NAME is not set." + fi + + if [ -z ${HUT_PATH_TO_SSPACE} ];then + error_exit "The environmental variable HUT_PATH_TO_SSPACE is not set." + fi + + # Make sure HUT_PATH_TO_SSPACE is not the user's ${HOME} directory. + if [[ ${HUT_PATH_TO_SSPACE} -ef ${HOME} ]];then + error_message="The environmental variable HUT_PATH_TO_SSPACE points to your home directory: ${HOME}.\n" + error_message+="Change HUT_PATH_TO_SSPACE to point to another directory so as not to inadvertently erase any important files.\n" + error_exit "${error_message}" + fi + + if [ -z ${HUT_SCHED_ACCT_ID} ];then + error_exit "The environmental variable HUT_SCHED_ACCT_ID is not set." + fi +} + +#----------------------------------------------------- +# Process the arguments to this bash script. - +# - +#----------------------------------------------------- +function parse_command_line { + eval set -- $@ + while true;do + case ${1} in + -h | --help) + usage + shift + exit 0;; + + --generic-tests ) + do_generic_tests=true + shift;; + + --machine-specific-tests ) + do_machine_specific_tests=true + shift;; + + -- ) + shift + break;; + + * ) + echo "Internal parsing error!" + usage + exit 1;; + esac + done +} + + +#----------------------------------------------------- +# - +# Start of main body of bash script. - +# - +#----------------------------------------------------- +function main () { + declare_global_variables + + # Validate and parse the command line options. + long_options=help,generic-tests,machine-specific-tests + short_options=h + OPTS=$(getopt --options ${short_options} --long ${long_options} --name "${PROGNAME}" -- "$@") + if [ $? != 0 ]; then + error_exit "The function getopt failed ...
exiting" + fi + + parse_command_line ${OPTS} + if [ $? != 0 ]; then + error_exit "The function parse_command_line failed ... exiting." + fi + + if [[ ${do_generic_tests} ]];then + echo "Performing generic tests.\n" + run_generic_unit_tests.py + fi + + if [[ ${do_machine_specific_tests} ]];then + # Verify that the HUT environmental variables are set. + check_hut_variables + if [ $? != 0 ];then + error_exit "The function check_hut_variables failed ... exiting." + fi + + # Remove the prior scratch space. + if [[ -d "${HUT_PATH_TO_SSPACE}" ]];then + rm -rf ${HUT_PATH_TO_SSPACE}/* + fi + + # Remove all prior test runs. + rm -rf MPI_HelloWorld_*/ + rm -f main.log + + echo "Performing machine specific tests." + run_machine_specific_unit_tests.py --machine ${HUT_MACHINE_NAME} --harness-config-tag $HUT_CONFIG_TAG + fi +} +#----------------------------------------------------- +# - +# End of main body of bash script. - +# - +#----------------------------------------------------- + +main $@ diff --git a/ci_testing_utilities/harness_unit_tests/Ascent/__init__.py b/ci_testing_utilities/harness_unit_tests/Ascent/__init__.py new file mode 100644 index 0000000..44395b8 --- /dev/null +++ b/ci_testing_utilities/harness_unit_tests/Ascent/__init__.py @@ -0,0 +1,4 @@ +__all__ = [ + "test_olcf5_acceptance", + ] + diff --git a/ci_testing_utilities/harness_unit_tests/Ascent/test_olcf5_acceptance.py b/ci_testing_utilities/harness_unit_tests/Ascent/test_olcf5_acceptance.py new file mode 100644 index 0000000..1c466d0 --- /dev/null +++ b/ci_testing_utilities/harness_unit_tests/Ascent/test_olcf5_acceptance.py @@ -0,0 +1,25 @@ +#! /usr/bin/env python3 +import unittest + +class Test_configFile(unittest.TestCase): + """ Tests for main program runtests.py """ + + def setUp(self): + """ Stud documentation of HelloWorld test on Ascent.""" + return + + def test_machine_name(self): + message="The machine name for Ascent in the config is not correct." 
+ self.assertEqual("Ascent","NotAscent",msg=message) + return + + def tearDown(self): + """ Stud doc for tear down """ + return + + + + +if __name__ == "__main__": + unittest.main() + diff --git a/test/src/Repository_Tests/__init__.py b/ci_testing_utilities/harness_unit_tests/Repository_Tests/__init__.py similarity index 100% rename from test/src/Repository_Tests/__init__.py rename to ci_testing_utilities/harness_unit_tests/Repository_Tests/__init__.py diff --git a/test/src/Repository_Tests/git_test_incremental_repository.py b/ci_testing_utilities/harness_unit_tests/Repository_Tests/git_test_incremental_repository.py similarity index 100% rename from test/src/Repository_Tests/git_test_incremental_repository.py rename to ci_testing_utilities/harness_unit_tests/Repository_Tests/git_test_incremental_repository.py diff --git a/test/src/Repository_Tests/git_test_repository.py b/ci_testing_utilities/harness_unit_tests/Repository_Tests/git_test_repository.py similarity index 100% rename from test/src/Repository_Tests/git_test_repository.py rename to ci_testing_utilities/harness_unit_tests/Repository_Tests/git_test_repository.py diff --git a/test/src/Repository_Tests/repository_tests_utility_functions.py b/ci_testing_utilities/harness_unit_tests/Repository_Tests/repository_tests_utility_functions.py similarity index 100% rename from test/src/Repository_Tests/repository_tests_utility_functions.py rename to ci_testing_utilities/harness_unit_tests/Repository_Tests/repository_tests_utility_functions.py diff --git a/test/src/Repository_Tests/svn_test_incremental_repository.py b/ci_testing_utilities/harness_unit_tests/Repository_Tests/svn_test_incremental_repository.py similarity index 100% rename from test/src/Repository_Tests/svn_test_incremental_repository.py rename to ci_testing_utilities/harness_unit_tests/Repository_Tests/svn_test_incremental_repository.py diff --git a/test/src/Repository_Tests/svn_test_repository.py b/ci_testing_utilities/harness_unit_tests/Repository_Tests/svn_test_repository.py similarity index 100% rename from test/src/Repository_Tests/svn_test_repository.py rename to ci_testing_utilities/harness_unit_tests/Repository_Tests/svn_test_repository.py diff --git a/ci_testing_utilities/harness_unit_tests/__init__.py b/ci_testing_utilities/harness_unit_tests/__init__.py new file mode 100644 index 0000000..3c76493 --- /dev/null +++ b/ci_testing_utilities/harness_unit_tests/__init__.py @@ -0,0 +1,8 @@ +__all__ = [ + "harness_unittests_exceptions", + "harness_unittests_logging", + "test_runtests", + "test_machine_specific_tests", + "Ascent" + ] + diff --git a/ci_testing_utilities/harness_unit_tests/harness_unittests_exceptions.py b/ci_testing_utilities/harness_unit_tests/harness_unittests_exceptions.py new file mode 100644 index 0000000..9cf4020 --- /dev/null +++ b/ci_testing_utilities/harness_unit_tests/harness_unittests_exceptions.py @@ -0,0 +1,73 @@ +#! /usr/bin/env python3 +## @package harness_unittests_exceptions +# This module conatins the exceptions that are raised for the Harness unit tests. +# + +# System imports +import string +import argparse # Needed for parsing command line arguments. +import logging # Needed for logging events. 
+ +# Local imports +from harness_unit_tests.harness_unittests_logging import create_logger_description +from harness_unit_tests.harness_unittests_logging import create_logger +class Error(Exception): + """Base class for exceptions in this module.""" + pass + +class EnvironmentalVariableNotSet(Error): + """Exception raised when a required environment variable is not set. + + Attributes: + expression -- The environment variable that is not set. + message -- explanation of the error + """ + + def __init__(self, expression, message): + self.expression = expression + self.message = message + +## @fn _parse_arguments( ) +## @brief Parses the command line arguments. +## +## @details Parses the command line arguments and +## returns a namespace. +## +## @return A namespace. The namespace contains attributes +## that are the command line arguments. +def _parse_arguments(): + + # Create a string of the description of the + # program. + program_description = "This module defines the exceptions raised by the Harness unit tests." + + # Create an argument parser. + my_parser = argparse.ArgumentParser( + description=program_description, + formatter_class=argparse.RawTextHelpFormatter, + add_help=True) + + # Add an optional argument for the logging level. + my_parser.add_argument("--log-level", + type=int, + default=logging.WARNING, + help=create_logger_description() ) + + my_args = my_parser.parse_args() + + return my_args + +## @fn main () +## @brief The main function. +def main(): + args = _parse_arguments() + + logger = create_logger(log_id='hut_exceptions.log', + log_level=args.log_level) + + logger.info("Start of main program") + + logger.info("End of main program") + +if __name__ == "__main__": + main() diff --git a/ci_testing_utilities/harness_unit_tests/harness_unittests_logging.py b/ci_testing_utilities/harness_unit_tests/harness_unittests_logging.py new file mode 100644 index 0000000..5dd7f69 --- /dev/null +++ b/ci_testing_utilities/harness_unit_tests/harness_unittests_logging.py @@ -0,0 +1,76 @@ +#! /usr/bin/env python3 +## @package harness_unittests_logging +# Provides utility functions for logging. +# + +# System imports +import string +import argparse # Needed for parsing command line arguments. +import logging # Needed for logging events. + +# Local imports + +## @fn create_logger_description( ) +## @brief Returns a string whose contents are the log level option help description. +## +## @return A string +def create_logger_description(): + frmt_header = "{0:10s} {1:40.40s} {2:5s}\n" + frmt_items = frmt_header + header1 = frmt_header.format("Level", "Description", "Option Value" ) + header1_len = len(header1) + log_option_desc = "The logging level.
The standard levels are the following:\n\n" + log_option_desc += header1 + log_option_desc += "-"*header1_len + "\n" + log_option_desc += frmt_items.format("NOTSET", "All messages will be", "0" ) + log_option_desc += frmt_items.format("", "processed.", " \n" ) + log_option_desc += frmt_items.format("DEBUG", "Detailed information, typically of ", "10" ) + log_option_desc += frmt_items.format("", "interest only when diagnosing problems.", "\n" ) + log_option_desc += frmt_items.format("INFO", "Confirmation that things", "20" ) + log_option_desc += frmt_items.format("", "are working as expected.", " \n" ) + log_option_desc += frmt_items.format("WARNING ", "An indication that something unexpected", "30" ) + log_option_desc += frmt_items.format("", "happened or indicative of some problem", "" ) + log_option_desc += frmt_items.format("", "in the near future.", "\n" ) + log_option_desc += frmt_items.format("ERROR ", "Due to a more serious problem ", "40" ) + log_option_desc += frmt_items.format("", "the software has not been able ", "" ) + log_option_desc += frmt_items.format("", "to perform some function. ", "\n" ) + log_option_desc += frmt_items.format("CRITICAL ", "A serious error, indicating ", "50" ) + log_option_desc += frmt_items.format("", "that the program itself may be unable", "" ) + log_option_desc += frmt_items.format("", "to continue running.", "\n" ) + return log_option_desc + +## @brief Creates and returns a logger object. +## +## @details Creates a logger object with name log_id and +## logging level log_level, and returns it. +## +## @param log_id A string +## @param log_level A logging level (e.g. logging.DEBUG, logging.INFO, etc.) +## @retval logger A logger object - see logging python documentation +def create_logger(log_id, log_level): + logger = logging.getLogger(log_id) + logger.setLevel(log_level) + + # create console handler and set level to debug + ch = logging.StreamHandler() + ch.setLevel(log_level) + + # create formatter + formatter = logging.Formatter( + '%(asctime)s - %(name)s - %(levelname)s - %(message)s') + + # add formatter to ch + ch.setFormatter(formatter) + + # add ch to logger + logger.addHandler(ch) + + return logger + +## @fn main () +## @brief The main function. +def main(): + return + +if __name__ == "__main__": + main() diff --git a/test/src/test_concurrency.py b/ci_testing_utilities/harness_unit_tests/test_concurrency.py similarity index 100% rename from test/src/test_concurrency.py rename to ci_testing_utilities/harness_unit_tests/test_concurrency.py diff --git a/ci_testing_utilities/harness_unit_tests/test_machine_specific_tests.py b/ci_testing_utilities/harness_unit_tests/test_machine_specific_tests.py new file mode 100644 index 0000000..89828a5 --- /dev/null +++ b/ci_testing_utilities/harness_unit_tests/test_machine_specific_tests.py @@ -0,0 +1,251 @@ +#! /usr/bin/env python3 + +# Python package imports +import unittest +import os +import re +import shutil + +# My harness package imports +from bin import runtests # This imports the olcf harness module runtests.py + # which is the first module called when running + # the harness.
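Worth highlighting for anyone reusing the test below as a template: the harness is driven programmatically by handing runtests.runtests() the same argument string the command line would receive, which is exactly what _start_harness_job does further down. A condensed sketch of that calling convention (the file names here are placeholders):

    from bin import runtests

    # Equivalent to: runtests.py --configfile <ini> --inputfile <input> --loglevel DEBUG
    arg_str = "--configfile {} --inputfile {} --loglevel DEBUG".format(
        "machine.unit_tests.ini", "machine.1_node.txt")
    rgt = runtests.runtests(arg_str)
    rgt.wait_for_completion_in_queue(20)  # maximum wait, in minutes
    all_passed = rgt.didAllTestsPass()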
+ +from libraries import get_machine_name + +class Test_MPI_Hello_World(unittest.TestCase): + """Run the MPI_Hello_World test.""" + + TOP_LEVEL=os.getenv("OLCF_HARNESS_DIR") + + UNIT_TESTS_CWD=os.getcwd() + + MACHINE_NAME=get_machine_name.get_registered_unique_name_based_on_hostname() + + if os.environ['HUT_PATH_TO_SSPACE'] != 'NOT_SET': + SCRATCH_SPACE = os.path.join(os.environ['HUT_PATH_TO_SSPACE'],"MPI_HelloWorld_Scratch_Space") + else: + SCRATCH_SPACE=os.path.join(UNIT_TESTS_CWD,"MPI_HelloWorld_Scratch_Space") + + if os.environ['HUT_SCHED_ACCT_ID'] != 'NOT_SET': + HUT_SCHED_ACCT_ID = os.environ['HUT_SCHED_ACCT_ID'] + else: + HUT_SCHED_ACCT_ID = 'stf006' + + + INPUT_FILES_DIR=os.path.join(UNIT_TESTS_CWD,"MPI_HelloWorld_Input_Files") + APPLICATION_PATH=os.path.join(UNIT_TESTS_CWD,"MPI_HelloWorld_Applications") + + @classmethod + def setUpClass(cls): + # Create a scratch space directory to run in. + os.makedirs(cls.SCRATCH_SPACE) + + # Create a directory to store the input files. + os.makedirs(cls.INPUT_FILES_DIR) + + # Create a directory to store the cloned MPI_Hello_World test applications. + os.makedirs(cls.APPLICATION_PATH) + + # Export to the environment 'UNIT_TESTS_CWD'. + os.environ["UNIT_TESTS_CWD"] = cls.UNIT_TESTS_CWD + + @classmethod + def tearDownClass(cls): + # Remove 'UNIT_TESTS_CWD' from the environment. + os.unsetenv("UNIT_TESTS_CWD") + + def setUp(self): + print("In test setup.") + return + + def tearDown(self): + print("In test teardown.") + return + + def test_single_node_MPI_Hello_World(self): + """Runs an MPI Hello World job on a single node.""" + + test_input_files_directory = self.INPUT_FILES_DIR + (harness_input_file,config_ini_filepath) = \ + self._create_input_files_for_test_single_node_MPI_Hello_World(test_input_files_directory) + + # Run the test. + rgt = self._start_harness_job(test_input_files_directory,harness_input_file,config_ini_filepath) + + # Set the maximum wait, in minutes, for jobs to leave the queue. Twenty minutes is + # ample time for a one-node MPI Hello World job to complete. + time_to_wait = 20 + rgt.wait_for_completion_in_queue(time_to_wait) + + tests_status = rgt.didAllTestsPass() + + if tests_status: + error_message = "No error message." + else: + error_message="MPI Hello World on single node failed." + + self.assertTrue(tests_status,msg=error_message) + + @unittest.skip("Test not fully implemented.") + def test_multiple_nodes(self): + """Runs an MPI Hello World job on multiple nodes.""" + + # Create the multiple-node input file. + + tests_status = False + if tests_status: + pass + else: + error_message="MPI Hello World on multiple nodes failed. The test is not fully implemented." + + self.assertTrue(tests_status,msg=error_message) + + def _get_harness_input_file_records(self,harness_template_rgt_input_filepath,re_patterns): + """Returns a list of strings that are the input file records for running the MPI Hello World test. + + Parameters + ---------- + harness_template_rgt_input_filepath: A string + The absolute file path to the template harness input file. + + re_patterns : A dictionary with elements of the form { compiled regular expression : replacement string } + + Returns + ------- + new_input_file_records : + A list of strings.
+ """ + + old_input_file_records = [] + with open(harness_template_rgt_input_filepath,"r") as file_obj : + old_input_file_records = file_obj.readlines() + + new_input_file_records = [] + for a_record in old_input_file_records: + for (regex, replacement_str) in re_patterns.items(): + new_input_file_records.append( regex.sub(replacement_str,a_record) ) + + return new_input_file_records + + + def _write_input_file_to_disk(self,file_records,filepath): + """Writes file records to disk. + + The file records, file_records, are written to disk at directory destination_directory with the + filename file_name. + + Parameters + ---------- + file_records : A list of strings + The file records to be written to disk. + + filepath : A string + The absolute file path to write the file records. + + """ + + # Now write the records to disk. + with open(filepath,"w") as file_obj: + for a_record in file_records: + file_obj.write(a_record) + + def _start_harness_job(self,destination_dir,filename,ini_file): + """Starts the OLCF harness job. + + Parameters + ---------- + destination_dir : A string + The file path where the harness is to be launched. + + filename : A string + The name of the harness input file. + + ini_file : A string + The fully qualified file path to the config ini file. + """ + import subprocess + import shlex + + os.chdir(destination_dir) + my_arg_str = "--configfile {} --inputfile {} --loglevel DEBUG".format(ini_file,filename) + rgt = runtests.runtests(my_arg_str) + os.chdir(self.UNIT_TESTS_CWD) + + return rgt + + def _create_input_files_for_test_single_node_MPI_Hello_World(self,destination_dir): + input_template_filename_suffix="1_node.txt" + + # We compute the absolute template harness input file path. + harness_template_rgt_input_filename = \ + self.MACHINE_NAME + "." + input_template_filename_suffix + src_directory = \ + os.path.join(self.TOP_LEVEL,"ci_testing_utilities","input_files",self.MACHINE_NAME,"MPI_Hello_World") + harness_template_rgt_input_filepath = os.path.join(src_directory,harness_template_rgt_input_filename) + + # Get the records of the harness input file. + re_patterns = { re.compile("__pathtotests__") : self.APPLICATION_PATH } + harness_input_file_records = \ + self._get_harness_input_file_records(harness_template_rgt_input_filepath,re_patterns) + + # We compute the absolute harness input file path. + harness_rgt_input_filename = "{}.{}".format(self.MACHINE_NAME,input_template_filename_suffix) + harness_rgt_input_filepath = os.path.join(destination_dir,harness_rgt_input_filename) + + # Write the harness input file records to "harness_rgt_input_filepath". + self._write_input_file_to_disk(harness_input_file_records,harness_rgt_input_filepath) + + # Create the new config ini file. + new_config_filepath = self._create_new_ini_file(destination_dir) + + return (harness_rgt_input_filename,new_config_filepath ) + + def _create_new_ini_file(self,destination_dir): + """Creates a new config ini file and returns the absolute path to the ini file. + + Parameters + ---------- + destination_dir : str + The directory path to the newly created ini config file. + + Returns + ------- + str + The absolute path to the newly created config ini file. + + """ + import subprocess + import shlex + + # Get the registerd unique file name for this machine. + registered_machine_name = get_machine_name.get_registered_unique_name_based_on_hostname() + + # Form the fully qualified path to the old config ini file. 
+ old_config_file = "{}.ini".format(registered_machine_name) + old_config_filepath = os.path.join(self.TOP_LEVEL,"configs",old_config_file) + + # Form the fully qualified path to the new config ini file. + new_config_file = "{}.unit_tests.ini".format(registered_machine_name) + new_config_filepath = os.path.join(destination_dir,new_config_file) + + # Form the command line arguments for creating a new config ini file. + command_line_args="--keys {section} {key} {value}".format(value=self.SCRATCH_SPACE, + section="TestshotDefaults", + key="path_to_sspace" ) + + command_line_args+=" {section} {key} {value}".format(value=self.HUT_SCHED_ACCT_ID, + section="TestshotDefaults", + key="acct_id") + + command_line_args+=" -i {}".format(old_config_filepath) + command_line_args+=" -o {}".format(new_config_filepath) + + # Form the command to generate the new config ini file. + command1 = "create_alt_config_file.py {args}".format(args=command_line_args) + command2 = shlex.split(command1) + + # Execute the command. + p = subprocess.run(command2) + + return new_config_filepath diff --git a/ci_testing_utilities/harness_unit_tests/test_runtests.py b/ci_testing_utilities/harness_unit_tests/test_runtests.py new file mode 100644 index 0000000..265986e --- /dev/null +++ b/ci_testing_utilities/harness_unit_tests/test_runtests.py @@ -0,0 +1,227 @@ +#! /usr/bin/env python3 + +# Python package imports +import unittest +import argparse +import shlex + +# My harness package imports +from bin import runtests # This imports the olcf harness module runtests.py + # which is the first module called when running + # the harness. + +class Test_command_line_arguments(unittest.TestCase): + """ Tests for main program runtests.py """ + + def setUp(self): + """ Stub documentation for runtests.py """ + return + + def test_mode_option_short(self): + """Tests the short option for the harness mode.""" + + # The error message for a failed command line short option. + frmt_message = ("\n\nError Details\n" + "\tFailure in harness mode short option:\n" + "\tcommand line: {}\n") + + # We loop over each valid harness mode, form the appropriate short + # option command line, and verify that the stored mode is permitted + # and is actually that value. + for task in runtests.PERMITTED_HARNESS_TASKS: + command_line_arguments ="-m {}".format(task) + argv = shlex.split(command_line_arguments) + harness_arguments = runtests.parse_commandline_argv(argv) + error_message=frmt_message.format(command_line_arguments) + self.assertEqual(harness_arguments.runmode[0],task,msg=error_message) + + def test_mode_option_long(self): + """Tests the long option for the harness mode.""" + + # The error message for a failed command line option. + frmt_message = ("\n\nError Details\n" + "\tFailure in harness mode long option:\n" + "\tcommand line: {}\n") + + # We loop over each valid harness mode, form the appropriate long + # option command line, and verify that the stored mode is permitted + # and is actually that value. + for task in runtests.PERMITTED_HARNESS_TASKS: + command_line_arguments ="--mode {}".format(task) + argv = shlex.split(command_line_arguments) + harness_arguments = runtests.parse_commandline_argv(argv) + error_message=frmt_message.format(command_line_arguments) + self.assertEqual(harness_arguments.runmode[0],task,msg=error_message) + + def test_mode_option_short_with_multiple_values(self): + """Tests the short option for the harness mode with multiple values.""" + + # The error message for a failed command line option.
+ frmt_message = ("\n\nError Details\n" + "\tFailure in harness mode short option:\n" + "\tcommand line: {}\n") + + # Test the short option for multiple harness tasks. + command_line_arguments ="-m " + for task in runtests.PERMITTED_HARNESS_TASKS: + command_line_arguments += "{} ".format(task) + argv = shlex.split(command_line_arguments) + harness_arguments = runtests.parse_commandline_argv(argv) + error_message=frmt_message.format(command_line_arguments) + for ip in range(len(harness_arguments.runmode)): + self.assertEqual(harness_arguments.runmode[ip], + runtests.PERMITTED_HARNESS_TASKS[ip], + msg=error_message) + + def test_mode_option_long_with_multiple_values(self): + """Tests the long option for the harness mode with multiple values.""" + + # The error message for a failed command line option. + frmt_message = ("\n\nError Details\n" + "\tFailure in harness mode long option:\n" + "\tcommand line: {}\n") + + # Test the long option for multiple harness tasks. + command_line_arguments ="--mode " + for task in runtests.PERMITTED_HARNESS_TASKS: + command_line_arguments += "{} ".format(task) + argv = shlex.split(command_line_arguments) + harness_arguments = runtests.parse_commandline_argv(argv) + error_message=frmt_message.format(command_line_arguments) + for ip in range(len(harness_arguments.runmode)): + self.assertEqual(harness_arguments.runmode[ip], + runtests.PERMITTED_HARNESS_TASKS[ip], + msg=error_message) + + def test_mode_option_default_value(self): + """Tests the default value for the mode option.""" + + # The error message for a failed command line option. + frmt_message = ("\n\nError Details\n" + "\tFailure in harness mode default value:\n" + "\tcommand line: {}\n") + + command_line_arguments = "" + argv = shlex.split(command_line_arguments) + harness_arguments = runtests.parse_commandline_argv(argv) + error_message=frmt_message.format(command_line_arguments) + self.assertEqual(harness_arguments.runmode[0], + runtests.DEFAULT_HARNESS_TASK, + msg=error_message) + + def test_input_file_option_short(self): + """Tests the short option for the input file.""" + + # The error message for a failed command line option. + frmt_message = ("\n\nError Details\n" + "\tFailure in harness input file short option:\n" + "\tcommand line: {}\n") + + # Test the short option for the input file. + filename = "rgt.dummy.input" + command_line_arguments ="-i " + filename + error_message = frmt_message.format(command_line_arguments) + argv = shlex.split(command_line_arguments) + harness_arguments = runtests.parse_commandline_argv(argv) + self.assertEqual(harness_arguments.inputfile,filename,msg=error_message) + + def test_input_file_option_long(self): + """Tests the long option for the input file.""" + + # The error message for a failed command line option. + frmt_message = ("\n\nError Details\n" + "\tFailure in harness input file long option:\n" + "\tcommand line: {}\n") + + # Test the long option for the input file. + filename = "rgt.dummy.input" + command_line_arguments ="--inputfile " + filename + error_message = frmt_message.format(command_line_arguments) + argv = shlex.split(command_line_arguments) + harness_arguments = runtests.parse_commandline_argv(argv) + self.assertEqual(harness_arguments.inputfile,filename,msg=error_message) + + def test_input_file_option_default(self): + """Tests the default option for the input file.""" + + # The error message for a failed command line option.
+        frmt_message = ("\n\nError Details\n"
+                        "\tFailure in harness input file default option:\n"
+                        "\tcommand line: {}\n")
+
+        command_line_arguments = ""
+        error_message = frmt_message.format(command_line_arguments)
+        argv = shlex.split(command_line_arguments)
+        harness_arguments = runtests.parse_commandline_argv(argv)
+        self.assertEqual(harness_arguments.inputfile,
+                         runtests.DEFAULT_INPUT_FILE,
+                         msg=error_message)
+
+    def test_configfile_option_long(self):
+        """Tests the long option for the config file."""
+
+        # The error message for a failed command line option.
+        frmt_message = ("\n\nError Details\n"
+                        "\tFailure in harness config file long option:\n"
+                        "\tcommand line: {}\n")
+
+        config_filename = "DummyMachine.ini"
+        command_line_arguments = "--configfile {}".format(config_filename)
+        error_message=frmt_message.format(command_line_arguments)
+        argv = shlex.split(command_line_arguments)
+        harness_arguments = runtests.parse_commandline_argv(argv)
+        self.assertEqual(harness_arguments.configfile,config_filename,msg=error_message)
+
+    def test_loglevel_option(self):
+        """Tests the log level option."""
+
+        # The error message for a failed command line option.
+        frmt_message = ("\n\nError Details\n"
+                        "\tFailure in harness log level option:\n"
+                        "\tcommand line: {}\n")
+
+        for loglevel in runtests.PERMITTED_LOG_LEVELS:
+            command_line_arguments="--loglevel {}".format(loglevel)
+            error_message = frmt_message.format(command_line_arguments)
+            argv = shlex.split(command_line_arguments)
+            harness_arguments = runtests.parse_commandline_argv(argv)
+            self.assertEqual(harness_arguments.loglevel,loglevel,msg=error_message)
+
+    def test_loglevel_default_option(self):
+        """Tests the log level default option."""
+
+        # The error message for a failed command line option.
+        frmt_message = ("\n\nError Details\n"
+                        "\tFailure in harness log level default option:\n"
+                        "\tcommand line: {}\n")
+
+        # Test the default option for loglevel.
+        command_line_arguments = ""
+        error_message = frmt_message.format(command_line_arguments)
+        argv = shlex.split(command_line_arguments)
+        harness_arguments = runtests.parse_commandline_argv(argv)
+        self.assertEqual(harness_arguments.loglevel,runtests.DEFAULT_LOG_LEVEL,msg=error_message)
+
+    def test_loglevel_option_incorrect_value(self):
+        """Tests for an invalid log level."""
+
+        # The error message for a failed command line option.
+        frmt_message = ("\n\nError Details\n"
+                        "\tFailure in harness log level with an invalid level:\n"
+                        "\tcommand line: {}\n")
+
+        # Test an incorrect choice for loglevel.
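+        # Note: argparse exits on an invalid choice value, so
+        # parse_commandline_argv is expected to raise SystemExit here.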
+        invalid_loglevel = "INVALID_LOG_LEVEL"
+        command_line_arguments="--loglevel {}".format(invalid_loglevel)
+        error_message = frmt_message.format(command_line_arguments)
+        argv = shlex.split(command_line_arguments)
+        self.assertRaises(SystemExit,runtests.parse_commandline_argv,argv)
+
+    def tearDown(self):
+        """ Stub documentation for tearDown. """
+        return
+
+if __name__ == "__main__":
+    unittest.main()
+
+
diff --git a/test/input_files/Crest/rgt.input b/ci_testing_utilities/input_files/Crest/rgt.input
similarity index 100%
rename from test/input_files/Crest/rgt.input
rename to ci_testing_utilities/input_files/Crest/rgt.input
diff --git a/test/input_files/Crest/rgt_environmental_variables.bash.x b/ci_testing_utilities/input_files/Crest/rgt_environmental_variables.bash.x
similarity index 100%
rename from test/input_files/Crest/rgt_environmental_variables.bash.x
rename to ci_testing_utilities/input_files/Crest/rgt_environmental_variables.bash.x
diff --git a/test/input_files/Crest/unit_testing/1.0 b/ci_testing_utilities/input_files/Crest/unit_testing/1.0
similarity index 100%
rename from test/input_files/Crest/unit_testing/1.0
rename to ci_testing_utilities/input_files/Crest/unit_testing/1.0
diff --git a/test/input_files/Sample_Directory_For_Repository_Testing/ANY_MACHINE/Foo/file_1.txt b/ci_testing_utilities/input_files/Sample_Directory_For_Repository_Testing/ANY_MACHINE/Foo/file_1.txt
similarity index 100%
rename from test/input_files/Sample_Directory_For_Repository_Testing/ANY_MACHINE/Foo/file_1.txt
rename to ci_testing_utilities/input_files/Sample_Directory_For_Repository_Testing/ANY_MACHINE/Foo/file_1.txt
diff --git a/test/input_files/Sample_Directory_For_Repository_Testing/ANY_MACHINE/file_0.txt b/ci_testing_utilities/input_files/Sample_Directory_For_Repository_Testing/ANY_MACHINE/file_0.txt
similarity index 100%
rename from test/input_files/Sample_Directory_For_Repository_Testing/ANY_MACHINE/file_0.txt
rename to ci_testing_utilities/input_files/Sample_Directory_For_Repository_Testing/ANY_MACHINE/file_0.txt
diff --git a/test/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Source/Makefile b/ci_testing_utilities/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Source/Makefile
similarity index 100%
rename from test/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Source/Makefile
rename to ci_testing_utilities/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Source/Makefile
diff --git a/test/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Source/main.cpp b/ci_testing_utilities/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Source/main.cpp
similarity index 100%
rename from test/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Source/main.cpp
rename to ci_testing_utilities/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Source/main.cpp
diff --git a/test/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_16cores/Scripts/build_executable.x b/ci_testing_utilities/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_16cores/Scripts/build_executable.x
similarity index 100%
rename from test/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_16cores/Scripts/build_executable.x
rename to
ci_testing_utilities/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_16cores/Scripts/build_executable.x diff --git a/test/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_16cores/Scripts/check_executable.x b/ci_testing_utilities/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_16cores/Scripts/check_executable.x similarity index 100% rename from test/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_16cores/Scripts/check_executable.x rename to ci_testing_utilities/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_16cores/Scripts/check_executable.x diff --git a/test/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_16cores/Scripts/pbs.template.x b/ci_testing_utilities/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_16cores/Scripts/pbs.template.x similarity index 100% rename from test/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_16cores/Scripts/pbs.template.x rename to ci_testing_utilities/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_16cores/Scripts/pbs.template.x diff --git a/test/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_16cores/Scripts/submit_executable.x b/ci_testing_utilities/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_16cores/Scripts/submit_executable.x similarity index 100% rename from test/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_16cores/Scripts/submit_executable.x rename to ci_testing_utilities/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_16cores/Scripts/submit_executable.x diff --git a/test/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_16cores/test_info.txt b/ci_testing_utilities/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_16cores/test_info.txt similarity index 100% rename from test/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_16cores/test_info.txt rename to ci_testing_utilities/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_16cores/test_info.txt diff --git a/test/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_32cores/Scripts/build_executable.x b/ci_testing_utilities/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_32cores/Scripts/build_executable.x similarity index 100% rename from test/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_32cores/Scripts/build_executable.x rename to ci_testing_utilities/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_32cores/Scripts/build_executable.x diff --git a/test/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_32cores/Scripts/check_executable.x b/ci_testing_utilities/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_32cores/Scripts/check_executable.x similarity index 100% rename from test/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_32cores/Scripts/check_executable.x rename to 
ci_testing_utilities/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_32cores/Scripts/check_executable.x diff --git a/test/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_32cores/Scripts/pbs.template.x b/ci_testing_utilities/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_32cores/Scripts/pbs.template.x similarity index 100% rename from test/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_32cores/Scripts/pbs.template.x rename to ci_testing_utilities/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_32cores/Scripts/pbs.template.x diff --git a/test/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_32cores/Scripts/submit_executable.x b/ci_testing_utilities/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_32cores/Scripts/submit_executable.x similarity index 100% rename from test/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_32cores/Scripts/submit_executable.x rename to ci_testing_utilities/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_32cores/Scripts/submit_executable.x diff --git a/test/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_32cores/test_info.txt b/ci_testing_utilities/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_32cores/test_info.txt similarity index 100% rename from test/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_32cores/test_info.txt rename to ci_testing_utilities/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/Test_32cores/test_info.txt diff --git a/test/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/application_info.txt b/ci_testing_utilities/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/application_info.txt similarity index 100% rename from test/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/application_info.txt rename to ci_testing_utilities/input_files/Sample_Directory_For_Repository_Testing/trunk/ANY_MACHINE/HelloWorld/application_info.txt diff --git a/test/input_files/Summitdev/rgt_environmental_variables.bash.x b/ci_testing_utilities/input_files/Summitdev/rgt_environmental_variables.bash.x similarity index 100% rename from test/input_files/Summitdev/rgt_environmental_variables.bash.x rename to ci_testing_utilities/input_files/Summitdev/rgt_environmental_variables.bash.x diff --git a/test/input_files/Summitdev/unit_testing/1.0 b/ci_testing_utilities/input_files/Summitdev/unit_testing/1.0 similarity index 100% rename from test/input_files/Summitdev/unit_testing/1.0 rename to ci_testing_utilities/input_files/Summitdev/unit_testing/1.0 diff --git a/test/input_files/Titan/rgt.input b/ci_testing_utilities/input_files/Titan/rgt.input similarity index 100% rename from test/input_files/Titan/rgt.input rename to ci_testing_utilities/input_files/Titan/rgt.input diff --git a/test/input_files/Titan/rgt_environmental_variables.bash.x b/ci_testing_utilities/input_files/Titan/rgt_environmental_variables.bash.x similarity index 100% rename from test/input_files/Titan/rgt_environmental_variables.bash.x rename to ci_testing_utilities/input_files/Titan/rgt_environmental_variables.bash.x diff --git 
a/test/input_files/Titan/unit_testing/1.0 b/ci_testing_utilities/input_files/Titan/unit_testing/1.0 similarity index 100% rename from test/input_files/Titan/unit_testing/1.0 rename to ci_testing_utilities/input_files/Titan/unit_testing/1.0 diff --git a/ci_testing_utilities/input_files/lyra/MPI_Hello_World/lyra.1_node.txt b/ci_testing_utilities/input_files/lyra/MPI_Hello_World/lyra.1_node.txt new file mode 100644 index 0000000..4a2fbfe --- /dev/null +++ b/ci_testing_utilities/input_files/lyra/MPI_Hello_World/lyra.1_node.txt @@ -0,0 +1,16 @@ +################################################################################ +# Set the path to the top level of the application directory. # +################################################################################ + +Path_to_tests = __pathtotests__ + +Test = mpi-hello-world cxx_nodes_1 + + +################################################ +# Harness tasks # +################################################ +Harness_task = check_out_tests +Harness_task = start_tests +Harness_task = stop_tests + diff --git a/ci_testing_utilities/input_files/pircdefense/MPI_Hello_World/pircdefense.1_node.txt b/ci_testing_utilities/input_files/pircdefense/MPI_Hello_World/pircdefense.1_node.txt new file mode 100644 index 0000000..4a2fbfe --- /dev/null +++ b/ci_testing_utilities/input_files/pircdefense/MPI_Hello_World/pircdefense.1_node.txt @@ -0,0 +1,16 @@ +################################################################################ +# Set the path to the top level of the application directory. # +################################################################################ + +Path_to_tests = __pathtotests__ + +Test = mpi-hello-world cxx_nodes_1 + + +################################################ +# Harness tasks # +################################################ +Harness_task = check_out_tests +Harness_task = start_tests +Harness_task = stop_tests + diff --git a/ci_testing_utilities/input_files/rhea/MPI_Hello_World/rhea.1_node.txt b/ci_testing_utilities/input_files/rhea/MPI_Hello_World/rhea.1_node.txt new file mode 100644 index 0000000..4a2fbfe --- /dev/null +++ b/ci_testing_utilities/input_files/rhea/MPI_Hello_World/rhea.1_node.txt @@ -0,0 +1,16 @@ +################################################################################ +# Set the path to the top level of the application directory. # +################################################################################ + +Path_to_tests = __pathtotests__ + +Test = mpi-hello-world cxx_nodes_1 + + +################################################ +# Harness tasks # +################################################ +Harness_task = check_out_tests +Harness_task = start_tests +Harness_task = stop_tests + diff --git a/ci_testing_utilities/input_files/summit/MPI_Hello_World/summit.1_node.txt b/ci_testing_utilities/input_files/summit/MPI_Hello_World/summit.1_node.txt new file mode 100644 index 0000000..4a2fbfe --- /dev/null +++ b/ci_testing_utilities/input_files/summit/MPI_Hello_World/summit.1_node.txt @@ -0,0 +1,16 @@ +################################################################################ +# Set the path to the top level of the application directory. 
#
+################################################################################
+
+Path_to_tests = __pathtotests__
+
+Test = mpi-hello-world cxx_nodes_1
+
+
+################################################
+# Harness tasks                                #
+################################################
+Harness_task = check_out_tests
+Harness_task = start_tests
+Harness_task = stop_tests
+
diff --git a/ci_testing_utilities/runtime_environment/Ascent-olcf5_acceptance.unit_tests.lua b/ci_testing_utilities/runtime_environment/Ascent-olcf5_acceptance.unit_tests.lua
new file mode 100644
index 0000000..ea032e1
--- /dev/null
+++ b/ci_testing_utilities/runtime_environment/Ascent-olcf5_acceptance.unit_tests.lua
@@ -0,0 +1,3 @@
+-- -*- lua -*-
+whatis([[Name : OLCF Harness Unit Tests]])
+
diff --git a/ci_testing_utilities/runtime_environment/GenericMachine-GenericConfigTag.unit_tests.lua b/ci_testing_utilities/runtime_environment/GenericMachine-GenericConfigTag.unit_tests.lua
new file mode 100644
index 0000000..9452f60
--- /dev/null
+++ b/ci_testing_utilities/runtime_environment/GenericMachine-GenericConfigTag.unit_tests.lua
@@ -0,0 +1,56 @@
+-- -*- lua -*-
+
+-- Sourcing this file sets up the ci_testing_environment
+-- for running non machine-specific tests. The machine-specific
+-- tests' runtime environment is set up if the environment
+-- variables
+--     HUT_MACHINE_NAME
+--     HUT_CONFIG_TAG
+-- are defined - the module file ${HUT_MACHINE_NAME}-${HUT_CONFIG_TAG}
+-- is then loaded.
+
+-- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+-- Status messages for user.
+-- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+local load_message = "Loading modulefile " .. myModuleFullName() .. " ..."
+local unload_message = "Unloading modulefile " .. myModuleFullName() .. " ..."
+
+if ( mode() == "load" ) then
+    LmodMessage(load_message)
+end
+
+if ( mode() == "unload") then
+    LmodMessage(unload_message)
+end
+
+-- Set the top level directory for the CI testing directory.
+local ci_testing_toplevel = pathJoin(os.getenv("OLCF_HARNESS_DIR"),'ci_testing_utilities')
+
+-- Set the path to the CI bin directory.
+local ci_testing_bin_dir = pathJoin(ci_testing_toplevel,'bin')
+
+-- Add the CI top level directory to the PYTHONPATH.
+prepend_path('PYTHONPATH',ci_testing_toplevel)
+
+-- Add the generic harness unit tests to the PATH variable.
+prepend_path('PATH',ci_testing_bin_dir)
+
+-- Load machine-specific unit tests modules.
+
+-- Check whether the environment variables HUT_MACHINE_NAME and
+-- HUT_CONFIG_TAG are defined. If both variables are defined,
+-- then load the appropriate module file.
+if ( os.getenv('HUT_MACHINE_NAME') ~= nil ) and ( os.getenv('HUT_CONFIG_TAG') ~= nil ) then
+    local _hutmachinename=os.getenv('HUT_MACHINE_NAME')
+    local _hutconfigtag=os.getenv('HUT_CONFIG_TAG')
+    local rt_file1 = _hutmachinename .. "-" .. _hutconfigtag .. ".unit_tests.lua"
+    local rt_file = pathJoin('runtime_environment',rt_file1)
+    try_load(rt_file)
+    if not isloaded(rt_file) then
+        message = "WARNING! Failed to load machine specific unit test module " .. rt_file .. "."
+        LmodMessage(message)
+    else
+        message = "Successfully loaded machine specific unit test module " .. rt_file .. "."
+        LmodMessage(message)
+    end
+end
diff --git a/ci_testing_utilities/runtime_environment/lyra-olcf5_acceptance.unit_tests.lua b/ci_testing_utilities/runtime_environment/lyra-olcf5_acceptance.unit_tests.lua
new file mode 100644
index 0000000..8b1ad68
--- /dev/null
+++ b/ci_testing_utilities/runtime_environment/lyra-olcf5_acceptance.unit_tests.lua
@@ -0,0 +1,15 @@
+-- -*- lua -*-
+whatis([[Name : OLCF Harness Unit Tests]])
+load_message = "Loading modulefile " .. myModuleFullName() .. " ..."
+unload_message = "Unloading modulefile " .. myModuleFullName() .. " ..."
+
+-- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+-- Status messages for user.
+-- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+if mode() == "load" then
+    LmodMessage(load_message)
+end
+
+if mode() == "unload" then
+    LmodMessage(unload_message)
+end
diff --git a/ci_testing_utilities/runtime_environment/pircdefense-olcf5_acceptance.unit_tests.lua b/ci_testing_utilities/runtime_environment/pircdefense-olcf5_acceptance.unit_tests.lua
new file mode 100644
index 0000000..d2e4774
--- /dev/null
+++ b/ci_testing_utilities/runtime_environment/pircdefense-olcf5_acceptance.unit_tests.lua
@@ -0,0 +1,15 @@
+-- -*- lua -*-
+whatis([[Name : OLCF Harness Unit Tests]])
+load_message = "Loading modulefile " .. myModuleFullName() .. " ..."
+unload_message = "Unloading modulefile " .. myModuleFullName() .. " ..."
+
+-- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+-- Status messages for user.
+-- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+if mode() == "load" then
+    LmodMessage(load_message)
+end
+
+if mode() == "unload" then
+    LmodMessage(unload_message)
+end
diff --git a/ci_testing_utilities/runtime_environment/rhea-olcf5_acceptance.unit_tests.lua b/ci_testing_utilities/runtime_environment/rhea-olcf5_acceptance.unit_tests.lua
new file mode 100644
index 0000000..d2e4774
--- /dev/null
+++ b/ci_testing_utilities/runtime_environment/rhea-olcf5_acceptance.unit_tests.lua
@@ -0,0 +1,15 @@
+-- -*- lua -*-
+whatis([[Name : OLCF Harness Unit Tests]])
+load_message = "Loading modulefile " .. myModuleFullName() .. " ..."
+unload_message = "Unloading modulefile " .. myModuleFullName() .. " ..."
+
+-- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+-- Status messages for user.
+-- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+if mode() == "load" then
+    LmodMessage(load_message)
+end
+
+if mode() == "unload" then
+    LmodMessage(unload_message)
+end
diff --git a/ci_testing_utilities/runtime_environment/summit-olcf5_acceptance.unit_tests.lua b/ci_testing_utilities/runtime_environment/summit-olcf5_acceptance.unit_tests.lua
new file mode 100644
index 0000000..d2e4774
--- /dev/null
+++ b/ci_testing_utilities/runtime_environment/summit-olcf5_acceptance.unit_tests.lua
@@ -0,0 +1,15 @@
+-- -*- lua -*-
+whatis([[Name : OLCF Harness Unit Tests]])
+load_message = "Loading modulefile " .. myModuleFullName() .. " ..."
+unload_message = "Unloading modulefile " .. myModuleFullName() .. " ..."
+
+-- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+-- Status messages for user.
+-- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ +if mode() == "load" then + LmodMessage(load_message) +end + +if mode() == "unload" then + LmodMessage(unload_message) +end diff --git a/configs/example_machine.ini b/configs/example_machine.ini new file mode 100644 index 0000000..8c0881b --- /dev/null +++ b/configs/example_machine.ini @@ -0,0 +1,24 @@ +[MachineDetails] +machine_name = a_system +machine_type = linux_x86_64 +scheduler_type = slurm +joblauncher_type = srun +cpus_per_node = 16 +gpus_per_node = 1 +submit_queue = batch +submit_args = +nccs_test_harness_module = olcf_harness + +[RepoDetails] +type_of_repository = git +git_reps_branch = master +git_data_transfer_protocol = ssh +git_machine_name = a_system +git_server_application_parent_dir = another_path/another_dir +git_ssh_server_url = gitlab@github.com +git_https_server_url = https://github.com + +[TestshotDefaults] +path_to_sspace = /home/a_user/other_dir +system_log_tag = a_label +acct_id = abc123 diff --git a/configs/lyra.ini b/configs/lyra.ini deleted file mode 100644 index 4ca0ba7..0000000 --- a/configs/lyra.ini +++ /dev/null @@ -1,23 +0,0 @@ -[MachineDetails] -machine_name = lyra -scheduler_type = slurm -joblauncher_type = srun -cpus_per_node = 48 -gpus_per_node = 1 -submit_queue = batch -submit_args = -nccs_test_harness_module = olcf_harness - -[RepoDetails] -type_of_repository = git -git_reps_branch = master -git_data_transfer_protocol = ssh -git_machine_name = lyra -git_server_application_parent_dir = olcf-system-test/applications -git_ssh_server_url = gitlab@gitlab.ccs.ornl.gov -git_https_server_url = https://gitlab.ccs.ornl.gov - -[TestshotDefaults] -path_to_sspace = /ccs/home/vgv/lyra_tshot -system_log_tag = tshot_lyra -acct_id = stf016 diff --git a/configs/master.ini b/configs/master.ini deleted file mode 120000 index 94d2aab..0000000 --- a/configs/master.ini +++ /dev/null @@ -1 +0,0 @@ -lyra.ini \ No newline at end of file diff --git a/configs/rhea.ini b/configs/rhea.ini deleted file mode 100644 index 90001e7..0000000 --- a/configs/rhea.ini +++ /dev/null @@ -1,23 +0,0 @@ -[MachineDetails] -machine_name = rhea -scheduler_type = slurm -joblauncher_type = srun -cpus_per_node = 16 -gpus_per_node = 0 -submit_queue = batch -submit_args = -nccs_test_harness_module = olcf_harness_rhea - -[RepoDetails] -type_of_repository = git -git_reps_branch = master -git_data_transfer_protocol = ssh -git_machine_name = rhea -git_server_application_parent_dir = olcf-system-test/applications -git_ssh_server_url = gitlab@gitlab.ccs.ornl.gov -git_https_server_url = https://gitlab.ccs.ornl.gov - -[TestshotDefaults] -path_to_sspace = /gpfs/alpine/stf006/world-shared/vgv/rhea_tshot_20191204/Scratch -system_log_tag = tshot_rhea -acct_id = stf006accept diff --git a/configs/summit.ini b/configs/summit.ini deleted file mode 100644 index aed2add..0000000 --- a/configs/summit.ini +++ /dev/null @@ -1,23 +0,0 @@ -[MachineDetails] -machine_name = summit -scheduler_type = lsf -joblauncher_type = jsrun -cpus_per_node = 42 -gpus_per_node = 6 -submit_queue = batch -submit_args = -nccs_test_harness_module = olcf_harness_summit - -[RepoDetails] -type_of_repository = git -git_reps_branch = master -git_data_transfer_protocol = ssh -git_machine_name = summit -git_server_application_parent_dir = olcf-system-test/applications -git_ssh_server_url = gitlab@gitlab.ccs.ornl.gov -git_https_server_url = https://gitlab.ccs.ornl.gov - -[TestshotDefaults] -path_to_sspace = /gpfs/alpine/stf006/world-shared/vgv/summit_tshot/Scratch -system_log_tag = tshot_summit -acct_id = 
stf006accept
diff --git a/doc-sphinx/build_documentation.sh b/doc-sphinx/build_documentation.sh
index 7d2da06..79aedc1 100755
--- a/doc-sphinx/build_documentation.sh
+++ b/doc-sphinx/build_documentation.sh
@@ -1,4 +1,9 @@
 #!/usr/bin/env bash
-sphinx-apidoc -f -o source ../harness_source_documentation
+#-----------------------------------------------------
+# Build the documentation using the provided          -
+# makefile.                                           -
+#                                                     -
+#-----------------------------------------------------
+make clean
 make html
diff --git a/doc-sphinx/source/_static/css/theme_overrides.css b/doc-sphinx/source/_static/css/theme_overrides.css
new file mode 100644
index 0000000..b951b7d
--- /dev/null
+++ b/doc-sphinx/source/_static/css/theme_overrides.css
@@ -0,0 +1,41 @@
+.wy-nav-content {
+    max-width: 1200px;
+}
+
+/* Adds whitespace between OLCF logo and Docs Home link */
+body > div > nav > div > div.wy-side-nav-search > a > img {
+    padding-bottom: 10px;
+}
+
+/* Clicking on the OLCF logo does nothing
+   (disable RTD theme's default behavior) */
+body > div > nav > div > div.wy-side-nav-search > a{
+    pointer-events: none;
+    cursor: default;
+}
+
+/* Supersede the above block, and allow the Docs Home link to be clickable */
+body > div > nav > div > div.wy-side-nav-search > a > a{
+    pointer-events: auto !important;
+    cursor: pointer !important;
+    color: grey !important;
+}
+
+/* Don't let the color of the Docs Home link change. */
+body > div > nav > div > div.wy-side-nav-search > a > a:visited{
+    color: grey !important;
+}
+
+/* override table width restrictions */
+@media screen and (min-width: 767px) {
+
+    .wy-table-responsive table td {
+        /* !important prevents the common CSS stylesheets from overriding
+           this as on RTD they are loaded after this stylesheet */
+        white-space: normal !important;
+    }
+
+    .wy-table-responsive {
+        overflow: auto !important;
+    }
+}
diff --git a/doc-sphinx/source/_static/js/custom.js b/doc-sphinx/source/_static/js/custom.js
new file mode 100644
index 0000000..0813831
--- /dev/null
+++ b/doc-sphinx/source/_static/js/custom.js
@@ -0,0 +1,49 @@
+$( document ).ready(function() {
+
+    // Create link and text for navigation back to the OLCF home page
+    var olcf_link = document.createElement("a");
+    var olcf_text = document.createTextNode("OLCF Home Page");
+    olcf_link.appendChild(olcf_text);
+    olcf_link.setAttribute("href", "https://olcf.ornl.gov");
+
+    // Open OLCF home page in new tab when clicked
+    olcf_link.setAttribute("target","_blank");
+
+    var separator = document.createTextNode(" | ");
+
+    // These items are right-aligned in the RTD theme breadcrumbs
+    aside = document.querySelector("body > div > section > div > div > div:nth-child(1) > ul > li.wy-breadcrumbs-aside");
+
+    // Next to the default "Edit on GitHub", add a separator, then the OLCF link.
+ aside.appendChild(separator); + aside.appendChild(olcf_link); + + // Insert Project Name "OLCF Test Harness Documentation" below html_logo in sidebar navigation +// var project_name_link = document.createElement("a"); +// var project_name_text = document.createTextNode(" OLCF Test Harness Documentation"); +// project_name_link.appendChild(project_name_text); +// project_name_link.setAttribute("href", "https://docs.olcf.ornl.gov"); +// project_name_link.classList.add("icon"); +// project_name_link.classList.add("icon-home"); +// wysidenavsearch = document.querySelector("body > div > nav > div > div.wy-side-nav-search > a"); +// wysidenavsearch.appendChild(project_name_link); + + + // For any external links in the main navigation, append the FontAwesome external link icon. + function iconize_external_links(nav_level){ + a_elements = nav_level.getElementsByTagName("A"); + for (var i = 0; i < a_elements.length; ++i){ + if (a_elements[i].getAttribute("href").includes("http")){ + var icon = document.createElement("i"); + icon.classList.add("fa"); + icon.classList.add("fa-external-link"); + var spacer = document.createTextNode(" "); + a_elements[i].appendChild(spacer); + a_elements[i].appendChild(icon); + } + } + } + + iconize_external_links(document.querySelector("body > div > nav > div > div.wy-menu.wy-menu-vertical")) + +}); diff --git a/doc-sphinx/source/conf.py b/doc-sphinx/source/conf.py index 24125a5..878df80 100644 --- a/doc-sphinx/source/conf.py +++ b/doc-sphinx/source/conf.py @@ -36,6 +36,7 @@ 'sphinx.ext.doctest', 'sphinx.ext.viewcode', 'sphinx.ext.githubpages', + 'sphinx.ext.napoleon' ] # Add any paths that contain templates here, relative to this directory. @@ -57,7 +58,7 @@ # General information about the project. project = 'NCCS_Test_Harness' copyright = '2016' -author = 'Arnold Tharrington, Veronica Vergara, Wayne Joubert, Reuben Budiardja, Mark Berrill' +author = 'Arnold Tharrington, Veronica Vergara, Wayne Joubert, Reuben Budiardja, Mark Berrill, and Mike Brim' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -65,6 +66,7 @@ # # The short X.Y version. version = '1.0' + # The full version, including alpha/beta/rc tags. release = '1.0' @@ -95,6 +97,13 @@ # 'inherited-members' and 'show-inheritance'. # autodoc_default_flags = ['members','prvate-members'] + +# This value selects if automatically documented members are sorted alphabetical +# (value 'alphabetical'), by member type (value 'groupwise') or by source order +# (value 'bysource'). The default is alphabetical. +# Note that for source order, the module must be a Python module with the source code available. +autodoc_member_order = 'bysource' + # The reST default role (used for this markup: `text`) to use for all # documents. # @@ -107,7 +116,7 @@ # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # -# add_module_names = True +add_module_names = False # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. @@ -135,13 +144,32 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -html_theme = 'alabaster' +#html_theme = 'classic' +html_theme = 'sphinx_rtd_theme' + +html_css_files = [ + 'css/theme_overrides.css', +] + +html_js_files = [ + 'js/custom.js', +] # Theme options are theme-specific and customize the look and feel of a theme # further. 
For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
+# see https://sphinx-rtd-theme.readthedocs.io/en/stable/configuring.html
+html_theme_options = {
+#    'canonical_url': 'https://docs.olcf.ornl.gov',
+    'collapse_navigation': False,
+    'sticky_navigation': True,
+    'navigation_depth': 4,
+    'style_external_links': True,
+    'style_nav_header_background': '#efefef',
+    'logo_only': True,
+}
 # Add any paths that contain custom themes here, relative to this directory.
 # html_theme_path = []
@@ -159,6 +187,7 @@
 # of the sidebar.
 #
 # html_logo = None
+html_logo = 'images/olcf_logo.png'
 # The name of an image file (relative to this directory) to use as a favicon of
 # the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
diff --git a/doc-sphinx/source/developer_guide/intro.rst b/doc-sphinx/source/developer_guide/intro.rst
new file mode 100644
index 0000000..d0ff54d
--- /dev/null
+++ b/doc-sphinx/source/developer_guide/intro.rst
@@ -0,0 +1,59 @@
+============
+Introduction
+============
+
+.. toctree::
+   :maxdepth: 1
+
+Raison d'etre
+=============
+The NCCS_Test_Harness, hereafter referred to as the Harness, is used for OLCF
+machine acceptances, and is generally designed for Linux
+or UNIX-like operating systems. The Harness's goal is to replicate users'
+development and production environments for machine validation and stress
+testing. This is accomplished by repeatedly building applications, submitting these
+applications' jobs to the job scheduler (PBS, LSF, etc.), and recording the
+results of the applications' builds and runs.
+
+Organizational Structure
+========================
+The Harness top-level directory contains the directory *.git* and is hereafter
+referred to as *olcf-test-harness*. The Harness's organizational structure has four
+parts. The first part is the Python files that run the application tests for
+acceptance. These files are predominantly located in the directory
+*olcf-test-harness/harness*.
+
+The second part is the runtime configuration files for various OLCF machines.
+These files are predominantly located in the directories
+*olcf-test-harness/modulefiles* and *olcf-test-harness/configs*.
+
+The third part is the unit tests for CI development. These files are
+predominantly located in the directory *olcf-test-harness/ci_testing_utilities*.
+
+The fourth part is the Harness user and developer documentation located in the
+directory *olcf-test-harness/doc-sphinx*. ::
+
+    olcf-test-harness
+    |-- ci_testing_utilities/
+    |-- configs/
+    |-- doc-sphinx/
+    |-- harness/
+    `-- modulefiles/
+
+Prerequisites
+=============
+The Harness requires Python 3.6 or greater (needs reference), the Lmod module system (needs
+reference), and Git X.Y or higher, along with the following:
+
+* Unix/Linux operating system
+* Bash shell
+* Sphinx for documentation
+* Git for source control
+* GitLab CI/CD
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
diff --git a/doc-sphinx/source/developer_guide/modules.rst b/doc-sphinx/source/developer_guide/modules.rst
new file mode 100644
index 0000000..7208063
--- /dev/null
+++ b/doc-sphinx/source/developer_guide/modules.rst
@@ -0,0 +1,7 @@
+harness
+=======
+
+.. toctree::
+   :maxdepth: 4
+
+   harness
diff --git a/doc-sphinx/source/developer_guide/modules/create_alt_config_file.rst b/doc-sphinx/source/developer_guide/modules/create_alt_config_file.rst
new file mode 100644
index 0000000..a0e2027
--- /dev/null
+++ b/doc-sphinx/source/developer_guide/modules/create_alt_config_file.rst
@@ -0,0 +1,12 @@
+.. _create_alt_config_file_module:
+
+create_alt_config_file.py Module Documentation
+==============================================
+.. We set the default Sphinx domain to python
+.. default-domain:: py
+
+.. module:: create_alt_config_file
+   :synopsis: This module's function is to create alternative configuration files.
+
+.. automodule:: create_alt_config_file
+   :members:
diff --git a/doc-sphinx/source/developer_guide/modules/layout_of_apps_directory.rst b/doc-sphinx/source/developer_guide/modules/layout_of_apps_directory.rst
new file mode 100644
index 0000000..95d9eba
--- /dev/null
+++ b/doc-sphinx/source/developer_guide/modules/layout_of_apps_directory.rst
@@ -0,0 +1,12 @@
+.. _layout_of_apps_directory_module:
+
+layout_of_apps_directory.py Module Documentation
+================================================
+.. We set the default Sphinx domain to python
+.. default-domain:: py
+
+.. module:: layout_of_apps_directory
+   :synopsis: This module stores the locations of key harness application files and directories.
+
+.. automodule:: layout_of_apps_directory
+   :members:
diff --git a/doc-sphinx/source/modules/repositories/git_repository.rst b/doc-sphinx/source/developer_guide/modules/repositories/git_repository.rst
similarity index 95%
rename from doc-sphinx/source/modules/repositories/git_repository.rst
rename to doc-sphinx/source/developer_guide/modules/repositories/git_repository.rst
index ad1d56e..b997824 100644
--- a/doc-sphinx/source/modules/repositories/git_repository.rst
+++ b/doc-sphinx/source/developer_guide/modules/repositories/git_repository.rst
@@ -17,17 +17,17 @@ git_repository Module Documentation
 
 .. py:method:: binaryName
 
-   :property:
+   :property: A property for the name of the git binary.
 
 
 .. py:method:: repository_branch
 
-   :property:
+   :property: A property for the name of the remote repository branch.
 
 
 .. py:method:: remote_repository_URL
 
-   :property:
+   :property: A property for the name of the remote repository URL.
 
diff --git a/doc-sphinx/source/developer_guide/modules/runtests.rst b/doc-sphinx/source/developer_guide/modules/runtests.rst
new file mode 100644
index 0000000..6c59d4b
--- /dev/null
+++ b/doc-sphinx/source/developer_guide/modules/runtests.rst
@@ -0,0 +1,12 @@
+.. _runtests_module:
+
+runtests.py Module Documentation
+================================
+.. We set the default Sphinx domain to python
+.. default-domain:: py
+
+.. module:: runtests
+   :synopsis: This module is the entry point for running the harness.
+
+.. automodule:: runtests
+   :members:
diff --git a/doc-sphinx/source/developer_guide/notational_conventions.rst b/doc-sphinx/source/developer_guide/notational_conventions.rst
new file mode 100644
index 0000000..42f942f
--- /dev/null
+++ b/doc-sphinx/source/developer_guide/notational_conventions.rst
@@ -0,0 +1,42 @@
+======================
+Notational Conventions
+======================
+
+.. toctree::
+   :maxdepth: 1
+
+We shall adopt the conventions used in the book Learning the bash Shell [NR1998]_:
+
+ - The *italic* font is used for filenames, directories, non-Unix shell commands, and shell
+   functions.
+
+ - The **bold** font is used for shell built-in commands, variables,
+   options, command lines when they are within regular text, and text to
+   be typed in by the user within regular text.
+
+ - ``Constant width`` font is used for file content.
+
+The following text demonstrates some of the conventions.
+
+We shall create a Bash program called *helloWorld.sh*. Create the program file with the
+following text using the *vi* editor:
+
+::
+
+   #!/usr/bin/env bash
+
+   declare -gr message='Hello World'
+   printf "%s\n" "${message}"
+
+It will be necessary to set executable permission on *helloWorld.sh* by typing
+**chmod +x ./helloWorld.sh**. After setting executable permission, run as follows:
+
+   **./helloWorld.sh**
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
diff --git a/doc-sphinx/source/packages.rst b/doc-sphinx/source/developer_guide/packages.rst
similarity index 74%
rename from doc-sphinx/source/packages.rst
rename to doc-sphinx/source/developer_guide/packages.rst
index 6f5c7c8..5578dbb 100644
--- a/doc-sphinx/source/packages.rst
+++ b/doc-sphinx/source/developer_guide/packages.rst
@@ -5,4 +5,5 @@ NCCS Harness Python Packages
    :maxdepth: 1
 
    packages/repositories
-
+   packages/bin
+   packages/libraries
diff --git a/doc-sphinx/source/developer_guide/packages/bin.rst b/doc-sphinx/source/developer_guide/packages/bin.rst
new file mode 100644
index 0000000..0d47eb3
--- /dev/null
+++ b/doc-sphinx/source/developer_guide/packages/bin.rst
@@ -0,0 +1,19 @@
+NCCS Harness bin package
+========================
+
+.. toctree::
+   :maxdepth: 2
+
+Introduction
+------------
+
+This package provides binaries to perform harness tasks.
+The major binary is the runtests.py module. The module is the
+entry point for running the harness.
+
+
+Modules
+-------
+
+:ref:`runtests_module`
+:ref:`create_alt_config_file_module`
diff --git a/doc-sphinx/source/packages/repositories.rst b/doc-sphinx/source/developer_guide/packages/repositories.rst
similarity index 100%
rename from doc-sphinx/source/packages/repositories.rst
rename to doc-sphinx/source/developer_guide/packages/repositories.rst
diff --git a/doc-sphinx/source/developer_guide/references.rst b/doc-sphinx/source/developer_guide/references.rst
new file mode 100644
index 0000000..d0783aa
--- /dev/null
+++ b/doc-sphinx/source/developer_guide/references.rst
@@ -0,0 +1,5 @@
+==========
+References
+==========
+
+.. [NR1998] Newham, C., & Rosenblatt, B. (1998). Learning the bash Shell, 3rd edition. Sebastopol, CA: O'Reilly.
diff --git a/doc-sphinx/source/developer_guide/unit_test_framework.rst b/doc-sphinx/source/developer_guide/unit_test_framework.rst
new file mode 100644
index 0000000..abf11f8
--- /dev/null
+++ b/doc-sphinx/source/developer_guide/unit_test_framework.rst
@@ -0,0 +1,112 @@
+Harness's Git Unit Test Framework
+=================================
+
+.. .. toctree::
+..    :maxdepth: 1
+
+The Harness unit testing framework uses Python's pytest module
+to implement the unit tests, and GitLab's CI/CD service to run
+the tests. The unit tests can also be run without GitLab's CI/CD
+service.
+
+Organizational Structure
+------------------------
+
+The sources for the Harness unit tests are located in the directory
+*olcf-test-harness/ci_testing_utilities*. The main driver scripts to run the
+generic and machine specific tests are located in the directory
+*olcf-test-harness/ci_testing_utilities/bin*.
+
+The Python unit tests are located in the directory
+*olcf-test-harness/ci_testing_utilities/harness_unit_tests*.
+The machine specific unit tests are located in an eponymous subdirectory.
+For example, the machine specific tests for *Ascent* are located in
+*olcf-test-harness/ci_testing_utilities/harness_unit_tests/Ascent*.
+The directory *olcf-test-harness/ci_testing_utilities/input_files*
+contains input files for running tests. These files are sometimes
+required by the test fixtures for setting up tests. Lastly,
+the directory *olcf-test-harness/ci_testing_utilities/harness_unit_tests/runtime_environment*
+contains files that set up the runtime environment for the
+generic and machine specific tests. ::
+
+    ci_testing_utilities
+    |-- __init__.py
+    |-- bin/
+    |   |-- run_generic_unit_tests.py*
+    |   `-- run_machine_specific_unit_tests.py*
+    |-- harness_unit_tests/
+    |   |-- Ascent/
+    |   |-- __init__.py
+    |   |-- __pycache__/
+    |   |-- harness_unittests_exceptions.py
+    |   |-- harness_unittests_logging.py
+    |   |-- test_concurrency.py
+    |   `-- test_runtests.py
+    |-- input_files/
+    `-- runtime_environment/
+        |-- Ascent-olcf5_acceptance.unit_tests.lua
+        `-- GenericMachine-GenericConfigTag.unit_tests.lua
+
+
+Setting Up the Harness Unit Testing Runtime Environment
+-------------------------------------------------------
+
+Generic Tests
+~~~~~~~~~~~~~
+
+To set up the runtime environment to run the generic
+Harness unit tests, within the directory *olcf-test-harness*
+one needs to run the following commands:
+
+    **export OLCF_HARNESS_DIR=$(pwd)**
+
+    **module \\-\\-ignore-cache use modulefiles**
+
+    **module load olcf_harness**
+
+Machine Specific Tests
+~~~~~~~~~~~~~~~~~~~~~~
+
+To set up the runtime environment to run the machine specific
+Harness unit tests, within the directory *olcf-test-harness*
+one needs to run the following commands:
+
+    **export OLCF_HARNESS_DIR=$(pwd)**
+
+    **module \\-\\-ignore-cache use modulefiles**
+
+    **module load olcf_harness**
+
+    **export HUT_MACHINE_NAME=<machine name>**
+
+    **export HUT_CONFIG_TAG=<config tag>**
+
+
+Running the Harness Unit Tests
+------------------------------
+
+Generic Tests
+~~~~~~~~~~~~~
+
+To run the generic unit tests, run the command:
+
+    **run_generic_unit_tests.py**
+
+Machine Specific Tests
+~~~~~~~~~~~~~~~~~~~~~~
+
+To run the machine specific unit tests, appropriately define the environment
+variables *HUT_MACHINE_NAME* and *HUT_CONFIG_TAG*, then run the command:
+
+    **run_machine_specific_unit_tests.py**
+
+See ?? for a list of machine names and config tags.
+
+Harness Unit Testing API
+------------------------
+
+.. toctree::
+   :maxdepth: 1
+
+   unit_test_framework/git_ci_test_framework.rst
+   unit_test_framework/pytest_test_framework.rst
diff --git a/doc-sphinx/source/developer_guide/unit_test_framework/git_ci_test_framework.rst b/doc-sphinx/source/developer_guide/unit_test_framework/git_ci_test_framework.rst
new file mode 100644
index 0000000..b9c136a
--- /dev/null
+++ b/doc-sphinx/source/developer_guide/unit_test_framework/git_ci_test_framework.rst
@@ -0,0 +1,10 @@
+.. NCCS GitLab CI/CD documentation.
+
+Usage and Developer Documentation for GitLab CI/CD
+==================================================
+
+.. toctree::
+   :maxdepth: 1
+
+
+Stub text for the Git CI test framework.
diff --git a/doc-sphinx/source/developer_guide/unit_test_framework/pytest_test_framework.rst b/doc-sphinx/source/developer_guide/unit_test_framework/pytest_test_framework.rst
new file mode 100644
index 0000000..53b4245
--- /dev/null
+++ b/doc-sphinx/source/developer_guide/unit_test_framework/pytest_test_framework.rst
@@ -0,0 +1,10 @@
+.. NCCS GitLab CI/CD documentation.
+
+Usage and Developer Documentation for Harness' pytest Unit Tests
+================================================================
+
+.. toctree::
+   :maxdepth: 1
+
+
+
diff --git a/doc-sphinx/source/images/olcf_logo.png b/doc-sphinx/source/images/olcf_logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..39c15d3629aff62ed2abe134708b4a52a4734e0f
GIT binary patch
literal 21616
[base85-encoded PNG image data omitted]
zX}?Xy@$5sZOU}>fN>1X{27>%o66)zBNn5c!t4TcZo?r86hl@zu&Q*vntTR{!$5JM& zw@6sJonomuM%ssU59WKUkdD^WE!aMA2wx-}lgxf*xTmLi4;yj3-^OvFlFAL(Hr^Z{ z?CWacQuy`3TK)w4|F&!^Pxx-dvA31-PR`)(H_@_ow<0|MOCmNyd+sX3IK_F7v;F7$ zJf|P7(dxTGY@_kYn`L-yUm3i(Lh93*&Y0&5F&z9>QR*IwaNS*;C(AhR-K-p&Df#Vm zQXE6KQsMuB*DOz>?M2xh?4y+WJy)65XB#Bk-}pIczbitjfOad7$w*V?^SMW$EPc*c zM>FkUW9wc=g0qeLVO5fWDo(U-_*mgggYSI`;F%O?dUj&?ZtbFdha~{N<%mZqSS!RX zQQ>&$m?a%T`Xp(efnEiK=ED@MvkPeJLnWW=Z?S|gkCu~|93vew&I<+a34Hb=DI`^V zO#;_X?4!1Jl8*jGI>t(JyAPbGP4+^;ZpwG{2}#WqwvWDC{wj%lhQi#7_C%1t3I@xU z3J8=7q+@bi6VQ+0bibL;@n%}L;0H9>V2!lhVx!E*`gewpmEsUuDlU_betYloJ*ttC z=Ss(|XFNR1qP#b;A`&lRvmaQ_CO|s#4 z!{leP2g#hOCR?ewg5-Vkkct1)BKp2MSK7?U0~Lks9*MTed$~e98%cy)3L-yX6T535vM?-=2uS#v_-T)zfD6JJO)PUaf5j3k%7)_*=%Y@CSH4UV7 z2^#S$lF!%DpB=uLB&e3p+e!=006zD()Zk6nj@tjyBte+uzobPi$PB&>+jADRc<*;{ zPs2$nzgI{~74P3Ww}qCIk$g`tD?(`}S_RU4)`)w{|+&BICJB@q?bw1j7x$+xT>~k%ArdG-OMb#+hP7w2w>Os_+TiJ)+F<3T! zt_Xw4w7#fTx1-o@&nxe1MGAuf&gNCF^T6jHW$F5y(OKIZDuSbE6wXCwUyQutT+i4 z-p^Vb`%j74{XhlU2flyxv?fOFv)k~U;r%=iVCg%I*4}L-A6fee-z}7b4}AK>UL5xr z)B0-07%I574V@-gSNp=Y9J|w{;|?2s8b9KmvMaBy%i z0gBVQ5@G2kQ{Ezd8`HA;*Tbc%IVFwd);?Hq60Gh3FB0ADQ`m!%c1_^m;Nak(VM~&$ z3nN{XB~(i+s*(X&l2uokajobNN341QQmzRc92^`RG0R92^`R9R45hGlNeK(q~wwdhqri9NADUnKDPO92^|Ma&T~Pa8U6e|Gg;6MO|qB zK?jE}0q29|;NalUdx5krNJD|BOGHE>!W22^&Wv0W_aE$&NjZY$;NS?BgM))ZuLaUO zJgEHb?QDP)C|5_WElaRQF07WSmXtK4BNHrjHKoeE8i$?$w?ydR;NZ|Z;kUKQ&!bk9 zNwrFOQ(Bg->m!Q`+a5=C+H&SEhaLfE!E$hLaOj;t;#Opk$F@C4Qj27+h#H&_xu&vK zsmv0r>gMu79o(^70+O>LA@O(a)j0G7{GR{=09H46RJ5q`1ONa407*qoM6N<$f|L`_. This top-level 'applications' group contains a sub-group per target machine. For example, there are sub-groups for the OLCF machines 'summit' and 'rhea'. + +Repository URL +^^^^^^^^^^^^^^ + +Each application that can be tested on a given machine has a repository within the machine's group. + +.. code-block:: bash + + https://gitlab.ccs.ornl.gov/olcf-system-test/applications//.git + + +Repository structure +^^^^^^^^^^^^^^^^^^^^ + +The application repository must be structured as shown below: + +.. code-block:: + + //Scripts/ + /rgt_test_input.ini + / + / + / + /Source/ + / + / + + +First, each application test must have its own subdirectory. The test directory has a mandatory *Scripts* subdirectory, which should contain the test input file (see :ref:`application-test-input` below) and other required scripts (see :ref:`required-application-test-scripts` below). + +Second, the application's source code and required build script should reside within the *Source* directory of the repository. + +Example Repository +^^^^^^^^^^^^^^^^^^ + +For instance, to add a new application called *hello_mpi* to the Lyra system, we would create the following repo: `https://gitlab.ccs.ornl.gov/olcf-system-test/applications/lyra/hello_mpi.git `_. + +To add a single node test and a two node test, we would create a separate subdirectory for each test, including their required *Scripts* subdirectory: + +.. code-block:: bash + + hello_mpi/c_n001/Scripts/ + /c_n002/Scripts/ + +Note that the test names are not required to follow any specific naming convention, but you should avoid spaces in the names. + +.. _application-test-input: + +Application Test Input +---------------------- + +Each test scripts directory should contain a test input file named *rgt_test_input.ini*. The test input file contains information that is used by the OTH to build, submit, and check the results of application tests. 
+All the fields in the ``[Replacements]`` section can be used in the job
+script template and will be replaced when creating the batch script (see the
+:ref:`job-script-template` section below). The fields in the ``[EnvVars]``
+section allow you to set environment variables that your job will be able
+to use.
+
+The following is a sample input for the single-node test of the *hello_mpi*
+application mentioned above:
+
+.. code-block:: bash
+
+    [Replacements]
+    #-- This is a comment
+    #-- The following variables are required
+    job_name = hello_mpi_c
+    batch_queue = batch
+    walltime = 10
+    project_id = stf006accept
+    executable_path = hello
+    batch_filename = run_hello_mpi_c.sh
+    build_cmd = ./build_hello_mpi_c.sh
+    check_cmd = ./check_hello_mpi_c.sh
+    report_cmd = ./report_hello_mpi_c.sh
+    resubmit = 0
+
+    #-- The following are user-defined and used for key-value replacements
+    nodes = 1
+    total_processes = 16
+    processes_per_node = 16
+
+    [EnvVars]
+    FOO = bar
+
+.. _required-application-test-scripts:
+
+Required Application Test Scripts
+---------------------------------
+
+The OTH requires each application test to provide a build script, a check
+script, and a job script template. An optional report script may also be
+provided. These scripts should be placed in the locations described above.
+If the OTH cannot find the scripts specified in the test input, it will fail
+to launch.
+
+Build Script
+^^^^^^^^^^^^
+
+The build script can be a shell script, a Python script, or another
+executable command. It is specified in the test input file as *build_cmd*,
+and the OTH will execute the provided value as a subprocess. The build script
+should return 0 on success and non-zero otherwise.
+
+For *hello_mpi*, an example build script named *build_hello_mpi_c.sh* may
+contain the following:
+
+.. code-block:: bash
+
+    #!/bin/bash -l
+
+    module load gcc
+    module load openmpi
+    module list
+
+    mkdir -p bin
+    mpicc hello_mpi.c -o bin/hello
+
+.. _job-script-template:
+
+Job Script Template
+^^^^^^^^^^^^^^^^^^^
+
+The OTH will generate the batch job script from the job script template by
+replacing keywords of the form ``__keyword__`` with the values specified in
+the test input ``[Replacements]`` section.
+
+The job script template must be named appropriately to match the specific
+scheduler of the target machine. For SLURM systems, use *slurm.template.x* as
+the name. For LSF systems, use *lsf.template.x*. An example SLURM template
+script for the *hello_mpi* application follows:
+
+.. code-block:: bash
+
+    #!/bin/bash -l
+    #SBATCH -J __job_name__
+    #SBATCH -N __nodes__
+    #SBATCH -t __walltime__
+    #SBATCH -A __project_id__
+    #SBATCH -o __job_name__.o%j
+
+    module load openmpi
+    module list
+
+    # Define environment variables needed
+    EXECUTABLE="__executable_path__"
+    SCRIPTS_DIR="__scripts_dir__"
+    WORK_DIR="__working_dir__"
+    RESULTS_DIR="__results_dir__"
+    HARNESS_ID="__harness_id__"
+    BUILD_DIR="__build_dir__"
+
+    echo "Printing test directory environment variables:"
+    env | fgrep RGT_APP_SOURCE_
+    env | fgrep RGT_TEST_
+    echo
+
+    # Ensure we are in the starting directory
+    cd $SCRIPTS_DIR
+
+    # Make the working scratch space directory.
+    if [ ! -e $WORK_DIR ]
+    then
+        mkdir -p $WORK_DIR
+    fi
+
+    # Change directory to the working directory.
+    cd $WORK_DIR
+
+    env &> job.environ
+    scontrol show hostnames > job.nodes
+
+    # Run the executable.
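+    # log_binary_execution_time.py is a harness-provided utility that records
+    # the binary-execute start/final events in this test's status file; the
+    # two calls below bracket the actual run.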
+    log_binary_execution_time.py --scriptsdir $SCRIPTS_DIR --uniqueid $HARNESS_ID --mode start
+
+    #CMD="srun -n __total_processes__ -N __nodes__ $BUILD_DIR/bin/$EXECUTABLE"
+    CMD="mpirun -n __total_processes__ --map-by node --hostfile job.nodes $BUILD_DIR/$EXECUTABLE"
+    echo "$CMD"
+    $CMD
+
+    log_binary_execution_time.py --scriptsdir $SCRIPTS_DIR --uniqueid $HARNESS_ID --mode final
+
+    # Ensure we return to the starting directory.
+    cd $SCRIPTS_DIR
+
+    # Copy the output and results back to the $RESULTS_DIR
+    cp -rf $WORK_DIR/* $RESULTS_DIR
+    cp $BUILD_DIR/output_build.*.txt $RESULTS_DIR
+
+    # Check the final results.
+    check_executable_driver.py -p $RESULTS_DIR -i $HARNESS_ID
+
+    # Resubmit if needed
+    case __resubmit__ in
+        0)
+            echo "No resubmit";;
+        1)
+            test_harness_driver.py -r;;
+    esac
+
+
+Check Script
+^^^^^^^^^^^^
+
+The check script can be a shell script, a Python script, or another
+executable command.
+
+Check scripts are used to verify that application tests ran as expected, and
+thus use standardized return codes to inform the OTH of the test result. The
+check script return value must be one of the following:
+
+* ``0``: test succeeded
+* ``1``: test failed
+* ``5``: test completed correctly but failed a performance target
+
+For *hello_mpi*, an example check script named *check_hello_mpi_c.sh* may
+contain the following:
+
+.. code-block:: bash
+
+    #!/bin/bash
+    echo "This is the check script for hello_mpi."
+    echo
+    echo -n "Working Directory: "; pwd
+    echo
+    echo "Test Result Files:"
+    ls ./*
+    echo
+    exit 0
+
+Notes on Where Things Are
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+It can be confusing to know where everything lives and from which directory
+each script is executed. These locations are explained briefly in
+:doc:`overview`; the following elaborates on this topic with some concrete
+examples.
+
+In reading these notes, please keep in mind the application repository
+structure described above.
+
+1. Building
+^^^^^^^^^^^
+
+The build step is executed from the directory **$BUILD_DIR**, which is a copy
+of *Source/*. This means the build script should be written as if it were
+executed from *Source/*, regardless of where it actually is.
+
+Correspondingly, the path to the build script given in *rgt_test_input.ini*
+should be relative to the *Source/* directory.
+
+2. Job Submission and Running
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Using the job template above, the job is submitted from the test *Scripts/*
+directory and starts there. This is **$SCRIPTS_DIR** in the job template. The
+executable is then run from **$WORK_DIR**, an entirely new directory created
+for every harness run.
+
+One can access or copy files relative to the *Scripts/* directory using the
+**$SCRIPTS_DIR** environment variable. For example, if one stores a
+*CorrectResults* directory under a test's directory, it can be copied by
+adding the line
+
+.. code-block:: bash
+
+    cp -a ${SCRIPTS_DIR}/../CorrectResults ${WORK_DIR}/
+
+inside the job script.
+
+The environment variable **$EXECUTABLE** is also populated from the
+``executable_path`` entry in the *rgt_test_input.ini* file and is interpreted
+relative to **$WORK_DIR**.
+
+Since the actual executable may still be inside **$BUILD_DIR** from the
+previous step, one would need to either copy it to **$WORK_DIR** or prepend
+the build path in the job script, as in **$BUILD_DIR/$EXECUTABLE** (see the
+sketch below).
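+
+As a minimal sketch (assuming the conventions above, with the binary placed
+directly under **$BUILD_DIR** by the build script), the run portion of a job
+script could stage the executable explicitly:
+
+.. code-block:: bash
+
+    # Stage the built binary from the build tree into the scratch working
+    # directory, then launch it from there with a relative path.
+    cd $WORK_DIR
+    cp $BUILD_DIR/$EXECUTABLE .
+
+    CMD="mpirun -n __total_processes__ ./$EXECUTABLE"
+    echo "$CMD"
+    $CMD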
+
+
diff --git a/doc-sphinx/source/user_guide/launching.rst b/doc-sphinx/source/user_guide/launching.rst
new file mode 100644
index 0000000..7014db9
--- /dev/null
+++ b/doc-sphinx/source/user_guide/launching.rst
@@ -0,0 +1,99 @@
+=====================================
+Launching the OLCF Test Harness (OTH)
+=====================================
+
+To launch the OLCF Test Harness (OTH), you must first access the harness
+code. This can be done in two ways: by obtaining your own copy of the code,
+or by using the centralized harness code available on the system under test.
+
+OTH Setup
+---------
+
+Option 1: Using the centralized OTH on Lyra
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The code is already installed in */sw/lyra/acceptance/olcf-test-harness*.
+
+Set up the environment:
+
+.. code-block:: bash
+
+    export OLCF_HARNESS_DIR=/sw/lyra/acceptance/olcf-test-harness
+    module use $OLCF_HARNESS_DIR/modulefiles
+    module load olcf_harness
+
+Option 2: Using your own copy of the harness
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Clone the repo on the target system:
+
+.. code-block:: bash
+
+    git clone gitlab@gitlab.ccs.ornl.gov:olcf-system-test/olcf-test-harness.git
+
+Set up the environment:
+
+.. code-block:: bash
+
+    cd olcf-test-harness
+    export OLCF_HARNESS_DIR=$(pwd)
+    module use $OLCF_HARNESS_DIR/modulefiles
+    module load olcf_harness
+    export OLCF_HARNESS_MACHINE=<machine-name>
+
+
+Launching the OTH
+-----------------
+
+Create a directory for your run:
+
+.. code-block:: bash
+
+    mkdir lyra_testshot
+    cd lyra_testshot
+
+Prepare an input file of tests (e.g., *rgt.input.lyra*). First, set
+``Path_to_tests`` to the location where you would like application source and
+run files to be kept (note that the directory provided must be an existing
+directory on a file system visible to the current machine). Next, provide one
+or more tests to run in the format ``Test = <app-name> <test-name>``. In this
+example for Lyra, the application **hello_mpi** is used and we specify two
+tests: **c_n001** and **c_n002**.
+
+.. code-block:: bash
+
+    ################################################################################
+    # Set the path to the top level of the application directory.                  #
+    ################################################################################
+
+    Path_to_tests = /some/path/to/my/applications
+
+    Test = hello_mpi c_n001
+    Test = hello_mpi c_n002
+
+
+Set a different scratch area for this specific instance of the harness (a
+default is set, but this lets you change it):
+
+.. code-block:: bash
+
+    export RGT_PATH_TO_SSPACE=/<path-to-scratch-area>/Scratch
+
+
+The latest version of the harness supports command-line tasks as well as
+input-file tasks. If no tasks are provided in the input file, the harness
+uses the command-line mode. To launch via the CLI:
+
+.. code-block:: bash
+
+    runtests.py --inputfile rgt.input.lyra --mode checkout
+    runtests.py --inputfile rgt.input.lyra --mode start
+    runtests.py --inputfile rgt.input.lyra --mode checkout start stop
+
+
+When using the checkout mode, the application source repository will be
+cloned to the *<Path_to_tests>/<app-name>* directory.
+
+
+After using the start mode, results of the most recent test run can be found
+in *<Path_to_tests>/<app-name>/<test-name>/Run_Archive/latest*.
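+
+For the *hello_mpi* example above (with ``Path_to_tests`` set to
+*/some/path/to/my/applications*), the latest results of the *c_n001* test
+would therefore be found with:
+
+.. code-block:: bash
+
+    ls /some/path/to/my/applications/hello_mpi/c_n001/Run_Archive/latest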
diff --git a/doc-sphinx/source/user_guide/overview.rst b/doc-sphinx/source/user_guide/overview.rst
new file mode 100644
index 0000000..0126287
--- /dev/null
+++ b/doc-sphinx/source/user_guide/overview.rst
@@ -0,0 +1,49 @@
+============================
+Overview of the Test Harness
+============================
+
+OLCF Test Harness
+=================
+
+.. toctree::
+   :maxdepth: 1
+
+The OLCF Test Harness (OTH) helps automate the testing of applications,
+tools, and other system software. Users of the OTH have the flexibility to
+run individual standalone tests or to simulate production workloads by
+running a large collection of tests continuously. Each test provides its own
+scripts that support the core operations of building executables, running
+jobs, and checking test results.
+
+OLCF Test Harness Execution Overview
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+*runtests.py* is the program used to execute the OTH. Users provide a test
+input file that lists the set of application tests to run, and a command-line
+run mode option controls whether to run a single iteration
+(**\-\-mode start stop**) or continuously (**\-\-mode start**). In continuous
+mode, the test job script has the option to resubmit another iteration of the
+test.
+
+A brief logical flow of harness execution follows:
+
+* read 'RGT_PATH_TO_SSPACE' from environment (OTH_SCRATCH)
+* read 'Path_to_tests' from inputfile (OTH_APPS)
+* foreach (app,test) in inputfile
+
+  1. generate unique id (UID)
+  2. create *Run_Archive* and *Status* directories @ *OTH_APPS/app/test/{Run_Archive,Status}/UID*
+  3. create scratch directory (APPTEST_SCRATCH) @ *OTH_SCRATCH/app/test/UID*
+  4. recursively copy *OTH_APPS/app/Source/* to *APPTEST_SCRATCH/build_directory/*
+  5. change working directory to *APPTEST_SCRATCH/build_directory/* and execute the test's build command
+  6. if the build script succeeds, generate the test's job script from the template in *OTH_APPS/app/test/Scripts*
+  7. submit the job script to the scheduler - when the job runs, it:
+
+     - changes working directory to *APPTEST_SCRATCH/workdir* (after creating it if necessary)
+     - copies any needed input files from *OTH_APPS/app/test*
+     - runs the test executable
+     - copies any needed output files back to the *Run_Archive* directory
+     - runs the test's check command, passing it the *Run_Archive* directory location
+     - if in continuous mode, starts another iteration of the harness test
+
+* end
diff --git a/harness/bin/__init__.py b/harness/bin/__init__.py
index 9d70da6..5678457 100644
--- a/harness/bin/__init__.py
+++ b/harness/bin/__init__.py
@@ -5,7 +5,8 @@
     "parse_test_status",
     "test_harness_driver",
     "runtests",
-    "record_application_metric"
+    "record_application_metric",
+    "create_alt_config_file"
 ]
 version = 2.0
diff --git a/harness/bin/check_executable_driver.py b/harness/bin/check_executable_driver.py
index 419de08..d62defa 100755
--- a/harness/bin/check_executable_driver.py
+++ b/harness/bin/check_executable_driver.py
@@ -1,13 +1,18 @@
 #!
/usr/bin/env python3 +# Python imports import os import sys import subprocess import getopt import string +# Harness imports from libraries.apptest import subtest +from libraries.subtest_factory import SubtestFactory from libraries.layout_of_apps_directory import get_layout_from_runarchivedir +from libraries.layout_of_apps_directory import get_path_to_logfile_from_runarchivedir +from libraries.rgt_loggers import rgt_logger_factory # # Author: Arnold Tharrington, Scientific Computing Group @@ -22,6 +27,25 @@ # It is designed such that it will be called from the Scripts directory. # +MODULE_THRESHOLD_LOG_LEVEL = "DEBUG" +"""str : The logging level for this module. """ + +MODULE_LOGGER_NAME = "check_executable_driver" +"""The logger name for this module.""" + +def get_log_level(): + """Returns the test harness driver threshold log level. + + Returns + ------- + int + """ + return MODULE_THRESHOLD_LOG_LEVEL + +def get_logger_name(): + """Returns the logger name for this module.""" + return MODULE_LOGGER_NAME + def usage(): print ("Usage: check_executable_driver.py [-h|--help] [-i ] -p ") print ("A driver program that calls check_executable.x") @@ -71,10 +95,23 @@ def main(): print("ERROR: user-provided test id", test_id_string, "does not match run archive id", testid) sys.exit(1) - apptest = subtest(name_of_application=app, - name_of_subtest=test, - local_path_to_tests=apps_root, - harness_id=testid) + logger_threshold = "INFO" + fh_threshold_log_level = "INFO" + ch_threshold_log_level = "CRITICAL" + fh_filepath = get_path_to_logfile_from_runarchivedir(path_to_results) + a_logger = rgt_logger_factory.create_rgt_logger( + logger_name=get_logger_name(), + fh_filepath=fh_filepath, + logger_threshold_log_level=logger_threshold, + fh_threshold_log_level=fh_threshold_log_level, + ch_threshold_log_level=ch_threshold_log_level) + + apptest = SubtestFactory.make_subtest(name_of_application=app, + name_of_subtest=test, + local_path_to_tests=apps_root, + logger=a_logger, + tag=testid) + currentdir = os.getcwd() scriptsdir = apptest.get_path_to_scripts() @@ -85,11 +122,13 @@ def main(): check_command = "test_harness_driver.py --check -i " + testid check_exit_value = os.system(check_command) + message = f"The check command return status is {check_exit_value}." + apptest.doInfoLogging(message) + if currentdir != scriptsdir: os.chdir(currentdir) return check_exit_value - if __name__ == "__main__": main() diff --git a/harness/bin/convert_test_input.bash b/harness/bin/convert_test_input.bash new file mode 100755 index 0000000..fbd2274 --- /dev/null +++ b/harness/bin/convert_test_input.bash @@ -0,0 +1,48 @@ +#!/bin/bash + +usage() { + my_program=$1 + echo >&2 "USAGE: $my_program []" + echo >&2 " - if output file (2nd parameter) omitted, contents will be generated to stdout" +} + +if [[ $# -lt 1 || $# -gt 2 || $1 == "-h" ]]; then + usage $(basename $0) + exit 1 +fi + +input_file=$1 +output_file=$2 + +#echo >&2 "DEBUG: input_file $input_file" +#echo >&2 "DEBUG: output_file $output_file" + +if [[ ! 
-f $input_file ]]; then
+    echo >&2 "ERROR: input file $input_file does not exist"
+    exit 2
+fi
+
+if [[ -n $output_file ]]; then
+    if [[ -f $output_file ]]; then
+        echo >&2 "WARNING: output file $output_file exists and will be overwritten (backing up)"
+        /bin/mv $output_file ${output_file}.bak
+    fi
+    exec 1>$output_file  # send stdout to output_file
+fi
+
+sed_substitutions='
+s|batchfilename *=|batch_filename =|;
+s|batchqueue *=|batch_queue =|;
+s|buildcmd *=|build_cmd =|;
+s|buildscriptname *=|build_cmd =|;
+s|checkcmd *=|check_cmd =|;
+s|checkscriptname *=|check_cmd =|;
+s|executablename *=|executable_path =|;
+s|jobname *=|job_name =|;
+s|projectid *=|project_id =|;
+s|reportcmd *=|report_cmd =|;
+s|reportscriptname *=|report_cmd =|;
+s|resubmitme *=|resubmit =|'
+
+echo "[Replacements]"
+sed -e "$sed_substitutions" $input_file
diff --git a/harness/bin/create_alt_config_file.py b/harness/bin/create_alt_config_file.py
new file mode 100755
index 0000000..64167d0
--- /dev/null
+++ b/harness/bin/create_alt_config_file.py
@@ -0,0 +1,565 @@
+#! /usr/bin/env python3
+""" This module modifies the INI config files of the HARNESS.
+
+The module is to be used as a main program. For module usage, run the
+following command:
+
+    create_alt_config_file.py -h | --help
+
+One or more entries of an INI file can be modified per program call.
+The user gives on the command line the section, key, and new key value
+for each entry to change. The specified command line input INI config
+file is read, modified, and written to disk with the specified command
+line output filename.
+"""
+
+# Python package imports
+
+# My harness package imports
+
+
+_KEY_GROUP_SIZE=3
+"""int: The size of a key group.
+
+The module level variable is used to validate
+that the number of -k | --keys arguments is a
+multiple of 3, and for writing the new key values
+to file.
+"""
+#-----------------------------------------------------
+# Public members                                      -
+#                                                     -
+#-----------------------------------------------------
+
+def main():
+    """The entry point of this module.
+    """
+
+    # Parse the command line arguments.
+    args = _parse_arguments()
+
+    # Create a logger object for general purpose debugging.
+    logger = _create_logger(log_id='main_logger',
+                            log_level=args.log_level)
+
+    logger.info("Start of main program")
+
+    # Check the validity of the command line arguments.
+    _check_commandline_arguments_validity(logger,args)
+
+    # Write the new config file.
+    _write_new_config_file(logger,
+                           args.input_config_filename[0],
+                           args.output_config_filename[0],
+                           args.keys[0])
+
+    logger.info("End of main program")
+
+#-----------------------------------------------------
+# End of public members                               -
+#                                                     -
+#-----------------------------------------------------
+
+#-----------------------------------------------------
+# Private members                                     -
+#                                                     -
+#-----------------------------------------------------
+
+def _create_logger_description():
+    """Returns a string that contains the logger description.
+    """
+    frmt_header = "{0:10s} {1:40.40s} {2:5s}\n"
+    frmt_items = frmt_header
+    header1 = frmt_header.format("Level", "Description", "Option Value" )
+    header1_len = len(header1)
+    log_option_desc = "The logging level. The standard levels are the following:\n\n"
+    log_option_desc += header1
+    log_option_desc += "-"*header1_len + "\n"
+    log_option_desc += frmt_items.format("NOTSET", "All messages will be", "0" )
+    log_option_desc += frmt_items.format("", "processed.", " \n" )
+    log_option_desc += frmt_items.format("DEBUG", "Detailed information, typically of", "10" )
+    log_option_desc += frmt_items.format("", "interest only when diagnosing problems.", "\n" )
+    log_option_desc += frmt_items.format("INFO", "Confirmation that things", "20" )
+    log_option_desc += frmt_items.format("", "are working as expected.", " \n" )
+    log_option_desc += frmt_items.format("WARNING ", "An indication that something unexpected", "30" )
+    log_option_desc += frmt_items.format("", "happened, or indicative of some problem", "" )
+    log_option_desc += frmt_items.format("", "in the near future.", "\n" )
+    log_option_desc += frmt_items.format("ERROR ", "Due to a more serious problem", "40" )
+    log_option_desc += frmt_items.format("", "the software has not been able", "" )
+    log_option_desc += frmt_items.format("", "to perform some function.", "\n" )
+    log_option_desc += frmt_items.format("CRITICAL ", "A serious error, indicating", "50" )
+    log_option_desc += frmt_items.format("", "that the program itself may be unable", "" )
+    log_option_desc += frmt_items.format("", "to continue running.", "\n" )
+    return log_option_desc
+
+def _create_logger(log_id, log_level):
+    """
+    Returns a Logger object.
+
+    The returned logger object is named *log_id* and has log level
+    *log_level*. See the logging module Python documentation.
+
+    Parameters
+    ----------
+    log_id : str
+        The name of the logger.
+
+    log_level : A logging level
+        The log level (e.g. logging.DEBUG, logging.INFO, etc.).
+
+    Returns
+    -------
+    Logger
+    """
+    import logging # Needed for logging events.
+    logger = logging.getLogger(log_id)
+    logger.setLevel(log_level)
+
+    # Create a console handler and set its level.
+    ch = logging.StreamHandler()
+    ch.setLevel(log_level)
+
+    # Create a formatter.
+    formatter = logging.Formatter(
+        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+
+    # Add the formatter to the console handler.
+    ch.setFormatter(formatter)
+
+    # Add the console handler to the logger.
+    logger.addHandler(ch)
+
+    return logger
+
+def _parse_arguments():
+    """
+    Parses the command line arguments and returns a namespace.
+
+    Returns
+    -------
+    A namespace
+        The namespace contains attributes
+        that are the command line arguments.
+    """
+    import argparse # Needed for parsing command line arguments.
+
+    # Create a string description of the program.
+    program_description = "This program creates a new config file "
+    program_description += "by copying and modifying an existing config file."
+
+    # Create an argument parser.
+    my_parser = argparse.ArgumentParser(
+        description=program_description,
+        formatter_class=argparse.RawTextHelpFormatter,
+        add_help=True)
+
+    my_parser = _add_all_arguments(my_parser)
+
+    my_args = my_parser.parse_args()
+
+    return my_args
+
+def _add_all_arguments(a_parser):
+    """
+    Adds all arguments to the parser and returns the parser.
+
+    Parameters
+    ----------
+    a_parser : An ArgumentParser
+        The parser to be modified and returned.
+ """ + a_parser = _add_argument_loglevel(a_parser) + a_parser = _add_argument_multiple_keys(a_parser) + a_parser = _add_argument_output(a_parser) + a_parser = _add_argument_inputfile(a_parser) + return a_parser + +def _add_argument_loglevel(a_parser): + """ + Adds the help argument to the parser argument and returns the parser object + + Parameters + ---------- + a_parser: An ArgumentParser + The parser of which is to be modified an returned. + + Returns + ------- + A parser object + """ + import logging # Needed for logging events. + + # Add an optional argument for the logging level. + a_parser.add_argument("--log-level", + type=int, + default=logging.WARNING, + help=_create_logger_description() ) + return a_parser + +def _add_argument_multiple_keys(a_parser): + """ + Adds multiple key arguments to the parser argument and returns the parser object. + + The key arguments are required requires multiple of _KEY_GROUP_SIZE arguments: + -k | --keys ... + + Parameters + ---------- + a_parser: An ArgumentParser + The parser of which is to be modified an returned. + + Returns + ------- + A parser object + """ + a_parser.add_argument("-k","--keys", + action="append", + required=True, + nargs="+", + type=str, + help="The section, key and new key value .\n\n") + return a_parser + +def _add_argument_output(a_parser): + """ + Adds the output argument to the parser argument and returns the parser object. + + The outp file argument is mandatory. The output file is the name of the new + machine config file. + + Parameters + ---------- + a_parser: An ArgumentParser + The parser of which is to be modified an returned. + + Returns + ------- + A parser object + """ + a_parser.add_argument("-o","--output-config-filename", + required=True, + type=str, + nargs=1, + help=("The name of the newly created config file.\n" + "The output config filename must be different from the\n" + "input config filename.\n\n") + ) + return a_parser + +def _add_argument_inputfile(a_parser): + """ + Adds the input file argument to the parser argument and returns the parser object. + + Parameters + ---------- + a_parser: An ArgumentParser + The parser of which is to be modified an returned. + + Returns + ------- + A parser object + """ + a_parser.add_argument("-i","--input-config-filename", + required=True, + type=str, + nargs=1, + help=("The name of the input config file.\n" + "The input config filename must be different from the\n" + "output config filename.\n\n") + ) + return a_parser + +def _check_commandline_arguments_validity(a_logger,args): + """ + Check the validity of the values of the command line arguments. + + Parameters + ---------- + logger: A Logger object + Used primarily for debugging and logging messages to std out. + + args: A namespace + Stores the attributes of the command line argument. + + """ + import sys + + try : + _validate_io_file_arguments(input_filename=args.input_config_filename[0], + output_filename=args.output_config_filename[0]) + + _validate_multiple_key_arguments(key_arguments=args.keys[0]) + + except (_SameIOFileError, _InputConfigFileError, _OutputConfigFileError) as err: + a_logger.error("Error in i/o file arguments.") + print(err.error_message) + sys.exit() + except ( _NumberKeyError ) as err: + a_logger.error("Error in key arguments.") + print(err.error_message) + sys.exit() + +def _validate_io_file_arguments(input_filename, + output_filename): + """Validates the filenames with respect several criteria. 
+ + If input_filename is the same as output_filename, then raises a _SameIOFileError exception. + If the input file doesn't exist, then raise a _InputConfigFileError exception. + If the output file exists, then raise a _OutputConfigFileError exception. + + Parameters + ---------- + input_filename : str + The input config file name + + output_filename : str + The output config file name + + Raises + ------ + _SameIOFileError + _InputConfigFileError + _OutputConfigFileError + + """ + + import os + + if input_filename == output_filename: + raise _SameIOFileError(input_filename,output_filename) + + if not os.path.exists(input_filename): + raise _InputConfigFileError(input_filename) + + if os.path.exists(output_filename): + raise _OutputConfigFileError(output_filename) + + return + +def _validate_multiple_key_arguments(key_arguments): + """Validates the key arguments. + + The key arguments must be a multiple of _KEY_GROUP_SIZE or the + program will fail. + + Parameters + ---------- + key_arguments: A list of strings + Stores the sections, keys, and new key values. + + Raises + ------ + _NumberKeyError + + + """ + + if len(key_arguments)%_KEY_GROUP_SIZE != 0: + raise _NumberKeyError(key_arguments) + + pass + +def _write_new_config_file(logger, + input_config_filename, + output_config_filename, + keys): + """Writes to disk the new config file. + + The INI file "input_config_filename" is read and stored as + a ConfigParser objects, obj1. The object is then modified + by the using the key variable. The modified object is written + back to disk with filename "output_config_filename". + + Parameters + ---------- + logger: A Logger object + Used primarily for debugging and logging messages to std out. + + input_config_filename: A string + The name of a file in INI format. This file will be read and + stored in a ConfigParser object. + + output_config_filename: A string + The new ConfigParser object will be named output_config_filename. + + keys: A string list of length of some multiple _KEY_GROUP_SIZE + Each new config INI entry has group size of _KEY_GROUP_SIZE, + therefore the number of changes to make is + + nm_groups = len(keys)/_KEY_GROUP_SIZE. + + The ip'th group entry has an offset of + + offset_ip = ip*{_KEY_GROUP_SIZE) + + The following must be satisfied. + keys[offset_ip] contains the name of the section. + keys[offset_ip + 1] contains the name of the key. + keys[offset_ip + 2] contains the new key value. + """ + + import configparser + + with open(input_config_filename,"r") as in_file: + old_parser = configparser.ConfigParser() + old_parser.read_file(in_file,input_config_filename) + with open(output_config_filename,"w") as out_file: + nm_groups = len(keys)//_KEY_GROUP_SIZE + for ip in range(nm_groups): + offset = ip*(_KEY_GROUP_SIZE) + (section,key,new_key_value)=(keys[offset],keys[offset+1],keys[offset+2]) + old_parser.set(section,key,new_key_value) + old_parser.write(out_file) + +class _Error(Exception): + """Base class for exceptions in this module""" + pass + +class _SameIOFileError(_Error): + """Raised when the input and output config files are the same. """ + + def __init__(self,input_filename,output_filename): + """The class constructor. + + Parameters + ---------- + input_filename : str + The input config file name. + + output_filename : str + The output config file name. 
+ """ + + self._input_filename=input_filename + """str: The input config file name.""" + + self._output_filename=output_filename + """str: The output config file name.""" + + self._errormessage=("On the command line, the specified input " + "and output config files have the same name - {}.\n".format(self._input_filename)) + """str: The message for this error.""" + + @property + def error_message(self): + """Returns the error message. + + Returns + ------- + str + The error message string. + + """ + return self._errormessage + +class _InputConfigFileError(_Error): + """Raised when the input config file does not exist. """ + + def __init__(self,filename): + """The class constructor + + Parameters + ---------- + filename : str + The name of the input config file. + + """ + + self._filename = filename + """str: The input filename""" + + self._errormessage="On the command line arguments, the specified input config file doesn't exist - {}.\n".format(self._filename) + """str: The message for this error.""" + + @property + def error_message(self): + """Returns the error message. + + Returns + ------- + str + The error message string. + + """ + return self._errormessage + +class _OutputConfigFileError(_Error): + """Raised when the output config file exists. """ + + def __init__(self,filename): + """The class constructor + + Parameters + ---------- + filename : str + The name of the output config file. + + """ + + self._filename = filename + """str: The output filename""" + + self._errormessage="On the command, the specified output config file exists - {}.\n".format(self._filename) + """str: The message for this error.""" + + @property + def error_message(self): + """Returns the error message. + + Returns + ------- + str + The error message string. + + """ + return self._errormessage + +class _NumberKeyError(_Error): + """Raised when the number of key values is not a multiple of _KEY_GROUP_SIZE """ + + def __init__(self,key_args): + """The class constructor + + Parameters + ---------- + list of strings + The list of key arguments. + """ + import copy + + self._key_arguments = copy.deepcopy(key_args) + """list of str: The key arguments.""" + + self._errormessage=self._form_error_message() + """str: The message for this error.""" + + @property + def error_message(self): + """Returns the error message. + + Returns + ------- + str + The error message string. + + """ + return self._errormessage + + def _form_error_message(self): + err_msg="On the command line, the number of specified key arguments is not a multiple of _KEY_GROUP_SIZE.\n" + err_msg+="The number of key arguments: {}.\n".format(len(self._key_arguments)) + index = -1 + for arg in self._key_arguments: + index+=1 + err_msg+="Key argument ({}): {}\n".format(index,arg) + return err_msg + +#----------------------------------------------------- +# End of private members. 
- +# - +#----------------------------------------------------- + +if __name__ == "__main__": + main() diff --git a/harness/bin/filelock.py b/harness/bin/filelock.py index c0b0099..4bfba15 100755 --- a/harness/bin/filelock.py +++ b/harness/bin/filelock.py @@ -60,7 +60,7 @@ def main (): for line in lines: if test_id_regexp.match(line): - print "macthing lock, removing lock" + print("matching lock, removing lock") os.remove(path_to_lock_file) try_again = 0 break @@ -71,13 +71,13 @@ def main (): def usage(): - print "Usage: filelock.py [-h|--help] [-i ] [-p ] [-c ]" - print "A program that manipulates the harness file locks" + print ("Usage: filelock.py [-h|--help] [-i ] [-p ] [-c ]") + print ("A program that manipulates the harness file locks") print - print "-h, --help Prints usage information." - print "-p The absoulte path to the lock file." - print "-i The test string unique id." - print "-c The lock command [lock | unlock ]." + print ("-h, --help Prints usage information.") + print ("-p The absoulte path to the lock file.") + print ("-i The test string unique id.") + print ("-c The lock command [lock | unlock ].") diff --git a/harness/bin/log_binary_execution_time.py b/harness/bin/log_binary_execution_time.py index bbfd642..fc1d037 100755 --- a/harness/bin/log_binary_execution_time.py +++ b/harness/bin/log_binary_execution_time.py @@ -3,50 +3,12 @@ import argparse import sys import os -from libraries import status_file -def main(): - - argv = sys.argv - - #---------------------------------- - # Create a parse for my arguments - - #---------------------------------- - parser = create_a_parser() - - Vargs = parser.parse_args() - - log_mode = str(Vargs.mode[0]) - unique_id = Vargs.uniqueid[0] - scriptsdir = Vargs.scriptsdir[0] - - - # - # Get the current working directory. - # - cwd = os.getcwd() - - # - # Change to the scripts directory of the test, - # - os.chdir(scriptsdir) - - jstatus = status_file.StatusFile(unique_id,mode="Old") - - if log_mode == "start": - jstatus.log_event(status_file.StatusFile.EVENT_BINARY_EXECUTE_START, - '17') - #jstatus.log_start_execution_time() - #jstatus.add_result("17",mode="Add_Binary_Running") - else: - jstatus.log_event(status_file.StatusFile.EVENT_BINARY_EXECUTE_END) - #jstatus.log_final_execution_time() +from libraries.subtest_factory import SubtestFactory +from libraries.status_file_factory import StatusFileFactory +from libraries.status_file import StatusFile +from libraries.layout_of_apps_directory import get_layout_from_scriptdir - # - # Change back to the starting directory of the test, - # - os.chdir(cwd) - def create_a_parser(): """Parses the arguments. @@ -62,17 +24,54 @@ def create_a_parser(): status execution log file.", add_help=True) - parser.add_argument("--scriptsdir", nargs=1,required=True, + parser.add_argument("--scriptsdir", nargs=1, required=True, help="The location of the test scripts directory. 
Must be an absolute path.") - parser.add_argument("--uniqueid", nargs=1,required=True, + parser.add_argument("--uniqueid", nargs=1, required=True, help="The unique id of the test.") - parser.add_argument("--mode", required=True,nargs=1,choices=["start","final"], + parser.add_argument("--mode", required=True, nargs=1, choices=["start","final"], help="Used to decide to where log the current time the start or final execution log file") return parser +def main(): + + #---------------------------------- + # Create a parse for my arguments - + #---------------------------------- + parser = create_a_parser() + Vargs = parser.parse_args() + + log_mode = str(Vargs.mode[0]) + unique_id = Vargs.uniqueid[0] + scriptsdir = Vargs.scriptsdir[0] + + # Change to the scripts directory of the test + cwd = os.getcwd() + if cwd != scriptsdir: + os.chdir(scriptsdir) + + # Get status file + (apps_root, app, test) = get_layout_from_scriptdir(scriptsdir) + apptest = SubtestFactory.make_subtest(name_of_application=app, + name_of_subtest=test, + local_path_to_tests=apps_root, + tag=unique_id) + path_to_status_file = apptest.get_path_to_status_file() + jstatus = StatusFileFactory.create(path_to_status_file=path_to_status_file) + jstatus.initialize_subtest(unique_id) + + # Log the appropriate event to the status file + if log_mode == "start": + jstatus.log_event(StatusFile.EVENT_BINARY_EXECUTE_START) + else: + jstatus.log_event(StatusFile.EVENT_BINARY_EXECUTE_END) + + # Change back to the starting directory of the test + if cwd != scriptsdir: + os.chdir(cwd) + if __name__ == '__main__': main() diff --git a/harness/bin/parse_test_status.py b/harness/bin/parse_test_status.py index 4f7059c..b8ee165 100755 --- a/harness/bin/parse_test_status.py +++ b/harness/bin/parse_test_status.py @@ -1,46 +1,48 @@ #!/usr/bin/env python3 +# NCCS Test Harness Package Imports +from libraries.layout_of_apps_directory import apptest_layout + def main(): - string1 = "--------------------" - - filename = "test_status.txt" - - file_obj = open(filename,"r") - - filerecords = file_obj.readlines() - - ip = -1 - total_test = 0 - total_passed = 0 - total_failed = 0 - total_inconclusive = 0 - test_with_no_passes = [] - for tmprecord in filerecords: - ip = ip + 1 - if tmprecord.find(string1) != -1 : - #print filerecords[ip+4], - tmprecord1 = filerecords[ip+4].strip() - words = tmprecord1.split() - total_test = total_test + int(words[0]) - total_passed = total_passed + int(words[1]) - total_failed = total_failed + int(words[2]) - total_inconclusive = total_inconclusive + int(words[3]) - if int(words[1]) == 0: - test_with_no_passes.append(filerecords[ip+2].strip()) - - - print "Total tests: ",total_test - print "Total passed: ",total_passed - print "Total failed: ",total_failed - print "Total inconclusive : ", total_inconclusive - print "Pass percentage :", float(total_passed)/float(total_test) - print - print - print + string1 = "--------------------" + + filename=apptest_layout.test_pass_failed_status_filename + file_obj = open(filename,"r") + filerecords = file_obj.readlines() + + ip = -1 + total_test = 0 + total_passed = 0 + total_failed = 0 + total_inconclusive = 0 + test_with_no_passes = [] + for tmprecord in filerecords: + ip = ip + 1 + if tmprecord.find(string1) != -1 : + #print filerecords[ip+4], + tmprecord1 = filerecords[ip+4].strip() + words = tmprecord1.split() + total_test = total_test + int(words[0]) + total_passed = total_passed + int(words[1]) + total_failed = total_failed + int(words[2]) + total_inconclusive = total_inconclusive + 
int(words[3]) + if int(words[1]) == 0: + test_with_no_passes.append(filerecords[ip+2].strip()) + + + print ("Total tests: ",total_test) + print ("Total passed: ",total_passed) + print ("Total failed: ",total_failed) + print ("Total inconclusive : ", total_inconclusive) + print ("Pass percentage :", float(total_passed)/float(total_test)) + print + print + print - print "The following tests have no sucesses:" - for test in test_with_no_passes: - print test + print ("The following tests have no sucesses:") + for test in test_with_no_passes: + print (test) -main() +if __name__ == "__main__": + main() diff --git a/harness/bin/recheck_tests.py b/harness/bin/recheck_tests.py index 4427c87..499af33 100755 --- a/harness/bin/recheck_tests.py +++ b/harness/bin/recheck_tests.py @@ -66,7 +66,7 @@ def doReTest(test_name): records = file_obj.readlines() for tmp_record in records: message = " {}".format(tmp_record.strip()) - print message + print (message) file_obj.close() print @@ -78,7 +78,7 @@ def doReTest(test_name): records = file_obj.readlines() for tmp_record in records: message = " {}".format(tmp_record.strip()) - print message + print (message) file_obj.close() print diff --git a/harness/bin/rgt_calculate_percentages.py b/harness/bin/rgt_calculate_percentages.py index b13a136..a956cfa 100755 --- a/harness/bin/rgt_calculate_percentages.py +++ b/harness/bin/rgt_calculate_percentages.py @@ -1,8 +1,10 @@ #! /usr/bin/python +# Python package imports import re - +# NCCS Test Harness Package Imports +from libraries.layout_of_apps_directory import apptest_layout class teststatusfile: def __init__(self,test_status_file): @@ -64,10 +66,12 @@ def __init__(self,test_status_file): str(float(self.__totalfailed)/float(self.__totaltests)), str(float(self.__totalincinclusive)/float(self.__totaltests)) ) - print s1,s2,s3 + print (s1,s2,s3) + def main(): - - t1 = teststatusfile("test_status.txt") + filename=apptest_layout.test_pass_failed_status_filename + t1 = teststatusfile(filename) -main() +if __name__ == "__main__" : + main() diff --git a/harness/bin/runtests.py b/harness/bin/runtests.py index d0ba6d3..83cb3e1 100755 --- a/harness/bin/runtests.py +++ b/harness/bin/runtests.py @@ -5,28 +5,271 @@ import argparse import os import sys -import string -import configparser +import logging # My harness package imports from libraries import input_files from libraries import regression_test +from libraries import command_line +from libraries.config_file import rgt_config_file # -# Authors: Arnold Tharrington, Wayne Joubert, Veronica Vergera, and Mark Berrill +# Authors: Arnold Tharrington, Wayne Joubert, Veronica Vergera, Mark Berrill, and Mike Brim # National Center for Computational Sciences, Scientific Computing Group. # Oak Ridge National Laboratory # +#----------------------------------------------------- +# This section defines the main logger and its - +# file and console handlers. - +# - +#----------------------------------------------------- + +def _create_main_logger(logger_name, + logger_level, + logger_filehandler_filename, + logger_filehandler_loglevel, + logger_consolehandler_loglevel): + """Returns the main logging object. + + Parameters + ---------- + logger_name : A string + The name of the logger object + + logger_level : A numeric integer + The log level of the returned logger object. + + logger_filehandler_filename : A string + The name of the logging file handler. + + logger_filehandler_loglevel : A numeric integer + The log level of the file handler of the returned logger object. 
+ + logger_consolehandler_loglevel : A numeric integer + The log level of the console handler of the returned logger object. + + Returns + ------- + Logger + A logger object + + """ + my_logger = logging.getLogger(logger_name) + my_logger.setLevel(logger_level) + fh = logging.FileHandler(logger_filehandler_filename,mode="a") + fh.setLevel(logger_filehandler_loglevel) + ch = logging.StreamHandler() + ch.setLevel(logger_consolehandler_loglevel) + formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') + ch.setFormatter(formatter) + fh.setFormatter(formatter) + my_logger.addHandler(fh) + my_logger.addHandler(ch) + my_logger.info("Created the main logger.") + return my_logger + +MAIN_LOGGER_NAME='main_logger' +"""str: The name of the main logger.""" + +MAIN_LOGGER_LEVEL=logging.DEBUG +"""The log level of the main logger.""" + +MAIN_LOGGER_FILEHANDLER_FILENAME="main.log" +"""str: The file name for the main logger fileHandler.""" + +MAIN_LOGGER_FILEHANDLER_LOGLEVEL=logging.DEBUG +"""The log level for the main log file handler.""" + +MAIN_LOGGER_CONSOLE_HANDLER_LOGLEVEL=logging.ERROR +"""The log level for the main log console handler.""" + +def get_main_logger(): + """ Returns the main logger. + + Returns + ------- + Logger + """ + if MAIN_LOGGER_NAME in logging.Logger.manager.loggerDict: + my_main_logger = logging.getLogger(MAIN_LOGGER_NAME) + else: + my_main_logger = _create_main_logger(MAIN_LOGGER_NAME, + MAIN_LOGGER_LEVEL, + MAIN_LOGGER_FILEHANDLER_FILENAME, + MAIN_LOGGER_FILEHANDLER_LOGLEVEL, + MAIN_LOGGER_CONSOLE_HANDLER_LOGLEVEL) + return my_main_logger + +#----------------------------------------------------- +# End of section we define the main logger and its - +# file and console handlers. - +# - +#----------------------------------------------------- + +#----------------------------------------------------- +# This section sets the permitted and/or default - +# for the command line options of the command - +# runtests.py. - +# - +# We put the permitted values in a tuple so as - +# to make the values immutable. - +#----------------------------------------------------- + +DEFAULT_CONFIGURE_FILE = rgt_config_file.getDefaultConfigFile() +""" +The default configuration filename. + +The configuration file contains the machine settings, number of CPUs +per node, etc., for the machine the harness is being run on. Each machine +has a default configuration file that will be used unless another +configuration is specified by the command line or input file. + +""" + +# This section pertains to the harness tasks option. + +USE_HARNESS_TASKS_IN_RGT_INPUT_FILE="use_harness_tasks_in_rgt_input_file" +""" +str: A flag for the harness to use the designated input file for runtests.py for the + the harness tasks. + +""" + +DEFAULT_HARNESS_TASK=USE_HARNESS_TASKS_IN_RGT_INPUT_FILE +""" +str: The default harness task. + +If no task option is specified on the command line runtests.py, then the +default harness task will we default to the tasks in the runtests.py input file. + +""" + +PERMITTED_HARNESS_TASKS=(USE_HARNESS_TASKS_IN_RGT_INPUT_FILE,'checkout','start','stop','status') +""" +A tuple of the permitted harness tasks. + +These tasks are set by means of command line arguments to the runtests.py +command: --mode | -m . The following tasks are supported. + +* use_harness_tasks_in_rgt_input_file - Uses the harness tasks in the runtests.py input file. +* checkout - Checks out via a git clone command the harness application-test form the repository. 
+* start - Starts the application-test(s). +* stop - Stops an application-test(s). +* status - Prints to std out the status of the application-test(s). + +Specifying an unsupported task will result in the harness aborting with +an error. + +To add an additional task do the following: + +* Add the value to the PERMITTED_HARNESS_TASKS tuple. +* In function create_parser(), located in module bin.runtests.py, add the appropriate arguments to the parser. +* Implement unit tests in module ci_testing_utilities.harness_unit_tests.test_runtests.py. + +""" + + +# This section pertains to the log level option. +PERMITTED_LOG_LEVELS=("NOTSET","DEBUG","INFO","WARNING", "ERROR", "CRITICAL") +""" +A tuple of str: The permitted log levels. + +The log levels are set by means of the command line arguments to the +runtests.py command: --loglevel . The following loglevels +are supported. + +* NOTSET +* DEBUG +* INFO +* WARNING +* ERROR +* CRITICAL + +Specifying an unsupported loglevel will result in the harness aborting with an +error. +""" + +DEFAULT_LOG_LEVEL=PERMITTED_LOG_LEVELS[0] +""" +str: The default log level. + +If no loglevel option is specified on the command line, then default loglevel +will be set to NOTSET. +""" + +# This section pertains to the concurrency option. It +# is deprecated. +PERMITTED_CONCURRENCY_VALUES=('serial','parallel') +""" +A tuple of str: deprecated feature. This concurrency option is hard-coded to serial. + +This tuple are vestiges of running the harness in parallel. +The parallel running of the harness is disabled, and this +tuple will be removed. +""" + +DEFAULT_CONCURRENCY=PERMITTED_CONCURRENCY_VALUES[0] +""" +str: A deprecated feature. This concurrency option is hard-coded to serial. + +The parallel running of the harness is disabled, and this +module constant will be removed. +""" + +# This section pertains to the input file option. +DEFAULT_INPUT_FILE='rgt.input' +""" +str: The default harness input file. + +The harness input file is set by means of the command line arguments to the +runtests.py command: --inputfile | -i +If no input file is specified on the command line, then the default input +file will be used. +""" + +# This section pertains to the output option. +PERMITTED_OUTPUT_VALUES=('screen','logfile') +""" +A tuple of str: The permitted values of the output option. + +The harness output options is set by means of the command line arguments to the +runtests.py command: --outputfile | -o . +This tuple contains the permitted values of the output option. If no output option +is specified, then the default option will be used. + +* screen - Writes logging to std out and std err. +* logfile - Writes logging to a file +""" + +DEFAULT_OUTPUT=PERMITTED_OUTPUT_VALUES[0] +""" +str: The default output option. +""" + +#----------------------------------------------------- +# End of section that sets the permitted and/or - +# default for the command line options of the - +# command runtests.py. - +# - +#----------------------------------------------------- + def create_parser(): - """Parses the command line arguments. + """ + Returns an ArgumentParser object. - Arguments: - None + The returned ArgumentParser object holds the information + to parse the command line into Python data types. - Returns: - An ArgParser object that contains the information of the - command line arguments. + Parameters + ---------- + None + + Returns + ------- + ArgumentParser + An object that contains the information of the + command line arguments. 
""" parser = argparse.ArgumentParser(description="Execute specified harness task for application tests listed in input file", @@ -35,86 +278,152 @@ def create_parser(): parser.add_argument('-i', '--inputfile', required=False, - default="rgt.input", + default=DEFAULT_INPUT_FILE, help="Input file name (default: %(default)s)") - parser.add_argument("--loglevel", + parser.add_argument('-c', '--configfile', + required=False, + default=DEFAULT_CONFIGURE_FILE, + type=str, + help="Configuration file name (default: %(default)s)") + + parser.add_argument('-l', '--loglevel', required=False, - choices=["DEBUG","INFO","WARNING", "ERROR", "CRITICAL"], - default="INFO", + choices=PERMITTED_LOG_LEVELS, + default=DEFAULT_LOG_LEVEL, help="Logging level (default: %(default)s)") out_help = ("Destination for harness stdout/stderr messages:\n" " 'screen' - print messages to console (default)\n" - " 'logfile' - print messages to log file") + " 'logfile' - print messages to log file\n") parser.add_argument('-o', '--output', required=False, - choices=['logfile','screen'], - default='screen', + choices=PERMITTED_OUTPUT_VALUES, + default=DEFAULT_OUTPUT, + type=str, help=out_help) mode_help = ("Harness task:\n" " 'checkout' - checkout application tests listed in input file\n" " 'start' - start application tests listed in input file\n" " 'stop' - stop application tests listed in input file\n" - " 'status' - check status of application tests listed in input file") + " 'status' - check status of application tests listed in input file\n") parser.add_argument('-m', '--mode', required=False, - help=mode_help) - #choices=['checkout', 'start', 'stop', 'status'], + help=mode_help, + default=[DEFAULT_HARNESS_TASK], + nargs='*', + choices=PERMITTED_HARNESS_TASKS) + + parser.add_argument("--fireworks", + action='store_true', + help="Use FireWorks to run harness tasks") return parser +def parse_commandline_argv(argv): + """ + Returns a object of type HarnessParsedArguments. + + The returned object holds the Python data types of + the Python command line. + + Parameters + ---------- + argv : list + Holds the command line arguments + + Returns + ------- + HarnessParsedArguments + Stores the Python data of the command line arguments. See module + command_line.py for details on HarnessParsedArguments object. + + + """ + + parser = create_parser() + Vargs = parser.parse_args(argv) + harness_parsed_args = command_line.HarnessParsedArguments(inputfile=Vargs.inputfile, + loglevel=Vargs.loglevel, + configfile=Vargs.configfile, + stdout_stderr=Vargs.output, + runmode=Vargs.mode, + use_fireworks=Vargs.fireworks) + return harness_parsed_args + def runtests(my_arg_string=None): + """ + The entry point of running the Harness. + + If my_arg_string is None, then sys.argv is the command line argument list. + + Parameters + ---------- + my_arg_string : list + A list that holds command line arguments. + + Returns + ------- + Harness + An object that stores/encapsulates the running of the NCCS Harness. See module + regression_test.py for details on Harness object. 
+ """ + + main_logger = get_main_logger() + argv = None if my_arg_string == None: argv = sys.argv[1:] else: argv = shlex.split(my_arg_string) + main_logger.info("Parsing the command line arguments.") - # - # Parse command line arguments - # - parser = create_parser() - Vargs = parser.parse_args(argv) - inputfile = Vargs.inputfile - loglevel = Vargs.loglevel - stdout_stderr = Vargs.output - runmode = Vargs.mode + harness_arguments = parse_commandline_argv(argv) # Print the effective command line to stdout. - command_options = ("Effective command line: " - "runtests.py" - " --inputfile {my_inputfile}" - " --loglevel {my_loglevel}" - " --output {my_output}" - " --mode {my_runmode}") - effective_command_line = command_options.format(my_inputfile = inputfile, - my_loglevel = loglevel, - my_output = stdout_stderr, - my_runmode = runmode) - print(effective_command_line) - - - # Determine the machine - machinename = 'master' - if 'OLCF_HARNESS_MACHINE' in os.environ: - machinename = os.environ['OLCF_HARNESS_MACHINE'] - configfile = machinename + '.ini' - print('Using machine config:', configfile) - - # Read the harnesss input file - ifile = input_files.rgt_input_file(inputfilename=inputfile, - configfilename=configfile, - runmodecmd=runmode) + effective_command_line = harness_arguments.effective_command_line + main_logger.info(effective_command_line) + + main_logger.info("Completed parsing command line arguments.") + + # Read the input and master config + main_logger.info("Reading the harness input file.") + ifile = input_files.rgt_input_file(inputfilename=harness_arguments.inputfile, + runmodecmd=harness_arguments.runmode) + main_logger.info("Completed reading the harness input file.") + + main_logger.info("Reading the harness config file.") + config = rgt_config_file(configfilename=harness_arguments.configfile) + main_logger.info("Completed reading the harness config file.") # Create and run the harness - rgt = regression_test.Harness(ifile, loglevel, stdout_stderr) + rgt = regression_test.Harness(config, ifile, + harness_arguments.loglevel, + harness_arguments.stdout_stderr, + harness_arguments.use_fireworks) + + main_logger.info("Created an instance of the harness.") + main_logger.info("Harness: " + str(rgt)) + main_logger.info("Running the harness tasks.") rgt.run_me(my_effective_command_line=effective_command_line) + main_logger.info("Completed running the harness tasks.") return rgt if __name__ == "__main__": - runtests() + + my_main_logger = _create_main_logger(MAIN_LOGGER_NAME, + MAIN_LOGGER_LEVEL, + MAIN_LOGGER_FILEHANDLER_FILENAME, + MAIN_LOGGER_FILEHANDLER_LOGLEVEL, + MAIN_LOGGER_CONSOLE_HANDLER_LOGLEVEL) + + my_main_logger.info("Start of harness") + + rgt = runtests() + + my_main_logger.info("End of harness.") + diff --git a/harness/bin/test_harness_driver.py b/harness/bin/test_harness_driver.py index 180234d..6b94f8a 100755 --- a/harness/bin/test_harness_driver.py +++ b/harness/bin/test_harness_driver.py @@ -1,31 +1,71 @@ #! /usr/bin/env python3 -import os -import sys -import string +# +# Author: Arnold Tharrington (arnoldt@ornl.gov) +# Scientific Computing Group. +# +# Modified by: Veronica G. Vergara Larrea (vergaravg@ornl.gov) +# User Assistance Group. 
+#
+# National Center for Computational Sciences
+# Oak Ridge National Laboratory
+#
+
+# Python imports
 import argparse
 import datetime
+import logging
+import os
 import shutil
+import stat
+import string
+import subprocess
+import sys
+
 from shlex import split
 
-from libraries.apptest import subtest
+# Harness imports
+from libraries.subtest_factory import SubtestFactory
 from libraries.layout_of_apps_directory import apptest_layout as layout
 from libraries.layout_of_apps_directory import get_layout_from_scriptdir
+from libraries.layout_of_apps_directory import get_path_to_logfile_from_scriptdir
 from libraries import rgt_utilities
+from libraries.config_file import rgt_config_file
+from libraries.status_file_factory import StatusFileFactory
 from libraries import status_file
+from libraries.rgt_loggers import rgt_logger_factory
 
 from machine_types.machine_factory import MachineFactory
+from machine_types.base_machine import SetBuildRTEError
 
+DEFAULT_CONFIGURE_FILE = rgt_config_file.getDefaultConfigFile()
+"""
+The default configuration filename.
 
-#
-# Author: Arnold Tharrington (arnoldt@ornl.gov)
-# Scientific Computing Group.
-#
-# Modified by: Veronica G. Vergara Larrea (vergaravg@ornl.gov)
-# User Assistance Group.
-#
-# National Center for Computational Sciences
-# Oak Ridge National Laboratory
-#
+The configuration file contains the machine settings, number of CPUs
+per node, etc., for the machine the harness is being run on. Each machine
+has a default configuration file that will be used unless another
+configuration is specified by the command line or input file.
+"""
+
+MODULE_THRESHOLD_LOG_LEVEL = "DEBUG"
+"""str : The threshold logging level for this module."""
+
+MODULE_LOGGER_NAME = __name__
+"""The logger name for this module."""
+
+def get_log_level():
+    """Returns the test harness driver threshold log level.
+
+    Returns
+    -------
+    str
+    """
+    return MODULE_THRESHOLD_LOG_LEVEL
+
+def get_logger_name():
+    """Returns the logger name for this module."""
+    return MODULE_LOGGER_NAME
 
 def create_parser():
     my_parser = argparse.ArgumentParser(description="Application Test Driver",
@@ -36,6 +76,11 @@ def create_parser():
     my_parser.add_argument('-c', '--check',
                            help='Check the application test results',
                            action='store_true')
+    my_parser.add_argument('-C', '--configfile',
+                           required=False,
+                           default=DEFAULT_CONFIGURE_FILE,
+                           type=str,
+                           help="Configuration file name (default: %(default)s)")
     my_parser.add_argument('-d', '--scriptsdir',
                            default=os.getcwd(),
                            help='Provide full path to app/test/Scripts directory (default: current working directory)')
@@ -44,6 +89,9 @@ def create_parser():
     my_parser.add_argument('-r', '--resubmit',
                            help='Have the application test batch script resubmit itself',
                            action='store_true')
+    my_parser.add_argument('-R', '--run',
+                           help='Run the application test batch script (NOTE: for use within a job)',
+                           action='store_true')
     my_parser.add_argument('-s', '--submit',
                            help='Submit the application test batch script',
                            action='store_true')
@@ -73,7 +121,6 @@ def backup_status_file(test_status_dir):
     if os.path.exists(src):
         shutil.copyfile(src, dest)
 
-
 def read_job_file(test_status_dir):
     """ Read test_status_dir/job_id.txt to get job id """
     job_id = "0"
@@ -86,11 +133,11 @@ def read_job_file(test_status_dir):
     return job_id
 
 
-def auto_generated_scripts(apptest,
-                           test_workspace,
-                           unique_id,
+def auto_generated_scripts(harness_config,
+                           apptest,
                            jstatus,
-                           actions):
+                           actions,
+                           a_logger):
     """
     Generates and executes scripts to build, run, and check a test.
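+
+    A sketch of the dictionary shapes involved; the values shown are
+    illustrative, not required settings:
+
+        actions     = {'build': True, 'submit': True, 'run': False,
+                       'check': False, 'resubmit': False}
+        exit_values = {'build': 0, 'submit': 0, 'run': 0, 'check': 0}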
@@ -98,225 +145,121 @@
     """
 
+    messloc = "In function {functionname}:".format(functionname="auto_generated_scripts")
+
     status_dir = apptest.get_path_to_status()
+    ra_dir = apptest.get_path_to_runarchive()
 
     # Instantiate the machine for this computer.
-    mymachine = MachineFactory.create_machine(apptest)
+    mymachine = MachineFactory.create_machine(harness_config, apptest)
 
+    #-----------------------------------------------------
+    # In this section we build the binary.               -
+    #                                                    -
+    #-----------------------------------------------------
     build_exit_value = 0
     if actions['build']:
         # Build the executable for this test on the specified machine
         jstatus.log_event(status_file.StatusFile.EVENT_BUILD_START)
-        build_exit_value = mymachine.build_executable()
-        jstatus.log_event(status_file.StatusFile.EVENT_BUILD_END, build_exit_value)
-
-    submit_exit_value = 0
+        try:
+            build_exit_value = mymachine.build_executable()
+        except SetBuildRTEError as error:
+            message = f"{messloc} Unable to set the build runtime environment."
+            message += error.message
+            a_logger.doCriticalLogging(message)
+        finally:
+            jstatus.log_event(status_file.StatusFile.EVENT_BUILD_END, build_exit_value)
+
+    #-----------------------------------------------------
+    # In this section we run the binary.                 -
+    #                                                    -
+    #-----------------------------------------------------
     job_id = "0"
-    if actions['submit']:
-        if build_exit_value != 0:
-            # do not submit a failed build
-            submit_exit_value = 1
-        else:
-            # Create and submit the batch script
-            mymachine.make_batch_script()
-            jstatus.log_event(status_file.StatusFile.EVENT_SUBMIT_START)
-            submit_exit_value = mymachine.submit_batch_script()
-            jstatus.log_event(status_file.StatusFile.EVENT_SUBMIT_END, submit_exit_value)
-
-        if 0 == submit_exit_value:
-            # Log the job id.
-            job_id = read_job_file(status_dir)
-            if job_id != "0":
-                jstatus.log_event(status_file.StatusFile.EVENT_JOB_QUEUED, job_id)
-            else:
-                print("SUBMIT ERROR: failed to retrieve job id!")
-                submit_exit_value = 1
-
-    check_exit_value = 0
-    if actions['check']:
-        if not actions['submit']:
-            job_id = read_job_file(status_dir)
-        if job_id != "0":
-            jstatus.log_event(status_file.StatusFile.EVENT_CHECK_START)
-            check_exit_value = mymachine.check_executable()
-            mymachine.report_executable()
-        else:
-            print("CHECK ERROR: failed to retrieve job id!")
-            check_exit_value = 1
-
-    exit_values = {'build': build_exit_value,
-                   'check': check_exit_value,
-                   'submit': submit_exit_value}
-    return exit_values
-
-
-def user_generated_scripts(apptest,
-                           test_workspace,
-                           unique_id,
-                           jstatus,
-                           actions):
-    """
-    Executes user-provided build, submit, and check scripts for a test.
-    """
-
-    status_dir = apptest.get_path_to_status()
-    runarchive_dir = apptest.get_path_to_runarchive()
-
-    build_exit_value = 0
-    if actions['build']:
-        jstatus.log_event(status_file.StatusFile.EVENT_BUILD_START)
-        build_exit_value = execute_user_build_script(test_workspace, unique_id)
-        jstatus.log_event(status_file.StatusFile.EVENT_BUILD_END, build_exit_value)
 
+    submit_exit_value = 0
-    submit_exit_value = 0
-    job_id = "0"
-    if actions['submit']:
-        if build_exit_value != 0:
-            # do not submit a failed build
-            submit_exit_value = 1
-        else:
+    if actions['submit'] and (build_exit_value != 0):
+        submit_exit_value = 1
+        message = f"{messloc} No submit action due to prior failed build."
+        a_logger.doCriticalLogging(message)
+    elif actions['submit'] and (build_exit_value == 0):
+        # Create the batch script
+        make_batch_script_status = mymachine.make_batch_script()
+
+        # Submit the batch script only if it was created successfully
+        if make_batch_script_status:
+            # Submit the batch script
             jstatus.log_event(status_file.StatusFile.EVENT_SUBMIT_START)
-            submit_exit_value = execute_user_submit_script(test_workspace,
-                                                           unique_id,
-                                                           actions['resubmit'])
-            jstatus.log_event(status_file.StatusFile.EVENT_SUBMIT_END, submit_exit_value)
-
-        if 0 == submit_exit_value:
-            # Log the job id.
-            job_id = read_job_file(status_dir)
-            if job_id != "0":
-                jstatus.log_event(status_file.StatusFile.EVENT_JOB_QUEUED, job_id)
-            else:
-                print("SUBMIT ERROR: failed to retrieve job id!")
-                submit_exit_value = 1
-
+            try:
+                submit_exit_value = mymachine.submit_batch_script()
+            finally:
+                jstatus.log_event(status_file.StatusFile.EVENT_SUBMIT_END, submit_exit_value)
+
+            if submit_exit_value == 0:
+                # Log the job id.
+                job_id = read_job_file(status_dir)
+                if job_id != "0":
+                    jstatus.log_event(status_file.StatusFile.EVENT_JOB_QUEUED, job_id)
+                else:
+                    print("SUBMIT ERROR: failed to retrieve job id!")
+                    message = f"{messloc} Failed to retrieve the job id."
+                    a_logger.doCriticalLogging(message)
+                    submit_exit_value = 1
+
+    run_exit_value = 0
+    if actions['run']:
+        # The 'run' action should be executed within a job
+
+        # Create the batch script
+        jstatus.log_event(status_file.StatusFile.EVENT_SUBMIT_START)
+        make_batch_script_status = mymachine.make_batch_script()
+        jstatus.log_event(status_file.StatusFile.EVENT_SUBMIT_END, 0)
+
+        # Find the current job id and write it to the associated status file
+        mymachine.write_jobid_to_status()
+        job_id = read_job_file(status_dir)
+        if make_batch_script_status and job_id != "0":
+            jstatus.log_event(status_file.StatusFile.EVENT_JOB_QUEUED, job_id)
+
+            # now run the batch script as a subprocess
+            batch_script = os.path.join(ra_dir, mymachine.test_config.get_batch_file())
+            os.chmod(batch_script, (stat.S_IREAD|stat.S_IWRITE|stat.S_IEXEC))
+            args = [batch_script]
+            run_outfile = os.path.join(ra_dir, "output_run.txt")
+            run_stdout = open(run_outfile, "w")
+            p = subprocess.Popen(args, stdout=run_stdout, stderr=subprocess.STDOUT)
+            p.wait()
+            run_exit_value = p.returncode
+            run_stdout.close()
+        else:
+            print("RUN ERROR: failed to retrieve job id!")
+            message = f"{messloc} Failed to retrieve the job id."
+            a_logger.doCriticalLogging(message)
+            run_exit_value = 1
+
+    #-----------------------------------------------------
+    # In this section we check the results.              -
+    #                                                    -
+    #-----------------------------------------------------
     check_exit_value = 0
     if actions['check']:
         if not actions['submit']:
             job_id = read_job_file(status_dir)
         if job_id != "0":
             jstatus.log_event(status_file.StatusFile.EVENT_CHECK_START)
-            check_exit_value = execute_user_check_script(runarchive_dir, unique_id)
+            check_exit_value = mymachine.check_executable()
+            mymachine.start_report_executable()
         else:
             print("CHECK ERROR: failed to retrieve job id!")
             check_exit_value = 1
 
-    exit_values = {'build': build_exit_value,
-                   'check': check_exit_value,
-                   'submit': submit_exit_value}
+    exit_values = {
+        'build'  : build_exit_value,
+        'check'  : check_exit_value,
+        'run'    : run_exit_value,
+        'submit' : submit_exit_value
+    }
 
     return exit_values
 
 
-def execute_user_build_script(test_workspace, unique_id):
-    # save current dir
-    path_to_scripts_dir = os.getcwd()
-
-    #
-    # Use build_executable.py if it exists.
-    # Otherwise, use build_executable.x script.
- # - python_file = "./build_executable.py" - script_file = "./build_executable.x" - if os.path.isfile(python_file): - # Call build_executable.py as a main program. - import build_executable - build_exit_value = build_executable.build_executable(test_workspace, - unique_id) - elif os.path.isfile(script_file): - # Execute the build script via os.system(). - build_command = script_file + " -p " + test_workspace + " -i " + unique_id - build_exit_value = os.system(build_command) - else: - print("BUILD ERROR: no build script found!") - build_exit_value = 1 - - # restore current dir - os.chdir(path_to_scripts_dir) - - return build_exit_value - - -def execute_user_submit_script(test_workspace, unique_id, resubmit): - # save current dir - path_to_scripts_dir = os.getcwd() - - # - # Use submit_executable.py if it exists. - # Otherwise, use submit_executable.x script. - # - python_file = "./submit_executable.py" - script_file = "./submit_executable.x" - if os.path.isfile(python_file): - # Call submit_executable.py as a main program. - import submit_executable - submit_exit_value = submit_executable.submit_executable(test_workspace, - unique_id, - batch_recursive_mode=resubmit) - elif os.path.isfile(script_file): - # Execute the submit script via os.system(). - submit_command = script_file + " -p " + test_workspace + " -i " + unique_id - if resubmit: - submit_command += " -r" - submit_exit_value = os.system(submit_command) - else: - print("SUBMIT ERROR: no submit script found!") - submit_exit_value = 1 - - # restore current dir - os.chdir(path_to_scripts_dir) - - return submit_exit_value - - -def execute_user_check_script(path_to_results, unique_id): - # save current dir - path_to_scripts_dir = os.getcwd() - - # - # Use check_executable.py if it exists. - # Otherwise, use check_executable.x script. - # - python_file = "./check_executable.py" - script_file = "./check_executable.x" - if os.path.isfile(python_file): - # Call check_executable.py as a main program. - import check_executable - check_exit_value = check_executable.check_executable(path_to_results, unique_id) - elif os.path.isfile(script_file): - # Execute the check script via os.system(). - check_command = script_file + " -p " + path_to_results + " -i " + unique_id - check_exit_value = os.system(check_command) - else: - # check scripts are optional - check_exit_value = 0 - - # - # Use report_executable.py if it exists. - # Otherwise, use report_executable.x script. - # - report_python_file = "./report_executable.py" - report_script_file = "./report_executable.x" - report_exit_value = 0 - if os.path.isfile(report_python_file): - # Call report_executable.py as a main program. - import report_executable - report_exit_value = report_executable.report_executable(path_to_results, unique_id) - elif os.path.isfile(report_script_file): - # Execute the report script via os.system(). - report_command = report_script_file + " -p " + path_to_results + " -i " + unique_id - report_exit_value = os.system(report_command) - else: - # check scripts are optional - report_exit_value = 0 - - # restore current dir - os.chdir(path_to_scripts_dir) - - # Q: Do we care if report_executable fails? - return check_exit_value + report_exit_value - - -# # This program coordinates the scripts build_executable.x and submit_executable.x # and is designed such that it will be called from the Scripts directory. 
# @@ -335,12 +278,13 @@ def test_harness_driver(argv=None): do_build = Vargs.build do_check = Vargs.check do_submit = Vargs.submit + do_run = Vargs.run # # If none of the individual actions were specified, act - # like the previous version and do build + submit + # like the previous version and do 'build + submit' # - if not (do_build or do_submit or do_check): + if not (do_build or do_submit or do_check or do_run): do_build = True do_submit = True @@ -348,16 +292,21 @@ def test_harness_driver(argv=None): if do_submit: do_resubmit = Vargs.resubmit - actions = {'build' : do_build, - 'check' : do_check, - 'submit' : do_submit, - 'resubmit' : do_resubmit} + actions = { + 'build' : do_build, + 'check' : do_check, + 'submit' : do_submit, + 'resubmit' : do_resubmit, + 'run' : do_run + } + + + # Create a harness config (which sets harness env vars) + harness_cfg = rgt_config_file(configfilename=Vargs.configfile) # Get the unique id for this test instance. - existing_id = True unique_id = Vargs.uniqueid if unique_id == None: - existing_id = False unique_id = rgt_utilities.unique_harness_id() print(f'Generated test unique id: {unique_id}') @@ -371,41 +320,43 @@ def test_harness_driver(argv=None): sys.path.insert(0, testscripts) # Instantiate application subtest - apptest = subtest(name_of_application=app, - name_of_subtest=test, - local_path_to_tests=apps_root, - harness_id=unique_id) + logger_name = get_logger_name() + fh_filepath = get_path_to_logfile_from_scriptdir(testscripts,unique_id) + logger_threshold = "INFO" + fh_threshold_log_level = "INFO" + ch_threshold_log_level = "CRITICAL" + a_logger = rgt_logger_factory.create_rgt_logger( + logger_name=logger_name, + fh_filepath=fh_filepath, + logger_threshold_log_level=logger_threshold, + fh_threshold_log_level=fh_threshold_log_level, + ch_threshold_log_level=ch_threshold_log_level) + + apptest = SubtestFactory.make_subtest(name_of_application=app, + name_of_subtest=test, + local_path_to_tests=apps_root, + logger=a_logger, + tag=unique_id) + message = "The length of sys.path is " + str(len(sys.path)) + apptest.doInfoLogging(message) - if do_submit: - # Check for the existence of the file "kill_test". - # If the file exists then the program will exit - # without building and submitting scripts. + # + # Check for the existence of the file "kill_test". + # If the file exists then the program will return + # without building and submitting scripts. + # + if do_submit: kill_file = apptest.get_path_to_kill_file() if os.path.exists(kill_file): - message = f'The kill file {kill_file} exists. It must be removed to run this test.' - sys.exit(message) - - # Q: What is the purpose of the testrc file?? - testrc_file = apptest.get_path_to_rc_file() - if os.path.exists(testrc_file): - file_obj = open(testrc_file,"r") - lines = file_obj.readlines() - file_obj.close() - - attempts = int(lines[0].strip()) - limits = int(lines[1].strip()) - - if attempts >= limits: - message = f'Number of tests {attempts} exceeds limit {limits}.' - sys.exit(message) - else: - attempts = attempts + 1 - file_obj = open(testrc_file,"w") - string1 = str(attempts) + "\n" - string2 = str(limits) + "\n" - file_obj.write(string1) - file_obj.write(string2) - file_obj.close() + import time + import shutil + message = f'The kill file {kill_file} exists. It must be removed to run this test.\n' + message += "Stopping test cycle." 
+ print(message) + runarchive_dir = apptest.get_path_to_runarchive() + logging.shutdown() + shutil.rmtree(runarchive_dir,ignore_errors=True) + return # Create the status and run archive directories for this test instance status_dir = apptest.create_test_status() @@ -413,24 +364,40 @@ def test_harness_driver(argv=None): # Create the temporary workspace path for this test instance workspace = rgt_utilities.harness_work_space() - test_workspace = apptest.create_test_workspace(workspace) + apptest.create_test_workspace(workspace) # Update environment with the paths to test directories - os.putenv('RGT_APP_SOURCE_DIR', apptest.get_path_to_source()) - os.putenv('RGT_TEST_SCRIPTS_DIR', testscripts) - os.putenv('RGT_TEST_BUILD_DIR', apptest.get_path_to_workspace_build()) - os.putenv('RGT_TEST_WORK_DIR', apptest.get_path_to_workspace_run()) - os.putenv('RGT_TEST_STATUS_DIR', status_dir) - os.putenv('RGT_TEST_RUNARCHIVE_DIR', ra_dir) + apptest_env_vars = { + 'APP_SOURCE_DIR' : apptest.get_path_to_source(), + 'TEST_SCRIPTS_DIR' : testscripts, + 'TEST_BUILD_DIR' : apptest.get_path_to_workspace_build(), + 'TEST_WORK_DIR' : apptest.get_path_to_workspace_run(), + 'TEST_STATUS_DIR' : status_dir, + 'TEST_RUNARCHIVE_DIR' : ra_dir + } + rgt_utilities.set_harness_environment(apptest_env_vars) # Make backup of master status file backup_status_file(status_dir) - # Add entry to status file - mode_str = 'New' - if existing_id: - mode_str = 'Old' - jstatus = status_file.StatusFile(unique_id, mode_str) + # We now create the status file if it doesn't already exist. + logger_name = "status_file."+ app + "__" + test + fh_filepath = apptest.path_to_status_logfile + logger_threshold = "INFO" + fh_threshold_log_level = "INFO" + ch_threshold_log_level = "CRITICAL" + sfile_logger = rgt_logger_factory.create_rgt_logger( + logger_name=logger_name, + fh_filepath=fh_filepath, + logger_threshold_log_level=logger_threshold, + fh_threshold_log_level=fh_threshold_log_level, + ch_threshold_log_level=ch_threshold_log_level) + path_to_status_file = apptest.get_path_to_status_file() + jstatus = StatusFileFactory.create(path_to_status_file=path_to_status_file, + logger=sfile_logger) + + # Initialize subtest entry 'unique_id' to status file. 
+    jstatus.initialize_subtest(unique_id)
 
     #
     # Determine whether we are using auto-generated or user-generated
@@ -439,26 +406,35 @@ def test_harness_driver(argv=None):
     input_txt = os.path.join(testscripts, layout.test_input_txt_filename)
     input_ini = os.path.join(testscripts, layout.test_input_ini_filename)
     if (os.path.isfile(input_txt) or os.path.isfile(input_ini)):
-        exit_values = auto_generated_scripts(apptest, test_workspace,
-                                             unique_id, jstatus, actions)
+        exit_values = auto_generated_scripts(harness_cfg,
+                                             apptest,
+                                             jstatus,
+                                             actions,
+                                             a_logger)
     else:
-        exit_values = user_generated_scripts(apptest, test_workspace,
-                                             unique_id, jstatus, actions)
+        error_message = "The user-generated scripts functionality is no longer supported."
+        a_logger.doCriticalLogging(error_message)
+        sys.exit(error_message)
 
     build_exit_value = 0
     if actions['build']:
         build_exit_value = exit_values['build']
-        print("build_exit_value = " + str(build_exit_value))
+        print(f'build exit value = {build_exit_value}')
 
     submit_exit_value = 0
    if actions['submit']:
         submit_exit_value = exit_values['submit']
-        print("submit_exit_value = " + str(submit_exit_value))
+        print(f'submit exit value = {submit_exit_value}')
+
+    run_exit_value = 0
+    if actions['run']:
+        run_exit_value = exit_values['run']
+        print(f'run exit value = {run_exit_value}')
 
     check_exit_value = 0
     if actions['check']:
         check_exit_value = exit_values['check']
-        print("check_exit_value = " + str(check_exit_value))
+        print(f'check exit value = {check_exit_value}')
 
     # Now read the result from the job_status.txt file.
     jspath = os.path.join(status_dir, layout.job_status_filename)
@@ -470,7 +446,7 @@ def test_harness_driver(argv=None):
         jstatus.log_event(status_file.StatusFile.EVENT_CHECK_END,
                           job_correctness)
 
-    return build_exit_value + submit_exit_value + check_exit_value
+    return build_exit_value + submit_exit_value + run_exit_value + check_exit_value
 
 
 if __name__ == "__main__":
diff --git a/harness/libraries/.gitignore b/harness/libraries/.gitignore
new file mode 100644
index 0000000..3819313
--- /dev/null
+++ b/harness/libraries/.gitignore
@@ -0,0 +1,2 @@
+*.swp
+*.swo
diff --git a/harness/libraries/__init__.py b/harness/libraries/__init__.py
index 9f2440e..4d40aaf 100644
--- a/harness/libraries/__init__.py
+++ b/harness/libraries/__init__.py
@@ -1,12 +1,12 @@
 __all__ = [
-    'computers',
-    'computers_1',
     'schedulers',
     'rgt_utilities',
     'rgt_job_info',
     'layout_of_apps_directory',
     'apptest',
+    'subtest_factory',
     'base_apptest',
+    'config_file',
     'input_files',
     'regression_test',
     'status_file',
@@ -14,14 +14,13 @@
     'pop_utility_library',
     'ccsm_utility_library',
     'aprun_utility',
-    'aprun_2',
-    'aprun_3',
-    'aprun_titan',
-    'threadedDecorator',
     'application_metric',
     'repositories',
     'application_test_dictionary',
-    'rgt_logging'
+    'rgt_loggers',
+    'command_line',
+    'get_machine_name',
+    'status_file_factory',
 ]
 
 version = 2.0
diff --git a/harness/libraries/apptest.py b/harness/libraries/apptest.py
index a3e521c..f2f72de 100644
--- a/harness/libraries/apptest.py
+++ b/harness/libraries/apptest.py
@@ -1,12 +1,10 @@
 #! /usr/bin/env python3
 
-"""
-.. module:: apptest
-   :synopsis: This module implements an abstraction of an application and subtest.
+""" The apptest module encapsulates the application-test directory structure layout.
 
-.. moduleauthor:: Arnold Tharrington
 """
 
+# Python package imports
 import subprocess
 import shlex
 import time
@@ -14,18 +12,15 @@
 import os
 import sys
 import copy
-import logging
 from types import *
 
 # NCCS Test Harness Package Imports
 from libraries.base_apptest import base_apptest
+from libraries.base_apptest import BaseApptestError
 from libraries.layout_of_apps_directory import apptest_layout
-from libraries.rgt_logging import rgt_logger
 from libraries.status_file import parse_status_file
 from libraries.status_file import parse_status_file2
 from libraries.status_file import summarize_status_file
-from libraries.repositories.common_repository_utility_functions import run_as_subprocess_command
-from libraries.repositories.common_repository_utility_functions import run_as_subprocess_command_return_stdout_stderr
 from libraries.repositories.common_repository_utility_functions import run_as_subprocess_command_return_exitstatus
 from libraries.repositories.common_repository_utility_functions import run_as_subprocess_command_return_stdout_stderr_exitstatus
 
@@ -33,30 +28,47 @@
 # Inherits "apptest_layout".
 #
 class subtest(base_apptest, apptest_layout):
+    """Encapsulates the application-test layout.
+
+    Only one public method, doTasks, exposes the running of harness tasks.
+
+    The class is derived from the classes base_apptest and apptest_layout.
+    """
+
+    #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+    #                                                                 @
+    # Special methods                                                 @
+    #                                                                 @
+    #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
 
-    #
-    # Constructor
-    #
     def __init__(self,
                  name_of_application=None,
                  name_of_subtest=None,
                  local_path_to_tests=None,
-                 harness_id=None,
-                 application_log_level="CRITICAL",
                  number_of_iterations=-1,
-                 timestamp=None):
+                 logger=None,
+                 tag=None):
+
+        # Ensure that tag is not None.
+        if (tag == None):
+            keywords = {"timestamp" : tag}
+            message = "The argument tag must not be None."
+            raise ApptestImproperInstantiationError(message,keywords)
 
         base_apptest.__init__(self,
                               name_of_application,
                               name_of_subtest,
                               local_path_to_tests,
-                              time_stamp=timestamp)
+                              tag)
 
         apptest_layout.__init__(self,
                                 local_path_to_tests,
                                 name_of_application,
                                 name_of_subtest,
-                                harness_id)
+                                tag)
 
         # Format of data is [, , ]
         self.__apps_test_checked_out = []
@@ -64,21 +76,26 @@ def __init__(self,
                                                self.getNameOfApplication(),
                                                name_of_subtest])
         self.__number_of_iterations = -1
+        self.__myLogger = logger
+
+    #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+    #                                                                 @
+    # End of special methods                                          @
+    #                                                                 @
+    #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+
+    #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+    #                                                                 @
+    # Public methods.                                                 @
+    #                                                                 @
+    #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
 
-        # Set the logger for this application and subtest.
-        if timestamp != None:
-            dir1 = self.getDirPathToLogFiles()
-            log_name1 = name_of_application + "." + name_of_subtest
-            log_name2 = os.path.join(dir1,log_name1)
-            log_name3 = os.path.abspath(log_name2)
-            self.__myLogger = rgt_logger(log_name3,
-                                         application_log_level,
-                                         timestamp)
+    @property
+    def logger(self):
+        """logger: Returns the logger of the subtest class.
""" + return self.__myLogger - ################## - # Public Methods # - ################## def doTasks(self, tasks=None, test_checkout_lock=None, @@ -91,15 +108,17 @@ def doTasks(self, """ from libraries.regression_test import Harness - message = "In {app1} {test1} doing {task1}".format(app1=self.getNameOfApplication(), - test1=self.getNameOfSubtest(), - task1=tasks) - self.__myLogger.doInfoLogging(message) + if tasks != None: tasks = copy.deepcopy(tasks) tasks = subtest.reorderTaskList(tasks) + message = "In {app1} {test1} doing {task1}".format(app1=self.getNameOfApplication(), + test1=self.getNameOfSubtest(), + task1=tasks) + self.doInfoLogging(message) + for harness_task in tasks: if harness_task == Harness.checkout: @@ -117,13 +136,13 @@ def doTasks(self, url_to_remote_repsitory_application, my_repository_branch) - self.__myLogger.doInfoLogging("Start of cloning repository") + self.doInfoLogging("Start of cloning repository") destination = self.getLocalPathToTests() self.cloneRepository(my_repository, destination) - self.__myLogger.doInfoLogging("End of cloning repository") + self.doInfoLogging("End of cloning repository") if test_checkout_lock: @@ -131,14 +150,15 @@ def doTasks(self, elif harness_task == Harness.starttest: message = "Start of starting test." - self.__myLogger.doInfoLogging(message) + self.doInfoLogging(message) + + self._start_test(stdout_stderr) - self.start_test(stdout_stderr) message = "End of starting test" - self.__myLogger.doInfoLogging(message) + self.doInfoLogging(message) elif harness_task == Harness.stoptest: - self.stop_test() + self._stop_test() elif harness_task == Harness.displaystatus: if test_display_lock: @@ -152,19 +172,13 @@ def doTasks(self, elif harness_task == Harness.summarize_results: self.generateReport() - def getTestName(self): - return self.getNameOfSubtest() - - def appTestName(self): - return [self.getNameOfApplication(),self.getNameOfSubtest()] - def cloneRepository(self,my_repository,destination): #Get the current working directory. cwd = os.getcwd() message = "For the cloning, my current directory is " + cwd - self.__myLogger.doInfoLogging(message) + self.doInfoLogging(message) my_repository.cloneRepository(destination, self.__myLogger) @@ -173,61 +187,18 @@ def cloneRepository(self,my_repository,destination): if exit_status > 0: string1 = "Cloning of repository failed." - self.__myLogger.doInfoLogging(string1) + self.doInfoLogging(string1) sys.exit(string1) else: message = "Cloning of repository passed" - self.__myLogger.doInfoLogging(message) + self.doInfoLogging(message) return - # - # Starts the regression test. - # - def start_test(self, - stdout_stderr): - - # If the file kill file exits then remove it. 
-        pathtokillfile = self.get_path_to_kill_file()
-        if os.path.lexists(pathtokillfile):
-            os.remove(pathtokillfile)
-
-        start_test_log_files = self.getPathToStartTestLogFiles()
-        stdout_path = start_test_log_files["stdout"]
-        stderr_path = start_test_log_files["stderr"]
-
-        starttestcomand = "test_harness_driver.py -r"
-
-        with open(stdout_path,"a") as out:
-            with open(stderr_path,"a") as err:
-
-                pathtoscripts = self.get_path_to_scripts()
-
-                if stdout_stderr == "logfile":
-                    (stdout,stderr,exit_status) = \
-                        run_as_subprocess_command_return_stdout_stderr_exitstatus(starttestcomand,
-                                                                                  command_execution_directory=pathtoscripts)
-                elif stdout_stderr == "screen":
-                    (stdout,stderr,exit_status) = \
-                        run_as_subprocess_command_return_exitstatus(starttestcomand,
-                                                                    command_execution_directory=pathtoscripts)
-                out.writelines(stdout)
-                err.writelines(stderr)
-
-                if exit_status > 0:
-                    string1 = "Command failed: " + starttestcomand
-                    sys.exit(string1)
 
     #
     # Stops the test.
     #
-    def stop_test(self):
-
-        pathtokillfile = self.get_path_to_kill_file()
-
-        kill_file = open(pathtokillfile,"w")
-        kill_file.write("")
-        kill_file.close()
 
     #
     # Displays the status of the tests.
@@ -254,8 +225,8 @@ def display_status(self):
                   )
 
         bheader = "\n====================\n"
-
-        dfile_obj = open("test_status.txt","a")
+        filename = apptest_layout.test_status_filename
+        dfile_obj = open(filename,"a")
         dfile_obj.write(theader)
         dfile_obj.write(time1)
         dfile_obj.write(appname)
@@ -316,7 +287,8 @@ def display_status2(self,taskwords,mycomputer_with_events_record):
 
         bheader = "\n====================\n"
 
-        dfile_obj = open("test_status.txt","a")
+        filename = apptest_layout.test_status_filename
+        dfile_obj = open(filename,"a")
         dfile_obj.write(theader)
         dfile_obj.write(time1)
         dfile_obj.write(appname)
@@ -447,14 +419,189 @@ def reorderTaskList(cls,tasks):
 
         return app_tasks1
 
+    def doInfoLogging(self,message):
+        if self.__myLogger:
+            self.__myLogger.doInfoLogging(message)
+
+    def doCriticalLogging(self,message):
+        if self.__myLogger:
+            self.__myLogger.doCriticalLogging(message)
+
+    def waitForAllJobsToCompleteQueue(self, harness_config, timeout):
+        """Waits for the subtest cycle to end.
+
+        A subtest cycle consists of building, submitting to the job
+        scheduler, and completing the subtest in the scheduler.
+
+        Parameters
+        ----------
+        harness_config : rgt_config_file
+            The machine configuration used to instantiate the machine.
+
+        timeout : int
+            The maximum time, in minutes, to wait until the subtest cycle is complete.
+
+        Returns
+        -------
+        None
+
+        """
+
+        from machine_types.machine_factory import MachineFactory
+        import datetime
+
+        # Set the time counters and other flags for ensuring a maximum
+        # wait time while checking completion of the test cycle.
+        time_between_checks = 5.0
+        timeout_secs = timeout*60.0
+        elapsed_time = 0.0
+
+        # Print an informational message on the maximum wait time.
+        message = 'Waiting for all {} : {} tests to complete the testing cycle.\n'.format(self.getNameOfApplication(),self.getNameOfSubtest())
+        message += 'The maximum wait time is {} seconds.\n'.format(str(timeout_secs))
+        message += 'The time between checks is {} seconds.\n'.format(str(time_between_checks))
+        print(message)
+
+        # Instantiate the machine for this computer.
+        mymachine = MachineFactory.create_machine(harness_config, self)
+
+        continue_checking = True
+        start_time = datetime.datetime.now()
+        while continue_checking:
+            time.sleep(time_between_checks)
+            elapsed_time = datetime.datetime.now() - start_time
+            message = 'Checking for subtest cycle completion at {} seconds.\n'.format(str(elapsed_time))
+            print(message)
+
+            if mymachine.isTestCycleComplete(self):
+                continue_checking = False
+                break
+
+            elapsed_time = datetime.datetime.now() - start_time
+            if elapsed_time.total_seconds() > timeout_secs:
+                continue_checking = False
+                message_elapsed_time = 'After {} seconds the testing cycle has exceeded the maximum wait time.\n'.format(str(elapsed_time))
+                print(message_elapsed_time)
+
+        return
+
+    def did_all_tests_pass(self, harness_config):
+        from machine_types.machine_factory import MachineFactory
+        from libraries.status_file_factory import StatusFileFactory
+
+        # Instantiate the machine for this computer.
+        mymachine = MachineFactory.create_machine(harness_config, self)
+
+        ret_val = mymachine.did_all_tests_pass(self)
+
+        return ret_val
+
+    #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+    #                                                                 @
+    # End of public methods.                                          @
+    #                                                                 @
+    #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+
+
+    #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+    #                                                                 @
+    # Private methods.                                                @
+    #                                                                 @
+    #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+    def _start_test(self,
+                    stdout_stderr):
+
+        # If the kill file exists, then remove it.
+        pathtokillfile = self.get_path_to_kill_file()
+        if os.path.lexists(pathtokillfile):
+            os.remove(pathtokillfile)
+
+        starttestcomand = "test_harness_driver.py -r"
+
+        pathtoscripts = self.get_path_to_scripts()
+
+        if stdout_stderr == "logfile":
+            (stdout,stderr,exit_status) = \
+                run_as_subprocess_command_return_stdout_stderr_exitstatus(starttestcomand,
+                                                                          command_execution_directory=pathtoscripts)
+        elif stdout_stderr == "screen":
+            (stdout,stderr,exit_status) = \
+                run_as_subprocess_command_return_exitstatus(starttestcomand,
+                                                            command_execution_directory=pathtoscripts)
+        if exit_status > 0:
+            message = ( "In function {function_name} we have a critical error.\n"
+                        "The command '{cmd}' has exited with a failure.\n"
+                        "The exit return value is {value}.\n").format(function_name=self.__name_of_current_function(), cmd=starttestcomand, value=exit_status)
+            self.doCriticalLogging(message)
+
+            string1 = "Command failed: " + starttestcomand
+            sys.exit(string1)
+        else:
+            message = "In function {function_name}, the command '{cmd}' has executed successfully.\n".format(function_name=self.__name_of_current_function(), cmd=starttestcomand)
+            message += "stdout of command : {}\n".format(stdout)
+            message += "stderr of command : {}\n".format(stderr)
+            self.doInfoLogging(message)
+
+    def _stop_test(self):
+
+        pathtokillfile = self.get_path_to_kill_file()
+        with open(pathtokillfile,"w") as kill_file:
+            kill_file.write("")
+
+        message = "In function {function_name}, the kill file '{filename}' has been created.\n".format(function_name=self.__name_of_current_function(), filename=pathtokillfile)
+        self.doInfoLogging(message)
+
+    def __name_of_current_function(self):
+        classname = self.__class__.__name__
+        functionname = sys._getframe(1).f_code.co_name
+        my_name = classname + "." + functionname
+        return my_name
+
+    #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+    #                                                                 @
+    # End of private methods.                                         @
+    #                                                                 @
+    #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+
+class ApptestImproperInstantiationError(BaseApptestError):
+    """Raised when the class subtest is instantiated with improper parameters."""
+    def __init__(self,
+                 message,
+                 args):
+        self.__message = message
+        self.__args = args
+        return
+
+    @property
+    def message(self):
+        return self.__message
 
 def do_application_tasks(app_test_list,
                          tasks,
                          stdout_stderr):
-    import random
-
     for app_test in app_test_list:
         app_test.doTasks(tasks=tasks,
                          stdout_stderr=stdout_stderr)
+    return
+
+def wait_for_jobs_to_complete_in_queue(harness_config,
+                                       app_test_list,
+                                       timeout):
+    """ Waits for each subtest in the list to complete a subtest cycle.
+
+    Parameters
+    ----------
+    harness_config : rgt_config_file
+        The harness machine configuration.
+
+    app_test_list : list
+        A list of subtest objects.
+
+    timeout : int
+        The maximum time in minutes to wait for the subtest cycle to complete.
+
+    Returns
+    -------
+    None
+
+    """
+    for app_test in app_test_list:
+        app_test.waitForAllJobsToCompleteQueue(harness_config, timeout)
     return
+
diff --git a/harness/libraries/aprun_2.py b/harness/libraries/aprun_2.py
deleted file mode 100644
index a9ced73..0000000
--- a/harness/libraries/aprun_2.py
+++ /dev/null
@@ -1,117 +0,0 @@
-#!/usr/bin/env python3
-
-import re
-
-
-class nodelist:
-    def __init__(self):
-        pass
-
-class baseaprun:
-    def __init__(self):
-        self.__stringpattern1 = "total number of sockets"
-        self.__re1 = re.compile(self.__stringpattern1 + "$")
-
-        self.__stringpattern2 = "number of cores per socket"
-        self.__re2 = re.compile(self.__stringpattern2 + "$")
-
-        self.__delimiter = "="
-
-        self.__filename = "size.txt"
-
-        self.__total_number_of_sockets = 0
-        self.__number_of_cores_per_socket = 0
-        self.__number_of_processors = 0
-        self.__tag = ""
-
-        #Read all the lines of the file.
-        fileobj = open(self.__filename,'r')
-        filerecords = fileobj.readlines()
-        fileobj.close()
-
-        #Parse for the total number of sockets.
-        for record1 in filerecords:
-            words = record1.split(self.__delimiter)
-            words[0] = words[0].strip().lower()
-            if self.__re1.match(words[0]):
-                self.__total_number_of_sockets = int(words[1])
-                break
-
-
-        #Parse for the number of cores per socket.
-        for record1 in filerecords:
-            words = record1.split(self.__delimiter)
-            words[0] = words[0].strip().lower()
-            if self.__re2.match(words[0]):
-                self.__number_of_cores_per_socket= int(words[1])
-                break
-
-        #Compute the number of processors.
-        self.__number_of_processors = self.__total_number_of_sockets*self.__number_of_cores_per_socket
-
-        #Make the job tag.
- string1 = str(self.__total_number_of_sockets) - startindex = 0 - finalindex = 4 - len1 = len(string1) - len2 = finalindex - (len1 - 1) - self.__tag = string1 + len2*"_" - - startindex=0 - finalindex=4 - string2 = str(self.__number_of_processors) - len1 = len(string2) - len2 = finalindex - (len1 -1) - self.__tag = self.__tag + string2 + len2*"_" - - startindex=0 - finalindex=1 - string3 = str(self.__number_of_cores_per_socket) - len1 = len(string3) - len2 = finalindex - (len1 - 1) - self.__tag = self.__tag + string3 + len2*"_" - - def get_problem_size(self): - return (self.__total_number_of_sockets,self.__number_of_cores_per_socket,self.__number_of_processors) - - def get_running_nm_procs(self): - return self.__number_of_processors - - def get_number_of_cores_per_socket(self): - return self.__number_of_cores_per_socket - - def get_job_tag(self): - return self.__tag - - def get_total_number_of_sockets(self): - return self.__total_number_of_sockets - - def get_allocated_nm_procs(self): - return self.__maximum_number_processors - - def set_allocated_nm_procs(self,nm1): - self.__maximum_number_processors = nm1 - -class quadcore(baseaprun): - #Maximum cores per socket. - MAX_CORES_PER_SOCKET = 4 - - def __init__(self): - baseaprun.__init__(self) - - #Compute the maximum number of processors. - maximum_number_processors = self.get_total_number_of_sockets()*quadcore.MAX_CORES_PER_SOCKET - self.set_allocated_nm_procs(maximum_number_processors) - - -class dualcore(baseaprun): - #Maximum cores per socket. - MAX_CORES_PER_SOCKET = 2 - - def __init__(self): - self.maximum_number_processors = 0 - baseaprun.__init__(self) - - #Compute the maximum number of processors. - maximum_number_processors = self.get_total_number_of_sockets()*dualcore.MAX_CORES_PER_SOCKET - self.set_allocated_nm_procs(maximum_number_processors) diff --git a/harness/libraries/aprun_3.py b/harness/libraries/aprun_3.py deleted file mode 100644 index b18c323..0000000 --- a/harness/libraries/aprun_3.py +++ /dev/null @@ -1,433 +0,0 @@ -#!/usr/bin/env python3 - -import re - -class baseaprun: - #----------------------------------------------------- - # Define the mpi run command for alps. - - # - - #----------------------------------------------------- - MPIRUN = "aprun" - - def __init__(self): - #----------------------------------------------------- - # Define the name of the input file that - - # contains the job size configuration. - - # - - #----------------------------------------------------- - self.__filename = "size.txt" - - #----------------------------------------------------- - # Define the text patterns for tokenizing and - - # parsing the input file. - - # - - #----------------------------------------------------- - self.__stringpattern1 = "number of sockets per node" - self.__re1 = re.compile(self.__stringpattern1 + "$") - - self.__stringpattern2 = "number of cores per socket" - self.__re2 = re.compile(self.__stringpattern2 + "$") - - self.__stringpattern3 = "number of threads per mpi task" - self.__re3 = re.compile(self.__stringpattern3 + "$") - - self.__stringpattern4 = "number of nodes" - self.__re4 = re.compile(self.__stringpattern4 + "$") - - self.__delimiter = "=" - - - #----------------------------------------------------- - # Initialize the attributes of the aprun class. 
- - # - - #----------------------------------------------------- - self.__total_number_of_nodes = None - self.__number_of_sockets_per_node = None - self.__number_of_cores_per_socket = None - self.__threads_per_mpi_task = None - self.__total_number_of_sockets = None - self.__number_of_processors = None - self.__number_mpi_tasks = None - self.__tag = "" - - #----------------------------------------------------- - # Read all the lines of the file and store in a list - - # named "filerecords". - - # - - #----------------------------------------------------- - fileobj = open(self.__filename,'r') - filerecords = fileobj.readlines() - fileobj.close() - - #----------------------------------------------------- - # Parse for the total number of nodes. - - # - - #----------------------------------------------------- - for record1 in filerecords: - words = record1.split(self.__delimiter) - words[0] = words[0].strip().lower() - if self.__re4.match(words[0]): - self.__total_number_of_nodes = int(words[1]) - break - - - #----------------------------------------------------- - # Parse for the number of sockets per node. - - # - - #----------------------------------------------------- - for record1 in filerecords: - words = record1.split(self.__delimiter) - words[0] = words[0].strip().lower() - if self.__re1.match(words[0]): - self.__number_of_sockets_per_node = int(words[1]) - break - - - #----------------------------------------------------- - # Parse for the number of cores per socket. - - # - - #----------------------------------------------------- - for record1 in filerecords: - words = record1.split(self.__delimiter) - words[0] = words[0].strip().lower() - if self.__re2.match(words[0]): - self.__number_of_cores_per_socket= int(words[1]) - break - - - #----------------------------------------------------- - # Parse for the number of threads per core. - - # - - #----------------------------------------------------- - for record1 in filerecords: - words = record1.split(self.__delimiter) - words[0] = words[0].strip() - words[0] = words[0].lower() - if self.__re3.match(words[0]): - self.__threads_per_mpi_task = int(words[1]) - break - - - def get_problem_size(self): - return (self.__total_number_of_nodes, - self.__number_of_sockets_per_node, - self.__number_of_cores_per_socket, - self.__number_of_processors, - self.__threads_per_mpi_task) - - def get_number_of_running_procs(self): - return self.__number_of_processors - - def set_number_of_running_procs(self,nm1): - self.__number_of_processors = nm1 - - def get_number_of_nodes(self): - return self.__total_number_of_nodes - - def set_number_of_nodes(self,nm1): - nm1 = self.__total_number_of_nodes - - def get_number_sockets_per_node(self): - return self.__number_of_sockets_per_node - - def get_number_of_cores_per_socket(self): - return self.__number_of_cores_per_socket - - def get_job_tag(self): - return self.__tag - - def get_total_number_of_sockets(self): - return self.__total_number_of_sockets - - def get_allocated_nm_procs(self): - return self.__maximum_number_processors - - def set_allocated_nm_procs(self,nm1): - self.__maximum_number_processors = nm1 - - def get_mpirun_command(self): - return baseaprun.MPIRUN - - def get_threads_per_mpi_task(self): - return self.__threads_per_mpi_task - -class titan_16_core(baseaprun): - #----------------------------------------------------- - # Define the maximum number of cores per socket. - - # Define the feature for this node. 
- - # - - #----------------------------------------------------- - MAX_SOCKETS_PER_NODE = 2 - - MAX_CPU_SOCKETS_PER_NODE = 1 - MAX_GPU_SOCKETS_PER_NODE = 1 - - MAX_GPU_PER_NODE = 1 - MAX_CORES_PER_NODE = 16 - - MAX_GPU_PER_SOCKET = 1 - - MAX_CORES_PER_SOCKET = 16 - FEATURE = None - - def __init__(self): - baseaprun.__init__(self) - - #----------------------------------------------------- - # Compute the number of cores that the we - - # are running on, th job size, etc. - - #----------------------------------------------------- - maximum_number_processors = 0 - nm_running_procs = 0 - job_tag = "" - - if self.get_number_of_nodes(): - nm_running_procs = self.get_number_of_nodes()*self.get_number_sockets_per_node()*self.get_number_of_cores_per_socket() - self.set_number_of_running_procs(nm_running_procs) - - #----------------------------------------------------- - # Compute the maximum number of processors. - - # - - #----------------------------------------------------- - maximum_number_processors = self.get_number_of_nodes()*jaguar_twelve_core.MAX_CORES_PER_NODE - self.set_allocated_nm_procs(maximum_number_processors) - - #----------------------------------------------------- - # Make the job tag. - - # - - #----------------------------------------------------- - string1 = str(self.get_number_of_nodes()) - finalindex = 4 - len1 = len(string1) - len2 = finalindex - (len1 - 1) - job_tag = string1 + len2*"_" - - finalindex=4 - string2 = str(self.get_number_sockets_per_node()) - len1 = len(string2) - len2 = finalindex - (len1 -1) - job_tag = job_tag + string2 + len2*"_" - - finalindex=1 - string3 = str(self.get_threads_per_mpi_task()) - len1 = len(string3) - len2 = finalindex - (len1 - 1) - job_tag = job_tag + string3 + len2*"_" - - self.__tag = job_tag - - def get_job_launch_command(self): - cmmd = baseaprun.MPIRUN - cmmd = cmmd + " -n " + str(self.get_number_of_running_procs()) + " " - cmmd = cmmd + " -S " + str(self.get_number_of_cores_per_socket()) + " " - return cmmd - - def get_feature_option(self): - return jaguar_twelve_core.FEATURE - - - -class jaguar_twelve_core(baseaprun): - #----------------------------------------------------- - # Define the maximum number of cores per socket. - - # Define the feature for this node. - - # - - #----------------------------------------------------- - MAX_SOCKETS_PER_NODE= 2 - MAX_CORES_PER_SOCKET = 6 - MAX_CORES_PER_NODE = 12 - FEATURE = None - - def __init__(self): - baseaprun.__init__(self) - - #----------------------------------------------------- - # Compute the number of cores that the we - - # are running on, th job size, etc. - - #----------------------------------------------------- - maximum_number_processors = 0 - nm_running_procs = 0 - job_tag = "" - - if self.get_number_of_nodes(): - nm_running_procs = self.get_number_of_nodes()*self.get_number_sockets_per_node()*self.get_number_of_cores_per_socket() - self.set_number_of_running_procs(nm_running_procs) - - #----------------------------------------------------- - # Compute the maximum number of processors. - - # - - #----------------------------------------------------- - maximum_number_processors = self.get_number_of_nodes()*jaguar_twelve_core.MAX_CORES_PER_NODE - self.set_allocated_nm_procs(maximum_number_processors) - - #----------------------------------------------------- - # Make the job tag. 
- - # - - #----------------------------------------------------- - string1 = str(self.get_number_of_nodes()) - finalindex = 4 - len1 = len(string1) - len2 = finalindex - (len1 - 1) - job_tag = string1 + len2*"_" - - finalindex=4 - string2 = str(self.get_number_sockets_per_node()) - len1 = len(string2) - len2 = finalindex - (len1 -1) - job_tag = job_tag + string2 + len2*"_" - - finalindex=1 - string3 = str(self.get_threads_per_mpi_task()) - len1 = len(string3) - len2 = finalindex - (len1 - 1) - job_tag = job_tag + string3 + len2*"_" - - self.__tag = job_tag - - def get_job_launch_command(self): - cmmd = baseaprun.MPIRUN - cmmd = cmmd + " -n " + str(self.get_number_of_running_procs()) + " " - cmmd = cmmd + " -S " + str(self.get_number_of_cores_per_socket()) + " " - return cmmd - - def get_feature_option(self): - return jaguar_twelve_core.FEATURE - -class jaguar_octcore(baseaprun): - #----------------------------------------------------- - # Define the maximum number of cores per socket. - - # Define the feature for this node. - - # - - #----------------------------------------------------- - MAX_SOCKETS_PER_NODE= 2 - MAX_CORES_PER_SOCKET = 4 - MAX_CORES_PER_NODE = 8 - FEATURE = None - - def __init__(self): - baseaprun.__init__(self) - - #----------------------------------------------------- - # Compute the number of cores that the we - - # are running on, th job size, etc. - - #----------------------------------------------------- - maximum_number_processors = 0 - nm_running_procs = 0 - job_tag = "" - - if self.get_number_of_nodes(): - nm_running_procs = self.get_number_of_nodes()*self.get_number_sockets_per_node()*self.get_number_of_cores_per_socket() - self.set_number_of_running_procs(nm_running_procs) - - #----------------------------------------------------- - # Compute the maximum number of processors. - - # - - #----------------------------------------------------- - maximum_number_processors = self.get_number_of_nodes()*jaguar_octcore.MAX_CORES_PER_NODE - self.set_allocated_nm_procs(maximum_number_processors) - - - #----------------------------------------------------- - # Make the job tag. - - # - - #----------------------------------------------------- - string1 = str(self.get_number_of_nodes()) - finalindex = 4 - len1 = len(string1) - len2 = finalindex - (len1 - 1) - job_tag = string1 + len2*"_" - - finalindex=4 - string2 = str(self.get_number_sockets_per_node()) - len1 = len(string2) - len2 = finalindex - (len1 -1) - job_tag = job_tag + string2 + len2*"_" - - finalindex=1 - string3 = str(self.get_threads_per_mpi_task()) - len1 = len(string3) - len2 = finalindex - (len1 - 1) - job_tag = job_tag + string3 + len2*"_" - - self.__tag = job_tag - - def get_job_launch_command(self): - cmmd = baseaprun.MPIRUN - cmmd = cmmd + " -n " + str(self.get_number_of_running_procs()) + " " - cmmd = cmmd + " -S " + str(self.get_number_of_cores_per_socket()) + " " - return cmmd - - def get_feature_option(self): - return jaguar_octcore.FEATURE - -class jaguar_quadcore(baseaprun): - #----------------------------------------------------- - # Define the maximum number of cores per socket. - - # Define the feature for this node. - - # - - #----------------------------------------------------- - MAX_CORES_PER_SOCKET = 4 - MAX_CORES_PER_NODE = 4 - FEATURE = "quad" - - def __init__(self): - baseaprun.__init__(self) - - #----------------------------------------------------- - # Compute the maximum number of processors. 
- - # - - #----------------------------------------------------- - maximum_number_processors = self.get_total_number_of_sockets()*jaguar_quadcore.MAX_CORES_PER_SOCKET - self.set_allocated_nm_procs(maximum_number_processors) - - def get_feature_option(self): - return jaguar_quadcore.FEATURE - -class jaguar_dualcore(baseaprun): - #----------------------------------------------------- - # Define the maximum number of cores per socket. - - # Define the feature for this node. - - # - - #----------------------------------------------------- - MAX_CORES_PER_SOCKET = 2 - MAX_CORES_PER_NODE = 2 - FEATURE = "xt4" - - def __init__(self): - self.maximum_number_processors = 0 - baseaprun.__init__(self) - - #----------------------------------------------------- - # Compute the maximum number of processors. - - # - - #----------------------------------------------------- - maximum_number_processors = self.get_total_number_of_sockets()*jaguar_dualcore.MAX_CORES_PER_SOCKET - self.set_allocated_nm_procs(maximum_number_processors) - - def get_feature_option(self): - return jaguar_dualcore.FEATURE - - -class jaguar_cnl(baseaprun): - #----------------------------------------------------- - # Define the maximum number of cores per socket. - - # Define the feature for this node. - - # - - #----------------------------------------------------- - MAX_CORES_PER_SOCKET = 2 - MAX_CORES_PER_NODE = 2 - FEATURE = "xt3" - - def __init__(self): - baseaprun.__init__(self) - - #----------------------------------------------------- - # Compute the maximum number of processors. - - # - - #----------------------------------------------------- - maximum_number_processors = self.get_total_number_of_sockets()*jaguar_cnl.MAX_CORES_PER_SOCKET - self.set_allocated_nm_procs(maximum_number_processors) - - def get_feature_option(self): - return jaguar_cnl.FEATURE - diff --git a/harness/libraries/aprun_eos.py b/harness/libraries/aprun_eos.py deleted file mode 100644 index a26e837..0000000 --- a/harness/libraries/aprun_eos.py +++ /dev/null @@ -1,242 +0,0 @@ -#!/usr/bin/env python3 - -import re - -class baseaprun: - #----------------------------------------------------- - # Define the mpi run command for alps. - - # - - #----------------------------------------------------- - MPIRUN = "aprun" - - def __init__(self): - #----------------------------------------------------- - # Define the name of the input file that - - # contains the job size configuration. - - # - - #----------------------------------------------------- - self.__filename = "size.txt" - - #----------------------------------------------------- - # Define the text patterns for tokenizing and - - # parsing the input file. - - # - - #----------------------------------------------------- - self.__stringpattern1 = "number of eos cores per numa" - self.__re1 = re.compile(self.__stringpattern1 + "$") - - self.__stringpattern2 = "number of cores per eos socket" - self.__re2 = re.compile(self.__stringpattern2 + "$") - - self.__stringpattern3 = "number of threads per mpi task" - self.__re3 = re.compile(self.__stringpattern3 + "$") - - self.__stringpattern4 = "number of nodes" - self.__re4 = re.compile(self.__stringpattern4 + "$") - - self.__delimiter = "=" - - - #----------------------------------------------------- - # Initialize the attributes of the aprun class. 
- - # - - #----------------------------------------------------- - self.__total_number_of_nodes = None - self.__number_of_eos_sockets_per_node = 1 - self.__number_of_accelerator_sockets_per_node = 1 - self.__numberOfCoresPerEosSocket = None - self.__numberOfCoresPerEosNuma=None - self.__threads_per_mpi_task = None - self.__total_number_of_sockets = None - self.__number_of_processors = None - self.__number_mpi_tasks = None - self.__tag = "" - - #----------------------------------------------------- - # Read all the lines of the file and store in a list - - # named "filerecords". - - # - - #----------------------------------------------------- - fileobj = open(self.__filename,'r') - filerecords = fileobj.readlines() - fileobj.close() - - #----------------------------------------------------- - # Parse for the total number of nodes. - - # - - #----------------------------------------------------- - for record1 in filerecords: - words = record1.split(self.__delimiter) - words[0] = words[0].strip().lower() - if self.__re4.match(words[0]): - self.__total_number_of_nodes = int(words[1]) - break - - - #----------------------------------------------------- - # Parse for the number of eos cores per numa - - # - - #----------------------------------------------------- - for record1 in filerecords: - words = record1.split(self.__delimiter) - words[0] = words[0].strip().lower() - if self.__re1.match(words[0]): - self.__numberOfCoresPerEosNuma = int(words[1]) - break - - - #----------------------------------------------------- - # Parse for the number of cores per socket. - - # - - #----------------------------------------------------- - for record1 in filerecords: - words = record1.split(self.__delimiter) - words[0] = words[0].strip().lower() - if self.__re2.match(words[0]): - self.__numberOfCoresPerEosSocket= int(words[1]) - break - - - #----------------------------------------------------- - # Parse for the number of threads per core. 
- - # - - #----------------------------------------------------- - for record1 in filerecords: - words = record1.split(self.__delimiter) - words[0] = words[0].strip() - words[0] = words[0].lower() - if self.__re3.match(words[0]): - self.__threads_per_mpi_task = int(words[1]) - break - - - def get_problem_size(self): - return (self.__total_number_of_nodes, - self.__numberOfCoresPerEosNuma, - self.__numberOfCoresPerEosSocket, - self.__number_of_processors, - self.__threads_per_mpi_task) - - def get_number_of_running_procs(self): - return self.__number_of_processors - - def set_number_of_running_procs(self,nm1): - self.__number_of_processors = nm1 - - def get_number_of_nodes(self): - return self.__total_number_of_nodes - - def set_number_of_nodes(self,nm1): - nm1 = self.__total_number_of_nodes - - def get_number_of_cores_per_eos_numa(self): - return self.__numberOfCoresPerEosNuma - - def get_number_of_eos_sockets_per_node(self): - return self.__number_of_eos_sockets_per_node - - def getNumberOfCoresPerEosSocket(self): - return self.__numberOfCoresPerEosSocket - - def get_job_tag(self): - return self.__tag - - def get_total_number_of_sockets(self): - return self.__total_number_of_sockets - - def get_allocated_nm_procs(self): - return self.__maximum_number_processors - - def set_allocated_nm_procs(self,nm1): - self.__maximum_number_processors = nm1 - - def get_mpirun_command(self): - return baseaprun.MPIRUN - - def get_threads_per_mpi_task(self): - return self.__threads_per_mpi_task - - - def get_job_launch_command(self): - cmmd = baseaprun.MPIRUN - cmmd = cmmd + " -n " + str(self.get_number_of_running_procs()) + " " - cmmd = cmmd + " -S " + str(self.get_number_of_cores_per_eos_numa()) + " " - return cmmd - - def get_feature_option(self): - return eos_16_core_depracated.FEATURE - - - -class eos_16_core(baseaprun): - #----------------------------------------------------- - # Define the maximum number of cores per socket. - - # Define the feature for this node. - - # - - #----------------------------------------------------- - MAX_CORES_PER_NODE = 16 - MAX_SOCKETS_PER_NODE= 2 - MAX_INTERLAGOS_SOCKETS_PER_NODE = 1 - MAX_ACCELERATOR_SOCKETS_PER_NODE = 1 - - MAX_NUMA_PER_INTERLAGOS_SOCKET = 2 - MAX_CORES_PER_NODE = 16 - MAX_CORES_PER_INTERLAGOS_SOCKET = 16 - MAX_CORES_PER_INTERLAGOS_NUMA = 8 - FEATURE = None - - def __init__(self): - baseaprun.__init__(self) - - #----------------------------------------------------- - # Compute the number of cores that the we - - # are running on, job size, etc. - - #----------------------------------------------------- - maximum_number_processors = 0 - nm_running_procs = 0 - job_tag = "" - - if self.get_number_of_nodes(): - nm_running_procs = self.get_number_of_nodes()*self.get_number_of_eos_sockets_per_node()*self.getNumberOfCoresPerEosSocket() - self.set_number_of_running_procs(nm_running_procs) - - #----------------------------------------------------- - # Compute the maximum number of processors. - - # - - #----------------------------------------------------- - maximum_number_processors = self.get_number_of_nodes()*eos_16_core.MAX_CORES_PER_NODE - self.set_allocated_nm_procs(maximum_number_processors) - - #----------------------------------------------------- - # Make the job tag. 
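For readers unfamiliar with the retired input format: the parser above scans a plain-text size.txt of key = value records, stripping and lower-casing each key and requiring an exact match against the patterns compiled in __init__. A hypothetical size.txt that this parser would accept (the numeric values are illustrative only):

    number of nodes = 2
    number of eos cores per numa = 8
    number of cores per eos socket = 16
    number of threads per mpi task = 1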
- - # - - #----------------------------------------------------- - string1 = str(self.get_number_of_nodes()) - finalindex = 4 - len1 = len(string1) - len2 = finalindex - (len1 - 1) - job_tag = string1 + len2*"_" - - finalindex=4 - string2 = str(self.get_number_of_eos_sockets_per_node()) - len1 = len(string2) - len2 = finalindex - (len1 -1) - job_tag = job_tag + string2 + len2*"_" - - finalindex=1 - string3 = str(self.get_threads_per_mpi_task()) - len1 = len(string3) - len2 = finalindex - (len1 - 1) - job_tag = job_tag + string3 + len2*"_" - - self.__tag = job_tag - - def get_job_launch_command(self): - cmmd = baseaprun.MPIRUN - cmmd = cmmd + " -n " + str(self.get_number_of_running_procs()) + " " - cmmd = cmmd + " -S " + str(self.get_number_of_cores_per_eos_numa()) + " " - return cmmd - - def get_feature_option(self): - return eos_16_core.FEATURE - diff --git a/harness/libraries/aprun_titan.py b/harness/libraries/aprun_titan.py deleted file mode 100644 index 4f1e8fa..0000000 --- a/harness/libraries/aprun_titan.py +++ /dev/null @@ -1,306 +0,0 @@ -#!/usr/bin/env python3 - -import re - -class baseaprun: - #----------------------------------------------------- - # Define the mpi run command for alps. - - # - - #----------------------------------------------------- - MPIRUN = "aprun" - - def __init__(self): - #----------------------------------------------------- - # Define the name of the input file that - - # contains the job size configuration. - - # - - #----------------------------------------------------- - self.__filename = "size.txt" - - #----------------------------------------------------- - # Define the text patterns for tokenizing and - - # parsing the input file. - - # - - #----------------------------------------------------- - self.__stringpattern1 = "number of interlagos cores per numa" - self.__re1 = re.compile(self.__stringpattern1 + "$") - - self.__stringpattern2 = "number of cores per interlagos socket" - self.__re2 = re.compile(self.__stringpattern2 + "$") - - self.__stringpattern3 = "number of threads per mpi task" - self.__re3 = re.compile(self.__stringpattern3 + "$") - - self.__stringpattern4 = "number of nodes" - self.__re4 = re.compile(self.__stringpattern4 + "$") - - self.__delimiter = "=" - - - #----------------------------------------------------- - # Initialize the attributes of the aprun class. - - # - - #----------------------------------------------------- - self.__total_number_of_nodes = None - self.__number_of_interlagos_sockets_per_node = 1 - self.__number_of_accelerator_sockets_per_node = 1 - self.__numberOfCoresPerInterlagosSocket = None - self.__numberOfCoresPerInterlagosNuma=None - self.__threads_per_mpi_task = None - self.__total_number_of_sockets = None - self.__number_of_processors = None - self.__number_mpi_tasks = None - self.__tag = "" - - #----------------------------------------------------- - # Read all the lines of the file and store in a list - - # named "filerecords". - - # - - #----------------------------------------------------- - fileobj = open(self.__filename,'r') - filerecords = fileobj.readlines() - fileobj.close() - - #----------------------------------------------------- - # Parse for the total number of nodes. 
- - # - - #----------------------------------------------------- - for record1 in filerecords: - words = record1.split(self.__delimiter) - words[0] = words[0].strip().lower() - if self.__re4.match(words[0]): - self.__total_number_of_nodes = int(words[1]) - break - - - #----------------------------------------------------- - # Parse for the number of interlagos cores per numa - - # - - #----------------------------------------------------- - for record1 in filerecords: - words = record1.split(self.__delimiter) - words[0] = words[0].strip().lower() - if self.__re1.match(words[0]): - self.__numberOfCoresPerInterlagosNuma = int(words[1]) - break - - - #----------------------------------------------------- - # Parse for the number of cores per socket. - - # - - #----------------------------------------------------- - for record1 in filerecords: - words = record1.split(self.__delimiter) - words[0] = words[0].strip().lower() - if self.__re2.match(words[0]): - self.__numberOfCoresPerInterlagosSocket= int(words[1]) - break - - - #----------------------------------------------------- - # Parse for the number of threads per core. - - # - - #----------------------------------------------------- - for record1 in filerecords: - words = record1.split(self.__delimiter) - words[0] = words[0].strip() - words[0] = words[0].lower() - if self.__re3.match(words[0]): - self.__threads_per_mpi_task = int(words[1]) - break - - - def get_problem_size(self): - return (self.__total_number_of_nodes, - self.__numberOfCoresPerInterlagosNuma, - self.__numberOfCoresPerInterlagosSocket, - self.__number_of_processors, - self.__threads_per_mpi_task) - - def get_number_of_running_procs(self): - return self.__number_of_processors - - def set_number_of_running_procs(self,nm1): - self.__number_of_processors = nm1 - - def get_number_of_nodes(self): - return self.__total_number_of_nodes - - def set_number_of_nodes(self,nm1): - nm1 = self.__total_number_of_nodes - - def get_number_of_cores_per_interlagos_numa(self): - return self.__numberOfCoresPerInterlagosNuma - - def get_number_of_interlagos_sockets_per_node(self): - return self.__number_of_interlagos_sockets_per_node - - def getNumberOfCoresPerInterlagosSocket(self): - return self.__numberOfCoresPerInterlagosSocket - - def get_job_tag(self): - return self.__tag - - def get_total_number_of_sockets(self): - return self.__total_number_of_sockets - - def get_allocated_nm_procs(self): - return self.__maximum_number_processors - - def set_allocated_nm_procs(self,nm1): - self.__maximum_number_processors = nm1 - - def get_mpirun_command(self): - return baseaprun.MPIRUN - - def get_threads_per_mpi_task(self): - return self.__threads_per_mpi_task - -class titan_16_core_depracated(baseaprun): - #----------------------------------------------------- - # Define the maximum number of cores per socket. - - # Define the feature for this node. - - # - - #----------------------------------------------------- - MAX_SOCKETS_PER_NODE = 2 - - MAX_CPU_SOCKETS_PER_NODE = 1 - MAX_GPU_SOCKETS_PER_NODE = 1 - - MAX_GPU_PER_NODE = 1 - MAX_CORES_PER_NODE = 16 - - MAX_GPU_PER_SOCKET = 1 - - MAX_CORES_PER_SOCKET = 16 - FEATURE = None - - def __init__(self): - baseaprun.__init__(self) - - #----------------------------------------------------- - # Compute the number of cores that the we - - # are running on, th job size, etc. 
- - #----------------------------------------------------- - maximum_number_processors = 0 - nm_running_procs = 0 - job_tag = "" - - if self.get_number_of_nodes(): - nm_running_procs = self.get_number_of_nodes()*self.get_number_of_interlagos_sockets_per_node()*2*self.get_number_of_cores_per_interlagos_numa() - self.set_number_of_running_procs(nm_running_procs) - - #----------------------------------------------------- - # Compute the maximum number of processors. - - # - - #----------------------------------------------------- - maximum_number_processors = self.get_number_of_nodes()*titan_16_core_depracated.MAX_CORES_PER_NODE - self.set_allocated_nm_procs(maximum_number_processors) - - #----------------------------------------------------- - # Make the job tag. - - # - - #----------------------------------------------------- - string1 = str(self.get_number_of_nodes()) - finalindex = 4 - len1 = len(string1) - len2 = finalindex - (len1 - 1) - job_tag = string1 + len2*"_" - - finalindex=4 - string2 = str(self.get_number_of_interlagos_sockets_per_node()) - len1 = len(string2) - len2 = finalindex - (len1 -1) - job_tag = job_tag + string2 + len2*"_" - - finalindex=1 - string3 = str(self.get_threads_per_mpi_task()) - len1 = len(string3) - len2 = finalindex - (len1 - 1) - job_tag = job_tag + string3 + len2*"_" - - self.__tag = job_tag - - def get_job_launch_command(self): - cmmd = baseaprun.MPIRUN - cmmd = cmmd + " -n " + str(self.get_number_of_running_procs()) + " " - cmmd = cmmd + " -S " + str(self.get_number_of_cores_per_interlagos_numa()) + " " - return cmmd - - def get_feature_option(self): - return titan_16_core_depracated.FEATURE - - - -class titan_16_core(baseaprun): - #----------------------------------------------------- - # Define the maximum number of cores per socket. - - # Define the feature for this node. - - # - - #----------------------------------------------------- - MAX_CORES_PER_NODE = 16 - MAX_SOCKETS_PER_NODE= 2 - MAX_INTERLAGOS_SOCKETS_PER_NODE = 1 - MAX_ACCELERATOR_SOCKETS_PER_NODE = 1 - - MAX_NUMA_PER_INTERLAGOS_SOCKET = 2 - MAX_CORES_PER_NODE = 16 - MAX_CORES_PER_INTERLAGOS_SOCKET = 16 - MAX_CORES_PER_INTERLAGOS_NUMA = 8 - FEATURE = None - - def __init__(self): - baseaprun.__init__(self) - - #----------------------------------------------------- - # Compute the number of cores that the we - - # are running on, job size, etc. - - #----------------------------------------------------- - maximum_number_processors = 0 - nm_running_procs = 0 - job_tag = "" - - if self.get_number_of_nodes(): - nm_running_procs = self.get_number_of_nodes()*self.get_number_of_interlagos_sockets_per_node()*self.getNumberOfCoresPerInterlagosSocket() - self.set_number_of_running_procs(nm_running_procs) - - #----------------------------------------------------- - # Compute the maximum number of processors. - - # - - #----------------------------------------------------- - maximum_number_processors = self.get_number_of_nodes()*titan_16_core.MAX_CORES_PER_NODE - self.set_allocated_nm_procs(maximum_number_processors) - - #----------------------------------------------------- - # Make the job tag. 
- - # - - #----------------------------------------------------- - string1 = str(self.get_number_of_nodes()) - finalindex = 4 - len1 = len(string1) - len2 = finalindex - (len1 - 1) - job_tag = string1 + len2*"_" - - finalindex=4 - string2 = str(self.get_number_of_interlagos_sockets_per_node()) - len1 = len(string2) - len2 = finalindex - (len1 -1) - job_tag = job_tag + string2 + len2*"_" - - finalindex=1 - string3 = str(self.get_threads_per_mpi_task()) - len1 = len(string3) - len2 = finalindex - (len1 - 1) - job_tag = job_tag + string3 + len2*"_" - - self.__tag = job_tag - - def get_job_launch_command(self): - cmmd = baseaprun.MPIRUN - cmmd = cmmd + " -n " + str(self.get_number_of_running_procs()) + " " - cmmd = cmmd + " -S " + str(self.get_number_of_cores_per_interlagos_numa()) + " " - return cmmd - - def get_feature_option(self): - return titan_16_core.FEATURE - diff --git a/harness/libraries/aprun_utility.py b/harness/libraries/aprun_utility.py deleted file mode 100644 index 943542f..0000000 --- a/harness/libraries/aprun_utility.py +++ /dev/null @@ -1,265 +0,0 @@ -#!/usr/bin/env python3 - -import re - -class aprun: - def __init__(self): - self.__stringpattern1 = "total number of processors" - self.__re1 = re.compile(self.__stringpattern1 + "$") - self.__stringpattern2 = "number of processors" - self.__re2 = re.compile(self.__stringpattern2 + "$") - self.__stringpattern3 = "depth of processors" - self.__re3 = re.compile(self.__stringpattern3 + "$") - self.__delimiter = "=" - self.__filename = "size.txt" - self.__total_number_of_processors = 0 - self.__nm_procs = 0 - self.__depth = None - self.__tag = "" - - #Read all the lines of the file. - fileobj = open(self.__filename,'r') - filerecords = fileobj.readlines() - fileobj.close() - - #Parse for the total number of processors. - for record1 in filerecords: - words = record1.split(self.__delimiter) - words[0] = words[0].strip().lower() - if self.__re1.match(words[0]): - self.__total_number_of_processors = int(words[1]) - break - - - #Parse for the number of processors. - for record1 in filerecords: - words = record1.split(self.__delimiter) - words[0] = words[0].strip().lower() - if self.__re2.match(words[0]): - self.__nm_procs = int(words[1]) - break - - #Parse for the depth of the processors - for record1 in filerecords: - words = record1.split(self.__delimiter) - words[0] = words[0].strip() - words[0] = words[0].lower() - if self.__re3.match(words[0]): - self.__depth = int(words[1]) - break - - #Make the job tag. - string1 = str(self.__total_number_of_processors) - startindex = 0 - finalindex = 4 - len1 = len(string1) - len2 = finalindex - (len1 - 1) - self.__tag = string1 + len2*"_" - - startindex=0 - finalindex=4 - string2 = str(self.__nm_procs) - len1 = len(string2) - len2 = finalindex - (len1 -1) - self.__tag = self.__tag + string2 + len2*"_" - - startindex=0 - finalindex=1 - string3 = str(self.__depth) - len1 = len(string3) - len2 = finalindex - (len1 - 1) - self.__tag = self.__tag + string3 + len2*"_" - - def get_problem_size(self): - return (self.__total_number_of_processors,self.__nm_procs,self.__depth) - - def get_total_number_of_procs(self): - return self.__total_number_of_processors - - def get_nm_procs(self): - return self.__nm_procs - - def get_depth(self): - return self.__depth - - def get_job_tag(self): - return self.__tag - -class aprun2: - #Maximum cores per socket. 
- MAX_CORES_PER_SOCKET = 4 - - - def __init__(self): - self.__stringpattern1 = "total number of sockets" - self.__re1 = re.compile(self.__stringpattern1 + "$") - - self.__stringpattern2 = "number of cores per socket" - self.__re2 = re.compile(self.__stringpattern2 + "$") - - self.__delimiter = "=" - - self.__filename = "size.txt" - - self.__total_number_of_sockets = 0 - self.__number_of_cores_per_socket = 0 - self.__maximum_number_processors = 0 - self.__number_of_processors = 0 - self.__tag = "" - - #Read all the lines of the file. - fileobj = open(self.__filename,'r') - filerecords = fileobj.readlines() - fileobj.close() - - #Parse for the total number of sockets. - for record1 in filerecords: - words = record1.split(self.__delimiter) - words[0] = words[0].strip().lower() - if self.__re1.match(words[0]): - self.__total_number_of_sockets = int(words[1]) - break - - - #Parse for the number of cores per socket. - for record1 in filerecords: - words = record1.split(self.__delimiter) - words[0] = words[0].strip().lower() - if self.__re2.match(words[0]): - self.__number_of_cores_per_socket= int(words[1]) - break - - #Compute the number of processors. - self.__number_of_processors = self.__total_number_of_sockets*self.__number_of_cores_per_socket - - #Compute the maximum number of processors. - self.__maximum_number_processors = self.__total_number_of_sockets*aprun2.MAX_CORES_PER_SOCKET - - #Make the job tag. - string1 = str(self.__total_number_of_sockets) - startindex = 0 - finalindex = 4 - len1 = len(string1) - len2 = finalindex - (len1 - 1) - self.__tag = string1 + len2*"_" - - startindex=0 - finalindex=4 - string2 = str(self.__number_of_processors) - len1 = len(string2) - len2 = finalindex - (len1 -1) - self.__tag = self.__tag + string2 + len2*"_" - - startindex=0 - finalindex=1 - string3 = str(self.__number_of_cores_per_socket) - len1 = len(string3) - len2 = finalindex - (len1 - 1) - self.__tag = self.__tag + string3 + len2*"_" - - def get_problem_size(self): - return (self.__total_number_of_sockets,self.__number_of_cores_per_socket,self.__number_of_processors) - - def get_allocated_nm_procs(self): - return self.__maximum_number_processors - - def get_running_nm_procs(self): - return self.__number_of_processors - - def get_number_of_cores_per_socket(self): - return self.__number_of_cores_per_socket - - def get_job_tag(self): - return self.__tag - -class quadcore: - #Maximum cores per socket. - MAX_CORES_PER_SOCKET = 4 - -class aprun(quadcore): - - def __init__(self): - self.__stringpattern1 = "total number of sockets" - self.__re1 = re.compile(self.__stringpattern1 + "$") - - self.__stringpattern2 = "number of cores per socket" - self.__re2 = re.compile(self.__stringpattern2 + "$") - - self.__delimiter = "=" - - self.__filename = "size.txt" - - self.__total_number_of_sockets = 0 - self.__number_of_cores_per_socket = 0 - self.__maximum_number_processors = 0 - self.__number_of_processors = 0 - self.__tag = "" - - #Read all the lines of the file. - fileobj = open(self.__filename,'r') - filerecords = fileobj.readlines() - fileobj.close() - - #Parse for the total number of sockets. - for record1 in filerecords: - words = record1.split(self.__delimiter) - words[0] = words[0].strip().lower() - if self.__re1.match(words[0]): - self.__total_number_of_sockets = int(words[1]) - break - - - #Parse for the number of cores per socket. 
- for record1 in filerecords: - words = record1.split(self.__delimiter) - words[0] = words[0].strip().lower() - if self.__re2.match(words[0]): - self.__number_of_cores_per_socket= int(words[1]) - break - - #Compute the number of processors. - self.__number_of_processors = self.__total_number_of_sockets*self.__number_of_cores_per_socket - - #Compute the maximum number of processors. - self.__maximum_number_processors = self.__total_number_of_sockets*aprun2.MAX_CORES_PER_SOCKET - - #Make the job tag. - string1 = str(self.__total_number_of_sockets) - startindex = 0 - finalindex = 4 - len1 = len(string1) - len2 = finalindex - (len1 - 1) - self.__tag = string1 + len2*"_" - - startindex=0 - finalindex=4 - string2 = str(self.__number_of_processors) - len1 = len(string2) - len2 = finalindex - (len1 -1) - self.__tag = self.__tag + string2 + len2*"_" - - startindex=0 - finalindex=1 - string3 = str(self.__number_of_cores_per_socket) - len1 = len(string3) - len2 = finalindex - (len1 - 1) - self.__tag = self.__tag + string3 + len2*"_" - - def get_problem_size(self): - return (self.__total_number_of_sockets,self.__number_of_cores_per_socket,self.__number_of_processors) - - def get_allocated_nm_procs(self): - return self.__maximum_number_processors - - def get_running_nm_procs(self): - return self.__number_of_processors - - def get_number_of_cores_per_socket(self): - return self.__number_of_cores_per_socket - - def get_job_tag(self): - return self.__tag - -class nodelist: - def __init__(self): - pass diff --git a/harness/libraries/base_apptest.py b/harness/libraries/base_apptest.py index a9b5015..eb29269 100644 --- a/harness/libraries/base_apptest.py +++ b/harness/libraries/base_apptest.py @@ -14,42 +14,43 @@ class base_apptest(object): """ __metaclass__ = abc.ABCMeta + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + # @ + # Special methods @ + # @ + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + def __init__(self, name_of_application, name_of_subtest, local_path_to_tests, - time_stamp=None): + tag): self.__appName = name_of_application self.__testName = name_of_subtest self.__threadTag = "<" + str(name_of_application) + "::" + str(name_of_subtest) + ">" self.__localPathToTests = local_path_to_tests - # Create harness log directories if given a timestamp - if time_stamp: - logdir_name = 'harness_log_files.' 
+ time_stamp - self.__dirPathToLogFiles = os.path.join(os.getcwd(), logdir_name) - self.__appLogFileBaseDir = os.path.join(self.__dirPathToLogFiles, self.__appName) - self.__appTestLogFileBaseDir = os.path.join(self.__appLogFileBaseDir, self.__testName) - os.makedirs(self.__appTestLogFileBaseDir, exist_ok=True) - - # Set harness log file names - self.__appLogFilePathBase = os.path.join(self.__appLogFileBaseDir, self.__appName) - self.__appTestLogFilePathBase = os.path.join(self.__appTestLogFileBaseDir, self.__appName + '__' + self.__testName) - - self.__appLogFilePath = self.__appLogFilePathBase + ".logfile.txt" - self.__appTestLogFilePath = self.__appTestLogFilePathBase + ".logfile.txt" - self.__appCheckOutLogFilePathStdOut = self.__appTestLogFilePathBase + ".appcheckout.stdout.txt" - self.__appCheckOutLogFilePathStdErr = self.__appTestLogFilePathBase + ".appcheckout.stderr.txt" - self.__appTestCheckOutLogFilePathStdOut = self.__appTestLogFilePathBase + ".testcheckout.stdout.txt" - self.__appTestCheckOutLogFilePathStdErr = self.__appTestLogFilePathBase + ".testcheckout.stderr.txt" - self.__appTestUpdateSourceOutLogFilePathStdOut = self.__appTestLogFilePathBase + ".sourceupdate.stdout.txt" - self.__appTestUpdateSourceOutLogFilePathStdErr = self.__appTestLogFilePathBase + ".sourceupdate.stderr.txt" - self.__appStartTestLogFilePathStdOut = self.__appTestLogFilePathBase + ".starttest.stdout.txt" - self.__appStartTestLogFilePathStdErr = self.__appTestLogFilePathBase + ".starttest.stderr.txt" - - self.__initializeLogFiles() + def __str__(self): + tmp_string = "--\n" + tmp_string += "Application name: {}\n".format(str(self.__appName)) + tmp_string += "Subtest name: {}\n".format(str(self.__testName)) + tmp_string += "Local path to tests: {}\n".format(str(self.__localPathToTests)) + tmp_string += "--\n" + return tmp_string + + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + # @ + # End of special methods @ + # @ + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + # @ + # Public methods. 
@ + # @ + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ def getNameOfApplication(self): return self.__appName @@ -59,9 +60,6 @@ def getNameOfSubtest(self): def getLocalPathToTests(self): return self.__localPathToTests - def getDirPathToLogFiles(self): - return self.__dirPathToLogFiles - def getPathToApplicationTestLogFile(self,test_name): return self.__appTestLogFilePath @@ -81,29 +79,14 @@ def getPathToStartTestLogFiles(self): return {"stdout":self.__appStartTestLogFilePathStdOut, "stderr":self.__appStartTestLogFilePathStdErr} - # def writeToLogFile(self, - # message): - # now = str(datetime.now()) - # now = now.strip() - # message2 = "{0!s:<32} {1!s:<}\n".format(now, message) - # log_filehandle = open(self.__appLogFilePath,"a") - # log_filehandle.write(message2) - # log_filehandle.close() - # - # def writeToLogTestFile(self,message): - # now = str(datetime.now()) - # now = now.strip() - # message2 = "{0!s:<32} Thread tag={1!s:<} {2!s:<}\n".format(now, self.__threadTag, message) - # log_filehandle = open(self.__appTestLogFilePath,"a") - # log_filehandle.write(message2) - # log_filehandle.close() + @property @abc.abstractmethod - def doTasks(self,myTasks,myTestCheckoutLock): + def logger(self): return @abc.abstractmethod - def appTestName(self): + def doTasks(self,myTasks,myTestCheckoutLock): return @abc.abstractmethod @@ -111,50 +94,54 @@ def check_out_test(self): return @abc.abstractmethod - def start_test(self): + def display_status(self): return @abc.abstractmethod - def stop_test(self): + def generateReport(self,logfile,taskwords,mycomputer_with_events_record): return @abc.abstractmethod - def display_status(self): + def debug_apptest(self): return @abc.abstractmethod - def generateReport(self,logfile,taskwords,mycomputer_with_events_record): + def waitForAllJobsToCompleteQueue(self): return - @abc.abstractmethod - def debug_apptest(self): - return + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + # @ + # End of public methods. @ + # @ + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ - #----------------------------------------------------- - # Special methods - - # - - #----------------------------------------------------- - def __str__(self): - tmp_string = "--\n" - tmp_string += "Application name: {}\n".format(str(self.__appName)) - tmp_string += "Subtest name: {}\n".format(str(self.__testName)) - tmp_string += "Local path to tests: {}\n".format(str(self.__localPathToTests)) - tmp_string += "--\n" - return tmp_string + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + # @ + # Private methods. @ + # @ + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + @abc.abstractmethod + def _start_test(self): + return - #----------------------------------------------------- - # Private methods - - # - - #----------------------------------------------------- - def __initializeLogFiles(self): - file_handle = open(self.__appLogFilePath,"a") - file_handle.close() + @abc.abstractmethod + def _stop_test(self): + return - file_handle = open(self.__appTestLogFilePath,"a") - file_handle.close() + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + # @ + # End of private methods. 
@ + # @ + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + +class BaseApptestError(Exception): + """The base error class for this module.""" + + @property + @abc.abstractmethod + def message(self): + return - def __absolutePathToStdOut(self): - pass diff --git a/harness/libraries/command_line.py b/harness/libraries/command_line.py new file mode 100644 index 0000000..54d228f --- /dev/null +++ b/harness/libraries/command_line.py @@ -0,0 +1,97 @@ +#! /usr/bin/env python3 +## @module command_line +# This module contains utility classes, functions, etc for command line management. +# + +# System imports + +# Local imports + +# A class that will store the parsed command line arguments. +class HarnessParsedArguments: + def __init__(self, inputfile=None, + loglevel=None, + configfile=None, + runmode=None, + stdout_stderr=None, + use_fireworks=False): + + self.__inputfile = inputfile + self.__loglevel = loglevel + self.__configfile = configfile + self.__mode = runmode + self.__stdout_stderr = stdout_stderr + self.__use_fireworks = use_fireworks + + self.__verify_attributes() + + def __verify_attributes(self): + for attr, value in self.__dict__.items(): + if value is None: + message="The class HarnessParsedArguments instantiated incorrectly!\n" + message+="The class HarnessParsedArguments must be instantiated with\n" + message+="all keyword arguments having non-None values." + message+="Key:{attr}; value:{value}\n".format(attr=attr,value=value) + raise HPA_AttributeError(message) + + @property + def inputfile(self): + return self.__inputfile + + @property + def loglevel(self): + return self.__loglevel + + @property + def configfile(self): + return self.__configfile + + @property + def runmode(self): + return self.__mode + + @property + def stdout_stderr(self): + return self.__stdout_stderr + + @stdout_stderr.setter + def stdout_stderr(self,value): + self.__stdout_stderr = value + + @property + def use_fireworks(self): + return self.__use_fireworks + + @property + def effective_command_line(self): + command_options = ("Effective command line: " + "runtests.py" + " --inputfile {my_inputfile}" + " --configfile {my_configfile}" + " --loglevel {my_loglevel}" + " --output {my_output}" + " --mode {my_runmode}") + + run_mode_args=" ".join(self.runmode) + + efc = command_options.format(my_inputfile = self.inputfile, + my_configfile = self.configfile, + my_loglevel = self.loglevel, + my_output = self.stdout_stderr, + my_runmode = run_mode_args) + + return efc + + +class BaseError(Exception): + pass + +class HPA_AttributeError(BaseError): + def __init__(self,message=None): + self.message = message + +def main(): + pass + +if __name__ == "__main__": + main() diff --git a/harness/libraries/computers.py b/harness/libraries/computers.py deleted file mode 100644 index f4ecd27..0000000 --- a/harness/libraries/computers.py +++ /dev/null @@ -1,805 +0,0 @@ -#! 
/usr/bin/env python3 - - -#-------------------# -# Import statements # -#-------------------# -import os -import socket -import schedulers -import string -import re -import time -import datetime -from Locks.rgt_locks import * -from Data_Files.report_files import * - -#--------------------------------------------# -# Package: computers.py # -#--------------------------------------------# -# -# Classes: base_computer -# rizzo_computer -#--------------------------------------------# - - -#vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv# -#--------- Class definitions ----------------# -#vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv# - - - -############################################################################################################### -# Class name: base_computer -# -# Inherited classes: -# -# Class variables : -# -# Public variables : -# -# Public methods : -# -# Private variables: -# -# Private methods : -# -############################################################################################################### -class base_computer: - - list_of_computers = ["ram","jaguar","phoenix","robin","rizzo"] - - ####################################################################### - # - # Function name: __init__ - # - # Description: Init constructor call. - # - # Function arguments: Name Description - # - # - ######################################################################## - def __init__(self): - self.name = "none" - self.scratchspace_location = "/tmp" - self.batch_scheduler_name = "none" - self.batch_scheduler = "none" - self.submitted_batch_jobs = [] - self.cshpath = "/usr/bin/csh" - ######################################################################## - - - - ######################################################################## - # - # Function name: get_job_id - # - # Description: - # - # Function arguments: Name Description - # - # self - ######################################################################## - def get_job_id(self): - return "Stub id\n" - ######################################################################## - - - - ######################################################################## - # - # Function name: set_name - # - # Description: - # - # Function arguments: Name Description - # - # self - ######################################################################## - def set_name(self,name): - self.name = name - ######################################################################## - - - - ######################################################################## - # - # Function name: get_name - # - # Description: - # - # Function arguments: Name Description - # - ######################################################################## - def get_name(self): - return self.name - ######################################################################## - - - - ######################################################################## - # - # Function name: set_scratchspace_location - # - # Description: - # - # Function arguments: Name Description - # - # self - ######################################################################## - def set_scratchspace_location(self,name): - self.scratchspace_location = name - ######################################################################## - - - - ######################################################################## - # - # Function name: get_scratchspace_location - # - # Description: - # - # Function arguments: Name Description - # - 
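Stepping back to the command_line.py module added above: a short sketch of how its HarnessParsedArguments container might be driven. The argument values here are hypothetical; use_fireworks may be omitted because it defaults to False, which passes the non-None check in __verify_attributes.

    # Hypothetical values; any keyword left as None raises HPA_AttributeError.
    hpa = HarnessParsedArguments(inputfile="rgt.input",
                                 loglevel="INFO",
                                 configfile="master.ini",
                                 runmode=["checkout", "start"],
                                 stdout_stderr="logfile")
    # runmode is joined with spaces when the effective command line is built.
    print(hpa.effective_command_line)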
######################################################################## - def get_scratchspace_location(self): - return self.scratchspace_location - ######################################################################## - - - - ######################################################################## - # - # Function name: set_batchschdeuler_name - # - # Description: - # - # Function arguments: Name Description - # - # self - ######################################################################## - def set_batchschdeuler_name(self,name): - self.batch_scheduler_name = name - ######################################################################## - - - - ######################################################################## - # - # Function name: get_batchschdeuler_name - # - # Description: - # - # Function arguments: Name Description - # - # self - ######################################################################## - def get_batchschdeuler_name(self): - return self.batch_scheduler_name - ######################################################################## - - - - ######################################################################## - # - # Function name: make_executable - # - # Description: - # - # Function arguments: Name Description - # - # self - ######################################################################## - def make_software_bin(self,make_binary_path,history_file_path,lock_location_id,iteration_no=0): - - #print "\n\n\n" - #print "//////////////////////////////////" - #print "Start of make_software_bin" - #print "//////////////////////////////////" - #Get the current working directory - starting_directory = os.getcwd() - - # Get the name of the binary and the parent path. - parent_make_binary_path = os.path.dirname(make_binary_path) - binary = "nohup " + os.path.basename(make_binary_path) - binary = binary + " " + history_file_path + " " + lock_location_id[0] + " " + str(lock_location_id[1]) + " " + str(iteration_no) + " &" - - #Change to the directory of the parent path. - os.chdir(parent_make_binary_path) - - # Execute make script - #print "Full command line: ", binary - os.system(binary) - - #Change back to the starting directory. - os.chdir(starting_directory) - - #print "//////////////////////////////////" - #print "//////////////////////////////////" - ######################################################################## - - - - ######################################################################## - # - # Function name: run_software_bin - # - # Description: - # - # Function arguments: Name Description - # - ######################################################################## - def run_software_bin(self,run_binary_path,path_to_binary,history_file_path, - results_dir,scratch_dir,path_to_standard_results,lock_location_id, - iteration_number=0): - - # Get the name and location of the script that make the - # the pbs file that which runs our test. - parent_run_binary_path = os.path.dirname(run_binary_path) - script1 = os.path.basename(run_binary_path) - - #print "\n\n\n" - #print "//////////////////////////////////" - #print "Start of run_software_bin" - #print "//////////////////////////////////" - - #print "In run_software_bin" - #print "starting_directory ", os.getcwd() - #print "run_binary_path :",run_binary_path - #print "path_to_binary :",path_to_binary - #print "history_file_path :",history_file_path - #print "results_dir :",results_dir - #print "scratch_dir :",scratch_dir - - # Make the command line arguments. 
- argument1 = "--scratchdir=" + scratch_dir - argument2 = "--history_file_path=" + history_file_path - argument3 = "--results_dir=" + results_dir - argument4 = "--starting_dir=" + parent_run_binary_path - argument5 = "--pathtobinary=" + path_to_binary - argument6 = "--pathtostandardresults=" + path_to_standard_results - argument7 = "--pathtolock=" + lock_location_id[0] - argument8 = "--lockid=" + str(lock_location_id[1]) - argument9 = "--iteration_number=" + str(iteration_number) - - # Make the full command line. - fullcommand = "nohup " + script1 + " " + argument1 + " " + argument2 + " " + argument3 - fullcommand = fullcommand + " " + argument4 + " " + argument5 + " " + argument6 - fullcommand = fullcommand + " " + argument7 + " " + argument8 + " " + argument9 + " &" - #print "Full command line: ", fullcommand - - #Get the current working directory - primary_directory = os.getcwd() - - #Change to the directory of the parent path. - os.chdir(parent_run_binary_path) - - # Execute run script - os.system(fullcommand) - - #Change back to the starting directory. - os.chdir(primary_directory) - - #print "//////////////////////////////////" - #print "//////////////////////////////////" - - ######################################################################## - - - - ######################################################################## - # - # Function name: launch_stability_job - # - # Description: - # - # Function arguments: Name Description - # - ######################################################################## - def launch_stability_job(self,spack,iteration_no=0): - lock_mr = self.__st_make_and_run_executable(spack,iteration_no=iteration_no) - - return lock_mr - ######################################################################## - - - ######################################################################## - # - # Function name: __make_and_run_executable - # - # Description: - # - # Function arguments: Name Description - # - # self - ######################################################################## - def __st_make_and_run_executable(self,spack,iteration_no=0): - #print "\n\n\n" - #print "===============================" - #print "In __st_make_and_run_executable" - #print "===============================" - #The time to wait between making and running the job. - sleeptime = 5.00 - - # Get the main lock id. - main_lock_id = spack.get_main_lock_id() - - # Create the lock for making and running. - lock_path = spack.subtest_path() - unique_id = spack.get_lock_id() - lock_mr = make_and_run_lock(lock_id=unique_id,lock_location=lock_path) - - # Load the main stability report file. - rgt_report = load_report_file(main_lock_id) - rgt_report.modify(unique_id, - spack.get_name_of_st_software(), - spack.get_name_of_st_subtest()) - - - # Make the binary - make_binary_path = spack.make_only_path() - history_file_path = spack.history_text_path() - lock_location_id =lock_mr.path_id() - - #print "lock_mr lock path: ",lock_location_id[0] - #print "lock_mr lock id : ",lock_location_id[1] - #print "history_file_path : ", history_file_path - #print "make_binary_path : ", make_binary_path - self.make_software_bin(make_binary_path,history_file_path,lock_location_id,iteration_no) - - #Sleep for a moment. - time.sleep(sleeptime) - - # Run the binary. 
- run_only_script_location = spack.run_only_path() - history_text_location = spack.history_text_path() - results_directory = spack.get_results_dir() - scratch_directory = spack.get_scratchspace_location() - binpath1 = spack.get_path_to_binary() - pathtostandardresults1 = spack.path_to_standard_results() - - #print "run_only_script_location : ", run_only_script_location - #print "history_text_location : ", history_text_location - #print "results_directory : ", results_directory - #print "scratch_directory : ",scratch_directory - #print "binpath1 : ",binpath1 - #print "pathtostandardresults1 : ",pathtostandardresults1 - - self.run_software_bin(run_binary_path=run_only_script_location, - path_to_binary=binpath1, - history_file_path=history_text_location, - results_dir=results_directory, - scratch_dir=scratch_directory, - path_to_standard_results=pathtostandardresults1, - lock_location_id=lock_mr.path_id(), - iteration_number=iteration_no) - - return lock_mr - #print "===============================" - ######################################################################## - - - - -############################################################################################################### -# -# End of class base_computer -# -############################################################################################################### - - - -############################################################################################################### -# Class name: rizzo_computer -# -# Inherited classes: base_computer -# -# Class variables : -# -# Public variables : -# -# Public methods : -# -# Private variables: -# -# Private methods : -# -############################################################################################################### -class rizzo_computer (base_computer): - - ####################################################################### - # - # Default initialization method. 
- # - ####################################################################### - def __init__(self): - base_computer.__init__(self) - self.set_name("rizzo") - self.set_scratchspace_location("/lustre/scratch") - self.set_batchschdeuler_name(schedulers.base_scheduler.pbs) - self.__batchscheduler = schedulers.pbs_scheduler() - self.cshpath = "/usr/bin/csh" - ####################################################################### - - - ######################################################################## - # - # Function name: set_batchschdeuler - # - # Description: - # - # Function arguments: Name Description - # - # self - ######################################################################## - def set_batchschdeuler(self,scheduler): - self.__batchscheduler = scheduler - ######################################################################## - - - ######################################################################## - # - # Function name: get_cshpath - # - # Description: - # - # Function arguments: Name Description - # - # self - ######################################################################## - def get_cshpath(self): - return self.cshpath - ######################################################################## - - - ######################################################################## - # - # Function name: submit_batch_script - # - # Description: - # - # Function arguments: Name Description - # - ######################################################################## - def submit_batch_script(self,batch_file_name): - jobid = self.__batchscheduler.submit_batch_script(batch_file_name) - return jobid - ######################################################################## - - - ######################################################################## - # - # Function name: numbercoresoption - # - # Description: - # - # Function arguments: Name Description - # - ######################################################################## - def numbercoresoption(self,numberofnodes,mode): - nmcores = str(numberofnodes*1) + "-SN" - - if mode == "SingleCore": - nmcores = str(numberofnodes*1) + " -SN" - elif mode == "DualCore": - nmcores = str(numberofnodes*2) + " -VN" - - return nmcores - ######################################################################## - - - ######################################################################## - # - # Function name: numbercoremultiplier - # - # Description: - # - # Function arguments: Name Description - # - ######################################################################## - def numbercoremultiplier(self,mode): - - multiplier = 1 - - if mode == "SingleCore": - multiplier = 1 - elif mode == "DualCore": - multiplier = 2 - elif mode == "QuadCore": - multiplier = 4 - - return multiplier - ######################################################################## - - - ######################################################################## - # - # Function name: programming_environment - # - # Description: - # - # Function arguments: Name Description - # - ######################################################################## - def programming_environment_command(self): - return "/opt/modules/3.1.6/bin/modulecmd tcsh list" - - ######################################################################## - - -############################################################################################################### -# -# End of class rizzo computer -# 
-############################################################################################################### - - - - -############################################################################################################### -# Class name: ram_computer -# -# Inherited classes: base_computer -# -# Class variables : -# -# Public variables : -# -# Public methods : -# -# Private variables: -# -# Private methods : -# -############################################################################################################### -class ram_computer (base_computer): - - ####################################################################### - # - # Default initialization method. - # - ####################################################################### - def __init__(self): - base_computer.__init__(self) - self.set_name("ram") - self.set_scratchspace_location("/tmp/work") - self.set_batchschdeuler_name(schedulers.base_scheduler.pbs) - ####################################################################### - - -############################################################################################################### -# -# End of class ram computer -# -############################################################################################################### - - - -############################################################################################################### -# Class name: jaguar_computer -# -# Inherited classes: base_computer -# -# Class variables : -# -# Public variables : -# -# Public methods : -# -# Private variables: -# -# Private methods : -# -############################################################################################################### -class jaguar_computer (base_computer): - - ####################################################################### - # - # Default initialization method. 
- # - ####################################################################### - def __init__(self): - base_computer.__init__(self) - self.set_name("jaguar") - self.set_scratchspace_location("/lustre/scratch") - self.set_batchschdeuler_name(schedulers.base_scheduler.pbs) - self.__batchscheduler = schedulers.pbs_scheduler() - self.cshpath = "/usr/bin/csh" - ####################################################################### - - - ######################################################################## - # - # Function name: set_batchschdeuler - # - # Description: - # - # Function arguments: Name Description - # - # self - ######################################################################## - def set_batchschdeuler(self,scheduler): - self.__batchscheduler = scheduler - ######################################################################## - - - ######################################################################## - # - # Function name: get_cshpath - # - # Description: - # - # Function arguments: Name Description - # - # self - ######################################################################## - def get_cshpath(self): - return self.cshpath - ######################################################################## - - - ######################################################################## - # - # Function name: submit_batch_script - # - # Description: - # - # Function arguments: Name Description - # - ######################################################################## - def submit_batch_script(self,batch_file_name=None,batch_job=None): - if batch_file_name: - jobid = self.__batchscheduler.submit_batch_script(batch_file_name) - return jobid - - elif batch_job: - batchfilename = batch_job.get_batchfilename() - jobid = self.__batchscheduler.submit_batch_script(batchfilename) - batch_job.set_jobid(jobid) - self.submitted_batch_jobs = self.submitted_batch_jobs + [batch_job] - ######################################################################## - - - ######################################################################## - # - # Function name: numbercoresoption - # - # Description: - # - # Function arguments: Name Description - # - ######################################################################## - def numbercoresoption(self,numberofnodes,mode): - nmcores = str(numberofnodes*1) + "-SN" - - if mode == "SingleCore": - nmcores = str(numberofnodes*1) + " -SN" - elif mode == "DualCore": - nmcores = str(numberofnodes*2) + " -VN" - - return nmcores - ######################################################################## - - - ######################################################################## - # - # Function name: numbercoremultiplier - # - # Description: - # - # Function arguments: Name Description - # - ######################################################################## - def numbercoremultiplier(self,mode): - - multiplier = 1 - - if mode == "SingleCore": - multiplier = 1 - elif mode == "DualCore": - multiplier = 2 - elif mode == "QuadCore": - multiplier = 4 - - return multiplier - ######################################################################## - - - ######################################################################## - # - # Function name: programming_environment - # - # Description: - # - # Function arguments: Name Description - # - ######################################################################## - def programming_environment_command(self): - return "/opt/modules/3.1.6/bin/modulecmd tcsh list" - - 
######################################################################## - - - - -############################################################################################################### -# -# End of class jaguar computer -# -############################################################################################################### - - - -######################################################################## -# -# Function name: create_computer -# -# Description: -# -# Function arguments: Name Description -# -######################################################################## -def create_computer(): - hostname = gethostname() - - ram_regep = re.compile("^ram",re.I) - rizzo_regep = re.compile("^rizzo",re.I) - jaguar_regep = re.compile("^jaguar",re.I) - yodjaguar_regep = re.compile("^yodjag",re.I) - - #--Are we on rizzo? - if rizzo_regep.match(hostname): - return rizzo_computer() - #--Are we on ram? - elif ram_regep.match(hostname): - return ram_computer() - #--Are we on jaguar? - elif jaguar_regep.match(hostname): - return jaguar_computer() - #--Are we on a jaguar yod node? - elif yodjaguar_regep.match(hostname): - return jaguar_computer() - else: - string1 = "Computer " + hostname + " not defined." - print string1 -######################################################################## - - - -######################################################################## -# -######################################################################## -def getdnsnames(name): - d = socket.gethostbyaddr(name) - names = [ d[0] ] + d[1] + d[2] - return names -######################################################################## - - - -######################################################################## -# -######################################################################## -def resolve(name): - names = getdnsnames(name) - for dnsname in names: - if '.' in dnsname: - fullname = dnsname - break - else: - fullname = name - return fullname -######################################################################## - - - -######################################################################## -# -######################################################################## -def gethostname(): - fullname = socket.gethostname() - if '.' not in fullname: - fullname = resolve(fullname) - - return fullname -######################################################################## diff --git a/harness/libraries/computers_1.py b/harness/libraries/computers_1.py deleted file mode 100644 index a105ad1..0000000 --- a/harness/libraries/computers_1.py +++ /dev/null @@ -1,172 +0,0 @@ -#! 
/usr/bin/env python3 - - -import os -import socket -import string -import re -import time -import datetime - -from libraries import aprun_3 - - - -class base_computer: - - MX_PEAK_FLOPS_PER_CPU = None - - - list_of_computers = ["Chester","Titan"] - days_of_week = { 1: "Mon", - 2: "Tue", - 3: "Wed", - 4: "Thu", - 5: "Fri", - 6: "Sat", - 7: "Sun", - } - months_of_year = { 1: "Jan", - 2: "Feb", - 3: "Mar", - 4: "Apr", - 5: "May", - 6: "Jun", - 7: "Jul", - 8: "Aug", - 9: "Sep", - 10: "Oct", - 11: "Nov", - 12: "Dec", - } - - days_of_month = { 1 : "01", - 2 : "02", - 3 : "03", - 4 : "04", - 5 : "05", - 6 : "06", - 7 : "07", - 8 : "08", - 9 : "09", - 10 :"10", - 11 :"11", - 12 :"12", - 13 :"13", - 14 :"14", - 15 :"15", - 16 :"16", - 17 :"17", - 18 :"18", - 19 :"19", - 20 :"20", - 21 :"21", - 22 :"22", - 23 :"23", - 24 :"24", - 25 :"25", - 26 :"26", - 27 :"27", - 28 :"28", - 29 :"29", - 30 :"30", - 31 :"31", - } - - def __init__(self): - self.name = None - self.hasheventrecords = {} - - def get_event_records(self,startdate,enddate): - oneday = datetime.timedelta(days=1) - - self.new_start_date = datetime.datetime(startdate.year,startdate.month,startdate.day) - self.new_end_date = datetime.datetime(enddate.year,enddate.month,enddate.day) - - nextday = self.new_start_date - tmpeventrecords = {} - while nextday <= self.new_end_date: - event_file_record = "/ccs/sys/adm/MOAB/titan/" + str(nextday.year) + "/" + str(titan_computer.days_of_month[nextday.month]) + "/" + str(titan_computer.days_of_month[nextday.day]) - if os.path.exists(event_file_record): - - fileobj = open(event_file_record,"r") - self.hasheventrecords[nextday.isoformat("T")] = fileobj.readlines() - fileobj.close() - - nextday = nextday + oneday - - - def in_time_range(self,pbsid,creationtime,startdate,enddate): - - oneday = datetime.timedelta(days=1) - - (creationtime1, creationtime2) = creationtime.split("T") - (year,month,day) = creationtime1.split("-") - (time1,time2) = creationtime2.split(".") - (hour,min,sec) = time1.split(":") - creationdate = datetime.datetime(int(year),int(month),int(day)) - - if creationdate > enddate: - return False - nextday = creationdate - - while nextday <= self.new_end_date: - if nextday.isoformat("T") in self.hasheventrecords: - for tmpevent in self.hasheventrecords[nextday.isoformat("T")]: - words = tmpevent.split() - if (len(words) >= 3) and (words[3] == pbsid) and (words[4] == "JOBEND"): - return True - nextday = nextday + oneday - - - return False - - def set_name(self,name): - self.name = name - - def get_name(self): - return self.name - - - def end_time_of_job(self, pbsid,creationtime,startdate,enddate): - return False - -class chester_computer(base_computer): - MX_PEAK_FLOPS_PER_CPU = 9.2 - MOAB_LOG_FILE_PATH = "/moab/stats" - def __init__(self,hostname): - base_computer.__init__(self) - self.set_name(hostname) - - def get_max_flops_per_cpu(self): - return chester_computer.MX_PEAK_FLOPS_PER_CPU - -class titan_computer(base_computer): - MX_PEAK_FLOPS_PER_CPU = 9.2 - MOAB_LOG_FILE_PATH = "/moab/stats" - def __init__(self,hostname): - base_computer.__init__(self) - self.set_name(hostname) - - def get_max_flops_per_cpu(self): - return titan_computer.MX_PEAK_FLOPS_PER_CPU - - -def create_computer(): - hostname = socket.getfqdn() - chester_regep = re.compile("^Chester",re.I) - titan_regep = re.compile("^Titan",re.I) - - #--Are we chester? - if chester_regep.match(hostname): - return chester_computer(hostname) - #--Are we on Titan? 
-    elif titan_regep.match(hostname):
-        return titan_computer(hostname)
-    else:
-        string1 = "Computer " + hostname + " not defined as a class."
-        print(string1)
-
-
-
-
diff --git a/harness/libraries/config_file.py b/harness/libraries/config_file.py
new file mode 100644
index 0000000..49607c0
--- /dev/null
+++ b/harness/libraries/config_file.py
@@ -0,0 +1,82 @@
+import string
+import os
+import configparser
+
+from rgt_utilities import set_harness_environment
+
+class rgt_config_file:
+
+    # These are the named sections in the config file.
+    machine_section = 'MachineDetails'
+    repository_section = 'RepoDetails'
+    testshot_section = 'TestshotDefaults'
+
+    def __init__(self,
+                 configfilename=None,
+                 machinename=None):
+
+        self.__machine_vars = {}
+        self.__repo_vars = {}
+        self.__testshot_vars = {}
+
+        if machinename is not None:
+            self.__configFileName = machinename + ".ini"
+        else:
+            if configfilename is None:
+                configfilename = self.getDefaultConfigFile()
+            self.__configFileName = configfilename
+
+        base_filename = os.path.basename(self.__configFileName)
+        if base_filename == self.__configFileName:
+            # Only the base file name was given; resolve the full path by
+            # searching the CWD, then OLCF_HARNESS_DIR/configs.
+            working_dir_config = os.path.join(os.getcwd(), self.__configFileName)
+            if os.path.isfile(working_dir_config):
+                self.__configFileName = os.path.abspath(working_dir_config)
+            elif 'OLCF_HARNESS_DIR' in os.environ:
+                harness_dir = os.environ['OLCF_HARNESS_DIR']
+                harness_dir_config = os.path.join(harness_dir, "configs", self.__configFileName)
+                if os.path.isfile(harness_dir_config):
+                    self.__configFileName = harness_dir_config
+
+        # Read the master config file.
+        self.__read_config_file()
+
+    def __read_config_file(self):
+        if os.path.isfile(self.__configFileName):
+            print(f'reading harness config {self.__configFileName}')
+            master_cfg = configparser.ConfigParser()
+            master_cfg.read(self.__configFileName)
+
+            self.__machine_vars = master_cfg[rgt_config_file.machine_section]
+            set_harness_environment(self.__machine_vars)
+
+            self.__repo_vars = master_cfg[rgt_config_file.repository_section]
+            set_harness_environment(self.__repo_vars)
+
+            self.__testshot_vars = master_cfg[rgt_config_file.testshot_section]
+            set_harness_environment(self.__testshot_vars)
+        else:
+            raise NameError("Harness config file not found: %s" % self.__configFileName)
+
+    def get_config_file(self):
+        return self.__configFileName
+
+    def get_machine_config(self):
+        return self.__machine_vars
+
+    def get_repository_config(self):
+        return self.__repo_vars
+
+    def get_testshot_config(self):
+        return self.__testshot_vars
+
+    @staticmethod
+    def getDefaultConfigFile():
+        """Returns the default config file name."""
+        machinename = 'master'
+        if 'OLCF_HARNESS_MACHINE' in os.environ:
+            machinename = os.environ['OLCF_HARNESS_MACHINE']
+        configfile = machinename + '.ini'
+        print('Using machine config:', configfile)
+        return configfile
+
diff --git a/harness/libraries/get_machine_name.py b/harness/libraries/get_machine_name.py
new file mode 100755
index 0000000..bb3c708
--- /dev/null
+++ b/harness/libraries/get_machine_name.py
@@ -0,0 +1,232 @@
+#! /usr/bin/env python3
+"""Contains utilities for returning the registered unique name for a given machine.
+
+This module makes available methods for getting the registered unique machine names.
+Each machine must be registered in the INI file 'registered_machines.ini'.
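+
+A registration is an INI section per machine that provides the two keys this
+module reads, 'python regular expression' and 'unique machine name'. The entry
+below is illustrative only; the section name, pattern, and hostname are
+hypothetical placeholders:
+
+    [example_machine]
+    python regular expression = ^login[0-9]+[.]example[.]olcf[.]ornl[.]gov
+    unique machine name = example_machine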
+
+Exceptions Raised
+-----------------
+_NotFoundRegisteredMachineNameError
+_RegisteredMachineFileError
+
+"""
+
+
+# System imports
+import os
+import sys
+import string
+import argparse
+import logging
+
+# Local imports
+
+MODULE_LOGGER_NAME=__name__
+"""str: The name of this module's logger."""
+
+REGISTER_MACHINES = os.path.join(os.getenv("OLCF_HARNESS_DIR"),"configs","registered_machines.ini")
+"""str : The file path to the INI file that stores the registered machines."""
+
+def _create_logger_description():
+    frmt_header = "{0:10s} {1:40.40s} {2:5s}\n"
+    frmt_items = frmt_header
+    header1 = frmt_header.format("Level", "Description", "Option Value" )
+    header1_len = len(header1)
+    log_option_desc = "The logging level. The standard levels are the following:\n\n"
+    log_option_desc += header1
+    log_option_desc += "-"*header1_len + "\n"
+    log_option_desc += frmt_items.format("NOTSET", "All messages will be", "0" )
+    log_option_desc += frmt_items.format("", "processed.", " \n" )
+    log_option_desc += frmt_items.format("DEBUG", "Detailed information, typically of ", "10" )
+    log_option_desc += frmt_items.format("", "interest only when diagnosing problems.", "\n" )
+    log_option_desc += frmt_items.format("INFO", "Confirmation that things", "20" )
+    log_option_desc += frmt_items.format("", "are working as expected.", " \n" )
+    log_option_desc += frmt_items.format("WARNING ", "An indication that something unexpected ", "30" )
+    log_option_desc += frmt_items.format("", "happened or indicative of some problem", "" )
+    log_option_desc += frmt_items.format("", "in the near future.", "\n" )
+    log_option_desc += frmt_items.format("ERROR ", "Due to a more serious problem ", "40" )
+    log_option_desc += frmt_items.format("", "the software has not been able ", "" )
+    log_option_desc += frmt_items.format("", "to perform some function. ", "\n" )
+    log_option_desc += frmt_items.format("CRITICAL ", "A serious error, indicating ", "50" )
+    log_option_desc += frmt_items.format("", "that the program itself may be unable", "" )
+    log_option_desc += frmt_items.format("", "to continue running.", "\n" )
+    return log_option_desc
+
+def _create_module_logger(log_id, log_level):
+    logger = logging.getLogger(log_id)
+    logger.setLevel(log_level)
+
+    # create console handler and set its level
+    ch = logging.StreamHandler()
+    ch.setLevel(log_level)
+
+    # create formatter
+    formatter = logging.Formatter(
+        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+
+    # add formatter to ch
+    ch.setFormatter(formatter)
+
+    # add ch to logger
+    logger.addHandler(ch)
+
+    return logger
+
+def _get_module_logger():
+    logger = logging.getLogger(MODULE_LOGGER_NAME)
+    return logger
+
+
+def _read_registered_machines_ini():
+    """ Reads the INI file REGISTER_MACHINES and returns a ConfigParser object."""
+    import configparser
+
+    if not os.path.exists(REGISTER_MACHINES):
+        raise _RegisteredMachineFileError(REGISTER_MACHINES)
+
+    with open(REGISTER_MACHINES,"r") as in_file:
+        ini_parser = configparser.ConfigParser()
+        ini_parser.read_file(in_file,REGISTER_MACHINES)
+
+    return ini_parser
+
+class _Error(Exception):
+    """Base class for exceptions in this module."""
+    pass
+
+class _NotFoundRegisteredMachineNameError(_Error):
+    """Raised when the hostname does not match any regular expression in the registered machines INI file.
+
+    Parameters
+    ----------
+    regex : str
+        A string containing the Python regular expression.
+ """ + + def __init__(self,regex): + self._regex=regex + self._errormessage = ("No cluster in registered machines INI file " + "found that matches the python regular expression {}".format(self._regex)) + + @property + def error_message(self): + """Returns the error message. + + Returns + ------- + str + The error message string. + + """ + return self._errormessage + +class _RegisteredMachineFileError(_Error): + """Raised when the registered machine file is not found.""" + def __init__(self,filepath): + """ The class constructor + + Parameters + ---------- + filepath : str + The path to the registered machine ini file. + + """ + + self._filepath = filepath + self._errormessage = ("The INI file that contains " + "the machines registrations is not found: {}".format(self._filepath)) + + @property + def error_message(self): + """Returns a string containing the error message. + + Returns + ------- + str + The error message string. + + """ + return self._errormessage + +def get_registered_unique_name_based_on_hostname(): + """Returns the registered unique machine name for the machine on which this program is launched. + + Returns + ------- + str + The regestered unique machine name as regestered in file regestered_machine.ini. + + """ + import socket + import re + + # Initialize this machine registered unique machine name to temporary value. + # We will get the actual registered machine name for this computer. + unique_machine_name = None + + # Get this machine hostname as returned by getfqdn. + machine_gethostname = socket.getfqdn() + + # Read the INI file that stores registered machines names. + rm_ini = _read_registered_machines_ini() + + for cluster in rm_ini.sections(): + # For this cluster get the python regular expression for matching against. + regex_pattern=rm_ini[cluster]["python regular expression"] + re_compiled = re.compile(regex_pattern) + if re_compiled.search(machine_gethostname) : + # We have found a match, therefore get the + # registered machine name of this cluster + unique_machine_name = rm_ini[cluster]["unique machine name"] + unique_machine_name = unique_machine_name.strip() + break + if unique_machine_name == None: + raise _NotFoundRegisteredMachineNameError(regex_pattern) + + return unique_machine_name + +def parse_arguments(argv): + + # Create a string of the description of the + # program + program_description = "Returns a unique name for the machine that this program is launched on." + + # Create an argument parser. + my_parser = argparse.ArgumentParser( + description=program_description, + formatter_class=argparse.RawTextHelpFormatter, + add_help=True) + + # Add an optional argument for the logging level. 
+ my_parser.add_argument("--log-level", + type=int, + default=logging.WARNING, + help=_create_logger_description() ) + + my_args = my_parser.parse_args(argv) + + return my_args + +def main(): + + argv = sys.argv[1:] + args = parse_arguments(argv) + + logger = _create_module_logger(log_id=MODULE_LOGGER_NAME, + log_level=args.log_level) + + logger.info("Start of main program") + + try: + registered_unique_machine_name = get_registered_unique_name_based_on_hostname() + except (_NotFoundRegisteredMachineNameError) as err: + logger.error("Error in getting machine unique registered name.") + print(err.error_message) + sys.exit() + + logger.info("Unique Machine Name: {}".format(registered_unique_machine_name)) + logger.info("End of main program") + +if __name__ == "__main__" : + + main() diff --git a/harness/libraries/input_files.py b/harness/libraries/input_files.py index 4fc14ae..043eab5 100644 --- a/harness/libraries/input_files.py +++ b/harness/libraries/input_files.py @@ -1,8 +1,15 @@ #! /usr/bin/env python3 + +# Python package imports import string import os import configparser +# My harness package imports +from runtests import USE_HARNESS_TASKS_IN_RGT_INPUT_FILE +from runtests import get_main_logger +from libraries import rgt_utilities + # # Author: Arnold Tharrington (arnoldt@ornl.gov) # National Center for Computational Sciences, Scientific Computing Group. @@ -17,27 +24,23 @@ class rgt_input_file: comment_line_entry = "#" harness_task_entry = "harness_task" - def __init__(self,inputfilename="rgt.input", configfilename="master.ini", runmodecmd=None): + def __init__(self, + inputfilename="rgt.input", + runmodecmd=None): self.__tests = [] self.__harness_task = [] self.__path_to_tests = "" self.__inputFileName = inputfilename - self.__configFileName = configfilename - - # Read the master config file - self.__read_config() # Read the input file. 
         self.__read_file()
 
         # If a CLI task was input use that instead
-        if runmodecmd != None:
+        if USE_HARNESS_TASKS_IN_RGT_INPUT_FILE not in runmodecmd:
             print("Overriding tasks in inputfile since CLI mode was provided")
             print("runmodecmd = ", runmodecmd)
-            modetasklist = runmodecmd.split(",")
-            print("modetasklist = ", modetasklist)
             self.__harness_task = []
-            for modetask in modetasklist:
+            for modetask in runmodecmd:
                 if modetask == "checkout":
                     runmodetask = ["check_out_tests",None,None]
                 elif modetask == "start":
@@ -61,39 +64,6 @@ def __init__(self,inputfilename="rgt.input", configfilename="master.ini", runmod
         if self.__harness_task == []:
             print("ERROR: No valid tasks found in the inputfile or the CLI")
 
-    def __read_config(self):
-
-        if os.path.basename(self.__configFileName) == self.__configFileName:
-            # Search CWD, then OLCF_HARNESS_DIR/configs
-            if ( os.path.isfile(os.path.join("./", self.__configFileName)) ):
-                configfileused \
-                    = os.path.abspath(os.path.join("./", self.__configFileName))
-            else:
-                configfileused \
-                    = os.path.join(os.environ["OLCF_HARNESS_DIR"], \
-                                   "configs/", self.__configFileName)
-        else:
-            configfileused = self.__configFileName
-
-        if os.path.isfile(configfileused):
-            print("reading master config")
-            master_cfg = configparser.ConfigParser()
-            master_cfg.read(configfileused)
-
-            machine_vars = master_cfg['MachineDetails']
-            repo_vars = master_cfg['RepoDetails']
-            testshot_vars = master_cfg['TestshotDefaults']
-
-            self.set_rgt_env_vars(machine_vars)
-            self.set_rgt_env_vars(repo_vars)
-            self.set_rgt_env_vars(testshot_vars)
-
-            #print(os.environ.get("RGT_MACHINE_NAME"))
-            #print(os.environ.get("RGT_ACCT_ID"))
-        else:
-            raise NameError("Cannot find config file: %s" % self.__configFileName)
-
-
     def __read_file(self):
         ifile_obj = open(self.__inputFileName,"r")
         lines = ifile_obj.readlines()
@@ -155,16 +125,6 @@ def __is_comment_line(self,word):
         else:
             return False
 
-    def set_rgt_env_vars(self,env_vars):
-        for k in env_vars:
-            envk = "RGT_" + str.upper(k)
-            v = env_vars[k]
-
-            if envk in os.environ:
-                print(envk + " is already set. Skipping.")
-            else:
-                os.environ[envk] = v
-
     def get_harness_tasks(self):
         return self.__harness_task
 
@@ -173,3 +133,5 @@ def get_tests(self):
         return self.__tests
 
     def get_path_to_tests(self):
         return self.__path_to_tests
+
+
diff --git a/harness/libraries/job_info.py b/harness/libraries/job_info.py
index 1f4c336..8323c81 100644
--- a/harness/libraries/job_info.py
+++ b/harness/libraries/job_info.py
@@ -143,12 +143,12 @@ def modify_job_status(self,jid=None,jstatus=base_rgt_job_status.inconclusive_job
                 line1 = string.rstrip(line)
                 words1 = string.split(line1)
                 jid1 = words1[1]
-                print "jid : ",jid
-                print "jid1: ",jid1
+                print ("jid : ",jid)
+                print ("jid1: ",jid1)
                 if jid == jid1:
-                    print "Modifying lines"
+                    print ("Modifying lines")
                     lines[ip-1] = "%(line1)s %(pass_fail)20s\n" % {"line1":line1, "pass_fail":jstatus}
-                    print lines[ip-1]
+                    print (lines[ip-1])
 
         fname_obj = open(fname,"w")
         fname_obj.writelines(lines)
diff --git a/harness/libraries/layout_of_apps_directory.py b/harness/libraries/layout_of_apps_directory.py
index c6db044..3aa75d9 100644
--- a/harness/libraries/layout_of_apps_directory.py
+++ b/harness/libraries/layout_of_apps_directory.py
@@ -1,18 +1,29 @@
 #!/usr/bin/env python3
+"""Encapsulates the app/test directory layout
+
+This contains classes that encapsulate the directory structure of an
+app/test, and provides module level methods for accessing the paths
+to files and directories of the app/test layout.
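+
+For orientation, the managed layout looks roughly like the sketch below;
+the app and test names are illustrative placeholders, while the fixed
+directory names come from the apptest_layout class attributes defined in
+this module:
+
+    <apps_root>/<app>/Source
+    <apps_root>/<app>/<test>/Scripts
+    <apps_root>/<app>/<test>/Status/<id>
+    <apps_root>/<app>/<test>/Run_Archive/<id>/LogFiles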
+""" + +# Python package imports import copy import os import sys from pathlib import Path from string import Template -from libraries.repositories.repository_factory import RepositoryFactory -from libraries.rgt_utilities import try_symlink - +# NCCS Test Harness Package Imports +from libraries.repositories.repository_factory import RepositoryFactory +from libraries.rgt_utilities import try_symlink, unique_harness_id -class apptest_layout(object): +class apptest_layout: + """ + Stores information on the directory structure of the checked out harness applications and tests. + """ - # define specific file names + # Define specific file names app_info_filename = 'application_info.txt' test_info_filename = 'test_info.txt' test_input_txt_filename = 'rgt_test_input.txt' @@ -23,8 +34,17 @@ class apptest_layout(object): test_summary_filename = 'rgt_summary.txt' job_status_filename = 'job_status.txt' job_id_filename = 'job_id.txt' + app_logger_filename = 'application_logfile.txt' + status_logger_filename = 'status_logfile.txt' + """ + str: The filename generated by the harness in the *status* mode. - # define specific directory names + Each app/test will generate this status filei, which is in turn read by other + programs to calculate various harness properties, e.g. pass/fail precentage. + """ + + + # Define specific directory names app_source_dirname = 'Source' test_build_dirname = 'build_directory' test_correct_results_dirname = 'Correct_Results' @@ -33,10 +53,12 @@ class apptest_layout(object): test_scripts_dirname = 'Scripts' test_status_dirname = 'Status' test_performance_dirname = 'Performance' + test_logfile_dirname = 'LogFiles' - # define special file suffixes + # Define special file suffixes suffix_for_ignored_tests = '.ignore_test' suffix_for_ignored_apps = '.ignore_app' + suffix_for_stoptest_run_archive= ".action_stop_test" directory_structure_template = { 'app' : os.path.join("${pdir}", "${app}"), @@ -55,7 +77,10 @@ class apptest_layout(object): 'status_dir' : os.path.join("${pdir}", "${app}", "${test}", test_status_dirname, "${id}"), 'job_id_file' : os.path.join("${pdir}", "${app}", "${test}", test_status_dirname, "${id}", job_id_filename), 'job_status_file' : os.path.join("${pdir}", "${app}", "${test}", test_status_dirname, "${id}", job_status_filename), - 'status_file' : os.path.join("${pdir}", "${app}", "${test}", test_status_dirname, test_status_filename) + 'status_file' : os.path.join("${pdir}", "${app}", "${test}", test_status_dirname, test_status_filename), + 'logfile_dir' : os.path.join("${pdir}", "${app}", "${test}", test_run_archive_dirname, "${id}",test_logfile_dirname), + 'logfile' : os.path.join("${pdir}", "${app}", "${test}", test_run_archive_dirname, "${id}",test_logfile_dirname,app_logger_filename), + 'status_logfile' : os.path.join("${pdir}", "${app}", "${test}", test_run_archive_dirname, "${id}",test_logfile_dirname,status_logger_filename) } # @@ -69,9 +94,12 @@ def __init__(self, self.__applications_root = applications_rootdir self.__appname = name_of_application self.__testname = name_of_subtest - self.__testid = harness_id self.__workspace = None + if harness_id == None: + harness_id = unique_harness_id() + self.__testid = harness_id + # Set the application and test layout self.__apptest_layout = copy.deepcopy(apptest_layout.directory_structure_template) self.__setApplicationTestLayout() @@ -80,6 +108,22 @@ def __init__(self, def get_harness_id(self): return self.__testid + @property + def path_of_test_input_file(self): + """Returns the path to the subtest INI 
input file. """ + return self.__apptest_layout['test_input_ini'] + + + @property + def path_to_logfile(self) : + """Returns the path to the subtest logfile.""" + return self.__apptest_layout['logfile'] + + @property + def path_to_status_logfile(self) : + """Returns the path to the subtest status logfile.""" + return self.__apptest_layout['status_logfile'] + # # Debug function. # @@ -186,14 +230,14 @@ def create_test_runarchive(self): if not os.path.exists(rpath): os.makedirs(rpath) - # - # Create convenience link to latest Run_Archive dir - # - apptest_dir = self.get_path_to_test() - latest_lnk = os.path.join(apptest_dir, apptest_layout.test_run_archive_dirname, 'latest') - if os.path.exists(latest_lnk): - os.unlink(latest_lnk) - try_symlink(rpath, latest_lnk) + # + # Create convenience link to latest Run_Archive dir + # + apptest_dir = self.get_path_to_test() + latest_lnk = os.path.join(apptest_dir, apptest_layout.test_run_archive_dirname, 'latest') + if os.path.exists(latest_lnk): + os.unlink(latest_lnk) + try_symlink(rpath, latest_lnk) return rpath @@ -270,6 +314,10 @@ def get_path_to_start_binary_time(self,uniqueid): return path + @property + def logfile_directory_path(self): + return self.__apptest_layout['logfile_dir'] + def get_path_to_end_binary_time(self,uniqueid): path = None tmppath = os.path.join(self.__apptest_layout['status_dir'], @@ -297,10 +345,10 @@ def __setApplicationTestLayout(self): def get_layout_from_scriptdir(scripts_path): - """ + """Returns layout of scripts directory when called from the scripts directory. Convert given scripts directory path into apps_root, app name, and test name, after checking that it is actually a scripts directory - ///Scripts + ///Scripts """ apps_root = None app = None @@ -322,7 +370,7 @@ def get_layout_from_runarchivedir(archive_path): """ Convert given scripts directory path into apps_root, app name, and test name, after checking that it is actually a scripts directory - ///Run_Archive/ + ///Run_Archive/ """ apps_root = None app = None @@ -342,4 +390,95 @@ def get_layout_from_runarchivedir(archive_path): return apps_root, app, test, testid +def get_path_to_logfile_from_runarchivedir(archive_path): + """ Returns the path to the subtest logfile when given the fully qualified run archive path. + + The path to the run archive directory must exist, or the exception FileNotFoundError will be raised. + If the path to the run archive directory is illformed, then we exit with failure. + + Parameters + ---------- + archive_path : str + The fully qualified path to a subtest archive directory. + + Returns + ------- + str + The fully qualified path to the subtest logfile. + """ + # Get the parts of the path to the Run Archive directory. + radir = Path(archive_path).resolve(strict=True) + num_parent_dirs = len(radir.parents) + if num_parent_dirs < 4: + message = f'The run archive directory {radir} is ill-formed. It has less than 4 parent directories.' + sys.exit(message) + + testid = radir.name + test = radir.parents[1].name + app = radir.parents[2].name + apps_root = str(radir.parents[3]) + + # We now form the path to the subtest logfile. 
We use the apptest_layout.directory_structure_template['logfile'] + # dictionary entry with string substitution to form the path, + key = 'logfile' + + logfile_val = copy.deepcopy(apptest_layout.directory_structure_template[key]) + + subs = dict(pdir=apps_root, + app=app, + test=test, + id=testid) + + logfile_template = Template(logfile_val) + + path_to_logfile = logfile_template.substitute(subs) + + return path_to_logfile + +def get_path_to_logfile_from_scriptdir(scriptdir_path,unique_id): + """ Returns the path to the subtest logfile when given the fully qualified Scripts directory path. + + The path to the Scripts directory must exist, or the exception FileNotFoundError will be raised. + If the path to the Scripts directory is illformed, then we exit with failure. + + Parameters + ---------- + scriptdir_path : str + The fully qualified path to a Script directory. + + unique_id : str + The unique id for the subtest iteration. + + Returns + ------- + str + The fully qualified path to the subtest logfile. + """ + + # Get the parts of the path to the Scripts directory. + scriptdir = Path(scriptdir_path).resolve(strict=True) + num_parent_dirs = len(scriptdir.parents) + if num_parent_dirs < 3: + message = f'The scripts directory {scriptdir} is ill-formed. It has less than 3 parent directories.' + sys.exit(message) + + test = scriptdir.parents[0].name + app = scriptdir.parents[1].name + apps_root = str(scriptdir.parents[2]) + + # We now form the path to the subtest logfile. We use the apptest_layout.directory_structure_template['logfile'] + # dictionary entry with string substitution to form the path, + key = 'logfile' + + logfile_val = copy.deepcopy(apptest_layout.directory_structure_template[key]) + + subs = dict(pdir=apps_root, + app=app, + test=test, + id=unique_id) + + logfile_template = Template(logfile_val) + + path_to_logfile = logfile_template.substitute(subs) + return path_to_logfile diff --git a/harness/libraries/regression_test.py b/harness/libraries/regression_test.py index 11cbe16..f723d57 100644 --- a/harness/libraries/regression_test.py +++ b/harness/libraries/regression_test.py @@ -1,15 +1,18 @@ #! /usr/bin/env python3 -import time -import datetime +# Python imports + import collections -import queue import concurrent.futures -import logging +import os +import time +# Harness package imports. from libraries import apptest +from libraries.subtest_factory import SubtestFactory from fundamental_types.rgt_state import RgtState -from libraries.rgt_logging import rgt_logger +from libraries.rgt_loggers import rgt_logger_factory +from machine_types.machine_factory import MachineFactory # # Author: Arnold Tharrington (arnoldt@ornl.gov) @@ -27,35 +30,71 @@ class Harness: summarize_results = "summarize_results" # Defines the harness log file name. 
- LOG_FILE_NAME = "harness_log_file" + LOGGER_NAME = __name__ + + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + # @ + # Special methods @ + # @ + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ def __init__(self, + config, rgt_input_file, log_level, - stdout_stderr): + stdout_stderr, + use_fireworks): + self.__config = config self.__tests = rgt_input_file.get_tests() self.__tasks = rgt_input_file.get_harness_tasks() self.__local_path_to_tests = rgt_input_file.get_path_to_tests() self.__apptests_dict = collections.OrderedDict() + self.__app_subtests = [] self.__log_level = log_level self.__myLogger = None self.__stdout_stderr = stdout_stderr self.__num_workers = 1 - + self.__use_fireworks = use_fireworks self.__formAppTests() + currenttime = time.localtime() + time_stamp = time.strftime("%Y%m%d_%H%M%S",currenttime) + self.__timestamp = time_stamp + # Define a logger that streams to file. + logger_name=Harness.LOGGER_NAME + fh_filepath="./harness_log_files" + "." + self.__timestamp + "/" + Harness.LOGGER_NAME + "." + self.__timestamp + ".txt" + logger_threshold = self.__log_level + fh_threshold_log_level = "INFO" + ch_threshold_log_level = "CRITICAL" + self.__myLogger = rgt_logger_factory.create_rgt_logger( + logger_name=logger_name, + fh_filepath=fh_filepath, + logger_threshold_log_level=logger_threshold, + fh_threshold_log_level=fh_threshold_log_level, + ch_threshold_log_level=ch_threshold_log_level) + def __str__(self): + message = ( "\n Local path to tests: " + self.__local_path_to_tests + "\n" + "Tests: " + str(self.__tests) + "\n" + "Tasks: " + str(self.__tasks) + "\n") + return message + + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + # @ + # End of special methods @ + # @ + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + + + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + # @ + # Public methods. @ + # @ + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ def run_me(self, my_effective_command_line=None, my_warning_messages=None): - # Define a logger that streams to file. - currenttime = time.localtime() - time_stamp = time.strftime("%Y%m%d_%H%M%S",currenttime) - self.__myLogger = rgt_logger(Harness.LOG_FILE_NAME, - self.__log_level, - time_stamp) - # Log the start of the harness. message = "Start of harness." self.__myLogger.doInfoLogging(message) @@ -71,57 +110,84 @@ def run_me(self, # Mark status as tasks not completed. self.__returnState = RgtState.ALL_TASKS_NOT_COMPLETED - app_subtests = collections.OrderedDict() + # Form a collection of applications with their subtests. + self.__app_subtests = self.__formCollectionOfTests() + + # Run subtests + if self.__use_fireworks: + self.__run_fireworks() + else: + self.__run_subtests_asynchronously() + + # If we get to this point mark all task as completed. + self.__returnState = RgtState.ALL_TASKS_COMPLETED + + message = "End of harness." + self.__myLogger.doInfoLogging(message) + return + + def getState(self): + return self.__returnState + + def wait_for_completion_in_queue(self,timeout): + """Waits 'timeout' minutes for all jobs to be completed in the queue. + + Parameters + ---------- + timeout : float + The maximum time to wait in minutes for the subtest cycle to complete. 
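+
+        A typical (illustrative) calling sequence, with placeholder
+        argument values, would be::
+
+            harness = Harness(config, rgt_input_file, log_level,
+                              stdout_stderr, use_fireworks)
+            harness.run_me()
+            harness.wait_for_completion_in_queue(timeout=60)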
+ """ + future_to_appname = {} with concurrent.futures.ThreadPoolExecutor(max_workers=self.__num_workers) as executor: - for (appname, tests) in self.__apptests_dict.items(): - if appname not in app_subtests: - app_subtests[appname] = [] - for testname in tests: - subtest = apptest.subtest(name_of_application=appname, - name_of_subtest=testname, - local_path_to_tests=self.__local_path_to_tests, - application_log_level=self.__log_level, - timestamp=time_stamp) - app_subtests[appname].append(subtest) - - future_to_appname = {} - for appname in app_subtests.keys(): - future = executor.submit(apptest.do_application_tasks, - app_subtests[appname], - self.__tasks, - self.__stdout_stderr) - future_to_appname[future] = appname + for appname in self.__app_subtests.keys(): + future = executor.submit(apptest.wait_for_jobs_to_complete_in_queue, + self.__config, + self.__app_subtests[appname], + timeout) - # Log that the application has been submitted for tasks. - message = "Application " + appname + " has been submitted for running tasks." - self.__myLogger.doInfoLogging(message) + future_to_appname[future] = appname for my_future in concurrent.futures.as_completed(future_to_appname): appname = future_to_appname[my_future] - - # Check if an exception has been raised my_future_exception = my_future.exception() if my_future_exception: - message = "Application {} future exception:\n{}".format(appname, my_future_exception) - self.__myLogger.doInfoLogging(message) + message = "Application {} future for queue exception:\n{}".format(appname, my_future_exception) + self.__myLogger.doCriticalLogging(message) else: - message = "Application {} future is completed.".format(appname) + message = "Application {} future for queue is completed.".format(appname) self.__myLogger.doInfoLogging(message) + return - message = "All applications completed. Yahoo!!" - self.__myLogger.doInfoLogging(message) + def didAllTestsPass(self): + """Returns True if all tests have passed, otherwise False is returned. - # If we get to this point mark all task as completed. - self.__returnState = RgtState.ALL_TASKS_COMPLETED + Returns + ------- + bool + A True return value means all tests have passed, otherwise a False value + is returned. + """ + ret_value = True + for appname in self.__app_subtests.keys(): + for stests in self.__app_subtests[appname]: + tmp_ret_value = stests.did_all_tests_pass(self.__config) + ret_value = ret_value and tmp_ret_value - message = "End of harness." - self.__myLogger.doInfoLogging(message) - return + return ret_value - def getState(self): - return self.__returnState + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + # @ + # End of public methods. @ + # @ + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + + + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + # @ + # Private methods. @ + # @ + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ - # Private member functions def __formAppTests(self): """ Sets up __apptests_dict. Keys are application name, values are list of test names. 
""" application_names = set() @@ -132,3 +198,157 @@ def __formAppTests(self): application_names.add(appname) self.__apptests_dict[appname] = [] self.__apptests_dict[appname].append(testname) + + def __doing_unit_testing(self): + value = False + if os.getenv('UNIT_TESTS_CWD'): + value = True + message = f"UNIT_TESTS_CWD: {value}" + self.__myLogger.doInfoLogging(message) + return value + + def __formCollectionOfTests(self): + app_subtests = collections.OrderedDict() + for (appname, tests) in self.__apptests_dict.items(): + if appname not in app_subtests: + app_subtests[appname] = [] + for testname in tests: + + logger_name = appname + "." + testname + "." + self.__timestamp + fh_filepath = "harness_log_files" + "." + self.__timestamp + "/" + appname + "/" + appname + "__" + testname + ".logfile.txt" + logger_threshold = self.__log_level + fh_threshold_log_level = "INFO" + ch_threshold_log_level = "CRITICAL" + a_logger = rgt_logger_factory.create_rgt_logger(logger_name=logger_name, + fh_filepath=fh_filepath, + logger_threshold_log_level=logger_threshold, + fh_threshold_log_level=fh_threshold_log_level, + ch_threshold_log_level=ch_threshold_log_level) + + subtest = SubtestFactory.make_subtest(name_of_application=appname, + name_of_subtest=testname, + local_path_to_tests=self.__local_path_to_tests, + logger = a_logger, + tag=self.__timestamp) + + app_subtests[appname].append(subtest) + + return app_subtests + + def __run_subtests_asynchronously(self): + future_to_appname = {} + + # Submit futures by means of thread pool. + with concurrent.futures.ThreadPoolExecutor(max_workers=self.__num_workers) as executor: + for appname in self.__app_subtests.keys(): + future = executor.submit(apptest.do_application_tasks, + self.__app_subtests[appname], + self.__tasks, + self.__stdout_stderr) + future_to_appname[future] = appname + + # Log when all job tasks are initiated. + for my_future in concurrent.futures.as_completed(future_to_appname): + appname = future_to_appname[my_future] + + # Check if an exception has been raised + my_future_exception = my_future.exception() + if my_future_exception: + message = "Application {} future exception:\n{}".format(appname, my_future_exception) + self.__myLogger.doCriticalLogging(message) + else: + message = "Application {} future is completed.".format(appname) + self.__myLogger.doInfoLogging(message) + + message = "All applications completed futures. Yahoo!!" + self.__myLogger.doInfoLogging(message) + + return + + def __run_fireworks(self): + from fireworks import Firework, Workflow, LaunchPad, ScriptTask + + # set up the LaunchPad + launchpad = LaunchPad() + + cfg_file = self.__config.get_config_file() + + for (appname, tests) in self.__app_subtests.items(): + message = "Application " + appname + " has been submitted for running tasks." 
+ self.__myLogger.doInfoLogging(message) + + for subtest in tests: + + uid = subtest.get_harness_id() + testname = subtest.getNameOfSubtest() + task_suffix = f'{appname}.{testname}_@_{uid}' + #print(f'Using task suffix: {task_suffix}') + + # create machine and run status files/directories for current subtest + # (NOTE: working dir must be scripts_dir) + scripts_dir = subtest.get_path_to_scripts() + current_dir = os.getcwd() + os.chdir(scripts_dir) + subtest.create_test_status() + ra_dir = subtest.create_test_runarchive() + machine = MachineFactory.create_machine(self.__config, subtest) + machine_name = machine.get_machine_name() + os.chdir(current_dir) + + # create build FireWork + taskname = f'OTH-BLD.{machine_name}.{task_suffix}' + driver_cmd = f'test_harness_driver.py -C {cfg_file} --build --scriptsdir {scripts_dir} --uniqueid {uid}' + script_cmd = f'echo "Running: {driver_cmd}"; {driver_cmd} &> fwbuild.log' + build_task = ScriptTask(script=script_cmd, + store_stdout=True, store_stderr=True) + category = f'{machine_name}-build' + fw1 = Firework(build_task, fw_id=1, name=taskname, + spec={'_category':category, '_launch_dir':ra_dir}) + + # create batch run FireWork + taskname = f'OTH-RUN.{machine_name}.{task_suffix}' + driver_cmd = f'test_harness_driver.py -C {cfg_file} --run --scriptsdir {scripts_dir} --uniqueid {uid}' + script_cmd = f'echo "Running: {driver_cmd}"; {driver_cmd} &> fwrun.log' + run_task = ScriptTask(script=script_cmd, + store_stdout=True, store_stderr=True) + rgt_test = machine.test_config + replacements = rgt_test.get_test_replacements() + job_overrides = { + 'job_name' : replacements['__job_name__'], + 'walltime' : replacements['__walltime__'], + 'nodes' : replacements['__nodes__'] + } + if '__batch_queue__' in replacements.keys(): + job_overrides['queue'] = replacements['__batch_queue__'] + if '__project_id__' in replacements.keys(): + job_overrides['account'] = replacements['__project_id__'] + + category = f'{machine_name}-run' + fw2 = Firework(run_task, fw_id=2, name=taskname, + spec={'_category':category, '_launch_dir':ra_dir, '_queueadapter':job_overrides}) + + # create check FireWork + taskname = f'OTH-CHK.{machine_name}.{task_suffix}' + driver_cmd = f'test_harness_driver.py -C {cfg_file} --check --scriptsdir {scripts_dir} --uniqueid {uid}' + script_cmd = f'echo "Running: {driver_cmd}"; {driver_cmd} &> fwcheck.log' + check_task = ScriptTask(script=script_cmd, + store_stdout=True, store_stderr=True) + category = f'{machine_name}-check' + fw3 = Firework(check_task, fw_id=3, name=taskname, + spec={'_category':category, '_launch_dir':ra_dir}) + + # make workflow and add it to the LaunchPad + wfname = f'OTH-WF.{machine_name}.{task_suffix}' + workflow = Workflow([fw1, fw2, fw3], {1: [2], 2: [3]}, name=wfname) + launchpad.add_wf(workflow) + + message = "Added workflow " + wfname + "\n========\n" + message += str(workflow.to_display_dict()) + self.__myLogger.doInfoLogging(message) + + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + # @ + # End of private methods. 
@ + # @ + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + diff --git a/harness/libraries/rgt_loggers/__init__.py b/harness/libraries/rgt_loggers/__init__.py new file mode 100644 index 0000000..01f05d1 --- /dev/null +++ b/harness/libraries/rgt_loggers/__init__.py @@ -0,0 +1 @@ +__all__ = [ 'rgt_logging' ] diff --git a/harness/libraries/rgt_loggers/rgt_logger_factory.py b/harness/libraries/rgt_loggers/rgt_logger_factory.py new file mode 100644 index 0000000..d14f5f6 --- /dev/null +++ b/harness/libraries/rgt_loggers/rgt_logger_factory.py @@ -0,0 +1,44 @@ +#! /usr/bin/env python3 +""" +This module implements the factory class for creating rgt_loggers. +""" + + +from .rgt_logging import rgt_logger + +def create_rgt_logger(logger_name=None, + fh_filepath=None, + logger_threshold_log_level=None, + fh_threshold_log_level=None, + ch_threshold_log_level=None): + """Creates and returns an instance of rgt_logger. + + Parameters + ---------- + logger_name : str + The name of the logger. + + fh_filepath : str + The fully qualified file path for the logger file handler. + + logger_threshold_log_level : str + The logging threshold level. + + fh_threshold_log_level : str + The file handler logging threshold log level. + + ch_threshold_log_level : str + The console file handler threshold log level. + + Returns + ------- + rgt_logger + An instance of the rgt_logger. + """ + + a_rgt_logger = rgt_logger(logger_name=logger_name, + fh_filepath=fh_filepath, + logger_threshold_log_level=logger_threshold_log_level, + fh_threshold_log_level=fh_threshold_log_level, + ch_threshold_log_level=ch_threshold_log_level) + return a_rgt_logger diff --git a/harness/libraries/rgt_loggers/rgt_logging.py b/harness/libraries/rgt_loggers/rgt_logging.py new file mode 100644 index 0000000..d67357e --- /dev/null +++ b/harness/libraries/rgt_loggers/rgt_logging.py @@ -0,0 +1,111 @@ +#! /usr/bin/env python3 + +""" +This module implements the logging capability of the harness. + +""" + +import logging +import time +import os + +class rgt_logger: + + def __init__(self, + logger_name, + fh_filepath=None, + logger_threshold_log_level="CRITICAL", + fh_threshold_log_level="CRITICAL", + ch_threshold_log_level="CRITICAL"): + """ + Parameters + ---------- + logger_name : str + The name of the logger. + + logger_threshold_log_level : str + The threshold level for the logger object. + + fh_filepath : str + The filepath for the logger file handler. + + fh_threshold_log_level : str + The lower bound threshold to use the file handler. + + ch_threshold_log_level : str + The lower bound threshold to use the console handler + + """ + + # Get the numeric threshhold level for logging + # messages. + self.__logger_numeric_threshold_level = getattr(logging, logger_threshold_log_level.upper(), None) + self.__fh_numeric_threshold_level = getattr(logging, fh_threshold_log_level.upper(), None) + self.__ch_numeric_threshold_level = getattr(logging, ch_threshold_log_level.upper(), None) + self.__filepath = fh_filepath + + # We now create the parent directories for the file handler logger. + dirname = os.path.dirname(fh_filepath) + os.makedirs(dirname,exist_ok=True) + + # Instantiate the logger. + self.__myLogger = logging.getLogger(logger_name) + self.__myLogger.setLevel(self.__logger_numeric_threshold_level) + + # Add the file handler. + self._add_file_handler() + + # Add the console handler. 
+ self._add_console_handler() + + return + + def doDebugLogging(self, + message): + self.__myLogger.debug(message) + return + + def doInfoLogging(self, + message): + self.__myLogger.info(message) + return + + def doWarningLogging(self, + message): + self.__myLogger.warning(message) + return + + def doErrorLogging(self, + message): + self.__myLogger.error(message) + return + + def doCriticalLogging(self, + message): + self.__myLogger.critical(message) + + # Private methods + def _add_file_handler(self): + # Define a file handler and set to fh threshold level. + fh = logging.FileHandler(self.__filepath) + fh.setLevel(self.__fh_numeric_threshold_level) + + # Define the formatter for the file handler. + my_format_string = "-----\n" + my_format_string += "Time: %(asctime)s\n" + my_format_string += "Logger: %(name)s\n" + my_format_string += "Loglevel: %(levelname)s\n" + my_format_string += "Message:\n" + my_format_string += "%(message)s\n" + my_format_string += "-----\n" + formatter = logging.Formatter(my_format_string) + fh.setFormatter(formatter) + + # Add file handler to logger. + self.__myLogger.addHandler(fh) + + def _add_console_handler(self): + ch = logging.StreamHandler() + ch.setLevel(self.__ch_numeric_threshold_level) + self.__myLogger.addHandler(ch) + return diff --git a/harness/libraries/rgt_logging.py b/harness/libraries/rgt_logging.py deleted file mode 100644 index 916d159..0000000 --- a/harness/libraries/rgt_logging.py +++ /dev/null @@ -1,58 +0,0 @@ -#! /usr/bin/env python3 - -""" -.. module:: rgt_logging - :synopsis: This module implements the logging capability of the harness - -.. moduleauthor:: Arnold Tharrington -""" - -import logging -import time - -class rgt_logger: - - def __init__(self, - log_name, - log_level, - time_stamp=None): - # Get the numeric threshhold level for logging - # messages. - numeric_threshold_level = getattr(logging, log_level.upper(), None) - - # Instantiate a logger that has name log_name. - self.__myLogger = logging.getLogger(log_name) - - # Define the file name of the logger. - currenttime = time.localtime() - if time_stamp: - timestamp = time_stamp - else: - timestamp = time.strftime("%Y%b%d_%H:%M:%S",currenttime) - - self.__fileName = log_name + ".logger." + timestamp + ".txt" - - # Define the handler and set to DEBUG threshold level. - ch = logging.FileHandler(self.__fileName) - ch.setLevel(logging.DEBUG) - - # Define the formatter - my_format_string = "-----\n" - my_format_string += "Time: %(asctime)s\n" - my_format_string += "Logger: %(name)s\n" - my_format_string += "Loglevel: %(levelname)s\n" - my_format_string += "Message:\n" - my_format_string += "%(message)s\n" - my_format_string += "-----\n" - formatter = logging.Formatter(my_format_string) - ch.setFormatter(formatter) - - self.__myLogger.setLevel(numeric_threshold_level) - self.__myLogger.addHandler(ch) - - return - - def doInfoLogging(self, - message): - self.__myLogger.info(message) - return diff --git a/harness/libraries/rgt_utilities.py b/harness/libraries/rgt_utilities.py index d1bf407..a254000 100644 --- a/harness/libraries/rgt_utilities.py +++ b/harness/libraries/rgt_utilities.py @@ -64,3 +64,32 @@ def try_symlink(target, link_name): raise except: print(f'HARNESS ERROR: Failed to create symlink {link_name} to {target}.') + +######################################################################## +# Set environment variables that control harness behavior. 
+########################################################################
+def set_harness_environment(env_vars, override=False):
+    for k in env_vars:
+        envk = rgt_variable_name_modification(k)
+        v = env_vars[k]
+        if envk in os.environ and not override:
+            print(envk + " is already set. Skipping.")
+        else:
+            os.environ[envk] = v
+
+def rgt_variable_name_modification(variable_name):
+    """Transforms the variable name.
+
+    Parameters
+    ----------
+    variable_name : str
+        The name of the variable to be transformed.
+
+    Returns
+    -------
+    str:
+        The transformed variable name.
+    """
+    new_var_name = "RGT_" + str.upper(variable_name)
+    return new_var_name
+
diff --git a/harness/libraries/schedulers.py b/harness/libraries/schedulers.py
index 98042f6..5a8da6c 100644
--- a/harness/libraries/schedulers.py
+++ b/harness/libraries/schedulers.py
@@ -89,7 +89,7 @@ def get_batchfilename(self):
         return self.batchfilename
 
     def is_job_still_in_queue(self):
-        print "Stub to check if generic job ", self.get_jobname()," is in queue."
+        print ("Stub to check if generic job ", self.get_jobname()," is in queue.")
 
###############################################################################################################
#
@@ -114,10 +114,10 @@ def __init__(self,pbsjobname='undefined',pbsjobowner='undefined',pbsjobstate='un
         self.set_jobstate(pbsjobstate)
         self.set_jobid(pbsjobid)
         self.set_batchfilename(pbsfilename)
-        if pbsprecommand:
-            self.__submit_command = pbsprecommand + "; qsub -V "
-        else:
-            self.__submit_command = "qsub "
+        if pbsprecommand:
+            self.__submit_command = pbsprecommand + "; qsub -V "
+        else:
+            self.__submit_command = "qsub "
 
     def submit_batch_script(self,batch_file_name):
         commandstring = "qsub " + batch_file_name
diff --git a/harness/libraries/status_file.py b/harness/libraries/status_file.py
index af29b4a..97baa08 100644
--- a/harness/libraries/status_file.py
+++ b/harness/libraries/status_file.py
@@ -14,8 +14,8 @@
 import re
 import socket
 import pprint
+import abc
 
-#from libraries import computers_1
 from libraries.layout_of_apps_directory import apptest_layout
 
 class StatusFile:
@@ -43,14 +43,10 @@ class StatusFile:
 
     # Name of the input file.
     FILENAME = apptest_layout.test_status_filename
+    """str: The filename of the subtest status file."""
 
     COMMENT_LINE_INDICATOR = '#'
-
-#    FAILURE_CODES = {'Pass_Fail': 0,
-#                     'Hardware_Failure': 1,
-#                     'Performance_Failure': 2,
-#                     'Incorrect_Result': 3
-#                    }
+    """str: The comment character for the subtest status file."""
 
     #---Event identifiers.
 
@@ -100,14 +96,6 @@ class StatusFile:
             ['Event_200_check_end.txt', 'check', 'end']
         }
 
-#    @staticmethod
-#    def event_name_from_event_filename(event_filename):
-#        """
-#        """
-#        assert isinstance(event_filename, str)
-#        return re.subst(r'^Event_[^_]+_', '',
-#                        re.subst(r'.txt$', '', event_filename))
-
     #---Field identifiers.
 
     FIELDS_PER_TEST_INSTANCE = [
@@ -141,8 +129,86 @@ class StatusFile:
 
     NO_VALUE = '[NO_VALUE]'
 
+    #-----------------------------------------------------
+    #                                                     -
+    # Start of section for StatusFiles modes.             -
+    #                                                     -
+    #-----------------------------------------------------
     MODE_NEW = 'New'
+    """str: Indicates that we are logging a new subtest entry in the status file."""
+
     MODE_OLD = 'Old'
+    """str: Indicates that we are updating an old subtest entry in the status file."""
+
+    #-----------------------------------------------------
+    #                                                     -
+    # End of section for StatusFiles modes.
-
+    #                                                     -
+    #-----------------------------------------------------
+
+    #-----------------------------------------------------
+    #                                                     -
+    # Standard values for test results of the status file -
+    #                                                     -
+    #-----------------------------------------------------
+    PENDING = "-1"
+    PASS = "0"
+    FAIL = "1"
+    PLACE_HOLDER = r"***"
+    #-----------------------------------------------------
+    #                                                     -
+    # End of section for standard values of the           -
+    # status file.                                        -
+    #                                                     -
+    #-----------------------------------------------------
+
+    #-----------------------------------------------------
+    #                                                     -
+    # Values for the build results in the status file.    -
+    #                                                     -
+    #-----------------------------------------------------
+    BUILD_RESULTS = {"Pending" : PENDING,
+                     "Pass" : PASS,
+                     "Failure" : FAIL}
+    #-----------------------------------------------------
+    #                                                     -
+    # End of section for values for the build results in  -
+    # the status file.                                    -
+    #                                                     -
+    #-----------------------------------------------------
+
+    #-----------------------------------------------------
+    #                                                     -
+    # Values for the submit results in the status file.   -
+    #                                                     -
+    #-----------------------------------------------------
+    SUBMIT_RESULTS = {"Pending" : PENDING,
+                      "Pass" : PASS,
+                      "Failure" : FAIL}
+    #-----------------------------------------------------
+    #                                                     -
+    # End of section for values for the submit results    -
+    # in the status file.                                 -
+    #                                                     -
+    #-----------------------------------------------------
+
+    #-----------------------------------------------------
+    #                                                     -
+    # Values for the correct results in the status file.  -
+    #                                                     -
+    #-----------------------------------------------------
+    CORRECT_RESULTS = {"Pending" : PENDING,
+                       "In progress" : 17,
+                       "Pass" : PASS,
+                       "Failure" : FAIL,
+                       "Performance failure" : 5}
+    #-----------------------------------------------------
+    #                                                     -
+    # End of section for values for the correct results   -
+    # in the status file.                                 -
+    #                                                     -
+    #-----------------------------------------------------
 
     #################
     # Class methods #
     #################
 
@@ -163,35 +229,90 @@ def ignore_line(line):
 
         return result
 
+    @classmethod
+    def validate_mode(cls,mode):
+        """Validates that we are using a valid mode."""
+        if mode == cls.MODE_NEW:
+            pass
+        elif mode == cls.MODE_OLD:
+            pass
+        else:
+            raise InvalidStatusFileModeError(f"Invalid StatusFile mode: {mode}")
+
     ###################
     # Special methods #
     ###################
 
-    def __init__(self, test_id, mode):
-        """Constructor."""
-        self.__job_id = ''
-        self.__status_file_path = ''
-        self.__test_id = test_id
+    def __init__(self,logger,path_to_status_file):
+        """Constructor.
 
-        # Make the status file.
-        self.__status_file_make()
+        Parameters
+        ----------
+        logger : rgt_logger
+            The logger used for recording status file activity.
+
+        path_to_status_file : str
+            The fully qualified path to the subtest status file.
 
-        # Add job to status file.
-        if mode == StatusFile.MODE_NEW:
-            event_time = self.log_event(StatusFile.EVENT_LOGGING_START)
-            #currenttime = datetime.datetime.now()
-            self.__status_file_add_test_instance(event_time)
+        """
+        self.__logger = logger
+        self.__job_id = None
+        self.__test_id = None
 
-        elif mode == StatusFile.MODE_OLD:
-            pass
+        # The first task is to set the path to the status file.
+        self.__status_file_path = path_to_status_file
+
+        # The second task is to create the status file.
+        self.__create_status_file(path_to_status_file)
 
     ###################
     # Public methods #
     ###################
 
+    @property
+    def status_file_path(self):
+        return self.__status_file_path
+
+    def initialize_subtest(self,unique_id):
+        """Initializes a new entry to the status file.
+
+        Parameters
+        ----------
+        unique_id : str
+            The unique id for the subtest.
+ """ + + + self.__test_id = unique_id + if self._subtest_already_initialized(unique_id): + pass + else: + event_time = self.log_event(StatusFile.EVENT_LOGGING_START) + self.__status_file_add_test_instance(event_time,unique_id) + return + + def getLastHarnessID(self): + """Returns the last harness ID of the subtest status file. + + If there are no entries, then None is returned. If the + status file doesn\'t exist then an error is raised. + + Returns + ------- + str + The harness id of the latest entry in the subtest status file. + """ + with open(self.__status_file_path, "r") as file_obj: + records = file_obj.readlines() + + line = records[-1] + words = line.rstrip().split() + if len(words) > 2: + subtest_harness_id = words[1] + else: + subtest_harness_id = None + return subtest_harness_id + def log_event(self, event_id, event_value=NO_VALUE): """Log the occurrence of a harness event. - This version logs a predefined event specified in the dictionary. + This version logs a predefined event specified in the EVENT_DICT dictionary. """ if event_id in StatusFile.EVENT_DICT: @@ -224,9 +345,125 @@ def log_custom_event(self, event_type, event_subtype, event_value=NO_VALUE): return self.__log_event(event_id, event_filename, event_type, event_subtype, str(event_value)) + def isTestFinished(self, subtest_harness_id): + """Checks if the subtest of subtest_harness_id has completed. + + Parameters + ---------- + subtest_harness_id : str + The harness id of the subest that we wish to check. + + Returns + ------- + bool + If the subtest with subtest_harness_id is complete, then True is + returned, otherwise False is returned. + """ + # Get the corresponding record from the status file that + # lists the test results for subtest_harness_id. + record = self.__get_harness_id_record(subtest_harness_id) + + if record == None: + test_finished = False + else: + # Strip line/record of all leading and trailing whitespace. + record = record.strip() + + # If words[2], words[3], words[4], or words[5] equals StatusFile.PLACE_HOLDER + # then we are not finished. The test is still in progress. + words = record.split() + tmp_words = words[2:] + if tmp_words.count(self.PLACE_HOLDER) > 1: + test_finished = False + # The last word must indicate that the test is no longer pending and not in progress. + elif (int(words[5]) > int(self.CORRECT_RESULTS["Pending"])) and (int(words[5]) != int(self.CORRECT_RESULTS["In progress"])) : + test_finished = True + else: + test_finished = False + + return test_finished + + def didAllTestsPass(self): + """ Checks if all tests have passed. + + Returns + ------- + bool + A True value is returned when all tests have passed. Explicitly stated, this + means all tests have 0's for build, sumbit, and correct results. Otherwise a + False value is returned. 
+ """ + ret_value = True + + with open(self.__status_file_path, 'r') as status_file_obj: + records = status_file_obj.readlines() + + verify_test_passed = lambda a_list : True if a_list.count(self.PASS) == 3 else False + + for index, line in enumerate(records): + if self.ignore_line(line): + continue + + words = line.rstrip().split() + + tmp_words = words[2:] + + ret_value = ret_value and verify_test_passed(tmp_words) + + return ret_value + ################### # Private methods # ################### + def _subtest_already_initialized(self,unique_id): + found_instance = False + + with open(self.__status_file_path, 'r') as status_file_obj: + records = status_file_obj.readlines() + + for index, line in enumerate(records): + words = line.rstrip().split() + + if self.ignore_line(line): + continue + + if len(words) > 1: + test_id = words[1] + if test_id == unique_id: + found_instance = True + break + + return found_instance + + def __get_harness_id_record(self,harness_id): + record = None + with open(self.__status_file_path, 'r') as status_file_obj: + records = status_file_obj.readlines() + + for index, line in enumerate(records): + if self.ignore_line(line): + continue + words = line.rstrip().split() + if len(words) > 1: + test_id = words[1] + if test_id == harness_id: + record = line + break + return record + + def __get_all_harness_id(self): + with open(self.__status_file_path, 'r') as status_file_obj: + records = status_file_obj.readlines() + + harness_ids = [] + for index, line in enumerate(records): + if self.ignore_line(line): + continue + words = line.rstrip().split() + if len(words) > 1: + harness_ids.append(words[1]) + + return harness_ids def __log_event(self, event_id, event_filename, event_type, event_subtype, event_value): @@ -289,22 +526,11 @@ def __log_event(self, event_id, event_filename, event_type, event_subtype, #---------- - def __status_file_make(self): - """Create the status file for this app/test if doesn't exist.""" - - # Get the head dir in cwd. - cwd = os.getcwd() - dir_head1 = os.path.split(cwd)[0] - - # Form path to rgt status file. - self.__status_file_path = os.path.join(dir_head1, apptest_layout.test_status_dirname, - StatusFile.FILENAME) - - # Create. 
- if not os.path.lexists(self.__status_file_path): - file_obj = open(self.__status_file_path, "w") - file_obj.write(StatusFile.header) - file_obj.close() + def __create_status_file(self,path_to_status_file): + """Create the status file for this app/test if it doesn't exist.""" + if not os.path.exists(path_to_status_file): + with open(self.__status_file_path, "w") as file_obj : + file_obj.write(StatusFile.header) #---------- @@ -376,14 +602,13 @@ def __status_file_add_result(self, exit_value, mode): #---------- - def __status_file_add_test_instance(self, event_time): + def __status_file_add_test_instance(self, event_time,unique_id): """Start new line in master status file for app/test.""" - file_obj = open(self.__status_file_path, "a") - format_ = StatusFile.__LINE_FORMAT % ( - (event_time, self.__test_id, "***", "***", "***", "***")) - file_obj.write(format_) - file_obj.close() + with open(self.__status_file_path, "a") as file_obj: + format_ = StatusFile.__LINE_FORMAT % ( + (event_time, unique_id, "***", "***", "***", "***")) + file_obj.write(format_) #------------------------------------------------------------------------------ @@ -436,9 +661,12 @@ def get_status_info(test_id, event_type, event_subtype, test_instance_info['test'], test_instance_info['test_id'], apptest_layout.test_run_dirname) - test_instance_info['job_account_id'] = ( - os.environ['RGT_ACCT_ID'] - if 'RGT_ACCT_ID' in os.environ else no_value) + job_account = no_value + if 'RGT_PROJECT_ID' in os.environ: + job_account = os.environ['RGT_PROJECT_ID'] + elif 'RGT_ACCT_ID' in os.environ: + job_account = os.environ['RGT_ACCT_ID'] + test_instance_info['job_account_id'] = job_account test_instance_info['path_to_rgt_package'] = ( os.environ['PATH_TO_RGT_PACKAGE'] @@ -557,88 +785,6 @@ def write_system_log(test_id, status_info): #------------------------------------------------------------------------------ -#class JobExitStatus: -# """Class to tally different kinds of job errors.""" -# -# def __init__(self): -# """Constructor.""" -# self.status = {"Pass_Fail": 0, -# "Hardware_Failure": 0, -# "Performance_Failure": 0, -# "Incorrect_Result": 0} -# -# def change_job_exit_status(self, category="Pass_Fail", -# new_status="FAILURE"): -# """Change the exit status for a specific failure.""" -# -# if category == "Pass_Fail": -# self.add_pass_fail(pf_failure=new_status) -# elif category == "Hardware_Failure": -# self.add_hardware_failure(hw_failure=new_status) -# elif category == "Performance_Failure": -# self.add_performance_failure(pf_failure=new_status) -# elif category == "Incorrect_Result": -# self.add_incorrect_result_failure(ir_failure=new_status) -# else: -# print("Warning! 
The category " + category + " is not defined.")
-#            print("The failure will be categorized a general Pass_Fail.")
-#            self.add_pass_fail(pf_failure=new_status)
-#
-#    def add_pass_fail(self, pf_failure="NO_FAILURE"):
-#        """
-#        """
-#        if pf_failure == "FAILURE":
-#            self.status["Pass_Fail"] = 1
-#        elif pf_failure == "NO_FAILURE":
-#            self.status["Pass_Fail"] = 0
-#
-#    def add_hardware_failure(self, hw_failure="NO_FAILURE"):
-#        """
-#        """
-#        if hw_failure == "FAILURE":
-#            self.status["Hardware_Failure"] = 1
-#        elif hw_failure == "NO_FAILURE":
-#            self.status["Hardware_Failure"] = 0
-#
-#    def add_performance_failure(self, pf_failure="NO_FAILURE"):
-#        """
-#        """
-#        if pf_failure == "FAILURE":
-#            self.status["Performance_Failure"] = 1
-#        elif pf_failure == "NO_FAILURE":
-#            self.status["Performance_Failure"] = 0
-#
-#    def add_incorrect_result_failure(self, ir_failure="NO_FAILURE"):
-#        """
-#        """
-#        if ir_failure == "FAILURE":
-#            self.status["Incorrect_Result"] = 1
-#        elif ir_failure == "NO_FAILURE":
-#            self.status["Incorrect_Result"] = 0
-#
-##------------------------------------------------------------------------------
-#
-#def convert_to_job_status(job_exit_status):
-#    """Convert job status to numerical value. """
-#
-#    tmpsum = 0
-#
-#    ival = job_exit_status.status["Pass_Fail"]
-#    tmpsum = tmpsum + ival*1
-#
-#    ival = job_exit_status.status["Hardware_Failure"]
-#    tmpsum = tmpsum + ival*2
-#
-#    ival = job_exit_status.status["Performance_Failure"]
-#    tmpsum = tmpsum + ival*4
-#
-#    ival = job_exit_status.status["Incorrect_Result"]
-#    tmpsum = tmpsum + ival*8
-#
-#    return tmpsum
-#
-#------------------------------------------------------------------------------
-
 def parse_status_file(path_to_status_file, startdate, enddate,
                       mycomputer_with_events_record):
     """Function: parse_status_file. Parser for rgt_status_file.txt"""
@@ -903,3 +1049,76 @@ def summarize_status_file(path_to_status_file, startdate, enddate,
     return shash
 
 #------------------------------------------------------------------------------
+
+#-----------------------------------------------------
+#                                                     -
+# Start of defining error classes for this module.    -
+#                                                     -
+#-----------------------------------------------------
+
+class StatusFileError(Exception):
+    """Base class for exceptions of the StatusFile"""
+    def __init__(self):
+        pass
+
+    @property
+    @abc.abstractmethod
+    def message(self):
+        pass
+
+class IncompatibleStatusFileModeError(StatusFileError):
+    """Exception raised for errors of incompatible modes for various StatusFile tasks."""
+    def __init__(self,message):
+        """The class constructor
+
+        Parameters
+        ----------
+        message : string
+            The error message for this exception.
+        """
+        self._message = message
+
+    @property
+    def message(self):
+        """str: The error message."""
+        return self._message
+
+class InvalidStatusFileModeError(StatusFileError):
+    """Exception raised for errors of an invalid mode for StatusFile."""
+    def __init__(self,message):
+        """The class constructor
+
+        Parameters
+        ----------
+        message : string
+            The error message for this exception.
+        """
+        self._message = message
+
+    @property
+    def message(self):
+        """str: The error message."""
+        return self._message
+
+class StatusFileMissingError(StatusFileError):
+    """Exception raised for a missing status file."""
+    def __init__(self,message):
+        """The class constructor
+
+        Parameters
+        ----------
+        message : string
+            The error message for this exception.
+ """ + self._message = message + + @property + def message(self): + """str: The error message.""" + return self._message + +#----------------------------------------------------- +# - +# End of defining errors classes for this module. - +# - +#----------------------------------------------------- diff --git a/harness/libraries/status_file_factory.py b/harness/libraries/status_file_factory.py new file mode 100644 index 0000000..799b0c4 --- /dev/null +++ b/harness/libraries/status_file_factory.py @@ -0,0 +1,53 @@ +#! /usr/bin/env python3 +"""The factory class creating StatusFile objects.""" + +# Python imports +import sys + +# Harness imports +from libraries.status_file import StatusFile + +class StatusFileFactory: + """This is the factory class of StatusFile objects.""" + + #----------------------------------------------------- + # - + # Class attributes and methods. - + # - + #----------------------------------------------------- + @classmethod + def create(cls,path_to_status_file=None,logger=None): + """ Returns a StatusFile object. + + Notes + ----- + This factory method is called if we are creating a StatusFile object + + Parameters + ---------- + path_to_status_file : str + The fully qualified path to the status file. + + Returns + ------- + StatusFile + """ + a_status_file = StatusFile(logger=logger, + path_to_status_file=path_to_status_file) + + return a_status_file + + #----------------------------------------------------- + # - + # Special methods - + # - + #----------------------------------------------------- + def __init__(self): + pass + + + #----------------------------------------------------- + # - + # Public methods - + # - + #----------------------------------------------------- diff --git a/harness/libraries/subtest_factory.py b/harness/libraries/subtest_factory.py new file mode 100644 index 0000000..9d97257 --- /dev/null +++ b/harness/libraries/subtest_factory.py @@ -0,0 +1,80 @@ +#! /usr/bin/env python3 +"""Contains the subtest factory class SubtestFactory + +This module contains the factory class SubtestFactory which +creates an instance of subtest. + +""" + +# Python imports +import os + +# Harness imports +from libraries.apptest import subtest + + +class SubtestFactory(): + """This class resposibility is creating an instance subtest.""" + + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + # @ + # Special methods @ + # @ + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + def __init__(self): + return + + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + # @ + # End of special methods @ + # @ + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + # @ + # Public methods. @ + # @ + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + @staticmethod + def make_subtest(name_of_application=None, + name_of_subtest=None, + local_path_to_tests=None, + logger=None, + tag=None): + """Returns an instance of a subtest object. + + Notes + ----- + All parameters are mandatory. + + Parameters + ---------- + name_of_application : str + The name of the application. + + name_of_subtest : str + The name of test within the application. + + local_path_to_tests : str + The path to to the subtest. + + logger : A rgt_logger object. + A logger object + + tag : str + A string that serves as unique identifier for a subtest test iteration. 
+ + """ + + a_subtest = subtest(name_of_application=name_of_application, + name_of_subtest=name_of_subtest, + local_path_to_tests=local_path_to_tests, + logger=logger, + tag=tag) + return a_subtest + + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + # @ + # End of public methods. @ + # @ + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ diff --git a/harness/libraries/threadedDecorator.py b/harness/libraries/threadedDecorator.py deleted file mode 100644 index 3bf316a..0000000 --- a/harness/libraries/threadedDecorator.py +++ /dev/null @@ -1,334 +0,0 @@ -#! /usr/bin/env python3 - -""" -.. module:: threadedDecorator - :synopsis: This module implements a thread decorator for base_apptest objects. - -.. moduleauthor:: Arnold Tharrington - -""" - -import threading -import subprocess -import multiprocessing -import time -from datetime import datetime -from libraries.apptest import subtest -from libraries import apptest -from libraries.base_apptest import base_apptest -from libraries import regression_test -import abc -import copy -import random -import sys -import os -#from queue import * - - -class BaseAppThreadDecorator(threading.Thread,base_apptest): - """ - An abstract base class that implements the "application" concurrency interface. - - - This class inherents from the "threading.Thread" and "base_apptest" classes. - """ - __metaclass__ = abc.ABCMeta - - def __init__(self): - - threading.Thread.__init__(self) - - @abc.abstractmethod - def run(self): - return - - @abc.abstractmethod - def doTasks(self,myTasks): - return - -class ThreadDecorator(BaseAppThreadDecorator): - """ - When this class is instantiated, the working directory must be - in the same directory as the "rgt.input" file location. This - class instantiates a "python" thread over an application. Each - application contains tests on which tasks are done on. Each test - is threaded with the "multiprocessing.Process" thread. Each - multiprocessing.Process test thread does the tasks on the on the - test. - - :param string name_of_application: The name of the application. - :param name_of_subtest: List of the application tests. - :type name_of_subtest: List of strings - :param integer number_of_iterations: The number of iterations to run the - harness application/subtest. - :param tasks: The harness tasks the application/subtest will perform. - :type tasks: A list of strings - - """ - - - MAX_APP_THREADS = 10 - """ The maximum number of python threads over the applications""" - - app_semaphore = threading.Semaphore(MAX_APP_THREADS) - """ - A threading locking semaphore that restricts to running - at most MAX_APP_THREADS concurrently. - """ - - MAX_APPTEST_PROCESS_THREADS = 32 - """ - The maximum number of python process threads over - all applications subtests. - """ - - apptest_semaphore = multiprocessing.Semaphore(MAX_APPTEST_PROCESS_THREADS) - """ - A multiprocessing threading locking semaphore that restricts to running - at most MAX_APPTEST_PROCESS_THREADS concurrently. 
- """ - - def __init__(self,name_of_application=None, - name_of_subtest=None, - local_path_to_tests=None, - number_of_iterations=None, - tasks=None): - - BaseAppThreadDecorator.__init__(self) - - self.__nameOfApplication = copy.deepcopy(name_of_application) - self.__nameOfApplicationTests = copy.deepcopy(name_of_subtest) - self.__pathToApplicationTests = copy.deepcopy(local_path_to_tests) - self.__numberIterations = -1 - self.__harnessTasks = copy.deepcopy(tasks) - if tasks: - self.__harnessTasks = apptest.subtest.reorderTaskList(self.__harnessTasks) - - - - self.__applicationTests = [] - self.__testQueue = Queue() - - for name_of_test in self.__nameOfApplicationTests: - #Instantiate the application tests. - self.__applicationTests = self.__applicationTests + [apptest.subtest(name_of_application=self.__nameOfApplication, - name_of_subtest=name_of_test, - local_path_to_tests=self.__pathToApplicationTests, - number_of_iterations=self.__numberIterations) ] - - #----------------------------------------------------- - # Public Methods - - # - - #----------------------------------------------------- - def run(self): - - # Acquire a application semaphore lock. - ThreadDecorator.app_semaphore.acquire() - - tmp_string = "Starting thread of application {application1}".format(application1=self.__nameOfApplication) - self.__writeToLogFile(tmp_string) - - self.__fillApplicationTestQueue() - - # The application source checkout must be performed first, - # then all other harnes tasks may proceed. - if Harness.checkout in self.__harnessTasks: - self.__checkoutApplicationSource() - - #Start the application tests. - number_tests = self.__testQueue.qsize() - application_test_processes = [None for x in range(number_tests)] - self.__startApplicationTests(application_test_processes) - - self.__waitForApplicationTestsToFinishStarting(application_test_processes) - - tmp_string = "Ending thread of application {application1}".format(application1=self.__nameOfApplication) - self.__writeToLogFile(tmp_string) - - # Release the application lock semaphore. - ThreadDecorator.app_semaphore.release() - - return - - - def doTasks(self,tasks=None): - """ - Starts the tasks on the application and its tests. - - :param tasks: A list of strings that contain the test tasks. - :type tasks: List of strings. - - """ - - if tasks != None: - self.__harnessTasks = copy.deepcopy(tasks) - self.__harnessTasks = apptest.subtest.reorderTaskList(self.__harnessTasks) - self.start() - - - def appTestName(self): - return self.__myApp.appTestName() - - - def getNameOfApplication(self): - return self.__nameOfApplication - - - #---------------------------------------- - #--These functions need to be threaded. - - #---------------------------------------- - def check_out_test(self): - self.__myApp.check_out_test() - - - def start_test(self,my_tasks): - self.__myApp.start_test() - - - def stop_test(self): - self.__myApp.stop_test() - - - def debug_apptest(self): - self.__myApp.debug_apptest() - - - def display_status(self): - self.__myApp.display_status() - - - def generateReport(self): - self.__myApp.generateReport() - - - #----------------------------------------------------- - # Private Methods - - # - - #----------------------------------------------------- - - - def __fillApplicationTestQueue(self): - """ - This function does 2 things: Fills the application test queue and - if unable raises an exception and release the application locking - semaphore. 
- """ - name_of_application = self.__nameOfApplication - for job in self.__applicationTests: - test_name = job.getTestName() - try: - self.__testQueue.put(copy.deepcopy(job),block=True) - tmpstring = "Inserted application '{}', test '{}' into the queue.".format(name_of_application,test_name) - self.__writeToLogFile(tmpstring) - except Exception: - tmp_string = "Error in Filling Application test queue in {application1}".format(application1=name_of_application) - self.__writeToLogFile(tmp_string) - tmp_string = "Ending python thread of application {application1}".format(application1=name_of_application) - self.__writeToLogFile(tmp_string) - ThreadDecorator.app_semaphore.release() - raise - - - def __checkoutApplicationSource(self): - try: - job = copy.deepcopy(self.__applicationTests[0]) - tmp_string = "In __checkoutApplicationSource job is {}".format(str(job)) - self.__writeToLogFile(tmp_string) - tmp_string = "Current directory is {}".format(os.getcwd()) - self.__writeToLogFile(tmp_string) - - if Harness.checkout in self.__harnessTasks: - job.check_out_source() - tmp_string = "Checked out the source of application {application1}".format(application1=self.__nameOfApplication) - self.__writeToLogFile(tmp_string) - except Exception as exc: - tmp_string = "Failed to checkout the source of application {application1}".format(application1=self.__nameOfApplication) - self.__writeToLogFile(tmp_string) - tmp_string = "Ending python thread of application {application1}".format(application1=self.__nameOfApplication) - self.__writeToLogFile(tmp_string) - ThreadDecorator.app_semaphore.release() - raise - except subprocess.CalledProcessError as exc: - tmp_string = "Caught exception 'CalledProcessError' {application1}".format(application1=self.__nameOfApplication) - self.__writeToLogFile(tmp_string) - ThreadDecorator.app_semaphore.release() - raise - - def __startApplicationTests(self,application_test_processes): - test_index = 0 - - while True: - if self.__testQueue.empty() == True: - tmp_string = "The test queue is empty for application {}!".format(self.__nameOfApplication) - self.__writeToLogFile(tmp_string) - break - else: - try: - job = self.__testQueue.get(block=True) - try: - ThreadDecorator.apptest_semaphore.acquire() - application_test_processes[test_index] = ProcessAppTest(job,self.__harnessTasks) - application_test_processes[test_index].start() - tmp_string = "Started tasks on job {}".format(job.appTestName()) - self.__writeToLogFile(tmp_string) - except: - tmp_string = "Failed to perform tasks on job {}".format(job.appTestName()) - self.__writeToLogFile(tmp_string) - ThreadDecorator.apptest_semaphore.release() - raise - test_index = test_index + 1 - except Exception: - tmp_string = "Failed to get job from Queue" - self.__writeToLogFile(tmp_string) - tmp_string = "Ending python thread of application {application1}".format(application1=self.__nameOfApplication) - self.__writeToLogFile(tmp_string) - raise - - - def __waitForApplicationTestsToFinishStarting(self,application_test_processes): - TEST_PROCESSS_IS_DEAD = 0 - TEST_PROCESS_IS_ALIVE = 1 - number_test_processes = len(application_test_processes) - plist = [ TEST_PROCESS_IS_ALIVE for x in range(number_test_processes)] - while True: - - time.sleep(5) - - number_alive_test_processes = 0 - - for ip in range(number_test_processes): - [application_name,test_name] = application_test_processes[ip].getApplicationTestName() - if plist[ip] == TEST_PROCESS_IS_ALIVE: - if application_test_processes[ip].is_alive(): - number_alive_test_processes = 
number_alive_test_processes + 1 - else: - plist[ip] = TEST_PROCESSS_IS_DEAD - message = "The python process for application {application1} test {test1} has completed.".format(application1=application_name, - test1=test_name) - self.__writeToLogFile(message) - ThreadDecorator.apptest_semaphore.release() - - if number_alive_test_processes <= 0: - break - - - def __writeToLogFile(self,message): - self.__applicationTests[0].writeToLogFile(message) - - -class ProcessAppTest(multiprocessing.Process): - test_checkout_lock = multiprocessing.Lock() - test_display_lock = multiprocessing.Lock() - def __init__(self,my_job,my_tasks=None): - multiprocessing.Process.__init__(self) - self.__job = copy.deepcopy(my_job) - self.__harnessTasks = copy.deepcopy(my_tasks) - - def run(self): - self.__job.doTasks(self.__harnessTasks, - test_checkout_lock=ProcessAppTest.test_checkout_lock, - test_display_lock=ProcessAppTest.test_display_lock) - - def getApplicationTestName(self): - return self.__job.appTestName() - diff --git a/harness/machine_types/__init__.py b/harness/machine_types/__init__.py index ad1cb33..591f2ce 100644 --- a/harness/machine_types/__init__.py +++ b/harness/machine_types/__init__.py @@ -1,2 +1,6 @@ __all__ = ["machine_factory", + "machine_factory_exceptions", + "ibm_power9", + "linux_x86_64", + "linux_utilities", "rgt_test"] diff --git a/harness/machine_types/base_machine.py b/harness/machine_types/base_machine.py index 4f33ad0..c759c47 100644 --- a/harness/machine_types/base_machine.py +++ b/harness/machine_types/base_machine.py @@ -1,12 +1,11 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # # Author: Veronica G. Vergara L. # -# +"""The base class for all machine classes """ -from libraries.apptest import subtest -from .scheduler_factory import SchedulerFactory -from .jobLauncher_factory import JobLauncherFactory + +# Python imports from abc import abstractmethod, ABCMeta from pathlib import Path import os @@ -14,6 +13,12 @@ import subprocess import shlex +# Harness imports +from libraries.apptest import subtest +from .scheduler_factory import SchedulerFactory +from .jobLauncher_factory import JobLauncherFactory +from machine_types import linux_utilities + class BaseMachine(metaclass=ABCMeta): """ BaseMachine represents a compute resource and has the following @@ -32,21 +37,121 @@ class BaseMachine(metaclass=ABCMeta): set_numNodes: """ + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + # @ + # Special methods @ + # @ + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + + # The constructor of class base_machine. def __init__(self, name, scheduler_type, jobLauncher_type, numNodes, numSockets, numCoresPerSocket, apptest): + self.__name = name + self.__scheduler = SchedulerFactory.create_scheduler(scheduler_type) + """An object of type BaseScheduler : This object is the job resource scheduler. 
See the
+        class SchedulerFactory for more details."""
+
+        self.__jobLauncher = JobLauncherFactory.create_jobLauncher(jobLauncher_type)
         self.__numNodes = numNodes
         self.__numSockets = numSockets
         self.__numCoresPerSocket = numCoresPerSocket
-        self.apptest = apptest
+        self.__apptest = apptest
+
+        runarchive_dir = self.apptest.get_path_to_runarchive()
+        log_filepath = os.path.join(runarchive_dir,self.__class__.__module__)
+
+    #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+    #                                                                 @
+    # End of special methods                                          @
+    #                                                                 @
+    #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+
+    #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+    #                                                                 @
+    # Public methods.                                                 @
+    #                                                                 @
+    #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+    @property
+    def scheduler(self):
+        """Returns the scheduler."""
+        return self.__scheduler
+
+    @property
+    def apptest(self):
+        """subtest : The subtest object for the application-test."""
+        return self.__apptest
+
+    @property
+    def logger(self):
+        return self.__apptest.logger
+
+    @property
+    def machine_name(self):
+        """str: The name of the machine."""
+        return self.__name
+
+    @property
+    def check_command(self):
+        """Returns the check command string, or None if there is no check command."""
+        return self.test_config.get_check_command()
+
+    @property
+    @abstractmethod
+    def test_config(self):
+        return
+
+    @property
+    @abstractmethod
+    def build_runtime_environment_command_file(self):
+        return
+
+    @property
+    @abstractmethod
+    def submit_runtime_environment_command_file(self):
+        return
+
+    @property
+    @abstractmethod
+    def check_runtime_environment_command_file(self):
+        return
+
+    def isTestCycleComplete(self,stest):
+        """Checks if the subtest has completed its testing cycle.
+
+        Parameters
+        ----------
+        stest : A subtest object
+            The subtest instance used to check if the testing cycle is complete.
+
+        Returns
+        -------
+        bool
+            If the bool is True, then the subtest cycle is complete. Otherwise
+            the cycle is in progress.
+        """
+        return linux_utilities.isTestCycleComplete(stest)
+
+    def did_all_tests_pass(self,stest):
+        """Checks if the subtest has passed all of its tests.
+
+        Parameters
+        ----------
+        stest : A subtest object
+            The subtest instance used to check if all tests have passed.
+
+        Returns
+        -------
+        bool
+            If the bool is True, then all tests have passed.
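+
+        Examples
+        --------
+        A hypothetical check; ``machine`` stands in for an instance of a
+        concrete BaseMachine subclass and ``stest`` for an existing subtest:
+
+        >>> if machine.did_all_tests_pass(stest):
+        ...     print("every recorded test instance passed")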
+ """ + + return linux_utilities.is_all_tests_passed(stest) def print_machine_info(self): """ Print information about the machine""" print("Machine name:\n"+self.get_machine_name()) - self.__scheduler.print_scheduler_info() + self.scheduler.print_scheduler_info() print("Job Launcher info: ") self.print_jobLauncher_info() @@ -56,19 +161,67 @@ def get_machine_name(self): def get_scheduler_type(self): """ Return a string with the system's name.""" - return self.__scheduler.get_scheduler_type() + return self.scheduler.get_scheduler_type() def get_scheduler_template_file_name(self): """ Return a string with the name of the scheduler's template file.""" - return self.__scheduler.get_scheduler_template_file_name() + return self.scheduler.get_scheduler_template_file_name() - def write_jobid_to_status(self): - """ Write the job id to the appropriate status file """ - jobid_file = self.apptest.get_path_to_job_id_file() - fileobj = open(jobid_file, "w") - id_string = "%20s\n" % (self.__scheduler.get_job_id()) - fileobj.write(id_string) - fileobj.close() + def get_jobLauncher_command(self): + message = "Building jobLauncher command for machine {}.".format(self.machine_name) + print(message) + jobLauncher_command = self._build_jobLauncher_command(self.test_config.test_parameters) + return jobLauncher_command + + def print_jobLauncher_info(self): + """ Print information about the machine's job launcher.""" + print("Job Launcher Information") + print(str(self.__jobLauncher)) + + def set_numNodes(self,numNodes): + self.__numNodes = numNodes + + def submit_batch_script(self): + """Submits the batch script to the job resource manager of scheduler. + + Returns + ------- + int + The exit status of submitting the batch script to the scheduler. An + exit_status of 0 indicates success, other wise failure. + """ + messloc = "In function {functionname}:".format(functionname=self._name_of_current_function()) + + message = f"{messloc} Submitting a batch script." + self.logger.doInfoLogging(message) + + currentdir = os.getcwd() + message = f"The initial directory is {currentdir}" + self.logger.doInfoLogging(message) + + # Get the environment using the submit runtime environment file. + new_env = None + filename = self.submit_runtime_environment_command_file + + try: + if filename != "": + message = f"{messloc} The submit runtime environmental file is {filename}." + self.logger.doInfoLogging(message) + new_env = linux_utilities.get_new_environment(self,filename) + except SetBuildRTEError as error: + message = f"{messloc} Unable to set the submit runtime environment." + self.logger.doCriticalLogging(message) + + exit_status = linux_utilities.submit_batch_script(self,new_env) + + if exit_status != 0: + message = f"{messloc} Unsuccessful batch script submission with exit status of {exit_status}." + self.logger.doCriticalLogging(message) + else: + message = f"{messloc} Successful batch script submission with exit status of {exit_status}." 
+            self.logger.doInfoLogging(message)
+
+        return exit_status
 
     def submit_to_scheduler(self, batchfilename):
         """ Return the jobID for the submission."""
@@ -79,7 +232,7 @@ def submit_to_scheduler(self, batchfilename):
         if cwd != ra_dir:
             os.chdir(ra_dir)
 
-        submit_exit_value = self.__scheduler.submit_job(batchfilename)
+        submit_exit_value = self.scheduler.submit_job(batchfilename)
 
         if cwd != ra_dir:
             os.chdir(cwd)
 
@@ -89,76 +242,225 @@
 
         return submit_exit_value
 
-    def build_jobLauncher_command(self,template_dict):
-        """ Return the jobLauncher command."""
-        return self.__jobLauncher.build_job_command(template_dict)
+    def write_jobid_to_status(self):
+        """ Write the job id to the appropriate status file. """
+        jobid_file = self.apptest.get_path_to_job_id_file()
+        id_string = "%20s\n" % (self.scheduler.get_job_id())
+        with open(jobid_file, "w") as fileobj:
+            fileobj.write(id_string)
+
+    def make_batch_script(self):
+        """Creates the batch script for running the test.
+
+        We use the template strategy pattern for creating the batch script.
+        This method serves as the interface, and each machine implements its
+        own private method to make the batch script - i.e., every subclass of
+        BaseMachine must implement the method _make_batch_script.
+
+        Returns
+        -------
+        bool
+            True if the batch file was successfully created.
+        """
+        self.logger.doInfoLogging("Start of making a batch script.")
 
-    def start_build_script(self, buildcmd):
-        """ Return the status of the build."""
-        path_to_source = self.apptest.get_path_to_source()
-        print("Path to Source:", path_to_source)
-        path_to_build_directory = self.apptest.get_path_to_workspace_build()
-        print("Path to Build Dir:", path_to_build_directory)
-        shutil.copytree(src=path_to_source,
-                        dst=path_to_build_directory)
         currentdir = os.getcwd()
+        message = f"The initial directory is {currentdir}"
+        self.logger.doInfoLogging(message)
+
+        bstatus = self._make_batch_script()
+
+        self.logger.doInfoLogging("End of making a batch script.")
+        return bstatus
+
+    def build_executable(self):
+        """ Executes the build command and returns the exit status of the build command.
+
+        Returns
+        -------
+        int
+            The exit status of the build command.
+
+        """
+        messloc = "In function {functionname}:".format(functionname=self._name_of_current_function())
+
+        message = f"{messloc} Start of building executable.\n"
+        self.logger.doInfoLogging(message)
+
+        currentdir = os.getcwd()
+        message = f"The initial directory is {currentdir}"
+        self.logger.doInfoLogging(message)
+
+        path_to_build_directory = self.apptest.get_path_to_workspace_build()
+        message = f"The build directory is {path_to_build_directory}"
+        self.logger.doInfoLogging(message)
+
+        # Copy the source to the build directory.
+        self._copy_source_to_build_directory()
+
+        message = f"{messloc} Copied source to build directory.\n"
+        self.logger.doInfoLogging(message)
+
+        # Get the environment using the build runtime environment file.
+        new_env = None
+        filename = self.build_runtime_environment_command_file
+
+        if filename != "":
+            message = f"{messloc} The build runtime environmental file is {filename}."
+            self.logger.doInfoLogging(message)
+            new_env = linux_utilities.get_new_environment(self,filename)
+            message = f"{messloc} The new build environment is as follows:\n"
+            message += str(new_env)
+            self.logger.doInfoLogging(message)
+
+        # We now change directories to the build directory.
os.chdir(path_to_build_directory)
-        print("Using build command:", buildcmd)
-        args = shlex.split(buildcmd)
-        build_outfile = "output_build.txt"
-        build_stdout = open(build_outfile,"w")
-        p = subprocess.Popen(args, stdout=build_stdout, stderr=subprocess.STDOUT)
-        p.wait()
-        build_exit_status = p.returncode
-        build_stdout.close()
+
+        message = f"{messloc} Changed to build directory {path_to_build_directory}. Commencing build ..."
+        self.logger.doInfoLogging(message)
+
+        # We run the build command.
+        exit_status = self._build_executable(new_env)
+
+        message = f"{messloc} The build exit status is {exit_status}."
+        if exit_status == 0:
+            self.logger.doInfoLogging(message)
+        else:
+            self.logger.doCriticalLogging(message)
+
+        # We now change back to starting directory.
         os.chdir(currentdir)
-        return build_exit_status
 
-    def check_results(self, checkcmd):
-        """ Run the check script provided by the user and log the result to the status file."""
-        cstatus = self.start_check_script(checkcmd)
-        self.write_check_exit_status(cstatus)
-        return cstatus
+        message = f"{messloc} Changed back to Scripts directory {currentdir}."
+        self.logger.doInfoLogging(message)
+
+        message = f"{messloc} End of building executable."
+        self.logger.doInfoLogging(message)
+
+        return exit_status
+
+    def check_executable(self):
+        """Checks the results of the test and returns the pass/fail status of the test.
+
+        Returns
+        -------
+        int :
+            The exit status of the check command.
+        """
+
+        messloc = "In function {functionname}:".format(functionname=self._name_of_current_function())
 
-    def start_check_script(self, checkcmd):
-        """ Check if results are correct. """
         currentdir = os.getcwd()
-        print("current directory in base_machine:", currentdir)
         runarchive_dir = self.apptest.get_path_to_runarchive()
+
+        # Get the environment using the check runtime environment file.
+        new_env = None
+        filename = self.check_runtime_environment_command_file
+        try:
+            if filename != "":
+                message = f"{messloc} The check runtime environmental file is {filename}."
+                self.logger.doInfoLogging(message)
+                new_env = linux_utilities.get_new_environment(self,filename)
+        except SetBuildRTEError as error:
+            message = f"{messloc} Unable to set the check runtime environment."
+            self.logger.doCriticalLogging(message)
+
+        # We now change to the runarchive directory.
         os.chdir(runarchive_dir)
-        print("Starting check script in base_machine:", os.getcwd())
-        path_to_checkscript = os.path.join(self.apptest.get_path_to_scripts(), checkcmd)
-        print("Using check command:", path_to_checkscript)
-
-        args = shlex.split(path_to_checkscript)
-        check_outfile = "output_check.txt"
-        check_stdout = open(check_outfile, "w")
-        p = subprocess.Popen(args, stdout=check_stdout, stderr=subprocess.STDOUT)
-        p.wait()
-        check_stdout.close()
-        check_exit_status = p.returncode
+
+        message = f"{messloc} The current working directory is {runarchive_dir}."
+        self.logger.doInfoLogging(message)
+
+        # We now run the check command.
+        check_status = linux_utilities.check_executable(self,new_env)
+
+        self._write_check_exit_status(check_status)
+
+        # We now change back to starting directory.
         os.chdir(currentdir)
-        return check_exit_status
 
-    def write_check_exit_status(self, cstatus):
+        message = f"{messloc} The current working directory is {currentdir}."
+        self.logger.doInfoLogging(message)
+
+        return check_status
+
+    def start_report_executable(self):
+        """Starts the report executable command.
+
+        Returns
+        -------
+        int : The exit status of executing the report command.
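+
+        Examples
+        --------
+        A hypothetical call after a run completes; ``machine`` stands in for
+        an instance of a concrete machine class:
+
+        >>> rc = machine.start_report_executable()
+        >>> print("report exit status:", rc)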
+ """ + report_command_str = self.test_config.get_report_command() + + messloc = "In function {functionname}: ".format(functionname=self._name_of_current_function()) + message = f"{messloc} Running report executable script report script {report_command_str }." + + print(message) + self.logger.doInfoLogging(message) + + exit_status = self._start_report_script(self.test_config.get_report_command()) + return exit_status + + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + # @ + # End of public methods. @ + # @ + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + # @ + # Private methods. @ + # @ + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + + def _build_executable(self,new_env): + exit_status=linux_utilities.build_executable(self,new_env) + return exit_status + + def _make_batch_script(self): + bstatus = linux_utilities.make_batch_script_for_linux(self) + return bstatus + + def _submit_batch_script(self,new_env): + exit_status = linux_utilities.submit_batch_script(self,new_env) + return exit_status + + def _copy_source_to_build_directory(self): + messloc = "In function {functionname}:".format(functionname=self._name_of_current_function()) + + path_to_source = self.apptest.get_path_to_source() + message = f"{messloc} Path to Source: {path_to_source}" + print("Path to Source:", path_to_source) + self.logger.doInfoLogging(message) + + path_to_build_directory = self.apptest.get_path_to_workspace_build() + message = f"{messloc} Path to build directory: {path_to_build_directory}" + self.logger.doInfoLogging(message) + print("Path to Build Dir:", path_to_build_directory) + + shutil.copytree(src=path_to_source, + dst=path_to_build_directory) + + def _write_check_exit_status(self, cstatus): """ Write the status of checking results to the status directory.""" + messloc = "In function {functionname}:".format(functionname=self._name_of_current_function()) + status_file = self.apptest.get_path_to_job_status_file() - file1_obj = open(status_file, "w") - - print("Writing check_exit_status =", cstatus, "into", status_file) - # Set the string to write to the job_status.txt file. - #if jstatus == 0: - # pf = "1" - #elif jstatus == 1: - # pf = "0" - #elif jstatus >= 2: - # pf = "2" - string1 = "%s\n" % (cstatus) - - file1_obj.write(string1) - file1_obj.close() - - def start_report_script(self, reportcmd): + with open(status_file, "w") as file_obj: + message = f"{messloc} Writing check_exit_status {cstatus} into {status_file}" + self.logger.doInfoLogging(message) + file_obj.write(str(cstatus)) + return + + def _name_of_current_function(self): + import sys + classname = self.__class__.__name__ + functionname = sys._getframe(1).f_code.co_name + my_name = classname + "." + functionname + return my_name + + def _start_report_script(self, reportcmd): """ Check if results are correct. 
""" currentdir = os.getcwd() print("current directory in base_machine:", currentdir) @@ -178,24 +480,36 @@ def start_report_script(self, reportcmd): os.chdir(currentdir) return report_exit_status - def print_jobLauncher_info(self): - """ Print information about the machine's job launcher.""" - print("Job Launcher Information") - print(str(self.__jobLauncher)) - - def set_numNodes(self,numNodes): - self.__numNodes = numNodes - - @abstractmethod - def make_batch_script(self): - print("Making a batch script in the BaseMachine class") - return - - @abstractmethod - def submit_batch_script(self): - print("Submitting a batch script in the BaseMachine class") - return + def _build_jobLauncher_command(self,template_dict): + """ Return the jobLauncher command.""" + return self.__jobLauncher.build_job_command(template_dict) + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + # @ + # End of private methods. @ + # @ + #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + +class BaseMachineError(Exception): + """Base class for exceptions in this module""" + pass + +class SetBuildRTEError(BaseMachineError): + """Exception raised for errors in setting the build runtime environment.""" + def __init__(self,message): + """The class constructor + + Parameters + ---------- + message : string + The error message for this exception. + """ + self._message = message + + @property + def message(self): + """str: The error message.""" + return self._message if __name__ == "__main__": print("This is the BaseMachine class!") diff --git a/harness/machine_types/base_scheduler.py b/harness/machine_types/base_scheduler.py index 45b3490..2642b0f 100644 --- a/harness/machine_types/base_scheduler.py +++ b/harness/machine_types/base_scheduler.py @@ -4,7 +4,9 @@ # # -class BaseScheduler: +from abc import abstractmethod, ABCMeta + +class BaseScheduler(metaclass=ABCMeta): """ BaseScheduler represents a batch scheduler and has the following properties: @@ -15,8 +17,8 @@ class BaseScheduler: print_scheduler_info: """ - def __init__(self,type,submitCmd,statusCmd,deleteCmd, - walltimeOpt,numTasksOpt,jobNameOpt,templateFile): + def __init__(self, type, submitCmd, statusCmd, deleteCmd, + walltimeOpt, numTasksOpt, jobNameOpt, templateFile): self.__type = type self.__submitCmd = submitCmd self.__statusCmd = statusCmd @@ -37,6 +39,11 @@ def set_job_id(self,jobid): self.__job_id = jobid return + @abstractmethod + def set_job_id_from_environ(self): + print("Setting job id from environment in BaseScheduler class") + return + def get_scheduler_template_file_name(self): return self.__templateFile diff --git a/harness/machine_types/cray_xk7.py b/harness/machine_types/cray_xk7.py deleted file mode 100644 index 74cf17d..0000000 --- a/harness/machine_types/cray_xk7.py +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env python -# -# Author: Veronica G. Vergara L. -# -# - -from .base_machine import BaseMachine - -class CrayXK7(BaseMachine): - - def __init__(self,name='Cray XK7',scheduler=None,jobLauncher=None, - numNodes=0,numSocketsPerNode=0,numCoresPerSocket=0, - apptest=None): - BaseMachine.__init__(self,name,scheduler,jobLauncher,numNodes, - numSocketsPerNode,numCoresPerSocket,apptest) - -if __name__ == '__main__': - print('This is the Cray XK7 class') diff --git a/harness/machine_types/ibm_power8.py b/harness/machine_types/ibm_power8.py deleted file mode 100644 index 200fe8f..0000000 --- a/harness/machine_types/ibm_power8.py +++ /dev/null @@ -1,110 +0,0 @@ -#!/usr/bin/env python -# -# Author: Veronica G. 
Vergara L. -# - -from .base_machine import BaseMachine -from .rgt_test import RgtTest -import os -import re - -class IBMpower8(BaseMachine): - - def __init__(self, - name='IBM Power8', - scheduler=None, - jobLauncher=None, - numNodes=0, - numSocketsPerNode=0, - numCoresPerSocket=0, - rgt_test_input_file="rgt_test_input.txt", - apptest=None): - BaseMachine.__init__(self, - name, - scheduler, - jobLauncher, - numNodes, - numSocketsPerNode, - numCoresPerSocket, - apptest) - - # process test input file - self.__rgt_test = RgtTest(rgt_test_input_file) - self.__rgt_test.read_input_file() - - test_params = {} - - # add test parameters needed by the harness - test_params['results_dir'] = self.apptest.get_path_to_runarchive() - test_params['working_dir'] = self.apptest.get_path_to_workspace_run() - test_params['build_dir'] = self.apptest.get_path_to_workspace_build() - test_params['scripts_dir'] = self.apptest.get_path_to_scripts() - test_params['harness_id'] = self.apptest.get_harness_id() - - # some older tests expect "pathtoexecutable" - exepath = self.__rgt_test.get_executable() - test_params["pathtoexecutable"] = exepath - - # some older job scripts need these env vars as replacements - if 'RGT_ENVIRONMENTAL_FILE' in os.environ: - test_params["rgtenvironmentalfile"] = os.environ['RGT_ENVIRONMENTAL_FILE'] - if 'RGT_NCCS_TEST_HARNESS_MODULE' in os.environ: - test_params["nccstestharnessmodule"] = os.environ["RGT_NCCS_TEST_HARNESS_MODULE"] - - # update the test parameters - self.__rgt_test.set_test_parameters(test_params.items()) - - # is this actually used? if so, it has to come after updating test parameters - #joblaunch_cmd = self.get_jobLauncher_command() - #self.__rgt_test.set_user_param("joblaunchcommand", joblaunch_cmd) - - def get_jobLauncher_command(self): - print("Building jobLauncher command for Power8") - jobLauncher_command = self.build_jobLauncher_command(self.__rgt_test.get_test_parameters()) - return jobLauncher_command - - def make_batch_script(self): - print("Making batch script for Power8 using template called " + self.get_scheduler_template_file_name()) - # Get batch job template lines - templatefileobj = open(self.get_scheduler_template_file_name(), "r") - templatelines = templatefileobj.readlines() - templatefileobj.close() - - # Create test batch job script in run archive directory - batch_file_path = os.path.join(self.apptest.get_path_to_runarchive(), - self.__rgt_test.get_batch_file()) - batch_job = open(batch_file_path, "w") - - # Replace all the wildcards in the batch job template with the values in - # the test config - test_replacements = self.__rgt_test.get_test_replacements() - for record in templatelines: - for (replace_key,val) in test_replacements.items(): - re_tmp = re.compile(replace_key) - record = re_tmp.sub(val, record) - batch_job.write(record) - - # Close batch job script file - batch_job.close() - - def build_executable(self): - print("Building executable on Power8 using build script " + self.__rgt_test.get_build_command()) - return self.start_build_script(self.__rgt_test.get_build_command()) - - def submit_batch_script(self): - print("Submitting batch script for Power8") - batch_script = self.__rgt_test.get_batch_file() - submit_exit_value = self.submit_to_scheduler(batch_script) - print("Submitting " + batch_script + " submit_exit_value = " + str(submit_exit_value)) - return submit_exit_value - - def check_executable(self): - print("Running check executable script on Power8 using check script " + self.__rgt_test.get_check_command()) - return 
self.check_results(self.__rgt_test.get_check_command()) - - def report_executable(self): - print("Running report executable script on Power8 using report script " + self.__rgt_test.get_report_command()) - return self.start_report_script(self.__rgt_test.get_report_command()) - -if __name__ == "__main__": - print('This is the IBM Power8 class') diff --git a/harness/machine_types/ibm_power9.py b/harness/machine_types/ibm_power9.py index 29b31ac..831351f 100644 --- a/harness/machine_types/ibm_power9.py +++ b/harness/machine_types/ibm_power9.py @@ -1,13 +1,19 @@ -#!/usr/bin/env python -# -# Author: Veronica G. Vergara L. -# +#!/usr/bin/env python3 -from .base_machine import BaseMachine -from .rgt_test import RgtTest +# %Authors% + +# Python imports. +import sys import os +import shlex +import subprocess +import time import re +# Local imports. +from machine_types.base_machine import BaseMachine +from machine_types.rgt_test import RgtTest + class IBMpower9(BaseMachine): def __init__(self, @@ -17,94 +23,57 @@ def __init__(self, numNodes=1, numSocketsPerNode=2, numCoresPerSocket=21, - rgt_test_input_file="rgt_test_input.txt", + rgt_test_input_file=None, apptest=None): - BaseMachine.__init__(self, - name, - scheduler, - jobLauncher, - numNodes, - numSocketsPerNode, - numCoresPerSocket, - apptest) - - # process test input file - self.__rgt_test = RgtTest(rgt_test_input_file) - self.__rgt_test.read_input_file() - - test_params = {} - - # add test parameters needed by the harness - test_params['results_dir'] = self.apptest.get_path_to_runarchive() - test_params['working_dir'] = self.apptest.get_path_to_workspace_run() - test_params['build_dir'] = self.apptest.get_path_to_workspace_build() - test_params['scripts_dir'] = self.apptest.get_path_to_scripts() - test_params['harness_id'] = self.apptest.get_harness_id() - - # some older tests expect "pathtoexecutable" - exepath = self.__rgt_test.get_executable() - test_params["pathtoexecutable"] = exepath - - # some older job scripts need these env vars as replacements - if 'RGT_ENVIRONMENTAL_FILE' in os.environ: - test_params["rgtenvironmentalfile"] = os.environ['RGT_ENVIRONMENTAL_FILE'] - if 'RGT_NCCS_TEST_HARNESS_MODULE' in os.environ: - test_params["nccstestharnessmodule"] = os.environ["RGT_NCCS_TEST_HARNESS_MODULE"] - - # update the test parameters - self.__rgt_test.set_test_parameters(test_params.items()) - - # is this actually used? 
if so, it has to come after updating test parameters
-        #joblaunch_cmd = self.get_jobLauncher_command()
-        #self.__rgt_test.set_user_param("joblaunchcommand", joblaunch_cmd)
-
-    def get_jobLauncher_command(self):
-        print("Building jobLauncher command for Power9")
-        jobLauncher_command = self.build_jobLauncher_command(self.__rgt_test.get_test_parameters())
-        return jobLauncher_command
-
-    def make_batch_script(self):
-        print("Making batch script for Power9 using template called " + self.get_scheduler_template_file_name())
-        # Get batch job template lines
-        templatefileobj = open(self.get_scheduler_template_file_name(), "r")
-        templatelines = templatefileobj.readlines()
-        templatefileobj.close()
-
-        # Create test batch job script in run archive directory
-        batch_file_path = os.path.join(self.apptest.get_path_to_runarchive(),
-                                       self.__rgt_test.get_batch_file())
-        batch_job = open(batch_file_path, "w")
-
-        # Replace all the wildcards in the batch job template with the values in
-        # the test config
-        test_replacements = self.__rgt_test.get_test_replacements()
-        for record in templatelines:
-            for (replace_key,val) in test_replacements.items():
-                re_tmp = re.compile(replace_key)
-                record = re_tmp.sub(val, record)
-            batch_job.write(record)
-
-        # Close batch job script file
-        batch_job.close()
-
-    def build_executable(self):
-        print("Building executable on Power9 using build script " + self.__rgt_test.get_build_command())
-        return self.start_build_script(self.__rgt_test.get_build_command())
-
-    def submit_batch_script(self):
-        print("Submitting batch script for Power9")
-        batch_script = self.__rgt_test.get_batch_file()
-        submit_exit_value = self.submit_to_scheduler(batch_script)
-        print("Submitted " + batch_script + " submit_exit_value = " + str(submit_exit_value))
-        return submit_exit_value
-
-    def check_executable(self):
-        print("Running check executable script on Power9 using check script " + self.__rgt_test.get_check_command())
-        return self.check_results(self.__rgt_test.get_check_command())
-
-    def report_executable(self):
-        print("Running report executable script on Power9 using report script " + self.__rgt_test.get_report_command())
-        return self.start_report_script(self.__rgt_test.get_report_command())
+        BaseMachine.__init__(self,
+                             name=name,
+                             scheduler_type=scheduler,
+                             jobLauncher_type=jobLauncher,
+                             numNodes=numNodes,
+                             numSockets=numSocketsPerNode,
+                             numCoresPerSocket=numCoresPerSocket,
+                             apptest=apptest)
+
+        # Process the test input file. The subtest knows the path to
+        # the test input file.
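+        # An explicit rgt_test_input_file argument, when supplied, overrides
+        # the path the subtest object already knows about.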
+        if rgt_test_input_file is None:
+            path_to_test_input_file = apptest.path_of_test_input_file
+        else:
+            path_to_test_input_file = rgt_test_input_file
+        self._rgt_test = RgtTest(path_to_test_input_file,logger=self.logger)
+        self._rgt_test.read_input_file()
+
+        # Add test parameters needed by the harness
+        harness_parameters = {}
+        harness_parameters['results_dir'] = self.apptest.get_path_to_runarchive()
+        harness_parameters['working_dir'] = self.apptest.get_path_to_workspace_run()
+        harness_parameters['build_dir'] = self.apptest.get_path_to_workspace_build()
+        harness_parameters['scripts_dir'] = self.apptest.get_path_to_scripts()
+        harness_parameters['harness_id'] = self.apptest.get_harness_id()
+        self._rgt_test.harness_parameters.update(harness_parameters)
+
+    @property
+    def build_runtime_environment_command_file(self):
+        return self.test_config.build_runtime_environment_command_file
+
+    @property
+    def submit_runtime_environment_command_file(self):
+        return self.test_config.submit_runtime_environment_command_file
+
+    @property
+    def check_runtime_environment_command_file(self):
+        return self.test_config.check_runtime_environment_command_file
+
+    @property
+    def test_config(self):
+        return self._rgt_test
+
+    #-----------------------------------------------------
+    #                                                     -
+    # Private methods                                     -
+    #                                                     -
+    #-----------------------------------------------------
 
 if __name__ == "__main__":
     print('This is the IBM Power9 class')
diff --git a/harness/machine_types/linux_utilities.py b/harness/machine_types/linux_utilities.py
new file mode 100644
index 0000000..8a751be
--- /dev/null
+++ b/harness/machine_types/linux_utilities.py
@@ -0,0 +1,466 @@
+#!/usr/bin/env python3
+
+"""Utility functions for Linux operating systems."""
+
+# Python imports
+import re
+import inspect
+import os
+import subprocess
+import shlex
+import time
+
+class LinuxEnvRegxp:
+    """
+    When one runs env | less on Linux, we get results similar to the following:
+
+    ForwardX11=yes
+    LMOD_PKG=/sw/summit/lmod/8.2.10
+    PAMI_ENABLE_STRIPING=0
+    OLCF_XL_ROOT=/sw/summit/xl/16.1.1-5
+    LSF_SERVERDIR=/opt/ibm/spectrumcomputing/lsf/10.1.0.9/linux3.10-glibc2.17-ppc64le-csm/etc
+    BASH_FUNC_module()=() {  eval $($LMOD_CMD bash "$@") && eval $(${LMOD_SETTARG_CMD:-:} -s sh)
+    }
+    BASH_FUNC_ml()=() {  eval $($LMOD_DIR/ml_cmd "$@")
+    }
+
+    Sometimes the environmental variable values are multiline entries, or the variable name
+    contains non-alphanumeric characters. To ensure that one parses the environmental
+    variable for the correct name and value, we need a compiled python regular expression
+    that matches the start of a new environmental variable.
+    """
+
+    _reg_expression = r'(?P<key>^[\w_]+|^BASH_FUNC_.*)=(?P<value>[^ \t].*$)'
+    """string: The regular expression.
+
+    If the regular expression is matched, we have the start of a new environmental
+    variable. When matched, the regular expression has 2 groups. The first group
+    is accessed by "key", and it returns the environmental variable name. The
+    second group is accessed by "value", and it returns the character string after
+    the first equal sign.
+    """
+
+    env_variable_regxp = re.compile(_reg_expression,flags=re.ASCII)
+    """re.compile : The compiled regular expression for matching the start of an environmental variable."""
+
+def make_batch_script_for_linux(a_machine):
+    """ Creates a batch script for Linux machines.
+
+    Notes
+    -----
+    An OSError is caught if the batch template file cannot be read.
+    An OSError is caught if the batch file cannot be written.
+    In either case False is returned.
+
+    Parameters
+    ----------
+    a_machine : A machine object with a Linux operating system.
+
+    Returns
+    -------
+    bstatus : bool
+        If bstatus is True, then the batch script was successfully created.
+        If bstatus is False, then the batch script was not successfully created.
+
+    """
+    # Get the name of the current function.
+    frame = inspect.currentframe()
+    function_name = inspect.getframeinfo(frame).function
+
+    # Log our execution location.
+    messloc = "In function {functionname}:".format(functionname=function_name)
+    message = "Making batch script for {} using file {}.".format(a_machine.machine_name,a_machine.get_scheduler_template_file_name())
+    a_machine.logger.doInfoLogging(message)
+
+    bstatus = True
+
+    batch_template_file = a_machine.get_scheduler_template_file_name()
+
+    batch_file_path = os.path.join(a_machine.apptest.get_path_to_runarchive(),
+                                   a_machine.test_config.get_batch_file())
+
+    message = f"{messloc} The batch scheduler template file is {batch_template_file}."
+    a_machine.logger.doInfoLogging(message)
+
+    # Get batch job template lines
+    try:
+        with open(batch_template_file, "r") as templatefileobj:
+            templatelines = templatefileobj.readlines()
+    except OSError as err:
+        bstatus = False
+        message = ( f"{messloc} Error opening batch template file '{batch_template_file}' for reading.\n"
+                    f"Handling error: {err}\n" )
+        a_machine.logger.doCriticalLogging(message)
+
+    if bstatus:
+        message = f"{messloc} Completed reading lines of the batch template file {batch_template_file}."
+        a_machine.logger.doInfoLogging(message)
+
+        # Create test batch job script in run archive directory
+        try:
+            with open(batch_file_path, "w") as batch_job:
+                # Replace all the wildcards in the batch job template with the values in
+                # the test config
+                test_replacements = a_machine.test_config.get_test_replacements()
+                for record in templatelines:
+                    for (replace_key,val) in test_replacements.items():
+                        re_tmp = re.compile(replace_key)
+                        record = re_tmp.sub(val, record)
+                    batch_job.write(record)
+        except OSError as err:
+            bstatus = False
+            message = ( f"{messloc} Error opening batch file '{batch_file_path}' for writing.\n"
+                        f"Handling error: {err}\n" )
+            a_machine.logger.doCriticalLogging(message)
+
+        if bstatus:
+            message = f"{messloc} Completed regex substitutions."
+            a_machine.logger.doInfoLogging(message)
+
+    return bstatus
+
+def check_executable(a_machine,new_env):
+    """Runs the test's check command and returns its exit status.
+
+    Parameters
+    ----------
+    a_machine : A machine object with a Linux operating system.
+
+    new_env : A dictionary
+        A dictionary of environmental variables to be passed to Popen.
+
+    Returns
+    -------
+    int
+        The value of the check command exit status.
+    """
+
+    # Get the current location of execution.
+    my_current_frame = inspect.currentframe()
+    my_current_frame_info = inspect.getframeinfo(my_current_frame)
+    my_function_name = my_current_frame_info.function
+    messloc = "In function {functionname}:".format(functionname=my_function_name)
+
+    checkcmd = a_machine.check_command
+    path_to_checkscript = a_machine.apptest.get_path_to_scripts()
+    check_command_line = _form_proper_command_line(path_to_checkscript,checkcmd)
+
+    message = f"{messloc} The check command line is {check_command_line}."
+    a_machine.logger.doInfoLogging(message)
+
+    check_outfile = "output_check.txt"
+    check_stdout = open(check_outfile, "w")
+
+    if new_env is not None:
+        p = subprocess.Popen(check_command_line,shell=True,env=new_env,stdout=check_stdout,stderr=subprocess.STDOUT)
+    else:
+        p = subprocess.Popen(check_command_line,shell=True,stdout=check_stdout,stderr=subprocess.STDOUT)
+
+    p.wait()
+    check_stdout.close()
+
+    check_exit_status = p.returncode
+
+    message = f"{messloc} The check command return code is {check_exit_status}."
+    a_machine.logger.doInfoLogging(message)
+
+    return check_exit_status
+
+def is_all_tests_passed(stest):
+    """ Verify that all tests have passed.
+
+    Parameters
+    ----------
+    stest : A subtest object
+        The subtest instance used to check if all tests have passed.
+
+    Returns
+    -------
+    bool
+        If the bool is True, then all tests have passed. Otherwise,
+        False is returned if any test has failed.
+    """
+
+    from libraries.status_file_factory import StatusFileFactory
+    path_to_status_file = stest.get_path_to_status_file()
+    sfile = StatusFileFactory.create(path_to_status_file=path_to_status_file)
+    ret_val = sfile.didAllTestsPass()
+    return ret_val
+
+def isTestCycleComplete(stest):
+    """ Verify that the testing cycle of the Harness jobs is complete.
+
+    Parameters
+    ----------
+    stest : A subtest object
+        The subtest instance used to check if the testing cycle is complete.
+
+    Returns
+    -------
+    bool
+        If the bool is True, then the subtest cycle is complete. Otherwise
+        the cycle is in progress and False is returned.
+    """
+    # From the test status file, verify all jobs
+    # are completed and no new jobs are waiting to run.
+    # Get the path to the status file
+    from libraries.status_file_factory import StatusFileFactory
+    path_to_status_file = stest.get_path_to_status_file()
+    sfile = StatusFileFactory.create(path_to_status_file=path_to_status_file)
+
+    # Set the time between checks for verifying all jobs are complete
+    # and no new jobs have started.
+    time_between_checks = 10
+    time_to_wait_for_new_job = 5
+    jobs_still_pending = True
+
+    # Set the number of checks to perform.
+    max_checks = 3
+    for check_nm in range(max_checks):
+        last_harness_id = sfile.getLastHarnessID()
+        time.sleep(time_between_checks)
+        if sfile.isTestFinished(last_harness_id):
+            # Wait a short time in case a new job is submitted.
+            time.sleep(time_to_wait_for_new_job)
+            new_harness_id = sfile.getLastHarnessID()
+            if new_harness_id == last_harness_id:
+                # No new job was submitted, so no jobs are pending.
+                jobs_still_pending = False
+            else:
+                # A new job was submitted; the next iteration of this
+                # loop will pick up its harness id.
+                jobs_still_pending = True
+
+    if jobs_still_pending:
+        subtest_cycle_complete = False
+    else:
+        subtest_cycle_complete = True
+
+    return subtest_cycle_complete
+
+def get_new_environment(a_machine,filename):
+    """ Returns a dictionary of the environmental variables.
+
+    The method returns a dictionary of the environment of a process that
+    runs the command to set the build runtime environment. The command,
+    along with the env command, is written to a temporary file. The temporary
+    file is executed, and the output is captured and parsed into a dictionary.
+
+    Parameters
+    ----------
+    a_machine : A machine object with a Linux operating system.
+
+    filename : str
+        The name of the file that contains the command to set the environment.
+
+    Returns
+    -------
+    dict
+        A dictionary obj["env_key"] = env_value where env_key is the environmental
+        variable and env_value is its value.
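+
+    Examples
+    --------
+    A hypothetical call; "my_env_commands.sh" is an assumed file name:
+
+    >>> new_env = get_new_environment(a_machine, "my_env_commands.sh")
+    >>> "PATH" in new_env
+    True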
+ """ + path_to_build_directory = a_machine.apptest.get_path_to_workspace_build() + tmp_source_file = os.path.join(path_to_build_directory,"tmp_source_file") + std_out_file = os.path.join(path_to_build_directory,"std.env.out.txt") + std_err_file = os.path.join(path_to_build_directory,"std.env.err.txt") + + #----------------------------------------------------- + # Write the current environmental variables to file. - + # - + #----------------------------------------------------- + with open(tmp_source_file, 'w') as tmp_src_file: + tmp_src_file.write('#!/usr/bin/env bash\n') + tmp_src_file.write('source %s\n'%filename) + tmp_src_file.write('env\n') + + # Execute the random file with Popen and capture the std output. + os.chmod(tmp_source_file,0o755) + with open(std_out_file, 'w') as out: + with open(std_err_file, 'w') as err: + with subprocess.Popen([tmp_source_file], + shell=False, + cwd=path_to_build_directory, + stdout=out, stderr=err) as process1: + process1.wait() + + if process1.returncode != 0: + message = "The return code of the Popen process to set the environment != 0." + raise BaseMachine.SetBuildRTEError(message) + + #----------------------------------------------------- + # Read the file and store the in list records. - + # - + #----------------------------------------------------- + with open(std_out_file, 'r') as infile: + records = infile.readlines() + + #----------------------------------------------------- + # Now loop over the records and process - + # the environment variables. - + # - + #----------------------------------------------------- + env_dict = {} + current_line_nm = 0 + nm_records = len(records) + while current_line_nm < nm_records: + # Check that on the current line we have a new environmental + # variable entry for this line. If a new environmental variable + # is not found then proceed to the next line. + record_decoded = records[current_line_nm] + + search = LinuxEnvRegxp.env_variable_regxp.search(record_decoded) + if search: + key=search.group('key') + a_machine.logger.doInfoLogging(f"Found new env variable {key} at line: {current_line_nm}") + else: + message = "Error in finding the next environment variable.\n" + message += f"The following line, #{current_line_nm}, had no matches for searches:\n" + message += record_decoded + "\n" + a_machine.logger.doCriticalLogging(message) + raise BaseMachine.SetBuildRTEError(message) + + # We now get the range of entries for this environmental variable. + start_line = current_line_nm + + # Set the pending current line number to the current + # line number. + pending_current_line_nm = current_line_nm + + if ( start_line == (nm_records-1) ): + + # We are at the last line and the finish + # line is the last line. + finish_line = nm_records - 1 + + pending_current_line_nm += 1 + else: + + search_range_begin = current_line_nm + 1 # Set the start range for + # searching the next environmental entry + + max_search_range_end = nm_records - 1 # Set the maximum rnage of lines to search. + # Offset by 1 because records + # list index starts at 0. + + a_machine.logger.doInfoLogging(f"Search range is {search_range_begin} to {max_search_range_end}.") + + for tmp_line_nm in range(search_range_begin,max_search_range_end+1,1): + pending_current_line_nm += 1 + record_decoded = records[tmp_line_nm] + search = LinuxEnvRegxp.env_variable_regxp.search(record_decoded) + if search: + # We have found the next environmental variable entry + # so break from for loop. 
+                    break
+
+
+        # The finish_line is 1 less than the pending_current_line_nm
+        # due to the prior for loop breaking at the start of the
+        # next environmental variable.
+        finish_line = pending_current_line_nm - 1
+
+        # We now parse the range of entries for the environmental key and
+        # value.
+        _parse_env_variable(records[start_line:(finish_line+1)],env_dict)
+
+        # The current line is now equal to pending_current_line_nm.
+        current_line_nm = pending_current_line_nm
+
+    return env_dict
+
+def build_executable(a_machine, new_env):
+    """ Return the status of the build. Runs the build command.
+
+    Parameters
+    ----------
+    a_machine : A machine object with a Linux operating system.
+
+    new_env : A dictionary
+        A dictionary of environmental variables to be passed to Popen.
+
+    Returns
+    -------
+    int
+        The value of the build command exit status.
+    """
+    # Get the name of the current function.
+    frame = inspect.currentframe()
+    function_name = inspect.getframeinfo(frame).function
+    messloc = "In function {functionname}:".format(functionname=function_name)
+
+    # We get the command for building the binary.
+    buildcmd = a_machine.test_config.get_build_command()
+    build_std_out_path = "output_build.stdout.txt"
+    build_std_err_path = "output_build.stderr.txt"
+    message = f"{messloc} The build command: {buildcmd}"
+    a_machine.logger.doInfoLogging(message)
+    with open(build_std_out_path,"w") as build_std_out :
+        with open(build_std_err_path,"w") as build_std_err :
+            if new_env is not None:
+                message = f"{messloc} Setting new environment for build."
+                p = subprocess.Popen(buildcmd,shell=True,env=new_env,stdout=build_std_out,stderr=build_std_err)
+                a_machine.logger.doInfoLogging(message)
+            else:
+                message = f"{messloc} Using old environment for build."
+                p = subprocess.Popen(buildcmd,shell=True,stdout=build_std_out,stderr=build_std_err)
+                a_machine.logger.doInfoLogging(message)
+            p.wait()
+    build_exit_status = p.returncode
+
+    return build_exit_status
+
+def submit_batch_script(a_machine, new_env):
+    """ Submits the batch script to the scheduler.
+
+    Parameters
+    ----------
+    a_machine : A machine object with a Linux operating system.
+
+    new_env : A dictionary
+        A dictionary of environmental variables to be passed to Popen.
+
+    Returns
+    -------
+    int
+        The value of the submit command exit status.
+    """
+    # Get the name of the current function.
+    frame = inspect.currentframe()
+    function_name = inspect.getframeinfo(frame).function
+    messloc = "In function {functionname}:".format(functionname=function_name)
+
+    # Set environment variables with os.putenv() so that the submit
+    # subprocess inherits them.
+    env_vars = a_machine.test_config.test_environment
+    message = ""
+    for e in env_vars:
+        v = env_vars[e]
+        print("Setting env var", e, "=", v)
+        os.putenv(e.upper(), v)
+        message += f"Set environmental variable {e}={v}\n"
+    if new_env is not None:
+        for e in new_env:
+            v = new_env[e]
+            os.putenv(e.upper(), v)
+            message += f"Set environmental variable {e}={v}\n"
+    a_machine.logger.doInfoLogging(message)
+
+    batch_script = a_machine.test_config.get_batch_file()
+    submit_exit_value = a_machine.submit_to_scheduler(batch_script)
+
+    message = f"{messloc} Submitted batch script {batch_script} with exit status of {submit_exit_value}."
+    a_machine.logger.doInfoLogging(message)
+    return submit_exit_value
+
+#-----------------------------------------------------
+#                                                    -
+# Private methods                                    -
+#                                                    -
+#-----------------------------------------------------
+
+def _form_proper_command_line(path_to_scripts,command_line):
+    # Prepend the scripts directory to the command, then rejoin any
+    # remaining arguments with single spaces.
+    args = shlex.split(command_line)
+    proper_command = path_to_scripts
+    for ip in range(len(args)):
+        if ip == 0 :
+            proper_command = proper_command + "/" + args[0]
+        else:
+            proper_command = proper_command + " " + args[ip]
+    return proper_command
+
+def _parse_env_variable(records,new_env):
+    # The first record starts a new environmental variable entry; any
+    # subsequent records are continuation lines of its value.
+    new_record = ""
+    for (ip,tmp_line) in enumerate(records):
+        if ip == 0 :
+            search = LinuxEnvRegxp.env_variable_regxp.search(tmp_line)
+            key=search.group('key')
+            tmp_value = search.group('value')
+        else:
+            tmp_value = tmp_line
+        new_record += tmp_value
+
+    new_env[key] = new_record
+
+    return new_env
diff --git a/harness/machine_types/linux_x86_64.py b/harness/machine_types/linux_x86_64.py
new file mode 100644
index 0000000..fa80c62
--- /dev/null
+++ b/harness/machine_types/linux_x86_64.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env python3
+
+# %Authors%
+
+# Python imports.
+import sys
+import os
+import shlex
+import subprocess
+import time
+import re
+
+# Local imports.
+from machine_types.base_machine import BaseMachine
+from machine_types.rgt_test import RgtTest
+
+class Linux_x86_64(BaseMachine):
+
+    def __init__(self,
+                 name='Linux x86_64',
+                 scheduler=None,
+                 jobLauncher=None,
+                 numNodes=1,
+                 numSocketsPerNode=1,
+                 numCoresPerSocket=1,
+                 rgt_test_input_file=None,
+                 apptest=None):
+
+        BaseMachine.__init__(self,
+                             name=name,
+                             scheduler_type=scheduler,
+                             jobLauncher_type=jobLauncher,
+                             numNodes=numNodes,
+                             numSockets=numSocketsPerNode,
+                             numCoresPerSocket=numCoresPerSocket,
+                             apptest=apptest)
+
+        # Process the test input file. The subtest knows the path to
+        # the test input file.
+        if rgt_test_input_file is None:
+            path_to_test_input_file = apptest.path_of_test_input_file
+        else:
+            path_to_test_input_file = rgt_test_input_file
+        self._rgt_test = RgtTest(path_to_test_input_file,logger=self.logger)
+        self._rgt_test.read_input_file()
+
+        # Add test parameters needed by the harness
+        harness_parameters = {}
+        harness_parameters['results_dir'] = self.apptest.get_path_to_runarchive()
+        harness_parameters['working_dir'] = self.apptest.get_path_to_workspace_run()
+        harness_parameters['build_dir'] = self.apptest.get_path_to_workspace_build()
+        harness_parameters['scripts_dir'] = self.apptest.get_path_to_scripts()
+        harness_parameters['harness_id'] = self.apptest.get_harness_id()
+        self._rgt_test.harness_parameters.update(harness_parameters)
+
+    @property
+    def build_runtime_environment_command_file(self):
+        return self.test_config.build_runtime_environment_command_file
+
+    @property
+    def submit_runtime_environment_command_file(self):
+        return self.test_config.submit_runtime_environment_command_file
+
+    @property
+    def check_runtime_environment_command_file(self):
+        return self.test_config.check_runtime_environment_command_file
+
+    @property
+    def test_config(self):
+        return self._rgt_test
+
+    #-----------------------------------------------------
+    #                                                    -
+    # Private methods                                    -
+    #                                                    -
+    #-----------------------------------------------------
+
+if __name__ == "__main__":
+    print('This is the Linux x86_64 class')
diff --git a/harness/machine_types/lsf.py b/harness/machine_types/lsf.py
index ac5d117..eb85427 100644
--- a/harness/machine_types/lsf.py
+++ b/harness/machine_types/lsf.py
@@ -3,13 +3,13 @@
 # Author: Veronica G. Vergara L.
 #
 #
-
-from libraries.layout_of_apps_directory import apptest_layout
-from .base_scheduler import BaseScheduler
+import os
 import shlex
 import subprocess
 import re
-import os
+
+from .base_scheduler import BaseScheduler
+
 
 class LSF(BaseScheduler):
 
@@ -24,19 +24,25 @@ def __init__(self):
         self.__numTasksOpt = '-n'
         self.__jobNameOpt = '-N'
         self.__templateFile = 'lsf.template.x'
-        BaseScheduler.__init__(self,self.__name,self.__submitCmd,self.__statusCmd,
-                               self.__deleteCmd,self.__walltimeOpt,self.__numTasksOpt,
-                               self.__jobNameOpt,self.__templateFile)
+        BaseScheduler.__init__(self, self.__name,
+                self.__submitCmd, self.__statusCmd, self.__deleteCmd,
+                self.__walltimeOpt, self.__numTasksOpt, self.__jobNameOpt,
+                self.__templateFile)
 
-    def submit_job(self,batchfilename):
+    def submit_job(self, batchfilename):
         print("Submitting job from LSF class using batchfilename " + batchfilename)
 
         qargs = ""
-        if "RGT_SUBMIT_QUEUE" in os.environ:
-            qargs = " -q " + os.environ.get('RGT_SUBMIT_QUEUE')
+        if 'RGT_SUBMIT_QUEUE' in os.environ:
+            qargs += " -q " + os.environ.get('RGT_SUBMIT_QUEUE')
+
+        if 'RGT_SUBMIT_ARGS' in os.environ:
+            qargs += " " + os.getenv('RGT_SUBMIT_ARGS')
 
-        if "RGT_SUBMIT_ARGS" in os.environ:
-            qargs = qargs + os.getenv('RGT_SUBMIT_ARGS')
+        if 'RGT_PROJECT_ID' in os.environ:
+            qargs += " -P " + os.environ.get('RGT_PROJECT_ID')
+        elif 'RGT_ACCT_ID' in os.environ:
+            qargs += " -P " + os.environ.get('RGT_ACCT_ID')
 
         qcommand = self.__submitCmd + " " + qargs + " " + batchfilename
         print(qcommand)
@@ -77,6 +83,14 @@ def submit_job(self,batchfilename):
 
         return p.returncode
 
+    def set_job_id_from_environ(self):
+        print("Setting job id from environment in LSF class")
+        jobvar = 'LSB_JOBID'
+        if jobvar in os.environ:
+            self.set_job_id(os.environ[jobvar])
+        else:
+            print(f'{jobvar} not set in environment!')
+        return
 
 if __name__ == '__main__':
     print('This is the LSF scheduler class')
diff --git a/harness/machine_types/machine_factory.py b/harness/machine_types/machine_factory.py
index f40a78d..8f91251 100644
--- a/harness/machine_types/machine_factory.py
+++ b/harness/machine_types/machine_factory.py
@@ -3,12 +3,10 @@
 import sys
 
 # Local package imports
-from .cray_xk7 import CrayXK7
-from .ibm_power8 import IBMpower8
 from .ibm_power9 import IBMpower9
-from .rhel_x86 import RHELx86
+from .linux_x86_64 import Linux_x86_64
 from .machine_factory_exceptions import MachineTypeNotImplementedError
-from .machine_factory_exceptions import MachineTypeUndefinedEnvironmentalVariableError
+from .machine_factory_exceptions import MachineTypeUndefinedVariableError
 
 class MachineFactory:
 
@@ -16,70 +14,82 @@ def __init__(self):
         return
 
     @staticmethod
-    def create_machine(app_subtest):
-        rgt_machine_name = os.environ.get("RGT_MACHINE_NAME")
-        rgt_scheduler_type = os.environ.get("RGT_SCHEDULER_TYPE")
-        rgt_jobLauncher_type = os.environ.get("RGT_JOBLAUNCHER_TYPE")
+    def create_machine(harness_config,
+                       app_subtest):
 
-        # Verify that the environmental variables 'RGT_MACHINE_NAME',
-        # 'RGT_SCHEDULER_TYPE', and 'RGT_JOBLAUNCHER_TYPE' are defined.
+        machine_config = harness_config.get_machine_config()
+
+        # Verify that the machine configuration variables 'machine_name',
+        # 'scheduler_type', and 'joblauncher_type' are defined.
         # Otherwise throw an exception and stop.
+        rgt_machine_name = None
+        rgt_machine_type = None
+        rgt_scheduler = None
+        rgt_launcher = None
         try:
+            rgt_machine_name = machine_config.get('machine_name')
             if rgt_machine_name == None:
-                print('No machine name provided. Please set the RGT_MACHINE_NAME variable'.format(rgt_machine_name))
-                raise MachineTypeUndefinedEnvironmentalVariableError("RGT_MACHINE_NAME")
+                print('No machine name provided by harness configuration!')
+                raise MachineTypeUndefinedVariableError("MachineDetails.machine_name")
+
+            rgt_machine_type = machine_config.get('machine_type')
+            if rgt_machine_type is None:
+                print('No machine type provided by harness configuration!')
+                raise MachineTypeUndefinedVariableError("MachineDetails.machine_type")
 
-            if rgt_scheduler_type == None:
-                print('No scheduler type provided. Please set the RGT_SCHEDULER_TYPE variable'.format(rgt_scheduler_type))
-                raise MachineTypeUndefinedEnvironmentalVariableError("RGT_SCHEDULER_TYPE")
+            rgt_scheduler = machine_config.get('scheduler_type')
+            if rgt_scheduler is None:
+                print('No scheduler type provided by harness configuration!')
+                raise MachineTypeUndefinedVariableError("MachineDetails.scheduler_type")
 
-            if rgt_jobLauncher_type == None:
-                print('No scheduler type provided. Please set the RGT_JOBLAUNCHER_TYPE variable'.format(rgt_jobLauncher_type))
-                raise MachineTypeUndefinedEnvironmentalVariableError("RGT_JOBLAUNCHER_TYPE")
+            rgt_launcher = machine_config.get('joblauncher_type')
+            if rgt_launcher is None:
+                print('No job launcher type provided by harness configuration!')
+                raise MachineTypeUndefinedVariableError("MachineDetails.joblauncher_type")
 
-        except MachineTypeUndefinedEnvironmentalVariableError as my_exception:
+        except MachineTypeUndefinedVariableError as my_exception:
             my_exception.what()
             sys.exit()
 
+        rgt_num_nodes = machine_config.get('node_count')
+        if rgt_num_nodes is None:
+            rgt_num_nodes = 1
+
+        rgt_cores_per_node = machine_config.get('cpus_per_node')
+        if rgt_cores_per_node is None:
+            rgt_cores_per_node = 1
+
+        rgt_sockets_per_node = machine_config.get('sockets_per_node')
+        if rgt_sockets_per_node is None:
+            rgt_sockets_per_node = 1
 
-        message = "Creating machine of type {machine_type} with scheduler of type {scheduler_type} and job launcher of type {job_launcher_type}\n".format(
-            machine_type=rgt_machine_name,
-            scheduler_type=rgt_scheduler_type,
-            job_launcher_type = rgt_jobLauncher_type)
+        rgt_cores_per_socket = int(rgt_cores_per_node) // int(rgt_sockets_per_node)
+
+        message = f'Creating machine {rgt_machine_name}: Type = {rgt_machine_type} ; Scheduler = {rgt_scheduler} ; Job launcher = {rgt_launcher}'
         print(message)
 
         # We now create a new machine. If the new machine type is not implemented,
         # then warn user, throw an exception and stop.
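+        # For illustration only: the settings read above typically come from
+        # an INI section along these lines, with key names matching the
+        # machine_config.get() calls; all values below are hypothetical.
+        #
+        #   [MachineDetails]
+        #   machine_name = summit
+        #   machine_type = ibm_power9
+        #   scheduler_type = lsf
+        #   joblauncher_type = jsrun
+        #   node_count = 2
+        #   sockets_per_node = 2
+        #   cpus_per_node = 42
+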
tmp_machine = None try: - if rgt_machine_name == "summitdev": - tmp_machine = IBMpower8(name=rgt_machine_name, - scheduler=rgt_scheduler_type, - jobLauncher=rgt_jobLauncher_type, - apptest=app_subtest) - elif rgt_machine_name == "peak": - tmp_machine = IBMpower9(name=rgt_machine_name, - scheduler=rgt_scheduler_type, - jobLauncher=rgt_jobLauncher_type, - apptest=app_subtest) - elif rgt_machine_name == "summit": + if rgt_machine_type == "ibm_power9": tmp_machine = IBMpower9(name=rgt_machine_name, - scheduler=rgt_scheduler_type, - jobLauncher=rgt_jobLauncher_type, + scheduler=rgt_scheduler, + jobLauncher=rgt_launcher, + numNodes=int(rgt_num_nodes), + numSocketsPerNode=int(rgt_sockets_per_node), + numCoresPerSocket=int(rgt_cores_per_socket), apptest=app_subtest) - elif rgt_machine_name == "rhea": - tmp_machine = RHELx86(name=rgt_machine_name, - scheduler=rgt_scheduler_type, - jobLauncher=rgt_jobLauncher_type, - apptest=app_subtest) - elif rgt_machine_name == "lyra": - tmp_machine = RHELx86(name=rgt_machine_name, - scheduler=rgt_scheduler_type, - jobLauncher=rgt_jobLauncher_type, - apptest=app_subtest) + elif rgt_machine_type == "linux_x86_64": + tmp_machine = Linux_x86_64(name=rgt_machine_name, + scheduler=rgt_scheduler, + jobLauncher=rgt_launcher, + numNodes=int(rgt_num_nodes), + numSocketsPerNode=int(rgt_sockets_per_node), + numCoresPerSocket=int(rgt_cores_per_socket), + apptest=app_subtest) else: - print("Machine name does not exist. Good bye!") - raise MachineTypeNotImplementedError(rgt_machine_name) + raise MachineTypeNotImplementedError(rgt_machine_type) except MachineTypeNotImplementedError as my_exception: my_exception.what() sys.exit() diff --git a/harness/machine_types/machine_factory_exceptions.py b/harness/machine_types/machine_factory_exceptions.py index 8e1cda6..1d5adf1 100644 --- a/harness/machine_types/machine_factory_exceptions.py +++ b/harness/machine_types/machine_factory_exceptions.py @@ -13,13 +13,13 @@ def what(self): message = "The machine type '{}' is not implemented.".format(self.machine_type) print(message) -class MachineTypeUndefinedEnvironmentalVariableError(MachineTypeError): - """An environmental variable is undefined""" +class MachineTypeUndefinedVariableError(MachineTypeError): + """A machine configuration variable is undefined""" def __init__(self, - env_variable): - self.env_variable = env_variable + cfg_variable): + self.cfg_variable = cfg_variable return def what(self): - message = "The environmental varaible '{}' is undefined.".format(self.env_variable) + message = "The machine configuration variable '{}' is undefined.".format(self.cfg_variable) print(message) diff --git a/harness/machine_types/pbs.py b/harness/machine_types/pbs.py index bbac55a..b86f659 100644 --- a/harness/machine_types/pbs.py +++ b/harness/machine_types/pbs.py @@ -4,21 +4,85 @@ # # +import os +import shlex +import subprocess +import re + from .base_scheduler import BaseScheduler class PBS(BaseScheduler): def __init__(self): - self.__type = 'PBS' + self.__name = 'PBS' self.__submitCmd = 'qsub' self.__statusCmd = 'qstat' self.__deleteCmd = 'qdel' self.__walltimeOpt = '-l walltime=' self.__numTasksOpt = '-l nodes=' self.__jobNameOpt = '-N' - BaseScheduler.__init__(self,self.__name,self.__submitCmd,self.__statusCmd, - self.__deleteCmd,self.__walltimeOpt,self.__numTasksOpt, - self.__jobNameOpt) + self.__templateFile = 'pbs.template.x' + BaseScheduler.__init__(self, self.__name, + self.__submitCmd, self.__statusCmd, self.__deleteCmd, + self.__walltimeOpt, self.__numTasksOpt, self.__jobNameOpt, 
+                self.__templateFile)
+
+    def submit_job(self, batchfilename):
+        print("Submitting job from PBS class using batchfilename " + batchfilename)
+
+        qargs = ""
+        if 'RGT_SUBMIT_QUEUE' in os.environ:
+            qargs += " -q " + os.environ.get('RGT_SUBMIT_QUEUE')
+
+        if 'RGT_SUBMIT_ARGS' in os.environ:
+            qargs += " " + os.getenv('RGT_SUBMIT_ARGS')
+
+        if 'RGT_PROJECT_ID' in os.environ:
+            qargs += " -A " + os.environ.get('RGT_PROJECT_ID')
+        elif 'RGT_ACCT_ID' in os.environ:
+            qargs += " -A " + os.environ.get('RGT_ACCT_ID')
+
+        qcommand = self.__submitCmd + " " + qargs + " " + batchfilename
+        print(qcommand)
+
+        args = shlex.split(qcommand)
+        temp_stdout = "submit.out"
+        temp_stderr = "submit.err"
+
+        submit_stdout = open(temp_stdout,"w")
+        submit_stderr = open(temp_stderr,"w")
+
+        p = subprocess.Popen(args,stdout=submit_stdout,stderr=submit_stderr)
+        p.wait()
+
+        submit_stdout.close()
+        submit_stderr.close()
+
+        submit_stdout = open(temp_stdout,"r")
+        records = submit_stdout.readlines()
+        submit_stdout.close()
+
+        #print("records = ")
+        #print(records)
+
+        #print("Extracting PBS jobID from PBS class")
+        jobid_pattern = re.compile(r'\d+')
+        #print("jobid_pattern = ")
+        #print(jobid_pattern)
+        #print("jobid_pattern.findall = ")
+        jobid = jobid_pattern.findall(records[0])[0]
+        self.set_job_id(jobid)
+        print("PBS jobID = ",self.get_job_id())
+
+        return p.returncode
+
+    def set_job_id_from_environ(self):
+        print("Setting job id from environment in PBS class")
+        jobvar = 'PBS_JOBID'
+        if jobvar in os.environ:
+            self.set_job_id(os.environ[jobvar])
+        else:
+            print(f'{jobvar} not set in environment!')
 
 if __name__ == '__main__':
     print('This is the PBS scheduler class')
diff --git a/harness/machine_types/rgt_test.py b/harness/machine_types/rgt_test.py
index 6d34884..bcc7646 100644
--- a/harness/machine_types/rgt_test.py
+++ b/harness/machine_types/rgt_test.py
@@ -1,199 +1,401 @@
+#!/usr/bin/env python3
+"""This module abstracts the application-test input file rgt_test_input.ini.
+
+This module's main responsibility is to process the application-test input
+file. The input file stores various key-value entries that are parameters for
+running the application-test. The OLCF Harness currently supports processing
+input files of one format: the INI file format.
+
+INI format of application-test input file rgt_test_input.ini
+------------------------------------------------------------
+The following sections are allowed:
+
+    [Replacements]
+    [EnvVars]
+    [RuntimeEnvironmentCommands]
+
+The [Replacements] section is the only mandatory section. The key-value
+entries in the section are stored in 2 dictionaries which are attributes of
+the class RgtTest:
+
+    * self.__builtin_params
+    * self.__user_params
+
+The class method self._is_builtin_param tests if a key-value pair belongs in
+the dictionary self.__builtin_params. If the key-value pair fails the test,
+then the key-value pair is stored in the dictionary self.__user_params. The
+key-value pairs are used for replacements of patterns in template files.
+
+The [EnvVars] section is optional. The section contains key-value pairs for
+setting environmental variables.
+
+The [RuntimeEnvironmentCommands] section is optional. This section contains
+key-value entries where the values are commands to be run that set the
+runtime environment for each harness task.
+The only permitted keys are
+
+    * "build_rte_cmd" - key for the command to set the rte for the build task.
+    * "submit_rte_cmd" - key for the command to set the rte for the submit task.
+    * "check_rte_cmd" - key for the command to set the rte for the check task.
+    * "report_rte_cmd" - key for the command to set the rte for the report task.
+    * "all_rte_cmd" - key for the command to set the rte for all tasks.
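+
+For illustration, a minimal rgt_test_input.ini could look like the following.
+The [Replacements] keys shown are builtin keys; every value, and the EnvVars
+entry, is hypothetical:
+
+    [Replacements]
+    job_name = hello_world_16cores
+    batch_filename = run_hello_world.sh
+    build_cmd = ./build_executable.x
+    check_cmd = ./check_executable.x
+    report_cmd = ./report_executable.x
+    nodes = 1
+    walltime = 10
+
+    [EnvVars]
+    OMP_NUM_THREADS = 4
+
+    [RuntimeEnvironmentCommands]
+    build_rte_cmd = set_build_rte.sh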
+ * "check_rte_cmd" - key for the command to set the rte for the check task. + * "report_rte_cmd" - key for the command to set the rte for the report task. + * "all_rte_cmd" - key for the command to set the rte for the all tasks. """ -.. module:: rgt_test - :platform: Linux - :synopsis: Abstracts the regression test input file. -""" # # Author: Veronica G. Vergara L. # +# Python imports import configparser import os +import sys -class RgtTest(): - """ This class is the abstraction of regression test input file. +# Harness imports +from libraries.rgt_utilities import rgt_variable_name_modification +from libraries import rgt_utilities - """ - def __init__(self, filename): +class RgtTest(): + """This class is the abstraction of regression test input file.""" + + RUNTIME_ENVIRONMENT_SECTION_KEYS = {"build" : 'build_rte_cmd', + "submit" : 'submit_rte_cmd', + "check" : 'check_rte_cmd', + "report" : 'report_rte_cmd', + "all" : 'all_rte_cmd'} + """Valid key values for the runtime environment section in the rgt_test_input.ini file.""" + + HARNESS_SECTION_KEYS = {"application_test_results_dir" : 'results_dir', + "application_test_work_dir" : 'working_dir', + "application_test_build_dir" : 'build_dir', + "application_test_scripts_dir" : 'scripts_dir', + "application_test_harness_id" : 'harness_id', + "rgt_environmental_file" : "rgtenvironmentalfile", + "nccs_test_harness_module_file" : "nccstestharnessmodule" } + """Valid key values for the Harness parameter dictionary.""" + + + OBTAIN_FROM_ENVIRONMENT="" + """str: The string value for an INI entry that indicates to get the value from the shell environment.""" + + def __init__(self, filename,logger=None): + """ The constructor of the RgtTest class. + + Parameters + ---------- + filename : str + The name to the rgt_test.py input file. The input file contains + test settings, environmental varibles settings and other information + to run a test/application. + + a_logger: A rgt_logger class + An instance of the rgt_logger class. + """ + self.__inputfile = filename + """str: The name to the rgt_test.py application-test input file. """ + + self.__logger = logger + """rgt_logger: An instance of the rgt_logger class.""" + self.__builtin_params = {} + """dict: The buitin parameters of the application-test input file.""" + self.__user_params = {} + """dict: A dictionary of the keys and values that are not builtin params. + + These keys and values are found in the "Replacements" section of the + application-test input file. These key-values are not builtin key-values. + """ + self.__environ = {} + """dict: A dictionary of the environmental variables in the application-test input file. + + The dictonary store the keys and values of the EnvVars section of the application-test + input file. + """ + + self._runtime_environment_params = {} + """ A dictionary: A dictionary of commands to set the runtime environment. + + The keys of the dictionary are strings, and the corrsponding values + specify a command. See the class variable RUNTIME_ENVIRONMENT_KEYS + for valid keys. + + For example, self._runtime_environment_params['build_rte_cmd'] is + the command to set the runtime environment for building the binary. + """ + + self._harness_params = {} + """A dictionary: A dictionary of keys and values needed by the harness + + The keys of the dictionary are strings, and the corrsponding values + are strings. See the class variable HARNESS_KEYS for valid keys. 
+ """ + # dict of builtin keys - value indicates whether it is required self.__builtin_keys = { "batch_filename" : True, - "batch_queue" : True, + "batch_queue" : False, "build_cmd" : True, "check_cmd": True, "executable_path" : False, "job_name" : True, "nodes" : True, "processes_per_node" : False, - "project_id" : True, + "project_id" : False, "report_cmd" : True, "resubmit" : False, "total_processes" : False, "walltime" : True } - def get_test_input_file(self): + def __str__(self): + message = "\n" + message += "RgtTest class" + "\n" + message += "-------------" + "\n" + message += "Input file name: {}".format(self.test_input_filename) + "\n\n" + message += "Environmental Variables" + "\n" + message += "-----------------------" + "\n" + for (key,value) in self.test_environment.items(): + message += "{} = {}\n".format(key,value) + message += "\n\n" + message += "builtin params" + "\n" + message += "-----------------------" + "\n" + for (key,value) in self.builtin_parameters.items(): + message += "{} = {}\n".format(key,value) + message += "\n\n" + message += "user params" + "\n" + message += "-----------------------" + "\n" + for (key,value) in self.user_parameters.items(): + message += "{} = {}\n".format(key,value) + message += "harness params" + "\n" + message += "-----------------------" + "\n" + for (key,value) in self.harness_parameters.items(): + message += "{} = {}\n".format(key,value) + message += "\n\n" + + + return message + + @property + def test_input_filename(self): + """str: Returns the application-test input filename.""" return self.__inputfile # - # Methods to manage builtin parameters + # Methods to manage user parameters and environment # - - def is_builtin_param(self, key): - return key in self.__builtin_keys - - def add_builtin_param(self, key, required, warn=True): - if key not in self.__builtin_keys: - self.__builtin_keys[key] = required - return True - else: - if warn: - print("WARNING: provided key {} is already built-in") - return False - - def set_builtin_param(self, key, val, warn=True): - if self.is_builtin_param(key): - self.__builtin_params[key] = val - return True - else: - if warn: - print("WARNING: Ignoring invalid built-in parameter key {}".format(key)) - return False - - def get_builtin_param(self, key): - if key in self.__builtin_params: - return self.__builtin_params[key] - else: - return None - - def set_builtin_parameters(self, params_view): - for (k,v) in params_view: - self.set_builtin_param(k, v) - - def print_builtin_parameters(self): - print("RGT Test Parameters - Builtin") - print("=============================") - for (k,v) in self.__builtin_params.items(): - print(k,"=",v) - - def check_required_parameters(self): - missing = 0 - for (k,required) in self.__builtin_keys.items(): - if required and k not in self.__builtin_params: - missing = 1 - print("ERROR: required test input parameter {} is not set!".format(k)) - if missing: - exit(1) - + @property + def user_parameters(self): + """dict: The dictionary user parameters of the application-test input file.""" + return self.__user_params # - # Methods to manage user parameters and environment + # Methods to manage builtin parameters # - def set_user_param(self, key, val): - self.__user_params[key] = val - - def get_user_param(self, key): - if key in self.__user_params: - return self.__user_params[key] - else: - return None - - def set_user_parameters(self, params_view): - # NOTE: usign dict.update() to support append - self.__user_params.update(params_view) + @property + def 
builtin_parameters(self): + """dict: The dictionary of builtin parameters of the application-test input file.""" + return self.__builtin_params def print_user_parameters(self): print("RGT Test Parameters - User") print("==========================") - for (k,v) in self.__user_params.items(): + for (k,v) in (self.user_parameters).items(): print(k,"=",v) + # Methods to manage runtime environment commands + @property + def runtime_environment_params(self): + """dict: The dictionary of key-values for setting the runtime environment commands.""" + return self._runtime_environment_params + + @runtime_environment_params.setter + def runtime_environment_params(self,params): + """Sets the commands for the setting various runtime environment commands. + + Parameters + ---------- + params + A dictionary where the keys and values are strings. + """ + for (key,val) in params.items(): + if key in self.RUNTIME_ENVIRONMENT_SECTION_KEYS.values(): + self._runtime_environment_params[key] = val + else: + # To do is throw an exception if an invalid key,value is assigned. + pass + + @property + def build_runtime_environment_command_file(self): + """str: The command file to set the runtime environment for building the binary.""" + key = self.RUNTIME_ENVIRONMENT_SECTION_KEYS["build"] + command = self._get_rte_param(key) + return command + + @property + def submit_runtime_environment_command_file(self): + """str: The command file to set the runtime environment for submitting the batch script.""" + key = self.RUNTIME_ENVIRONMENT_SECTION_KEYS["submit"] + command = self._get_rte_param(key) + return command + + @property + def check_runtime_environment_command_file(self): + """str: The command file to set the runtime environment for checking the test results.""" + key = self.RUNTIME_ENVIRONMENT_SECTION_KEYS["check"] + command = self._get_rte_param(key) + return command + + @property + def report_runtime_environment_command_file(self): + """str: The command file to set the runtime environment for reporting the test results.""" + key = self.RUNTIME_ENVIRONMENT_SECTION_KEYS["report"] + command = self._get_rte_param(key) + return command + # # Methods to retrieve full test dictionaries # - def get_test_environment(self): + @property + def test_environment(self): + """dict: Returns a dictionary of the application-test environmental variables.""" return self.__environ - def set_test_environment(self, envvars_view): - # NOTE: usign dict.update() to support append + @test_environment.setter + def test_environment(self, envvars_view): self.__environ.update(envvars_view) - def get_test_parameters(self): - parameters = self.__builtin_params - parameters.update(self.__user_params) + @property + def test_parameters(self): + """dict: Returns a dictionary of all key-values in the application-test input file.""" + parameters = self.builtin_parameters + parameters.update(self.user_parameters) + parameters.update(self.runtime_environment_params) return parameters - def set_test_parameters(self, params_view): - for (k,v) in params_view: - if self.is_builtin_param(k): - self.set_builtin_param(k, v) + @test_parameters.setter + def test_parameters(self,params): + """Updates the appropiate test parameter dictionary. + + This method is fragile. Suppose there is builtin and + user parameter that has the same key, then only one 1 + dictionary gets updated. What we really need to know is the + section and the key-value to update the appropiate dictionaries. 
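+
+        For illustration (hypothetical values):
+
+            test.test_parameters = {'nodes' : '2', 'my_custom_key' : 'foo'}
+            # 'nodes' is a builtin key and updates self.__builtin_params;
+            # 'my_custom_key' is not and lands in self.__user_params.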
+ """ + for (k,v) in params.items(): + if self._is_builtin_param(k): + self._set_builtin_param(k, v) + else: + self._set_user_param(k, v) + + @property + def harness_parameters(self): + """Returns a dictionary of the harness test parameters + + Returns + ------- + dict: + The dictionary will have the key values found in + HARNESS_SECTION_KEYS.values(), HARNESS_SECTION_KEYS is + a dictionary. + """ + return self._harness_params + + @harness_parameters.setter + def harness_parameters(self,params): + """Updates the harness parameter dictionary. + + Parameters + ---------- + params : dict + A dictionary of the harness parameters. The keys and values + are strings. + """ + for (key,value) in params.items(): + if key in self.HARNESS_SECTION_KEYS.values(): + self._harness_params[key] = value else: - self.set_user_param(k, v) + # TODO: Throw an exception if an invalid key,value is assigned. + print("No key found for", key) def get_test_replacements(self): + """Returns a dictionary of key word replacements. + + In the application-test input file there are entries in the replacement + of the form key1 = value1. Correspondingly, there are records in the template + files with __key1__. The returned dictionary contains {..., __key1__ : value1, ...} + and is used to make the appropiate substitutions in the templates file to form the + correct files. + + Returns + ------- + dict: + Returns a dictionary with entries of form + { ..., "__key1__" : value1, ...} where key1 is a replacement + key found in the Replacements section of application-test input file + rgt_test_input.ini. + """ replacements = {} - for (k,v) in self.__builtin_params.items(): + for (k,v) in (self.builtin_parameters).items(): replace_key = '__' + k + '__' replacements[replace_key] = v - for (k,v) in self.__user_params.items(): + + for (k,v) in (self.user_parameters).items(): replace_key = '__' + k + '__' replacements[replace_key] = v - return replacements - def print_test_parameters(self): - self.print_builtin_parameters() - self.print_user_parameters() + for (k,v) in (self.harness_parameters).items(): + replace_key = '__' + k + '__' + replacements[replace_key] = v + + return replacements # # Convenience methods for retrieving specific parameters # def get_batch_file(self): - return self.get_builtin_param("batch_filename") + return self._get_builtin_param("batch_filename") def get_batch_queue(self): - return self.get_builtin_param("batch_queue") + return self._get_builtin_param("batch_queue") def get_build_command(self): - return self.get_builtin_param("build_cmd") + return self._get_builtin_param("build_cmd") def get_check_command(self): - return self.get_builtin_param("check_cmd") + return self._get_builtin_param("check_cmd") def get_report_command(self): - return self.get_builtin_param("report_cmd") + return self._get_builtin_param("report_cmd") def get_executable(self): - return self.get_builtin_param("executable_path") + return self._get_builtin_param("executable_path") def get_jobname(self): - return self.get_builtin_param("job_name") + return self._get_builtin_param("job_name") def get_nodes(self): - return self.get_builtin_param("nodes") + return self._get_builtin_param("nodes") def get_project(self): - return self.get_builtin_param("project_id") + return self._get_builtin_param("project_id") def get_walltime(self): - return self.get_builtin_param("walltime") + return self._get_builtin_param("walltime") def get_total_processes(self): - val = self.get_builtin_param("total_processes") + val = 
self._get_builtin_param("total_processes") if val is None: return str(0) else: return val def get_processes_per_node(self): - val = self.get_builtin_param("processes_per_node") + val = self._get_builtin_param("processes_per_node") if val is None: return str(0) else: @@ -204,49 +406,191 @@ def get_processes_per_node(self): # def read_input_file(self): - if os.path.isfile(self.__inputfile): - basename = os.path.basename(self.__inputfile) - if basename == "rgt_test_input.txt": - self.read_rgt_input_txt() - elif basename == "rgt_test_input.ini": - self.read_rgt_input_ini() + """Processes the appliction-test input file. + + The functions exits if the application-test input filename + is not a permitted value. + """ + try: + if os.path.isfile(self.test_input_filename): + self._read_rgt_input_ini() + self._reconcile_with_shell_environment_variables() + self._check_required_parameters() + self._print_test_parameters() else: - print("ERROR: unsupported test input file name {}".format(basename)) - exit(1) - self.print_test_parameters() - self.check_required_parameters() + error_message = "Test input file {} not found".format(self.test_input_filename) + raise ErrorRgtTestInputFileNotFound(error_message) + except ErrorRgtParameterReconcile as err: + self.__logger.doCriticalLogging(err.message) + sys.exit(err.message) + except ErrorRgtTestInputFileNotFound as err: + self.__logger.doCriticalLogging(err.message) + sys.exit(err.message) + + # Private methods + + def _get_builtin_param(self, key): + if key in self.builtin_parameters: + return (self.builtin_parameters)[key] else: - print("ERROR: test input file {} not found".format(self.__inputfile)) - exit(1) + return None + + def _is_builtin_param(self, key): + return key in self.__builtin_keys + + def _get_rte_param(self,key): + command = "" + if key in self.runtime_environment_params: + command = self.runtime_environment_params[key] + return command - def read_rgt_input_txt(self): - params_dict = {} - delimiter = '=' - fileobj = open(self.__inputfile) - filelines = fileobj.readlines() - fileobj.close() - for line in filelines: - stripline = line.strip() - if not stripline or stripline[0] == '#': - continue - (k,v) = stripline.split(delimiter) - params_dict[k.strip().lower()] = v.strip() - self.set_test_parameters(params_dict.items()) - - def read_rgt_input_ini(self): + def _set_builtin_param(self, key, val, warn=True): + if self._is_builtin_param(key): + self.__builtin_params[key] = val + return True + else: + if warn: + print("WARNING: Ignoring invalid built-in parameter key {}".format(key)) + return False + + def _is_rte_param(self,key): + return key in self.RUNTIME_ENVIRONMENT_SECTION_KEYS.values() + + def _update_replacement_parameters(self,params_view): + """Updates the appropiate replacement parameter dictionary as required.""" + for (k,v) in params_view: + if self._is_builtin_param(k): + self._set_builtin_param(k, v) + else: + self._set_user_param(k, v) + + def _read_rgt_input_ini(self): rgt_test_config = configparser.ConfigParser() - rgt_test_config.read(self.__inputfile) + rgt_test_config.read(self.test_input_filename) if not 'Replacements' in rgt_test_config: print("ERROR: missing [Replacements] section in test input") replace = dict() else: replace = rgt_test_config['Replacements'] - self.set_test_parameters(replace.items()) + self._update_replacement_parameters(replace.items()) + + # Update environment if either batch_queue or project_id is set + env_dict = {} + bq = self.get_batch_queue() + if bq is not None: + env_dict = {'submit_queue' : 
bq} + proj = self.get_project() + if proj is not None: + env_dict = {'project_id' : proj} + rgt_utilities.set_harness_environment(env_dict, override=True) if 'EnvVars' in rgt_test_config: env_vars = rgt_test_config['EnvVars'] - self.set_test_environment(env_vars) + self.test_environment = env_vars + + # We now extract the runtime environment commands. + rte_section = 'RuntimeEnvironmentCommands' + if rte_section in rgt_test_config: + runtime_env_commands = rgt_test_config[rte_section] + else: + runtime_env_commands = dict() + self.runtime_environment_params = runtime_env_commands + + def _print_test_parameters(self): + self._print_builtin_parameters() + self.print_user_parameters() + + def _reconcile_with_shell_environment_variables(self): + # Reconcile the builtin parameters, self.__builtin_params, with the + # shell environment variables. + for (key,value) in self.__builtin_params.items(): + if value == self.OBTAIN_FROM_ENVIRONMENT: + key_modified = rgt_variable_name_modification(key) + tmp_value = os.getenv(key_modified) + if tmp_value : + self.__builtin_params[key] = tmp_value + else : + error_message = "Unable to reconcile shell environmental variables and self.__builtin_params[{key}]={value}.".format(key=key,value=value) + raise ErrorRgtParameterReconcile(error_message) + + # Reconcile the user parameters, self.__user_params, with + # the shell environment variables. + for (key,value) in self.__user_params.items(): + if value == self.OBTAIN_FROM_ENVIRONMENT: + key_modified = rgt_variable_name_modification(key) + tmp_value = os.getenv(key_modified) + if tmp_value : + self.__builtin_params[key] = tmp_value + else : + error_message = "Unable to reconcile shell environmental variables and self.__user_params[{key}]={value}.".format(key=key,value=value) + raise ErrorRgtParameterReconcile(error_message) + + + # Reconcile the environment parameters, self.__environ, with + # the shell environment variables. + for (key,value) in self.__environ.items(): + if value == self.OBTAIN_FROM_ENVIRONMENT: + key_modified = rgt_variable_name_modification(key) + tmp_value = os.getenv(key_modified) + if tmp_value : + self.__environ[key] = tmp_value + else : + error_message = "Unable to reconcile shell environmental variables and self.__environ[{key}]={value}.".format(key=key,value=value) + raise ErrorRgtParameterReconcile(error_message) + return + + def _check_required_parameters(self): + missing = 0 + error_message = "" + for (k,required) in self.__builtin_keys.items(): + if required and k not in self.builtin_parameters: + missing = 1 + error_message += "ERROR: required test input parameter {} is not set!\n".format(k) + if missing: + self.__logger.doCriticalLogging(error_message) + print(error_message) + exit(1) + + def _print_builtin_parameters(self): + print("RGT Test Parameters - Builtin") + print("=============================") + for (k,v) in (self.builtin_parameters).items(): + print(k,"=",v) + + def _set_user_param(self, key, val): + self.__user_params[key] = val + + +class RgtTestError(Exception): + """Base error class for RgtTest.""" + def __init__(self,message): + """The class constructor + + Parameters + ---------- + message : string + The error message for this exception. 
+ """ + self._message = message + return + + @property + def message(self): + """str: The error message.""" + return self._message + +class ErrorRgtParameterReconcile(RgtTestError): + """Exception raised for errors in reconciling RgtTest parameters.""" + def __init__(self,message): + RgtTestError.__init__(self,message) + return + +class ErrorRgtTestInputFileNotFound(RgtTestError): + """Exception raised for errors when the rgt_test_input.ini is not found.""" + def __init__(self,message): + RgtTestError.__init__(self,message) + return if __name__ == "__main__": print('This is the RgtTest class') diff --git a/harness/machine_types/rhel_x86.py b/harness/machine_types/rhel_x86.py deleted file mode 100644 index 7f3109d..0000000 --- a/harness/machine_types/rhel_x86.py +++ /dev/null @@ -1,112 +0,0 @@ -#!/usr/bin/env python -# -# Author: Veronica G. Vergara L. -# - -from .base_machine import BaseMachine -from .rgt_test import RgtTest -import os -import re - -class RHELx86(BaseMachine): - - def __init__(self, - name='RHEL x86', - scheduler=None, - jobLauncher=None, - numNodes=1, - numSocketsPerNode=1, - numCoresPerSocket=16, - rgt_test_input_file="rgt_test_input.ini", - apptest=None): - BaseMachine.__init__(self, - name, - scheduler, - jobLauncher, - numNodes, - numSocketsPerNode, - numCoresPerSocket, - apptest) - - # process test input file - self.__rgt_test = RgtTest(rgt_test_input_file) - self.__rgt_test.read_input_file() - - test_params = {} - - # add test parameters needed by the harness - test_params['results_dir'] = self.apptest.get_path_to_runarchive() - test_params['working_dir'] = self.apptest.get_path_to_workspace_run() - test_params['build_dir'] = self.apptest.get_path_to_workspace_build() - test_params['scripts_dir'] = self.apptest.get_path_to_scripts() - test_params['harness_id'] = self.apptest.get_harness_id() - - # update the test parameters - self.__rgt_test.set_test_parameters(test_params.items()) - - # is this actually used? 
if so, it has to come after updating test parameters - #joblaunch_cmd = self.get_jobLauncher_command() - #self.__rgt_test.set_user_param("joblaunchcommand", joblaunch_cmd) - - - def get_jobLauncher_command(self): - print("Building jobLauncher command for x86") - jobLauncher_command = self.build_jobLauncher_command(self.__rgt_test.get_test_parameters()) - return jobLauncher_command - - def make_batch_script(self): - #print("[LOG] BEGIN: make_batch_script") - print("Making batch script for a RHEL x86 system using " + self.get_scheduler_template_file_name()) - - # Get batch job template lines - templatefileobj = open(self.get_scheduler_template_file_name(), "r") - templatelines = templatefileobj.readlines() - templatefileobj.close() - - # Create test batch job script in run archive directory - batch_file_path = os.path.join(self.apptest.get_path_to_runarchive(), - self.__rgt_test.get_batch_file()) - batch_job = open(batch_file_path, "w") - - # Replace all the wildcards in the batch job template with the values in - # the test config - test_replacements = self.__rgt_test.get_test_replacements() - for record in templatelines: - for (replace_key,val) in test_replacements.items(): - re_tmp = re.compile(replace_key) - record = re_tmp.sub(val, record) - batch_job.write(record) - - # Close batch job script file - batch_job.close() - #print("[LOG] END: make_batch_script") - - def build_executable(self): - print("Building executable on x86 using build script " + self.__rgt_test.get_build_command()) - return self.start_build_script(self.__rgt_test.get_build_command()) - - def submit_batch_script(self): - # Set environment vars using os.putenv() so that submit subprocess will - # inherit them - env_vars = self.__rgt_test.get_test_environment() - for e in env_vars: - v = env_vars[e] - print("Setting env var", e, "=", v) - os.putenv(e.upper(), v) - - print("Submitting batch script for x86") - batch_script = self.__rgt_test.get_batch_file() - submit_exit_value = self.submit_to_scheduler(batch_script) - print("Submitting " + batch_script + " submit_exit_value = " + str(submit_exit_value)) - return submit_exit_value - - def check_executable(self): - print("Running check executable script on x86 using check script " + self.__rgt_test.get_check_command()) - return self.check_results(self.__rgt_test.get_check_command()) - - def report_executable(self): - print("Running report executable script on x86 using report script " + self.__rgt_test.get_report_command()) - return self.start_report_script(self.__rgt_test.get_report_command()) - -if __name__ == "__main__": - print('This is the RHEL x86 class') diff --git a/harness/machine_types/scheduler_factory.py b/harness/machine_types/scheduler_factory.py index cfe147e..bc83a2e 100644 --- a/harness/machine_types/scheduler_factory.py +++ b/harness/machine_types/scheduler_factory.py @@ -21,7 +21,6 @@ def create_scheduler(scheduler_type): return tmp_scheduler - def __init__(self): pass diff --git a/harness/machine_types/slurm.py b/harness/machine_types/slurm.py index 7399cdc..2b5ac0c 100644 --- a/harness/machine_types/slurm.py +++ b/harness/machine_types/slurm.py @@ -4,12 +4,12 @@ # # -from libraries.layout_of_apps_directory import apptest_layout -from .base_scheduler import BaseScheduler +import os import shlex import subprocess import re -import os + +from .base_scheduler import BaseScheduler class SLURM(BaseScheduler): @@ -24,19 +24,25 @@ def __init__(self): self.__numTasksOpt = '-n' self.__jobNameOpt = '-J' self.__templateFile = 'slurm.template.x' - 
BaseScheduler.__init__(self,self.__name,self.__submitCmd,self.__statusCmd, - self.__deleteCmd,self.__walltimeOpt,self.__numTasksOpt, - self.__jobNameOpt,self.__templateFile) + BaseScheduler.__init__(self, self.__name, + self.__submitCmd, self.__statusCmd, self.__deleteCmd, + self.__walltimeOpt, self.__numTasksOpt, self.__jobNameOpt, + self.__templateFile) def submit_job(self, batchfilename): print("Submitting job from SLURM class using batchfilename " + batchfilename) qargs = "" - if "RGT_SUBMIT_QUEUE" in os.environ: - qargs = " -p " + os.environ.get('RGT_SUBMIT_QUEUE') + if 'RGT_SUBMIT_QUEUE' in os.environ: + qargs += " -p " + os.environ.get('RGT_SUBMIT_QUEUE') + + if 'RGT_SUBMIT_ARGS' in os.environ: + qargs += " " + os.environ.get('RGT_SUBMIT_ARGS') - if "RGT_SUBMIT_ARGS" in os.environ: - qargs = qargs + os.environ.get('RGT_SUBMIT_ARGS') + if 'RGT_PROJECT_ID' in os.environ: + qargs += " -A " + os.environ.get('RGT_PROJECT_ID') + elif 'RGT_ACCT_ID' in os.environ: + qargs += " -A " + os.environ.get('RGT_ACCT_ID') qcommand = self.__submitCmd + " " + qargs + " " + batchfilename print(qcommand) @@ -60,7 +66,6 @@ def submit_job(self, batchfilename): #print("records = ") #print(records) - #print("Extracting SLURM jobID from SLURM class") jobid_pattern = re.compile('\d+') #print("jobid_pattern = ") @@ -72,6 +77,14 @@ def submit_job(self, batchfilename): return p.returncode + def set_job_id_from_environ(self): + print("Setting job id from environment in SLURM class") + jobvar = 'SLURM_JOB_ID' + if jobvar in os.environ: + self.set_job_id(os.environ[jobvar]) + else: + print(f'{jobvar} not set in environment!') + if __name__ == '__main__': print('This is the SLURM scheduler class') diff --git a/harness/machine_types/tests/__init__.py b/harness/machine_types/tests/__init__.py index e69de29..0729850 100644 --- a/harness/machine_types/tests/__init__.py +++ b/harness/machine_types/tests/__init__.py @@ -0,0 +1 @@ +__all__ = ['test_classes'] diff --git a/modulefiles/olcf_harness b/modulefiles/olcf_harness index 80c0a8b..c47c985 100755 --- a/modulefiles/olcf_harness +++ b/modulefiles/olcf_harness @@ -18,7 +18,7 @@ prepend-path PATH $harness/utilities prepend-path LD_LIBRARY_PATH $harness/libraries prepend-path LIBRARY_PATH $harness/libraries -module load python +#module load python prepend-path PYTHONPATH $harness/utilities prepend-path PYTHONPATH $harness/bin prepend-path PYTHONPATH $harness/libraries diff --git a/modulefiles/olcf_harness_unit.lua b/modulefiles/olcf_harness_unit.lua new file mode 100755 index 0000000..189a736 --- /dev/null +++ b/modulefiles/olcf_harness_unit.lua @@ -0,0 +1,112 @@ +-- -*- lua -*- + +help ([[ +Sets up environment to use the OLCF Test Harness. +]]) + +whatis("Version: 2.0") +whatis("Repository: gitlab@gitlab.ccs.ornl.gov:olcf-system-test/olcf-test-harness.git") + + +-- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ +-- Status messages for user. +-- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ +local load_message = "Loading modulefile " .. myModuleFullName() .. " ..." +local unload_message = "Unloading modulefile " .. myModuleFullName() .. " ..." +if mode() == "load" then + LmodMessage(load_message) +end + +if mode() == "unload" then + LmodMessage(unload_message) +end + +-- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ +-- Start of section for loading the python +-- module +-- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + +-- The very first task is ensure we use Python3. +-- This load is fragile because often clusters +-- have many python module files with +-- denominations as python/X.Y. 
+-- Consequently one can't guarantee what python version
+-- is loaded.
+load("python")

+-- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+-- End of section for loading the python
+-- module
+-- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+
+
+-- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+-- Start of section for modifying various
+-- environmental variables - PATH,
+-- LD_LIBRARY_PATH, etc.
+-- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+
+-- Define the base path to test harness python scripts. -
+local harness = pathJoin(os.getenv('OLCF_HARNESS_DIR'),'harness')
+
+-- Set path to harness driver programs, binaries, ... -
+setenv('PATH_TO_RGT_PACKAGE',harness)
+prepend_path('PATH',pathJoin(harness,'bin'))
+prepend_path('PATH',pathJoin(harness,'utilities'))
+prepend_path('LD_LIBRARY_PATH',pathJoin(harness,'libraries'))
+prepend_path('LIBRARY_PATH',pathJoin(harness,'libraries'))
+
+-- Modify the PYTHONPATH
+prepend_path('PYTHONPATH',pathJoin(harness,'utilities'))
+prepend_path('PYTHONPATH',pathJoin(harness,'bin'))
+prepend_path('PYTHONPATH',pathJoin(harness,'libraries'))
+prepend_path('PYTHONPATH',pathJoin(harness))
+
+-- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+-- End of section for modifying various
+-- environmental variables - PATH,
+-- LD_LIBRARY_PATH, etc.
+-- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+
+
+-- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+-- Start of section for loading the Sphinx
+-- module file.
+-- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+
+-- Load the Sphinx module if available. Sphinx
+-- is needed to build the Harness Python documentation,
+-- but not to run the Harness.
+local sphinx_module = "sphinx/3.1.0"
+try_load(sphinx_module)
+if mode() == "load" then
+    if not isloaded(sphinx_module) then
+        LmodMessage("WARNING! The Sphinx module failed to load.")
+        LmodMessage("The Sphinx module is not necessary to run the harness, but")
+        LmodMessage("one needs the Sphinx module to build the harness documentation.")
+        LmodMessage("The minimum required Sphinx version is 3.1.0.")
+    end
+end
+
+-- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+-- End of section for loading the Sphinx
+-- module file.
+-- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+
+
+-- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+-- Start of section for loading the HARNESS
+-- unit test module file.
+-- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+
+-- This module file is needed to run the
+-- GitLab CI framework. The harness can
+-- still be run without loading this module.
+-- Nevertheless, we make it mandatory to
+-- enable the Gitlab unit tests.
+local rt_file = "runtime_environment/GenericMachine-GenericConfigTag.unit_tests"
+load(rt_file)
+
+-- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+-- End of section for loading the HARNESS
+-- unit test module file.
+-- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ diff --git a/modulefiles/runtime_environment b/modulefiles/runtime_environment new file mode 120000 index 0000000..b609b91 --- /dev/null +++ b/modulefiles/runtime_environment @@ -0,0 +1 @@ +../ci_testing_utilities/runtime_environment \ No newline at end of file diff --git a/test/__init__.py b/test/__init__.py deleted file mode 100644 index 3ff687c..0000000 --- a/test/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -__all__ = [ - "src" - ] - -version = 3.5 diff --git a/test/src/__init__.py b/test/src/__init__.py deleted file mode 100644 index 3214d69..0000000 --- a/test/src/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -__all__ = [ - "test_runtests", - "Repository_Tests", - "svn_test_repository", - "git_test_repository" - ] - -version = 3.5 diff --git a/test/src/test_runtests.py b/test/src/test_runtests.py deleted file mode 100644 index bd37555..0000000 --- a/test/src/test_runtests.py +++ /dev/null @@ -1,412 +0,0 @@ -#! /usr/bin/env python3 -""" Test class module runs a basic hello world test. """ - -import unittest -import shlex -import os -import time - -from bin import runtests -from fundamental_types.rgt_state import RgtState -from libraries.status_database import StatusDatabase -from libraries import application_test_dictionary - -class Test_runtests(unittest.TestCase): - """ Tests for main program runtests.py """ - - def setUp(self): - """ Set ups to run a basic harness tests. """ - - # - # Set environmental variables for the harness. - # - - - # Define the fully qualified name to the harness top level. - my_path_to_harness_top_level = os.getenv("PATH_TO_HARNESS_TOP_LEVEL") - - # PBS account id. - my_job_account_id = os.getenv("my_job_account_id") - os.putenv("RGT_PBS_JOB_ACCNT_ID",my_job_account_id) - - my_project_id = os.getenv("my_project_id") - os.putenv("RGT_PROJECTID",my_project_id) - - # Scratch space for running unit tests. - my_member_work = os.getenv("path_to_member_work") - my_path_to_sspace = os.path.join(my_member_work,"Harness_Unit_Testing_Scratch_Space") - os.putenv("RGT_PATH_TO_SSPACE",my_path_to_sspace) - - # The path to the rgt module file - my_rgt_module_file = os.getenv("MY_RGT_MODULE_FILE") - os.putenv("RGT_NCCS_TEST_HARNESS_MODULE",my_rgt_module_file) - - # Path to environmental variables file. - my_home_directory = os.getenv("HOME") - - # Path to my input file directory. - my_test_directory = os.getenv("MY_RGT_TEST_DIRECTORY") - my_rgt_input_directory = os.path.join(my_test_directory,"Harness_Unit_Testing_Input") - - # File name of rgt input file. - my_rgt_input_file_name = "rgt.input" - - # Define the fully qualified name to the rgt environmental variables - # file that will be in the input directory, and export to environmental variables. - fqpn_rgt_env_file_path = self.__fqpn_of_rgt_env_file(my_rgt_input_directory) - self.__export_to_environment("RGT_ENVIRONMENTAL_FILE",fqpn_rgt_env_file_path ) - - # Define the fully qualified domain name to the application repo. - my_fqdn_to_app_repo = os.getenv("MY_APP_REPO") - - # Define the repository branch. 
- my_repository_branch = os.getenv("MY_APP_REPO_BRANCH") - - - # The tests to run - hw_tests_dictionary = application_test_dictionary.ApplicationSubtestDictionary("HelloWorld") - hw_tests_dictionary.addAppSubtest("HelloWorld", - "Test_16cores") - - hw_tests_dictionary.addAppSubtest("HelloWorld", - "Test_16cores_A") - - hw_tests_dictionary.addAppSubtest("HelloWorld", - "Test_16cores_B") - - hw_tests_dictionary.addAppSubtest("HelloWorld", - "Test_16cores_C") - - - blm_tests_dictionary = application_test_dictionary.ApplicationSubtestDictionary("Bonjour_le_Monde") - - blm_tests_dictionary.addAppSubtest("Bonjour_le_Monde", - "Test_16cores") - - blm_tests_dictionary.addAppSubtest("Bonjour_le_Monde", - "Test_16cores_A") - - blm_tests_dictionary.addAppSubtest("Bonjour_le_Monde", - "Test_16cores_B") - - blm_tests_dictionary.addAppSubtest("Bonjour_le_Monde", - "Test_16cores_C") - - my_harness_tasks = ["check_out_tests", - "start_tests", - "stop_tests"] - - - my_tests = [hw_tests_dictionary, - blm_tests_dictionary] - - - # Create the input directory along with the input files. - self.__createInputDirectoryAndFiles(my_path_to_sspace, - my_rgt_input_directory, - my_rgt_input_file_name, - my_rgt_module_file, - my_path_to_harness_top_level, - my_fqdn_to_app_repo, - my_repository_branch, - my_tests, - my_member_work, - my_harness_tasks) - - self.__startingDirectory = os.getcwd() - self.__inputDirectory = my_rgt_input_directory - - os.chdir(self.__inputDirectory) - - def tearDown(self): - """ Stud doc for tear down """ - - # The database checking does not work. Talk to Wayne on fixing. - # - # time.sleep(60) - # - # sdb = StatusDatabase().load() - # - # self.__checkTest(sdb) - - os.chdir(self.__startingDirectory) - - return - - def test_hello_world_serial(self): - """ Test of harness if it can launch a MPI hello world on 1 node. """ - - argument_string = "--concurrency serial --loglevel DEBUG" - my_rgt_test = runtests.runtests(argument_string) - - # Get the state of my_rgt_test - state_of_rgt = my_rgt_test.getState() - - # The state of my_rgt_test should be "ALL_TASKS_COMPLETED". - correct_state = RgtState.ALL_TASKS_COMPLETED - - # Compare results. - error_message = "Harness Hello world serial did not complete all tasks." - self.assertEqual(state_of_rgt,correct_state,error_message) - - @unittest.skip("Skipping paralell test") - def test_hello_world_parallel(self): - argument_string = "--concurrency parallel --loglevel DEBUG" - my_rgt_test = runtests.runtests(argument_string) - - # Get the state of my_rgt_test - state_of_rgt = my_rgt_test.getState() - - # The state of my_rgt_test should be "ALL_TASKS_COMPLETED". - correct_state = RgtState.ALL_TASKS_COMPLETED - - # Compare results. - error_message = "Harness Hello world parallel did not complete all tasks." - self.assertEqual(state_of_rgt,correct_state,error_message) - - def __checkTest(self,sdb): - query_string = ( - 'SELECT check_end_event_value ' - 'FROM test_instances ' + - 'WHERE app IS \'HelloWorld\' ' - 'AND test IS \'Test_16cores\' ' - 'ORDER BY check_end_event_time DESC' ) - - query_result = sdb.query(query_string) - - count = len(query_result.split('\n')) - - return - - def __createInputDirectoryAndFiles( - self, - path_to_scratch_space, - path_to_input_directory, - rgt_input_file_name, - path_to_module_file, - path_to_harness_top_level, - fqdn_to_app_repo, - repository_branch, - harness_tests, - my_member_work, - harness_tasks): - - # Create the input directory. 
- if not os.path.isdir(path_to_input_directory): - os.makedirs(path_to_input_directory) - - # Create the rgt environmental file. - self.__createRgtEnvFile(path_to_input_directory, - path_to_scratch_space, - path_to_module_file, - path_to_harness_top_level, - my_member_work, - fqdn_to_app_repo, - repository_branch) - - # Create the rgt input file. - self.__createRgtInputFile(path_to_input_directory, - harness_tests, - harness_tasks) - - def __createRgtEnvFile( - self, - path_to_input_directory, - path_to_scratch_space, - path_to_module_file, - path_to_harness_top_level, - my_member_work, - fqdn_to_app_repo, - repository_branch): - - # Define the path to the rgt environmental variables - # file that will be in the input directory. - fqpn_rgt_env_file_path = self.__fqpn_of_rgt_env_file(path_to_input_directory) - - # Define a comment format - comment_frmt = "#---------------------------------------------------------------\n" - comment_frmt += "# {rgt_comment:<60}" - comment_frmt += "{rgt_space:<1}-\n" - comment_frmt += "#---------------------------------------------------------------\n" - - #Define a export environmental variable format. - export_frmt = "{rgt_variable}={rgt_variable_value}\n" - export_frmt += "export {rgt_variable}\n\n" - - # Open file for writing. - rgt_file_obj = open(fqpn_rgt_env_file_path,"w") - - # Write the PBS job account id export lines to file. - pbs_job_comment = comment_frmt.format(rgt_comment="JOB ACCOUNT ID", - rgt_space=" ") - rgt_file_obj.write(pbs_job_comment) - - my_job_account_id = os.getenv("my_job_account_id") - pbs_env = export_frmt.format(rgt_variable="RGT_PBS_JOB_ACCNT_ID", - rgt_variable_value="'" + my_job_account_id + "'") - rgt_file_obj.write(pbs_env) - - project_env = export_frmt.format(rgt_variable="RGT_PROJECTID", - rgt_variable_value="'" + my_job_account_id + "'") - - rgt_file_obj.write(project_env) - - # Write the path to scratch space to file. - scratch_space_comment = comment_frmt.format(rgt_comment="Absolute path to scratch space", - rgt_space=" ") - rgt_file_obj.write(scratch_space_comment) - - scratch_space_env = export_frmt.format(rgt_variable="RGT_PATH_TO_SSPACE", - rgt_variable_value="'" + path_to_scratch_space + "'") - - rgt_file_obj.write(scratch_space_env) - - - # Write path to my member work directory. - member_work_comment = comment_frmt.format(rgt_comment="Absolute path to member work directory.", - rgt_space=" ") - - rgt_file_obj.write(member_work_comment) - - member_work_env = export_frmt.format(rgt_variable="MY_MEMBER_WORK", - rgt_variable_value="'" + my_member_work + "'") - - rgt_file_obj.write(member_work_env) - - - # Write the path to module to load. - module_comment = comment_frmt.format(rgt_comment="Name of test harness module to load", - rgt_space=" ") - - rgt_file_obj.write(module_comment) - - module_env = export_frmt.format(rgt_variable="RGT_NCCS_TEST_HARNESS_MODULE", - rgt_variable_value="'" + path_to_module_file + "'") - - rgt_file_obj.write(module_env) - - # Write the path to the rgt environmental variable file. 
- rgt_path_comment = comment_frmt.format(rgt_comment="Absolute path to this file.", - rgt_space=" ") - - rgt_path_env = export_frmt.format(rgt_variable="RGT_ENVIRONMENTAL_FILE", - rgt_variable_value="'" + fqpn_rgt_env_file_path + "'") - - rgt_file_obj.write(rgt_path_comment) - rgt_file_obj.write(rgt_path_env) - - - #Write the path to the top level of the harness - rgt_path_top_level_comment = comment_frmt.format(rgt_comment="Fully qualified path to harness top level.", - rgt_space=" ") - - rgt_file_obj.write(rgt_path_top_level_comment) - - rgt_path_top_level_env = export_frmt.format(rgt_variable="PATH_TO_HARNESS_TOP_LEVEL", - rgt_variable_value="'" + path_to_harness_top_level + "'") - - rgt_file_obj.write(rgt_path_top_level_env) - - # Write the path to the Application repository. - rgt_path_top_app = comment_frmt.format(rgt_comment="Fully qualified applications path.", - rgt_space=" ") - - rgt_file_obj.write(rgt_path_top_app) - - - rgt_path_top_app_env = export_frmt.format(rgt_variable="MY_APP_REPO", - rgt_variable_value="'" + fqdn_to_app_repo + "'") - - rgt_file_obj.write(rgt_path_top_app_env) - - # Write the repository branch - rgt_path_top_app = comment_frmt.format(rgt_comment="The branch of the repository.", - rgt_space=" ") - rgt_file_obj.write(rgt_path_top_app) - - rgt_repo_branch_env = export_frmt.format(rgt_variable="MY_APP_REPO_BRANCH", - rgt_variable_value="'" + repository_branch + "'") - rgt_file_obj.write(rgt_repo_branch_env) - - # Close file. - rgt_file_obj.close() - - - def __createRgtInputFile( - self, - path_to_input_directory, - harness_tests, - harness_tasks): - - # Define a comment format - comment_frmt = "#---------------------------------------------------------------\n" - comment_frmt += "# {rgt_comment:<60}" - comment_frmt += "{rgt_space:<1}-\n" - comment_frmt += "#---------------------------------------------------------------\n" - - #Define a ??? format. - top_level_frmt = "Path_to_tests = {}\n\n" - - #Define a test format. - test_frmt = "Test = {Application} {test}\n" - - #Define a task format. - task_frmt = "Harness_task = {harness_task}\n" - - rgt_input_file_name = "rgt.input" - test_rgt_input_file_path = os.path.join(path_to_input_directory, - rgt_input_file_name ) - - rgt_file_obj = open(test_rgt_input_file_path,"w") - path_to_tests_comment = comment_frmt.format(rgt_comment="Set the path to the top level of the application directory.", - rgt_space=" ") - - # Write to file the top level path to tests. - rgt_file_obj.write(path_to_tests_comment) - - my_top_level_path_to_tests = os.path.join(path_to_input_directory,"Applications") - path_to_tests = top_level_frmt.format(my_top_level_path_to_tests) - rgt_file_obj.write(path_to_tests) - - # Write to file tests to be run. - my_tests = "" - for my_app_test in harness_tests: - for [my_application,my_subtest] in my_app_test.Tests: - my_tests += test_frmt.format(Application = my_application, - test = my_subtest) - rgt_file_obj.write(my_tests) - - #Write to file the harness tasks. 
- harness_task_comments = comment_frmt.format(rgt_comment="Harness tasks", - rgt_space=" ") - rgt_file_obj.write("\n" + harness_task_comments) - - - my_harness_tasks = "" - for a_task in harness_tasks: - my_harness_tasks += task_frmt.format(harness_task=a_task) - rgt_file_obj.write(my_harness_tasks + "\n" + "\n") - - - rgt_file_obj.close() - - def __fqpn_of_rgt_env_file( - self, - path_to_rgt_input_directory=None): - - rgt_env_file_name = "rgt.environmental_variables.sh" - fqpn_rgt_env_file_path = os.path.join(path_to_rgt_input_directory, - rgt_env_file_name) - return fqpn_rgt_env_file_path - - def __export_to_environment( - self, - key, - value): - os.environ[key] = value - - pass - -if __name__ == "__main__": - unittest.main() -
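
For reference, a sketch of the rgt.input file that the deleted helper
__createRgtInputFile wrote (the Path_to_tests value, the elided Test lines,
and the exact column padding are illustrative, not taken from a real run):

    #---------------------------------------------------------------
    # Set the path to the top level of the application directory.   -
    #---------------------------------------------------------------
    Path_to_tests = <rgt-input-directory>/Applications

    Test = HelloWorld Test_16cores
    Test = HelloWorld Test_16cores_A
    ...
    Test = Bonjour_le_Monde Test_16cores_C

    #---------------------------------------------------------------
    # Harness tasks                                                 -
    #---------------------------------------------------------------
    Harness_task = check_out_tests
    Harness_task = start_tests
    Harness_task = stop_tests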