From cc0a956b6672ad1fa9401239474c7a63f7c6c687 Mon Sep 17 00:00:00 2001 From: ebattat <73884315+ebattat@users.noreply.github.com> Date: Sun, 6 Aug 2023 07:15:44 +0300 Subject: [PATCH] check if kata and cnv is installed (#638) --- .../benchmark_operator_exceptions.py | 33 +++-- .../benchmark_operator_workloads.py | 4 +- ...benchmark_operator_workloads_operations.py | 72 ++++++----- .../benchmark_operator/hammerdb_pod.py | 6 +- .../benchmark_operator/hammerdb_vm.py | 6 +- .../benchmark_operator/stressng_pod.py | 6 +- .../benchmark_operator/stressng_vm.py | 6 +- .../benchmark_operator/uperf_pod.py | 6 +- .../benchmark_operator/uperf_vm.py | 6 +- benchmark_runner/common/oc/oc.py | 118 +++++++++--------- benchmark_runner/workloads/bootstorm_vm.py | 4 +- benchmark_runner/workloads/vdbench_pod.py | 8 +- benchmark_runner/workloads/vdbench_vm.py | 8 +- benchmark_runner/workloads/windows_vm.py | 2 +- benchmark_runner/workloads/workloads.py | 2 +- .../workloads/workloads_exceptions.py | 32 +++-- .../workloads/workloads_operations.py | 60 +++++---- 17 files changed, 214 insertions(+), 165 deletions(-) diff --git a/benchmark_runner/benchmark_operator/benchmark_operator_exceptions.py b/benchmark_runner/benchmark_operator/benchmark_operator_exceptions.py index 651c73810..6e2a6dd2e 100644 --- a/benchmark_runner/benchmark_operator/benchmark_operator_exceptions.py +++ b/benchmark_runner/benchmark_operator/benchmark_operator_exceptions.py @@ -5,26 +5,45 @@ class BenchmarkOperatorError(Exception): pass -class ODFNonInstalled(BenchmarkOperatorError): +class CNVNotInstalled(BenchmarkOperatorError): """ - This class is error that ODF operator is not installed + This class raises an error when the CNV operator is not installed """ - def __init__(self): - self.message = "ODF is not installed, set 'ODF_PVC' to False" - super(ODFNonInstalled, self).__init__(self.message) + def __init__(self, workload): + self.message = f"{workload} requires CNV to be installed" + super(CNVNotInstalled, self).__init__(self.message) + + +class KataNotInstalled(BenchmarkOperatorError): + """ + This class raises an error when the Kata operator is not installed + """ + def __init__(self, workload): + self.message = f"{workload} requires Kata to be installed" + super(KataNotInstalled, self).__init__(self.message) + + +class ODFNotInstalled(BenchmarkOperatorError): + """ + This class raises an error when the ODF operator is not installed + """ + def __init__(self, workload): + self.message = f"{workload} requires ODF to be installed, set 'ODF_PVC' to False to run with Ephemeral" + super(ODFNotInstalled, self).__init__(self.message) class EmptyLSOPath(BenchmarkOperatorError): """ - This class is error that LSO path is empty + This class raises an error when the LSO path is empty """ def __init__(self): self.message = "LSO path is empty" super(EmptyLSOPath, self).__init__(self.message) + class PrometheusSnapshotFailed(BenchmarkOperatorError): """ - Prometheus snapshot failed + This class raises an error when a Prometheus snapshot fails """ def __init__(self, err): self.message = f'Prometheus snapshot failed: {err}' diff --git a/benchmark_runner/benchmark_operator/benchmark_operator_workloads.py b/benchmark_runner/benchmark_operator/benchmark_operator_workloads.py index f4efe7c72..28ce8aae6 100644 --- a/benchmark_runner/benchmark_operator/benchmark_operator_workloads.py +++ b/benchmark_runner/benchmark_operator/benchmark_operator_workloads.py @@ -8,7 +8,7 @@ class BenchmarkOperatorWorkloads(BenchmarkOperatorWorkloadsOperations): """ - This class contains all 
the custom_workloads + This class runs benchmark-operator workload operations """ def __init__(self): super().__init__() @@ -16,7 +16,7 @@ def __init__(self): @logger_time_stamp def run(self): """ - The method run workload + The method runs benchmark-operator workload :return: """ diff --git a/benchmark_runner/benchmark_operator/benchmark_operator_workloads_operations.py b/benchmark_runner/benchmark_operator/benchmark_operator_workloads_operations.py index ede1b595c..0c2925fbb 100644 --- a/benchmark_runner/benchmark_operator/benchmark_operator_workloads_operations.py +++ b/benchmark_runner/benchmark_operator/benchmark_operator_workloads_operations.py @@ -12,7 +12,7 @@ from benchmark_runner.common.template_operations.template_operations import TemplateOperations from benchmark_runner.common.elasticsearch.elasticsearch_operations import ElasticSearchOperations from benchmark_runner.common.ssh.ssh import SSH -from benchmark_runner.benchmark_operator.benchmark_operator_exceptions import ODFNonInstalled, EmptyLSOPath +from benchmark_runner.benchmark_operator.benchmark_operator_exceptions import ODFNotInstalled, CNVNotInstalled, KataNotInstalled, EmptyLSOPath from benchmark_runner.main.environment_variables import environment_variables from benchmark_runner.common.clouds.shared.s3.s3_operations import S3Operations from benchmark_runner.common.prometheus.prometheus_snapshot import PrometheusSnapshot @@ -21,7 +21,7 @@ class BenchmarkOperatorWorkloadsOperations: """ - This class contains all the custom_workloads + This class contains benchmark-operator workload operations """ def __init__(self): # environment variables @@ -82,7 +82,7 @@ def __init__(self): def set_login(self, kubeadmin_password: str = ''): """ - This method set oc login + This method sets oc login :param kubeadmin_password: :return: """ @@ -93,7 +93,7 @@ def set_login(self, kubeadmin_password: str = ''): @logger_time_stamp def update_node_selector(self, runner_path: str = environment_variables.environment_variables_dict['runner_path'], yaml_path: str = '', pin_node: str = ''): """ - This method update node selector in yaml + This method updates node selector in yaml @return: """ data = [] @@ -118,7 +118,7 @@ def update_node_selector(self, runner_path: str = environment_variables.environm @logger_time_stamp def make_deploy_benchmark_controller_manager(self, runner_path: str = environment_variables.environment_variables_dict['runner_path']): """ - This method make deploy benchmark operator + This method deploys the benchmark operator :return: """ benchmark_operator_path = 'benchmark-operator' @@ -135,7 +135,7 @@ def make_deploy_benchmark_controller_manager(self, runner_path: str = environmen @logger_time_stamp def make_undeploy_benchmark_controller_manager(self, runner_path: str = environment_variables.environment_variables_dict['runner_path']): """ - This method make undeploy benchmark operator + This method removes a benchmark-operator deployment :return: """ benchmark_operator_path = 'benchmark-operator' @@ -148,7 +148,7 @@ def make_undeploy_benchmark_controller_manager(self, runner_path: str = environm @logger_time_stamp def make_undeploy_benchmark_controller_manager_if_exist(self, runner_path: str = environment_variables.environment_variables_dict['runner_path']): """ - This method make undeploy benchmark controller manager if exist + This method undeploys benchmark controller manager, if it exists @return: """ # delete benchmark-operator pod if exist @@ -159,14 +159,14 @@ def 
make_undeploy_benchmark_controller_manager_if_exist(self, runner_path: str = @logger_time_stamp def login(self): """ - This method login to the cluster + This method logs in to the cluster """ self._oc.login() @logger_time_stamp def tear_down_pod_after_error(self, yaml: str, pod_name: str): """ - This method tear down pod in case of error + This method tears down pod in case of error @param yaml: @param pod_name: @return: @@ -178,7 +178,7 @@ def tear_down_pod_after_error(self, yaml: str, pod_name: str): @logger_time_stamp def tear_down_vm_after_error(self, yaml: str, vm_name: str): """ - This method tear down vm in case of error + This method tears down vm in case of error @param yaml: @param vm_name: """ @@ -189,7 +189,7 @@ def tear_down_vm_after_error(self, yaml: str, vm_name: str): @logger_time_stamp def system_metrics_collector(self, workload: str, es_fetch_min_time: int = None): """ - This method run system metrics collector + This method runs system metrics collector @param workload: the workload @param es_fetch_min_time: :return: @@ -211,8 +211,8 @@ def system_metrics_collector(self, workload: str, es_fetch_min_time: int = None) def __get_metadata(self, kind: str = None, database: str = None, status: str = None, run_artifacts_url: str = None, uuid: str = None) -> dict: """ - This method return metadata kind and database argument are optional - @param kind: optional: pod, vm, or kata + This method returns metadata for a run, optionally filtered by runtime kind and database + @param kind: optionally: pod, vm, or kata @param database: optional:mssql, postgres or mariadb @param status: @param run_artifacts_url: @@ -255,7 +255,7 @@ def __get_metadata(self, kind: str = None, database: str = None, status: str = N @logger_time_stamp def _update_elasticsearch_index(self, index: str, id: str, kind: str, status: str, run_artifacts_url: str, database: str = ''): """ - This method update elasticsearch id + This method updates elasticsearch id :param index: :param id: :param kind: @@ -268,7 +268,7 @@ def _update_elasticsearch_index(self, index: str, id: str, kind: str, status: st def _upload_to_elasticsearch(self, index: str, kind: str, status: str, run_artifacts_url: str, database: str = '', uuid: str = ''): """ - This method upload to elasticsearch + This method uploads data to elasticsearch :param index: :param kind: :param status: @@ -280,7 +280,7 @@ def _upload_to_elasticsearch(self, index: str, kind: str, status: str, run_artif def _verify_elasticsearch_data_uploaded(self, index: str, uuid: str, workload: str = '', fast_check: bool = False, timeout: int = None, es_fetch_min_time: int = None): """ - This method verify that elasticsearch data uploaded + This method verifies that elasticsearch data uploaded :param index: :param uuid: :param workload: @@ -295,7 +295,7 @@ def _verify_elasticsearch_data_uploaded(self, index: str, uuid: str, workload: s def _upload_workload_to_elasticsearch(self, index: str, kind: str, status: str, result: dict = None): """ - This method upload to elasticsearch + This method uploads to elasticsearch :param index: :param kind: :param status: @@ -306,7 +306,7 @@ def _upload_workload_to_elasticsearch(self, index: str, kind: str, status: str, def _verify_elasticsearch_workload_data_uploaded(self, index: str, uuid: str): """ - This method verify that elasticsearch data uploaded + This method verifies that elasticsearch data was uploaded :param index: :param uuid: :return: @@ -315,7 +315,7 @@ def _verify_elasticsearch_workload_data_uploaded(self, index: str, uuid: str): 
def _create_vm_log(self, labels: list) -> str: """ - This method set vm log per workload + This method sets vm log per workload :param labels: list of labels :return: vm_name """ @@ -327,7 +327,7 @@ def _create_vm_log(self, labels: list) -> str: def _create_pod_log(self, label: str = '', database: str = '') -> str: """ - This method create pod log per workload + This method creates pod log per workload :param label:pod label :param database: :return: @@ -340,7 +340,7 @@ def _create_pod_log(self, label: str = '', database: str = '') -> str: def _get_run_artifacts_hierarchy(self, workload_name: str = '', is_file: bool = False): """ - This method return log hierarchy + This method returns log hierarchy :param workload_name: workload name :param is_file: is file name :return: @@ -358,7 +358,7 @@ def _get_run_artifacts_hierarchy(self, workload_name: str = '', is_file: bool = def _create_run_artifacts(self, workload: str = '', database: str = '', labels: list = [], pod: bool = True): """ - This method create pod logs of benchmark-controller-manager, system-metrics and workload pod + This method creates pod logs of benchmark-controller-manager, system-metrics and workload pod :param workload: workload name :param database: database name :param pod: False in case of vm @@ -383,7 +383,7 @@ def _create_run_artifacts(self, workload: str = '', database: str = '', labels: def __make_run_artifacts_tarfile(self, workload: str): """ - This method tar.gz log path and return the tar.gz path + This method compresses the log file and returns the compressed path :return: """ tar_run_artifacts_path = f"{self._run_artifacts_path}.tar.gz" @@ -394,7 +394,7 @@ def __make_run_artifacts_tarfile(self, workload: str): @logger_time_stamp def delete_local_artifacts(self): """ - This method delete local artifacts + This method deletes local artifacts :return: """ workload = self._workload_name @@ -428,7 +428,7 @@ def upload_run_artifacts_to_s3(self): @logger_time_stamp def start_prometheus(self): """ - This method start collection of Prometheus snapshot + This method starts collection of Prometheus snapshot :return: """ if self._enable_prometheus_snapshot: @@ -442,7 +442,7 @@ def start_prometheus(self): @logger_time_stamp def end_prometheus(self): """ - This method retrieve the Prometheus snapshot + This method retrieves the Prometheus snapshot :return: """ if self._enable_prometheus_snapshot: @@ -454,20 +454,20 @@ def end_prometheus(self): raise err @logger_time_stamp - def odf_pvc_verification(self): + def odf_workload_verification(self): """ - This method verified if odf or pvc is required for workload, raise error in case of missing odf + This method verifies whether the ODF operator is installed for ODF workloads and raises an error if it is missing. 
:return: """ workload_name = self._workload.split('_') if workload_name[0] in self._workloads_odf_pvc: if not self._oc.is_odf_installed(): - raise ODFNonInstalled() + raise ODFNotInstalled(workload=self._workload) @logger_time_stamp def verify_lso(self): """ - This method Verifies that lso path exist + This method verifies that lso path exist :return: """ if not self._lso_disk_id: @@ -476,7 +476,7 @@ def verify_lso(self): @logger_time_stamp def delete_all(self): """ - This method delete all pod or unbound pv in namespace + This method deletes all pod or unbound pv in namespace :return: """ # make undeploy benchmark controller manager if exist @@ -490,7 +490,7 @@ def delete_all(self): @logger_time_stamp def clear_nodes_cache(self): """ - This method clear nodes cache + This method clears nodes cache """ self._oc.clear_node_caches() @@ -499,10 +499,16 @@ def initialize_workload(self): This method includes all the initialization of workload :return: """ + # Verify that CNV operator in installed for CNV workloads + if '_vm' in self._workload and not self._oc.is_cnv_installed(): + raise CNVNotInstalled(workload=self._workload) + # Verify that Kata operator in installed for kata workloads + if '_kata' in self._workload and not self._oc.is_kata_installed(): + raise KataNotInstalled(workload=self._workload) self.delete_all() self.clear_nodes_cache() if self._odf_pvc: - self.odf_pvc_verification() + self.odf_workload_verification() if 'lso' in self._workload: self.verify_lso() # make deploy benchmark controller manager diff --git a/benchmark_runner/benchmark_operator/hammerdb_pod.py b/benchmark_runner/benchmark_operator/hammerdb_pod.py index 8b2e091e1..53a077f7a 100644 --- a/benchmark_runner/benchmark_operator/hammerdb_pod.py +++ b/benchmark_runner/benchmark_operator/hammerdb_pod.py @@ -10,13 +10,13 @@ class HammerdbPod(BenchmarkOperatorWorkloadsOperations): """ - This class for uperf workload + This class runs hammerdb workload """ ES_FETCH_TIME = 30 def __init__(self): """ - All inherit from WorkloadsOperations + All inherit from BenchmarkOperatorWorkloadsOperations """ super().__init__() self.__name = '' @@ -30,7 +30,7 @@ def __init__(self): @logger_time_stamp def run(self): """ - This method run hammerdb workload + This method runs hammerdb workload :return: """ try: diff --git a/benchmark_runner/benchmark_operator/hammerdb_vm.py b/benchmark_runner/benchmark_operator/hammerdb_vm.py index 284d85e8b..7a25a8644 100644 --- a/benchmark_runner/benchmark_operator/hammerdb_vm.py +++ b/benchmark_runner/benchmark_operator/hammerdb_vm.py @@ -9,13 +9,13 @@ class HammerdbVM(BenchmarkOperatorWorkloadsOperations): """ - This class for hammerdb vm workload + This class runs hammerdb vm workload """ ES_FETCH_TIME = 30 def __init__(self): """ - All inherit from WorkloadsOperations + All inherit from BenchmarkOperatorWorkloadsOperations """ super().__init__() self.__name = '' @@ -29,7 +29,7 @@ def __init__(self): @logger_time_stamp def run(self): """ - This method run hammerdb vm workload + This method runs hammerdb vm workload :return: """ try: diff --git a/benchmark_runner/benchmark_operator/stressng_pod.py b/benchmark_runner/benchmark_operator/stressng_pod.py index 39c3cd0fa..5deff7a16 100644 --- a/benchmark_runner/benchmark_operator/stressng_pod.py +++ b/benchmark_runner/benchmark_operator/stressng_pod.py @@ -10,11 +10,11 @@ class StressngPod(BenchmarkOperatorWorkloadsOperations): """ - This class for stressng workload + This class runs stressng workload """ def __init__(self): """ - All inherit from 
WorkloadsOperations + All inherit from BenchmarkOperatorWorkloadsOperations """ super().__init__() self.__name = '' @@ -26,7 +26,7 @@ def __init__(self): @logger_time_stamp def run(self): """ - This method run stressng workload + This method runs stressng workload :return: """ try: diff --git a/benchmark_runner/benchmark_operator/stressng_vm.py b/benchmark_runner/benchmark_operator/stressng_vm.py index 61d537e90..8cca42d9b 100644 --- a/benchmark_runner/benchmark_operator/stressng_vm.py +++ b/benchmark_runner/benchmark_operator/stressng_vm.py @@ -9,11 +9,11 @@ class StressngVM(BenchmarkOperatorWorkloadsOperations): """ - This class for stressng vm workload + This class runs stressng vm workload """ def __init__(self): """ - All inherit from WorkloadsOperations + All inherit from BenchmarkOperatorWorkloadsOperations """ super().__init__() self.__name = '' @@ -25,7 +25,7 @@ def __init__(self): @logger_time_stamp def run(self): """ - This method run stressng vm workload + This method runs stressng vm workload :return: """ try: diff --git a/benchmark_runner/benchmark_operator/uperf_pod.py b/benchmark_runner/benchmark_operator/uperf_pod.py index 2d7f6aa4d..a2ed8bd2d 100644 --- a/benchmark_runner/benchmark_operator/uperf_pod.py +++ b/benchmark_runner/benchmark_operator/uperf_pod.py @@ -10,11 +10,11 @@ class UperfPod(BenchmarkOperatorWorkloadsOperations): """ - This class for uperf workload + This class runs uperf workload """ def __init__(self): """ - All inherit from WorkloadsOperations + All inherit from BenchmarkOperatorWorkloadsOperations """ super().__init__() self.__name = '' @@ -26,7 +26,7 @@ def __init__(self): @logger_time_stamp def run(self): """ - This method run uperf workload + This method runs uperf workload :return: """ try: diff --git a/benchmark_runner/benchmark_operator/uperf_vm.py b/benchmark_runner/benchmark_operator/uperf_vm.py index 291c7f953..8d2466483 100644 --- a/benchmark_runner/benchmark_operator/uperf_vm.py +++ b/benchmark_runner/benchmark_operator/uperf_vm.py @@ -9,11 +9,11 @@ class UperfVM(BenchmarkOperatorWorkloadsOperations): """ - This class for uperf vm workload + This class runs uperf vm workload """ def __init__(self): """ - All inherit from WorkloadsOperations + All inherit from BenchmarkOperatorWorkloadsOperations """ super().__init__() self.__name = '' @@ -25,7 +25,7 @@ def __init__(self): @logger_time_stamp def run(self): """ - This method run uperf vm workload + This method runs uperf vm workload :return: """ try: diff --git a/benchmark_runner/common/oc/oc.py b/benchmark_runner/common/oc/oc.py index 82e242b6a..74c98503a 100644 --- a/benchmark_runner/common/oc/oc.py +++ b/benchmark_runner/common/oc/oc.py @@ -42,35 +42,35 @@ def __init__(self, kubeadmin_password: str = ''): def get_ocp_server_version(self): """ - This method return ocp server version + This method returns ocp server version :return: """ return self.run(f"{self.__cli} get clusterversion version -o jsonpath='{{.status.desired.version}}'") def get_cnv_version(self): """ - This method return cnv version + This method returns cnv version :return: """ return self.run(f"{self.__cli} get csv -n openshift-cnv $(oc get csv -n openshift-cnv --no-headers | awk '{{ print $1; }}') -ojsonpath='{{.spec.version}}'") def get_odf_version(self): """ - This method return odf version + This method returns odf version :return: """ return self.run(f"{self.__cli} get csv -n openshift-storage -ojsonpath='{{.items[0].spec.labels.full_version}}'") def get_pv_disk_ids(self): """ - This method return list of pv disk ids 
+ This method returns list of pv disk ids """ result = self.run(f"{self.__cli} get pv -o jsonpath={{.items[*].metadata.annotations.'storage\.openshift\.com/device-id'}}") return result.split() def get_worker_disk_ids(self): """ - The method return worker disk ids + The method returns worker disk ids """ workers_disk_ids = [] if self.__worker_disk_ids: @@ -88,7 +88,7 @@ def remove_lso_path(self): def get_free_disk_id(self): """ - This method return free disk (workers_all_disk_ids - workers_odf_pv_disk_ids) + This method returns free disk (workers_all_disk_ids - workers_odf_pv_disk_ids) """ workers_disk_ids = self.get_worker_disk_ids() workers_pv_disk_ids = self.get_pv_disk_ids() @@ -114,13 +114,13 @@ def get_kata_rpm_version(self, node: str): def _get_kata_default_channel(self): """ - Retrieve the default channel for Kata + This method retrieves the default channel for Kata """ return self.run(f"{self.__cli} get packagemanifest -n openshift-marketplace sandboxed-containers-operator -ojsonpath='{{.status.defaultChannel}}'") def _get_kata_default_channel_field(self, channel_field: str): """ - Retrieve a field from the packagemanifest for the default Kata channel + This method retrieves a field from the packagemanifest for the default Kata channel """ default_channel = f'"{self._get_kata_default_channel()}"' command = f"{self.__cli} get packagemanifest -n openshift-marketplace sandboxed-containers-operator -ojson | jq -r '[foreach .status.channels[] as $channel ([[],[]];0;(if ($channel.name == {default_channel}) then $channel.{channel_field} else null end))] | flatten | map (select (. != null))[]'" @@ -128,31 +128,31 @@ def _get_kata_default_channel_field(self, channel_field: str): def _get_kata_csv(self): """ - Retrieve the CSV of the sandboxed containers operator for installation" + This method retrieves the CSV of the sandboxed containers operator for installation" """ return self._get_kata_default_channel_field("currentCSV") def _get_kata_catalog_source(self): """ - Retrieve the catalog source of the sandboxed containers operator for installation" + This method retrieves the catalog source of the sandboxed containers operator for installation" """ return self.run(f"{self.__cli} get packagemanifest -n openshift-marketplace sandboxed-containers-operator -ojsonpath='{{.status.catalogSource}}'") def _get_kata_channel(self): """ - Retrieve the channel of the sandboxed containers operator for installation" + This method retrieves the channel of the sandboxed containers operator for installation" """ return self._get_kata_default_channel_field("name") def _get_kata_namespace(self): """ - Retrieve the namespace of the sandboxed containers operator for installation" + This method retrieves the namespace of the sandboxed containers operator for installation" """ return self._get_kata_default_channel_field('currentCSVDesc.annotations."operatorframework.io/suggested-namespace"') def set_kata_threads_pool(self, thread_pool_size: str): """ - This methods sets kata thread-pool-size in every worker node + This method sets kata thread-pool-size in every worker node @param thread_pool_size: @return: """ @@ -160,7 +160,7 @@ def set_kata_threads_pool(self, thread_pool_size: str): def delete_kata_threads_pool(self): """ - This methods deletes kata thread-pool-size from every worker node + This method deletes kata thread-pool-size from every worker node @return: """ self.run(fr"""{self.__cli} get nodes -l node-role.kubernetes.io/worker= -o jsonpath="{{range .items[*]}}{{.metadata.name}}{{'\\n'}}{{end}}" | xargs 
-I{{}} oc debug node/{{}} -- chroot /host sh -c "rm -f /etc/kata-containers/configuration.toml" """) @@ -168,7 +168,7 @@ @typechecked def populate_additional_template_variables(self, env: dict): """ - Populate any additional variables needed for setup templates + This method populates any additional variables needed for setup templates """ if self.__kata_csv: # custom kata version env['kata_csv'] = self.__kata_csv @@ -180,7 +180,7 @@ def is_cnv_installed(self): """ - This method check if cnv operator is installed + This method checks if cnv operator is installed :return: """ verify_cmd = f"{self.__cli} get csv -n openshift-cnv -ojsonpath='{{.items[0].status.phase}}'" @@ -190,7 +190,7 @@ def is_odf_installed(self): """ - This method check if odf operator is installed + This method checks if odf operator is installed :return: """ verify_cmd = f"{self.__cli} get csv -n openshift-storage -ojsonpath='{{.items[0].status.phase}}'" @@ -202,7 +202,7 @@ def check_dv_status(self, status: str, namespace: str = environment_variables.environment_variables_dict['namespace']): """ - This method check dv status + This method checks dv status :return: """ namespace = f'-n {namespace}' if namespace else '' @@ -217,7 +217,7 @@ def wait_for_dv_status(self, status: str = 'Succeeded', timeout: int = int(environment_variables.environment_variables_dict['timeout'])): """ - This method wait for methods status + This method waits for dv status @return: True/ False if reach to status """ current_wait_time = 0 @@ -239,7 +239,7 @@ def get_odf_disk_count(self): def is_kata_installed(self): """ - This method check if kata operator is installed + This method checks if kata operator is installed :return: """ verify_cmd = "oc get csv -n openshift-sandboxed-containers-operator -ojsonpath='{.items[0].status.phase}'" @@ -249,21 +249,21 @@ def get_master_nodes(self): """ - This method return master nodes + This method returns master nodes :return: """ return self.run(fr""" {self.__cli} get nodes -l node-role.kubernetes.io/master= -o jsonpath="{{range .items[*]}}{{.metadata.name}}{{'\n'}}{{end}}" """) def get_worker_nodes(self): """ - This method return worker nodes + This method returns worker nodes :return: """ return self.run(fr""" {self.__cli} get nodes -l node-role.kubernetes.io/worker= -o jsonpath="{{range .items[*]}}{{.metadata.name}}{{'\n'}}{{end}}" """) def delete_available_released_pv(self): """ - This method delete available or released pv because that avoid launching new pv + This method deletes available or released PVs because they prevent launching new PVs """ pv_status_list = self.run(fr"{self.__cli} get pv -ojsonpath={{..status.phase}}").split() for ind, pv_status in enumerate(pv_status_list): @@ -280,7 +280,7 @@ def clear_node_caches(self): def __get_short_uuid(self, workload: str): """ - This method return uuid + This method returns uuid :return: """ long_uuid = self.get_long_uuid(workload=workload) @@ -290,7 +290,7 @@ def get_num_active_nodes(self): """ - This method return the number of active nodes + This method returns the number of active nodes :return: """ # count the number of active master/worker nodes
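
For reference, the is_cnv_installed, is_odf_installed and is_kata_installed checks above all share one pattern: query the operator's CSV phase with oc and treat 'Succeeded' as installed. A minimal standalone sketch (not part of the patch), assuming run() is a helper that executes a shell command and returns its stdout:

# Minimal sketch of the CSV-phase check behind is_cnv_installed()/is_odf_installed()/
# is_kata_installed(); 'run' is an assumed stand-in for the class's shell helper.
def is_operator_installed(run, cli: str, namespace: str) -> bool:
    # The first CSV in the operator's namespace reports phase 'Succeeded' once installed
    verify_cmd = f"{cli} get csv -n {namespace} -ojsonpath='{{.items[0].status.phase}}'"
    return 'Succeeded' in run(verify_cmd)

# e.g. is_operator_installed(run, 'oc', 'openshift-cnv')                            # CNV
#      is_operator_installed(run, 'oc', 'openshift-sandboxed-containers-operator')  # Kata
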
@@ -304,7 +304,7 @@ @logger_time_stamp def apply_security_privileged(self, namespace: str = environment_variables.environment_variables_dict['namespace']): """ - This method apply security privileged for namespace + This method applies security privileged for namespace @param namespace: @return: """ @@ -315,7 +315,7 @@ @logger_time_stamp def create_async(self, yaml: str, is_check: bool = False): """ - This method create yaml in async + This method creates yaml in async @param yaml: @param is_check: :return: """ @@ -329,7 +329,7 @@ @logger_time_stamp def delete_async(self, yaml: str): """ - This method delete yaml in async + This method deletes yaml in async :param yaml: :return: """ @@ -342,7 +342,7 @@ def _get_pod_name(self, pod_name: str, namespace: str = environment_variables.environment_variables_dict['namespace']): """ - This method return pod name if exist or raise error + This method returns the pod name if it exists, otherwise raises an error :param pod_name: :param namespace: :return: """ @@ -356,7 +356,7 @@ def pod_exists(self, pod_name: str, namespace: str = environment_variables.environment_variables_dict['namespace']): """ - This method return True if exist or False if not + This method returns True if the pod exists, False if not :param pod_name: :param namespace: :return: """ @@ -371,7 +371,7 @@ def pod_label_exists(self, label_name: str, namespace: str = environment_variables.environment_variables_dict['namespace']): """ - This method return True if pod exist or not by label + This method returns True if a pod with the given label exists :param label_name: :param namespace: :return: """ @@ -386,7 +386,7 @@ @typechecked() def get_long_uuid(self, workload: str): """ - This method return uuid + This method returns uuid :return: """ long_uuid = self.run( @@ -409,7 +409,7 @@ def get_ocp_minor_version(self): def get_prom_token(self): """ - This method return prometheus token + This method returns prometheus token :return: """ # OCP 4.10 and below @@ -431,7 +431,7 @@ def collect_events(self): @logger_time_stamp def login(self): """ - This method login to the cluster + This method logs in to the cluster :return: """ try: @@ -445,7 +445,7 @@ @logger_time_stamp def get_pod(self, label: str, database: str = '', namespace: str = environment_variables.environment_variables_dict['namespace']): """ - This method get pods according to label + This method gets pods according to label :param label: :param database: :param namespace: :return: """ @@ -463,7 +463,7 @@ @logger_time_stamp def save_pod_log(self, pod_name: str, database: str = '', log_type: str = ''): """ - This method save pod log in log_path + This method saves pod log in log_path :param pod_name: pod name with uuid :param database: database :param log_type: log type extension :return: """ @@ -484,7 +484,7 @@ def describe_pod(self, pod_name: str, namespace: str = ''): """ - This method describe pod into log + This method describes pod into log :param pod_name: pod name with uuid :param namespace: namespace :return: output_filename """ @@ -496,7 +496,7 @@ @logger_time_stamp def get_pods(self): """ - This method get pods + This method retrieves information on benchmark-runner pods in oc get pod format :return: """ return self.run(f'{self.__cli} get pods', 
is_check=True) @@ -507,7 +507,7 @@ def wait_for_pod_create(self, pod_name: str, namespace: str = environment_variables.environment_variables_dict['namespace'], timeout: int = int(environment_variables.environment_variables_dict['timeout'])): """ - This method is wait till pod name is creating or throw exception after timeout + This method waits till pod name is creating or throw exception after timeout :param namespace: :param pod_name: :param timeout: @@ -530,7 +530,7 @@ def wait_for_pod_terminate(self, pod_name: str, namespace: str = environment_variables.environment_variables_dict['namespace'], timeout: int = int(environment_variables.environment_variables_dict['timeout'])): """ - This method is wait till pod name is terminating or throw exception after timeout + This method waits till pod name is terminating or throw exception after timeout :param namespace: :param pod_name: :param timeout: @@ -551,7 +551,7 @@ def create_pod_sync(self, yaml: str, pod_name: str, namespace: str = environment_variables.environment_variables_dict['namespace'], timeout: int = int(environment_variables.environment_variables_dict['timeout'])): """ - This method create pod yaml in async + This method creates pod yaml in async :param namespace: :param timeout: :param pod_name: @@ -567,7 +567,7 @@ def delete_pod_sync(self, yaml: str, pod_name: str, namespace: str = environment_variables.environment_variables_dict['namespace'], timeout: int = int(environment_variables.environment_variables_dict['timeout'])): """ - This method delete pod yaml in async, only if exist and return false if not exist + This method deletes pod yaml in async, only if exist and return false if not exist :param namespace: :param timeout: :param pod_name: @@ -583,7 +583,7 @@ def delete_pod_sync(self, yaml: str, pod_name: str, @typechecked def delete_namespace(self, namespace: str = environment_variables.environment_variables_dict['namespace']): """ - This method delete namespace + This method deletes namespace :param namespace: :return: """ @@ -595,7 +595,7 @@ def wait_for_initialized(self, label: str, workload: str = '', status: str = 'In namespace: str = environment_variables.environment_variables_dict['namespace'], timeout: int = SHORT_TIMEOUT): """ - This method wait to pod to be initialized + This method waits to pod to be initialized :param namespace: :param label: :param status: @@ -659,7 +659,7 @@ def wait_for_ready(self, label: str, run_type: str = 'pod', workload: str = '', @typechecked def exec(self, command: str, pod_name: str, namespace: str = environment_variables.environment_variables_dict['namespace'], container: str = ''): """ - oc exec a command and return the answer + This method executes a command within a specified pod and optional container and returns the output :param command: :param pod_name: :param namespace: @@ -678,7 +678,7 @@ def terminate_pod_sync(self, pod_name: str, namespace: str = environment_variables.environment_variables_dict['namespace'], timeout: int = int(environment_variables.environment_variables_dict['timeout'])): """ - Delete a pod based on name and namespace only + This method deletes a pod based on name and namespace only :param pod_name: :param namespace: :param timeout: @@ -697,7 +697,7 @@ def wait_for_pod_ready(self, pod_name: str, namespace: str = environment_variables.environment_variables_dict['namespace'], timeout: int = int(environment_variables.environment_variables_dict['timeout'])): """ - Wait for a pod to be ready and running + This method waits for a pod to be ready and running 
:param pod_name: :param namespace: :param timeout: @@ -722,7 +722,7 @@ def wait_for_pod_completed(self, label: str, workload: str = '', label_uuid: boo namespace: str = environment_variables.environment_variables_dict['namespace'], timeout: int = int(environment_variables.environment_variables_dict['timeout'])): """ - This method wait to pod to be completed + This method waits for a pod to be completed :param workload: :param label: :param label_uuid: need to get uuid from label (benchmark-operator) @@ -756,7 +756,7 @@ def wait_for_pod_completed(self, label: str, workload: str = '', label_uuid: boo def describe_vmi(self, vm_name: str, namespace: str): """ - This method describe vmi into log + This method describes vmi into log :param vm_name: vm name with uuid :param namespace: namespace :return: output_filename @@ -782,7 +782,7 @@ def _get_vm_name(self, vm_name: str, namespace: str = environment_variables.envi @typechecked def vm_exists(self, vm_name: str, namespace: str = environment_variables.environment_variables_dict['namespace']): """ - This method return True or False if vm name exist + This method returns True or False if vm name exist :param vm_name: :param namespace: :return: True or False @@ -798,7 +798,7 @@ def vm_exists(self, vm_name: str, namespace: str = environment_variables.environ @logger_time_stamp def get_vm(self, label: str = '', namespace: str = environment_variables.environment_variables_dict['namespace']): """ - This method get vm according to label + This method gets vm according to label :param label: :param namespace: :return: @@ -813,7 +813,7 @@ def get_vm(self, label: str = '', namespace: str = environment_variables.environ @logger_time_stamp def __verify_vm_log_complete(self, vm_name: str, timeout: int = int(environment_variables.environment_variables_dict['timeout'])): """ - This method verify that vm log is complete + This method verifies that vm log is complete :param vm_name: vm name with uuid :return: """ @@ -832,7 +832,7 @@ def __verify_vm_log_complete(self, vm_name: str, timeout: int = int(environment_ @logger_time_stamp def get_exposed_vm_port(self, vm_name: str, namespace: str = environment_variables.environment_variables_dict['namespace']): """ - The method get exposed vm port + The method gets exposed vm port @param vm_name: @return: """ @@ -854,7 +854,7 @@ def wait_for_vm_status(self, vm_name: str = '', status: VMStatus = VMStatus.Stop namespace: str = environment_variables.environment_variables_dict['namespace'], timeout: int = SHORT_TIMEOUT): """ - This method wait for VM to reach the specified status + This method waits for VM to reach the specified status :param vm_name: :param status: Stopped, Starting, Running :param namespace: @@ -897,7 +897,7 @@ def wait_for_vm_login(self, vm_name: str = '', node_ip: str = '', vm_node_port: @logger_time_stamp def get_vm_node(self, vm_name: str, namespace: str = environment_variables.environment_variables_dict['namespace']): """ - This method get vm node + This method gets vm node :param vm_name: :param namespace: :return: @@ -911,7 +911,7 @@ def wait_for_vm_create(self, vm_name: str, namespace: str = environment_variables.environment_variables_dict['namespace'], timeout: int = int(environment_variables.environment_variables_dict['timeout'])): """ - This method is wait till vm name is creating or throw exception after timeout + This method waits till vm name is creating or throw exception after timeout :param vm_name: :param namespace: :param timeout: @@ -934,7 +934,7 @@ def create_vm_sync(self, yaml: str, 
vm_name: str, namespace: str = environment_variables.environment_variables_dict['namespace'], timeout: int = int(environment_variables.environment_variables_dict['timeout'])): """ - This method create vm synchronously + This method creates vm synchronously :param timeout: :param vm_name: :param yaml: @@ -965,7 +965,7 @@ def delete_vm_sync(self, yaml: str, vm_name: str, @logger_time_stamp def delete_all_vms(self, namespace: str = environment_variables.environment_variables_dict['namespace']): """ - This method delete all vms + This method deletes all vms :return: """ namespace = f'-n {namespace}' if namespace else '' @@ -1046,7 +1046,7 @@ def wait_for_vm_log_completed(self, vm_name: str = '', end_stamp: str = '', outp @logger_time_stamp def extract_vm_results(self, vm_name: str = '', start_stamp: str = '', end_stamp: str = '', output_filename: str = ''): """ - This method extract vm results from vm output log + This method extracts vm results from vm output log :param vm_name: :param start_stamp: start of run stamp :param end_stamp: end of run stamp diff --git a/benchmark_runner/workloads/bootstorm_vm.py b/benchmark_runner/workloads/bootstorm_vm.py index 626b6412e..7f839608d 100644 --- a/benchmark_runner/workloads/bootstorm_vm.py +++ b/benchmark_runner/workloads/bootstorm_vm.py @@ -12,7 +12,7 @@ class BootstormVM(WorkloadsOperations): """ - This class run bootstorm vm + This class runs bootstorm vm """ def __init__(self): super().__init__() @@ -63,7 +63,7 @@ def _create_vm_scale(self, vm_num: str): def _run_vm_scale(self, vm_num: str): """ - This method start VMs in parallel and wait for login to be enabled + This method starts VMs in parallel and wait for login to be enabled """ vm_name = f'{self._workload_name}-{self._trunc_uuid}-{vm_num}' self._set_bootstorm_vm_start_time(vm_name=f'{self._workload_name}-{self._trunc_uuid}-{vm_num}') diff --git a/benchmark_runner/workloads/vdbench_pod.py b/benchmark_runner/workloads/vdbench_pod.py index ecf8b9052..55068f07c 100644 --- a/benchmark_runner/workloads/vdbench_pod.py +++ b/benchmark_runner/workloads/vdbench_pod.py @@ -11,7 +11,7 @@ class VdbenchPod(WorkloadsOperations): """ - This class run vdbench pod + This class runs vdbench pod """ def __init__(self): super().__init__() @@ -27,7 +27,7 @@ def __init__(self): def __create_pod_scale(self, pod_num: str): """ - This method create pod in parallel + This method creates pod in parallel """ self._oc.create_async(yaml=os.path.join(f'{self._run_artifacts_path}', f'{self.__name}_{pod_num}.yaml')) self._oc.wait_for_pod_create(pod_name=f'{self.__pod_name}-{pod_num}') @@ -55,14 +55,14 @@ def __run_pod_scale(self, pod_num: str): def __delete_pod_scale(self, pod_num: str): """ - This method create pod in parallel + This method creates pod in parallel """ self._oc.delete_async(yaml=os.path.join(f'{self._run_artifacts_path}', f'{self.__name}_{pod_num}.yaml')) @logger_time_stamp def run(self): """ - This method run the workload + This method runs the workload :return: """ try: diff --git a/benchmark_runner/workloads/vdbench_vm.py b/benchmark_runner/workloads/vdbench_vm.py index 2dece7fc2..5657ae223 100644 --- a/benchmark_runner/workloads/vdbench_vm.py +++ b/benchmark_runner/workloads/vdbench_vm.py @@ -11,7 +11,7 @@ class VdbenchVM(WorkloadsOperations): """ - This class run vdbench vm + This class runs vdbench vm """ START_STAMP = '@@~@@START-WORKLOAD@@~@@' END_STAMP = '@@~@@END-WORKLOAD@@~@@' @@ -31,14 +31,14 @@ def __init__(self): def __create_vm_scale(self, vm_num: str): """ - This method create vm in 
parallel + This method creates vm in parallel """ self._oc.create_async(yaml=os.path.join(f'{self._run_artifacts_path}', f'{self.__name}_{vm_num}.yaml')) self._oc.wait_for_vm_create(vm_name=f'{self.__vm_name}-{vm_num}') def __run_vm_scale(self, vm_num: str): """ - This method run vm in parallel + This method runs vm in parallel """ self._oc.wait_for_ready(label=f'app=vdbench-{self._trunc_uuid}-{vm_num}', run_type='vm', label_uuid=False) # Create vm log should be direct after vm is ready @@ -63,7 +63,7 @@ def __run_vm_scale(self, vm_num: str): def __delete_vm_scale(self, vm_num: str): """ - This method delete vm in parallel + This method deletes vm in parallel """ self._oc.delete_async(yaml=os.path.join(f'{self._run_artifacts_path}', f'{self.__name}_{vm_num}.yaml')) diff --git a/benchmark_runner/workloads/windows_vm.py b/benchmark_runner/workloads/windows_vm.py index e664944e8..0eb203186 100644 --- a/benchmark_runner/workloads/windows_vm.py +++ b/benchmark_runner/workloads/windows_vm.py @@ -11,7 +11,7 @@ class WindowsVM(BootstormVM): """ - This class run Windows vm + This class runs Windows vm """ def __init__(self): super().__init__() diff --git a/benchmark_runner/workloads/workloads.py b/benchmark_runner/workloads/workloads.py index 04e9116f7..5d8e85086 100644 --- a/benchmark_runner/workloads/workloads.py +++ b/benchmark_runner/workloads/workloads.py @@ -8,7 +8,7 @@ class Workloads(WorkloadsOperations): """ - This class create workload + This class runs workloads """ def __init__(self): """ diff --git a/benchmark_runner/workloads/workloads_exceptions.py b/benchmark_runner/workloads/workloads_exceptions.py index 4d7a4f504..0563eaea6 100644 --- a/benchmark_runner/workloads/workloads_exceptions.py +++ b/benchmark_runner/workloads/workloads_exceptions.py @@ -5,18 +5,36 @@ class BenchmarkRunnerError(Exception): pass -class ODFNonInstalled(BenchmarkRunnerError): +class CNVNotInstalled(BenchmarkRunnerError): """ - This class is error that ODF operator is not installed + This class raises an error when the CNV operator is not installed """ - def __init__(self): - self.message = "ODF is not installed, set 'ODF_PVC' to False" - super(ODFNonInstalled, self).__init__(self.message) + def __init__(self, workload): + self.message = f"{workload} requires CNV to be installed" + super(CNVNotInstalled, self).__init__(self.message) + + +class KataNotInstalled(BenchmarkRunnerError): + """ + This class raises an error when the Kata operator is not installed + """ + def __init__(self, workload): + self.message = f"{workload} requires Kata to be installed" + super(KataNotInstalled, self).__init__(self.message) + + +class ODFNotInstalled(BenchmarkRunnerError): + """ + This class raises an error when the ODF operator is not installed + """ + def __init__(self, workload): + self.message = f"{workload} requires ODF to be installed, set 'ODF_PVC' to False to run with Ephemeral" + super(ODFNotInstalled, self).__init__(self.message) class MissingScaleNodes(BenchmarkRunnerError): """ - This class is error that Missing scale nodes + This class raises an error for missing scale nodes """ def __init__(self): self.message = "Missing scale nodes" @@ -25,7 +43,7 @@ def __init__(self): class MissingRedis(BenchmarkRunnerError): """ - This class is error that Missing redis for scale synchronization + This class raises an error for missing redis for scale synchronization """ def __init__(self): self.message = "Missing redis"
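
The three Not-Installed exceptions carry the workload name, so the failure message identifies the run that was blocked. A minimal usage sketch (hypothetical, not part of the patch; 'bootstorm_vm' is an example workload name):

# Hypothetical usage of the exception classes added above
from benchmark_runner.workloads.workloads_exceptions import CNVNotInstalled

try:
    raise CNVNotInstalled(workload='bootstorm_vm')
except CNVNotInstalled as err:
    print(err)  # bootstorm_vm requires CNV to be installed
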
diff --git a/benchmark_runner/workloads/workloads_operations.py b/benchmark_runner/workloads/workloads_operations.py index e6170cf9d..fcaa8bdc5 100644 --- a/benchmark_runner/workloads/workloads_operations.py +++ b/benchmark_runner/workloads/workloads_operations.py @@ -8,7 +8,7 @@ from csv import DictReader from benchmark_runner.common.logger.logger_time_stamp import logger_time_stamp -from benchmark_runner.workloads.workloads_exceptions import ODFNonInstalled, MissingScaleNodes, MissingRedis +from benchmark_runner.workloads.workloads_exceptions import ODFNotInstalled, CNVNotInstalled, KataNotInstalled, MissingScaleNodes, MissingRedis from benchmark_runner.common.oc.oc import OC from benchmark_runner.common.virtctl.virtctl import Virtctl from benchmark_runner.common.elasticsearch.elasticsearch_operations import ElasticSearchOperations @@ -26,7 +26,7 @@ class WorkloadsOperations: REPEAT_TIMES = 3 SLEEP_TIME = 3 """ - This class run workloads + This class contains workloads operations """ def __init__(self): # environment variables @@ -85,8 +85,8 @@ def __init__(self): timeout=self._timeout) # Generate templates class self._template = TemplateOperations(workload=self._workload) - # set oc login + # set oc login if WorkloadsOperations.oc is None: WorkloadsOperations.oc = self.set_login(kubeadmin_password=self._kubeadmin_password) self._oc = WorkloadsOperations.oc @@ -109,7 +109,7 @@ def __get_workload_file_name(self, workload): def set_login(self, kubeadmin_password: str = ''): """ - This method set oc login + This method sets oc login :param kubeadmin_password: :return: oc instance """ @@ -120,7 +120,7 @@ @logger_time_stamp def delete_all(self): """ - This method delete all resources in namespace + This method deletes all resources in namespace :return: """ self._oc.delete_namespace() @@ -128,7 +128,7 @@ @logger_time_stamp def start_prometheus(self): """ - This method start collection of Prometheus snapshot + This method starts collection of Prometheus snapshot :return: """ if self._enable_prometheus_snapshot: @@ -142,7 +142,7 @@ @logger_time_stamp def end_prometheus(self): """ - This method retrieve the Prometheus snapshot + This method retrieves the Prometheus snapshot :return: """ if self._enable_prometheus_snapshot: @@ -154,19 +154,19 @@ @logger_time_stamp - def odf_pvc_verification(self): + def odf_workload_verification(self): """ - This method verified if odf or pvc is required for workload, raise error in case of missing odf + This method verifies whether the ODF operator is installed for ODF workloads and raises an error if it is missing. :return: """ workload_name = self._workload.split('_') if workload_name[0] in self._workloads_odf_pvc: if not self._oc.is_odf_installed(): - raise ODFNonInstalled() + raise ODFNotInstalled(workload=self._workload)
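
The renamed odf_workload_verification keys off the first token of the workload name; only workloads whose prefix appears in the ODF/PVC list require the ODF operator. A standalone sketch of that test (the sample workload list is an assumption for illustration):

# Sketch of the prefix test inside odf_workload_verification(); the sample
# tuple of ODF-backed workload prefixes is illustrative, not from the patch.
def needs_odf(workload: str, workloads_odf_pvc=('vdbench', 'hammerdb')) -> bool:
    return workload.split('_')[0] in workloads_odf_pvc

assert needs_odf('vdbench_pod') is True
assert needs_odf('uperf_pod') is False
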
:return: """ workload_name = self._workload.split('_') if workload_name[0] in self._workloads_odf_pvc: if not self._oc.is_odf_installed(): - raise ODFNonInstalled() + raise ODFNotInstalled(workload=self._workload) def _create_vm_log(self, labels: list) -> str: """ - This method set vm log per workload + This method sets vm log per workload :param labels: list of labels :return: vm_name """ @@ -178,7 +178,7 @@ def _create_vm_log(self, labels: list) -> str: def _create_pod_log(self, pod: str = '', log_type: str = ''): """ - This method create pod log per workload + This method creates pod log per workload :param pod: pod name :return: save_pod_log file """ @@ -187,7 +187,7 @@ def _create_pod_log(self, pod: str = '', log_type: str = ''): def _get_run_artifacts_hierarchy(self, workload_name: str = '', is_file: bool = False): """ - This method return log hierarchy + This method returns log hierarchy :param workload_name: workload name :param is_file: is file name :return: @@ -206,7 +206,7 @@ def _get_run_artifacts_hierarchy(self, workload_name: str = '', is_file: bool = @staticmethod def __is_float(value) -> bool: """ - This method check if value is float + This method checks if value is float :param value: :return: """ @@ -218,7 +218,7 @@ def __is_float(value) -> bool: def _create_scale_logs(self): """ - The method create scale logs + The method creates scale logs :return: """ self._create_pod_log(pod='state-signals-exporter', log_type='.log') @@ -226,7 +226,7 @@ def _create_scale_logs(self): def _create_pod_run_artifacts(self, pod_name: str, log_type: str): """ - This method create pod run artifacts + This method creates pod run artifacts :param pod_name: pod name :param log_type: log type extension :return: run results list of dicts @@ -251,7 +251,7 @@ def _create_pod_run_artifacts(self, pod_name: str, log_type: str): def _create_vm_run_artifacts(self, vm_name: str, start_stamp: str, end_stamp: str, log_type: str): """ - This method create vm run artifacts + This method creates vm run artifacts :param vm_name: vm name :param start_stamp: start stamp :param end_stamp: end stamp @@ -288,7 +288,7 @@ def _create_vm_run_artifacts(self, vm_name: str, start_stamp: str, end_stamp: st def __make_run_artifacts_tarfile(self, workload: str): """ - This method tar.gz log path and return the tar.gz path + This method compresses the log file and returns the compressed path :return: """ tar_run_artifacts_path = f"{self._run_artifacts_path}.tar.gz" @@ -300,7 +300,7 @@ def __make_run_artifacts_tarfile(self, workload: str): @logger_time_stamp def delete_local_artifacts(self): """ - This method delete local artifacts + This method deletes local artifacts :return: """ workload = self._workload.replace('_', '-') @@ -334,8 +334,8 @@ def upload_run_artifacts_to_s3(self): def __get_metadata(self, kind: str = None, status: str = None, result: dict = None) -> dict: """ - This method return metadata kind and database argument are optional - @param kind: optional: pod, vm, or kata + This method returns metadata for a run, optionally updates by runtime kind + @param kind: optionally: pod, vm, or kata @param status: @param result: :return: @@ -371,7 +371,7 @@ def __get_metadata(self, kind: str = None, status: str = None, result: dict = No def _upload_to_elasticsearch(self, index: str, kind: str, status: str, result: dict = None): """ - This method upload to elasticsearch + This method uploads results to elasticsearch :param index: :param kind: :param status: @@ -382,7 +382,7 @@ def _upload_to_elasticsearch(self, index: 
str, kind: str, status: str, result: d """ - This method upload to elasticsearch + This method uploads results to elasticsearch :param index: :param kind: :param status: :param result: """ @@ -382,7 +382,7 @@ def _upload_to_elasticsearch(self, index: str, kind: str, status: str, result: d def _verify_elasticsearch_data_uploaded(self, index: str, uuid: str): """ - This method verify that elasticsearch data uploaded + This method verifies that elasticsearch data was uploaded :param index: :param uuid: :return: """ @@ -392,7 +392,7 @@ @logger_time_stamp def update_ci_status(self, status: str, ci_minutes_time: int, benchmark_runner_id: str, benchmark_operator_id: str, benchmark_wrapper_id: str, ocp_install_minutes_time: int = 0, ocp_resource_install_minutes_time: int = 0): """ - This method update ci status Pass/Failed + This method updates ci status Pass/Failed :param status: Pass/Failed :param ci_minutes_time: ci time in minutes :param benchmark_runner_id: benchmark_runner last repository commit id :param benchmark_operator_id: benchmark_operator last repository commit id :param benchmark_wrapper_id: benchmark_wrapper last repository commit id :param ocp_install_minutes_time: ocp install minutes time :param ocp_resource_install_minutes_time: ocp resource install minutes time @@ -430,7 +430,7 @@ def split_run_bulks(self, iterable: range, limit: int = 1): @logger_time_stamp def parse_prometheus_metrics(data): """ - This method parse prometheus metrics and return summary result + This method parses prometheus metrics and returns a summary result @return: """ result_dict = {} @@ -453,7 +453,7 @@ def parse_prometheus_metrics(data): @logger_time_stamp def clear_nodes_cache(self): """ - This method clear nodes cache + This method clears nodes cache """ for i in range(self.REPEAT_TIMES-1): self._oc.clear_node_caches() @@ -465,10 +465,16 @@ def initialize_workload(self): This method includes all the initialization of workload :return: """ + # Verify that CNV operator is installed for CNV workloads + if '_vm' in self._workload and not self._oc.is_cnv_installed(): + raise CNVNotInstalled(workload=self._workload) + # Verify that Kata operator is installed for kata workloads + if '_kata' in self._workload and not self._oc.is_kata_installed(): + raise KataNotInstalled(workload=self._workload) self.delete_all() self.clear_nodes_cache() if self._odf_pvc: - self.odf_pvc_verification() + self.odf_workload_verification() self._template.generate_yamls(scale=str(self._scale), scale_nodes=self._scale_node_list, redis=self._redis, thread_limit=self._threads_limit) if self._enable_prometheus_snapshot: self.start_prometheus()
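
Net effect of the patch: initialize_workload() now fails fast, before any resources are created, when a '_vm' workload runs without CNV or a '_kata' workload runs without Kata, mirroring the existing ODF guard. A condensed sketch of the guard flow (the stub class is an assumption for illustration; method and exception names come from the patch):

# Condensed sketch of the new initialize_workload() guards; OCStub stands in
# for the real OC client, and the exception classes are the ones added above.
from benchmark_runner.workloads.workloads_exceptions import CNVNotInstalled, KataNotInstalled

class OCStub:
    def is_cnv_installed(self) -> bool:
        return False  # pretend CNV is missing

    def is_kata_installed(self) -> bool:
        return True

def initialize_workload_guards(workload: str, oc: OCStub) -> None:
    if '_vm' in workload and not oc.is_cnv_installed():
        raise CNVNotInstalled(workload=workload)
    if '_kata' in workload and not oc.is_kata_installed():
        raise KataNotInstalled(workload=workload)

try:
    initialize_workload_guards('bootstorm_vm', OCStub())
except CNVNotInstalled as err:
    print(err)  # bootstorm_vm requires CNV to be installed
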