diff --git a/tests/scripts/helpers/kruize.py b/tests/scripts/helpers/kruize.py index 9bf240540..4b633f6a1 100644 --- a/tests/scripts/helpers/kruize.py +++ b/tests/scripts/helpers/kruize.py @@ -364,3 +364,75 @@ def list_metadata(datasource=None, cluster_name=None, namespace=None, verbose=No print(response.text) print("\n************************************************************") return response + + +# Description: This function creates a metric profile using the Kruize createMetricProfile API +# Input Parameters: metric profile json +def create_metric_profile(metric_profile_json_file): + json_file = open(metric_profile_json_file, "r") + metric_profile_json = json.loads(json_file.read()) + + print("\nCreating metric profile...") + url = URL + "/createMetricProfile" + print("URL = ", url) + + response = requests.post(url, json=metric_profile_json) + print("Response status code = ", response.status_code) + print(response.text) + return response + +# Description: This function deletes the metric profile +# Input Parameters: metric profile input json +def delete_metric_profile(input_json_file, invalid_header=False): + json_file = open(input_json_file, "r") + input_json = json.loads(json_file.read()) + + print("\nDeleting the metric profile...") + url = URL + "/deleteMetricProfile" + + metric_profile_name = input_json['metadata']['name'] + query_string = f"name={metric_profile_name}" + + if query_string: + url += "?" 
+ query_string + print("URL = ", url) + + headers = {'content-type': 'application/xml'} + if invalid_header: + print("Invalid header") + response = requests.delete(url, headers=headers) + else: + response = requests.delete(url) + + print(response) + print("Response status code = ", response.status_code) + return response + + +# Description: This function lists the metric profile from Kruize Autotune using GET listMetricProfiles API +# Input Parameters: metric profile name and verbose - flag indicating granularity of data to be listed +def list_metric_profiles(name=None, verbose=None, logging=True): + print("\nListing the metric profiles...") + + query_params = {} + + if name is not None: + query_params['name'] = name + if verbose is not None: + query_params['verbose'] = verbose + + query_string = "&".join(f"{key}={value}" for key, value in query_params.items()) + + url = URL + "/listMetricProfiles" + if query_string: + url += "?" + query_string + print("URL = ", url) + print("PARAMS = ", query_params) + response = requests.get(url) + + print("Response status code = ", response.status_code) + if logging: + print("\n************************************************************") + print(response.text) + print("\n************************************************************") + return response diff --git a/tests/scripts/helpers/list_metric_profiles_schema.py b/tests/scripts/helpers/list_metric_profiles_schema.py new file mode 100644 index 000000000..7cdd9c00b --- /dev/null +++ b/tests/scripts/helpers/list_metric_profiles_schema.py @@ -0,0 +1,109 @@ +""" +Copyright (c) 2024, 2024 Red Hat, IBM Corporation and others. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +list_metric_profiles_schema = { + "type": "array", + "items": { + "type": "object", + "properties": { + "apiVersion": { + "type": "string" + }, + "kind": { + "type": "string" + }, + "metadata": { + "type": "object", + "properties": { + "name": { + "type": "string" + } + }, + "required": ["name"] + }, + "profile_version": { + "type": "number" + }, + "k8s_type": { + "type": "string" + }, + "slo": { + "type": "object", + "properties": { + "sloClass": { + "type": "string" + }, + "objective_function": { + "type": "object", + "properties": { + "function_type": { + "type": "string" + } + }, + "required": ["function_type"] + }, + "direction": { + "type": "string" + }, + "function_variables": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "datasource": { + "type": "string" + }, + "value_type": { + "type": "string" + }, + "kubernetes_object": { + "type": "string" + }, + "aggregation_functions": { + "type": "object", + "items": { + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9_-]+$": { + "type": "object", + "properties": { + "function": { + "type": "string", + "pattern": "^[a-zA-Z0-9_-]+$" + }, + "query": { + "type": "string" + } + }, + "required": ["function", "query"] + }, + }, + } + }, + } + }, + "required": ["name", "datasource", "value_type", "kubernetes_object", "aggregation_functions"] + } + }, + "required": ["sloClass", "objective_function", "direction", "function_variables"] + } + } + }, + "required": ["apiVersion", "kind", "metadata", "profile_version", "k8s_type", "slo"] +} diff --git 
a/tests/scripts/helpers/list_metric_profiles_validate.py b/tests/scripts/helpers/list_metric_profiles_validate.py new file mode 100644 index 000000000..3685ef0e3 --- /dev/null +++ b/tests/scripts/helpers/list_metric_profiles_validate.py @@ -0,0 +1,121 @@ +""" +Copyright (c) 2024, 2024 Red Hat, IBM Corporation and others. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +import json +import jsonschema +from jsonschema import FormatChecker +from jsonschema.exceptions import ValidationError +from helpers.list_metric_profiles_schema import list_metric_profiles_schema + + +SLO_CLASSES_SUPPORTED = ("throughput", "response_time", "resource_usage") +SLO_CLASSES_NOT_SUPPORTED = "SLO class not supported!" + +DIRECTIONS_SUPPORTED = ("minimize", "maximize") +DIRECTIONS_NOT_SUPPORTED = "Directions not supported!" + +VALUE_TYPES_SUPPORTED = ("double", "int", "string", "categorical") +VALUE_TYPE_NOT_SUPPORTED = "Value type not supported!" + +KUBERNETES_OBJECTS_TYPE_SUPPORTED = ("deployment", "pod", "container", "namespace") +KUBERNETES_OBJECTS_TYPE_NOT_SUPPORTED = "Kubernetes objects type not supported!" + +FUNCTION_TYPES_SUPPORTED = ("sum", "avg", "min", "max") +FUNCTION_TYPE_NOT_SUPPORTED = "Aggregation function type not supported!" + +JSON_NULL_VALUES = ("is not of type 'string'", "is not of type 'integer'", "is not of type 'number'") +VALUE_MISSING = " cannot be empty or null!" 
+ + +def validate_list_metric_profiles_json(list_metric_profiles_json, json_schema): + errorMsg = "" + try: + # create a validator with the format checker + print("Validating json against the json schema...") + validator = jsonschema.Draft7Validator(json_schema, format_checker=FormatChecker()) + + # validate the JSON data against the schema + errors = "" + errors = list(validator.iter_errors(list_metric_profiles_json)) + print("Validating json against the json schema...done") + errorMsg = validate_list_metric_profiles_json_values(list_metric_profiles_json) + + if errors: + custom_err = ValidationError(errorMsg) + errors.append(custom_err) + return errors + else: + return errorMsg + except ValidationError as err: + print("Received a ValidationError") + + # Check if the exception is due to empty or null required parameters and prepare the response accordingly + if any(word in err.message for word in JSON_NULL_VALUES): + errorMsg = "Parameters" + VALUE_MISSING + return errorMsg + # Modify the error response in case of additional properties error + elif str(err.message).__contains__('('): + errorMsg = str(err.message).split('(') + return errorMsg[0] + else: + return err.message + +def validate_list_metric_profiles_json_values(metric_profile): + validationErrorMsg = "" + slo = "slo" + func_var = "function_variables" + aggr_func = "aggregation_functions" + + for key in metric_profile[0].keys(): + + # Check if any of the key is empty or null + if not (str(metric_profile[0][key]) and str(metric_profile[0][key]).strip()): + validationErrorMsg = ",".join([validationErrorMsg, "Parameters" + VALUE_MISSING]) + + if slo == key: + for subkey in metric_profile[0][key].keys(): + if not (str(metric_profile[0][key][subkey]) and str(metric_profile[0][key][subkey]).strip()): + print(f"FAILED - {str(metric_profile[0][key][subkey])} is empty or null") + validationErrorMsg = ",".join([validationErrorMsg, "Parameters" + VALUE_MISSING]) + elif str(subkey) == "sloClass" and 
(str(metric_profile[0][key][subkey]) not in SLO_CLASSES_SUPPORTED): + validationErrorMsg = ",".join([validationErrorMsg, SLO_CLASSES_NOT_SUPPORTED]) + elif str(subkey) == "direction" and (str(metric_profile[0][key][subkey]) not in DIRECTIONS_SUPPORTED): + validationErrorMsg = ",".join([validationErrorMsg, DIRECTIONS_NOT_SUPPORTED]) + + if func_var == subkey: + for func_var_object in metric_profile[0][key][subkey]: + for field in func_var_object.keys(): + # Check if any of the key is empty or null + if not (str(func_var_object.get(field)) and str(func_var_object.get(field)).strip()): + print(f"FAILED - {str(func_var_object.get(field))} is empty or null") + validationErrorMsg = ",".join([validationErrorMsg, "Parameters" + VALUE_MISSING]) + elif str(field) == "value_type" and str(func_var_object.get(field)) not in VALUE_TYPES_SUPPORTED: + validationErrorMsg = ",".join([validationErrorMsg, VALUE_TYPE_NOT_SUPPORTED]) + elif str(field) == "kubernetes_object" and str(func_var_object.get(field)) not in KUBERNETES_OBJECTS_TYPE_SUPPORTED: + validationErrorMsg = ",".join([validationErrorMsg, KUBERNETES_OBJECTS_TYPE_NOT_SUPPORTED]) + + if aggr_func == field: + aggr_func_obj = func_var_object.get("aggregation_functions", {}) + for aggr_func_object, aggr_func_value in aggr_func_obj.items(): + for query in aggr_func_value.keys(): + # Check if any of the key is empty or null + if not (str(aggr_func_value.get(query)) and str(aggr_func_value.get(query)).strip()): + print(f"FAILED - {str(aggr_func_value.get(query))} is empty or null") + validationErrorMsg = ",".join([validationErrorMsg, "Parameters" + VALUE_MISSING]) + elif str(query) == "function" and str(aggr_func_value.get(query)) not in FUNCTION_TYPES_SUPPORTED: + validationErrorMsg = ",".join([validationErrorMsg, FUNCTION_TYPE_NOT_SUPPORTED]) + + return validationErrorMsg.lstrip(',') diff --git a/tests/scripts/helpers/list_metric_profiles_without_parameters_schema.py 
b/tests/scripts/helpers/list_metric_profiles_without_parameters_schema.py new file mode 100644 index 000000000..1de70a140 --- /dev/null +++ b/tests/scripts/helpers/list_metric_profiles_without_parameters_schema.py @@ -0,0 +1,28 @@ +""" +Copyright (c) 2024, 2024 Red Hat, IBM Corporation and others. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +list_metric_profiles_without_parameters_schema = { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + } + }, + "required": ["name"] + } +} diff --git a/tests/scripts/helpers/utils.py b/tests/scripts/helpers/utils.py index 2c48aedc6..204d6f442 100644 --- a/tests/scripts/helpers/utils.py +++ b/tests/scripts/helpers/utils.py @@ -28,6 +28,7 @@ ERROR_STATUS_CODE = 400 ERROR_409_STATUS_CODE = 409 DUPLICATE_RECORDS_COUNT = 5 +ERROR_500_STATUS_CODE = 500 SUCCESS_STATUS = "SUCCESS" ERROR_STATUS = "ERROR" @@ -57,6 +58,13 @@ LIST_METADATA_DATASOURCE_NAME_CLUSTER_NAME_ERROR_MSG = "Metadata for a given datasource name - %s, cluster_name - %s either does not exist or is not valid" LIST_METADATA_MISSING_DATASOURCE = "datasource is mandatory" IMPORT_METADATA_DATASOURCE_CONNECTION_FAILURE_MSG = "Metadata cannot be imported, datasource connection refused or timed out" +CREATE_METRIC_PROFILE_SUCCESS_MSG = "Metric Profile : %s created successfully. 
View Metric Profiles at /listMetricProfiles" +METRIC_PROFILE_EXISTS_MSG = "Validation failed: Metric Profile already exists: %s" +METRIC_PROFILE_NOT_FOUND_MSG = "No metric profiles found!" +INVALID_LIST_METRIC_PROFILE_INPUT_QUERY = "The query param(s) - [%s] is/are invalid" +LIST_METRIC_PROFILES_INVALID_NAME = "Given metric profile name - %s is not valid" +CREATE_METRIC_PROFILE_MISSING_MANDATORY_FIELD_MSG = "Validation failed: JSONObject[\"%s\"] not found." +CREATE_METRIC_PROFILE_MISSING_MANDATORY_PARAMETERS_MSG = "Validation failed: Missing mandatory parameters: [%s] " # Kruize Recommendations Notification codes NOTIFICATION_CODE_FOR_RECOMMENDATIONS_AVAILABLE = "111000" diff --git a/tests/scripts/local_monitoring_tests/Local_monitoring_tests.md b/tests/scripts/local_monitoring_tests/Local_monitoring_tests.md index 302418779..4923d612f 100644 --- a/tests/scripts/local_monitoring_tests/Local_monitoring_tests.md +++ b/tests/scripts/local_monitoring_tests/Local_monitoring_tests.md @@ -48,9 +48,30 @@ Here are the test scenarios: - List dsmetadata with datasource and namespace but without cluster_name - List the dsmetadata after deleting imported metadata +### **Create Metric Profile API tests** + +Here are the test scenarios: + +- Create metric profile passing a valid input JSON payload with all the metric queries +- Post the same metric profile again - creating it twice and validate the error as metric profile name is a unique field +- Create multiple valid metric profiles using different jsons +- Create Metric profile missing mandatory fields and validate error messages when the mandatory fields are missing + + +### **List Metric Profile API tests** + +Here are the test scenarios: + +- List metric profiles without specifying any query parameters +- List metric profiles specifying profile name query parameter +- List metric profiles specifying verbose query parameter +- List metric profiles specifying profile name and verbose query parameters +- Test with invalid 
values such as blank, null or an invalid value for name query parameter in listMetricProfiles API +- List metric profiles without creating metric profile + The above tests are developed using pytest framework and the tests are run using shell script wrapper that does the following: - Deploys kruize in non-CRD mode using the [deploy script](https://github.com/kruize/autotune/blob/master/deploy.sh) from the autotune repo -- Creates a resource optimization performance profile using the [createPerformanceProfile API](/design/PerformanceProfileAPI.md) +- Creates a resource optimization metric profile using the [createMetricProfile API](/design/MetricProfileAPI.md) - Runs the above tests using pytest ## Prerequisites for running the tests: diff --git a/tests/scripts/local_monitoring_tests/json_files/resource_optimization_openshift_metric_profile.json b/tests/scripts/local_monitoring_tests/json_files/resource_optimization_openshift_metric_profile.json new file mode 100644 index 000000000..fda38b4f3 --- /dev/null +++ b/tests/scripts/local_monitoring_tests/json_files/resource_optimization_openshift_metric_profile.json @@ -0,0 +1,396 @@ +{ + "apiVersion": "recommender.com/v1", + "kind": "KruizePerformanceProfile", + "metadata": { + "name": "resource-optimization-openshift" + }, + "profile_version": 1, + "k8s_type": "openshift", + "slo": { + "slo_class": "resource_usage", + "direction": "minimize", + "objective_function": { + "function_type": "source" + }, + "function_variables": [ + { + "name": "cpuRequest", + "datasource": "prometheus", + "value_type": "double", + "kubernetes_object": "container", + "aggregation_functions": [ + { + "function": "avg", + "query": "avg by(container,namespace,workload,workload_type,owner_kind) ((kube_pod_container_resource_requests{container!=\"\", container!=\"POD\", pod!=\"\", resource=\"cpu\", unit=\"core\", container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE$\"} * on(pod, namespace) group_left max by (container,pod, namespace) 
(kube_pod_status_phase{phase=\"Running\"}) * on(pod) group_left(workload, workload_type) max by (pod, workload, workload_type) (max_over_time(namespace_workload_pod:kube_pod_owner:relabel{pod!=\"\",workload=\"$WORKLOAD$\", workload_type=\"$WORKLOAD_TYPE$\"}[15m])) * on(pod) group_left(owner_kind) max by (pod, owner_kind) (max_over_time(kube_pod_owner{pod!=\"\"}[15m]))))" + }, + { + "function": "sum", + "query": "sum by(container,namespace,workload,workload_type,owner_kind) ((kube_pod_container_resource_requests{container!=\"\", container!=\"POD\", pod!=\"\", resource=\"cpu\", unit=\"core\", container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE$\"} * on(pod, namespace) group_left max by (container,pod, namespace) (kube_pod_status_phase{phase=\"Running\"}) * on(pod) group_left(workload, workload_type) max by (pod, workload, workload_type) (max_over_time(namespace_workload_pod:kube_pod_owner:relabel{pod!=\"\",workload=\"$WORKLOAD$\", workload_type=\"$WORKLOAD_TYPE$\"}[15m])) * on(pod) group_left(owner_kind) max by (pod, owner_kind) (max_over_time(kube_pod_owner{pod!=\"\"}[15m]))))" + } + ] + }, + { + "name": "cpuLimit", + "datasource": "prometheus", + "value_type": "double", + "kubernetes_object": "container", + "aggregation_functions": [ + { + "function": "avg", + "query": "avg by(container,namespace,workload,workload_type,owner_kind) ((kube_pod_container_resource_limits{container!=\"\", container!=\"POD\", pod!=\"\", resource=\"cpu\", unit=\"core\", container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE$\"} * on(pod, namespace) group_left max by (container,pod, namespace) (kube_pod_status_phase{phase=\"Running\"}) * on(pod) group_left(workload, workload_type) max by (pod, workload, workload_type) (max_over_time(namespace_workload_pod:kube_pod_owner:relabel{pod!=\"\",workload=\"$WORKLOAD$\", workload_type=\"$WORKLOAD_TYPE$\"}[15m])) * on(pod) group_left(owner_kind) max by (pod, owner_kind) (max_over_time(kube_pod_owner{pod!=\"\"}[15m]))))" + }, + { + "function": 
"sum", + "query": "avg by(container,namespace,workload,workload_type,owner_kind) ((kube_pod_container_resource_limits{container!=\"\", container!=\"POD\", pod!=\"\", resource=\"cpu\", unit=\"core\", container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE$\"} * on(pod, namespace) group_left max by (container,pod, namespace) (kube_pod_status_phase{phase=\"Running\"}) * on(pod) group_left(workload, workload_type) max by (pod, workload, workload_type) (max_over_time(namespace_workload_pod:kube_pod_owner:relabel{pod!=\"\",workload=\"$WORKLOAD$\", workload_type=\"$WORKLOAD_TYPE$\"}[15m])) * on(pod) group_left(owner_kind) max by (pod, owner_kind) (max_over_time(kube_pod_owner{pod!=\"\"}[15m]))))" + } + ] + }, + { + "name": "cpuUsage", + "datasource": "prometheus", + "value_type": "double", + "kubernetes_object": "container", + "aggregation_functions": [ + { + "function": "avg", + "query": "avg by(namespace,container,workload,workload_type,owner_kind) (avg_over_time(((irate(container_cpu_usage_seconds_total{container!=\"\", container!=\"POD\", pod!=\"\", container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE$\"}[5m])) * on(pod) group_left(workload, workload_type) max by (pod, workload, workload_type) (max_over_time(namespace_workload_pod:kube_pod_owner:relabel{pod!=\"\", workload=\"$WORKLOAD$\", workload_type=\"$WORKLOAD_TYPE$\"}[15m])) * on(pod) group_left(owner_kind) max by (pod, owner_kind) (max_over_time(kube_pod_owner{pod!=\"\"}[15m])))[15m:]))", + "version": "<=4.8" + }, + { + "function": "avg", + "query": "avg by(namespace,container,workload,workload_type,owner_kind) (avg_over_time(((node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{container!=\"\", container!=\"POD\", pod!=\"\",container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE$\"}) * on(pod) group_left(workload, workload_type) max by (pod, workload, workload_type) (max_over_time(namespace_workload_pod:kube_pod_owner:relabel{pod!=\"\",workload=\"$WORKLOAD$\", 
workload_type=\"$WORKLOAD_TYPE$\"}[15m])) * on(pod) group_left(owner_kind) max by (pod, owner_kind) (max_over_time(kube_pod_owner{pod!=\"\"}[15m])))[15m:]))", + "version": ">4.9" + }, + { + "function": "min", + "query": "min by(namespace,container,workload,workload_type,owner_kind) (min_over_time(((irate(container_cpu_usage_seconds_total{container!=\"\", container!=\"POD\", pod!=\"\", container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE$\"}[5m])) * on(pod) group_left(workload, workload_type) max by (pod, workload, workload_type) (max_over_time(namespace_workload_pod:kube_pod_owner:relabel{pod!=\"\", workload=\"$WORKLOAD$\", workload_type=\"$WORKLOAD_TYPE$\"}[15m])) * on(pod) group_left(owner_kind) max by (pod, owner_kind) (max_over_time(kube_pod_owner{pod!=\"\"}[15m])))[15m:]))", + "version": "<=4.8" + }, + { + "function": "min", + "query": "min by(namespace,container,workload,workload_type,owner_kind) (min_over_time(((node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{container!=\"\", container!=\"POD\", pod!=\"\",container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE$\"}) * on(pod) group_left(workload, workload_type) max by (pod, workload, workload_type) (max_over_time(namespace_workload_pod:kube_pod_owner:relabel{pod!=\"\",workload=\"$WORKLOAD$\", workload_type=\"$WORKLOAD_TYPE$\"}[15m])) * on(pod) group_left(owner_kind) max by (pod, owner_kind) (max_over_time(kube_pod_owner{pod!=\"\"}[15m])))[15m:]))", + "version": ">4.9" + }, + { + "function": "max", + "query": "max by(namespace,container,workload,workload_type,owner_kind) (max_over_time(((irate(container_cpu_usage_seconds_total{container!=\"\", container!=\"POD\", pod!=\"\", container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE$\"}[5m])) * on(pod) group_left(workload, workload_type) max by (pod, workload, workload_type) (max_over_time(namespace_workload_pod:kube_pod_owner:relabel{pod!=\"\", workload=\"$WORKLOAD$\", workload_type=\"$WORKLOAD_TYPE$\"}[15m])) * on(pod) 
group_left(owner_kind) max by (pod, owner_kind) (max_over_time(kube_pod_owner{pod!=\"\"}[15m])))[15m:]))", + "version": "<=4.8" + }, + { + "function": "max", + "query": "max by(namespace,container,workload,workload_type,owner_kind) (max_over_time(((node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{container!=\"\", container!=\"POD\", pod!=\"\",container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE$\"}) * on(pod) group_left(workload, workload_type) max by (pod, workload, workload_type) (max_over_time(namespace_workload_pod:kube_pod_owner:relabel{pod!=\"\",workload=\"$WORKLOAD$\", workload_type=\"$WORKLOAD_TYPE$\"}[15m])) * on(pod) group_left(owner_kind) max by (pod, owner_kind) (max_over_time(kube_pod_owner{pod!=\"\"}[15m])))[15m:]))", + "version": ">4.9" + }, + { + "function": "sum", + "query": "sum by(namespace,container,workload,workload_type,owner_kind) (avg_over_time(((irate(container_cpu_usage_seconds_total{container!=\"\", container!=\"POD\", pod!=\"\", container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE$\"}[5m])) * on(pod) group_left(workload, workload_type) max by (pod, workload, workload_type) (max_over_time(namespace_workload_pod:kube_pod_owner:relabel{pod!=\"\", workload=\"$WORKLOAD$\", workload_type=\"$WORKLOAD_TYPE$\"}[15m])) * on(pod) group_left(owner_kind) max by (pod, owner_kind) (max_over_time(kube_pod_owner{pod!=\"\"}[15m])))[15m:]))", + "version": "<=4.8" + }, + { + "function": "sum", + "query": "sum by(namespace,container,workload,workload_type,owner_kind) (avg_over_time(((node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{container!=\"\", container!=\"POD\", pod!=\"\",container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE$\"}) * on(pod) group_left(workload, workload_type) max by (pod, workload, workload_type) (max_over_time(namespace_workload_pod:kube_pod_owner:relabel{pod!=\"\",workload=\"$WORKLOAD$\", workload_type=\"$WORKLOAD_TYPE$\"}[15m])) * on(pod) group_left(owner_kind) max by (pod, 
owner_kind) (max_over_time(kube_pod_owner{pod!=\"\"}[15m])))[15m:]))", + "version": ">4.9" + } + ] + }, + { + "name": "cpuThrottle", + "datasource": "prometheus", + "value_type": "double", + "kubernetes_object": "container", + "aggregation_functions": [ + { + "function": "avg", + "query": "avg by(namespace,container,workload,workload_type,owner_kind) (avg_over_time((rate(container_cpu_cfs_throttled_seconds_total{container!=\"\", container!=\"POD\", pod!=\"\",container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE$\"}[15m]) * on(pod) group_left(workload, workload_type) max by (pod, workload, workload_type) (max_over_time(namespace_workload_pod:kube_pod_owner:relabel{pod!=\"\",workload=\"$WORKLOAD$\", workload_type=\"$WORKLOAD_TYPE$\"}[15m])) * on(pod) group_left(owner_kind) max by (pod, owner_kind) (max_over_time(kube_pod_owner{pod!=\"\"}[15m])))[15m:]))" + }, + { + "function": "max", + "query": "max by(namespace,container,workload,workload_type,owner_kind) (max_over_time((rate(container_cpu_cfs_throttled_seconds_total{container!=\"\", container!=\"POD\", pod!=\"\",container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE$\"}[15m]) * on(pod) group_left(workload, workload_type) max by (pod, workload, workload_type) (max_over_time(namespace_workload_pod:kube_pod_owner:relabel{pod!=\"\",workload=\"$WORKLOAD$\", workload_type=\"$WORKLOAD_TYPE$\"}[15m])) * on(pod) group_left(owner_kind) max by (pod, owner_kind) (max_over_time(kube_pod_owner{pod!=\"\"}[15m])))[15m:]))" + + }, + { + "function": "min", + "query": "min by(namespace,container,workload,workload_type,owner_kind) (min_over_time((rate(container_cpu_cfs_throttled_seconds_total{container!=\"\", container!=\"POD\", pod!=\"\",container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE$\"}[15m]) * on(pod) group_left(workload, workload_type) max by (pod, workload, workload_type) (max_over_time(namespace_workload_pod:kube_pod_owner:relabel{pod!=\"\",workload=\"$WORKLOAD$\", workload_type=\"$WORKLOAD_TYPE$\"}[15m])) * on(pod) 
group_left(owner_kind) max by (pod, owner_kind) (max_over_time(kube_pod_owner{pod!=\"\"}[15m])))[15m:]))" + }, + { + "function": "sum", + "query": "sum by(namespace,container,workload,workload_type,owner_kind) (avg_over_time((rate(container_cpu_cfs_throttled_seconds_total{container!=\"\", container!=\"POD\", pod!=\"\",container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE$\"}[15m]) * on(pod) group_left(workload, workload_type) max by (pod, workload, workload_type) (max_over_time(namespace_workload_pod:kube_pod_owner:relabel{pod!=\"\",workload=\"$WORKLOAD$\", workload_type=\"$WORKLOAD_TYPE$\"}[15m])) * on(pod) group_left(owner_kind) max by (pod, owner_kind) (max_over_time(kube_pod_owner{pod!=\"\"}[15m])))[15m:]))" + } + ] + }, + { + "name": "memoryRequest", + "datasource": "prometheus", + "value_type": "double", + "kubernetes_object": "container", + "aggregation_functions": [ + { + "function": "avg", + "query": "avg by(container,namespace,workload,workload_type,owner_kind) ((kube_pod_container_resource_requests{container!=\"\", container!=\"POD\", pod!=\"\", resource=\"memory\", unit=\"byte\", container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE$\"} * on(pod, namespace) group_left max by (container,pod, namespace) (kube_pod_status_phase{phase=\"Running\"}) * on(pod) group_left(workload, workload_type) max by (pod, workload, workload_type) (max_over_time(namespace_workload_pod:kube_pod_owner:relabel{pod!=\"\",workload=\"$WORKLOAD$\", workload_type=\"$WORKLOAD_TYPE$\"}[15m])) * on(pod) group_left(owner_kind) max by (pod, owner_kind) (max_over_time(kube_pod_owner{pod!=\"\"}[15m]))))" + + }, + { + "function": "sum", + "query": "sum by(container,namespace,workload,workload_type,owner_kind) ((kube_pod_container_resource_requests{container!=\"\", container!=\"POD\", pod!=\"\", resource=\"memory\", unit=\"byte\", container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE$\"} * on(pod, namespace) group_left max by (container,pod, namespace) 
(kube_pod_status_phase{phase=\"Running\"}) * on(pod) group_left(workload, workload_type) max by (pod, workload, workload_type) (max_over_time(namespace_workload_pod:kube_pod_owner:relabel{pod!=\"\",workload=\"$WORKLOAD$\", workload_type=\"$WORKLOAD_TYPE$\"}[15m])) * on(pod) group_left(owner_kind) max by (pod, owner_kind) (max_over_time(kube_pod_owner{pod!=\"\"}[15m]))))" + + } + ] + }, + { + "name": "memoryLimit", + "datasource": "prometheus", + "value_type": "double", + "kubernetes_object": "container", + "aggregation_functions": [ + { + "function": "avg", + "query": "avg by(container,namespace,workload,workload_type,owner_kind) ((kube_pod_container_resource_limits{container!=\"\", container!=\"POD\", pod!=\"\", resource=\"memory\", unit=\"byte\", container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE$\"} * on(pod, namespace) group_left max by (container,pod, namespace) (kube_pod_status_phase{phase=\"Running\"}) * on(pod) group_left(workload, workload_type) max by (pod, workload, workload_type) (max_over_time(namespace_workload_pod:kube_pod_owner:relabel{pod!=\"\",workload=\"$WORKLOAD$\", workload_type=\"$WORKLOAD_TYPE$\"}[15m])) * on(pod) group_left(owner_kind) max by (pod, owner_kind) (max_over_time(kube_pod_owner{pod!=\"\"}[15m]))))" + }, + { + "function": "sum", + "query": "sum by(container,namespace,workload,workload_type,owner_kind) ((kube_pod_container_resource_limits{container!=\"\", container!=\"POD\", pod!=\"\", resource=\"memory\", unit=\"byte\", container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE$\"} * on(pod, namespace) group_left max by (container,pod, namespace) (kube_pod_status_phase{phase=\"Running\"}) * on(pod) group_left(workload, workload_type) max by (pod, workload, workload_type) (max_over_time(namespace_workload_pod:kube_pod_owner:relabel{pod!=\"\",workload=\"$WORKLOAD$\", workload_type=\"$WORKLOAD_TYPE$\"}[15m])) * on(pod) group_left(owner_kind) max by (pod, owner_kind) (max_over_time(kube_pod_owner{pod!=\"\"}[15m]))))" + + } + ] + }, + 
{ + "name": "memoryUsage", + "datasource": "prometheus", + "value_type": "double", + "kubernetes_object": "container", + "aggregation_functions": [ + { + "function": "avg", + "query": "avg by(namespace,container,workload,workload_type,owner_kind) (avg_over_time((container_memory_working_set_bytes{container!=\"\", container!=\"POD\", pod!=\"\", container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE$\"} * on(pod) group_left(workload, workload_type) max by (pod, workload, workload_type) (max_over_time(namespace_workload_pod:kube_pod_owner:relabel{pod!=\"\",workload=\"$WORKLOAD$\", workload_type=\"$WORKLOAD_TYPE$\"}[15m])) * on(pod) group_left(owner_kind) max by (pod, owner_kind) (max_over_time(kube_pod_owner{pod!=\"\"}[15m])))[15m:]))" + + }, + { + "function": "min", + "query": "min by(namespace,container,workload,workload_type,owner_kind) (min_over_time((container_memory_working_set_bytes{container!=\"\", container!=\"POD\", pod!=\"\", container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE$\"} * on(pod) group_left(workload, workload_type) max by (pod, workload, workload_type) (max_over_time(namespace_workload_pod:kube_pod_owner:relabel{pod!=\"\",workload=\"$WORKLOAD$\", workload_type=\"$WORKLOAD_TYPE$\"}[15m])) * on(pod) group_left(owner_kind) max by (pod, owner_kind) (max_over_time(kube_pod_owner{pod!=\"\"}[15m])))[15m:]))" + + }, + { + "function": "max", + "query": "max by(namespace,container,workload,workload_type,owner_kind) (max_over_time((container_memory_working_set_bytes{container!=\"\", container!=\"POD\", pod!=\"\", container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE$\"} * on(pod) group_left(workload, workload_type) max by (pod, workload, workload_type) (max_over_time(namespace_workload_pod:kube_pod_owner:relabel{pod!=\"\",workload=\"$WORKLOAD$\", workload_type=\"$WORKLOAD_TYPE$\"}[15m])) * on(pod) group_left(owner_kind) max by (pod, owner_kind) (max_over_time(kube_pod_owner{pod!=\"\"}[15m])))[15m:]))" + + }, + { + "function": "sum", + "query": "sum 
by(namespace,container,workload,workload_type,owner_kind) (avg_over_time((container_memory_working_set_bytes{container!=\"\", container!=\"POD\", pod!=\"\", container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE$\"} * on(pod) group_left(workload, workload_type) max by (pod, workload, workload_type) (max_over_time(namespace_workload_pod:kube_pod_owner:relabel{pod!=\"\",workload=\"$WORKLOAD$\", workload_type=\"$WORKLOAD_TYPE$\"}[15m])) * on(pod) group_left(owner_kind) max by (pod, owner_kind) (max_over_time(kube_pod_owner{pod!=\"\"}[15m])))[15m:]))" + + } + ] + }, + { + "name": "memoryRSS", + "datasource": "prometheus", + "value_type": "double", + "kubernetes_object": "container", + "aggregation_functions": [ + { + "function": "avg", + "query": "avg by(namespace,container,workload,workload_type,owner_kind) (avg_over_time((container_memory_rss{container!=\"\", container!=\"POD\", pod!=\"\", container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE$\"} * on(pod) group_left(workload, workload_type) max by (pod, workload, workload_type) (max_over_time(namespace_workload_pod:kube_pod_owner:relabel{pod!=\"\",workload=\"$WORKLOAD$\", workload_type=\"$WORKLOAD_TYPE$\"}[15m])) * on(pod) group_left(owner_kind) max by (pod, owner_kind) (max_over_time(kube_pod_owner{pod!=\"\"}[15m])))[15m:]))" + + }, + { + "function": "min", + "query": "min by(namespace,container,workload,workload_type,owner_kind) (min_over_time((container_memory_rss{container!=\"\", container!=\"POD\", pod!=\"\", container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE$\"} * on(pod) group_left(workload, workload_type) max by (pod, workload, workload_type) (max_over_time(namespace_workload_pod:kube_pod_owner:relabel{pod!=\"\",workload=\"$WORKLOAD$\", workload_type=\"$WORKLOAD_TYPE$\"}[15m])) * on(pod) group_left(owner_kind) max by (pod, owner_kind) (max_over_time(kube_pod_owner{pod!=\"\"}[15m])))[15m:]))" + + }, + { + "function": "max", + "query": "max by(namespace,container,workload,workload_type,owner_kind) 
(max_over_time((container_memory_rss{container!=\"\", container!=\"POD\", pod!=\"\", container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE$\"} * on(pod) group_left(workload, workload_type) max by (pod, workload, workload_type) (max_over_time(namespace_workload_pod:kube_pod_owner:relabel{pod!=\"\",workload=\"$WORKLOAD$\", workload_type=\"$WORKLOAD_TYPE$\"}[15m])) * on(pod) group_left(owner_kind) max by (pod, owner_kind) (max_over_time(kube_pod_owner{pod!=\"\"}[15m])))[15m:]))" + + }, + { + "function": "sum", + "query": "sum by(namespace,container,workload,workload_type,owner_kind) (avg_over_time((container_memory_rss{container!=\"\", container!=\"POD\", pod!=\"\", container=\"$CONTAINER_NAME$\", namespace=\"$NAMESPACE$\"} * on(pod) group_left(workload, workload_type) max by (pod, workload, workload_type) (max_over_time(namespace_workload_pod:kube_pod_owner:relabel{pod!=\"\",workload=\"$WORKLOAD$\", workload_type=\"$WORKLOAD_TYPE$\"}[15m])) * on(pod) group_left(owner_kind) max by (pod, owner_kind) (max_over_time(kube_pod_owner{pod!=\"\"}[15m])))[15m:]))" + + } + ] + }, + { + "name": "maxDate", + "datasource": "prometheus", + "value_type": "double", + "kubernetes_object": "container", + "aggregation_functions": [ + { + "function": "max", + "query": "max by(namespace,container,workload,workload_type,owner_kind) (last_over_time((timestamp(container_cpu_usage_seconds_total{container!=\"\", container!=\"POD\", pod!=\"\", namespace=\"$NAMESPACE$\",container=\"$CONTAINER_NAME$\"} > 0))[15d:]) * on(pod) group_left(workload, workload_type) max by (pod, workload, workload_type) (max_over_time(namespace_workload_pod:kube_pod_owner:relabel{pod!='',workload=\"$WORKLOAD$\", workload_type=\"$WORKLOAD_TYPE$\"}[15d])) * on(pod) group_left(owner_kind) max by (pod, owner_kind) (max_over_time(kube_pod_owner{pod!=\"\"}[15d])) )" + } + ] + }, + { + "name": "namespaceCpuRequest", + "datasource": "prometheus", + "value_type": "double", + "kubernetes_object": "namespace", + 
"aggregation_functions": [ + { + "function": "sum", + "query": "sum by (namespace) (kube_resourcequota{namespace=\"$NAMESPACE$\", resource=\"requests.cpu\", type=\"hard\"})" + + } + ] + }, + { + "name": "namespaceCpuLimit", + "datasource": "prometheus", + "value_type": "double", + "kubernetes_object": "namespace", + "aggregation_functions": [ + { + "function": "sum", + "query": "sum by (namespace) (kube_resourcequota{namespace=\"$NAMESPACE$\", resource=\"limits.cpu\", type=\"hard\"})" + + } + ] + }, + { + "name": "namespaceMemoryRequest", + "datasource": "prometheus", + "value_type": "double", + "kubernetes_object": "namespace", + "aggregation_functions": [ + { + "function": "sum", + "query": "sum by (namespace) (kube_resourcequota{namespace=\"$NAMESPACE$\", resource=\"requests.memory\", type=\"hard\"})" + + } + ] + }, + { + "name": "namespaceMemoryLimit", + "datasource": "prometheus", + "value_type": "double", + "kubernetes_object": "namespace", + "aggregation_functions": [ + { + "function": "sum", + "query": "sum by (namespace) (kube_resourcequota{namespace=\"$NAMESPACE$\", resource=\"limits.memory\", type=\"hard\"})" + + } + ] + }, + { + "name": "namespaceCpuUsage", + "datasource": "prometheus", + "value_type": "double", + "kubernetes_object": "namespace", + "aggregation_functions": [ + { + "function": "avg", + "query": "avg_over_time(sum by(namespace) (node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{namespace=\"$NAMESPACE$\", container!='', container!='POD', pod!=''})[$MEASUREMENT_DURATION_IN_MIN$m:])" + + }, + { + "function": "max", + "query": "max_over_time(sum by(namespace) (node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{namespace=\"$NAMESPACE$\", container!='', container!='POD', pod!=''})[$MEASUREMENT_DURATION_IN_MIN$m:])" + + }, + { + "function": "min", + "query": "min_over_time(sum by(namespace) (node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{namespace=\"$NAMESPACE$\", 
container!='', container!='POD', pod!=''})[$MEASUREMENT_DURATION_IN_MIN$m:])" + + } + ] + }, + { + "name": "namespaceCpuThrottle", + "datasource": "prometheus", + "value_type": "double", + "kubernetes_object": "namespace", + "aggregation_functions": [ + { + "function": "avg", + "query": "avg_over_time(sum by(namespace) (rate(container_cpu_cfs_throttled_seconds_total{namespace=\"$NAMESPACE$\", container!='', container!='POD', pod!=''}[5m]))[$MEASUREMENT_DURATION_IN_MIN$m:])" + + }, + { + "function": "max", + "query": "max_over_time(sum by(namespace) (rate(container_cpu_cfs_throttled_seconds_total{namespace=\"$NAMESPACE$\", container!='', container!='POD', pod!=''}[5m]))[$MEASUREMENT_DURATION_IN_MIN$m:])" + + }, + { + "function": "min", + "query": "min_over_time(sum by(namespace) (rate(container_cpu_cfs_throttled_seconds_total{namespace=\"$NAMESPACE$\", container!='', container!='POD', pod!=''}[5m]))[$MEASUREMENT_DURATION_IN_MIN$m:])" + + } + ] + }, + { + "name": "namespaceMemoryUsage", + "datasource": "prometheus", + "value_type": "double", + "kubernetes_object": "namespace", + "aggregation_functions": [ + { + "function": "avg", + "query": "avg_over_time(sum by(namespace) (container_memory_working_set_bytes{namespace=\"$NAMESPACE$\", container!='', container!='POD', pod!=''})[$MEASUREMENT_DURATION_IN_MIN$m:])" + + }, + { + "function": "max", + "query": "max_over_time(sum by(namespace) (container_memory_working_set_bytes{namespace=\"$NAMESPACE$\", container!='', container!='POD', pod!=''})[$MEASUREMENT_DURATION_IN_MIN$m:])" + + }, + { + "function": "min", + "query": "min_over_time(sum by(namespace) (container_memory_working_set_bytes{namespace=\"$NAMESPACE$\", container!='', container!='POD', pod!=''})[$MEASUREMENT_DURATION_IN_MIN$m:])" + + } + ] + }, + { + "name": "namespaceMemoryRSS", + "datasource": "prometheus", + "value_type": "double", + "kubernetes_object": "namespace", + "aggregation_functions": [ + { + "function": "avg", + "query": "avg_over_time(sum 
by(namespace) (container_memory_rss{namespace=\"$NAMESPACE$\", container!='', container!='POD', pod!=''})[$MEASUREMENT_DURATION_IN_MIN$m:])" + + }, + { + "function": "max", + "query": "max_over_time(sum by(namespace) (container_memory_rss{namespace=\"$NAMESPACE$\", container!='', container!='POD', pod!=''})[$MEASUREMENT_DURATION_IN_MIN$m:])" + + }, + { + "function": "min", + "query": "min_over_time(sum by(namespace) (container_memory_rss{namespace=\"$NAMESPACE$\", container!='', container!='POD', pod!=''})[$MEASUREMENT_DURATION_IN_MIN$m:])" + + } + ] + }, + { + "name": "namespaceTotalPods", + "datasource": "prometheus", + "value_type": "double", + "kubernetes_object": "namespace", + "aggregation_functions": [ + { + "function": "sum", + "query": "sum(count(kube_pod_status_phase{namespace=\"$NAMESPACE$\"}))" + + } + ] + }, + { + "name": "namespaceRunningPods", + "datasource": "prometheus", + "value_type": "double", + "kubernetes_object": "namespace", + "aggregation_functions": [ + { + "function": "sum", + "query": "sum(count(kube_pod_status_phase{namespace=\"$NAMESPACE$\", phase=\"Running\"}))" + + } + ] + } + ] + } +} diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_create_metric_profile.py b/tests/scripts/local_monitoring_tests/rest_apis/test_create_metric_profile.py new file mode 100644 index 000000000..bb497c52b --- /dev/null +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_create_metric_profile.py @@ -0,0 +1,279 @@ +""" +Copyright (c) 2024, 2024 Red Hat, IBM Corporation and others. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +""" +import pytest +import json +import sys +import copy + +sys.path.append("../../") + +from helpers.fixtures import * +from helpers.kruize import * +from helpers.utils import * +from helpers.list_metric_profiles_validate import * +from helpers.list_metric_profiles_without_parameters_schema import * + +mandatory_fields = [ + ("apiVersion", ERROR_500_STATUS_CODE, ERROR_STATUS), + ("kind", ERROR_500_STATUS_CODE, ERROR_STATUS), + ("metadata", ERROR_500_STATUS_CODE, ERROR_STATUS), + ("name", ERROR_500_STATUS_CODE, ERROR_STATUS), + ("slo", ERROR_500_STATUS_CODE, ERROR_STATUS), + ("direction", ERROR_STATUS_CODE, ERROR_STATUS), + ("objective_function", ERROR_500_STATUS_CODE, ERROR_STATUS), + ("function_type", ERROR_STATUS_CODE, ERROR_STATUS), + ("function_variables", ERROR_500_STATUS_CODE, ERROR_STATUS), + ("name", ERROR_500_STATUS_CODE, ERROR_STATUS), + ("datasource", ERROR_500_STATUS_CODE, ERROR_STATUS), + ("value_type", ERROR_500_STATUS_CODE, ERROR_STATUS), + ("aggregation_functions", ERROR_500_STATUS_CODE, ERROR_STATUS), + ("function", ERROR_500_STATUS_CODE, ERROR_STATUS), + ("query", ERROR_500_STATUS_CODE, ERROR_STATUS) +] + + +@pytest.mark.sanity +def test_create_metric_profile(cluster_type): + """ + Test Description: This test validates the response status code of createMetricProfile API by passing a + valid input for the json + """ + input_json_file = "../json_files/resource_optimization_openshift_metric_profile.json" + form_kruize_url(cluster_type) + + response = delete_metric_profile(input_json_file) + print("delete metric profile = ", response.status_code) + + # Create metric profile using the specified json + response = create_metric_profile(input_json_file) + + data = response.json() + print(data['message']) + + assert response.status_code == SUCCESS_STATUS_CODE + assert data['status'] == SUCCESS_STATUS + + json_file = open(input_json_file, "r") + input_json 
= json.loads(json_file.read()) + metric_profile_name = input_json['metadata']['name'] + assert data['message'] == CREATE_METRIC_PROFILE_SUCCESS_MSG % metric_profile_name + + response = list_metric_profiles(name=metric_profile_name) + metric_profile_json = response.json() + + assert response.status_code == SUCCESS_200_STATUS_CODE + + # Validate the json against the json schema + errorMsg = validate_list_metric_profiles_json(metric_profile_json, list_metric_profiles_schema) + assert errorMsg == "" + + response = delete_metric_profile(input_json_file) + print("delete metric profile = ", response.status_code) + + +@pytest.mark.sanity +def test_create_duplicate_metric_profile(cluster_type): + """ + Test Description: This test validates the response status code of createMetricProfile API by specifying the + same metric profile name + """ + input_json_file = "../json_files/resource_optimization_openshift_metric_profile.json" + json_data = json.load(open(input_json_file)) + + metric_profile_name = json_data['metadata']['name'] + print("name = ", metric_profile_name) + + form_kruize_url(cluster_type) + + response = delete_metric_profile(input_json_file) + print("delete metric profile = ", response.status_code) + + # Create metric profile using the specified json + response = create_metric_profile(input_json_file) + + data = response.json() + print(data['message']) + + assert response.status_code == SUCCESS_STATUS_CODE + assert data['status'] == SUCCESS_STATUS + assert data['message'] == CREATE_METRIC_PROFILE_SUCCESS_MSG % metric_profile_name + + # Create metric profile using the specified json + response = create_metric_profile(input_json_file) + + data = response.json() + print(data['message']) + + assert response.status_code == ERROR_409_STATUS_CODE + assert data['status'] == ERROR_STATUS + assert data['message'] == METRIC_PROFILE_EXISTS_MSG % metric_profile_name + + response = delete_metric_profile(input_json_file) + print("delete metric profile = ", 
response.status_code) + + +@pytest.mark.sanity +def test_create_multiple_metric_profiles(cluster_type): + """ + Test Description: This test validates the creation of multiple metric profiles using different json files + """ + + input_json_file = "../json_files/resource_optimization_openshift_metric_profile.json" + output_json_file = "/tmp/create_metric_profile.json" + temp_json_file = "/tmp/temp_profile.json" + + input_json_data = json.load(open(input_json_file, 'r')) + + form_kruize_url(cluster_type) + + metric_profiles = [] + + input_metric_profile_name = input_json_data['metadata']['name'] + + # Create metric profile using the specified json + num_metric_profiles = 100 + for i in range(num_metric_profiles): + json_data = copy.deepcopy(input_json_data) + # Modify the name for each profile + metric_profile_name = f"{input_metric_profile_name}_{i}" + json_data['metadata']['name'] = metric_profile_name + + # Write the modified profile to a temporary file + with open(temp_json_file, 'w') as file: + json.dump(json_data, file, indent=4) + + response = delete_metric_profile(temp_json_file) + print("delete metric profile = ", response.status_code) + + response = create_metric_profile(temp_json_file) + + data = response.json() + print(data['message']) + + assert response.status_code == SUCCESS_STATUS_CODE + assert data['status'] == SUCCESS_STATUS + assert data['message'] == CREATE_METRIC_PROFILE_SUCCESS_MSG % metric_profile_name + + response = list_metric_profiles(name=metric_profile_name, logging=False) + metric_profile_json = response.json() + + assert response.status_code == SUCCESS_200_STATUS_CODE + + # Validate the json against the json schema + errorMsg = validate_list_metric_profiles_json(metric_profile_json, list_metric_profiles_schema) + assert errorMsg == "" + + metric_profiles.append(copy.deepcopy(json_data)) + + # response = delete_metric_profile(temp_json_file) + # print("delete metric profile = ", response.status_code) + + # list all the metric profile names 
created + response = list_metric_profiles() + list_metric_profiles_json = response.json() + + assert len(list_metric_profiles_json) == num_metric_profiles, f"Expected {num_metric_profiles} metric profiles in response, but got {len(list_metric_profiles_json)}" + assert response.status_code == SUCCESS_200_STATUS_CODE + + # Validate the json against the json schema + errorMsg = validate_list_metric_profiles_json(list_metric_profiles_json, list_metric_profiles_without_parameters_schema) + assert errorMsg == "" + + # Write the profiles to the output file + with open(output_json_file, 'w') as file: + json.dump(metric_profiles, file, indent=4) + + for i in range(num_metric_profiles): + metric_profile = metric_profiles[i] + + with open(temp_json_file, 'w') as file: + json.dump(metric_profile, file, indent=4) + + response = delete_metric_profile(temp_json_file) + print("delete metric profile = ", response.status_code) + + +@pytest.mark.extended +@pytest.mark.parametrize("field, expected_status_code, expected_status", mandatory_fields) +def test_create_metric_profiles_mandatory_fields(cluster_type, field, expected_status_code, expected_status): + """ + Test Description: This test validates the creation of metric profile by missing the mandatory fields and validating + the error message and status code + """ + + form_kruize_url(cluster_type) + + # Create metric profile using the specified json + json_file = "/tmp/create_metric_profile.json" + input_json_file = "../json_files/resource_optimization_openshift_metric_profile.json" + json_data = json.load(open(input_json_file)) + + if field == "apiVersion": + json_data.pop("apiVersion", None) + elif field == "kind": + json_data.pop("kind", None) + elif field == "metadata": + json_data.pop("metadata", None) + elif field == "name": + json_data['metadata'].pop("name", None) + elif field == "slo": + json_data.pop("slo", None) + elif field == "direction": + json_data['slo'].pop("direction", None) + elif field == "objective_function": + 
json_data['slo'].pop("objective_function", None) + elif field == "function_type": + json_data['slo']['objective_function'].pop("function_type", None) + elif field == "function_variables": + json_data['slo'].pop("function_variables", None) + elif field == "name": + json_data['slo']['function_variables'][0].pop("name", None) + elif field == "datasource": + json_data['slo']['function_variables'][0].pop("datasource", None) + elif field == "value_type": + json_data['slo']['function_variables'][0].pop("value_type", None) + elif field == "aggregation_functions": + json_data['slo']['function_variables'][0].pop("aggregation_functions", None) + elif field == "function": + json_data['slo']['function_variables'][0]['aggregation_functions'][0].pop("function", None) + elif field == "query": + json_data['slo']['function_variables'][0]['aggregation_functions'][0].pop("query", None) + + print("\n*****************************************") + print(json_data) + print("*****************************************\n") + data = json.dumps(json_data) + with open(json_file, 'w') as file: + file.write(data) + + response = delete_metric_profile(input_json_file) + print("delete metric profile = ", response.status_code) + response = create_metric_profile(json_file) + + data = response.json() + print(data['message']) + + assert response.status_code == expected_status_code, \ + f"Mandatory field check failed for {field} actual - {response.status_code} expected - {expected_status_code}" + assert data['status'] == expected_status + + if response.status_code == ERROR_500_STATUS_CODE: + assert data['message'] == CREATE_METRIC_PROFILE_MISSING_MANDATORY_FIELD_MSG % field + else: + assert data['message'] == CREATE_METRIC_PROFILE_MISSING_MANDATORY_PARAMETERS_MSG % field + + response = delete_metric_profile(input_json_file) + print("delete metric profile = ", response.status_code) diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_list_metric_profiles.py 
b/tests/scripts/local_monitoring_tests/rest_apis/test_list_metric_profiles.py new file mode 100644 index 000000000..f57f505ba --- /dev/null +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_list_metric_profiles.py @@ -0,0 +1,253 @@ +""" +Copyright (c) 2024, 2024 Red Hat, IBM Corporation and others. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" +import pytest +import json +import sys + +sys.path.append("../../") + +from helpers.fixtures import * +from helpers.kruize import * +from helpers.utils import * +from helpers.list_metric_profiles_validate import * +from helpers.list_metric_profiles_without_parameters_schema import * + +@pytest.mark.sanity +def test_list_metric_profiles_with_name(cluster_type): + """ + Test Description: This test validates the response status code of listMetricProfiles API by validating the output + JSON response by passing metric profile 'name' query parameter + """ + input_json_file = "../json_files/resource_optimization_openshift_metric_profile.json" + form_kruize_url(cluster_type) + + response = delete_metric_profile(input_json_file) + print("delete metric profile = ", response.status_code) + + # Create metric profile using the specified json + response = create_metric_profile(input_json_file) + + data = response.json() + print(data['message']) + + assert response.status_code == SUCCESS_STATUS_CODE + assert data['status'] == SUCCESS_STATUS + + json_file = open(input_json_file, "r") + input_json = json.loads(json_file.read()) + metric_profile_name 
= input_json['metadata']['name'] + assert data['message'] == CREATE_METRIC_PROFILE_SUCCESS_MSG % metric_profile_name + + response = list_metric_profiles(metric_profile_name) + list_metric_profiles_json = response.json() + + # Validate the json against the json schema + errorMsg = validate_list_metric_profiles_json(list_metric_profiles_json, list_metric_profiles_schema) + assert errorMsg == "" + + + response = delete_metric_profile(input_json_file) + print("delete metric profile = ", response.status_code) + + +@pytest.mark.sanity +def test_list_metric_profiles_without_parameters(cluster_type): + """ + Test Description: This test validates the response status code of listMetricProfiles API by validating the output + JSON response without any parameters - expected output is listing all the metric profile names + """ + input_json_file = "../json_files/resource_optimization_openshift_metric_profile.json" + form_kruize_url(cluster_type) + + response = delete_metric_profile(input_json_file) + print("delete metric profile = ", response.status_code) + + # Create metric profile using the specified json + response = create_metric_profile(input_json_file) + + data = response.json() + print(data['message']) + + assert response.status_code == SUCCESS_STATUS_CODE + assert data['status'] == SUCCESS_STATUS + + json_file = open(input_json_file, "r") + input_json = json.loads(json_file.read()) + metric_profile_name = input_json['metadata']['name'] + assert data['message'] == CREATE_METRIC_PROFILE_SUCCESS_MSG % metric_profile_name + + response = list_metric_profiles() + list_metric_profiles_json = response.json() + + # Validate the json against the json schema + errorMsg = validate_list_metric_profiles_json(list_metric_profiles_json, list_metric_profiles_without_parameters_schema) + assert errorMsg == "" + + + response = delete_metric_profile(input_json_file) + print("delete metric profile = ", response.status_code) + + +@pytest.mark.negative +def 
test_list_metric_profiles_without_creating_profile(cluster_type): + """ + Test Description: This test validates the response status code of listMetricProfiles API by validating the output + JSON response without creating metric profile - expected output is an error message + """ + input_json_file = "../json_files/resource_optimization_openshift_metric_profile.json" + form_kruize_url(cluster_type) + + response = delete_metric_profile(input_json_file) + print("delete metric profile = ", response.status_code) + + + json_file = open(input_json_file, "r") + input_json = json.loads(json_file.read()) + metric_profile_name = input_json['metadata']['name'] + + response = list_metric_profiles(metric_profile_name) + data = response.json() + + assert response.status_code == ERROR_STATUS_CODE + assert data['message'] == LIST_METRIC_PROFILES_INVALID_NAME % metric_profile_name + + + response = delete_metric_profile(input_json_file) + print("delete metric profile = ", response.status_code) + + +@pytest.mark.negative +@pytest.mark.parametrize("test_name, expected_status_code, name", + [ + ("blank_name", 400, ""), + ("null_name", 400, "null"), + ("invalid_name", 400, "xyz") + ] + ) +def test_list_metric_profiles_invalid_name(test_name, expected_status_code, name, cluster_type): + """ + Test Description: This test validates the response status code of listMetricProfiles API by validating the output + JSON response by passing invalid query parameter 'name' - expected output is an error message + """ + input_json_file = "../json_files/resource_optimization_openshift_metric_profile.json" + form_kruize_url(cluster_type) + + response = delete_metric_profile(input_json_file) + print("delete metric profile = ", response.status_code) + + # Create metric profile using the specified json + response = create_metric_profile(input_json_file) + + data = response.json() + print(data['message']) + + assert response.status_code == SUCCESS_STATUS_CODE + assert data['status'] == SUCCESS_STATUS + + 
json_file = open(input_json_file, "r") + input_json = json.loads(json_file.read()) + metric_profile_name = input_json['metadata']['name'] + assert data['message'] == CREATE_METRIC_PROFILE_SUCCESS_MSG % metric_profile_name + + response = list_metric_profiles(name=name) + data = response.json() + + assert response.status_code == ERROR_STATUS_CODE + assert data['message'] == LIST_METRIC_PROFILES_INVALID_NAME % name + + response = delete_metric_profile(input_json_file) + print("delete metric profile = ", response.status_code) + + +@pytest.mark.sanity +@pytest.mark.parametrize("verbose", ["true", "false"]) +def test_list_metric_profiles_with_verbose(verbose, cluster_type): + """ + Test Description: This test validates the response status code of listMetricProfiles API by validating the output + JSON response by passing 'verbose' query parameter - expected output is list of all the metric profiles created + including all the metric profile fields when verbose=true and list of only the profile names when verbose=false + """ + input_json_file = "../json_files/resource_optimization_openshift_metric_profile.json" + form_kruize_url(cluster_type) + + response = delete_metric_profile(input_json_file) + print("delete metric profile = ", response.status_code) + + # Create metric profile using the specified json + response = create_metric_profile(input_json_file) + + data = response.json() + print(data['message']) + + assert response.status_code == SUCCESS_STATUS_CODE + assert data['status'] == SUCCESS_STATUS + + json_file = open(input_json_file, "r") + input_json = json.loads(json_file.read()) + metric_profile_name = input_json['metadata']['name'] + assert data['message'] == CREATE_METRIC_PROFILE_SUCCESS_MSG % metric_profile_name + + response = list_metric_profiles(verbose=verbose) + list_metric_profiles_json = response.json() + + # Validate the json against the json schema + errorMsg = validate_list_metric_profiles_json(list_metric_profiles_json, list_metric_profiles_schema) + 
assert errorMsg == "" + + + response = delete_metric_profile(input_json_file) + print("delete metric profile = ", response.status_code) + + +@pytest.mark.sanity +@pytest.mark.parametrize("verbose", ["false", "true"]) +def test_list_metric_profiles_name_and_verbose(verbose, cluster_type): + """ + Test Description: This test validates the response status code of listMetricProfiles API by validating the output + JSON response by passing both 'name' and 'verbose' query parameters - expected output is metric profile of the specified + name as verbose is set to true when name parameter is passed + """ + input_json_file = "../json_files/resource_optimization_openshift_metric_profile.json" + form_kruize_url(cluster_type) + + response = delete_metric_profile(input_json_file) + print("delete metric profile = ", response.status_code) + + # Create metric profile using the specified json + response = create_metric_profile(input_json_file) + + data = response.json() + print(data['message']) + + assert response.status_code == SUCCESS_STATUS_CODE + assert data['status'] == SUCCESS_STATUS + + json_file = open(input_json_file, "r") + input_json = json.loads(json_file.read()) + metric_profile_name = input_json['metadata']['name'] + assert data['message'] == CREATE_METRIC_PROFILE_SUCCESS_MSG % metric_profile_name + + response = list_metric_profiles(name= metric_profile_name, verbose=verbose) + list_metric_profiles_json = response.json() + + # Validate the json against the json schema + errorMsg = validate_list_metric_profiles_json(list_metric_profiles_json, list_metric_profiles_schema) + assert errorMsg == "" + + + response = delete_metric_profile(input_json_file) + print("delete metric profile = ", response.status_code) + diff --git a/tests/setup.log b/tests/setup.log old mode 100755 new mode 100644 index ded3bb8cc..5d3bc9ee3 --- a/tests/setup.log +++ b/tests/setup.log @@ -131,4 +131,4 @@ Every request from UI or curl will go to API server, this will figure out kruize } ] } -] \ No 
newline at end of file +]