diff --git a/tests/scripts/helpers/list_reco_json_local_monitoring_schema.py b/tests/scripts/helpers/list_reco_json_local_monitoring_schema.py index 23187207c..cecb0f803 100644 --- a/tests/scripts/helpers/list_reco_json_local_monitoring_schema.py +++ b/tests/scripts/helpers/list_reco_json_local_monitoring_schema.py @@ -444,3 +444,767 @@ } } +list_reco_namespace_json_local_monitoring_schema = { + "type": "array", + "items": { + "type": "object", + "properties": { + "cluster_name": { + "type": "string" + }, + "kubernetes_objects": { + "type": "array", + "items": { + "type": "object", + "properties": { + "namespace": { + "type": "string" + }, + "namespaces": { + "type": "object", + "properties": { + "namespace_name": { + "type": "string" + }, + "recommendations": { + "type": "object", + "properties": { + "version": { + "type": "string" + }, + "notifications": { + "type": "object", + "items": { + "type": "object", + "properties": { + "type": { + "type": "string" + }, + "message": { + "type": "string" + }, + "code": { + "type": "number" + } + }, + "required": [ + "type", + "message", + "code" + ] + } + }, + "data": { + "type": "object", + "patternProperties": { + "^\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}.\\d{3}Z$": { + "type": "object", + "properties": { + "notifications": { + "type": "object", + "items": { + "type": "object", + "properties": { + "type": { + "type": "string" + }, + "message": { + "type": "string" + }, + "code": { + "type": "number" + } + }, + "required": [ + "type", + "message", + "code" + ] + } + }, + "monitoring_end_time": { + "type": "string" + }, + "current": { + "type": "object", + "properties": { + "requests": { + "type": "object", + "properties": { + "memory": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + }, + "cpu": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + } + }, + "required": [] + }, + "limits": { + "type": "object", + "properties": { + "memory": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + }, + "cpu": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + } + }, + "required": [ + "memory", + "cpu" + ] + } + }, + "required": [] + }, + "recommendation_terms": { + "type": "object", + "properties": { + "short_term": { + "type": "object", + "properties": { + "notifications": { + "type": "object", + "items": { + "type": "object", + "properties": { + "type": { + "type": "string" + }, + "message": { + "type": "string" + }, + "code": { + "type": "number" + } + }, + "required": [ + "type", + "message", + "code" + ] + } + }, + "monitoring_start_time": { + "type": "string" + }, + "duration_in_hours": { + "type": "number" + }, + "recommendation_engines": { + "type": "object", + "properties": { + "cost": { + "type": "object", + "properties": { + "pods_count": { + "type": "number" + }, + "confidence_level": { + "type": "number" + }, + "config": { + "type": "object", + "properties": { + "requests": { + "type": "object", + "properties": { + "memory": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + }, + 
"cpu": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + } + }, + "required": [ + "memory", + "cpu" + ] + }, + "limits": { + "type": "object", + "properties": { + "memory": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + }, + "cpu": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + } + }, + "required": [ + "memory", + "cpu" + ] + } + }, + "required": [ + "requests", + "limits" + ] + }, + "variation": { + "type": "object", + "properties": { + "requests": { + "type": "object", + "properties": { + "memory": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + }, + "cpu": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + } + }, + "required": [ + "memory", + "cpu" + ] + }, + "limits": { + "type": "object", + "properties": { + "memory": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + }, + "cpu": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + } + }, + "required": [ + "memory", + "cpu" + ] + } + }, + "required": [ + "requests", + "limits" + ] + }, + "notifications": { + "type": "object", + "items": { + "type": "object", + "properties": { + "type": { + "type": "string" + }, + "message": { + "type": "string" + }, + "code": { + "type": "number" + } + }, + "required": [ + "type", + "message", + "code" + ] + } + } + }, + "required": [ + "pods_count", + "confidence_level", + "config", + "variation", + "notifications" + ] + }, + "performance": { + "type": "object", + "properties": { + "monitoring_start_time": { + "type": "string" + }, + "pods_count": { + "type": "number" + }, + "confidence_level": { + "type": "number" + }, + "config": { + "type": "object", + "properties": { + "requests": { + "type": "object", + "properties": { + "memory": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + }, + "cpu": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + } + }, + "required": [ + "memory", + "cpu" + ] + }, + "limits": { + "type": "object", + "properties": { + "memory": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + }, + "cpu": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + } + }, + "required": [ + "memory", + "cpu" + ] + } + }, + "required": [ + "requests", + "limits" + ] + }, + "variation": { + "type": "object", + "properties": { + "requests": { + "type": "object", + "properties": { + "memory": { + "type": "object", + "properties": { + "amount": { + "type": 
"number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + }, + "cpu": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + } + }, + "required": [ + "memory", + "cpu" + ] + }, + "limits": { + "type": "object", + "properties": { + "memory": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + }, + "cpu": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + } + }, + "required": [ + "memory", + "cpu" + ] + } + }, + "required": [ + "requests", + "limits" + ] + }, + "notifications": { + "type": "object", + "items": { + "type": "object", + "properties": { + "type": { + "type": "string" + }, + "message": { + "type": "string" + }, + "code": { + "type": "number" + } + }, + "required": [ + "type", + "message", + "code" + ] + } + } + }, + "required": [] + } + }, + "required": [] + }, + "plots": { + "type": "object", + "properties": { + "datapoints": { + "type": "number" + }, + "plots_data": { + "type": "object", + "patternProperties": { + "^\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}.\\d{3}Z$": { + "type": "object", + "properties": { + "cpuUsage": { + "type": "object", + "properties": { + "min": { + "type": "number" + }, + "q1": { + "type": "number" + }, + "median": { + "type": "number" + }, + "q3": { + "type": "number" + }, + "max": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "min", + "q1", + "median", + "q3", + "max", + "format" + ] + }, + "memoryUsage": { + "type": "object", + "properties": { + "min": { + "type": "number" + }, + "q1": { + "type": "number" + }, + "median": { + "type": "number" + }, + "q3": { + "type": "number" + }, + "max": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "min", + "q1", + "median", + "q3", + "max", + "format" + ] + }, + }, + "required": [] + } + }, + "required": [] + } + }, + "required": [ + "datapoints", + "plots_data" + ] + } + }, + "required": [] + } + }, + "required": [] + } + }, + "required": [] + } + }, + "required": [] + } + }, + "required": [ + "version", + "notifications", + "data" + ] + } + }, + "required": [ + "namespace_name", + "recommendations" + ] + }, + "containers": { + "type": "array", + "items": {} + } + }, + "required": [ + "namespace", + "namespaces" + ] + } + }, + "version": { + "type": "string" + }, + "experiment_name": { + "type": "string" + } + }, + "required": [ + "cluster_name", + "kubernetes_objects", + "version", + "experiment_name" + ] + } +} \ No newline at end of file diff --git a/tests/scripts/helpers/utils.py b/tests/scripts/helpers/utils.py index ec6403474..66736c752 100644 --- a/tests/scripts/helpers/utils.py +++ b/tests/scripts/helpers/utils.py @@ -165,6 +165,8 @@ LONG_TERM: NOTIFICATION_CODE_FOR_LONG_TERM_RECOMMENDATIONS_AVAILABLE, } +NAMESPACE_EXPERIMENT_TYPE = "namespace" +CONTAINER_EXPERIMENT_TYPE = "container" # version,experiment_name,cluster_name,performance_profile,mode,target_cluster,type,name,namespace,container_image_name,container_name,measurement_duration,threshold create_exp_test_data = { @@ -520,25 +522,32 @@ def validate_kubernetes_obj(create_exp_kubernetes_obj, update_results_kubernetes def validate_local_monitoring_kubernetes_obj(create_exp_kubernetes_obj, 
list_reco_kubernetes_obj, expected_duration_in_hours, test_name): - # Validate type, name, namespace - assert list_reco_kubernetes_obj["type"] == create_exp_kubernetes_obj["type"] - assert list_reco_kubernetes_obj["name"] == create_exp_kubernetes_obj["name"] - assert list_reco_kubernetes_obj["namespace"] == create_exp_kubernetes_obj["namespace"] + experiment_type = create_exp_kubernetes_obj.get("experiment_type") + if experiment_type == NAMESPACE_EXPERIMENT_TYPE: + assert list_reco_kubernetes_obj["namespaces"]["namespace_name"] == create_exp_kubernetes_obj["namespaces"]["namespace_name"] + list_reco_namespace = list_reco_kubernetes_obj["namespaces"] + create_exp_namespace = create_exp_kubernetes_obj["namespaces"] + validate_local_monitoring_namespace(create_exp_namespace, list_reco_namespace, expected_duration_in_hours, test_name) + else: + # Validate type, name, namespace + assert list_reco_kubernetes_obj["type"] == create_exp_kubernetes_obj["type"] + assert list_reco_kubernetes_obj["name"] == create_exp_kubernetes_obj["name"] + assert list_reco_kubernetes_obj["namespace"] == create_exp_kubernetes_obj["namespace"] - exp_containers_length = len(create_exp_kubernetes_obj["containers"]) - list_reco_containers_length = len(list_reco_kubernetes_obj["containers"]) + exp_containers_length = len(create_exp_kubernetes_obj["containers"]) + list_reco_containers_length = len(list_reco_kubernetes_obj["containers"]) - # Validate if all the containers are present - for i in range(exp_containers_length): - list_reco_container = None + # Validate if all the containers are present + for i in range(exp_containers_length): + list_reco_container = None - for j in range(list_reco_containers_length): - if list_reco_kubernetes_obj["containers"][j]["container_name"] == \ - create_exp_kubernetes_obj["containers"][i]["container_name"]: - list_reco_container = list_reco_kubernetes_obj["containers"][j] - create_exp_container = create_exp_kubernetes_obj["containers"][i] - validate_local_monitoring_container(create_exp_container, list_reco_container, expected_duration_in_hours, test_name) + for j in range(list_reco_containers_length): + if list_reco_kubernetes_obj["containers"][j]["container_name"] == \ + create_exp_kubernetes_obj["containers"][i]["container_name"]: + list_reco_container = list_reco_kubernetes_obj["containers"][j] + create_exp_container = create_exp_kubernetes_obj["containers"][i] + validate_local_monitoring_container(create_exp_container, list_reco_container, expected_duration_in_hours, test_name) def validate_container(update_results_container, update_results_json, list_reco_container, expected_duration_in_hours, test_name): @@ -733,6 +742,86 @@ def validate_local_monitoring_container(create_exp_container, list_reco_containe data = list_reco_container["recommendations"]["data"] assert len(data) == 0, f"Data is not empty! Length of data - Actual = {len(data)} expected = 0" + +def validate_local_monitoring_namespace(create_exp_namespace, list_reco_namespace, expected_duration_in_hours, test_name): + # Validate namespace name + if create_exp_namespace is not None and list_reco_namespace is not None: + assert create_exp_namespace["namespace_name"] == list_reco_namespace["namespace_name"], \ + f"Namespace names did not match! Actual - {list_reco_namespace['namespace_name']} Expected - {create_exp_namespace['namespace_name']}" + + if expected_duration_in_hours is None: + duration_in_hours = 0.0 + else: + duration_in_hours = expected_duration_in_hours + + if check_if_recommendations_are_present(list_reco_namespace["recommendations"]): + interval_end_time = list(list_reco_namespace['recommendations']['data'].keys())[0] + print(f"interval_end_time = {interval_end_time}") + + terms_obj = list_reco_namespace["recommendations"]["data"][interval_end_time]["recommendation_terms"] + current_config = list_reco_namespace["recommendations"]["data"][interval_end_time]["current"] + + duration_terms = {'short_term': 4, 'medium_term': 7, 'long_term': 15} + for term in duration_terms.keys(): + if check_if_recommendations_are_present(terms_obj[term]): + print(f"reco present for term {term}") + + interval_start_time = list_reco_namespace['recommendations']['data'][interval_end_time]['recommendation_terms'][term]['monitoring_start_time'] + # Validate the precision of the valid duration + duration = terms_obj[term]["duration_in_hours"] + assert validate_duration_in_hours_decimal_precision(duration), f"The value '{duration}' for " \ + f"'{term}' has more than two decimal places" + + monitoring_start_time = term_based_start_time(interval_end_time, term) + assert terms_obj[term]["monitoring_start_time"] == monitoring_start_time, \ + f"actual = {terms_obj[term]['monitoring_start_time']} expected = {monitoring_start_time}" + + # Validate duration in hours + if expected_duration_in_hours is None: + duration_in_hours = set_duration_based_on_terms(duration_in_hours, term, + interval_start_time, interval_end_time) + + if test_name is not None: + + if MEDIUM_TERM_TEST in test_name and term == MEDIUM_TERM: + assert terms_obj[term]["duration_in_hours"] == duration_in_hours, \ + f"Duration in hours did not match! Actual = {terms_obj[term]['duration_in_hours']} expected = {duration_in_hours}" + elif SHORT_TERM_TEST in test_name and term == SHORT_TERM: + assert terms_obj[term]["duration_in_hours"] == duration_in_hours, \ + f"Duration in hours did not match! Actual = {terms_obj[term]['duration_in_hours']} expected = {duration_in_hours}" + elif LONG_TERM_TEST in test_name and term == LONG_TERM: + assert terms_obj[term]["duration_in_hours"] == duration_in_hours, \ + f"Duration in hours did not match! Actual = {terms_obj[term]['duration_in_hours']} expected = {duration_in_hours}" + else: + print( + f"Actual = {terms_obj[term]['duration_in_hours']} expected = {duration_in_hours}") + assert terms_obj[term]["duration_in_hours"] == duration_in_hours, \ + f"Duration in hours did not match! 
Actual = {terms_obj[term]['duration_in_hours']} expected = {duration_in_hours}" + duration_in_hours = set_duration_based_on_terms(duration_in_hours, term, interval_start_time, + interval_end_time) + + # Get engine objects + engines_list = ["cost", "performance"] + + # Extract recommendation engine objects + recommendation_engines_object = None + if "recommendation_engines" in terms_obj[term]: + recommendation_engines_object = terms_obj[term]["recommendation_engines"] + if recommendation_engines_object is not None: + for engine_entry in engines_list: + if engine_entry in terms_obj[term]["recommendation_engines"]: + engine_obj = terms_obj[term]["recommendation_engines"][engine_entry] + validate_config_local_monitoring(engine_obj["config"]) + validate_variation_local_monitoring(current_config, engine_obj["config"], engine_obj["variation"], engine_obj) + else: + notifications = list_reco_namespace["recommendations"]["notifications"] + if NOTIFICATION_CODE_FOR_NOT_ENOUGH_DATA in notifications: + assert notifications[NOTIFICATION_CODE_FOR_NOT_ENOUGH_DATA]["message"] == NOT_ENOUGH_DATA_MSG + + data = list_reco_namespace["recommendations"]["data"] + assert len(data) == 0, f"Data is not empty! Length of data - Actual = {len(data)} expected = 0" + + def validate_plots(terms_obj, duration_terms, term): plots = terms_obj[term][PLOTS] datapoint = plots[DATA_POINTS] diff --git a/tests/scripts/local_monitoring_tests/Local_monitoring_tests.md b/tests/scripts/local_monitoring_tests/Local_monitoring_tests.md index 4923d612f..49aab43f0 100644 --- a/tests/scripts/local_monitoring_tests/Local_monitoring_tests.md +++ b/tests/scripts/local_monitoring_tests/Local_monitoring_tests.md @@ -69,6 +69,29 @@ Here are the test scenarios: - Test with invalid values such as blank, null or an invalid value for name query parameter in listMetricProfiles API - List metric profiles without creating metric profile + +### **Create Experiment API tests** + +Here are the test scenarios: + +- Create namespace experiment specifying namespace experiment type +- Create namespace experiment without specifying experiment type +- Create container experiment specifying container experiment type +- Create container experiment without specifying experiment type +- Create experiment specifying both namespaces and containers without specifying the experiment type +- Create experiment specifying both namespaces and containers specifying the namespace experiment type +- Create experiment specifying both namespaces and containers specifying the container experiment type +- Create namespace experiment specifying containers +- Create container experiment specifying namespaces +- Create multiple experiments with valid namespace + +### **List Recommendations API tests** + +Here are the test scenarios: + +- List recommendations for a valid namespace experiment + + The above tests are developed using pytest framework and the tests are run using shell script wrapper that does the following: - Deploys kruize in non-CRD mode using the [deploy script](https://github.com/kruize/autotune/blob/master/deploy.sh) from the autotune repo - Creates a resource optimization metric profile using the [createMetricProfile API](/design/MetricProfileAPI.md) diff --git a/tests/scripts/local_monitoring_tests/json_files/create_exp_template.json b/tests/scripts/local_monitoring_tests/json_files/create_exp_template.json new file mode 100644 index 000000000..2d3b9c1cd --- /dev/null +++ b/tests/scripts/local_monitoring_tests/json_files/create_exp_template.json @@ -0,0 +1,29 @@ 
+[{ + "version": "{{version}}", + "experiment_name": "{{experiment_name}}", + "cluster_name": "{{cluster_name}}", + "performance_profile": "{{performance_profile}}", + "mode": "{{mode}}", + "target_cluster": "{{target_cluster}}", + "datasource": "{{datasource}}", + "kubernetes_objects": [{ + "experiment_type": "{{experiment_type}}", + "type": "{{kubernetes_obj_type}}", + "name": "{{name}}", + "namespace": "{{namespace}}", + "namespaces": { + "namespace_name": "{{namespace_name}}" + }, + "containers": [{ + "container_image_name": "{{container_image_name}}", + "container_name": "{{container_name}}" + }] + }], + "trial_settings": { + "measurement_duration": "{{measurement_duration}}" + }, + "recommendation_settings": { + "threshold": "{{threshold}}" + } +}] + diff --git a/tests/scripts/local_monitoring_tests/json_files/create_multiple_namespace_exp.json b/tests/scripts/local_monitoring_tests/json_files/create_multiple_namespace_exp.json new file mode 100644 index 000000000..2272e7f3c --- /dev/null +++ b/tests/scripts/local_monitoring_tests/json_files/create_multiple_namespace_exp.json @@ -0,0 +1,48 @@ +[ + { + "version": "v2.0", + "experiment_name": "tfb-workload-namespace", + "cluster_name": "default", + "performance_profile": "resource-optimization-local-monitoring", + "mode": "monitor", + "target_cluster": "local", + "datasource": "prometheus-1", + "kubernetes_objects": [ + { + "experiment_type": "namespace", + "namespaces": { + "namespace_name": "default" + } + } + ], + "trial_settings": { + "measurement_duration": "15min" + }, + "recommendation_settings": { + "threshold": "0.1" + } + }, + { + "version": "v2.0", + "experiment_name": "multiple-import-namespace", + "cluster_name": "default", + "performance_profile": "resource-optimization-openshift", + "mode": "monitor", + "target_cluster": "local", + "datasource": "prometheus-1", + "kubernetes_objects": [ + { + "experiment_type": "namespace", + "namespaces": { + "namespace_name": "test-multiple-import" + } + } + ], + "trial_settings": { + "measurement_duration": "15min" + }, + "recommendation_settings": { + "threshold": "0.1" + } + } +] diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py b/tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py new file mode 100644 index 000000000..d9a4fa887 --- /dev/null +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py @@ -0,0 +1,231 @@ +""" +Copyright (c) 2024 Red Hat, IBM Corporation and others. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" +import pytest +import sys +sys.path.append("../../") + +from helpers.fixtures import * +from helpers.kruize import * +from helpers.utils import * +from jinja2 import Environment, FileSystemLoader + +@pytest.mark.sanity +@pytest.mark.parametrize("test_name, expected_status_code, version, experiment_name, cluster_name, performance_profile, mode, target_cluster, datasource, experiment_type, kubernetes_obj_type, name, namespace, namespace_name, container_image_name, container_name, measurement_duration, threshold", + [ + ("valid_namespace_exp_with_exp_type", SUCCESS_STATUS_CODE, "v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "namespace", None, None, None, "default", None, None, "15min", "0.1"), + ("valid_container_exp_without_exp_type", SUCCESS_STATUS_CODE, "v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", None, "deployment", "tfb-qrh-sample", "default", None, "kruize/tfb-qrh:1.13.2.F_et17", "tfb-server", "15min", "0.1"), + ("valid_container_exp_with_exp_type", SUCCESS_STATUS_CODE, "v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "container", "deployment", "tfb-qrh-sample", "default", None, "kruize/tfb-qrh:1.13.2.F_et17", "tfb-server", "15min", "0.1"), + ] +) +def test_create_exp_valid_tests(test_name, expected_status_code, version, experiment_name, cluster_name, performance_profile, mode, target_cluster, datasource, experiment_type, kubernetes_obj_type, name, namespace, namespace_name, container_image_name, container_name, measurement_duration, threshold, cluster_type): + """ + Test Description: This test validates the response status code of createExperiment API + for namespace experiment by passing a valid input for the json + """ + # Generate a temporary JSON filename + tmp_json_file = "/tmp/create_exp_" + test_name + ".json" + print("tmp_json_file = ", tmp_json_file) + + # Load the Jinja2 template + environment = Environment(loader=FileSystemLoader("../json_files/")) + template = environment.get_template("create_exp_template.json") + + # In case of test_name with "null", strip the specific fields + if "null" in test_name: + field = test_name.replace("null_", "") + json_file = "../json_files/create_exp_template.json" + filename = "/tmp/create_exp_template.json" + strip_double_quotes_for_field(json_file, field, filename) + environment = Environment(loader=FileSystemLoader("/tmp/")) + template = environment.get_template("create_exp_template.json") + + # Render the JSON content from the template + content = template.render( + version=version, + experiment_name=experiment_name, + cluster_name=cluster_name, + performance_profile=performance_profile, + mode=mode, + target_cluster=target_cluster, + datasource=datasource, + experiment_type=experiment_type, + kubernetes_obj_type=kubernetes_obj_type, + name=name, + namespace=namespace, + namespace_name=namespace_name, + container_image_name=container_image_name, + container_name=container_name, + measurement_duration=measurement_duration, + threshold=threshold + ) + + # Convert rendered content to a dictionary + json_content = json.loads(content) + + if json_content[0]["kubernetes_objects"][0]["type"] == "None": + json_content[0]["kubernetes_objects"][0].pop("type") + if json_content[0]["kubernetes_objects"][0]["name"] == "None": + json_content[0]["kubernetes_objects"][0].pop("name") + if 
json_content[0]["kubernetes_objects"][0]["namespace"] == "None": + json_content[0]["kubernetes_objects"][0].pop("namespace") + if json_content[0]["kubernetes_objects"][0]["containers"][0]["container_image_name"] == "None": + json_content[0]["kubernetes_objects"][0].pop("containers") + if json_content[0]["kubernetes_objects"][0]["namespaces"]["namespace_name"] == "None": + json_content[0]["kubernetes_objects"][0].pop("namespaces") + if json_content[0]["kubernetes_objects"][0]["experiment_type"] == "None": + json_content[0]["kubernetes_objects"][0].pop("experiment_type") + + # Write the final JSON to the temp file + with open(tmp_json_file, mode="w", encoding="utf-8") as message: + json.dump(json_content, message, indent=4) + + input_json_file = tmp_json_file + form_kruize_url(cluster_type) + + response = delete_experiment(input_json_file) + print("delete exp = ", response.status_code) + + # Create experiment using the specified json + response = create_experiment(input_json_file) + + data = response.json() + print(data['message']) + + assert response.status_code == SUCCESS_STATUS_CODE + assert data['status'] == SUCCESS_STATUS + assert data['message'] == CREATE_EXP_SUCCESS_MSG + + response = delete_experiment(input_json_file) + print("delete exp = ", response.status_code) + + +@pytest.mark.negative +@pytest.mark.parametrize("test_name, expected_status_code, version, experiment_name, cluster_name, performance_profile, mode, target_cluster, datasource, experiment_type, kubernetes_obj_type, name, namespace, namespace_name, container_image_name, container_name, measurement_duration, threshold", + [ + ("invalid_namespace_exp_without_exp_type", ERROR_500_STATUS_CODE, "v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", None, None, None, None, "default", None, None, "15min", "0.1"), + ("invalid_both_container_and_namespace_without_exp_type", ERROR_500_STATUS_CODE, "v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", None, "deployment", "tfb-qrh-sample", "default", "default", "kruize/tfb-qrh:1.13.2.F_et17", "tfb-server", "15min", "0.1"), + ("invalid_both_container_and_namespace_namespace_exp_type", ERROR_500_STATUS_CODE, "v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "namespace", "deployment", "tfb-qrh-sample", "default", "default", "kruize/tfb-qrh:1.13.2.F_et17", "tfb-server", "15min", "0.1"), + ("invalid_both_container_and_namespace_container_exp_type", ERROR_500_STATUS_CODE, "v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "container", "deployment", "tfb-qrh-sample", "default", "default", "kruize/tfb-qrh:1.13.2.F_et17", "tfb-server", "15min", "0.1"), + ("invalid_namespace_exp_type_with_only_containers", ERROR_500_STATUS_CODE, "v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "namespace", "deployment", "tfb-qrh-sample", "default", None, "kruize/tfb-qrh:1.13.2.F_et17", "tfb-server", "15min", "0.1"), + ("invalid_container_exp_type_with_only_namespace", ERROR_500_STATUS_CODE, "v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "container", None, None, None, "default", None, None, "15min", "0.1") + ] +) +def test_create_exp_invalid_tests(test_name, expected_status_code, 
version, experiment_name, cluster_name, performance_profile, mode, target_cluster, datasource, experiment_type, kubernetes_obj_type, name, namespace, namespace_name, container_image_name, container_name, measurement_duration, threshold, cluster_type): + """ + Test Description: This test validates the response status code of createExperiment API + for namespace and container experiments by passing an invalid input json + """ + # Generate a temporary JSON filename + tmp_json_file = "/tmp/create_exp_" + test_name + ".json" + print("tmp_json_file = ", tmp_json_file) + + # Load the Jinja2 template + environment = Environment(loader=FileSystemLoader("../json_files/")) + template = environment.get_template("create_exp_template.json") + + # In case of test names with "null", strip the specific fields + if "null" in test_name: + field = test_name.replace("null_", "") + json_file = "../json_files/create_exp_template.json" + filename = "/tmp/create_exp_template.json" + strip_double_quotes_for_field(json_file, field, filename) + environment = Environment(loader=FileSystemLoader("/tmp/")) + template = environment.get_template("create_exp_template.json") + + # Render the JSON content from the template + content = template.render( + version=version, + experiment_name=experiment_name, + cluster_name=cluster_name, + performance_profile=performance_profile, + mode=mode, + target_cluster=target_cluster, + datasource=datasource, + experiment_type=experiment_type, + kubernetes_obj_type=kubernetes_obj_type, + name=name, + namespace=namespace, + namespace_name=namespace_name, + container_image_name=container_image_name, + container_name=container_name, + measurement_duration=measurement_duration, + threshold=threshold + ) + + # Convert rendered content to a dictionary + json_content = json.loads(content) + + if json_content[0]["kubernetes_objects"][0]["type"] == "None": + json_content[0]["kubernetes_objects"][0].pop("type") + if json_content[0]["kubernetes_objects"][0]["name"] == "None": + json_content[0]["kubernetes_objects"][0].pop("name") + if json_content[0]["kubernetes_objects"][0]["namespace"] == "None": + json_content[0]["kubernetes_objects"][0].pop("namespace") + if json_content[0]["kubernetes_objects"][0]["containers"][0]["container_image_name"] == "None": + json_content[0]["kubernetes_objects"][0].pop("containers") + if json_content[0]["kubernetes_objects"][0]["namespaces"]["namespace_name"] == "None": + json_content[0]["kubernetes_objects"][0].pop("namespaces") + if json_content[0]["kubernetes_objects"][0]["experiment_type"] == "None": + json_content[0]["kubernetes_objects"][0].pop("experiment_type") + + # Write the final JSON to the temp file + with open(tmp_json_file, mode="w", encoding="utf-8") as message: + json.dump(json_content, message, indent=4) + + input_json_file = tmp_json_file + form_kruize_url(cluster_type) + + response = delete_experiment(input_json_file) + print("delete exp = ", response.status_code) + + # Create experiment using the specified json + response = create_experiment(input_json_file) + + data = response.json() + print(data['message']) + + assert response.status_code == expected_status_code + assert data['status'] == ERROR_STATUS + + response = delete_experiment(input_json_file) + print("delete exp = ", response.status_code) + + +@pytest.mark.negative +def test_create_multiple_namespace_exp(cluster_type): + """ + Test Description: This test validates the response status code of createExperiment API + if multiple entries are present in the create experiment json + """
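 + # Kruize's createExperiment API handles a single experiment entry per request; this + # bulk payload of two namespace experiments is expected to fail with CREATE_EXP_BULK_ERROR_MSG. + input_json_file = 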
"../json_files/create_multiple_namespace_exp.json" + form_kruize_url(cluster_type) + + response = delete_experiment(input_json_file) + print("delete exp = ", response.status_code) + + # Create experiment using the specified json + response = create_experiment(input_json_file) + + data = response.json() + print(data['message']) + + assert response.status_code == ERROR_STATUS_CODE + assert data['status'] == ERROR_STATUS + # validate error message + assert data['message'] == CREATE_EXP_BULK_ERROR_MSG + + response = delete_experiment(input_json_file) + print("delete exp = ", response.status_code) diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py b/tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py new file mode 100644 index 000000000..5eda0d6dd --- /dev/null +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py @@ -0,0 +1,168 @@ +""" +Copyright (c) 2024 Red Hat, IBM Corporation and others. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" +import datetime +import json +import time + +import pytest +import sys +sys.path.append("../../") + +from helpers.all_terms_list_reco_json_schema import all_terms_list_reco_json_schema +from helpers.fixtures import * +from helpers.generate_rm_jsons import * +from helpers.kruize import * +from helpers.list_reco_json_local_monitoring_schema import * +from helpers.medium_and_long_term_list_reco_json_schema import medium_and_long_term_list_reco_json_schema +from helpers.medium_term_list_reco_json_schema import * +from helpers.long_term_list_reco_json_schema import * +from helpers.list_reco_json_validate import * +from helpers.list_metric_profiles_validate import * +from helpers.list_metric_profiles_without_parameters_schema import * +from helpers.short_and_long_term_list_reco_json_schema import short_and_long_term_list_reco_json_schema +from helpers.short_and_medium_term_list_reco_json_schema import short_and_medium_term_list_reco_json_schema +from helpers.short_term_list_reco_json_schema import short_term_list_reco_json_schema +from helpers.utils import * +from jinja2 import Environment, FileSystemLoader + + +metric_profile_dir = get_metric_profile_dir() + +@pytest.mark.sanity +@pytest.mark.parametrize("test_name, expected_status_code, version, experiment_name, cluster_name, performance_profile, mode, target_cluster, datasource, experiment_type, kubernetes_obj_type, name, namespace, namespace_name, container_image_name, container_name, measurement_duration, threshold", + [ + ("list_reco_default_cluster1", SUCCESS_STATUS_CODE, "v2.0", "test-default-ns", "cluster-1", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "namespace", None, None, None, "default", None, None, "15min", "0.1"), + ("list_reco_default_cluster2", SUCCESS_STATUS_CODE, "v2.0", "test-default-ns", "cluster-2", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "namespace", None, None, None, "default", None, None, "15min", "0.1") + ] +) +def 
test_list_recommendations_namespace_single_result(test_name, expected_status_code, version, experiment_name, cluster_name, performance_profile, mode, target_cluster, datasource, experiment_type, kubernetes_obj_type, name, namespace, namespace_name, container_image_name, container_name, measurement_duration, threshold, cluster_type): + """ + Test Description: This test validates listRecommendations by passing a valid + namespace experiment name + """ + # Generate a temporary JSON filename + tmp_json_file = "/tmp/create_exp_" + test_name + ".json" + print("tmp_json_file = ", tmp_json_file) + + # Load the Jinja2 template + environment = Environment(loader=FileSystemLoader("../json_files/")) + template = environment.get_template("create_exp_template.json") + + # Render the JSON content from the template + content = template.render( + version=version, + experiment_name=experiment_name, + cluster_name=cluster_name, + performance_profile=performance_profile, + mode=mode, + target_cluster=target_cluster, + datasource=datasource, + experiment_type=experiment_type, + kubernetes_obj_type=kubernetes_obj_type, + name=name, + namespace=namespace, + namespace_name=namespace_name, + container_image_name=container_image_name, + container_name=container_name, + measurement_duration=measurement_duration, + threshold=threshold + ) + + # Convert rendered content to a dictionary + json_content = json.loads(content) + + if json_content[0]["kubernetes_objects"][0]["type"] == "None": + json_content[0]["kubernetes_objects"][0].pop("type") + if json_content[0]["kubernetes_objects"][0]["name"] == "None": + json_content[0]["kubernetes_objects"][0].pop("name") + if json_content[0]["kubernetes_objects"][0]["namespace"] == "None": + json_content[0]["kubernetes_objects"][0].pop("namespace") + if json_content[0]["kubernetes_objects"][0]["containers"][0]["container_image_name"] == "None": + json_content[0]["kubernetes_objects"][0].pop("containers") + + # Write the final JSON to the temp file + with open(tmp_json_file, mode="w", encoding="utf-8") as message: + json.dump(json_content, message, indent=4) + + input_json_file = tmp_json_file + + form_kruize_url(cluster_type) + response = delete_experiment(input_json_file) + print("delete exp = ", response.status_code) + + #Install default metric profile + metric_profile_json_file = metric_profile_dir / 'resource_optimization_local_monitoring.json' + response = delete_metric_profile(metric_profile_json_file) + print("delete metric profile = ", response.status_code) + + # Create metric profile using the specified json + response = create_metric_profile(metric_profile_json_file) + + data = response.json() + print(data['message']) + + assert response.status_code == SUCCESS_STATUS_CODE + assert data['status'] == SUCCESS_STATUS + + json_file = open(metric_profile_json_file, "r") + input_json = json.loads(json_file.read()) + metric_profile_name = input_json['metadata']['name'] + assert data['message'] == CREATE_METRIC_PROFILE_SUCCESS_MSG % metric_profile_name + + response = list_metric_profiles(name=metric_profile_name, logging=False) + metric_profile_json = response.json() + + assert response.status_code == SUCCESS_200_STATUS_CODE + + # Validate the json against the json schema + errorMsg = validate_list_metric_profiles_json(metric_profile_json, list_metric_profiles_schema) + assert errorMsg == "" + + # Create namespace experiment using the specified json + response = create_experiment(input_json_file) + + data = response.json() + print(data['message']) + + assert response.status_code 
== SUCCESS_STATUS_CODE + assert data['status'] == SUCCESS_STATUS + assert data['message'] == CREATE_EXP_SUCCESS_MSG + + # generate recommendations + json_file = open(input_json_file, "r") + input_json = json.loads(json_file.read()) + exp_name = input_json[0]['experiment_name'] + + response = generate_recommendations(exp_name) + assert response.status_code == SUCCESS_STATUS_CODE + + # Invoke list recommendations for the specified experiment + response = list_recommendations(exp_name) + assert response.status_code == SUCCESS_200_STATUS_CODE + list_reco_json = response.json() + + # Validate the json against the json schema + errorMsg = validate_list_reco_json(list_reco_json, list_reco_namespace_json_local_monitoring_schema) + assert errorMsg == "" + + # Validate the json values + namespace_exp_json = read_json_data_from_file(input_json_file) + validate_local_monitoring_reco_json(namespace_exp_json[0], list_reco_json[0]) + + # Delete experiment + response = delete_experiment(input_json_file) + print("delete exp = ", response.status_code) + assert response.status_code == SUCCESS_STATUS_CODE \ No newline at end of file diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_local_monitoring_e2e_workflow.py b/tests/scripts/local_monitoring_tests/rest_apis/test_local_monitoring_e2e_workflow.py index ae1246963..47a7022d7 100644 --- a/tests/scripts/local_monitoring_tests/rest_apis/test_local_monitoring_e2e_workflow.py +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_local_monitoring_e2e_workflow.py @@ -44,6 +44,7 @@ from helpers.list_reco_json_local_monitoring_schema import * from helpers.list_reco_json_validate import * from helpers.import_metadata_json_validate import * +from jinja2 import Environment, FileSystemLoader metric_profile_dir = get_metric_profile_dir() @@ -116,17 +117,49 @@ def test_list_recommendations_multiple_exps_for_datasource_workloads(cluster_typ errorMsg = validate_list_metadata_json(list_metadata_json, list_metadata_json_verbose_true_schema) assert errorMsg == "" + # Generate a temporary JSON filename + tmp_json_file = "/tmp/create_exp_" + ".json" + print("tmp_json_file = ", tmp_json_file) + + # Load the Jinja2 template + environment = Environment(loader=FileSystemLoader("../json_files/")) + template = environment.get_template("create_exp_template.json") + + # Render the JSON content from the template + content = template.render( + version="v2.0", experiment_name="test-default-ns", cluster_name="default", performance_profile="resource-optimization-local-monitoring", + mode="monitor", target_cluster="local", datasource="prometheus-1", experiment_type="namespace", kubernetes_obj_type=None, name=None, + namespace=None, namespace_name="default", container_image_name=None, container_name=None, measurement_duration="15min", threshold="0.1" + ) + + # Convert rendered content to a dictionary + json_content = json.loads(content) + json_content[0]["kubernetes_objects"][0].pop("type") + json_content[0]["kubernetes_objects"][0].pop("name") + json_content[0]["kubernetes_objects"][0].pop("namespace") + json_content[0]["kubernetes_objects"][0].pop("containers") + + # Write the final JSON to the temp file + with open(tmp_json_file, mode="w", encoding="utf-8") as message: + json.dump(json_content, message, indent=4) + + # namespace exp json file + namespace_exp_json_file = tmp_json_file # delete tfb experiments tfb_exp_json_file = "../json_files/create_tfb_exp.json" tfb_db_exp_json_file = "../json_files/create_tfb_db_exp.json" + response = delete_experiment(tfb_exp_json_file) 
print("delete tfb exp = ", response.status_code) response = delete_experiment(tfb_db_exp_json_file) print("delete tfb_db exp = ", response.status_code) + response = delete_experiment(namespace_exp_json_file) + print("delete namespace exp = ", response.status_code) + #Install default metric profile metric_profile_json_file = metric_profile_dir / 'resource_optimization_local_monitoring.json' response = delete_metric_profile(metric_profile_json_file) @@ -175,6 +208,16 @@ def test_list_recommendations_multiple_exps_for_datasource_workloads(cluster_typ assert data['status'] == SUCCESS_STATUS assert data['message'] == CREATE_EXP_SUCCESS_MSG + # create namespace experiment + response = create_experiment(namespace_exp_json_file) + + data = response.json() + print(data['message']) + + assert response.status_code == SUCCESS_STATUS_CODE + assert data['status'] == SUCCESS_STATUS + assert data['message'] == CREATE_EXP_SUCCESS_MSG + # Wait for the container to complete wait_for_container_to_complete(container_id) @@ -187,6 +230,10 @@ def test_list_recommendations_multiple_exps_for_datasource_workloads(cluster_typ input_json = json.loads(json_file.read()) tfb_db_exp_name = input_json[0]['experiment_name'] + json_file = open(namespace_exp_json_file, "r") + input_json = json.loads(json_file.read()) + namespace_exp_name = input_json[0]['experiment_name'] + response = generate_recommendations(tfb_exp_name) assert response.status_code == SUCCESS_STATUS_CODE @@ -220,6 +267,22 @@ def test_list_recommendations_multiple_exps_for_datasource_workloads(cluster_typ tfb_db_exp_json = read_json_data_from_file(tfb_db_exp_json_file) validate_local_monitoring_reco_json(tfb_db_exp_json[0], list_reco_json[0]) + response = generate_recommendations(namespace_exp_name) + assert response.status_code == SUCCESS_STATUS_CODE + + # Invoke list recommendations for the specified experiment + response = list_recommendations(namespace_exp_name) + assert response.status_code == SUCCESS_200_STATUS_CODE + list_reco_json = response.json() + + # Validate the json against the json schema + errorMsg = validate_list_reco_json(list_reco_json, list_reco_namespace_json_local_monitoring_schema) + assert errorMsg == "" + + # Validate the json values + namespace_exp_json = read_json_data_from_file(namespace_exp_json_file) + validate_local_monitoring_reco_json(namespace_exp_json[0], list_reco_json[0]) + # Delete tfb experiment response = delete_experiment(tfb_exp_json_file) print("delete exp = ", response.status_code) @@ -230,6 +293,11 @@ def test_list_recommendations_multiple_exps_for_datasource_workloads(cluster_typ print("delete exp = ", response.status_code) assert response.status_code == SUCCESS_STATUS_CODE + # Delete namespace experiment + response = delete_experiment(namespace_exp_json_file) + print("delete exp = ", response.status_code) + assert response.status_code == SUCCESS_STATUS_CODE + # Delete Metric Profile response = delete_metric_profile(metric_profile_json_file) print("delete metric profile = ", response.status_code) diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_namespace_reco_e2e_workflow.py b/tests/scripts/local_monitoring_tests/rest_apis/test_namespace_reco_e2e_workflow.py new file mode 100644 index 000000000..bbc0438cf --- /dev/null +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_namespace_reco_e2e_workflow.py @@ -0,0 +1,339 @@ +""" +Copyright (c) 2024, 2024 Red Hat, IBM Corporation and others. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" +import copy +import json + +import pytest +import sys +import time +import shutil +sys.path.append("../../") + +from helpers.fixtures import * +from helpers.generate_rm_jsons import * +from helpers.kruize import * +from helpers.short_term_list_reco_json_schema import * +from helpers.list_reco_json_validate import * +from helpers.list_datasources_json_validate import * +from helpers.utils import * +from helpers.utils import benchmarks_install +from helpers.utils import clone_repo +from helpers.utils import apply_tfb_load +from helpers.utils import wait_for_container_to_complete +from helpers.utils import validate_local_monitoring_reco_json +from helpers.list_metadata_json_validate import * +from helpers.list_metadata_json_schema import * +from helpers.list_metadata_json_verbose_true_schema import * +from helpers.list_metadata_json_cluster_name_without_verbose_schema import * +from helpers.list_metric_profiles_validate import * +from helpers.list_metric_profiles_without_parameters_schema import * +from helpers.list_reco_json_local_monitoring_schema import * +from helpers.import_metadata_json_validate import * +from jinja2 import Environment, FileSystemLoader + +metric_profile_dir = get_metric_profile_dir() + + +@pytest.mark.test_e2e +def test_list_recommendations_namespace_exps(cluster_type): + """ + Test Description: This test validates list recommendations for multiple namespace experiments posted using different json files + """ + clone_repo("https://github.com/kruize/benchmarks") + + create_namespace("ns1") + create_namespace("ns2") + create_namespace("ns3") + + benchmarks_install(namespace="ns1") + benchmarks_install(namespace="ns2") + benchmarks_install(namespace="ns3") + + container_id1 = apply_tfb_load("ns1", cluster_type) + container_id2 = apply_tfb_load("ns2", cluster_type) + container_id3 = apply_tfb_load("ns3", cluster_type) + + print(container_id1) + print(container_id2) + print(container_id3) + + # list all datasources + form_kruize_url(cluster_type) + + # Get the datasources name + datasource_name = None + response = list_datasources(datasource_name) + + list_datasources_json = response.json() + + assert response.status_code == SUCCESS_200_STATUS_CODE + + # Validate the json against the json schema + errorMsg = validate_list_datasources_json(list_datasources_json, list_datasources_json_schema) + assert errorMsg == "" + + + # Import datasource metadata + input_json_file = "../json_files/import_metadata.json" + + response = delete_metadata(input_json_file) + print("delete metadata = ", response.status_code) + + # Import metadata using the specified json + response = import_metadata(input_json_file) + metadata_json = response.json() + + # Validate the json against the json schema + errorMsg = validate_import_metadata_json(metadata_json, import_metadata_json_schema) + assert errorMsg == "" + + + # Display metadata from prometheus-1 datasource + json_data = 
json.load(open(input_json_file)) + datasource = json_data['datasource_name'] + + response = list_metadata(datasource) + + list_metadata_json = response.json() + assert response.status_code == SUCCESS_200_STATUS_CODE + + # Validate the json against the json schema + errorMsg = validate_list_metadata_json(list_metadata_json, list_metadata_json_schema) + assert errorMsg == "" + + + # Display metadata for default namespace + # Currently only default cluster is supported by Kruize + cluster_name = "default" + + response = list_metadata(datasource=datasource, cluster_name=cluster_name, verbose="true") + + list_metadata_json = response.json() + assert response.status_code == SUCCESS_200_STATUS_CODE + + # Validate the json against the json schema + errorMsg = validate_list_metadata_json(list_metadata_json, list_metadata_json_verbose_true_schema) + assert errorMsg == "" + + # Generate a temporary JSON filename + tmp_json_file_1 = "/tmp/create_exp_1" + ".json" + tmp_json_file_2 = "/tmp/create_exp_2" + ".json" + tmp_json_file_3 = "/tmp/create_exp_3" + ".json" + + # Load the Jinja2 template + environment = Environment(loader=FileSystemLoader("../json_files/")) + template = environment.get_template("create_exp_template.json") + + # Render the JSON content from the template + content = template.render( + version="v2.0", experiment_name="test-ns1", cluster_name="default", performance_profile="resource-optimization-local-monitoring", + mode="monitor", target_cluster="local", datasource="prometheus-1", experiment_type="namespace", kubernetes_obj_type=None, name=None, + namespace=None, namespace_name="ns1", container_image_name=None, container_name=None, measurement_duration="15min", threshold="0.1" + ) + + # Convert rendered content to a dictionary + json_content = json.loads(content) + json_content[0]["kubernetes_objects"][0].pop("type") + json_content[0]["kubernetes_objects"][0].pop("name") + json_content[0]["kubernetes_objects"][0].pop("namespace") + json_content[0]["kubernetes_objects"][0].pop("containers") + + # Write the final JSON to the temp file + with open(tmp_json_file_1, mode="w", encoding="utf-8") as message: + json.dump(json_content, message, indent=4) + + # namespace exp json file + ns1_exp_json_file = tmp_json_file_1 + + json_content[0]["experiment_name"] = "test-ns2" + json_content[0]["kubernetes_objects"][0]["namespaces"]["namespace_name"] = "ns2" + + # Write the final JSON to the temp file + with open(tmp_json_file_2, mode="w", encoding="utf-8") as message: + json.dump(json_content, message, indent=4) + + # namespace exp json file + ns2_exp_json_file = tmp_json_file_2 + + json_content[0]["experiment_name"] = "test-ns3" + json_content[0]["kubernetes_objects"][0]["namespaces"]["namespace_name"] = "ns3" + + # Write the final JSON to the temp file + with open(tmp_json_file_3, mode="w", encoding="utf-8") as message: + json.dump(json_content, message, indent=4) + + # namespace exp json file + ns3_exp_json_file = tmp_json_file_3 + + # delete namespace experiments + response = delete_experiment(ns1_exp_json_file) + print("delete ns1 exp = ", response.status_code) + + response = delete_experiment(ns2_exp_json_file) + print("delete ns2 exp = ", response.status_code) + + response = delete_experiment(ns3_exp_json_file) + print("delete ns3 exp = ", response.status_code) + + # Install default metric profile + metric_profile_json_file = metric_profile_dir / 'resource_optimization_local_monitoring.json' + response = delete_metric_profile(metric_profile_json_file) + print("delete metric profile = ", 
response.status_code) + + # Create metric profile using the specified json + response = create_metric_profile(metric_profile_json_file) + + data = response.json() + print(data['message']) + + assert response.status_code == SUCCESS_STATUS_CODE + assert data['status'] == SUCCESS_STATUS + + json_file = open(metric_profile_json_file, "r") + input_json = json.loads(json_file.read()) + metric_profile_name = input_json['metadata']['name'] + assert data['message'] == CREATE_METRIC_PROFILE_SUCCESS_MSG % metric_profile_name + + response = list_metric_profiles(name=metric_profile_name, logging=False) + metric_profile_json = response.json() + + assert response.status_code == SUCCESS_200_STATUS_CODE + + # Validate the json against the json schema + errorMsg = validate_list_metric_profiles_json(metric_profile_json, list_metric_profiles_schema) + assert errorMsg == "" + + + # create namespace experiment + response = create_experiment(ns1_exp_json_file) + + data = response.json() + print(data['message']) + + assert response.status_code == SUCCESS_STATUS_CODE + assert data['status'] == SUCCESS_STATUS + assert data['message'] == CREATE_EXP_SUCCESS_MSG + + # create namespace experiment + response = create_experiment(ns2_exp_json_file) + + data = response.json() + print(data['message']) + + assert response.status_code == SUCCESS_STATUS_CODE + assert data['status'] == SUCCESS_STATUS + assert data['message'] == CREATE_EXP_SUCCESS_MSG + + # create namespace experiment + response = create_experiment(ns3_exp_json_file) + + data = response.json() + print(data['message']) + + assert response.status_code == SUCCESS_STATUS_CODE + assert data['status'] == SUCCESS_STATUS + assert data['message'] == CREATE_EXP_SUCCESS_MSG + + # Wait for the container to complete + wait_for_container_to_complete(container_id1) + wait_for_container_to_complete(container_id2) + wait_for_container_to_complete(container_id3) + + + # generate recommendations + json_file = open(ns1_exp_json_file, "r") + input_json = json.loads(json_file.read()) + ns1_exp_name = input_json[0]['experiment_name'] + + json_file = open(ns2_exp_json_file, "r") + input_json = json.loads(json_file.read()) + ns2_exp_name = input_json[0]['experiment_name'] + + json_file = open(ns3_exp_json_file, "r") + input_json = json.loads(json_file.read()) + ns3_exp_name = input_json[0]['experiment_name'] + + response = generate_recommendations(ns1_exp_name) + assert response.status_code == SUCCESS_STATUS_CODE + + # Invoke list recommendations for the specified experiment + response = list_recommendations(ns1_exp_name) + assert response.status_code == SUCCESS_200_STATUS_CODE + list_reco_json = response.json() + + # Validate the json against the json schema + errorMsg = validate_list_reco_json(list_reco_json, list_reco_namespace_json_local_monitoring_schema) + assert errorMsg == "" + + # Validate the json values + ns1_exp_json = read_json_data_from_file(ns1_exp_json_file) + validate_local_monitoring_reco_json(ns1_exp_json[0], list_reco_json[0]) + + response = generate_recommendations(ns2_exp_name) + assert response.status_code == SUCCESS_STATUS_CODE + + # Invoke list recommendations for the specified experiment + response = list_recommendations(ns2_exp_name) + assert response.status_code == SUCCESS_200_STATUS_CODE + list_reco_json = response.json() + + # Validate the json against the json schema + errorMsg = validate_list_reco_json(list_reco_json, list_reco_namespace_json_local_monitoring_schema) + assert errorMsg == "" + + # Validate the json values + ns2_exp_json = 
read_json_data_from_file(ns2_exp_json_file) + validate_local_monitoring_reco_json(ns2_exp_json[0], list_reco_json[0]) + + response = generate_recommendations(ns3_exp_name) + assert response.status_code == SUCCESS_STATUS_CODE + + # Invoke list recommendations for the specified experiment + response = list_recommendations(ns3_exp_name) + assert response.status_code == SUCCESS_200_STATUS_CODE + list_reco_json = response.json() + + # Validate the json against the json schema + errorMsg = validate_list_reco_json(list_reco_json, list_reco_namespace_json_local_monitoring_schema) + assert errorMsg == "" + + # Validate the json values + ns3_exp_json = read_json_data_from_file(ns3_exp_json_file) + validate_local_monitoring_reco_json(ns3_exp_json[0], list_reco_json[0]) + + + # Delete namespace experiment + response = delete_experiment(ns1_exp_json_file) + print("delete exp = ", response.status_code) + assert response.status_code == SUCCESS_STATUS_CODE + + response = delete_experiment(ns2_exp_json_file) + print("delete exp = ", response.status_code) + assert response.status_code == SUCCESS_STATUS_CODE + + response = delete_experiment(ns3_exp_json_file) + print("delete exp = ", response.status_code) + assert response.status_code == SUCCESS_STATUS_CODE + + # Delete Metric Profile + response = delete_metric_profile(metric_profile_json_file) + print("delete metric profile = ", response.status_code) + assert response.status_code == SUCCESS_STATUS_CODE + + # Remove benchmarks directory + shutil.rmtree("benchmarks")
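Note: each of the new test files repeats the same "render create_exp_template.json, then drop the fields Jinja2 rendered as the string "None"" block. A shared helper in tests/scripts/helpers could remove that duplication; the sketch below is illustrative only — the name render_create_exp_json, its signature, and its location are assumptions, not part of this patch.

```python
# Hypothetical helper (not part of this patch): consolidates the repeated
# render-then-prune block used by the new namespace/container tests above.
import json

from jinja2 import Environment, FileSystemLoader


def render_create_exp_json(out_file, template_dir="../json_files/", **params):
    # Render create_exp_template.json with the supplied keyword arguments.
    environment = Environment(loader=FileSystemLoader(template_dir))
    template = environment.get_template("create_exp_template.json")
    json_content = json.loads(template.render(**params))

    kubernetes_obj = json_content[0]["kubernetes_objects"][0]
    # Parameters passed as None are rendered by Jinja2 as the string "None";
    # drop those keys so the payload contains only the intended fields.
    for field in ["type", "name", "namespace", "experiment_type"]:
        if kubernetes_obj.get(field) == "None":
            kubernetes_obj.pop(field)
    if kubernetes_obj["containers"][0]["container_image_name"] == "None":
        kubernetes_obj.pop("containers")
    if kubernetes_obj["namespaces"]["namespace_name"] == "None":
        kubernetes_obj.pop("namespaces")

    # Write the final JSON to the temp file, as the tests above do.
    with open(out_file, mode="w", encoding="utf-8") as message:
        json.dump(json_content, message, indent=4)
    return out_file
```

With such a helper, a test's setup would reduce to a single call, e.g. input_json_file = render_create_exp_json("/tmp/create_exp_" + test_name + ".json", version=version, experiment_name=experiment_name, ..., threshold=threshold), keeping the "None"-pruning behaviour identical to the inline blocks in this patch.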