From 56a493a77bd920140341dc4d552933c764d90f5c Mon Sep 17 00:00:00 2001 From: Shekhar Saxena Date: Mon, 16 Sep 2024 15:12:46 +0530 Subject: [PATCH 01/16] adding-tests-for-create-experiment-api Signed-off-by: Shekhar Saxena --- .../Local_monitoring_tests.md | 16 ++ .../create_container_exp_with_namespace.json | 23 ++ .../create_exp_namespace_container_both.json | 28 ++ ...mespace_container_both_container_type.json | 32 +++ ...mespace_container_both_namespace_type.json | 29 ++ .../json_files/create_namespace_exp.json | 23 ++ .../create_namespace_exp_with_containers.json | 29 ++ .../create_namespace_exp_without_type.json | 22 ++ .../create_tfb_exp_container_type.json | 29 ++ .../rest_apis/test_create_experiment.py | 270 ++++++++++++++++++ 10 files changed, 501 insertions(+) create mode 100644 tests/scripts/local_monitoring_tests/json_files/create_container_exp_with_namespace.json create mode 100644 tests/scripts/local_monitoring_tests/json_files/create_exp_namespace_container_both.json create mode 100644 tests/scripts/local_monitoring_tests/json_files/create_exp_namespace_container_both_container_type.json create mode 100644 tests/scripts/local_monitoring_tests/json_files/create_exp_namespace_container_both_namespace_type.json create mode 100644 tests/scripts/local_monitoring_tests/json_files/create_namespace_exp.json create mode 100644 tests/scripts/local_monitoring_tests/json_files/create_namespace_exp_with_containers.json create mode 100644 tests/scripts/local_monitoring_tests/json_files/create_namespace_exp_without_type.json create mode 100644 tests/scripts/local_monitoring_tests/json_files/create_tfb_exp_container_type.json create mode 100644 tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py diff --git a/tests/scripts/local_monitoring_tests/Local_monitoring_tests.md b/tests/scripts/local_monitoring_tests/Local_monitoring_tests.md index 4923d612f..07738d62b 100644 --- a/tests/scripts/local_monitoring_tests/Local_monitoring_tests.md +++ b/tests/scripts/local_monitoring_tests/Local_monitoring_tests.md @@ -69,6 +69,22 @@ Here are the test scenarios: - Test with invalid values such as blank, null or an invalid value for name query parameter in listMetricProfiles API - List metric profiles without creating metric profile + +### **Create Experiment API tests** + +Here are the test scenarios: + +- Create namespace experiment specifying namespace experiment type +- Create namespace experiment without specifying experiment type +- Create container experiment specifying container experiment type +- Create container experiment without specifying experiment type +- Create experiment specifying both namespaces and containers without specifying the experiment type +- Create experiment specifying both namespaces and containers specifying the namespace experiment type +- Create experiment specifying both namespaces and containers specifying the container experiment type +- Create namespace experiment specifying containers +- Create container experiment specifying namespaces + + The above tests are developed using pytest framework and the tests are run using shell script wrapper that does the following: - Deploys kruize in non-CRD mode using the [deploy script](https://github.com/kruize/autotune/blob/master/deploy.sh) from the autotune repo - Creates a resource optimization metric profile using the [createMetricProfile API](/design/MetricProfileAPI.md) diff --git a/tests/scripts/local_monitoring_tests/json_files/create_container_exp_with_namespace.json 
b/tests/scripts/local_monitoring_tests/json_files/create_container_exp_with_namespace.json new file mode 100644 index 000000000..60254209b --- /dev/null +++ b/tests/scripts/local_monitoring_tests/json_files/create_container_exp_with_namespace.json @@ -0,0 +1,23 @@ +[{ + "version": "v2.0", + "experiment_name": "tfb-workload-namespace", + "cluster_name": "default", + "performance_profile": "resource-optimization-openshift", + "mode": "monitor", + "target_cluster": "local", + "datasource": "prometheus-1", + "kubernetes_objects": [ + { + "experiment_type": "container", + "namespaces": { + "namespace_name": "default" + } + } + ], + "trial_settings": { + "measurement_duration": "15min" + }, + "recommendation_settings": { + "threshold": "0.1" + } +}] diff --git a/tests/scripts/local_monitoring_tests/json_files/create_exp_namespace_container_both.json b/tests/scripts/local_monitoring_tests/json_files/create_exp_namespace_container_both.json new file mode 100644 index 000000000..a47ad053b --- /dev/null +++ b/tests/scripts/local_monitoring_tests/json_files/create_exp_namespace_container_both.json @@ -0,0 +1,28 @@ +[{ + "version": "v2.0", + "experiment_name": "tfb-workload-namespace", + "cluster_name": "default", + "performance_profile": "resource-optimization-openshift", + "mode": "monitor", + "target_cluster": "local", + "datasource": "prometheus-1", + "kubernetes_objects": [ + { + "namespaces": { + "namespace_name": "default" + }, + "containers": [ + { + "container_image_name": "kruize/tfb-qrh:1.13.2.F_et17", + "container_name": "tfb-server" + } + ] + } + ], + "trial_settings": { + "measurement_duration": "15min" + }, + "recommendation_settings": { + "threshold": "0.1" + } +}] diff --git a/tests/scripts/local_monitoring_tests/json_files/create_exp_namespace_container_both_container_type.json b/tests/scripts/local_monitoring_tests/json_files/create_exp_namespace_container_both_container_type.json new file mode 100644 index 000000000..f0686f1fa --- /dev/null +++ b/tests/scripts/local_monitoring_tests/json_files/create_exp_namespace_container_both_container_type.json @@ -0,0 +1,32 @@ +[{ + "version": "v2.0", + "experiment_name": "tfb-workload-namespace", + "cluster_name": "default", + "performance_profile": "resource-optimization-openshift", + "mode": "monitor", + "target_cluster": "local", + "datasource": "prometheus-1", + "kubernetes_objects": [ + { + "experiment_type": "container", + "type": "deployment", + "name": "tfb-qrh-sample", + "namespace": "default", + "namespaces": { + "namespace_name": "default" + }, + "containers": [ + { + "container_image_name": "kruize/tfb-qrh:1.13.2.F_et17", + "container_name": "tfb-server" + } + ] + } + ], + "trial_settings": { + "measurement_duration": "15min" + }, + "recommendation_settings": { + "threshold": "0.1" + } +}] diff --git a/tests/scripts/local_monitoring_tests/json_files/create_exp_namespace_container_both_namespace_type.json b/tests/scripts/local_monitoring_tests/json_files/create_exp_namespace_container_both_namespace_type.json new file mode 100644 index 000000000..1924f3c93 --- /dev/null +++ b/tests/scripts/local_monitoring_tests/json_files/create_exp_namespace_container_both_namespace_type.json @@ -0,0 +1,29 @@ +[{ + "version": "v2.0", + "experiment_name": "tfb-workload-namespace", + "cluster_name": "default", + "performance_profile": "resource-optimization-openshift", + "mode": "monitor", + "target_cluster": "local", + "datasource": "prometheus-1", + "kubernetes_objects": [ + { + "experiment_type": "namespace", + "namespaces": { + 
"namespace_name": "default" + }, + "containers": [ + { + "container_image_name": "kruize/tfb-qrh:1.13.2.F_et17", + "container_name": "tfb-server" + } + ] + } + ], + "trial_settings": { + "measurement_duration": "15min" + }, + "recommendation_settings": { + "threshold": "0.1" + } +}] diff --git a/tests/scripts/local_monitoring_tests/json_files/create_namespace_exp.json b/tests/scripts/local_monitoring_tests/json_files/create_namespace_exp.json new file mode 100644 index 000000000..c8503d74c --- /dev/null +++ b/tests/scripts/local_monitoring_tests/json_files/create_namespace_exp.json @@ -0,0 +1,23 @@ +[{ + "version": "v2.0", + "experiment_name": "tfb-workload-namespace", + "cluster_name": "default", + "performance_profile": "resource-optimization-openshift", + "mode": "monitor", + "target_cluster": "local", + "datasource": "prometheus-1", + "kubernetes_objects": [ + { + "experiment_type": "namespace", + "namespaces": { + "namespace_name": "default" + } + } + ], + "trial_settings": { + "measurement_duration": "15min" + }, + "recommendation_settings": { + "threshold": "0.1" + } +}] diff --git a/tests/scripts/local_monitoring_tests/json_files/create_namespace_exp_with_containers.json b/tests/scripts/local_monitoring_tests/json_files/create_namespace_exp_with_containers.json new file mode 100644 index 000000000..60e54b6cf --- /dev/null +++ b/tests/scripts/local_monitoring_tests/json_files/create_namespace_exp_with_containers.json @@ -0,0 +1,29 @@ +[{ + "version": "v2.0", + "experiment_name": "monitor_tfb_benchmark", + "cluster_name": "default", + "performance_profile": "resource-optimization-openshift", + "mode": "monitor", + "target_cluster": "local", + "datasource": "prometheus-1", + "kubernetes_objects": [ + { + "experiment_type": "namespace", + "type": "deployment", + "name": "tfb-qrh-sample", + "namespace": "default", + "containers": [ + { + "container_image_name": "kruize/tfb-qrh:1.13.2.F_et17", + "container_name": "tfb-server" + } + ] + } + ], + "trial_settings": { + "measurement_duration": "15min" + }, + "recommendation_settings": { + "threshold": "0.1" + } +}] diff --git a/tests/scripts/local_monitoring_tests/json_files/create_namespace_exp_without_type.json b/tests/scripts/local_monitoring_tests/json_files/create_namespace_exp_without_type.json new file mode 100644 index 000000000..488eb94a2 --- /dev/null +++ b/tests/scripts/local_monitoring_tests/json_files/create_namespace_exp_without_type.json @@ -0,0 +1,22 @@ +[{ + "version": "v2.0", + "experiment_name": "tfb-workload-namespace", + "cluster_name": "default", + "performance_profile": "resource-optimization-openshift", + "mode": "monitor", + "target_cluster": "local", + "datasource": "prometheus-1", + "kubernetes_objects": [ + { + "namespaces": { + "namespace_name": "default" + } + } + ], + "trial_settings": { + "measurement_duration": "15min" + }, + "recommendation_settings": { + "threshold": "0.1" + } +}] diff --git a/tests/scripts/local_monitoring_tests/json_files/create_tfb_exp_container_type.json b/tests/scripts/local_monitoring_tests/json_files/create_tfb_exp_container_type.json new file mode 100644 index 000000000..3edef2954 --- /dev/null +++ b/tests/scripts/local_monitoring_tests/json_files/create_tfb_exp_container_type.json @@ -0,0 +1,29 @@ +[{ + "version": "v2.0", + "experiment_name": "monitor_tfb_benchmark", + "cluster_name": "default", + "performance_profile": "resource-optimization-openshift", + "mode": "monitor", + "target_cluster": "local", + "datasource": "prometheus-1", + "kubernetes_objects": [ + { + 
"experiment_type": "container", + "type": "deployment", + "name": "tfb-qrh-sample", + "namespace": "default", + "containers": [ + { + "container_image_name": "kruize/tfb-qrh:1.13.2.F_et17", + "container_name": "tfb-server" + } + ] + } + ], + "trial_settings": { + "measurement_duration": "15min" + }, + "recommendation_settings": { + "threshold": "0.1" + } +}] diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py b/tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py new file mode 100644 index 000000000..7db2c7e97 --- /dev/null +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py @@ -0,0 +1,270 @@ +""" +Copyright (c) 2024 Red Hat, IBM Corporation and others. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" +import pytest +import sys +sys.path.append("../../") + +from helpers.fixtures import * +from helpers.kruize import * +from helpers.utils import * +from jinja2 import Environment, FileSystemLoader + +mandatory_fields = [ + ("version", ERROR_STATUS_CODE, ERROR_STATUS), + ("cluster_name", ERROR_STATUS_CODE, ERROR_STATUS), + ("experiment_name", ERROR_STATUS_CODE, ERROR_STATUS), + ("mode", ERROR_STATUS_CODE, ERROR_STATUS), + ("target_cluster", ERROR_STATUS_CODE, ERROR_STATUS), + ("kubernetes_objects", ERROR_STATUS_CODE, ERROR_STATUS), + ("type", ERROR_STATUS_CODE, ERROR_STATUS), + ("kubernetes_objects_name", ERROR_STATUS_CODE, ERROR_STATUS), + ("namespace", ERROR_STATUS_CODE, ERROR_STATUS), + ("containers", ERROR_STATUS_CODE, ERROR_STATUS), + ("container_image_name", ERROR_STATUS_CODE, ERROR_STATUS), + ("container_name", ERROR_STATUS_CODE, ERROR_STATUS), + ("selector", SUCCESS_STATUS_CODE, SUCCESS_STATUS), + ("namespace", ERROR_STATUS_CODE, ERROR_STATUS), + ("performance_profile", ERROR_STATUS_CODE, ERROR_STATUS), + ("slo", SUCCESS_STATUS_CODE, SUCCESS_STATUS), + ("recommendation_settings", ERROR_STATUS_CODE, ERROR_STATUS), + ("trial_settings", ERROR_STATUS_CODE, ERROR_STATUS), + ("kubernetes_objects_name_selector", ERROR_STATUS_CODE, ERROR_STATUS), + ("performance_profile_slo", ERROR_STATUS_CODE, ERROR_STATUS) +] + +csvfile = "/tmp/create_exp_test_data.csv" + +@pytest.mark.sanity +def test_create_namespace_exp_with_namespace_type(cluster_type): + """ + Test Description: This test validates the response status code of createExperiment API + for namespace experiment by passing a valid input for the json + """ + input_json_file = "../json_files/create_namespace_exp.json" + form_kruize_url(cluster_type) + + response = delete_experiment(input_json_file) + print("delete exp = ", response.status_code) + + # Create experiment using the specified json + response = create_experiment(input_json_file) + + data = response.json() + print(data['message']) + + assert response.status_code == SUCCESS_STATUS_CODE + assert data['status'] == SUCCESS_STATUS + assert data['message'] == CREATE_EXP_SUCCESS_MSG + + response = delete_experiment(input_json_file) + print("delete exp = ", response.status_code) + +@pytest.mark.negative +def 
test_create_namespace_exp_without_type(cluster_type):
+    """
+    Test Description: This test validates the response status code of the createExperiment API
+    for a namespace experiment by passing an invalid input json that does not specify the experiment type
+    """
+    input_json_file = "../json_files/create_namespace_exp_without_type.json"
+    form_kruize_url(cluster_type)
+
+    response = delete_experiment(input_json_file)
+    print("delete exp = ", response.status_code)
+
+    # Create experiment using the specified json
+    response = create_experiment(input_json_file)
+
+    data = response.json()
+    print(data['message'])
+
+    assert response.status_code == ERROR_500_STATUS_CODE
+    assert data['status'] == ERROR_STATUS
+
+    response = delete_experiment(input_json_file)
+    print("delete exp = ", response.status_code)
+
+
+@pytest.mark.sanity
+def test_create_container_exp_without_type(cluster_type):
+    """
+    Test Description: This test validates the response status code of the createExperiment API
+    for a container experiment by passing a valid input json that does not specify the experiment type
+    """
+    input_json_file = "../json_files/create_tfb_exp.json"
+    form_kruize_url(cluster_type)
+
+    response = delete_experiment(input_json_file)
+    print("delete exp = ", response.status_code)
+
+    # Create experiment using the specified json
+    response = create_experiment(input_json_file)
+
+    data = response.json()
+    print(data['message'])
+
+    assert response.status_code == SUCCESS_STATUS_CODE
+    assert data['status'] == SUCCESS_STATUS
+    assert data['message'] == CREATE_EXP_SUCCESS_MSG
+
+    response = delete_experiment(input_json_file)
+    print("delete exp = ", response.status_code)
+
+@pytest.mark.sanity
+def test_create_container_exp_with_container_type(cluster_type):
+    """
+    Test Description: This test validates the response status code of the createExperiment API
+    for a container experiment by passing a valid input json that specifies the container experiment type
+    """
+    input_json_file = "../json_files/create_tfb_exp_container_type.json"
+    form_kruize_url(cluster_type)
+
+    response = delete_experiment(input_json_file)
+    print("delete exp = ", response.status_code)
+
+    # Create experiment using the specified json
+    response = create_experiment(input_json_file)
+
+    data = response.json()
+    print(data['message'])
+
+    assert response.status_code == SUCCESS_STATUS_CODE
+    assert data['status'] == SUCCESS_STATUS
+    assert data['message'] == CREATE_EXP_SUCCESS_MSG
+
+    response = delete_experiment(input_json_file)
+    print("delete exp = ", response.status_code)
+
+
+@pytest.mark.negative
+def test_create_exp_with_container_namespace_both_without_type(cluster_type):
+    """
+    Test Description: This test validates the response status code of the createExperiment API
+    when both containers and namespaces are passed and the experiment type is not passed
+    """
+    input_json_file = "../json_files/create_exp_namespace_container_both.json"
+    form_kruize_url(cluster_type)
+
+    response = delete_experiment(input_json_file)
+    print("delete exp = ", response.status_code)
+
+    # Create experiment using the specified json
+    response = create_experiment(input_json_file)
+
+    data = response.json()
+    print(data['message'])
+
+    assert response.status_code == ERROR_500_STATUS_CODE
+    assert data['status'] == ERROR_STATUS
+
+    response = delete_experiment(input_json_file)
+    print("delete exp = ", response.status_code)
+
+@pytest.mark.negative
+def test_create_exp_with_container_namespace_both_container_type(cluster_type):
+    """
+    Test Description: This test validates the response status code of the createExperiment API
+    when both containers and namespaces are passed and the experiment type is container
+    """
+    input_json_file = "../json_files/create_exp_namespace_container_both_container_type.json"
+    form_kruize_url(cluster_type)
+
+    response = delete_experiment(input_json_file)
+    print("delete exp = ", response.status_code)
+
+    # Create experiment using the specified json
+    response = create_experiment(input_json_file)
+
+    data = response.json()
+    print(data['message'])
+
+    assert response.status_code == ERROR_500_STATUS_CODE
+    assert data['status'] == ERROR_STATUS
+
+    response = delete_experiment(input_json_file)
+    print("delete exp = ", response.status_code)
+
+@pytest.mark.negative
+def test_create_exp_with_container_namespace_both_namespace_type(cluster_type):
+    """
+    Test Description: This test validates the response status code of the createExperiment API
+    when both containers and namespaces are passed and the experiment type is namespace
+    """
+    input_json_file = "../json_files/create_exp_namespace_container_both_namespace_type.json"
+    form_kruize_url(cluster_type)
+
+    response = delete_experiment(input_json_file)
+    print("delete exp = ", response.status_code)
+
+    # Create experiment using the specified json
+    response = create_experiment(input_json_file)
+
+    data = response.json()
+    print(data['message'])
+
+    assert response.status_code == ERROR_500_STATUS_CODE
+    assert data['status'] == ERROR_STATUS
+
+    response = delete_experiment(input_json_file)
+    print("delete exp = ", response.status_code)
+
+@pytest.mark.negative
+def test_create_namespace_exp_with_containers(cluster_type):
+    """
+    Test Description: This test validates the response status code of the createExperiment API
+    when a containers array is passed and the experiment type is namespace
+    """
+    input_json_file = "../json_files/create_namespace_exp_with_containers.json"
+    form_kruize_url(cluster_type)
+
+    response = delete_experiment(input_json_file)
+    print("delete exp = ", response.status_code)
+
+    # Create experiment using the specified json
+    response = create_experiment(input_json_file)
+
+    data = response.json()
+    print(data['message'])
+
+    assert response.status_code == ERROR_500_STATUS_CODE
+    assert data['status'] == ERROR_STATUS
+
+    response = delete_experiment(input_json_file)
+    print("delete exp = ", response.status_code)
+
+@pytest.mark.negative
+def test_create_container_exp_with_namespace(cluster_type):
+    """
+    Test Description: This test validates the response status code of the createExperiment API
+    when a namespaces object is passed and the experiment type is container
+    """
+    input_json_file = "../json_files/create_container_exp_with_namespace.json"
+    form_kruize_url(cluster_type)
+
+    response = delete_experiment(input_json_file)
+    print("delete exp = ", response.status_code)
+
+    # Create experiment using the specified json
+    response = create_experiment(input_json_file)
+
+    data = response.json()
+    print(data['message'])
+
+    assert response.status_code == ERROR_500_STATUS_CODE
+    assert data['status'] == ERROR_STATUS
+
+    response = delete_experiment(input_json_file)
+    print("delete exp = ", response.status_code)
+
From c1c7743b5bf6ee9c98e3899143cb97f05d4e5289 Mon Sep 17 00:00:00 2001
From: Shekhar Saxena
Date: Mon, 16 Sep 2024 18:13:14 +0530
Subject: [PATCH 02/16] adding tests for list recommendations api for namespace

Signed-off-by: Shekhar Saxena
---
 .../scripts/helpers/list_reco_json_schema.py  | 444 ++++++++++++++++++
 .../rest_apis/test_list_recommendations.py    |  82 ++++
 2 files changed, 526 insertions(+)
 create
mode 100644 tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py diff --git a/tests/scripts/helpers/list_reco_json_schema.py b/tests/scripts/helpers/list_reco_json_schema.py index 24d7ff61c..6a22e11d9 100644 --- a/tests/scripts/helpers/list_reco_json_schema.py +++ b/tests/scripts/helpers/list_reco_json_schema.py @@ -445,3 +445,447 @@ } } +list_reco_json_schema_for_namespace_reco = { +"type": "array", + "items": { + "type": "object", + "properties": { + "cluster_name": { "type": "string" }, + "kubernetes_objects": { + "type": "array", + "items": { + "type": "object", + "properties": { + "namespace": { "type": "string" }, + "namespaces": { + "type": "object", + "properties": { + "namespace_name": { "type": "string" }, + "recommendations": { + "type": "object", + "properties": { + "version": { "type": "string" }, + "notifications": { + "type": "object", + "items": { + "type": "object", + "properties": { + "type": { "type": "string" }, + "message": { "type": "string" }, + "code": { "type": "number" } + }, + "required": ["type", "message", "code"] + } + }, + "data": { + "type": "object", + "patternProperties": { + "^\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}.\\d{3}Z$": { + "type": "object", + "properties": { + "notifications": { + "type": "object", + "items": { + "type": "object", + "properties": { + "type": { "type": "string" }, + "message": { "type": "string" }, + "code": { "type": "number" } + }, + "required": ["type", "message", "code"] + } + }, + "monitoring_end_time": { "type": "string" }, + "current": { + "type": "object", + "properties": { + "requests": { + "type": "object", + "properties": { + "memory": { + "type": "object", + "properties": { + "amount": { "type": "number" }, + "format": { "type": "string" } + }, + "required": ["amount", "format"] + }, + "cpu": { + "type": "object", + "properties": { + "amount": { "type": "number" }, + "format": { "type": "string" } + }, + "required": ["amount", "format"] + } + }, + "required": [] + }, + "limits": { + "type": "object", + "properties": { + "memory": { + "type": "object", + "properties": { + "amount": { "type": "number" }, + "format": { "type": "string" } + }, + "required": ["amount", "format"] + }, + "cpu": { + "type": "object", + "properties": { + "amount": { "type": "number" }, + "format": { "type": "string" } + }, + "required": ["amount", "format"] + } + }, + "required": ["memory", "cpu"] + } + }, + "required": [] + }, + "recommendation_terms": { + "type": "object", + "properties": { + "short_term": { + "type": "object", + "properties": { + "notifications": { + "type": "object", + "items": { + "type": "object", + "properties": { + "type": { "type": "string" }, + "message": { "type": "string" }, + "code": { "type": "number" } + }, + "required": ["type", "message", "code"] + } + }, + "monitoring_start_time": { "type": "string" }, + "duration_in_hours": { "type": "number" }, + "recommendation_engines": { + "type": "object", + "properties": { + "cost": { + "type": "object", + "properties": { + "pods_count": { "type": "number" }, + "confidence_level": { "type": "number" }, + "config": { + "type": "object", + "properties": { + "requests": { + "type": "object", + "properties": { + "memory": { + "type": "object", + "properties": { + "amount": { "type": "number" }, + "format": { "type": "string" } + }, + "required": ["amount", "format"] + }, + "cpu": { + "type": "object", + "properties": { + "amount": { "type": "number" }, + "format": { "type": "string" } + }, + "required": ["amount", "format"] + } + }, + "required": 
["memory", "cpu"] + }, + "limits": { + "type": "object", + "properties": { + "memory": { + "type": "object", + "properties": { + "amount": { "type": "number" }, + "format": { "type": "string" } + }, + "required": ["amount", "format"] + }, + "cpu": { + "type": "object", + "properties": { + "amount": { "type": "number" }, + "format": { "type": "string" } + }, + "required": ["amount", "format"] + } + }, + "required": ["memory", "cpu"] + } + }, + "required": ["requests", "limits"] + }, + "variation": { + "type": "object", + "properties": { + "requests": { + "type": "object", + "properties": { + "memory": { + "type": "object", + "properties": { + "amount": { "type": "number" }, + "format": { "type": "string" } + }, + "required": ["amount", "format"] + }, + "cpu": { + "type": "object", + "properties": { + "amount": { "type": "number" }, + "format": { "type": "string" } + }, + "required": ["amount", "format"] + } + }, + "required": ["memory", "cpu"] + }, + "limits": { + "type": "object", + "properties": { + "memory": { + "type": "object", + "properties": { + "amount": { "type": "number" }, + "format": { "type": "string" } + }, + "required": ["amount", "format"] + }, + "cpu": { + "type": "object", + "properties": { + "amount": { "type": "number" }, + "format": { "type": "string" } + }, + "required": ["amount", "format"] + } + }, + "required": ["memory", "cpu"] + } + }, + "required": ["requests", "limits"] + }, + "notifications": { + "type": "object", + "items": { + "type": "object", + "properties": { + "type": { "type": "string" }, + "message": { "type": "string" }, + "code": { "type": "number" } + }, + "required": ["type", "message", "code"] + } + } + }, + "required": ["pods_count", "confidence_level", "config", "variation", "notifications"] + }, + "performance": { + "type": "object", + "properties": { + "monitoring_start_time": { "type": "string" }, + "pods_count": { "type": "number" }, + "confidence_level": { "type": "number" }, + "config": { + "type": "object", + "properties": { + "requests": { + "type": "object", + "properties": { + "memory": { + "type": "object", + "properties": { + "amount": { "type": "number" }, + "format": { "type": "string" } + }, + "required": ["amount", "format"] + }, + "cpu": { + "type": "object", + "properties": { + "amount": { "type": "number" }, + "format": { "type": "string" } + }, + "required": ["amount", "format"] + } + }, + "required": ["memory", "cpu"] + }, + "limits": { + "type": "object", + "properties": { + "memory": { + "type": "object", + "properties": { + "amount": { "type": "number" }, + "format": { "type": "string" } + }, + "required": ["amount", "format"] + }, + "cpu": { + "type": "object", + "properties": { + "amount": { "type": "number" }, + "format": { "type": "string" } + }, + "required": ["amount", "format"] + } + }, + "required": ["memory", "cpu"] + } + }, + "required": ["requests", "limits"] + }, + "variation": { + "type": "object", + "properties": { + "requests": { + "type": "object", + "properties": { + "memory": { + "type": "object", + "properties": { + "amount": { "type": "number" }, + "format": { "type": "string" } + }, + "required": ["amount", "format"] + }, + "cpu": { + "type": "object", + "properties": { + "amount": { "type": "number" }, + "format": { "type": "string" } + }, + "required": ["amount", "format"] + } + }, + "required": ["memory", "cpu"] + }, + "limits": { + "type": "object", + "properties": { + "memory": { + "type": "object", + "properties": { + "amount": { "type": "number" }, + "format": { "type": "string" } + }, + 
"required": ["amount", "format"] + }, + "cpu": { + "type": "object", + "properties": { + "amount": { "type": "number" }, + "format": { "type": "string" } + }, + "required": ["amount", "format"] + } + }, + "required": ["memory", "cpu"] + } + }, + "required": ["requests", "limits"] + }, + "notifications": { + "type": "object", + "items": { + "type": "object", + "properties": { + "type": { "type": "string" }, + "message": { "type": "string" }, + "code": { "type": "number" } + }, + "required": ["type", "message", "code"] + } + } + }, + "required": [] + } + }, + "required": [] + }, + "plots": { + "type": "object", + "properties": { + "datapoints": { "type": "number" }, + "plots_data": { + "type": "object", + "patternProperties": { + "^\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}.\\d{3}Z$": { + "type": "object", + "properties": { + "cpuUsage": { + "type": "object", + "properties": { + "min": { "type": "number" }, + "q1": { "type": "number" }, + "median": { "type": "number" }, + "q3": { "type": "number" }, + "max": { "type": "number" }, + "format": { "type": "string" } + }, + "required": ["min", "q1", "median", "q3", "max", "format"] + }, + "memoryUsage": { + "type": "object", + "properties": { + "min": { "type": "number" }, + "q1": { "type": "number" }, + "median": { "type": "number" }, + "q3": { "type": "number" }, + "max": { "type": "number" }, + "format": { "type": "string" } + }, + "required": ["min", "q1", "median", "q3", "max", "format"] + }, + }, + "required": [] + } + }, + "required": [] + } + }, + "required": ["datapoints", "plots_data"] + } + }, + "required": [] + } + }, + "required": [] + } + }, + "required": [] + } + }, + "required": [] + } + }, + "required": ["version", "notifications", "data"] + } + }, + "required": ["namespace_name", "recommendations"] + }, + "containers": { + "type": "array", + "items": {} + } + }, + "required": ["namespace", "namespaces"] + } + }, + "version": { "type": "string" }, + "experiment_name": { "type": "string" } + }, + "required": ["cluster_name", "kubernetes_objects", "version", "experiment_name"] + } +} + diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py b/tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py new file mode 100644 index 000000000..3bd047a1d --- /dev/null +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py @@ -0,0 +1,82 @@ +""" +Copyright (c) 2024 Red Hat, IBM Corporation and others. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" +import datetime +import json +import time + +import pytest +import sys +sys.path.append("../../") + +from helpers.all_terms_list_reco_json_schema import all_terms_list_reco_json_schema +from helpers.fixtures import * +from helpers.generate_rm_jsons import * +from helpers.kruize import * +from helpers.list_reco_json_schema import * +from helpers.medium_and_long_term_list_reco_json_schema import medium_and_long_term_list_reco_json_schema +from helpers.medium_term_list_reco_json_schema import * +from helpers.long_term_list_reco_json_schema import * +from helpers.list_reco_json_validate import * +from helpers.short_and_long_term_list_reco_json_schema import short_and_long_term_list_reco_json_schema +from helpers.short_and_medium_term_list_reco_json_schema import short_and_medium_term_list_reco_json_schema +from helpers.short_term_list_reco_json_schema import short_term_list_reco_json_schema +from helpers.utils import * + + + +@pytest.mark.sanity +def test_list_recommendations_namespace_single_result(cluster_type): + """ + Test Description: This test validates listRecommendations by passing a valid + namespace experiment name + """ + input_json_file = "../json_files/create_namespace_exp.json" + + form_kruize_url(cluster_type) + response = delete_experiment(input_json_file) + print("delete exp = ", response.status_code) + + # Create namespace experiment using the specified json + response = create_experiment(input_json_file) + + data = response.json() + print(data['message']) + + assert response.status_code == SUCCESS_STATUS_CODE + assert data['status'] == SUCCESS_STATUS + assert data['message'] == CREATE_EXP_SUCCESS_MSG + + # generate recommendations + json_file = open(input_json_file, "r") + input_json = json.loads(json_file.read()) + exp_name = input_json[0]['experiment_name'] + + response = generate_recommendations(exp_name) + assert response.status_code == SUCCESS_STATUS_CODE + + # Invoke list recommendations for the specified experiment + response = list_recommendations(exp_name) + assert response.status_code == SUCCESS_200_STATUS_CODE + list_reco_json = response.json() + + # Validate the json against the json schema + errorMsg = validate_list_reco_json(list_reco_json, list_reco_json_schema_for_namespace_reco) + assert errorMsg == "" + + # Delete experiment + response = delete_experiment(input_json_file) + print("delete exp = ", response.status_code) + assert response.status_code == SUCCESS_STATUS_CODE \ No newline at end of file From 72c5d5799d16e6a8239041c50742f49515c05077 Mon Sep 17 00:00:00 2001 From: Shekhar Saxena Date: Tue, 17 Sep 2024 14:12:10 +0530 Subject: [PATCH 03/16] updating performance profile Signed-off-by: Shekhar Saxena --- .../Local_monitoring_tests.md | 7 +++ .../create_container_exp_with_namespace.json | 2 +- .../create_exp_namespace_container_both.json | 2 +- ...mespace_container_both_container_type.json | 2 +- ...mespace_container_both_namespace_type.json | 2 +- .../create_multiple_namespace_exp.json | 48 +++++++++++++++++++ .../json_files/create_namespace_exp.json | 2 +- .../create_namespace_exp_with_containers.json | 2 +- .../create_namespace_exp_without_type.json | 2 +- .../create_tfb_exp_container_type.json | 2 +- .../rest_apis/test_create_experiment.py | 24 ++++++++++ 11 files changed, 87 insertions(+), 8 deletions(-) create mode 100644 tests/scripts/local_monitoring_tests/json_files/create_multiple_namespace_exp.json diff --git a/tests/scripts/local_monitoring_tests/Local_monitoring_tests.md b/tests/scripts/local_monitoring_tests/Local_monitoring_tests.md 
index 07738d62b..49aab43f0 100644 --- a/tests/scripts/local_monitoring_tests/Local_monitoring_tests.md +++ b/tests/scripts/local_monitoring_tests/Local_monitoring_tests.md @@ -83,6 +83,13 @@ Here are the test scenarios: - Create experiment specifying both namespaces and containers specifying the container experiment type - Create namespace experiment specifying containers - Create container experiment specifying namespaces +- Create multiple experiments with valid namespace + +### **List Recommendations API tests** + +Here are the test scenarios: + +- List recommendations for a valid namespace experiment The above tests are developed using pytest framework and the tests are run using shell script wrapper that does the following: diff --git a/tests/scripts/local_monitoring_tests/json_files/create_container_exp_with_namespace.json b/tests/scripts/local_monitoring_tests/json_files/create_container_exp_with_namespace.json index 60254209b..c27434142 100644 --- a/tests/scripts/local_monitoring_tests/json_files/create_container_exp_with_namespace.json +++ b/tests/scripts/local_monitoring_tests/json_files/create_container_exp_with_namespace.json @@ -2,7 +2,7 @@ "version": "v2.0", "experiment_name": "tfb-workload-namespace", "cluster_name": "default", - "performance_profile": "resource-optimization-openshift", + "performance_profile": "resource-optimization-local-monitoring", "mode": "monitor", "target_cluster": "local", "datasource": "prometheus-1", diff --git a/tests/scripts/local_monitoring_tests/json_files/create_exp_namespace_container_both.json b/tests/scripts/local_monitoring_tests/json_files/create_exp_namespace_container_both.json index a47ad053b..2bd37e72f 100644 --- a/tests/scripts/local_monitoring_tests/json_files/create_exp_namespace_container_both.json +++ b/tests/scripts/local_monitoring_tests/json_files/create_exp_namespace_container_both.json @@ -2,7 +2,7 @@ "version": "v2.0", "experiment_name": "tfb-workload-namespace", "cluster_name": "default", - "performance_profile": "resource-optimization-openshift", + "performance_profile": "resource-optimization-local-monitoring", "mode": "monitor", "target_cluster": "local", "datasource": "prometheus-1", diff --git a/tests/scripts/local_monitoring_tests/json_files/create_exp_namespace_container_both_container_type.json b/tests/scripts/local_monitoring_tests/json_files/create_exp_namespace_container_both_container_type.json index f0686f1fa..59f0466f4 100644 --- a/tests/scripts/local_monitoring_tests/json_files/create_exp_namespace_container_both_container_type.json +++ b/tests/scripts/local_monitoring_tests/json_files/create_exp_namespace_container_both_container_type.json @@ -2,7 +2,7 @@ "version": "v2.0", "experiment_name": "tfb-workload-namespace", "cluster_name": "default", - "performance_profile": "resource-optimization-openshift", + "performance_profile": "resource-optimization-local-monitoring", "mode": "monitor", "target_cluster": "local", "datasource": "prometheus-1", diff --git a/tests/scripts/local_monitoring_tests/json_files/create_exp_namespace_container_both_namespace_type.json b/tests/scripts/local_monitoring_tests/json_files/create_exp_namespace_container_both_namespace_type.json index 1924f3c93..31442ab66 100644 --- a/tests/scripts/local_monitoring_tests/json_files/create_exp_namespace_container_both_namespace_type.json +++ b/tests/scripts/local_monitoring_tests/json_files/create_exp_namespace_container_both_namespace_type.json @@ -2,7 +2,7 @@ "version": "v2.0", "experiment_name": "tfb-workload-namespace", "cluster_name": 
"default", - "performance_profile": "resource-optimization-openshift", + "performance_profile": "resource-optimization-local-monitoring", "mode": "monitor", "target_cluster": "local", "datasource": "prometheus-1", diff --git a/tests/scripts/local_monitoring_tests/json_files/create_multiple_namespace_exp.json b/tests/scripts/local_monitoring_tests/json_files/create_multiple_namespace_exp.json new file mode 100644 index 000000000..2272e7f3c --- /dev/null +++ b/tests/scripts/local_monitoring_tests/json_files/create_multiple_namespace_exp.json @@ -0,0 +1,48 @@ +[ + { + "version": "v2.0", + "experiment_name": "tfb-workload-namespace", + "cluster_name": "default", + "performance_profile": "resource-optimization-local-monitoring", + "mode": "monitor", + "target_cluster": "local", + "datasource": "prometheus-1", + "kubernetes_objects": [ + { + "experiment_type": "namespace", + "namespaces": { + "namespace_name": "default" + } + } + ], + "trial_settings": { + "measurement_duration": "15min" + }, + "recommendation_settings": { + "threshold": "0.1" + } + }, + { + "version": "v2.0", + "experiment_name": "multiple-import-namespace", + "cluster_name": "default", + "performance_profile": "resource-optimization-openshift", + "mode": "monitor", + "target_cluster": "local", + "datasource": "prometheus-1", + "kubernetes_objects": [ + { + "experiment_type": "namespace", + "namespaces": { + "namespace_name": "test-multiple-import" + } + } + ], + "trial_settings": { + "measurement_duration": "15min" + }, + "recommendation_settings": { + "threshold": "0.1" + } + } +] diff --git a/tests/scripts/local_monitoring_tests/json_files/create_namespace_exp.json b/tests/scripts/local_monitoring_tests/json_files/create_namespace_exp.json index c8503d74c..c6c51df67 100644 --- a/tests/scripts/local_monitoring_tests/json_files/create_namespace_exp.json +++ b/tests/scripts/local_monitoring_tests/json_files/create_namespace_exp.json @@ -2,7 +2,7 @@ "version": "v2.0", "experiment_name": "tfb-workload-namespace", "cluster_name": "default", - "performance_profile": "resource-optimization-openshift", + "performance_profile": "resource-optimization-local-monitoring", "mode": "monitor", "target_cluster": "local", "datasource": "prometheus-1", diff --git a/tests/scripts/local_monitoring_tests/json_files/create_namespace_exp_with_containers.json b/tests/scripts/local_monitoring_tests/json_files/create_namespace_exp_with_containers.json index 60e54b6cf..35123977e 100644 --- a/tests/scripts/local_monitoring_tests/json_files/create_namespace_exp_with_containers.json +++ b/tests/scripts/local_monitoring_tests/json_files/create_namespace_exp_with_containers.json @@ -2,7 +2,7 @@ "version": "v2.0", "experiment_name": "monitor_tfb_benchmark", "cluster_name": "default", - "performance_profile": "resource-optimization-openshift", + "performance_profile": "resource-optimization-local-monitoring", "mode": "monitor", "target_cluster": "local", "datasource": "prometheus-1", diff --git a/tests/scripts/local_monitoring_tests/json_files/create_namespace_exp_without_type.json b/tests/scripts/local_monitoring_tests/json_files/create_namespace_exp_without_type.json index 488eb94a2..a3c0cafa2 100644 --- a/tests/scripts/local_monitoring_tests/json_files/create_namespace_exp_without_type.json +++ b/tests/scripts/local_monitoring_tests/json_files/create_namespace_exp_without_type.json @@ -2,7 +2,7 @@ "version": "v2.0", "experiment_name": "tfb-workload-namespace", "cluster_name": "default", - "performance_profile": "resource-optimization-openshift", + 
"performance_profile": "resource-optimization-local-monitoring", "mode": "monitor", "target_cluster": "local", "datasource": "prometheus-1", diff --git a/tests/scripts/local_monitoring_tests/json_files/create_tfb_exp_container_type.json b/tests/scripts/local_monitoring_tests/json_files/create_tfb_exp_container_type.json index 3edef2954..345c322ac 100644 --- a/tests/scripts/local_monitoring_tests/json_files/create_tfb_exp_container_type.json +++ b/tests/scripts/local_monitoring_tests/json_files/create_tfb_exp_container_type.json @@ -2,7 +2,7 @@ "version": "v2.0", "experiment_name": "monitor_tfb_benchmark", "cluster_name": "default", - "performance_profile": "resource-optimization-openshift", + "performance_profile": "resource-optimization-local-monitoring", "mode": "monitor", "target_cluster": "local", "datasource": "prometheus-1", diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py b/tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py index 7db2c7e97..e8150648e 100644 --- a/tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py @@ -268,3 +268,27 @@ def test_create_conatiner_exp_with_namespace(cluster_type): response = delete_experiment(input_json_file) print("delete exp = ", response.status_code) + +@pytest.mark.negative +def test_create_multiple_namespace_exp(cluster_type): + """ + Test Description: This test validates the response status code of createExperiment API + if multiple entries are presnet in create experiment json + """ + input_json_file = "../json_files/create_multiple_namespace_exp.json" + form_kruize_url(cluster_type) + + response = delete_experiment(input_json_file) + print("delete exp = ", response.status_code) + + # Create experiment using the specified json + response = create_experiment(input_json_file) + + data = response.json() + print(data['message']) + + assert response.status_code == ERROR_STATUS_CODE + assert data['status'] == ERROR_STATUS + + response = delete_experiment(input_json_file) + print("delete exp = ", response.status_code) From c85d3733ac76dad5de2567521d19b62c42a48e8a Mon Sep 17 00:00:00 2001 From: Shekhar Saxena Date: Tue, 17 Sep 2024 23:29:59 +0530 Subject: [PATCH 04/16] adding validations for list_recommendations Signed-off-by: Shekhar Saxena --- .../list_reco_json_local_monitoring_schema.py | 764 ++++++++++++++++++ .../scripts/helpers/list_reco_json_schema.py | 443 ---------- tests/scripts/helpers/utils.py | 119 ++- .../rest_apis/test_list_recommendations.py | 8 +- .../test_local_monitoring_e2e_workflow.py | 39 + 5 files changed, 913 insertions(+), 460 deletions(-) diff --git a/tests/scripts/helpers/list_reco_json_local_monitoring_schema.py b/tests/scripts/helpers/list_reco_json_local_monitoring_schema.py index 23187207c..cecb0f803 100644 --- a/tests/scripts/helpers/list_reco_json_local_monitoring_schema.py +++ b/tests/scripts/helpers/list_reco_json_local_monitoring_schema.py @@ -444,3 +444,767 @@ } } +list_reco_namespace_json_local_monitoring_schema = { + "type": "array", + "items": { + "type": "object", + "properties": { + "cluster_name": { + "type": "string" + }, + "kubernetes_objects": { + "type": "array", + "items": { + "type": "object", + "properties": { + "namespace": { + "type": "string" + }, + "namespaces": { + "type": "object", + "properties": { + "namespace_name": { + "type": "string" + }, + "recommendations": { + "type": "object", + "properties": { + "version": { + "type": "string" + }, 
+ "notifications": { + "type": "object", + "items": { + "type": "object", + "properties": { + "type": { + "type": "string" + }, + "message": { + "type": "string" + }, + "code": { + "type": "number" + } + }, + "required": [ + "type", + "message", + "code" + ] + } + }, + "data": { + "type": "object", + "patternProperties": { + "^\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}.\\d{3}Z$": { + "type": "object", + "properties": { + "notifications": { + "type": "object", + "items": { + "type": "object", + "properties": { + "type": { + "type": "string" + }, + "message": { + "type": "string" + }, + "code": { + "type": "number" + } + }, + "required": [ + "type", + "message", + "code" + ] + } + }, + "monitoring_end_time": { + "type": "string" + }, + "current": { + "type": "object", + "properties": { + "requests": { + "type": "object", + "properties": { + "memory": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + }, + "cpu": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + } + }, + "required": [] + }, + "limits": { + "type": "object", + "properties": { + "memory": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + }, + "cpu": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + } + }, + "required": [ + "memory", + "cpu" + ] + } + }, + "required": [] + }, + "recommendation_terms": { + "type": "object", + "properties": { + "short_term": { + "type": "object", + "properties": { + "notifications": { + "type": "object", + "items": { + "type": "object", + "properties": { + "type": { + "type": "string" + }, + "message": { + "type": "string" + }, + "code": { + "type": "number" + } + }, + "required": [ + "type", + "message", + "code" + ] + } + }, + "monitoring_start_time": { + "type": "string" + }, + "duration_in_hours": { + "type": "number" + }, + "recommendation_engines": { + "type": "object", + "properties": { + "cost": { + "type": "object", + "properties": { + "pods_count": { + "type": "number" + }, + "confidence_level": { + "type": "number" + }, + "config": { + "type": "object", + "properties": { + "requests": { + "type": "object", + "properties": { + "memory": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + }, + "cpu": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + } + }, + "required": [ + "memory", + "cpu" + ] + }, + "limits": { + "type": "object", + "properties": { + "memory": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + }, + "cpu": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + } + }, + "required": [ + "memory", + "cpu" + ] + } + }, + "required": [ + "requests", + "limits" + ] + }, + "variation": { + "type": "object", + "properties": { + "requests": { + "type": "object", + "properties": { + 
"memory": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + }, + "cpu": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + } + }, + "required": [ + "memory", + "cpu" + ] + }, + "limits": { + "type": "object", + "properties": { + "memory": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + }, + "cpu": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + } + }, + "required": [ + "memory", + "cpu" + ] + } + }, + "required": [ + "requests", + "limits" + ] + }, + "notifications": { + "type": "object", + "items": { + "type": "object", + "properties": { + "type": { + "type": "string" + }, + "message": { + "type": "string" + }, + "code": { + "type": "number" + } + }, + "required": [ + "type", + "message", + "code" + ] + } + } + }, + "required": [ + "pods_count", + "confidence_level", + "config", + "variation", + "notifications" + ] + }, + "performance": { + "type": "object", + "properties": { + "monitoring_start_time": { + "type": "string" + }, + "pods_count": { + "type": "number" + }, + "confidence_level": { + "type": "number" + }, + "config": { + "type": "object", + "properties": { + "requests": { + "type": "object", + "properties": { + "memory": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + }, + "cpu": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + } + }, + "required": [ + "memory", + "cpu" + ] + }, + "limits": { + "type": "object", + "properties": { + "memory": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + }, + "cpu": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + } + }, + "required": [ + "memory", + "cpu" + ] + } + }, + "required": [ + "requests", + "limits" + ] + }, + "variation": { + "type": "object", + "properties": { + "requests": { + "type": "object", + "properties": { + "memory": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + }, + "cpu": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + } + }, + "required": [ + "memory", + "cpu" + ] + }, + "limits": { + "type": "object", + "properties": { + "memory": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + }, + "cpu": { + "type": "object", + "properties": { + "amount": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "amount", + "format" + ] + } + }, + "required": [ + "memory", + "cpu" + ] + } + }, + "required": [ + "requests", + "limits" + ] + }, + 
"notifications": { + "type": "object", + "items": { + "type": "object", + "properties": { + "type": { + "type": "string" + }, + "message": { + "type": "string" + }, + "code": { + "type": "number" + } + }, + "required": [ + "type", + "message", + "code" + ] + } + } + }, + "required": [] + } + }, + "required": [] + }, + "plots": { + "type": "object", + "properties": { + "datapoints": { + "type": "number" + }, + "plots_data": { + "type": "object", + "patternProperties": { + "^\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}.\\d{3}Z$": { + "type": "object", + "properties": { + "cpuUsage": { + "type": "object", + "properties": { + "min": { + "type": "number" + }, + "q1": { + "type": "number" + }, + "median": { + "type": "number" + }, + "q3": { + "type": "number" + }, + "max": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "min", + "q1", + "median", + "q3", + "max", + "format" + ] + }, + "memoryUsage": { + "type": "object", + "properties": { + "min": { + "type": "number" + }, + "q1": { + "type": "number" + }, + "median": { + "type": "number" + }, + "q3": { + "type": "number" + }, + "max": { + "type": "number" + }, + "format": { + "type": "string" + } + }, + "required": [ + "min", + "q1", + "median", + "q3", + "max", + "format" + ] + }, + }, + "required": [] + } + }, + "required": [] + } + }, + "required": [ + "datapoints", + "plots_data" + ] + } + }, + "required": [] + } + }, + "required": [] + } + }, + "required": [] + } + }, + "required": [] + } + }, + "required": [ + "version", + "notifications", + "data" + ] + } + }, + "required": [ + "namespace_name", + "recommendations" + ] + }, + "containers": { + "type": "array", + "items": {} + } + }, + "required": [ + "namespace", + "namespaces" + ] + } + }, + "version": { + "type": "string" + }, + "experiment_name": { + "type": "string" + } + }, + "required": [ + "cluster_name", + "kubernetes_objects", + "version", + "experiment_name" + ] + } +} \ No newline at end of file diff --git a/tests/scripts/helpers/list_reco_json_schema.py b/tests/scripts/helpers/list_reco_json_schema.py index 6a22e11d9..5376ba4da 100644 --- a/tests/scripts/helpers/list_reco_json_schema.py +++ b/tests/scripts/helpers/list_reco_json_schema.py @@ -445,447 +445,4 @@ } } -list_reco_json_schema_for_namespace_reco = { -"type": "array", - "items": { - "type": "object", - "properties": { - "cluster_name": { "type": "string" }, - "kubernetes_objects": { - "type": "array", - "items": { - "type": "object", - "properties": { - "namespace": { "type": "string" }, - "namespaces": { - "type": "object", - "properties": { - "namespace_name": { "type": "string" }, - "recommendations": { - "type": "object", - "properties": { - "version": { "type": "string" }, - "notifications": { - "type": "object", - "items": { - "type": "object", - "properties": { - "type": { "type": "string" }, - "message": { "type": "string" }, - "code": { "type": "number" } - }, - "required": ["type", "message", "code"] - } - }, - "data": { - "type": "object", - "patternProperties": { - "^\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}.\\d{3}Z$": { - "type": "object", - "properties": { - "notifications": { - "type": "object", - "items": { - "type": "object", - "properties": { - "type": { "type": "string" }, - "message": { "type": "string" }, - "code": { "type": "number" } - }, - "required": ["type", "message", "code"] - } - }, - "monitoring_end_time": { "type": "string" }, - "current": { - "type": "object", - "properties": { - "requests": { - "type": "object", - "properties": { - "memory": { - 
"type": "object", - "properties": { - "amount": { "type": "number" }, - "format": { "type": "string" } - }, - "required": ["amount", "format"] - }, - "cpu": { - "type": "object", - "properties": { - "amount": { "type": "number" }, - "format": { "type": "string" } - }, - "required": ["amount", "format"] - } - }, - "required": [] - }, - "limits": { - "type": "object", - "properties": { - "memory": { - "type": "object", - "properties": { - "amount": { "type": "number" }, - "format": { "type": "string" } - }, - "required": ["amount", "format"] - }, - "cpu": { - "type": "object", - "properties": { - "amount": { "type": "number" }, - "format": { "type": "string" } - }, - "required": ["amount", "format"] - } - }, - "required": ["memory", "cpu"] - } - }, - "required": [] - }, - "recommendation_terms": { - "type": "object", - "properties": { - "short_term": { - "type": "object", - "properties": { - "notifications": { - "type": "object", - "items": { - "type": "object", - "properties": { - "type": { "type": "string" }, - "message": { "type": "string" }, - "code": { "type": "number" } - }, - "required": ["type", "message", "code"] - } - }, - "monitoring_start_time": { "type": "string" }, - "duration_in_hours": { "type": "number" }, - "recommendation_engines": { - "type": "object", - "properties": { - "cost": { - "type": "object", - "properties": { - "pods_count": { "type": "number" }, - "confidence_level": { "type": "number" }, - "config": { - "type": "object", - "properties": { - "requests": { - "type": "object", - "properties": { - "memory": { - "type": "object", - "properties": { - "amount": { "type": "number" }, - "format": { "type": "string" } - }, - "required": ["amount", "format"] - }, - "cpu": { - "type": "object", - "properties": { - "amount": { "type": "number" }, - "format": { "type": "string" } - }, - "required": ["amount", "format"] - } - }, - "required": ["memory", "cpu"] - }, - "limits": { - "type": "object", - "properties": { - "memory": { - "type": "object", - "properties": { - "amount": { "type": "number" }, - "format": { "type": "string" } - }, - "required": ["amount", "format"] - }, - "cpu": { - "type": "object", - "properties": { - "amount": { "type": "number" }, - "format": { "type": "string" } - }, - "required": ["amount", "format"] - } - }, - "required": ["memory", "cpu"] - } - }, - "required": ["requests", "limits"] - }, - "variation": { - "type": "object", - "properties": { - "requests": { - "type": "object", - "properties": { - "memory": { - "type": "object", - "properties": { - "amount": { "type": "number" }, - "format": { "type": "string" } - }, - "required": ["amount", "format"] - }, - "cpu": { - "type": "object", - "properties": { - "amount": { "type": "number" }, - "format": { "type": "string" } - }, - "required": ["amount", "format"] - } - }, - "required": ["memory", "cpu"] - }, - "limits": { - "type": "object", - "properties": { - "memory": { - "type": "object", - "properties": { - "amount": { "type": "number" }, - "format": { "type": "string" } - }, - "required": ["amount", "format"] - }, - "cpu": { - "type": "object", - "properties": { - "amount": { "type": "number" }, - "format": { "type": "string" } - }, - "required": ["amount", "format"] - } - }, - "required": ["memory", "cpu"] - } - }, - "required": ["requests", "limits"] - }, - "notifications": { - "type": "object", - "items": { - "type": "object", - "properties": { - "type": { "type": "string" }, - "message": { "type": "string" }, - "code": { "type": "number" } - }, - "required": ["type", "message", "code"] - 
} - } - }, - "required": ["pods_count", "confidence_level", "config", "variation", "notifications"] - }, - "performance": { - "type": "object", - "properties": { - "monitoring_start_time": { "type": "string" }, - "pods_count": { "type": "number" }, - "confidence_level": { "type": "number" }, - "config": { - "type": "object", - "properties": { - "requests": { - "type": "object", - "properties": { - "memory": { - "type": "object", - "properties": { - "amount": { "type": "number" }, - "format": { "type": "string" } - }, - "required": ["amount", "format"] - }, - "cpu": { - "type": "object", - "properties": { - "amount": { "type": "number" }, - "format": { "type": "string" } - }, - "required": ["amount", "format"] - } - }, - "required": ["memory", "cpu"] - }, - "limits": { - "type": "object", - "properties": { - "memory": { - "type": "object", - "properties": { - "amount": { "type": "number" }, - "format": { "type": "string" } - }, - "required": ["amount", "format"] - }, - "cpu": { - "type": "object", - "properties": { - "amount": { "type": "number" }, - "format": { "type": "string" } - }, - "required": ["amount", "format"] - } - }, - "required": ["memory", "cpu"] - } - }, - "required": ["requests", "limits"] - }, - "variation": { - "type": "object", - "properties": { - "requests": { - "type": "object", - "properties": { - "memory": { - "type": "object", - "properties": { - "amount": { "type": "number" }, - "format": { "type": "string" } - }, - "required": ["amount", "format"] - }, - "cpu": { - "type": "object", - "properties": { - "amount": { "type": "number" }, - "format": { "type": "string" } - }, - "required": ["amount", "format"] - } - }, - "required": ["memory", "cpu"] - }, - "limits": { - "type": "object", - "properties": { - "memory": { - "type": "object", - "properties": { - "amount": { "type": "number" }, - "format": { "type": "string" } - }, - "required": ["amount", "format"] - }, - "cpu": { - "type": "object", - "properties": { - "amount": { "type": "number" }, - "format": { "type": "string" } - }, - "required": ["amount", "format"] - } - }, - "required": ["memory", "cpu"] - } - }, - "required": ["requests", "limits"] - }, - "notifications": { - "type": "object", - "items": { - "type": "object", - "properties": { - "type": { "type": "string" }, - "message": { "type": "string" }, - "code": { "type": "number" } - }, - "required": ["type", "message", "code"] - } - } - }, - "required": [] - } - }, - "required": [] - }, - "plots": { - "type": "object", - "properties": { - "datapoints": { "type": "number" }, - "plots_data": { - "type": "object", - "patternProperties": { - "^\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}.\\d{3}Z$": { - "type": "object", - "properties": { - "cpuUsage": { - "type": "object", - "properties": { - "min": { "type": "number" }, - "q1": { "type": "number" }, - "median": { "type": "number" }, - "q3": { "type": "number" }, - "max": { "type": "number" }, - "format": { "type": "string" } - }, - "required": ["min", "q1", "median", "q3", "max", "format"] - }, - "memoryUsage": { - "type": "object", - "properties": { - "min": { "type": "number" }, - "q1": { "type": "number" }, - "median": { "type": "number" }, - "q3": { "type": "number" }, - "max": { "type": "number" }, - "format": { "type": "string" } - }, - "required": ["min", "q1", "median", "q3", "max", "format"] - }, - }, - "required": [] - } - }, - "required": [] - } - }, - "required": ["datapoints", "plots_data"] - } - }, - "required": [] - } - }, - "required": [] - } - }, - "required": [] - } - }, - "required": [] - } 
- }, - "required": ["version", "notifications", "data"] - } - }, - "required": ["namespace_name", "recommendations"] - }, - "containers": { - "type": "array", - "items": {} - } - }, - "required": ["namespace", "namespaces"] - } - }, - "version": { "type": "string" }, - "experiment_name": { "type": "string" } - }, - "required": ["cluster_name", "kubernetes_objects", "version", "experiment_name"] - } -} diff --git a/tests/scripts/helpers/utils.py b/tests/scripts/helpers/utils.py index ec6403474..66736c752 100644 --- a/tests/scripts/helpers/utils.py +++ b/tests/scripts/helpers/utils.py @@ -165,6 +165,8 @@ LONG_TERM: NOTIFICATION_CODE_FOR_LONG_TERM_RECOMMENDATIONS_AVAILABLE, } +NAMESPACE_EXPERIMENT_TYPE = "namespace" +CONTAINER_EXPERIMENT_TYPE = "container" # version,experiment_name,cluster_name,performance_profile,mode,target_cluster,type,name,namespace,container_image_name,container_name,measurement_duration,threshold create_exp_test_data = { @@ -520,25 +522,32 @@ def validate_kubernetes_obj(create_exp_kubernetes_obj, update_results_kubernetes def validate_local_monitoring_kubernetes_obj(create_exp_kubernetes_obj, list_reco_kubernetes_obj, expected_duration_in_hours, test_name): - # Validate type, name, namespace - assert list_reco_kubernetes_obj["type"] == create_exp_kubernetes_obj["type"] - assert list_reco_kubernetes_obj["name"] == create_exp_kubernetes_obj["name"] - assert list_reco_kubernetes_obj["namespace"] == create_exp_kubernetes_obj["namespace"] + experiment_type = create_exp_kubernetes_obj.get("experiment_type") + if experiment_type == NAMESPACE_EXPERIMENT_TYPE: + assert list_reco_kubernetes_obj["namespaces"]["namespace_name"] == create_exp_kubernetes_obj["namespaces"]["namespace_name"] + list_reco_namespace = list_reco_kubernetes_obj["namespaces"] + create_exp_namespace = create_exp_kubernetes_obj["namespaces"] + validate_local_monitoring_namespace(create_exp_namespace, list_reco_namespace, expected_duration_in_hours, test_name) + else: + # Validate type, name, namespace + assert list_reco_kubernetes_obj["type"] == create_exp_kubernetes_obj["type"] + assert list_reco_kubernetes_obj["name"] == create_exp_kubernetes_obj["name"] + assert list_reco_kubernetes_obj["namespace"] == create_exp_kubernetes_obj["namespace"] - exp_containers_length = len(create_exp_kubernetes_obj["containers"]) - list_reco_containers_length = len(list_reco_kubernetes_obj["containers"]) + exp_containers_length = len(create_exp_kubernetes_obj["containers"]) + list_reco_containers_length = len(list_reco_kubernetes_obj["containers"]) - # Validate if all the containers are present - for i in range(exp_containers_length): - list_reco_container = None + # Validate if all the containers are present + for i in range(exp_containers_length): + list_reco_container = None - for j in range(list_reco_containers_length): - if list_reco_kubernetes_obj["containers"][j]["container_name"] == \ - create_exp_kubernetes_obj["containers"][i]["container_name"]: - list_reco_container = list_reco_kubernetes_obj["containers"][j] - create_exp_container = create_exp_kubernetes_obj["containers"][i] - validate_local_monitoring_container(create_exp_container, list_reco_container, expected_duration_in_hours, test_name) + for j in range(list_reco_containers_length): + if list_reco_kubernetes_obj["containers"][j]["container_name"] == \ + create_exp_kubernetes_obj["containers"][i]["container_name"]: + list_reco_container = list_reco_kubernetes_obj["containers"][j] + create_exp_container = create_exp_kubernetes_obj["containers"][i] + 
validate_local_monitoring_container(create_exp_container, list_reco_container, expected_duration_in_hours, test_name) def validate_container(update_results_container, update_results_json, list_reco_container, expected_duration_in_hours, test_name): @@ -733,6 +742,86 @@ def validate_local_monitoring_container(create_exp_container, list_reco_containe data = list_reco_container["recommendations"]["data"] assert len(data) == 0, f"Data is not empty! Length of data - Actual = {len(data)} expected = 0" + +def validate_local_monitoring_namespace(create_exp_namespace, list_reco_namespace, expected_duration_in_hours, test_name): + # Validate namespace name + if create_exp_namespace != None and list_reco_namespace != None: + assert create_exp_namespace["namespace_name"] == list_reco_namespace["namespace_name"], \ + f"Namespace names did not match! Actual - {list_reco_namespace['namespace_name']} Expected - {create_exp_namespace['namespace_name']}" + + if expected_duration_in_hours == None: + duration_in_hours = 0.0 + else: + duration_in_hours = expected_duration_in_hours + + if check_if_recommendations_are_present(list_reco_namespace["recommendations"]): + interval_end_time = list(list_reco_namespace['recommendations']['data'].keys())[0] + print(f"interval_end_time = {interval_end_time}") + + terms_obj = list_reco_namespace["recommendations"]["data"][interval_end_time]["recommendation_terms"] + current_config = list_reco_namespace["recommendations"]["data"][interval_end_time]["current"] + + duration_terms = {'short_term': 4, 'medium_term': 7, 'long_term': 15} + for term in duration_terms.keys(): + if check_if_recommendations_are_present(terms_obj[term]): + print(f"reco present for term {term}") + + interval_start_time = list_reco_namespace['recommendations']['data'][interval_end_time]['recommendation_terms'][term]['monitoring_start_time'] + # Validate the precision of the valid duration + duration = terms_obj[term]["duration_in_hours"] + assert validate_duration_in_hours_decimal_precision(duration), f"The value '{duration}' for " \ + f"'{term}' has more than two decimal places" + + monitoring_start_time = term_based_start_time(interval_end_time, term) + assert terms_obj[term]["monitoring_start_time"] == monitoring_start_time, \ + f"actual = {terms_obj[term]['monitoring_start_time']} expected = {monitoring_start_time}" + + # Validate duration in hrs + if expected_duration_in_hours is None: + duration_in_hours = set_duration_based_on_terms(duration_in_hours, term, + interval_start_time, interval_end_time) + + if test_name is not None: + + if MEDIUM_TERM_TEST in test_name and term == MEDIUM_TERM: + assert terms_obj[term]["duration_in_hours"] == duration_in_hours, \ + f"Duration in hours did not match! Actual = {terms_obj[term]['duration_in_hours']} expected = {duration_in_hours}" + elif SHORT_TERM_TEST in test_name and term == SHORT_TERM: + assert terms_obj[term]["duration_in_hours"] == duration_in_hours, \ + f"Duration in hours did not match! Actual = {terms_obj[term]['duration_in_hours']} expected = {duration_in_hours}" + elif LONG_TERM_TEST in test_name and term == LONG_TERM: + assert terms_obj[term]["duration_in_hours"] == duration_in_hours, \ + f"Duration in hours did not match! Actual = {terms_obj[term]['duration_in_hours']} expected = {duration_in_hours}" + else: + print( + f"Actual = {terms_obj[term]['duration_in_hours']} expected = {duration_in_hours}") + assert terms_obj[term]["duration_in_hours"] == duration_in_hours, \ + f"Duration in hours did not match! 
Actual = {terms_obj[term]['duration_in_hours']} expected = {duration_in_hours}" + duration_in_hours = set_duration_based_on_terms(duration_in_hours, term, interval_start_time, + interval_end_time) + + # Get engine objects + engines_list = ["cost", "performance"] + + # Extract recommendation engine objects + recommendation_engines_object = None + if "recommendation_engines" in terms_obj[term]: + recommendation_engines_object = terms_obj[term]["recommendation_engines"] + if recommendation_engines_object is not None: + for engine_entry in engines_list: + if engine_entry in terms_obj[term]["recommendation_engines"]: + engine_obj = terms_obj[term]["recommendation_engines"][engine_entry] + validate_config_local_monitoring(engine_obj["config"]) + validate_variation_local_monitoring(current_config, engine_obj["config"], engine_obj["variation"], engine_obj) + else: + notifications = list_reco_namespace["recommendations"]["notifications"] + if NOTIFICATION_CODE_FOR_NOT_ENOUGH_DATA in notifications: + assert notifications[NOTIFICATION_CODE_FOR_NOT_ENOUGH_DATA]["message"] == NOT_ENOUGH_DATA_MSG + + data = list_reco_namespace["recommendations"]["data"] + assert len(data) == 0, f"Data is not empty! Length of data - Actual = {len(data)} expected = 0" + + def validate_plots(terms_obj, duration_terms, term): plots = terms_obj[term][PLOTS] datapoint = plots[DATA_POINTS] diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py b/tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py index 3bd047a1d..ad3ed1ff4 100644 --- a/tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py @@ -25,7 +25,7 @@ from helpers.fixtures import * from helpers.generate_rm_jsons import * from helpers.kruize import * -from helpers.list_reco_json_schema import * +from helpers.list_reco_json_local_monitoring_schema import * from helpers.medium_and_long_term_list_reco_json_schema import medium_and_long_term_list_reco_json_schema from helpers.medium_term_list_reco_json_schema import * from helpers.long_term_list_reco_json_schema import * @@ -73,9 +73,13 @@ def test_list_recommendations_namespace_single_result(cluster_type): list_reco_json = response.json() # Validate the json against the json schema - errorMsg = validate_list_reco_json(list_reco_json, list_reco_json_schema_for_namespace_reco) + errorMsg = validate_list_reco_json(list_reco_json, list_reco_namespace_json_local_monitoring_schema) assert errorMsg == "" + # Validate the json values + namespace_exp_json = read_json_data_from_file(input_json_file) + validate_local_monitoring_reco_json(namespace_exp_json[0], list_reco_json[0]) + # Delete experiment response = delete_experiment(input_json_file) print("delete exp = ", response.status_code) diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_local_monitoring_e2e_workflow.py b/tests/scripts/local_monitoring_tests/rest_apis/test_local_monitoring_e2e_workflow.py index ae1246963..31ea19542 100644 --- a/tests/scripts/local_monitoring_tests/rest_apis/test_local_monitoring_e2e_workflow.py +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_local_monitoring_e2e_workflow.py @@ -120,6 +120,7 @@ def test_list_recommendations_multiple_exps_for_datasource_workloads(cluster_typ # delete tfb experiments tfb_exp_json_file = "../json_files/create_tfb_exp.json" tfb_db_exp_json_file = "../json_files/create_tfb_db_exp.json" + namespace_exp_json_file = 
"../json_files/create_namespace_exp.json" response = delete_experiment(tfb_exp_json_file) print("delete tfb exp = ", response.status_code) @@ -127,6 +128,9 @@ def test_list_recommendations_multiple_exps_for_datasource_workloads(cluster_typ response = delete_experiment(tfb_db_exp_json_file) print("delete tfb_db exp = ", response.status_code) + response = delete_experiment(namespace_exp_json_file) + print("delete namespace exp = ", response.status_code) + #Install default metric profile metric_profile_json_file = metric_profile_dir / 'resource_optimization_local_monitoring.json' response = delete_metric_profile(metric_profile_json_file) @@ -175,6 +179,16 @@ def test_list_recommendations_multiple_exps_for_datasource_workloads(cluster_typ assert data['status'] == SUCCESS_STATUS assert data['message'] == CREATE_EXP_SUCCESS_MSG + # create namespace experiment + response = create_experiment(namespace_exp_json_file) + + data = response.json() + print(data['message']) + + assert response.status_code == SUCCESS_STATUS_CODE + assert data['status'] == SUCCESS_STATUS + assert data['message'] == CREATE_EXP_SUCCESS_MSG + # Wait for the container to complete wait_for_container_to_complete(container_id) @@ -187,6 +201,10 @@ def test_list_recommendations_multiple_exps_for_datasource_workloads(cluster_typ input_json = json.loads(json_file.read()) tfb_db_exp_name = input_json[0]['experiment_name'] + json_file = open(namespace_exp_json_file, "r") + input_json = json.loads(json_file.read()) + namespace_exp_name = input_json[0]['experiment_name'] + response = generate_recommendations(tfb_exp_name) assert response.status_code == SUCCESS_STATUS_CODE @@ -220,6 +238,22 @@ def test_list_recommendations_multiple_exps_for_datasource_workloads(cluster_typ tfb_db_exp_json = read_json_data_from_file(tfb_db_exp_json_file) validate_local_monitoring_reco_json(tfb_db_exp_json[0], list_reco_json[0]) + response = generate_recommendations(namespace_exp_name) + assert response.status_code == SUCCESS_STATUS_CODE + + # Invoke list recommendations for the specified experiment + response = list_recommendations(namespace_exp_name) + assert response.status_code == SUCCESS_200_STATUS_CODE + list_reco_json = response.json() + + # Validate the json against the json schema + errorMsg = validate_list_reco_json(list_reco_json, list_reco_namespace_json_local_monitoring_schema) + assert errorMsg == "" + + # Validate the json values + namespace_exp_json = read_json_data_from_file(namespace_exp_json_file) + validate_local_monitoring_reco_json(namespace_exp_json[0], list_reco_json[0]) + # Delete tfb experiment response = delete_experiment(tfb_exp_json_file) print("delete exp = ", response.status_code) @@ -230,6 +264,11 @@ def test_list_recommendations_multiple_exps_for_datasource_workloads(cluster_typ print("delete exp = ", response.status_code) assert response.status_code == SUCCESS_STATUS_CODE + # Delete namespace experiment + response = delete_experiment(namespace_exp_json_file) + print("delete exp = ", response.status_code) + assert response.status_code == SUCCESS_STATUS_CODE + # Delete Metric Profile response = delete_metric_profile(metric_profile_json_file) print("delete metric profile = ", response.status_code) From 4037c11a583dfe0b7eab09eb89af332d8dcaa34a Mon Sep 17 00:00:00 2001 From: Shekhar Saxena Date: Tue, 17 Sep 2024 23:32:42 +0530 Subject: [PATCH 05/16] removing extra lines Signed-off-by: Shekhar Saxena --- tests/scripts/helpers/list_reco_json_schema.py | 1 - 1 file changed, 1 deletion(-) diff --git 
a/tests/scripts/helpers/list_reco_json_schema.py b/tests/scripts/helpers/list_reco_json_schema.py
index 5376ba4da..24d7ff61c 100644
--- a/tests/scripts/helpers/list_reco_json_schema.py
+++ b/tests/scripts/helpers/list_reco_json_schema.py
@@ -445,4 +445,3 @@
     }
 }
-

From 17897df9f4ed9086d6df4133efab2a39e4cba70f Mon Sep 17 00:00:00 2001
From: Shekhar Saxena
Date: Wed, 18 Sep 2024 01:08:56 +0530
Subject: [PATCH 06/16] adding template for createExperiment

Signed-off-by: Shekhar Saxena
---
 .../create_container_exp_with_namespace.json | 23 --
 .../create_exp_namespace_container_both.json | 28 --
 ...mespace_container_both_container_type.json | 32 --
 ...mespace_container_both_namespace_type.json | 29 --
 .../json_files/create_exp_template.json | 29 ++
 .../json_files/create_namespace_exp.json | 23 --
 .../create_namespace_exp_with_containers.json | 29 --
 .../create_namespace_exp_without_type.json | 22 --
 .../create_tfb_exp_container_type.json | 29 --
 .../rest_apis/test_create_experiment.py | 314 ++++++++----------
 10 files changed, 166 insertions(+), 392 deletions(-)
 delete mode 100644 tests/scripts/local_monitoring_tests/json_files/create_container_exp_with_namespace.json
 delete mode 100644 tests/scripts/local_monitoring_tests/json_files/create_exp_namespace_container_both.json
 delete mode 100644 tests/scripts/local_monitoring_tests/json_files/create_exp_namespace_container_both_container_type.json
 delete mode 100644 tests/scripts/local_monitoring_tests/json_files/create_exp_namespace_container_both_namespace_type.json
 create mode 100644 tests/scripts/local_monitoring_tests/json_files/create_exp_template.json
 delete mode 100644 tests/scripts/local_monitoring_tests/json_files/create_namespace_exp.json
 delete mode 100644 tests/scripts/local_monitoring_tests/json_files/create_namespace_exp_with_containers.json
 delete mode 100644 tests/scripts/local_monitoring_tests/json_files/create_namespace_exp_without_type.json
 delete mode 100644 tests/scripts/local_monitoring_tests/json_files/create_tfb_exp_container_type.json

diff --git
"namespaces": { - "namespace_name": "default" - }, - "containers": [ - { - "container_image_name": "kruize/tfb-qrh:1.13.2.F_et17", - "container_name": "tfb-server" - } - ] - } - ], - "trial_settings": { - "measurement_duration": "15min" - }, - "recommendation_settings": { - "threshold": "0.1" - } -}] diff --git a/tests/scripts/local_monitoring_tests/json_files/create_exp_namespace_container_both_container_type.json b/tests/scripts/local_monitoring_tests/json_files/create_exp_namespace_container_both_container_type.json deleted file mode 100644 index 59f0466f4..000000000 --- a/tests/scripts/local_monitoring_tests/json_files/create_exp_namespace_container_both_container_type.json +++ /dev/null @@ -1,32 +0,0 @@ -[{ - "version": "v2.0", - "experiment_name": "tfb-workload-namespace", - "cluster_name": "default", - "performance_profile": "resource-optimization-local-monitoring", - "mode": "monitor", - "target_cluster": "local", - "datasource": "prometheus-1", - "kubernetes_objects": [ - { - "experiment_type": "container", - "type": "deployment", - "name": "tfb-qrh-sample", - "namespace": "default", - "namespaces": { - "namespace_name": "default" - }, - "containers": [ - { - "container_image_name": "kruize/tfb-qrh:1.13.2.F_et17", - "container_name": "tfb-server" - } - ] - } - ], - "trial_settings": { - "measurement_duration": "15min" - }, - "recommendation_settings": { - "threshold": "0.1" - } -}] diff --git a/tests/scripts/local_monitoring_tests/json_files/create_exp_namespace_container_both_namespace_type.json b/tests/scripts/local_monitoring_tests/json_files/create_exp_namespace_container_both_namespace_type.json deleted file mode 100644 index 31442ab66..000000000 --- a/tests/scripts/local_monitoring_tests/json_files/create_exp_namespace_container_both_namespace_type.json +++ /dev/null @@ -1,29 +0,0 @@ -[{ - "version": "v2.0", - "experiment_name": "tfb-workload-namespace", - "cluster_name": "default", - "performance_profile": "resource-optimization-local-monitoring", - "mode": "monitor", - "target_cluster": "local", - "datasource": "prometheus-1", - "kubernetes_objects": [ - { - "experiment_type": "namespace", - "namespaces": { - "namespace_name": "default" - }, - "containers": [ - { - "container_image_name": "kruize/tfb-qrh:1.13.2.F_et17", - "container_name": "tfb-server" - } - ] - } - ], - "trial_settings": { - "measurement_duration": "15min" - }, - "recommendation_settings": { - "threshold": "0.1" - } -}] diff --git a/tests/scripts/local_monitoring_tests/json_files/create_exp_template.json b/tests/scripts/local_monitoring_tests/json_files/create_exp_template.json new file mode 100644 index 000000000..2d3b9c1cd --- /dev/null +++ b/tests/scripts/local_monitoring_tests/json_files/create_exp_template.json @@ -0,0 +1,29 @@ +[{ + "version": "{{version}}", + "experiment_name": "{{experiment_name}}", + "cluster_name": "{{cluster_name}}", + "performance_profile": "{{performance_profile}}", + "mode": "{{mode}}", + "target_cluster": "{{target_cluster}}", + "datasource": "{{datasource}}", + "kubernetes_objects": [{ + "experiment_type": "{{experiment_type}}", + "type": "{{kubernetes_obj_type}}", + "name": "{{name}}", + "namespace": "{{namespace}}", + "namespaces": { + "namespace_name": "{{namespace_name}}" + }, + "containers": [{ + "container_image_name": "{{container_image_name}}", + "container_name": "{{container_name}}" + }] + }], + "trial_settings": { + "measurement_duration": "{{measurement_duration}}" + }, + "recommendation_settings": { + "threshold": "{{threshold}}" + } +}] + diff --git 
a/tests/scripts/local_monitoring_tests/json_files/create_namespace_exp.json b/tests/scripts/local_monitoring_tests/json_files/create_namespace_exp.json deleted file mode 100644 index c6c51df67..000000000 --- a/tests/scripts/local_monitoring_tests/json_files/create_namespace_exp.json +++ /dev/null @@ -1,23 +0,0 @@ -[{ - "version": "v2.0", - "experiment_name": "tfb-workload-namespace", - "cluster_name": "default", - "performance_profile": "resource-optimization-local-monitoring", - "mode": "monitor", - "target_cluster": "local", - "datasource": "prometheus-1", - "kubernetes_objects": [ - { - "experiment_type": "namespace", - "namespaces": { - "namespace_name": "default" - } - } - ], - "trial_settings": { - "measurement_duration": "15min" - }, - "recommendation_settings": { - "threshold": "0.1" - } -}] diff --git a/tests/scripts/local_monitoring_tests/json_files/create_namespace_exp_with_containers.json b/tests/scripts/local_monitoring_tests/json_files/create_namespace_exp_with_containers.json deleted file mode 100644 index 35123977e..000000000 --- a/tests/scripts/local_monitoring_tests/json_files/create_namespace_exp_with_containers.json +++ /dev/null @@ -1,29 +0,0 @@ -[{ - "version": "v2.0", - "experiment_name": "monitor_tfb_benchmark", - "cluster_name": "default", - "performance_profile": "resource-optimization-local-monitoring", - "mode": "monitor", - "target_cluster": "local", - "datasource": "prometheus-1", - "kubernetes_objects": [ - { - "experiment_type": "namespace", - "type": "deployment", - "name": "tfb-qrh-sample", - "namespace": "default", - "containers": [ - { - "container_image_name": "kruize/tfb-qrh:1.13.2.F_et17", - "container_name": "tfb-server" - } - ] - } - ], - "trial_settings": { - "measurement_duration": "15min" - }, - "recommendation_settings": { - "threshold": "0.1" - } -}] diff --git a/tests/scripts/local_monitoring_tests/json_files/create_namespace_exp_without_type.json b/tests/scripts/local_monitoring_tests/json_files/create_namespace_exp_without_type.json deleted file mode 100644 index a3c0cafa2..000000000 --- a/tests/scripts/local_monitoring_tests/json_files/create_namespace_exp_without_type.json +++ /dev/null @@ -1,22 +0,0 @@ -[{ - "version": "v2.0", - "experiment_name": "tfb-workload-namespace", - "cluster_name": "default", - "performance_profile": "resource-optimization-local-monitoring", - "mode": "monitor", - "target_cluster": "local", - "datasource": "prometheus-1", - "kubernetes_objects": [ - { - "namespaces": { - "namespace_name": "default" - } - } - ], - "trial_settings": { - "measurement_duration": "15min" - }, - "recommendation_settings": { - "threshold": "0.1" - } -}] diff --git a/tests/scripts/local_monitoring_tests/json_files/create_tfb_exp_container_type.json b/tests/scripts/local_monitoring_tests/json_files/create_tfb_exp_container_type.json deleted file mode 100644 index 345c322ac..000000000 --- a/tests/scripts/local_monitoring_tests/json_files/create_tfb_exp_container_type.json +++ /dev/null @@ -1,29 +0,0 @@ -[{ - "version": "v2.0", - "experiment_name": "monitor_tfb_benchmark", - "cluster_name": "default", - "performance_profile": "resource-optimization-local-monitoring", - "mode": "monitor", - "target_cluster": "local", - "datasource": "prometheus-1", - "kubernetes_objects": [ - { - "experiment_type": "container", - "type": "deployment", - "name": "tfb-qrh-sample", - "namespace": "default", - "containers": [ - { - "container_image_name": "kruize/tfb-qrh:1.13.2.F_et17", - "container_name": "tfb-server" - } - ] - } - ], - "trial_settings": { - 
"measurement_duration": "15min" - }, - "recommendation_settings": { - "threshold": "0.1" - } -}] diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py b/tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py index e8150648e..5ead7ee0e 100644 --- a/tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py @@ -48,12 +48,76 @@ csvfile = "/tmp/create_exp_test_data.csv" @pytest.mark.sanity -def test_create_namespace_exp_with_namespace_type(cluster_type): +@pytest.mark.parametrize("test_name, expected_status_code, version, experiment_name, cluster_name, performance_profile, mode, target_cluster, datasource, experiment_type, kubernetes_obj_type, name, namespace, namespace_name, container_image_name, container_name, measurement_duration, threshold", + [ + ("valid_namespace_exp_with_exp_type", SUCCESS_STATUS_CODE, "v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "namespace", None, None, None, "default", None, None, "15min", "0.1"), + ("valid_container_exp_without_exp_type", SUCCESS_STATUS_CODE, "v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", None, "deployment", "tfb-qrh-sample", "default", None, "kruize/tfb-qrh:1.13.2.F_et17", "tfb-server", "15min", "0.1"), + ("valid_container_exp_with_exp_type", SUCCESS_STATUS_CODE, "v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "container", "deployment", "tfb-qrh-sample", "default", None, "kruize/tfb-qrh:1.13.2.F_et17", "tfb-server", "15min", "0.1"), + ] +) +def test_create_exp_valid_tests(test_name, expected_status_code, version, experiment_name, cluster_name, performance_profile, mode, target_cluster, datasource, experiment_type, kubernetes_obj_type, name, namespace, namespace_name, container_image_name, container_name, measurement_duration, threshold, cluster_type): """ Test Description: This test validates the response status code of createExperiment API for namespace experiment by passing a valid input for the json """ - input_json_file = "../json_files/create_namespace_exp.json" + # Generate a temporary JSON filename + tmp_json_file = "/tmp/create_exp_" + test_name + ".json" + print("tmp_json_file = ", tmp_json_file) + + # Load the Jinja2 template + environment = Environment(loader=FileSystemLoader("../json_files/")) + template = environment.get_template("create_exp_template.json") + + # In case of test_name with "null", strip the specific fields + if "null" in test_name: + field = test_name.replace("null_", "") + json_file = "../json_files/create_exp_template.json" + filename = "/tmp/create_exp_template.json" + strip_double_quotes_for_field(json_file, field, filename) + environment = Environment(loader=FileSystemLoader("/tmp/")) + template = environment.get_template("create_exp_template.json") + + # Render the JSON content from the template + content = template.render( + version=version, + experiment_name=experiment_name, + cluster_name=cluster_name, + performance_profile=performance_profile, + mode=mode, + target_cluster=target_cluster, + datasource=datasource, + experiment_type=experiment_type, + kubernetes_obj_type=kubernetes_obj_type, + name=name, + namespace=namespace, + namespace_name=namespace_name, + container_image_name=container_image_name, + container_name=container_name, + 
measurement_duration=measurement_duration, + threshold=threshold + ) + + # Convert rendered content to a dictionary + json_content = json.loads(content) + + if json_content[0]["kubernetes_objects"][0]["type"] == "None": + json_content[0]["kubernetes_objects"][0].pop("type") + if json_content[0]["kubernetes_objects"][0]["name"] == "None": + json_content[0]["kubernetes_objects"][0].pop("name") + if json_content[0]["kubernetes_objects"][0]["namespace"] == "None": + json_content[0]["kubernetes_objects"][0].pop("namespace") + if json_content[0]["kubernetes_objects"][0]["containers"][0]["container_image_name"] == "None": + json_content[0]["kubernetes_objects"][0].pop("containers") + if json_content[0]["kubernetes_objects"][0]["namespaces"]["namespace_name"] == "None": + json_content[0]["kubernetes_objects"][0].pop("namespaces") + if json_content[0]["kubernetes_objects"][0]["experiment_type"] == "None": + json_content[0]["kubernetes_objects"][0].pop("experiment_type") + + # Write the final JSON to the temp file + with open(tmp_json_file, mode="w", encoding="utf-8") as message: + json.dump(json_content, message, indent=4) + + input_json_file = tmp_json_file form_kruize_url(cluster_type) response = delete_experiment(input_json_file) @@ -72,185 +136,81 @@ def test_create_namespace_exp_with_namespace_type(cluster_type): response = delete_experiment(input_json_file) print("delete exp = ", response.status_code) -@pytest.mark.negative -def test_create_namespace_exp_without_type(cluster_type): - """ - Test Description: This test validates the response status code of createExperiment API - for namespace experiment by passing a invalid input for the json without specifying the experiment type - """ - input_json_file = "../json_files/create_namespace_exp_without_type.json" - form_kruize_url(cluster_type) - - response = delete_experiment(input_json_file) - print("delete exp = ", response.status_code) - - # Create experiment using the specified json - response = create_experiment(input_json_file) - - data = response.json() - print(data['message']) - - assert response.status_code == ERROR_500_STATUS_CODE - assert data['status'] == ERROR_STATUS - - response = delete_experiment(input_json_file) - print("delete exp = ", response.status_code) - - -@pytest.mark.sanity -def test_create_container_exp_without_type(cluster_type): - """ - Test Description: This test validates the response status code of createExperiment API - for containers experiment by passing a valid input for the json without specifying the experiment type - """ - input_json_file = "../json_files/create_tfb_exp.json" - form_kruize_url(cluster_type) - - response = delete_experiment(input_json_file) - print("delete exp = ", response.status_code) - - # Create experiment using the specified json - response = create_experiment(input_json_file) - - data = response.json() - print(data['message']) - - assert response.status_code == SUCCESS_STATUS_CODE - assert data['status'] == SUCCESS_STATUS - assert data['message'] == CREATE_EXP_SUCCESS_MSG - - response = delete_experiment(input_json_file) - print("delete exp = ", response.status_code) - -@pytest.mark.sanity -def test_create_container_exp_with_container_type(cluster_type): - """ - Test Description: This test validates the response status code of createExperiment API - for containers experiment by passing a valid input for the json with specifying the experiment type - """ - input_json_file = "../json_files/create_tfb_exp_container_type.json" - form_kruize_url(cluster_type) - - response = 
delete_experiment(input_json_file) - print("delete exp = ", response.status_code) - - # Create experiment using the specified json - response = create_experiment(input_json_file) - - data = response.json() - print(data['message']) - - assert response.status_code == SUCCESS_STATUS_CODE - assert data['status'] == SUCCESS_STATUS - assert data['message'] == CREATE_EXP_SUCCESS_MSG - - response = delete_experiment(input_json_file) - print("delete exp = ", response.status_code) - - -@pytest.mark.negative -def test_create_exp_with_container_namespace_both_without_type(cluster_type): - """ - Test Description: This test validates the response status code of createExperiment API - if both container and namespace is passed and experiment type is not passed - """ - input_json_file = "../json_files/create_exp_namespace_container_both.json" - form_kruize_url(cluster_type) - - response = delete_experiment(input_json_file) - print("delete exp = ", response.status_code) - - # Create experiment using the specified json - response = create_experiment(input_json_file) - - data = response.json() - print(data['message']) - - assert response.status_code == ERROR_500_STATUS_CODE - assert data['status'] == ERROR_STATUS - - response = delete_experiment(input_json_file) - print("delete exp = ", response.status_code) @pytest.mark.negative -def test_create_exp_with_container_namespace_both_container_type(cluster_type): +@pytest.mark.parametrize("test_name, expected_status_code, version, experiment_name, cluster_name, performance_profile, mode, target_cluster, datasource, experiment_type, kubernetes_obj_type, name, namespace, namespace_name, container_image_name, container_name, measurement_duration, threshold", + [ + ("invalid_namespace_exp_without_exp_type", ERROR_500_STATUS_CODE, "v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", None, None, None, None, "default", None, None, "15min", "0.1"), + ("invalid_both_container_and_namespace_without_exp_type", ERROR_500_STATUS_CODE, "v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", None, "deployment", "tfb-qrh-sample", "default", "default", "kruize/tfb-qrh:1.13.2.F_et17", "tfb-server", "15min", "0.1"), + ("invalid_both_container_and_namespace_namespace_exp_type", ERROR_500_STATUS_CODE, "v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "namespace", "deployment", "tfb-qrh-sample", "default", "default", "kruize/tfb-qrh:1.13.2.F_et17", "tfb-server", "15min", "0.1"), + ("invalid_both_container_and_namespace_container_exp_type", ERROR_500_STATUS_CODE, "v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "container", "deployment", "tfb-qrh-sample", "default", "default", "kruize/tfb-qrh:1.13.2.F_et17", "tfb-server", "15min", "0.1"), + ("invalid_namespace_exp_type_with_only_containers", ERROR_500_STATUS_CODE, "v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "namespace", "deployment", "tfb-qrh-sample", "default", None, "kruize/tfb-qrh:1.13.2.F_et17", "tfb-server", "15min", "0.1"), + ("invalid_container_exp_type_with_only_namespace", ERROR_500_STATUS_CODE, "v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "container", None, None, None, "default", None, None, 
"15min", "0.1") + ] +) +def test_create_exp_invalid_tests(test_name, expected_status_code, version, experiment_name, cluster_name, performance_profile, mode, target_cluster, datasource, experiment_type, kubernetes_obj_type, name, namespace, namespace_name, container_image_name, container_name, measurement_duration, threshold, cluster_type): """ Test Description: This test validates the response status code of createExperiment API - if both container and namespace is passed and experiment type is container - """ - input_json_file = "../json_files/create_exp_namespace_container_both_container_type.json" - form_kruize_url(cluster_type) - - response = delete_experiment(input_json_file) - print("delete exp = ", response.status_code) - - # Create experiment using the specified json - response = create_experiment(input_json_file) - - data = response.json() - print(data['message']) - - assert response.status_code == ERROR_500_STATUS_CODE - assert data['status'] == ERROR_STATUS - - response = delete_experiment(input_json_file) - print("delete exp = ", response.status_code) - -@pytest.mark.negative -def test_create_exp_with_container_namespace_both_namespace_type(cluster_type): - """ - Test Description: This test validates the response status code of createExperiment API - if both container and namespace is passed and experiment type is namespace - """ - input_json_file = "../json_files/create_exp_namespace_container_both_namespace_type.json" - form_kruize_url(cluster_type) - - response = delete_experiment(input_json_file) - print("delete exp = ", response.status_code) - - # Create experiment using the specified json - response = create_experiment(input_json_file) - - data = response.json() - print(data['message']) - - assert response.status_code == ERROR_500_STATUS_CODE - assert data['status'] == ERROR_STATUS - - response = delete_experiment(input_json_file) - print("delete exp = ", response.status_code) - -@pytest.mark.negative -def test_create_namespace_exp_with_containers(cluster_type): - """ - Test Description: This test validates the response status code of createExperiment API - if containers array is passed and experiment type is namespace - """ - input_json_file = "../json_files/create_namespace_exp_with_containers.json" - form_kruize_url(cluster_type) - - response = delete_experiment(input_json_file) - print("delete exp = ", response.status_code) - - # Create experiment using the specified json - response = create_experiment(input_json_file) - - data = response.json() - print(data['message']) - - assert response.status_code == ERROR_500_STATUS_CODE - assert data['status'] == ERROR_STATUS - - response = delete_experiment(input_json_file) - print("delete exp = ", response.status_code) - -@pytest.mark.negative -def test_create_conatiner_exp_with_namespace(cluster_type): - """ - Test Description: This test validates the response status code of createExperiment API - if namespaces object is passed and experiment type is container + for namespace experiment by passing a valid input for the json """ - input_json_file = "../json_files/create_container_exp_with_namespace.json" + # Generate a temporary JSON filename + tmp_json_file = "/tmp/create_exp_" + test_name + ".json" + print("tmp_json_file = ", tmp_json_file) + + # Load the Jinja2 template + environment = Environment(loader=FileSystemLoader("../json_files/")) + template = environment.get_template("create_exp_template.json") + + # In case of test_name with "null", strip the specific fields + if "null" in test_name: + field = 
test_name.replace("null_", "") + json_file = "../json_files/create_exp_template.json" + filename = "/tmp/create_exp_template.json" + strip_double_quotes_for_field(json_file, field, filename) + environment = Environment(loader=FileSystemLoader("/tmp/")) + template = environment.get_template("create_exp_template.json") + + # Render the JSON content from the template + content = template.render( + version=version, + experiment_name=experiment_name, + cluster_name=cluster_name, + performance_profile=performance_profile, + mode=mode, + target_cluster=target_cluster, + datasource=datasource, + experiment_type=experiment_type, + kubernetes_obj_type=kubernetes_obj_type, + name=name, + namespace=namespace, + namespace_name=namespace_name, + container_image_name=container_image_name, + container_name=container_name, + measurement_duration=measurement_duration, + threshold=threshold + ) + + # Convert rendered content to a dictionary + json_content = json.loads(content) + + if json_content[0]["kubernetes_objects"][0]["type"] == "None": + json_content[0]["kubernetes_objects"][0].pop("type") + if json_content[0]["kubernetes_objects"][0]["name"] == "None": + json_content[0]["kubernetes_objects"][0].pop("name") + if json_content[0]["kubernetes_objects"][0]["namespace"] == "None": + json_content[0]["kubernetes_objects"][0].pop("namespace") + if json_content[0]["kubernetes_objects"][0]["containers"][0]["container_image_name"] == "None": + json_content[0]["kubernetes_objects"][0].pop("containers") + if json_content[0]["kubernetes_objects"][0]["namespaces"]["namespace_name"] == "None": + json_content[0]["kubernetes_objects"][0].pop("namespaces") + if json_content[0]["kubernetes_objects"][0]["experiment_type"] == "None": + json_content[0]["kubernetes_objects"][0].pop("experiment_type") + + # Write the final JSON to the temp file + with open(tmp_json_file, mode="w", encoding="utf-8") as message: + json.dump(json_content, message, indent=4) + + input_json_file = tmp_json_file form_kruize_url(cluster_type) response = delete_experiment(input_json_file) @@ -262,7 +222,7 @@ def test_create_conatiner_exp_with_namespace(cluster_type): data = response.json() print(data['message']) - assert response.status_code == ERROR_500_STATUS_CODE + assert response.status_code == expected_status_code assert data['status'] == ERROR_STATUS response = delete_experiment(input_json_file) From 2db5937e9c9805440ce863e2685768bac058f03f Mon Sep 17 00:00:00 2001 From: Shekhar Saxena Date: Wed, 18 Sep 2024 02:29:39 +0530 Subject: [PATCH 07/16] updating e2e to use templates Signed-off-by: Shekhar Saxena --- .../rest_apis/test_list_recommendations.py | 65 ++++++++++++++++++- .../test_local_monitoring_e2e_workflow.py | 31 ++++++++- 2 files changed, 93 insertions(+), 3 deletions(-) diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py b/tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py index ad3ed1ff4..8abb73bd6 100644 --- a/tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py @@ -34,16 +34,77 @@ from helpers.short_and_medium_term_list_reco_json_schema import short_and_medium_term_list_reco_json_schema from helpers.short_term_list_reco_json_schema import short_term_list_reco_json_schema from helpers.utils import * +from jinja2 import Environment, FileSystemLoader @pytest.mark.sanity -def test_list_recommendations_namespace_single_result(cluster_type): 
+@pytest.mark.parametrize("test_name, expected_status_code, version, experiment_name, cluster_name, performance_profile, mode, target_cluster, datasource, experiment_type, kubernetes_obj_type, name, namespace, namespace_name, container_image_name, container_name, measurement_duration, threshold", + [ + ("list_reco_default_namespace", SUCCESS_STATUS_CODE, "v2.0", "test-default-ns", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "namespace", None, None, None, "default", None, None, "15min", "0.1"), + ("list_reco_openshift_monitoring_cluster1", SUCCESS_STATUS_CODE, "v2.0", "test-openshift-monitoring-ns", "cluster-1", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "namespace", None, None, None, "openshift-monitoring", None, None, "15min", "0.1"), + ("list_reco_openshift_monitoring_cluster2", SUCCESS_STATUS_CODE, "v2.0", "test-openshift-monitoring-ns", "cluster-2", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "namespace", None, None, None, "openshift-monitoring", None, None, "15min", "0.1") + ] +) +def test_list_recommendations_namespace_single_result(test_name, expected_status_code, version, experiment_name, cluster_name, performance_profile, mode, target_cluster, datasource, experiment_type, kubernetes_obj_type, name, namespace, namespace_name, container_image_name, container_name, measurement_duration, threshold, cluster_type): """ Test Description: This test validates listRecommendations by passing a valid namespace experiment name """ - input_json_file = "../json_files/create_namespace_exp.json" + # Generate a temporary JSON filename + tmp_json_file = "/tmp/create_exp_" + test_name + ".json" + print("tmp_json_file = ", tmp_json_file) + + # Load the Jinja2 template + environment = Environment(loader=FileSystemLoader("../json_files/")) + template = environment.get_template("create_exp_template.json") + + # In case of test_name with "null", strip the specific fields + if "null" in test_name: + field = test_name.replace("null_", "") + json_file = "../json_files/create_exp_template.json" + filename = "/tmp/create_exp_template.json" + strip_double_quotes_for_field(json_file, field, filename) + environment = Environment(loader=FileSystemLoader("/tmp/")) + template = environment.get_template("create_exp_template.json") + + # Render the JSON content from the template + content = template.render( + version=version, + experiment_name=experiment_name, + cluster_name=cluster_name, + performance_profile=performance_profile, + mode=mode, + target_cluster=target_cluster, + datasource=datasource, + experiment_type=experiment_type, + kubernetes_obj_type=kubernetes_obj_type, + name=name, + namespace=namespace, + namespace_name=namespace_name, + container_image_name=container_image_name, + container_name=container_name, + measurement_duration=measurement_duration, + threshold=threshold + ) + + # Convert rendered content to a dictionary + json_content = json.loads(content) + + if json_content[0]["kubernetes_objects"][0]["type"] == "None": + json_content[0]["kubernetes_objects"][0].pop("type") + if json_content[0]["kubernetes_objects"][0]["name"] == "None": + json_content[0]["kubernetes_objects"][0].pop("name") + if json_content[0]["kubernetes_objects"][0]["namespace"] == "None": + json_content[0]["kubernetes_objects"][0].pop("namespace") + if json_content[0]["kubernetes_objects"][0]["containers"][0]["container_image_name"] == "None": + json_content[0]["kubernetes_objects"][0].pop("containers") + + # Write 
the final JSON to the temp file + with open(tmp_json_file, mode="w", encoding="utf-8") as message: + json.dump(json_content, message, indent=4) + + input_json_file = tmp_json_file form_kruize_url(cluster_type) response = delete_experiment(input_json_file) diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_local_monitoring_e2e_workflow.py b/tests/scripts/local_monitoring_tests/rest_apis/test_local_monitoring_e2e_workflow.py index 31ea19542..47a7022d7 100644 --- a/tests/scripts/local_monitoring_tests/rest_apis/test_local_monitoring_e2e_workflow.py +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_local_monitoring_e2e_workflow.py @@ -44,6 +44,7 @@ from helpers.list_reco_json_local_monitoring_schema import * from helpers.list_reco_json_validate import * from helpers.import_metadata_json_validate import * +from jinja2 import Environment, FileSystemLoader metric_profile_dir = get_metric_profile_dir() @@ -116,11 +117,39 @@ def test_list_recommendations_multiple_exps_for_datasource_workloads(cluster_typ errorMsg = validate_list_metadata_json(list_metadata_json, list_metadata_json_verbose_true_schema) assert errorMsg == "" + # Generate a temporary JSON filename + tmp_json_file = "/tmp/create_exp_" + ".json" + print("tmp_json_file = ", tmp_json_file) + + # Load the Jinja2 template + environment = Environment(loader=FileSystemLoader("../json_files/")) + template = environment.get_template("create_exp_template.json") + + # Render the JSON content from the template + content = template.render( + version="v2.0", experiment_name="test-default-ns", cluster_name="default", performance_profile="resource-optimization-local-monitoring", + mode="monitor", target_cluster="local", datasource="prometheus-1", experiment_type="namespace", kubernetes_obj_type=None, name=None, + namespace=None, namespace_name="default", container_image_name=None, container_name=None, measurement_duration="15min", threshold="0.1" + ) + + # Convert rendered content to a dictionary + json_content = json.loads(content) + json_content[0]["kubernetes_objects"][0].pop("type") + json_content[0]["kubernetes_objects"][0].pop("name") + json_content[0]["kubernetes_objects"][0].pop("namespace") + json_content[0]["kubernetes_objects"][0].pop("containers") + + # Write the final JSON to the temp file + with open(tmp_json_file, mode="w", encoding="utf-8") as message: + json.dump(json_content, message, indent=4) + + # namespace exp json file + namespace_exp_json_file = tmp_json_file # delete tfb experiments tfb_exp_json_file = "../json_files/create_tfb_exp.json" tfb_db_exp_json_file = "../json_files/create_tfb_db_exp.json" - namespace_exp_json_file = "../json_files/create_namespace_exp.json" + response = delete_experiment(tfb_exp_json_file) print("delete tfb exp = ", response.status_code) From 7bb6dfa1180c1086b8296c8066ca24cd98e34277 Mon Sep 17 00:00:00 2001 From: Shekhar Saxena Date: Wed, 18 Sep 2024 11:22:12 +0530 Subject: [PATCH 08/16] adding namespace e2e flow Signed-off-by: Shekhar Saxena --- .../test_namespace_reco_e2e_workflow.py | 339 ++++++++++++++++++ 1 file changed, 339 insertions(+) create mode 100644 tests/scripts/local_monitoring_tests/rest_apis/test_namespace_reco_e2e_workflow.py diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_namespace_reco_e2e_workflow.py b/tests/scripts/local_monitoring_tests/rest_apis/test_namespace_reco_e2e_workflow.py new file mode 100644 index 000000000..bbc0438cf --- /dev/null +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_namespace_reco_e2e_workflow.py @@ -0,0 
+1,339 @@
+"""
+Copyright (c) 2024, 2024 Red Hat, IBM Corporation and others.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+import copy
+import json
+
+import pytest
+import sys
+import time
+import shutil
+sys.path.append("../../")
+
+from helpers.fixtures import *
+from helpers.generate_rm_jsons import *
+from helpers.kruize import *
+from helpers.short_term_list_reco_json_schema import *
+from helpers.list_reco_json_validate import *
+from helpers.list_datasources_json_validate import *
+from helpers.utils import *
+from helpers.utils import benchmarks_install
+from helpers.utils import clone_repo
+from helpers.utils import apply_tfb_load
+from helpers.utils import wait_for_container_to_complete
+from helpers.utils import validate_local_monitoring_reco_json
+from helpers.list_metadata_json_validate import *
+from helpers.list_metadata_json_schema import *
+from helpers.list_metadata_json_verbose_true_schema import *
+from helpers.list_metadata_json_cluster_name_without_verbose_schema import *
+from helpers.list_metric_profiles_validate import *
+from helpers.list_metric_profiles_without_parameters_schema import *
+from helpers.short_term_list_reco_json_schema import *
+from helpers.list_reco_json_local_monitoring_schema import *
+from helpers.list_reco_json_validate import *
+from helpers.import_metadata_json_validate import *
+from jinja2 import Environment, FileSystemLoader
+
+metric_profile_dir = get_metric_profile_dir()
+
+
+@pytest.mark.test_e2e
+def test_list_recommendations_namespace_exps(cluster_type):
+    """
+    Test Description: This test validates list recommendations for multiple namespace experiments created using the create experiment template
+    """
+    clone_repo("https://github.com/kruize/benchmarks")
+
+    create_namespace("ns1")
+    create_namespace("ns2")
+    create_namespace("ns3")
+
+    benchmarks_install(namespace="ns1")
+    benchmarks_install(namespace="ns2")
+    benchmarks_install(namespace="ns3")
+
+    container_id1 = apply_tfb_load("ns1", cluster_type)
+    container_id2 = apply_tfb_load("ns2", cluster_type)
+    container_id3 = apply_tfb_load("ns3", cluster_type)
+
+    print(container_id1)
+    print(container_id2)
+    print(container_id3)
+
+    # list all datasources
+    form_kruize_url(cluster_type)
+
+    # Get the datasource name
+    datasource_name = None
+    response = list_datasources(datasource_name)
+
+    list_datasources_json = response.json()
+
+    assert response.status_code == SUCCESS_200_STATUS_CODE
+
+    # Validate the json against the json schema
+    errorMsg = validate_list_datasources_json(list_datasources_json, list_datasources_json_schema)
+    assert errorMsg == ""
+
+
+    # Import datasource metadata
+    input_json_file = "../json_files/import_metadata.json"
+
+    response = delete_metadata(input_json_file)
+    print("delete metadata = ", response.status_code)
+
+    # Import metadata using the specified json
+    response = import_metadata(input_json_file)
+    metadata_json = response.json()
+
+    # Validate the json against the json schema
+    errorMsg = validate_import_metadata_json(metadata_json, import_metadata_json_schema)
+    assert errorMsg == ""
+
+
+    # Display metadata from prometheus-1 datasource
+    json_data = json.load(open(input_json_file))
+    datasource = json_data['datasource_name']
+
+    response = list_metadata(datasource)
+
+    list_metadata_json = response.json()
+    assert response.status_code == SUCCESS_200_STATUS_CODE
+
+    # Validate the json against the json schema
+    errorMsg = validate_list_metadata_json(list_metadata_json, list_metadata_json_schema)
+    assert errorMsg == ""
+
+
+    # Display metadata for the default cluster
+    # Currently only default cluster is supported by Kruize
+    cluster_name = "default"
+
+    response = list_metadata(datasource=datasource, cluster_name=cluster_name, verbose="true")
+
+    list_metadata_json = response.json()
+    assert response.status_code == SUCCESS_200_STATUS_CODE
+
+    # Validate the json against the json schema
+    errorMsg = validate_list_metadata_json(list_metadata_json, list_metadata_json_verbose_true_schema)
+    assert errorMsg == ""
+
+    # Generate a temporary JSON filename
+    tmp_json_file_1 = "/tmp/create_exp_1" + ".json"
+    tmp_json_file_2 = "/tmp/create_exp_2" + ".json"
+    tmp_json_file_3 = "/tmp/create_exp_3" + ".json"
+
+    # Load the Jinja2 template
+    environment = Environment(loader=FileSystemLoader("../json_files/"))
+    template = environment.get_template("create_exp_template.json")
+
+    # Render the JSON content from the template
+    content = template.render(
+        version="v2.0", experiment_name="test-ns1", cluster_name="default", performance_profile="resource-optimization-local-monitoring",
+        mode="monitor", target_cluster="local", datasource="prometheus-1", experiment_type="namespace", kubernetes_obj_type=None, name=None,
+        namespace=None, namespace_name="ns1", container_image_name=None, container_name=None, measurement_duration="15min", threshold="0.1"
+    )
+
+    # Convert rendered content to a dictionary
+    json_content = json.loads(content)
+    json_content[0]["kubernetes_objects"][0].pop("type")
+    json_content[0]["kubernetes_objects"][0].pop("name")
+    json_content[0]["kubernetes_objects"][0].pop("namespace")
+    json_content[0]["kubernetes_objects"][0].pop("containers")
+
+    # Write the final JSON to the temp file
+    with open(tmp_json_file_1, mode="w", encoding="utf-8") as message:
+        json.dump(json_content, message, indent=4)
+
+    # namespace exp json file
+    ns1_exp_json_file = tmp_json_file_1
+
+    json_content[0]["experiment_name"] = "test-ns2"
+    json_content[0]["kubernetes_objects"][0]["namespaces"]["namespace_name"] = "ns2"
+
+    # Write the final JSON to the temp file
+    with open(tmp_json_file_2, mode="w", encoding="utf-8") as message:
+        json.dump(json_content, message, indent=4)
+
+    # namespace exp json file
+    ns2_exp_json_file = tmp_json_file_2
+
+    json_content[0]["experiment_name"] = "test-ns3"
+    json_content[0]["kubernetes_objects"][0]["namespaces"]["namespace_name"] = "ns3"
+
+    # Write the final JSON to the temp file
+    with open(tmp_json_file_3, mode="w", encoding="utf-8") as message:
+        json.dump(json_content, message, indent=4)
+
+    # namespace exp json file
+    ns3_exp_json_file = tmp_json_file_3
+
+    # delete the namespace experiments if they already exist
+    response = delete_experiment(ns1_exp_json_file)
+    print("delete ns1 exp = ", response.status_code)
+
+    response = delete_experiment(ns2_exp_json_file)
+    print("delete ns2 exp = ", response.status_code)
+
+    response = delete_experiment(ns3_exp_json_file)
+    print("delete ns3 exp = ", response.status_code)
+
+    # Install default metric profile
+    metric_profile_json_file = metric_profile_dir / 'resource_optimization_local_monitoring.json'
+    response = delete_metric_profile(metric_profile_json_file)
delete_metric_profile(metric_profile_json_file) + print("delete metric profile = ", response.status_code) + + # Create metric profile using the specified json + response = create_metric_profile(metric_profile_json_file) + + data = response.json() + print(data['message']) + + assert response.status_code == SUCCESS_STATUS_CODE + assert data['status'] == SUCCESS_STATUS + + json_file = open(metric_profile_json_file, "r") + input_json = json.loads(json_file.read()) + metric_profile_name = input_json['metadata']['name'] + assert data['message'] == CREATE_METRIC_PROFILE_SUCCESS_MSG % metric_profile_name + + response = list_metric_profiles(name=metric_profile_name, logging=False) + metric_profile_json = response.json() + + assert response.status_code == SUCCESS_200_STATUS_CODE + + # Validate the json against the json schema + errorMsg = validate_list_metric_profiles_json(metric_profile_json, list_metric_profiles_schema) + assert errorMsg == "" + + + # create namespace experiment + response = create_experiment(ns1_exp_json_file) + + data = response.json() + print(data['message']) + + assert response.status_code == SUCCESS_STATUS_CODE + assert data['status'] == SUCCESS_STATUS + assert data['message'] == CREATE_EXP_SUCCESS_MSG + + # create namespace experiment + response = create_experiment(ns2_exp_json_file) + + data = response.json() + print(data['message']) + + assert response.status_code == SUCCESS_STATUS_CODE + assert data['status'] == SUCCESS_STATUS + assert data['message'] == CREATE_EXP_SUCCESS_MSG + + # create namespace experiment + response = create_experiment(ns3_exp_json_file) + + data = response.json() + print(data['message']) + + assert response.status_code == SUCCESS_STATUS_CODE + assert data['status'] == SUCCESS_STATUS + assert data['message'] == CREATE_EXP_SUCCESS_MSG + + # Wait for the container to complete + wait_for_container_to_complete(container_id1) + wait_for_container_to_complete(container_id2) + wait_for_container_to_complete(container_id3) + + + # generate recommendations + json_file = open(ns1_exp_json_file, "r") + input_json = json.loads(json_file.read()) + ns1_exp_name = input_json[0]['experiment_name'] + + json_file = open(ns2_exp_json_file, "r") + input_json = json.loads(json_file.read()) + ns2_exp_name = input_json[0]['experiment_name'] + + json_file = open(ns3_exp_json_file, "r") + input_json = json.loads(json_file.read()) + ns3_exp_name = input_json[0]['experiment_name'] + + response = generate_recommendations(ns1_exp_name) + assert response.status_code == SUCCESS_STATUS_CODE + + # Invoke list recommendations for the specified experiment + response = list_recommendations(ns1_exp_name) + assert response.status_code == SUCCESS_200_STATUS_CODE + list_reco_json = response.json() + + # Validate the json against the json schema + errorMsg = validate_list_reco_json(list_reco_json, list_reco_namespace_json_local_monitoring_schema) + assert errorMsg == "" + + # Validate the json values + ns1_exp_json = read_json_data_from_file(ns1_exp_json_file) + validate_local_monitoring_reco_json(ns1_exp_json[0], list_reco_json[0]) + + response = generate_recommendations(ns2_exp_name) + assert response.status_code == SUCCESS_STATUS_CODE + + # Invoke list recommendations for the specified experiment + response = list_recommendations(ns2_exp_name) + assert response.status_code == SUCCESS_200_STATUS_CODE + list_reco_json = response.json() + + # Validate the json against the json schema + errorMsg = validate_list_reco_json(list_reco_json, list_reco_namespace_json_local_monitoring_schema) + 
assert errorMsg == "" + + # Validate the json values + ns2_exp_json = read_json_data_from_file(ns2_exp_json_file) + validate_local_monitoring_reco_json(ns2_exp_json[0], list_reco_json[0]) + + response = generate_recommendations(ns3_exp_name) + assert response.status_code == SUCCESS_STATUS_CODE + + # Invoke list recommendations for the specified experiment + response = list_recommendations(ns3_exp_name) + assert response.status_code == SUCCESS_200_STATUS_CODE + list_reco_json = response.json() + + # Validate the json against the json schema + errorMsg = validate_list_reco_json(list_reco_json, list_reco_namespace_json_local_monitoring_schema) + assert errorMsg == "" + + # Validate the json values + ns3_exp_json = read_json_data_from_file(ns3_exp_json_file) + validate_local_monitoring_reco_json(ns3_exp_json[0], list_reco_json[0]) + + + # Delete namespace experiment + response = delete_experiment(ns1_exp_json_file) + print("delete exp = ", response.status_code) + assert response.status_code == SUCCESS_STATUS_CODE + + response = delete_experiment(ns2_exp_json_file) + print("delete exp = ", response.status_code) + assert response.status_code == SUCCESS_STATUS_CODE + + response = delete_experiment(ns3_exp_json_file) + print("delete exp = ", response.status_code) + assert response.status_code == SUCCESS_STATUS_CODE + + # Delete Metric Profile + response = delete_metric_profile(metric_profile_json_file) + print("delete metric profile = ", response.status_code) + assert response.status_code == SUCCESS_STATUS_CODE + + # Remove benchmarks directory + shutil.rmtree("benchmarks") From 97aec12d93e7be2e5a171bcccecf604a51bd5afb Mon Sep 17 00:00:00 2001 From: Shekhar Saxena Date: Wed, 18 Sep 2024 16:30:23 +0530 Subject: [PATCH 09/16] testing-namespace Signed-off-by: Shekhar Saxena --- .../rest_apis/test_create_experiment.py | 2 ++ .../rest_apis/test_list_recommendations.py | 5 +++-- .../rest_apis/test_local_monitoring_e2e_workflow.py | 1 + .../rest_apis/test_namespace_reco_e2e_workflow.py | 3 ++- 4 files changed, 8 insertions(+), 3 deletions(-) diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py b/tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py index 5ead7ee0e..5da4746f1 100644 --- a/tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py @@ -48,6 +48,7 @@ csvfile = "/tmp/create_exp_test_data.csv" @pytest.mark.sanity +@pytest.mark.namespace @pytest.mark.parametrize("test_name, expected_status_code, version, experiment_name, cluster_name, performance_profile, mode, target_cluster, datasource, experiment_type, kubernetes_obj_type, name, namespace, namespace_name, container_image_name, container_name, measurement_duration, threshold", [ ("valid_namespace_exp_with_exp_type", SUCCESS_STATUS_CODE, "v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "namespace", None, None, None, "default", None, None, "15min", "0.1"), @@ -138,6 +139,7 @@ def test_create_exp_valid_tests(test_name, expected_status_code, version, experi @pytest.mark.negative +@pytest.mark.namespace @pytest.mark.parametrize("test_name, expected_status_code, version, experiment_name, cluster_name, performance_profile, mode, target_cluster, datasource, experiment_type, kubernetes_obj_type, name, namespace, namespace_name, container_image_name, container_name, measurement_duration, threshold", [ 
("invalid_namespace_exp_without_exp_type", ERROR_500_STATUS_CODE, "v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", None, None, None, None, "default", None, None, "15min", "0.1"), diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py b/tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py index 8abb73bd6..2b95bc031 100644 --- a/tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py @@ -39,11 +39,12 @@ @pytest.mark.sanity +@pytest.mark.namespace @pytest.mark.parametrize("test_name, expected_status_code, version, experiment_name, cluster_name, performance_profile, mode, target_cluster, datasource, experiment_type, kubernetes_obj_type, name, namespace, namespace_name, container_image_name, container_name, measurement_duration, threshold", [ ("list_reco_default_namespace", SUCCESS_STATUS_CODE, "v2.0", "test-default-ns", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "namespace", None, None, None, "default", None, None, "15min", "0.1"), - ("list_reco_openshift_monitoring_cluster1", SUCCESS_STATUS_CODE, "v2.0", "test-openshift-monitoring-ns", "cluster-1", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "namespace", None, None, None, "openshift-monitoring", None, None, "15min", "0.1"), - ("list_reco_openshift_monitoring_cluster2", SUCCESS_STATUS_CODE, "v2.0", "test-openshift-monitoring-ns", "cluster-2", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "namespace", None, None, None, "openshift-monitoring", None, None, "15min", "0.1") + ("list_reco_kube_system_cluster1", SUCCESS_STATUS_CODE, "v2.0", "test-kube-system-ns", "cluster-1", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "namespace", None, None, None, "kube-system", None, None, "15min", "0.1"), + ("list_reco_kube_system_cluster2", SUCCESS_STATUS_CODE, "v2.0", "test-kube-system-ns", "cluster-2", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "namespace", None, None, None, "kube-system", None, None, "15min", "0.1") ] ) def test_list_recommendations_namespace_single_result(test_name, expected_status_code, version, experiment_name, cluster_name, performance_profile, mode, target_cluster, datasource, experiment_type, kubernetes_obj_type, name, namespace, namespace_name, container_image_name, container_name, measurement_duration, threshold, cluster_type): diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_local_monitoring_e2e_workflow.py b/tests/scripts/local_monitoring_tests/rest_apis/test_local_monitoring_e2e_workflow.py index 47a7022d7..c2effcd5e 100644 --- a/tests/scripts/local_monitoring_tests/rest_apis/test_local_monitoring_e2e_workflow.py +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_local_monitoring_e2e_workflow.py @@ -50,6 +50,7 @@ @pytest.mark.test_e2e +@pytest.mark.namespace def test_list_recommendations_multiple_exps_for_datasource_workloads(cluster_type): """ Test Description: This test validates list recommendations for multiple experiments posted using different json files diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_namespace_reco_e2e_workflow.py b/tests/scripts/local_monitoring_tests/rest_apis/test_namespace_reco_e2e_workflow.py index bbc0438cf..30b36cc0d 100644 --- 
a/tests/scripts/local_monitoring_tests/rest_apis/test_namespace_reco_e2e_workflow.py +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_namespace_reco_e2e_workflow.py @@ -50,6 +50,7 @@ @pytest.mark.test_e2e +@pytest.mark.namespace def test_list_recommendations_namespace_exps(cluster_type): """ Test Description: This test validates list recommendations for multiple experiments posted using different json files @@ -58,7 +59,7 @@ def test_list_recommendations_namespace_exps(cluster_type): create_namespace("ns1") create_namespace("ns2") - create_namespace("ns3") + create_namespace("ns3")c benchmarks_install(namespace="ns1") benchmarks_install(namespace="ns2") From bb6db38e2adabce4fb8aa4e7599e4478b85246d8 Mon Sep 17 00:00:00 2001 From: Shekhar Saxena Date: Wed, 18 Sep 2024 23:48:15 +0530 Subject: [PATCH 10/16] restoring changes Signed-off-by: Shekhar Saxena --- tests/scripts/local_monitoring_tests/local_monitoring_tests.sh | 2 +- .../local_monitoring_tests/rest_apis/test_create_experiment.py | 2 -- .../rest_apis/test_list_recommendations.py | 1 - .../rest_apis/test_local_monitoring_e2e_workflow.py | 1 - .../rest_apis/test_namespace_reco_e2e_workflow.py | 3 +-- 5 files changed, 2 insertions(+), 7 deletions(-) diff --git a/tests/scripts/local_monitoring_tests/local_monitoring_tests.sh b/tests/scripts/local_monitoring_tests/local_monitoring_tests.sh index 28f0a02b1..9676a1382 100644 --- a/tests/scripts/local_monitoring_tests/local_monitoring_tests.sh +++ b/tests/scripts/local_monitoring_tests/local_monitoring_tests.sh @@ -43,7 +43,7 @@ function local_monitoring_tests() { target="crc" metric_profile_json="${METRIC_PROFILE_DIR}/resource_optimization_local_monitoring.json" - local_monitoring_tests=("sanity" "extended" "negative" "test_e2e") + local_monitoring_tests=("sanity" "extended" "negative" "test_e2e" ) # check if the test case is supported if [ ! 
-z "${testcase}" ]; then diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py b/tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py index 5da4746f1..5ead7ee0e 100644 --- a/tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py @@ -48,7 +48,6 @@ csvfile = "/tmp/create_exp_test_data.csv" @pytest.mark.sanity -@pytest.mark.namespace @pytest.mark.parametrize("test_name, expected_status_code, version, experiment_name, cluster_name, performance_profile, mode, target_cluster, datasource, experiment_type, kubernetes_obj_type, name, namespace, namespace_name, container_image_name, container_name, measurement_duration, threshold", [ ("valid_namespace_exp_with_exp_type", SUCCESS_STATUS_CODE, "v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "namespace", None, None, None, "default", None, None, "15min", "0.1"), @@ -139,7 +138,6 @@ def test_create_exp_valid_tests(test_name, expected_status_code, version, experi @pytest.mark.negative -@pytest.mark.namespace @pytest.mark.parametrize("test_name, expected_status_code, version, experiment_name, cluster_name, performance_profile, mode, target_cluster, datasource, experiment_type, kubernetes_obj_type, name, namespace, namespace_name, container_image_name, container_name, measurement_duration, threshold", [ ("invalid_namespace_exp_without_exp_type", ERROR_500_STATUS_CODE, "v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", None, None, None, None, "default", None, None, "15min", "0.1"), diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py b/tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py index 2b95bc031..4ff376b1f 100644 --- a/tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py @@ -39,7 +39,6 @@ @pytest.mark.sanity -@pytest.mark.namespace @pytest.mark.parametrize("test_name, expected_status_code, version, experiment_name, cluster_name, performance_profile, mode, target_cluster, datasource, experiment_type, kubernetes_obj_type, name, namespace, namespace_name, container_image_name, container_name, measurement_duration, threshold", [ ("list_reco_default_namespace", SUCCESS_STATUS_CODE, "v2.0", "test-default-ns", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "namespace", None, None, None, "default", None, None, "15min", "0.1"), diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_local_monitoring_e2e_workflow.py b/tests/scripts/local_monitoring_tests/rest_apis/test_local_monitoring_e2e_workflow.py index c2effcd5e..47a7022d7 100644 --- a/tests/scripts/local_monitoring_tests/rest_apis/test_local_monitoring_e2e_workflow.py +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_local_monitoring_e2e_workflow.py @@ -50,7 +50,6 @@ @pytest.mark.test_e2e -@pytest.mark.namespace def test_list_recommendations_multiple_exps_for_datasource_workloads(cluster_type): """ Test Description: This test validates list recommendations for multiple experiments posted using different json files diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_namespace_reco_e2e_workflow.py b/tests/scripts/local_monitoring_tests/rest_apis/test_namespace_reco_e2e_workflow.py 
index 30b36cc0d..bbc0438cf 100644 --- a/tests/scripts/local_monitoring_tests/rest_apis/test_namespace_reco_e2e_workflow.py +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_namespace_reco_e2e_workflow.py @@ -50,7 +50,6 @@ @pytest.mark.test_e2e -@pytest.mark.namespace def test_list_recommendations_namespace_exps(cluster_type): """ Test Description: This test validates list recommendations for multiple experiments posted using different json files @@ -59,7 +58,7 @@ def test_list_recommendations_namespace_exps(cluster_type): create_namespace("ns1") create_namespace("ns2") - create_namespace("ns3")c + create_namespace("ns3") benchmarks_install(namespace="ns1") benchmarks_install(namespace="ns2") From ac9967c2e82c985a1a2ec0f843e5e29f449c84b4 Mon Sep 17 00:00:00 2001 From: Shekhar Saxena Date: Thu, 19 Sep 2024 00:28:40 +0530 Subject: [PATCH 11/16] temp-for-jenkins Signed-off-by: Shekhar Saxena --- .../rest_apis/test_list_recommendations.py | 5 ++--- .../rest_apis/test_local_monitoring_e2e_workflow.py | 2 +- .../rest_apis/test_namespace_reco_e2e_workflow.py | 6 +++--- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py b/tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py index 4ff376b1f..c042fbd9b 100644 --- a/tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py @@ -41,9 +41,8 @@ @pytest.mark.sanity @pytest.mark.parametrize("test_name, expected_status_code, version, experiment_name, cluster_name, performance_profile, mode, target_cluster, datasource, experiment_type, kubernetes_obj_type, name, namespace, namespace_name, container_image_name, container_name, measurement_duration, threshold", [ - ("list_reco_default_namespace", SUCCESS_STATUS_CODE, "v2.0", "test-default-ns", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "namespace", None, None, None, "default", None, None, "15min", "0.1"), - ("list_reco_kube_system_cluster1", SUCCESS_STATUS_CODE, "v2.0", "test-kube-system-ns", "cluster-1", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "namespace", None, None, None, "kube-system", None, None, "15min", "0.1"), - ("list_reco_kube_system_cluster2", SUCCESS_STATUS_CODE, "v2.0", "test-kube-system-ns", "cluster-2", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "namespace", None, None, None, "kube-system", None, None, "15min", "0.1") + ("list_reco_default_cluster1", SUCCESS_STATUS_CODE, "v2.0", "test-default-ns", "cluster-1", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "namespace", None, None, None, "default", None, None, "15min", "0.1"), + ("list_reco_default_cluster2", SUCCESS_STATUS_CODE, "v2.0", "test-default-ns", "cluster-2", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "namespace", None, None, None, "default", None, None, "15min", "0.1") ] ) def test_list_recommendations_namespace_single_result(test_name, expected_status_code, version, experiment_name, cluster_name, performance_profile, mode, target_cluster, datasource, experiment_type, kubernetes_obj_type, name, namespace, namespace_name, container_image_name, container_name, measurement_duration, threshold, cluster_type): diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_local_monitoring_e2e_workflow.py 
b/tests/scripts/local_monitoring_tests/rest_apis/test_local_monitoring_e2e_workflow.py index 47a7022d7..a7502ad03 100644 --- a/tests/scripts/local_monitoring_tests/rest_apis/test_local_monitoring_e2e_workflow.py +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_local_monitoring_e2e_workflow.py @@ -219,7 +219,7 @@ def test_list_recommendations_multiple_exps_for_datasource_workloads(cluster_typ assert data['message'] == CREATE_EXP_SUCCESS_MSG # Wait for the container to complete - wait_for_container_to_complete(container_id) +# wait_for_container_to_complete(container_id) # generate recommendations json_file = open(tfb_exp_json_file, "r") diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_namespace_reco_e2e_workflow.py b/tests/scripts/local_monitoring_tests/rest_apis/test_namespace_reco_e2e_workflow.py index bbc0438cf..ea80da112 100644 --- a/tests/scripts/local_monitoring_tests/rest_apis/test_namespace_reco_e2e_workflow.py +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_namespace_reco_e2e_workflow.py @@ -250,9 +250,9 @@ def test_list_recommendations_namespace_exps(cluster_type): assert data['message'] == CREATE_EXP_SUCCESS_MSG # Wait for the container to complete - wait_for_container_to_complete(container_id1) - wait_for_container_to_complete(container_id2) - wait_for_container_to_complete(container_id3) +# wait_for_container_to_complete(container_id1) +# wait_for_container_to_complete(container_id2) +# wait_for_container_to_complete(container_id3) # generate recommendations From 83f9b190b8e62aae63ff5e5e82880035f9ba226e Mon Sep 17 00:00:00 2001 From: Shekhar Saxena Date: Thu, 19 Sep 2024 01:19:12 +0530 Subject: [PATCH 12/16] jenkins-namespace-test Signed-off-by: Shekhar Saxena --- .../local_monitoring_tests.sh | 3 +- .../scripts/local_monitoring_tests/pytest.ini | 1 + .../rest_apis/test_create_experiment.py | 2 ++ .../rest_apis/test_list_recommendations.py | 32 +++++++++++++++++++ .../test_local_monitoring_e2e_workflow.py | 3 +- .../test_namespace_reco_e2e_workflow.py | 7 ++-- 6 files changed, 43 insertions(+), 5 deletions(-) diff --git a/tests/scripts/local_monitoring_tests/local_monitoring_tests.sh b/tests/scripts/local_monitoring_tests/local_monitoring_tests.sh index 9676a1382..9fa1f4a17 100644 --- a/tests/scripts/local_monitoring_tests/local_monitoring_tests.sh +++ b/tests/scripts/local_monitoring_tests/local_monitoring_tests.sh @@ -43,7 +43,8 @@ function local_monitoring_tests() { target="crc" metric_profile_json="${METRIC_PROFILE_DIR}/resource_optimization_local_monitoring.json" - local_monitoring_tests=("sanity" "extended" "negative" "test_e2e" ) +# local_monitoring_tests=("sanity" "extended" "negative" "test_e2e" ) + local_monitoring_tests=("namespace_tests") # check if the test case is supported if [ ! 
-z "${testcase}" ]; then diff --git a/tests/scripts/local_monitoring_tests/pytest.ini b/tests/scripts/local_monitoring_tests/pytest.ini index 48bdd36e6..d021f3338 100644 --- a/tests/scripts/local_monitoring_tests/pytest.ini +++ b/tests/scripts/local_monitoring_tests/pytest.ini @@ -5,3 +5,4 @@ markers = test_e2e: mark a test as end-to-end test negative: mark test as a negative test extended: mark test as a extended test + namespace_tests: mark test as a namespace reco test diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py b/tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py index 5ead7ee0e..434e2ea75 100644 --- a/tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py @@ -48,6 +48,7 @@ csvfile = "/tmp/create_exp_test_data.csv" @pytest.mark.sanity +@pytest.mark.namespace_tests @pytest.mark.parametrize("test_name, expected_status_code, version, experiment_name, cluster_name, performance_profile, mode, target_cluster, datasource, experiment_type, kubernetes_obj_type, name, namespace, namespace_name, container_image_name, container_name, measurement_duration, threshold", [ ("valid_namespace_exp_with_exp_type", SUCCESS_STATUS_CODE, "v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "namespace", None, None, None, "default", None, None, "15min", "0.1"), @@ -138,6 +139,7 @@ def test_create_exp_valid_tests(test_name, expected_status_code, version, experi @pytest.mark.negative +@pytest.mark.namespace_tests @pytest.mark.parametrize("test_name, expected_status_code, version, experiment_name, cluster_name, performance_profile, mode, target_cluster, datasource, experiment_type, kubernetes_obj_type, name, namespace, namespace_name, container_image_name, container_name, measurement_duration, threshold", [ ("invalid_namespace_exp_without_exp_type", ERROR_500_STATUS_CODE, "v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", None, None, None, None, "default", None, None, "15min", "0.1"), diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py b/tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py index c042fbd9b..0ed9b0364 100644 --- a/tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py @@ -30,6 +30,8 @@ from helpers.medium_term_list_reco_json_schema import * from helpers.long_term_list_reco_json_schema import * from helpers.list_reco_json_validate import * +from helpers.list_metric_profiles_validate import * +from helpers.list_metric_profiles_without_parameters_schema import * from helpers.short_and_long_term_list_reco_json_schema import short_and_long_term_list_reco_json_schema from helpers.short_and_medium_term_list_reco_json_schema import short_and_medium_term_list_reco_json_schema from helpers.short_term_list_reco_json_schema import short_term_list_reco_json_schema @@ -37,8 +39,10 @@ from jinja2 import Environment, FileSystemLoader +metric_profile_dir = get_metric_profile_dir() @pytest.mark.sanity +@pytest.mark.namespace_tests @pytest.mark.parametrize("test_name, expected_status_code, version, experiment_name, cluster_name, performance_profile, mode, target_cluster, datasource, experiment_type, kubernetes_obj_type, name, namespace, namespace_name, 
container_image_name, container_name, measurement_duration, threshold", [ ("list_reco_default_cluster1", SUCCESS_STATUS_CODE, "v2.0", "test-default-ns", "cluster-1", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "namespace", None, None, None, "default", None, None, "15min", "0.1"), @@ -109,6 +113,34 @@ def test_list_recommendations_namespace_single_result(test_name, expected_status response = delete_experiment(input_json_file) print("delete exp = ", response.status_code) + #Install default metric profile + metric_profile_json_file = metric_profile_dir / 'resource_optimization_local_monitoring.json' + response = delete_metric_profile(metric_profile_json_file) + print("delete metric profile = ", response.status_code) + + # Create metric profile using the specified json + response = create_metric_profile(metric_profile_json_file) + + data = response.json() + print(data['message']) + + assert response.status_code == SUCCESS_STATUS_CODE + assert data['status'] == SUCCESS_STATUS + + json_file = open(metric_profile_json_file, "r") + input_json = json.loads(json_file.read()) + metric_profile_name = input_json['metadata']['name'] + assert data['message'] == CREATE_METRIC_PROFILE_SUCCESS_MSG % metric_profile_name + + response = list_metric_profiles(name=metric_profile_name, logging=False) + metric_profile_json = response.json() + + assert response.status_code == SUCCESS_200_STATUS_CODE + + # Validate the json against the json schema + errorMsg = validate_list_metric_profiles_json(metric_profile_json, list_metric_profiles_schema) + assert errorMsg == "" + # Create namespace experiment using the specified json response = create_experiment(input_json_file) diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_local_monitoring_e2e_workflow.py b/tests/scripts/local_monitoring_tests/rest_apis/test_local_monitoring_e2e_workflow.py index a7502ad03..ab5b506bd 100644 --- a/tests/scripts/local_monitoring_tests/rest_apis/test_local_monitoring_e2e_workflow.py +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_local_monitoring_e2e_workflow.py @@ -50,6 +50,7 @@ @pytest.mark.test_e2e +@pytest.mark.namespace_tests def test_list_recommendations_multiple_exps_for_datasource_workloads(cluster_type): """ Test Description: This test validates list recommendations for multiple experiments posted using different json files @@ -219,7 +220,7 @@ def test_list_recommendations_multiple_exps_for_datasource_workloads(cluster_typ assert data['message'] == CREATE_EXP_SUCCESS_MSG # Wait for the container to complete -# wait_for_container_to_complete(container_id) + wait_for_container_to_complete(container_id) # generate recommendations json_file = open(tfb_exp_json_file, "r") diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_namespace_reco_e2e_workflow.py b/tests/scripts/local_monitoring_tests/rest_apis/test_namespace_reco_e2e_workflow.py index ea80da112..116cf30f6 100644 --- a/tests/scripts/local_monitoring_tests/rest_apis/test_namespace_reco_e2e_workflow.py +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_namespace_reco_e2e_workflow.py @@ -50,6 +50,7 @@ @pytest.mark.test_e2e +@pytest.mark.namespace_tests def test_list_recommendations_namespace_exps(cluster_type): """ Test Description: This test validates list recommendations for multiple experiments posted using different json files @@ -250,9 +251,9 @@ def test_list_recommendations_namespace_exps(cluster_type): assert data['message'] == CREATE_EXP_SUCCESS_MSG # Wait for the container to complete -# 
wait_for_container_to_complete(container_id1) -# wait_for_container_to_complete(container_id2) -# wait_for_container_to_complete(container_id3) + wait_for_container_to_complete(container_id1) + wait_for_container_to_complete(container_id2) + wait_for_container_to_complete(container_id3) # generate recommendations From 62c0732f20410f1b19d00c3315c6a3e05d9d8cc9 Mon Sep 17 00:00:00 2001 From: Shekhar Saxena Date: Thu, 19 Sep 2024 13:14:20 +0530 Subject: [PATCH 13/16] removing namespace test case Signed-off-by: Shekhar Saxena --- tests/scripts/local_monitoring_tests/local_monitoring_tests.sh | 3 +-- tests/scripts/local_monitoring_tests/pytest.ini | 1 - .../local_monitoring_tests/rest_apis/test_create_experiment.py | 2 -- .../rest_apis/test_list_recommendations.py | 1 - .../rest_apis/test_local_monitoring_e2e_workflow.py | 1 - .../rest_apis/test_namespace_reco_e2e_workflow.py | 1 - 6 files changed, 1 insertion(+), 8 deletions(-) diff --git a/tests/scripts/local_monitoring_tests/local_monitoring_tests.sh b/tests/scripts/local_monitoring_tests/local_monitoring_tests.sh index 9fa1f4a17..9676a1382 100644 --- a/tests/scripts/local_monitoring_tests/local_monitoring_tests.sh +++ b/tests/scripts/local_monitoring_tests/local_monitoring_tests.sh @@ -43,8 +43,7 @@ function local_monitoring_tests() { target="crc" metric_profile_json="${METRIC_PROFILE_DIR}/resource_optimization_local_monitoring.json" -# local_monitoring_tests=("sanity" "extended" "negative" "test_e2e" ) - local_monitoring_tests=("namespace_tests") + local_monitoring_tests=("sanity" "extended" "negative" "test_e2e" ) # check if the test case is supported if [ ! -z "${testcase}" ]; then diff --git a/tests/scripts/local_monitoring_tests/pytest.ini b/tests/scripts/local_monitoring_tests/pytest.ini index d021f3338..48bdd36e6 100644 --- a/tests/scripts/local_monitoring_tests/pytest.ini +++ b/tests/scripts/local_monitoring_tests/pytest.ini @@ -5,4 +5,3 @@ markers = test_e2e: mark a test as end-to-end test negative: mark test as a negative test extended: mark test as a extended test - namespace_tests: mark test as a namespace reco test diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py b/tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py index 434e2ea75..5ead7ee0e 100644 --- a/tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py @@ -48,7 +48,6 @@ csvfile = "/tmp/create_exp_test_data.csv" @pytest.mark.sanity -@pytest.mark.namespace_tests @pytest.mark.parametrize("test_name, expected_status_code, version, experiment_name, cluster_name, performance_profile, mode, target_cluster, datasource, experiment_type, kubernetes_obj_type, name, namespace, namespace_name, container_image_name, container_name, measurement_duration, threshold", [ ("valid_namespace_exp_with_exp_type", SUCCESS_STATUS_CODE, "v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "namespace", None, None, None, "default", None, None, "15min", "0.1"), @@ -139,7 +138,6 @@ def test_create_exp_valid_tests(test_name, expected_status_code, version, experi @pytest.mark.negative -@pytest.mark.namespace_tests @pytest.mark.parametrize("test_name, expected_status_code, version, experiment_name, cluster_name, performance_profile, mode, target_cluster, datasource, experiment_type, kubernetes_obj_type, name, namespace, namespace_name, container_image_name, container_name, 
measurement_duration, threshold", [ ("invalid_namespace_exp_without_exp_type", ERROR_500_STATUS_CODE, "v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", None, None, None, None, "default", None, None, "15min", "0.1"), diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py b/tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py index 0ed9b0364..d0c5d447c 100644 --- a/tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py @@ -42,7 +42,6 @@ metric_profile_dir = get_metric_profile_dir() @pytest.mark.sanity -@pytest.mark.namespace_tests @pytest.mark.parametrize("test_name, expected_status_code, version, experiment_name, cluster_name, performance_profile, mode, target_cluster, datasource, experiment_type, kubernetes_obj_type, name, namespace, namespace_name, container_image_name, container_name, measurement_duration, threshold", [ ("list_reco_default_cluster1", SUCCESS_STATUS_CODE, "v2.0", "test-default-ns", "cluster-1", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "namespace", None, None, None, "default", None, None, "15min", "0.1"), diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_local_monitoring_e2e_workflow.py b/tests/scripts/local_monitoring_tests/rest_apis/test_local_monitoring_e2e_workflow.py index ab5b506bd..47a7022d7 100644 --- a/tests/scripts/local_monitoring_tests/rest_apis/test_local_monitoring_e2e_workflow.py +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_local_monitoring_e2e_workflow.py @@ -50,7 +50,6 @@ @pytest.mark.test_e2e -@pytest.mark.namespace_tests def test_list_recommendations_multiple_exps_for_datasource_workloads(cluster_type): """ Test Description: This test validates list recommendations for multiple experiments posted using different json files diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_namespace_reco_e2e_workflow.py b/tests/scripts/local_monitoring_tests/rest_apis/test_namespace_reco_e2e_workflow.py index 116cf30f6..bbc0438cf 100644 --- a/tests/scripts/local_monitoring_tests/rest_apis/test_namespace_reco_e2e_workflow.py +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_namespace_reco_e2e_workflow.py @@ -50,7 +50,6 @@ @pytest.mark.test_e2e -@pytest.mark.namespace_tests def test_list_recommendations_namespace_exps(cluster_type): """ Test Description: This test validates list recommendations for multiple experiments posted using different json files From e7b00c500c096c5ddb73b44fc9f8ff2a1772808a Mon Sep 17 00:00:00 2001 From: Shekhar Saxena Date: Thu, 19 Sep 2024 13:17:57 +0530 Subject: [PATCH 14/16] removing extra space Signed-off-by: Shekhar Saxena --- tests/scripts/local_monitoring_tests/local_monitoring_tests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/scripts/local_monitoring_tests/local_monitoring_tests.sh b/tests/scripts/local_monitoring_tests/local_monitoring_tests.sh index 9676a1382..28f0a02b1 100644 --- a/tests/scripts/local_monitoring_tests/local_monitoring_tests.sh +++ b/tests/scripts/local_monitoring_tests/local_monitoring_tests.sh @@ -43,7 +43,7 @@ function local_monitoring_tests() { target="crc" metric_profile_json="${METRIC_PROFILE_DIR}/resource_optimization_local_monitoring.json" - local_monitoring_tests=("sanity" "extended" "negative" "test_e2e" ) + local_monitoring_tests=("sanity" "extended" "negative" 
"test_e2e") # check if the test case is supported if [ ! -z "${testcase}" ]; then From 2fafe64c51e297caf9bb117a4b104525c821b758 Mon Sep 17 00:00:00 2001 From: Shekhar Saxena Date: Thu, 19 Sep 2024 13:47:13 +0530 Subject: [PATCH 15/16] code refactoring Signed-off-by: Shekhar Saxena --- .../rest_apis/test_create_experiment.py | 26 +------------------ .../rest_apis/test_list_recommendations.py | 9 ------- 2 files changed, 1 insertion(+), 34 deletions(-) diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py b/tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py index 5ead7ee0e..9075a2741 100644 --- a/tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py @@ -22,31 +22,6 @@ from helpers.utils import * from jinja2 import Environment, FileSystemLoader -mandatory_fields = [ - ("version", ERROR_STATUS_CODE, ERROR_STATUS), - ("cluster_name", ERROR_STATUS_CODE, ERROR_STATUS), - ("experiment_name", ERROR_STATUS_CODE, ERROR_STATUS), - ("mode", ERROR_STATUS_CODE, ERROR_STATUS), - ("target_cluster", ERROR_STATUS_CODE, ERROR_STATUS), - ("kubernetes_objects", ERROR_STATUS_CODE, ERROR_STATUS), - ("type", ERROR_STATUS_CODE, ERROR_STATUS), - ("kubernetes_objects_name", ERROR_STATUS_CODE, ERROR_STATUS), - ("namespace", ERROR_STATUS_CODE, ERROR_STATUS), - ("containers", ERROR_STATUS_CODE, ERROR_STATUS), - ("container_image_name", ERROR_STATUS_CODE, ERROR_STATUS), - ("container_name", ERROR_STATUS_CODE, ERROR_STATUS), - ("selector", SUCCESS_STATUS_CODE, SUCCESS_STATUS), - ("namespace", ERROR_STATUS_CODE, ERROR_STATUS), - ("performance_profile", ERROR_STATUS_CODE, ERROR_STATUS), - ("slo", SUCCESS_STATUS_CODE, SUCCESS_STATUS), - ("recommendation_settings", ERROR_STATUS_CODE, ERROR_STATUS), - ("trial_settings", ERROR_STATUS_CODE, ERROR_STATUS), - ("kubernetes_objects_name_selector", ERROR_STATUS_CODE, ERROR_STATUS), - ("performance_profile_slo", ERROR_STATUS_CODE, ERROR_STATUS) -] - -csvfile = "/tmp/create_exp_test_data.csv" - @pytest.mark.sanity @pytest.mark.parametrize("test_name, expected_status_code, version, experiment_name, cluster_name, performance_profile, mode, target_cluster, datasource, experiment_type, kubernetes_obj_type, name, namespace, namespace_name, container_image_name, container_name, measurement_duration, threshold", [ @@ -249,6 +224,7 @@ def test_create_multiple_namespace_exp(cluster_type): assert response.status_code == ERROR_STATUS_CODE assert data['status'] == ERROR_STATUS + assert data['message'] == CREATE_EXP_BULK_ERROR_MSG response = delete_experiment(input_json_file) print("delete exp = ", response.status_code) diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py b/tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py index d0c5d447c..5eda0d6dd 100644 --- a/tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_list_recommendations.py @@ -61,15 +61,6 @@ def test_list_recommendations_namespace_single_result(test_name, expected_status environment = Environment(loader=FileSystemLoader("../json_files/")) template = environment.get_template("create_exp_template.json") - # In case of test_name with "null", strip the specific fields - if "null" in test_name: - field = test_name.replace("null_", "") - json_file = "../json_files/create_exp_template.json" - filename = "/tmp/create_exp_template.json" - 
strip_double_quotes_for_field(json_file, field, filename) - environment = Environment(loader=FileSystemLoader("/tmp/")) - template = environment.get_template("create_exp_template.json") - # Render the JSON content from the template content = template.render( version=version, From 3cc2c96e917092faf2405d8fd158dc0b0a544c86 Mon Sep 17 00:00:00 2001 From: Shekhar Saxena Date: Thu, 19 Sep 2024 13:52:22 +0530 Subject: [PATCH 16/16] validating error msg Signed-off-by: Shekhar Saxena --- .../local_monitoring_tests/rest_apis/test_create_experiment.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py b/tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py index 9075a2741..d9a4fa887 100644 --- a/tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py +++ b/tests/scripts/local_monitoring_tests/rest_apis/test_create_experiment.py @@ -224,6 +224,7 @@ def test_create_multiple_namespace_exp(cluster_type): assert response.status_code == ERROR_STATUS_CODE assert data['status'] == ERROR_STATUS + # validate error message assert data['message'] == CREATE_EXP_BULK_ERROR_MSG response = delete_experiment(input_json_file)
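
For readers following the test changes above: the namespace experiment payloads used throughout these patches are produced by rendering a Jinja2 template and then pruning the container-specific keys before POSTing to the createExperiment API. The sketch below reproduces that flow with an inline stand-in template so it runs on its own; the field names mirror the patches, but the template string, the helper name render_namespace_exp, and the literal values here are illustrative assumptions, not the repository's create_exp_template.json or its helpers.

    # Minimal sketch of the template-render-and-prune flow, assuming an
    # inline stand-in for create_exp_template.json (the real tests load it
    # from ../json_files/ with a FileSystemLoader).
    import json
    from jinja2 import Template

    EXP_TEMPLATE = """
    [{
      "version": "{{version}}",
      "experiment_name": "{{experiment_name}}",
      "cluster_name": "{{cluster_name}}",
      "performance_profile": "{{performance_profile}}",
      "mode": "{{mode}}",
      "target_cluster": "{{target_cluster}}",
      "datasource": "{{datasource}}",
      "kubernetes_objects": [{
        "experiment_type": "{{experiment_type}}",
        "type": "{{kubernetes_obj_type}}",
        "name": "{{name}}",
        "namespace": "{{namespace}}",
        "namespaces": {"namespace_name": "{{namespace_name}}"},
        "containers": []
      }],
      "trial_settings": {"measurement_duration": "{{measurement_duration}}"},
      "recommendation_settings": {"threshold": "{{threshold}}"}
    }]
    """

    def render_namespace_exp(exp_name: str, namespace_name: str) -> list:
        """Render a namespace-only experiment payload, mirroring the test flow."""
        content = Template(EXP_TEMPLATE).render(
            version="v2.0", experiment_name=exp_name, cluster_name="default",
            performance_profile="resource-optimization-local-monitoring",
            mode="monitor", target_cluster="local", datasource="prometheus-1",
            experiment_type="namespace", kubernetes_obj_type=None, name=None,
            namespace=None, namespace_name=namespace_name,
            measurement_duration="15min", threshold="0.1",
        )
        payload = json.loads(content)
        # Namespace experiments carry only the "namespaces" block, so the
        # container-oriented keys are stripped, as the tests do with pop().
        for key in ("type", "name", "namespace", "containers"):
            payload[0]["kubernetes_objects"][0].pop(key, None)
        return payload

    if __name__ == "__main__":
        # Example: build the payload the tests write to /tmp/create_exp_1.json.
        print(json.dumps(render_namespace_exp("test-ns1", "ns1"), indent=4))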