
Commit c9a3dab
Merge pull request kruize#1301 from shekhar316/ns-tests
Updating Tests For Namespace Recommendations
dinogun authored Sep 26, 2024
2 parents 131a8d4 + 2bd1545 commit c9a3dab
Showing 9 changed files with 43 additions and 25 deletions.

@@ -274,7 +274,7 @@
{
"function": "avg",
"query": "avg_over_time(sum by(namespace) (rate(container_cpu_usage_seconds_total{namespace=\"$NAMESPACE$\", container!='', container!='POD', pod!=''}[5m]))[$MEASUREMENT_DURATION_IN_MIN$m:])"

},
{
"function": "max",
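For readers unfamiliar with Kruize metric-profile templates, the namespace CPU query above is parameterised. A minimal sketch of how the $NAMESPACE$ and $MEASUREMENT_DURATION_IN_MIN$ placeholders resolve (the helper below is hypothetical, for illustration only; it is not Kruize code):

# Hypothetical rendering of the templated query shown in the hunk above.
QUERY_TEMPLATE = (
    "avg_over_time(sum by(namespace) (rate(container_cpu_usage_seconds_total{"
    "namespace=\"$NAMESPACE$\", container!='', container!='POD', pod!=''}[5m]))"
    "[$MEASUREMENT_DURATION_IN_MIN$m:])"
)

def render_namespace_cpu_query(namespace: str, measurement_duration_in_min: int) -> str:
    """Substitute the template placeholders with concrete test values."""
    return (QUERY_TEMPLATE
            .replace("$NAMESPACE$", namespace)
            .replace("$MEASUREMENT_DURATION_IN_MIN$", str(measurement_duration_in_min)))

# render_namespace_cpu_query("default", 15) yields the query for a 15-minute
# measurement window over the "default" namespace.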

@@ -4,6 +4,7 @@
"type": "object",
"properties": {
"cluster_name": { "type": "string" },
"experiment_name": { "type": "string" },
"kubernetes_objects": {
"type": "array",
"items": {
@@ -1195,9 +1196,6 @@
},
"version": {
"type": "string"
},
"experiment_name": {
"type": "string"
}
},
"required": [
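The net effect of the two schema hunks above is that experiment_name is now validated as a top-level property of each list-recommendations entry (next to cluster_name) rather than inside the nested object it previously sat under. A minimal sketch of an entry shaped the way the updated schema expects, showing only the fields visible in the hunks (values are illustrative; all other required fields are omitted):

# Illustration only: top-level fields after the schema change.
list_reco_entry = {
    "cluster_name": "default",            # example value
    "experiment_name": "namespace-demo",  # now a top-level property
    "version": "v2.0",
    "kubernetes_objects": [],             # real entries carry the per-object recommendation data
}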
9 changes: 6 additions & 3 deletions tests/scripts/helpers/utils.py
@@ -42,6 +42,9 @@
DUPLICATE_RECORDS_MSG = "An entry for this record already exists!"
CREATE_EXP_SUCCESS_MSG = "Experiment registered successfully with Kruize. View registered experiments at /listExperiments"
CREATE_EXP_BULK_ERROR_MSG = "At present, the system does not support bulk entries!"
CREATE_EXP_CONTAINER_EXP_CONTAINS_NAMESPACE = "Can not specify namespace data for container experiment"
CREATE_EXP_NAMESPACE_EXP_CONTAINS_CONTAINER = "Can not specify container data for namespace experiment"
CREATE_EXP_NAMESPACE_EXP_NOT_SUPPORTED_FOR_REMOTE = "Namespace experiment type is not supported for remote monitoring use case."
UPDATE_RECOMMENDATIONS_MANDATORY_DEFAULT_MESSAGE = 'experiment_name is mandatory'
UPDATE_RECOMMENDATIONS_MANDATORY_INTERVAL_END_DATE = 'interval_end_time is mandatory'
UPDATE_RECOMMENDATIONS_EXPERIMENT_NOT_FOUND = 'Not Found: experiment_name does not exist: '
@@ -458,8 +461,9 @@ def validate_local_monitoring_reco_json(create_exp_json, list_reco_json, expecte
# Validate kubernetes objects
create_exp_kubernetes_obj = create_exp_json["kubernetes_objects"][0]
list_reco_kubernetes_obj = list_reco_json["kubernetes_objects"][0]
experiment_type = create_exp_json.get("experiment_type")
validate_local_monitoring_kubernetes_obj(create_exp_kubernetes_obj, list_reco_kubernetes_obj, expected_duration_in_hours,
test_name)
test_name, experiment_type)

def validate_list_exp_results_count(expected_results_count, list_exp_json):

@@ -521,8 +525,7 @@ def validate_kubernetes_obj(create_exp_kubernetes_obj, update_results_kubernetes
expected_duration_in_hours, test_name)

def validate_local_monitoring_kubernetes_obj(create_exp_kubernetes_obj,
list_reco_kubernetes_obj, expected_duration_in_hours, test_name):
experiment_type = create_exp_kubernetes_obj.get("experiment_type")
list_reco_kubernetes_obj, expected_duration_in_hours, test_name, experiment_type):
if experiment_type == NAMESPACE_EXPERIMENT_TYPE:
assert list_reco_kubernetes_obj["namespaces"]["namespace_name"] == create_exp_kubernetes_obj["namespaces"]["namespace_name"]
list_reco_namespace = list_reco_kubernetes_obj["namespaces"]
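Putting the utils.py hunks together: experiment_type is now read once from the top level of the create-experiment JSON and passed down explicitly, instead of being looked up on each kubernetes object. A condensed sketch of the resulting flow, with signatures trimmed to the relevant parameters and the container-side checks elided:

NAMESPACE_EXPERIMENT_TYPE = "namespace"  # assumed value of the constant used above

def validate_local_monitoring_reco_json(create_exp_json, list_reco_json):
    # experiment_type now lives at the top level of the create-experiment JSON
    experiment_type = create_exp_json.get("experiment_type")
    validate_local_monitoring_kubernetes_obj(
        create_exp_json["kubernetes_objects"][0],
        list_reco_json["kubernetes_objects"][0],
        experiment_type,
    )

def validate_local_monitoring_kubernetes_obj(create_obj, list_reco_obj, experiment_type):
    if experiment_type == NAMESPACE_EXPERIMENT_TYPE:
        # namespace experiments are compared on namespace-level data
        assert list_reco_obj["namespaces"]["namespace_name"] == create_obj["namespaces"]["namespace_name"]
    # container experiments keep their existing container-level checks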

@@ -6,8 +6,8 @@
"mode": "{{mode}}",
"target_cluster": "{{target_cluster}}",
"datasource": "{{datasource}}",
"experiment_type": "{{experiment_type}}",
"kubernetes_objects": [{
"experiment_type": "{{experiment_type}}",
"type": "{{kubernetes_obj_type}}",
"name": "{{name}}",
"namespace": "{{namespace}}",
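As the template above (and the concrete test JSON below) shows, experiment_type is now a sibling of kubernetes_objects, i.e. one experiment type per experiment rather than per object. A minimal sketch of a rendered namespace payload under that layout (field values are illustrative):

namespace_create_exp = {
    "version": "v2.0",
    "experiment_name": "namespace-demo",   # illustrative name
    "cluster_name": "default",
    "performance_profile": "resource-optimization-local-monitoring",
    "mode": "monitor",
    "target_cluster": "local",
    "datasource": "prometheus-1",
    "experiment_type": "namespace",        # top level, applies to the whole experiment
    "kubernetes_objects": [
        {"namespaces": {"namespace_name": "default"}}
    ],
}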

@@ -7,9 +7,9 @@
"mode": "monitor",
"target_cluster": "local",
"datasource": "prometheus-1",
"experiment_type": "namespace",
"kubernetes_objects": [
{
"experiment_type": "namespace",
"namespaces": {
"namespace_name": "default"
}
@@ -30,9 +30,9 @@
"mode": "monitor",
"target_cluster": "local",
"datasource": "prometheus-1",
"experiment_type": "namespace",
"kubernetes_objects": [
{
"experiment_type": "namespace",
"namespaces": {
"namespace_name": "test-multiple-import"
}

@@ -85,8 +85,8 @@ def test_create_exp_valid_tests(test_name, expected_status_code, version, experi
json_content[0]["kubernetes_objects"][0].pop("containers")
if json_content[0]["kubernetes_objects"][0]["namespaces"]["namespace_name"] == "None":
json_content[0]["kubernetes_objects"][0].pop("namespaces")
if json_content[0]["kubernetes_objects"][0]["experiment_type"] == "None":
json_content[0]["kubernetes_objects"][0].pop("experiment_type")
if json_content[0]["experiment_type"] == "None":
json_content[0].pop("experiment_type")

# Write the final JSON to the temp file
with open(tmp_json_file, mode="w", encoding="utf-8") as message:
@@ -113,17 +113,18 @@ def test_create_exp_valid_tests(test_name, expected_status_code, version, experi


@pytest.mark.negative
@pytest.mark.parametrize("test_name, expected_status_code, version, experiment_name, cluster_name, performance_profile, mode, target_cluster, datasource, experiment_type, kubernetes_obj_type, name, namespace, namespace_name, container_image_name, container_name, measurement_duration, threshold",
@pytest.mark.parametrize("test_name, expected_status_code, expected_error_msg, version, experiment_name, cluster_name, performance_profile, mode, target_cluster, datasource, experiment_type, kubernetes_obj_type, name, namespace, namespace_name, container_image_name, container_name, measurement_duration, threshold",
[
("invalid_namespace_exp_without_exp_type", ERROR_500_STATUS_CODE, "v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", None, None, None, None, "default", None, None, "15min", "0.1"),
("invalid_both_container_and_namespace_without_exp_type", ERROR_500_STATUS_CODE, "v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", None, "deployment", "tfb-qrh-sample", "default", "default", "kruize/tfb-qrh:1.13.2.F_et17", "tfb-server", "15min", "0.1"),
("invalid_both_container_and_namespace_namespace_exp_type", ERROR_500_STATUS_CODE, "v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "namespace", "deployment", "tfb-qrh-sample", "default", "default", "kruize/tfb-qrh:1.13.2.F_et17", "tfb-server", "15min", "0.1"),
("invalid_both_container_and_namespace_container_exp_type", ERROR_500_STATUS_CODE, "v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "container", "deployment", "tfb-qrh-sample", "default", "default", "kruize/tfb-qrh:1.13.2.F_et17", "tfb-server", "15min", "0.1"),
("invalid_namespace_exp_type_with_only_containers", ERROR_500_STATUS_CODE, "v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "namespace", "deployment", "tfb-qrh-sample", "default", None, "kruize/tfb-qrh:1.13.2.F_et17", "tfb-server", "15min", "0.1"),
("invalid_container_exp_type_with_only_namespace", ERROR_500_STATUS_CODE, "v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "container", None, None, None, "default", None, None, "15min", "0.1")
("invalid_namespace_exp_without_exp_type", ERROR_STATUS_CODE, CREATE_EXP_CONTAINER_EXP_CONTAINS_NAMESPACE, "v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", None, None, None, None, "default", None, None, "15min", "0.1"),
("invalid_both_container_and_namespace_without_exp_type", ERROR_STATUS_CODE, CREATE_EXP_CONTAINER_EXP_CONTAINS_NAMESPACE,"v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", None, "deployment", "tfb-qrh-sample", "default", "default", "kruize/tfb-qrh:1.13.2.F_et17", "tfb-server", "15min", "0.1"),
("invalid_both_container_and_namespace_namespace_exp_type", ERROR_STATUS_CODE, CREATE_EXP_NAMESPACE_EXP_CONTAINS_CONTAINER,"v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "namespace", "deployment", "tfb-qrh-sample", "default", "default", "kruize/tfb-qrh:1.13.2.F_et17", "tfb-server", "15min", "0.1"),
("invalid_both_container_and_namespace_container_exp_type", ERROR_STATUS_CODE, CREATE_EXP_CONTAINER_EXP_CONTAINS_NAMESPACE,"v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "container", "deployment", "tfb-qrh-sample", "default", "default", "kruize/tfb-qrh:1.13.2.F_et17", "tfb-server", "15min", "0.1"),
("invalid_namespace_exp_type_with_only_containers", ERROR_STATUS_CODE, CREATE_EXP_NAMESPACE_EXP_CONTAINS_CONTAINER,"v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "namespace", "deployment", "tfb-qrh-sample", "default", None, "kruize/tfb-qrh:1.13.2.F_et17", "tfb-server", "15min", "0.1"),
("invalid_container_exp_type_with_only_namespace", ERROR_STATUS_CODE, CREATE_EXP_CONTAINER_EXP_CONTAINS_NAMESPACE,"v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "local", "prometheus-1", "container", None, None, None, "default", None, None, "15min", "0.1"),
("invalid_namespace_exp_with_remote_cluster", ERROR_STATUS_CODE, CREATE_EXP_NAMESPACE_EXP_NOT_SUPPORTED_FOR_REMOTE,"v2.0", "tfb-workload-namespace", "default", "resource-optimization-local-monitoring", "monitor", "remote", "prometheus-1", "namespace", None, None, None, "default", None, None, "15min", "0.1")
]
)
def test_create_exp_invalid_tests(test_name, expected_status_code, version, experiment_name, cluster_name, performance_profile, mode, target_cluster, datasource, experiment_type, kubernetes_obj_type, name, namespace, namespace_name, container_image_name, container_name, measurement_duration, threshold, cluster_type):
def test_create_exp_invalid_tests(test_name, expected_status_code, expected_error_msg, version, experiment_name, cluster_name, performance_profile, mode, target_cluster, datasource, experiment_type, kubernetes_obj_type, name, namespace, namespace_name, container_image_name, container_name, measurement_duration, threshold, cluster_type):
"""
Test Description: This test validates the response status code of createExperiment API
for namespace experiment by passing an invalid input for the json
@@ -178,8 +179,8 @@ def test_create_exp_invalid_tests(test_name, expected_status_code, version, expe
json_content[0]["kubernetes_objects"][0].pop("containers")
if json_content[0]["kubernetes_objects"][0]["namespaces"]["namespace_name"] == "None":
json_content[0]["kubernetes_objects"][0].pop("namespaces")
if json_content[0]["kubernetes_objects"][0]["experiment_type"] == "None":
json_content[0]["kubernetes_objects"][0].pop("experiment_type")
if json_content[0]["experiment_type"] == "None":
json_content[0].pop("experiment_type")

# Write the final JSON to the temp file
with open(tmp_json_file, mode="w", encoding="utf-8") as message:
Expand All @@ -199,6 +200,7 @@ def test_create_exp_invalid_tests(test_name, expected_status_code, version, expe

assert response.status_code == expected_status_code
assert data['status'] == ERROR_STATUS
assert data['message'] == expected_error_msg

response = delete_experiment(input_json_file)
print("delete exp = ", response.status_code)

@@ -104,7 +104,12 @@ def test_list_recommendations_namespace_single_result(test_name, expected_status
print("delete exp = ", response.status_code)

#Install default metric profile
metric_profile_json_file = metric_profile_dir / 'resource_optimization_local_monitoring.json'
if cluster_type == "minikube":
metric_profile_json_file = metric_profile_dir / 'resource_optimization_local_monitoring_norecordingrules.json'

if cluster_type == "openshift":
metric_profile_json_file = metric_profile_dir / 'resource_optimization_local_monitoring.json'

response = delete_metric_profile(metric_profile_json_file)
print("delete metric profile = ", response.status_code)

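The same minikube/openshift switch for the default metric profile appears again in the two list-recommendations test files below. A small helper along these lines (hypothetical, not part of this change) would keep the selection in one place:

from pathlib import Path

def select_default_metric_profile(metric_profile_dir: Path, cluster_type: str) -> Path:
    """Mirror the per-test profile selection introduced above."""
    if cluster_type == "minikube":
        # presumably because minikube lacks the recording rules used by the default profile
        return metric_profile_dir / 'resource_optimization_local_monitoring_norecordingrules.json'
    if cluster_type == "openshift":
        return metric_profile_dir / 'resource_optimization_local_monitoring.json'
    raise ValueError(f"unsupported cluster_type: {cluster_type}")  # assumption: other values are unexpected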

@@ -161,7 +161,12 @@ def test_list_recommendations_multiple_exps_for_datasource_workloads(cluster_typ
print("delete namespace exp = ", response.status_code)

#Install default metric profile
metric_profile_json_file = metric_profile_dir / 'resource_optimization_local_monitoring.json'
if cluster_type == "minikube":
metric_profile_json_file = metric_profile_dir / 'resource_optimization_local_monitoring_norecordingrules.json'

if cluster_type == "openshift":
metric_profile_json_file = metric_profile_dir / 'resource_optimization_local_monitoring.json'

response = delete_metric_profile(metric_profile_json_file)
print("delete metric profile = ", response.status_code)


@@ -191,7 +191,12 @@ def test_list_recommendations_namespace_exps(cluster_type):
print("delete namespace exp = ", response.status_code)

#Install default metric profile
metric_profile_json_file = metric_profile_dir / 'resource_optimization_local_monitoring.json'
if cluster_type == "minikube":
metric_profile_json_file = metric_profile_dir / 'resource_optimization_local_monitoring_norecordingrules.json'

if cluster_type == "openshift":
metric_profile_json_file = metric_profile_dir / 'resource_optimization_local_monitoring.json'

response = delete_metric_profile(metric_profile_json_file)
print("delete metric profile = ", response.status_code)

