Commit 37fd1df

Merge pull request kruize#1293 from shekhar316/namespace-tests
Adding Tests for Namespace Recommendations and Experiment Type
chandrams authored Sep 19, 2024
2 parents e6118a1 + 3cc2c96 commit 37fd1df
Showing 9 changed files with 1,774 additions and 15 deletions.
764 changes: 764 additions & 0 deletions tests/scripts/helpers/list_reco_json_local_monitoring_schema.py

Large diffs are not rendered by default.

119 changes: 104 additions & 15 deletions tests/scripts/helpers/utils.py
@@ -165,6 +165,8 @@
LONG_TERM: NOTIFICATION_CODE_FOR_LONG_TERM_RECOMMENDATIONS_AVAILABLE,
}

NAMESPACE_EXPERIMENT_TYPE = "namespace"
CONTAINER_EXPERIMENT_TYPE = "container"

# version,experiment_name,cluster_name,performance_profile,mode,target_cluster,type,name,namespace,container_image_name,container_name,measurement_duration,threshold
create_exp_test_data = {
@@ -520,25 +522,32 @@ def validate_kubernetes_obj(create_exp_kubernetes_obj, update_results_kubernetes

def validate_local_monitoring_kubernetes_obj(create_exp_kubernetes_obj,
list_reco_kubernetes_obj, expected_duration_in_hours, test_name):
-    # Validate type, name, namespace
-    assert list_reco_kubernetes_obj["type"] == create_exp_kubernetes_obj["type"]
-    assert list_reco_kubernetes_obj["name"] == create_exp_kubernetes_obj["name"]
-    assert list_reco_kubernetes_obj["namespace"] == create_exp_kubernetes_obj["namespace"]
+    experiment_type = create_exp_kubernetes_obj.get("experiment_type")
+    if experiment_type == NAMESPACE_EXPERIMENT_TYPE:
+        assert list_reco_kubernetes_obj["namespaces"]["namespace_name"] == create_exp_kubernetes_obj["namespaces"]["namespace_name"]
+        list_reco_namespace = list_reco_kubernetes_obj["namespaces"]
+        create_exp_namespace = create_exp_kubernetes_obj["namespaces"]
+        validate_local_monitoring_namespace(create_exp_namespace, list_reco_namespace, expected_duration_in_hours, test_name)
+    else:
+        # Validate type, name, namespace
+        assert list_reco_kubernetes_obj["type"] == create_exp_kubernetes_obj["type"]
+        assert list_reco_kubernetes_obj["name"] == create_exp_kubernetes_obj["name"]
+        assert list_reco_kubernetes_obj["namespace"] == create_exp_kubernetes_obj["namespace"]

-    exp_containers_length = len(create_exp_kubernetes_obj["containers"])
-    list_reco_containers_length = len(list_reco_kubernetes_obj["containers"])
+        exp_containers_length = len(create_exp_kubernetes_obj["containers"])
+        list_reco_containers_length = len(list_reco_kubernetes_obj["containers"])

-    # Validate if all the containers are present
-    for i in range(exp_containers_length):
-        list_reco_container = None
-
-        for j in range(list_reco_containers_length):
-            if list_reco_kubernetes_obj["containers"][j]["container_name"] == \
-                    create_exp_kubernetes_obj["containers"][i]["container_name"]:
-                list_reco_container = list_reco_kubernetes_obj["containers"][j]
-                create_exp_container = create_exp_kubernetes_obj["containers"][i]
-                validate_local_monitoring_container(create_exp_container, list_reco_container, expected_duration_in_hours, test_name)
+        # Validate if all the containers are present
+        for i in range(exp_containers_length):
+            list_reco_container = None
+
+            for j in range(list_reco_containers_length):
+                if list_reco_kubernetes_obj["containers"][j]["container_name"] == \
+                        create_exp_kubernetes_obj["containers"][i]["container_name"]:
+                    list_reco_container = list_reco_kubernetes_obj["containers"][j]
+                    create_exp_container = create_exp_kubernetes_obj["containers"][i]
+                    validate_local_monitoring_container(create_exp_container, list_reco_container, expected_duration_in_hours, test_name)

def validate_container(update_results_container, update_results_json, list_reco_container, expected_duration_in_hours,
test_name):
@@ -733,6 +742,86 @@ def validate_local_monitoring_container(create_exp_container, list_reco_containe
data = list_reco_container["recommendations"]["data"]
assert len(data) == 0, f"Data is not empty! Length of data - Actual = {len(data)} expected = 0"


def validate_local_monitoring_namespace(create_exp_namespace, list_reco_namespace, expected_duration_in_hours, test_name):
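    """Validate a namespace object from the listRecommendations response against
    the createExperiment input: namespace name, per-term durations, monitoring
    start times, and the cost/performance engine configs."""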
# Validate namespace name
    if create_exp_namespace is not None and list_reco_namespace is not None:
        assert create_exp_namespace["namespace_name"] == list_reco_namespace["namespace_name"], \
            f"Namespace names did not match! Actual - {list_reco_namespace['namespace_name']} Expected - {create_exp_namespace['namespace_name']}"

    if expected_duration_in_hours is None:
        duration_in_hours = 0.0
    else:
        duration_in_hours = expected_duration_in_hours

if check_if_recommendations_are_present(list_reco_namespace["recommendations"]):
interval_end_time = list(list_reco_namespace['recommendations']['data'].keys())[0]
print(f"interval_end_time = {interval_end_time}")

terms_obj = list_reco_namespace["recommendations"]["data"][interval_end_time]["recommendation_terms"]
current_config = list_reco_namespace["recommendations"]["data"][interval_end_time]["current"]

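        # Recommendation terms to check; only the keys are used here (the values
        # are the expected plot datapoint counts consumed by validate_plots)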
duration_terms = {'short_term': 4, 'medium_term': 7, 'long_term': 15}
for term in duration_terms.keys():
if check_if_recommendations_are_present(terms_obj[term]):
print(f"reco present for term {term}")

interval_start_time = list_reco_namespace['recommendations']['data'][interval_end_time]['recommendation_terms'][term]['monitoring_start_time']
# Validate the precision of the valid duration
duration = terms_obj[term]["duration_in_hours"]
assert validate_duration_in_hours_decimal_precision(duration), f"The value '{duration}' for " \
f"'{term}' has more than two decimal places"

monitoring_start_time = term_based_start_time(interval_end_time, term)
assert terms_obj[term]["monitoring_start_time"] == monitoring_start_time, \
f"actual = {terms_obj[term]['monitoring_start_time']} expected = {monitoring_start_time}"

# Validate duration in hrs
if expected_duration_in_hours is None:
duration_in_hours = set_duration_based_on_terms(duration_in_hours, term,
interval_start_time, interval_end_time)

if test_name is not None:

if MEDIUM_TERM_TEST in test_name and term == MEDIUM_TERM:
assert terms_obj[term]["duration_in_hours"] == duration_in_hours, \
f"Duration in hours did not match! Actual = {terms_obj[term]['duration_in_hours']} expected = {duration_in_hours}"
elif SHORT_TERM_TEST in test_name and term == SHORT_TERM:
assert terms_obj[term]["duration_in_hours"] == duration_in_hours, \
f"Duration in hours did not match! Actual = {terms_obj[term]['duration_in_hours']} expected = {duration_in_hours}"
elif LONG_TERM_TEST in test_name and term == LONG_TERM:
assert terms_obj[term]["duration_in_hours"] == duration_in_hours, \
f"Duration in hours did not match! Actual = {terms_obj[term]['duration_in_hours']} expected = {duration_in_hours}"
else:
print(
f"Actual = {terms_obj[term]['duration_in_hours']} expected = {duration_in_hours}")
assert terms_obj[term]["duration_in_hours"] == duration_in_hours, \
f"Duration in hours did not match! Actual = {terms_obj[term]['duration_in_hours']} expected = {duration_in_hours}"
duration_in_hours = set_duration_based_on_terms(duration_in_hours, term, interval_start_time,
interval_end_time)

# Get engine objects
engines_list = ["cost", "performance"]

# Extract recommendation engine objects
recommendation_engines_object = None
if "recommendation_engines" in terms_obj[term]:
recommendation_engines_object = terms_obj[term]["recommendation_engines"]
if recommendation_engines_object is not None:
for engine_entry in engines_list:
if engine_entry in terms_obj[term]["recommendation_engines"]:
engine_obj = terms_obj[term]["recommendation_engines"][engine_entry]
validate_config_local_monitoring(engine_obj["config"])
validate_variation_local_monitoring(current_config, engine_obj["config"], engine_obj["variation"], engine_obj)
else:
notifications = list_reco_namespace["recommendations"]["notifications"]
if NOTIFICATION_CODE_FOR_NOT_ENOUGH_DATA in notifications:
assert notifications[NOTIFICATION_CODE_FOR_NOT_ENOUGH_DATA]["message"] == NOT_ENOUGH_DATA_MSG

data = list_reco_namespace["recommendations"]["data"]
assert len(data) == 0, f"Data is not empty! Length of data - Actual = {len(data)} expected = 0"


def validate_plots(terms_obj, duration_terms, term):
plots = terms_obj[term][PLOTS]
datapoint = plots[DATA_POINTS]
23 changes: 23 additions & 0 deletions tests/scripts/local_monitoring_tests/Local_monitoring_tests.md
@@ -69,6 +69,29 @@ Here are the test scenarios:
- Test with invalid values such as blank, null or an invalid value for the name query parameter in the listMetricProfiles API
- List metric profiles without creating a metric profile


### **Create Experiment API tests**

Here are the test scenarios:

- Create namespace experiment specifying namespace experiment type
- Create namespace experiment without specifying experiment type
- Create container experiment specifying container experiment type
- Create container experiment without specifying experiment type
- Create experiment specifying both namespaces and containers without specifying the experiment type
- Create experiment specifying both namespaces and containers specifying the namespace experiment type
- Create experiment specifying both namespaces and containers specifying the container experiment type
- Create namespace experiment specifying containers
- Create container experiment specifying namespaces
- Create multiple experiments with valid namespace
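
As a concrete illustration of the first scenario above, here is a minimal sketch (not the suite's actual helper code) that POSTs a namespace experiment to Kruize's createExperiment API. It assumes a local Kruize deployment reachable at `KRUIZE_URL`; the payload mirrors the namespace experiment JSON included later in this diff:

```python
import requests

KRUIZE_URL = "http://127.0.0.1:8080"  # assumption: port-forwarded local Kruize

payload = [{
    "version": "v2.0",
    "experiment_name": "tfb-workload-namespace",
    "cluster_name": "default",
    "performance_profile": "resource-optimization-local-monitoring",
    "mode": "monitor",
    "target_cluster": "local",
    "datasource": "prometheus-1",
    "kubernetes_objects": [{
        "experiment_type": "namespace",              # requests namespace-level recommendations
        "namespaces": {"namespace_name": "default"}  # no "containers" for a namespace experiment
    }],
    "trial_settings": {"measurement_duration": "15min"},
    "recommendation_settings": {"threshold": "0.1"}
}]

response = requests.post(f"{KRUIZE_URL}/createExperiment", json=payload)
assert response.status_code == 201, response.text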

### **List Recommendations API tests**

Here are the test scenarios:

- List recommendations for a valid namespace experiment
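
For example, a minimal sketch of fetching and inspecting the recommendations for the experiment created above, again assuming a local Kruize at `KRUIZE_URL`; the response shape follows the structure checked by `validate_local_monitoring_namespace`:

```python
import requests

KRUIZE_URL = "http://127.0.0.1:8080"  # assumption: port-forwarded local Kruize

resp = requests.get(f"{KRUIZE_URL}/listRecommendations",
                    params={"experiment_name": "tfb-workload-namespace"})
assert resp.status_code == 200, resp.text

for k8s_obj in resp.json()[0]["kubernetes_objects"]:
    # Namespace experiments return a "namespaces" object rather than "containers"
    reco = k8s_obj["namespaces"]["recommendations"]
    print(reco["notifications"])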


The above tests are developed using the pytest framework and are run using a shell script wrapper that does the following:
- Deploys kruize in non-CRD mode using the [deploy script](https://github.com/kruize/autotune/blob/master/deploy.sh) from the autotune repo
- Creates a resource optimization metric profile using the [createMetricProfile API](/design/MetricProfileAPI.md)
@@ -0,0 +1,29 @@
[{
"version": "{{version}}",
"experiment_name": "{{experiment_name}}",
"cluster_name": "{{cluster_name}}",
"performance_profile": "{{performance_profile}}",
"mode": "{{mode}}",
"target_cluster": "{{target_cluster}}",
"datasource": "{{datasource}}",
"kubernetes_objects": [{
"experiment_type": "{{experiment_type}}",
"type": "{{kubernetes_obj_type}}",
"name": "{{name}}",
"namespace": "{{namespace}}",
"namespaces": {
"namespace_name": "{{namespace_name}}"
},
"containers": [{
"container_image_name": "{{container_image_name}}",
"container_name": "{{container_name}}"
}]
}],
"trial_settings": {
"measurement_duration": "{{measurement_duration}}"
},
"recommendation_settings": {
"threshold": "{{threshold}}"
}
}]
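
The `{{...}}` placeholders in the template above are filled in from per-test data before the payload is POSTed. A minimal, hypothetical rendering helper is sketched below; the actual suite may substitute values through a different mechanism, and `template_path` is an assumed argument, not a real path from this PR:

```python
import json
import re

def render_template(template_path, values):
    # Replace each {{key}} placeholder with its value from the test data row,
    # then parse the result as JSON to produce the request payload
    text = open(template_path).read()
    rendered = re.sub(r"\{\{(\w+)\}\}", lambda m: str(values.get(m.group(1), "")), text)
    return json.loads(rendered)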

@@ -0,0 +1,48 @@
[
{
"version": "v2.0",
"experiment_name": "tfb-workload-namespace",
"cluster_name": "default",
"performance_profile": "resource-optimization-local-monitoring",
"mode": "monitor",
"target_cluster": "local",
"datasource": "prometheus-1",
"kubernetes_objects": [
{
"experiment_type": "namespace",
"namespaces": {
"namespace_name": "default"
}
}
],
"trial_settings": {
"measurement_duration": "15min"
},
"recommendation_settings": {
"threshold": "0.1"
}
},
{
"version": "v2.0",
"experiment_name": "multiple-import-namespace",
"cluster_name": "default",
"performance_profile": "resource-optimization-openshift",
"mode": "monitor",
"target_cluster": "local",
"datasource": "prometheus-1",
"kubernetes_objects": [
{
"experiment_type": "namespace",
"namespaces": {
"namespace_name": "test-multiple-import"
}
}
],
"trial_settings": {
"measurement_duration": "15min"
},
"recommendation_settings": {
"threshold": "0.1"
}
}
]