@@ -18,6 +18,7 @@
 import logging
 import time
 import pytest
+import json

 from acktest.k8s import resource as k8s
 from acktest.k8s import condition
@@ -33,9 +34,11 @@
 from e2e.common.types import CLUSTER_RESOURCE_PLURAL
 from e2e.common.waiter import wait_until_deleted
 from e2e.replacement_values import REPLACEMENT_VALUES
+from e2e.tests.test_cluster import simple_cluster

-MODIFY_WAIT_AFTER_SECONDS = 240
-CHECK_STATUS_WAIT_SECONDS = 240
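+# Seconds to wait after issuing an update, and before re-checking resource status.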
+MODIFY_WAIT_AFTER_SECONDS = 60
+CHECK_STATUS_WAIT_SECONDS = 30


 def wait_for_cluster_active(eks_client, cluster_name):
@@ -93,8 +95,14 @@ def auto_mode_cluster(eks_client):

     yield (ref, cr)

-    pass
-
+    # Attempt to delete the cluster; it may already have been removed by the test.
+    try:
+        _, deleted = k8s.delete_custom_resource(ref, 9, 10)
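+        # Assumes acktest's delete_custom_resource(ref, wait_periods, period_length): up to 9 polls, 10s apart.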
+        assert deleted
+        wait_until_deleted(cluster_name)
+    except Exception:
+        pass

 @service_marker
 @pytest.mark.canary
@@ -141,6 +148,180 @@ def test_create_auto_mode_cluster(self, eks_client, auto_mode_cluster):
         time.sleep(CHECK_STATUS_WAIT_SECONDS)

         # Clean up
-        _, deleted = k8s.delete_custom_resource(ref, 3, 10)
+        _, deleted = k8s.delete_custom_resource(ref, 9, 10)
         assert deleted
         wait_until_deleted(cluster_name)
+
+
+@service_marker
+@pytest.mark.canary
+class TestAutoModeClusterUpdates:
+    def test_enable_auto_mode_on_standard_cluster(self, eks_client, simple_cluster):
+        (ref, cr) = simple_cluster
+        cluster_name = cr["spec"]["name"]
+
+        try:
+            aws_res = eks_client.describe_cluster(name=cluster_name)
+            assert aws_res is not None
+        except eks_client.exceptions.ResourceNotFoundException:
+            pytest.fail(f"Could not find cluster '{cluster_name}' in EKS")
+
+        # Wait for the cluster to be ACTIVE and let the controller refresh status
+        wait_for_cluster_active(eks_client, cluster_name)
+        time.sleep(CHECK_STATUS_WAIT_SECONDS)
+        get_and_assert_status(ref, "ACTIVE", True)
+
+        # Patch to enable auto-mode
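+        # Auto Mode spans compute, block storage, and load balancing; the patch enables all three together.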
+        patch_enable_auto_mode = {
+            "spec": {
+                "computeConfig": {"enabled": True},
+                "storageConfig": {"blockStorage": {"enabled": True}},
+                "kubernetesNetworkConfig": {
+                    "elasticLoadBalancing": {"enabled": True},
+                    "ipFamily": "ipv4",
+                },
+            }
+        }
+        k8s.patch_custom_resource(ref, patch_enable_auto_mode)
+        time.sleep(MODIFY_WAIT_AFTER_SECONDS)
+        get_and_assert_status(ref, "UPDATING", False)
+
+        # Wait for the cluster to become active after the update
+        wait_for_cluster_active(eks_client, cluster_name)
+        time.sleep(CHECK_STATUS_WAIT_SECONDS)
+        get_and_assert_status(ref, "ACTIVE", True)
+
+        # Verify auto-mode activation via the EKS update history (DescribeCluster may not reflect the new fields immediately)
+        updates_summary = eks_client.list_updates(name=cluster_name)
+
+        update_ids = updates_summary.get("updateIds", [])
+        assert len(update_ids) == 1, (
+            f"Expected exactly 1 update, got {len(update_ids)}: {update_ids}"
+        )
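+        # The fixture cluster is freshly created with no prior update history, so the enable patch is the only entry.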
+
+        update_id = update_ids[0]
+        upd_desc = eks_client.describe_update(name=cluster_name, updateId=update_id)
+
+        update_info = upd_desc["update"]
+
+        # Verify update type and status
+        assert update_info["type"] == "AutoModeUpdate", (
+            f"Expected AutoModeUpdate, got: {update_info['type']}"
+        )
+        assert update_info["status"] == "Successful", (
+            f"Expected Successful status, got: {update_info['status']}"
+        )
+
+    def test_disable_auto_mode_incorrectly(self, eks_client, auto_mode_cluster):
+        (ref, cr) = auto_mode_cluster
+        cluster_name = cr["spec"]["name"]
+
+        try:
+            aws_res = eks_client.describe_cluster(name=cluster_name)
+            assert aws_res is not None
+        except eks_client.exceptions.ResourceNotFoundException:
+            pytest.fail(f"Could not find cluster '{cluster_name}' in EKS")
+
+        wait_for_cluster_active(eks_client, cluster_name)
+        time.sleep(CHECK_STATUS_WAIT_SECONDS)
+        get_and_assert_status(ref, "ACTIVE", True)
+
+        # Patch with incorrect parameters to disable auto-mode
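+        # blockStorage stays enabled while compute and load balancing are disabled, a mismatch the controller must reject.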
+        patch_disable_auto_mode_incorrectly = {
+            "spec": {
+                "computeConfig": {"enabled": False},
+                "storageConfig": {
+                    "blockStorage": {
+                        "enabled": True  # Should be False
+                    }
+                },
+                "kubernetesNetworkConfig": {"elasticLoadBalancing": {"enabled": False}},
+            }
+        }
+        logging.info(
+            f"Applying patch with incorrect parameters: {patch_disable_auto_mode_incorrectly}"
+        )
+        k8s.patch_custom_resource(ref, patch_disable_auto_mode_incorrectly)
+        time.sleep(MODIFY_WAIT_AFTER_SECONDS)
+
+        # The controller should detect the invalid configuration and set a terminal condition.
+        terminal_condition = "ACK.Terminal"
+        cond = k8s.get_resource_condition(ref, terminal_condition)
+        if cond is None:
+            pytest.fail(
+                f"Failed to find {terminal_condition} condition in resource {ref}"
+            )
+
+        cond_status = cond.get("status", None)
+        if str(cond_status) != str(True):
+            pytest.fail(
+                f"Expected {terminal_condition} condition to have status True but found {cond_status}"
+            )
+
+        # Verify the error message mentions the invalid Auto Mode configuration
+        assert "invalid Auto Mode configuration" in cond.get("message", "")
+
+    def test_disable_auto_mode_correctly(self, eks_client, auto_mode_cluster):
+        (ref, cr) = auto_mode_cluster
+        cluster_name = cr["spec"]["name"]
+
+        try:
+            aws_res = eks_client.describe_cluster(name=cluster_name)
+            assert aws_res is not None
+        except eks_client.exceptions.ResourceNotFoundException:
+            pytest.fail(f"Could not find cluster '{cluster_name}' in EKS")
+
+        wait_for_cluster_active(eks_client, cluster_name)
+        time.sleep(CHECK_STATUS_WAIT_SECONDS)
+        get_and_assert_status(ref, "ACTIVE", True)
+
+        # Patch to disable auto-mode correctly
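+        # Unlike the mismatched patch above, all three capabilities are disabled together.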
+        patch_disable_auto_mode = {
+            "spec": {
+                "computeConfig": {"enabled": False},
+                "storageConfig": {"blockStorage": {"enabled": False}},
+                "kubernetesNetworkConfig": {"elasticLoadBalancing": {"enabled": False}},
+            }
+        }
+        logging.info(f"Applying patch to disable auto-mode: {patch_disable_auto_mode}")
+        k8s.patch_custom_resource(ref, patch_disable_auto_mode)
+        time.sleep(MODIFY_WAIT_AFTER_SECONDS)
+
+        get_and_assert_status(ref, "UPDATING", False)
+
+        # Wait for the cluster to become active after the update
+        wait_for_cluster_active(eks_client, cluster_name)
+        time.sleep(CHECK_STATUS_WAIT_SECONDS)
+
+        get_and_assert_status(ref, "ACTIVE", True)
+
+        # Verify auto-mode is disabled
+        aws_res = eks_client.describe_cluster(name=cluster_name)
+
+        # Check compute config - should be absent or disabled
+        compute_config = aws_res["cluster"].get("computeConfig")
+        if compute_config is not None:
+            assert compute_config.get("enabled") is False, (
+                f"computeConfig.enabled should be False or absent, got: {compute_config.get('enabled')}"
+            )
+
+        # Check storage config - should be absent or disabled
+        storage_config = aws_res["cluster"].get("storageConfig")
+        if storage_config is not None:
+            block_storage = storage_config.get("blockStorage", {})
+            if block_storage:
+                assert block_storage.get("enabled") is False, (
+                    f"storageConfig.blockStorage.enabled should be False or absent, got: {block_storage.get('enabled')}"
+                )
+
+        # Check elastic load balancing config - should be absent or disabled
+        k8s_network_config = aws_res["cluster"].get("kubernetesNetworkConfig", {})
+        elb_config = k8s_network_config.get("elasticLoadBalancing")
+        if elb_config is not None:
+            assert elb_config.get("enabled") is False, (
+                f"kubernetesNetworkConfig.elasticLoadBalancing.enabled should be False or absent, got: {elb_config.get('enabled')}"
+            )