-# The EKS service does not provide a cluster-level API parameter or resource to automatically configure the underlying Kubernetes cluster
-# to allow worker nodes to join the cluster via AWS IAM role authentication.
-
-# NOTE: To automatically apply the Kubernetes configuration to the cluster (which allows the worker nodes to join the cluster),
-# the requirements outlined here must be met:
-# https://learn.hashicorp.com/terraform/aws/eks-intro#preparation
-# https://learn.hashicorp.com/terraform/aws/eks-intro#configuring-kubectl-for-eks
-# https://learn.hashicorp.com/terraform/aws/eks-intro#required-kubernetes-configuration-to-join-worker-nodes
-
-# Additional links
-# https://learn.hashicorp.com/terraform/aws/eks-intro
-# https://itnext.io/how-does-client-authentication-work-on-amazon-eks-c4f2b90d943b
-# https://docs.aws.amazon.com/eks/latest/userguide/create-kubeconfig.html
-# https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html
-# https://docs.aws.amazon.com/cli/latest/reference/eks/update-kubeconfig.html
-# https://docs.aws.amazon.com/en_pv/eks/latest/userguide/create-kubeconfig.html
-# https://itnext.io/kubernetes-authorization-via-open-policy-agent-a9455d9d5ceb
-# http://marcinkaszynski.com/2018/07/12/eks-auth.html
-# https://cloud.google.com/kubernetes-engine/docs/concepts/configmap
-# http://yaml-multiline.info
-# https://github.com/terraform-providers/terraform-provider-kubernetes/issues/216
-# https://www.terraform.io/docs/cloud/run/install-software.html
-# https://stackoverflow.com/questions/26123740/is-it-possible-to-install-aws-cli-package-without-root-permission
-# https://stackoverflow.com/questions/58232731/kubectl-missing-form-terraform-cloud
-# https://docs.aws.amazon.com/cli/latest/userguide/install-bundle.html
-# https://docs.aws.amazon.com/cli/latest/userguide/install-cliv1.html
-

locals {
-  yaml_quote = var.aws_auth_yaml_strip_quotes ? "" : "\""
-
-  need_kubernetes_provider = local.enabled && var.apply_config_map_aws_auth
-
-  kubeconfig_path_enabled = local.need_kubernetes_provider && var.kubeconfig_path_enabled
-  kube_exec_auth_enabled  = local.kubeconfig_path_enabled ? false : local.need_kubernetes_provider && var.kube_exec_auth_enabled
-  kube_data_auth_enabled  = local.kube_exec_auth_enabled ? false : local.need_kubernetes_provider && var.kube_data_auth_enabled
-
-  exec_profile = local.kube_exec_auth_enabled && var.kube_exec_auth_aws_profile_enabled ? ["--profile", var.kube_exec_auth_aws_profile] : []
-  exec_role    = local.kube_exec_auth_enabled && var.kube_exec_auth_role_arn_enabled ? ["--role-arn", var.kube_exec_auth_role_arn] : []
-
-  cluster_endpoint_data     = join("", aws_eks_cluster.default[*].endpoint) # use `join` instead of `one` to keep the value a string
-  cluster_auth_map_endpoint = var.apply_config_map_aws_auth ? local.cluster_endpoint_data : var.dummy_kubeapi_server
-
-  certificate_authority_data_list          = coalescelist(aws_eks_cluster.default[*].certificate_authority, [[{ data : "" }]])
-  certificate_authority_data_list_internal = local.certificate_authority_data_list[0]
-  certificate_authority_data_map           = local.certificate_authority_data_list_internal[0]
-  certificate_authority_data               = local.certificate_authority_data_map["data"]
-
-  # Add worker nodes role ARNs (could be from many un-managed worker groups) to the ConfigMap
-  # Note that we don't need to do this for managed Node Groups since EKS adds their roles to the ConfigMap automatically
-  map_worker_roles = [
-    for role_arn in var.workers_role_arns : {
-      rolearn  = role_arn
-      username = "system:node:{{EC2PrivateDNSName}}"
-      groups = [
-        "system:bootstrappers",
-        "system:nodes"
-      ]
-    }
-  ]
-}
+  # Extract the cluster certificate for use in OIDC configuration
+  certificate_authority_data = try(aws_eks_cluster.default[0].certificate_authority[0]["data"], "")
+
+  eks_policy_short_abbreviation_map = {
+    # List available policies with `aws eks list-access-policies --output table`
+
+    Admin        = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSAdminPolicy"
+    ClusterAdmin = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy"
+    Edit         = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSEditPolicy"
+    View         = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSViewPolicy"
+    # Add new policies here
+  }
+
+  eks_policy_abbreviation_map = merge({ for k, v in local.eks_policy_short_abbreviation_map : format("AmazonEKS%sPolicy", k) => v },
+    local.eks_policy_short_abbreviation_map)

-resource "null_resource" "wait_for_cluster" {
-  count      = local.enabled && var.apply_config_map_aws_auth ? 1 : 0
-  depends_on = [
-    aws_eks_cluster.default,
-    aws_security_group_rule.custom_ingress_rules,
-    aws_security_group_rule.managed_ingress_security_groups,
-    aws_security_group_rule.managed_ingress_cidr_blocks,
-  ]
-
-  provisioner "local-exec" {
-    command     = var.wait_for_cluster_command
-    interpreter = var.local_exec_interpreter
-    environment = {
-      ENDPOINT = local.cluster_endpoint_data
-    }
+
+  # Expand abbreviated access policies to full ARNs
+  access_entry_expanded_map = { for k, v in var.access_entry_map : k => merge({
+    # Expand abbreviated policies to full ARNs
+    access_policy_associations = { for kk, vv in v.access_policy_associations : try(local.eks_policy_abbreviation_map[kk], kk) => vv }
+    # Copy over all other fields
+  }, { for kk, vv in v : kk => vv if kk != "access_policy_associations" })
+  }
+
+  # Replace membership in "system:masters" group with association to "ClusterAdmin" policy
+  access_entry_map = { for k, v in local.access_entry_expanded_map : k => merge({
+    # Remove "system:masters" group from standard users
+    kubernetes_groups = [for group in v.kubernetes_groups : group if group != "system:masters" || v.type != "STANDARD"]
+    access_policy_associations = merge(
+      # copy all existing associations
+      v.access_policy_associations,
+      # add "ClusterAdmin" policy if the user was in "system:masters" group and is a standard user
+      contains(v.kubernetes_groups, "system:masters") && v.type == "STANDARD" ? {
+        "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy" = {
+          access_scope = {
+            type       = "cluster"
+            namespaces = null
+          }
+        }
+      } : {}
+    )
+    # Copy over all other fields
+  }, { for kk, vv in v : kk => vv if kk != "kubernetes_groups" && kk != "access_policy_associations" })
  }
+
+  eks_access_policy_association_product_map = merge(flatten([
+    for k, v in local.access_entry_map : [for kk, vv in v.access_policy_associations : { format("%s-%s", k, kk) = {
+      principal_arn = k
+      policy_arn    = kk
+    }
+    }]
+  ])...)
}
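For reference, here is a minimal sketch of a module input that exercises this normalization, assuming the `access_entry_map` variable schema implied by the locals above (principal ARN keys, `type`, `kubernetes_groups`, and `access_policy_associations` keyed by full policy ARN or abbreviation); the account ID and role names are hypothetical:

access_entry_map = {
  # Hypothetical IAM role ARNs; replace with real principals
  "arn:aws:iam::111111111111:role/eks-admins" = {
    type              = "STANDARD"
    kubernetes_groups = []
    access_policy_associations = {
      # Abbreviation; expanded by eks_policy_abbreviation_map to
      # "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy"
      ClusterAdmin = {
        access_scope = {
          type       = "cluster"
          namespaces = null
        }
      }
    }
  }
  "arn:aws:iam::111111111111:role/eks-developers" = {
    type              = "STANDARD"
    kubernetes_groups = ["developers"]
    access_policy_associations = {
      # Namespace-scoped read-only access via the "View" abbreviation
      View = {
        access_scope = {
          type       = "namespace"
          namespaces = ["dev"]
        }
      }
    }
  }
}

With an input like this, `local.eks_access_policy_association_product_map` ends up with one element per (principal, policy) pair, keyed by `format("%s-%s", principal_arn, policy_arn)`, which is what the two `map` resources below iterate over.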

+# The preferred way to keep track of entries is by key, but we also support list,
+# because keys need to be known at plan time, but list values do not.
+resource "aws_eks_access_entry" "map" {
+  for_each = local.enabled ? local.access_entry_map : {}
+
+  cluster_name      = local.eks_cluster_id
+  principal_arn     = each.key
+  kubernetes_groups = each.value.kubernetes_groups
+  type              = each.value.type

-# Get an authentication token to communicate with the EKS cluster.
-# By default (before other roles are added to the Auth ConfigMap), you can authenticate to EKS cluster only by assuming the role that created the cluster.
-# `aws_eks_cluster_auth` uses IAM credentials from the AWS provider to generate a temporary token.
-# If the AWS provider assumes an IAM role, `aws_eks_cluster_auth` will use the same IAM role to get the auth token.
-# https://www.terraform.io/docs/providers/aws/d/eks_cluster_auth.html
-#
-# You can set `kube_exec_auth_enabled` to use a different IAM Role or AWS config profile to fetch the auth token
-#
-data "aws_eks_cluster_auth" "eks" {
-  count = local.kube_data_auth_enabled ? 1 : 0
-  name  = one(aws_eks_cluster.default[*].id)
+  tags = module.this.tags
}

+resource "aws_eks_access_policy_association" "map" {
+  for_each = local.enabled ? local.eks_access_policy_association_product_map : {}
+
+  cluster_name  = local.eks_cluster_id
+  principal_arn = each.value.principal_arn
+  policy_arn    = each.value.policy_arn

-provider "kubernetes" {
-  # Without a dummy API server configured, the provider will throw an error and prevent a "plan" from succeeding
-  # in situations where Terraform does not provide it with the cluster endpoint before triggering an API call.
-  # Since those situations are limited to ones where we do not care about the failure, such as fetching the
-  # ConfigMap before the cluster has been created or in preparation for deleting it, and the worst that will
-  # happen is that the aws-auth ConfigMap will be unnecessarily updated, it is just better to ignore the error
-  # so we can proceed with the task of creating or destroying the cluster.
-  #
-  # If this solution bothers you, you can disable it by setting var.dummy_kubeapi_server = null
-  host                   = local.cluster_auth_map_endpoint
-  cluster_ca_certificate = local.enabled && !local.kubeconfig_path_enabled ? base64decode(local.certificate_authority_data) : null
-  token                  = local.kube_data_auth_enabled ? one(data.aws_eks_cluster_auth.eks[*].token) : null
-  # The Kubernetes provider will use information from KUBECONFIG if it exists, but if the default cluster
-  # in KUBECONFIG is some other cluster, this will cause problems, so we override it always.
-  config_path    = local.kubeconfig_path_enabled ? var.kubeconfig_path : ""
-  config_context = var.kubeconfig_context
-
-  dynamic "exec" {
-    for_each = local.kube_exec_auth_enabled && length(local.cluster_endpoint_data) > 0 ? ["exec"] : []
-    content {
-      api_version = "client.authentication.k8s.io/v1beta1"
-      command     = "aws"
-      args        = concat(local.exec_profile, ["eks", "get-token", "--cluster-name", try(aws_eks_cluster.default[0].id, "deleted")], local.exec_role)
-    }
+  access_scope {
+    type       = local.access_entry_map[each.value.principal_arn].access_policy_associations[each.value.policy_arn].access_scope.type
+    namespaces = local.access_entry_map[each.value.principal_arn].access_policy_associations[each.value.policy_arn].access_scope.namespaces
  }
}
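Because of the `for_each` keys chosen above, the resulting state addresses look roughly like the following (continuing the hypothetical ARNs from the earlier sketch), which matters when importing or moving entries with `terraform state`:

# Access entry is keyed by the principal ARN alone
aws_eks_access_entry.map["arn:aws:iam::111111111111:role/eks-admins"]

# Policy association is keyed by "<principal_arn>-<policy_arn>"
aws_eks_access_policy_association.map["arn:aws:iam::111111111111:role/eks-admins-arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy"]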

-resource "kubernetes_config_map" "aws_auth_ignore_changes" {
-  count      = local.enabled && var.apply_config_map_aws_auth && var.kubernetes_config_map_ignore_role_changes ? 1 : 0
-  depends_on = [null_resource.wait_for_cluster]
+# We could combine all the list access entries into a single resource,
+# but separating them by category minimizes the ripple effect of changes
+# due to adding and removing items from the list.
+resource "aws_eks_access_entry" "standard" {
+  count = local.enabled ? length(var.access_entries) : 0

-  metadata {
-    name      = "aws-auth"
-    namespace = "kube-system"
-  }
+  cluster_name      = local.eks_cluster_id
+  principal_arn     = var.access_entries[count.index].principal_arn
+  kubernetes_groups = var.access_entries[count.index].kubernetes_groups
+  type              = "STANDARD"

-  data = {
-    mapRoles    = yamlencode(distinct(concat(local.map_worker_roles, var.map_additional_iam_roles)))
-    mapUsers    = yamlencode(var.map_additional_iam_users)
-    mapAccounts = yamlencode(var.map_additional_aws_accounts)
-  }
+  tags = module.this.tags
+}
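For the list-based path, a sketch of a `var.access_entries` value, assuming a list of objects with at least the `principal_arn` and `kubernetes_groups` attributes referenced above (the role ARN is hypothetical):

access_entries = [
  {
    principal_arn     = "arn:aws:iam::111111111111:role/ci-deployer"
    kubernetes_groups = ["deployers"]
  }
]

Since this is a `count`-based resource, removing an element from the middle of the list shifts the indexes of everything after it, which is why the key-based `access_entry_map` is preferred whenever the principal ARNs are known at plan time.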

-  lifecycle {
-    ignore_changes = [data["mapRoles"]]
-  }
+resource "aws_eks_access_entry" "linux" {
+  count = local.enabled ? length(lookup(var.access_entries_for_nodes, "EC2_LINUX", [])) : 0
+
+  cluster_name  = local.eks_cluster_id
+  principal_arn = var.access_entries_for_nodes.EC2_LINUX[count.index]
+  type          = "EC2_LINUX"
+
+  tags = module.this.tags
}

-resource "kubernetes_config_map" "aws_auth" {
-  count      = local.enabled && var.apply_config_map_aws_auth && var.kubernetes_config_map_ignore_role_changes == false ? 1 : 0
-  depends_on = [null_resource.wait_for_cluster]
+resource "aws_eks_access_entry" "windows" {
+  count = local.enabled ? length(lookup(var.access_entries_for_nodes, "EC2_WINDOWS", [])) : 0

-  metadata {
-    name      = "aws-auth"
-    namespace = "kube-system"
-  }
+  cluster_name  = local.eks_cluster_id
+  principal_arn = var.access_entries_for_nodes.EC2_WINDOWS[count.index]
+  type          = "EC2_WINDOWS"
+
+  tags = module.this.tags
+}
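A sketch of the corresponding `var.access_entries_for_nodes` input, assuming a map from node category to a list of node role ARNs as the `lookup()` calls above imply (the ARNs are hypothetical placeholders for self-managed worker node roles):

access_entries_for_nodes = {
  EC2_LINUX = [
    "arn:aws:iam::111111111111:role/self-managed-linux-workers"
  ]
  EC2_WINDOWS = [
    "arn:aws:iam::111111111111:role/self-managed-windows-workers"
  ]
}

Node entries of type `EC2_LINUX` and `EC2_WINDOWS` get their group bindings from EKS itself, which is why these resources set only the principal and the type.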
+
+resource "aws_eks_access_policy_association" "list" {
+  count = local.enabled ? length(var.access_policy_associations) : 0
+
+  cluster_name  = local.eks_cluster_id
+  principal_arn = var.access_policy_associations[count.index].principal_arn
+  policy_arn = try(local.eks_policy_abbreviation_map[var.access_policy_associations[count.index].policy_arn],
+    var.access_policy_associations[count.index].policy_arn)

-  data = {
-    mapRoles    = replace(yamlencode(distinct(concat(local.map_worker_roles, var.map_additional_iam_roles))), "\"", local.yaml_quote)
-    mapUsers    = replace(yamlencode(var.map_additional_iam_users), "\"", local.yaml_quote)
-    mapAccounts = replace(yamlencode(var.map_additional_aws_accounts), "\"", local.yaml_quote)
+  access_scope {
+    type       = var.access_policy_associations[count.index].access_scope.type
+    namespaces = var.access_policy_associations[count.index].access_scope.namespaces
  }
}
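Finally, a sketch of a `var.access_policy_associations` value for the list-based association above; `policy_arn` can be either a full ARN or one of the abbreviations resolved through `eks_policy_abbreviation_map` (the principal ARN is hypothetical):

access_policy_associations = [
  {
    principal_arn = "arn:aws:iam::111111111111:role/ci-deployer"
    policy_arn    = "Edit" # resolved to arn:aws:eks::aws:cluster-access-policy/AmazonEKSEditPolicy
    access_scope = {
      type       = "namespace"
      namespaces = ["apps"]
    }
  }
]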