-
Notifications
You must be signed in to change notification settings - Fork 0
/
oke-variables.tf
191 lines (173 loc) · 7.86 KB
/
oke-variables.tf
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
## Copyright (c) 2021, Oracle and/or its affiliates.
## All rights reserved. The Universal Permissive License (UPL), Version 1.0 as shown at http://oss.oracle.com/licenses/upl
# OKE Variables
## OKE Cluster Details
variable "create_new_oke_cluster" {
  type        = bool
  default     = true
  description = "Creates a new OKE cluster, node pool and network resources"
}
variable "existent_oke_cluster_id" {
  type        = string
  default     = ""
  description = "Using existent OKE Cluster. Only the application and services will be provisioned. If select cluster autoscaler feature, you need to get the node pool id and enter when required"
}
variable "create_new_compartment_for_oke" {
  type        = bool
  default     = false
  description = "Creates new compartment for OKE Nodes and OCI Services deployed. NOTE: The creation of the compartment increases the deployment time by at least 3 minutes, and can increase by 15 minutes when destroying"
}
variable "oke_compartment_description" {
  type        = string
  default     = "Compartment for OKE, Nodes and Services"
  description = "Description applied to the compartment created for OKE, nodes and services"
}
variable "cluster_options_add_ons_is_kubernetes_dashboard_enabled" {
  type        = bool
  default     = false
  description = "Enables the Kubernetes Dashboard add-on for the OKE cluster"
}
variable "cluster_options_admission_controller_options_is_pod_security_policy_enabled" {
  type        = bool
  default     = false
  description = "If true: The pod security policy admission controller will use pod security policies to restrict the pods accepted into the cluster."
}
## OKE Visibility (Workers and Endpoint)
variable "cluster_workers_visibility" {
  type        = string
  default     = "Private"
  description = "The Kubernetes worker nodes that are created will be hosted in public or private subnet(s)"

  validation {
    # Idiomatic membership check; equivalent to the two-way OR comparison.
    condition     = contains(["Private", "Public"], var.cluster_workers_visibility)
    error_message = "Sorry, but cluster visibility can only be Private or Public."
  }
}
variable "cluster_endpoint_visibility" {
  type        = string
  default     = "Public"
  description = "The Kubernetes cluster that is created will be hosted on a public subnet with a public IP address auto-assigned or on a private subnet. If Private, additional configuration will be necessary to run kubectl commands"

  validation {
    condition     = contains(["Private", "Public"], var.cluster_endpoint_visibility)
    error_message = "Sorry, but cluster endpoint visibility can only be Private or Public."
  }
}
## OKE Encryption details
variable "use_encryption_from_oci_vault" {
  type        = bool
  default     = false
  description = "By default, Oracle manages the keys that encrypts Kubernetes Secrets at Rest in Etcd, but you can choose a key from a vault that you have access to, if you want greater control over the key's lifecycle and how it's used"
}
variable "create_new_encryption_key" {
  type        = bool
  default     = false
  description = "Creates new vault and key on OCI Vault/Key Management/KMS and assign to boot volume of the worker nodes"
}
variable "existent_encryption_key_id" {
  type        = string
  default     = ""
  description = "Use an existent master encryption key to encrypt boot volume and object storage bucket. NOTE: If the key resides in a different compartment or in a different tenancy, make sure you have the proper policies to access, or the provision of the worker nodes will fail"
}
variable "create_vault_policies_for_group" {
  type        = bool
  default     = false
  description = "Creates policies to allow the user applying the stack to manage vault and keys. If you are on the Administrators group or already have the policies for a compartment, this policy is not needed. If you do not have access to allow the policy, ask your administrator to include it for you"
}
variable "user_admin_group_for_vault_policy" {
  type        = string
  default     = "Administrators"
  description = "User Identity Group to allow manage vault and keys. The user running the Terraform scripts or Applying the ORM Stack need to be on this group"
}
# ## OKE Autoscaler
# variable "cluster_autoscaler_enabled" {
# default = true
# description = "Enables OKE cluster autoscaler. Node pools will auto scale based on the resources usage"
# }
# variable "cluster_autoscaler_min_nodes" {
# default = 3
# description = "Minimum number of nodes on the node pool to be scheduled by the Kubernetes"
# }
# variable "cluster_autoscaler_max_nodes" {
# default = 10
# description = "Maximum number of nodes on the node pool to be scheduled by the Kubernetes"
# }
# variable "existent_oke_nodepool_id_for_autoscaler" {
# default = ""
# description = "Nodepool Id of the existent OKE to use with Cluster Autoscaler"
# }
## OKE Node Pool Details
variable "node_pool_name" {
  type        = string
  default     = "pool1"
  description = "Name of the node pool"
}
variable "k8s_version" {
  type        = string
  default     = "Latest"
  description = "Kubernetes version installed on your master and worker nodes"
}
variable "num_pool_workers" {
  type        = number
  default     = 3
  description = "The number of worker nodes in the node pool. If select Cluster Autoscaler, will assume the minimum number of nodes configured"
}
variable "node_pool_shape" {
  type        = string
  default     = "VM.Standard.E3.Flex"
  description = "A shape is a template that determines the number of OCPUs, amount of memory, and other resources allocated to a newly created instance for the Worker Node"
}
variable "node_pool_node_shape_config_ocpus" {
  type        = string
  default     = "1" # Only used if flex shape is selected
  description = "You can customize the number of OCPUs to a flexible shape"
}
variable "node_pool_node_shape_config_memory_in_gbs" {
  type        = string
  default     = "16" # Only used if flex shape is selected
  description = "You can customize the amount of memory allocated to a flexible shape"
}
variable "node_pool_boot_volume_size_in_gbs" {
  type        = string
  default     = "60"
  description = "Specify a custom boot volume size (in GB)"
}
variable "image_operating_system" {
  type        = string
  default     = "Oracle Linux"
  description = "The OS/image installed on all nodes in the node pool."
}
variable "image_operating_system_version" {
  type        = string
  default     = "7.9"
  description = "The OS/image version installed on all nodes in the node pool."
}
variable "generate_public_ssh_key" {
  # NOTE(review): presumably toggles auto-generation of an SSH key pair for the
  # worker nodes instead of using var.public_ssh_key - confirm against usage.
  type    = bool
  default = true
}
variable "public_ssh_key" {
  type        = string
  default     = ""
  description = "In order to access your private nodes with a public SSH key you will need to set up a bastion host (a.k.a. jump box). If using public nodes, bastion is not needed. Left blank to not import keys."
}
# Network Details
## CIDRs
# Map of all CIDR ranges used by the stack's network resources. Keys are
# referenced by name elsewhere in the stack when creating the VCN and subnets.
variable "network_cidrs" {
  type = map(string)
  default = {
    VCN-CIDR                      = "10.20.0.0/16"   # Overall VCN address space
    SUBNET-REGIONAL-CIDR          = "10.20.10.0/24"  # Regional subnet (inside VCN-CIDR)
    LB-SUBNET-REGIONAL-CIDR       = "10.20.20.0/24"  # Load-balancer subnet (inside VCN-CIDR)
    ENDPOINT-SUBNET-REGIONAL-CIDR = "10.20.0.0/28"   # Kubernetes API endpoint subnet (inside VCN-CIDR)
    ALL-CIDR                      = "0.0.0.0/0"      # Anywhere (used for routes/security rules)
    PODS-CIDR                     = "10.244.0.0/16"  # Kubernetes pods overlay range
    KUBERNETES-SERVICE-CIDR       = "10.96.0.0/16"   # Kubernetes ClusterIP services range
  }
}
# Create Dynamic Group and Policies
variable "create_dynamic_group_for_nodes_in_compartment" {
  type        = bool
  default     = true
  description = "Creates dynamic group of Nodes in the compartment. Note: You need to have proper rights on the Tenancy. If you only have rights in a compartment, uncheck and ask you administrator to create the Dynamic Group for you"
}
variable "existent_dynamic_group_for_nodes_in_compartment" {
  type        = string
  default     = ""
  description = "Enter previous created Dynamic Group for the policies"
}
variable "create_compartment_policies" {
  type        = bool
  default     = true
  description = "Creates policies that will reside on the compartment. e.g.: Policies to support Cluster Autoscaler, OCI Logging datasource on Grafana"
}
variable "create_tenancy_policies" {
  type        = bool
  default     = true
  description = "Creates policies that need to reside on the tenancy. e.g.: Policies to support OCI Metrics datasource on Grafana"
}
# ORM Schema visual control variables
variable "show_advanced" {
  type        = bool
  default     = false
  description = "Shows advanced options on the ORM Stack schema"
}
# App Name Locals
# Derived identifiers based on var.app_name (declared elsewhere in this stack).
locals {
  # Lowercased app name with spaces replaced by hyphens, truncated to the
  # first 6 characters - presumably to satisfy resource-name length limits; confirm.
  app_name_normalized = substr(replace(lower(var.app_name), " ", "-"), 0, 6)
  # First run of 1-10 alphanumeric characters in the app name (regex() returns
  # the first match, and errors if var.app_name contains none).
  app_name_for_db = regex("[[:alnum:]]{1,10}", var.app_name)
}
# Dictionary Locals
locals {
  # Compute shapes that support flexible OCPU/memory configuration.
  # NOTE(review): presumably compared against var.node_pool_shape to decide
  # whether the node_pool_node_shape_config_* values apply - confirm against usage.
  compute_flexible_shapes = [
    "VM.Standard.E3.Flex",
    "VM.Standard.E4.Flex",
    "VM.Standard.A1.Flex"
  ]
}