# Infrastructure for transferring data between two Yandex Data Processing clusters
#
# RU: https://cloud.yandex.ru/docs/data-proc/tutorials/metastore-import
# EN: https://cloud.yandex.com/en/docs/data-proc/tutorials/metastore-import
#
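# To deploy: run `terraform init` and then `terraform apply` in this directory
# (the standard Terraform workflow; see the provider sketch below the locals).
#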
# Set the configuration of the Yandex Data Processing clusters
# Specify the following settings:
locals {
  folder_id  = "" # Your cloud folder ID, the same one used in the provider configuration
  dp_ssh_key = "" # Absolute path to an SSH public key for the Yandex Data Processing clusters

  # The following settings are predefined. Change them only if necessary.
  network_name         = "dataproc-network"  # Name of the network
  nat_name             = "dataproc-nat"      # Name of the NAT gateway
  subnet_name          = "dataproc-subnet-a" # Name of the subnet
  sa_name              = "dataproc-s3-sa"    # Name of the service account
  dataproc_source_name = "dataproc-source"   # Name of the Yandex Data Processing source cluster
  dataproc_target_name = "dataproc-target"   # Name of the Yandex Data Processing target cluster
  bucket_name          = "dataproc-bucket"   # Name of the Object Storage bucket
}
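
# The Yandex Cloud provider is assumed to be configured in another file of this
# configuration (which is why folder_id above must match it). If it is not, a
# minimal sketch looks like the following; how you authenticate (OAuth token,
# service account key file, or yc CLI profile) depends on your environment:
#
# terraform {
#   required_providers {
#     yandex = {
#       source = "yandex-cloud/yandex"
#     }
#   }
# }
#
# provider "yandex" {
#   folder_id = local.folder_id
# }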
resource "yandex_vpc_network" "dataproc_network" {
description = "Network for Yandex Data Processing and Metastore"
name = local.network_name
}
# NAT gateway for Yandex Data Processing and Metastore
resource "yandex_vpc_gateway" "dataproc_nat" {
name = local.nat_name
shared_egress_gateway {}
}
# Routing table for Yandex Data Processing and Metastore
resource "yandex_vpc_route_table" "dataproc_rt" {
network_id = yandex_vpc_network.dataproc_network.id
static_route {
destination_prefix = "0.0.0.0/0"
gateway_id = yandex_vpc_gateway.dataproc_nat.id
}
}
resource "yandex_vpc_subnet" "dataproc_subnet-a" {
description = "Subnet for Yandex Data Processing and Metastore"
name = local.subnet_name
zone = "ru-central1-a"
network_id = yandex_vpc_network.dataproc_network.id
v4_cidr_blocks = ["10.140.0.0/24"]
route_table_id = yandex_vpc_route_table.dataproc_rt.id
}
resource "yandex_vpc_security_group" "dataproc-security-group" {
description = "Security group for the Yandex Data Processing clusters"
network_id = yandex_vpc_network.dataproc_network.id
ingress {
description = "Allow any incoming traffic within the security group"
protocol = "ANY"
from_port = 0
to_port = 65535
predefined_target = "self_security_group"
}
ingress {
description = "Allow access to NTP servers for time syncing"
protocol = "UDP"
port = 123
v4_cidr_blocks = ["0.0.0.0/0"]
}
ingress {
description = "Allow SSH connections from any IP address to subcluster hosts with public addresses"
protocol = "TCP"
port = 22
v4_cidr_blocks = ["0.0.0.0/0"]
}
ingress {
description = "Allow any incoming traffic from clients to the Metastore cluster"
protocol = "ANY"
from_port = 30000
to_port = 32767
v4_cidr_blocks = ["0.0.0.0/0"]
}
ingress {
description = "Allow any incoming traffic from a load balancer to the Metastore cluster"
protocol = "ANY"
port = 10256
predefined_target = "loadbalancer_healthchecks"
}
egress {
description = "Allow any outgoing traffic within the security group"
protocol = "ANY"
from_port = 0
to_port = 65535
predefined_target = "self_security_group"
}
egress {
description = "Allow connections to the HTTPS port from any IP address"
protocol = "TCP"
port = 443
v4_cidr_blocks = ["0.0.0.0/0"]
}
egress {
description = "Allow access to NTP servers for time syncing"
protocol = "UDP"
port = 123
v4_cidr_blocks = ["0.0.0.0/0"]
}
egress {
description = "Allow connections to the Metastore port from any IP address"
protocol = "ANY"
port = 9083
v4_cidr_blocks = ["0.0.0.0/0"]
}
}
resource "yandex_iam_service_account" "dataproc-sa" {
description = "Service account to manage the Yandex Data Processing clusters"
name = local.sa_name
}
# Assign the dataproc.agent role to the Yandex Data Processing service account
resource "yandex_resourcemanager_folder_iam_binding" "dataproc-agent" {
folder_id = local.folder_id
role = "dataproc.agent"
members = ["serviceAccount:${yandex_iam_service_account.dataproc-sa.id}"]
}
# Assign the dataproc.provisioner role to the Yandex Data Processing service account
resource "yandex_resourcemanager_folder_iam_binding" "dataproc-provisioner" {
folder_id = local.folder_id
role = "dataproc.provisioner"
members = ["serviceAccount:${yandex_iam_service_account.dataproc-sa.id}"]
}
# Assign the storage.admin role to the Yandex Data Processing service account
resource "yandex_resourcemanager_folder_iam_binding" "storage-admin" {
folder_id = local.folder_id
role = "storage.admin"
members = ["serviceAccount:${yandex_iam_service_account.dataproc-sa.id}"]
}
resource "yandex_storage_bucket" "dataproc-bucket" {
bucket = local.bucket_name
grant {
id = yandex_iam_service_account.dataproc-sa.id
type = "CanonicalUser"
permissions = ["READ", "WRITE"]
}
}
resource "yandex_dataproc_cluster" "dataproc-source-cluster" {
description = "Yandex Data Processing source cluster"
depends_on = [yandex_resourcemanager_folder_iam_binding.dataproc-agent,yandex_resourcemanager_folder_iam_binding.dataproc-provisioner]
bucket = yandex_storage_bucket.dataproc-bucket.id
security_group_ids = [yandex_vpc_security_group.dataproc-security-group.id]
name = local.dataproc_source_name
service_account_id = yandex_iam_service_account.dataproc-sa.id
zone_id = "ru-central1-a"
ui_proxy = true
cluster_config {
version_id = "2.0"
hadoop {
services = ["HDFS", "HIVE", "SPARK", "YARN", "ZEPPELIN"]
ssh_public_keys = [file(local.dp_ssh_key)]
properties = {
# For running PySpark jobs when Yandex Data Processing is integrated with Metastore
"spark:spark.sql.hive.metastore.sharedPrefixes" = "com.amazonaws,ru.yandex.cloud"
}
}
subcluster_spec {
name = "main"
role = "MASTERNODE"
resources {
resource_preset_id = "s2.micro" # 2 vCPU, 8 GB of RAM
disk_type_id = "network-hdd"
disk_size = 20 # GB
}
subnet_id = yandex_vpc_subnet.dataproc_subnet-a.id
hosts_count = 1
assign_public_ip = true
}
subcluster_spec {
name = "data"
role = "DATANODE"
resources {
resource_preset_id = "s2.micro" # 2 vCPU, 8 GB of RAM
disk_type_id = "network-hdd"
disk_size = 20 # GB
}
subnet_id = yandex_vpc_subnet.dataproc_subnet-a.id
hosts_count = 1
}
}
}
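
# The Metastore cluster itself is not managed in this file; in the tutorial it
# is created separately and both Yandex Data Processing clusters are then
# attached to it. Assuming that setup, each cluster would additionally carry a
# property along these lines in its hadoop.properties map (the address is a
# placeholder for the Metastore cluster IP):
#
#   "spark:spark.hive.metastore.uris" = "thrift://<Metastore cluster IP>:9083"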
resource "yandex_dataproc_cluster" "dataproc-target-cluster" {
description = "Yandex Data Processing target cluster"
depends_on = [yandex_resourcemanager_folder_iam_binding.dataproc-agent,yandex_resourcemanager_folder_iam_binding.dataproc-provisioner]
bucket = yandex_storage_bucket.dataproc-bucket.id
security_group_ids = [yandex_vpc_security_group.dataproc-security-group.id]
name = local.dataproc_target_name
service_account_id = yandex_iam_service_account.dataproc-sa.id
zone_id = "ru-central1-a"
ui_proxy = true
cluster_config {
version_id = "2.0"
hadoop {
services = ["HDFS", "HIVE", "SPARK", "YARN", "ZEPPELIN"]
ssh_public_keys = [file(local.dp_ssh_key)]
properties = {
# For running PySpark jobs when Yandex Data Processing is integrated with Metastore
"spark:spark.sql.hive.metastore.sharedPrefixes" = "com.amazonaws,ru.yandex.cloud"
}
}
subcluster_spec {
name = "main"
role = "MASTERNODE"
resources {
resource_preset_id = "s2.micro" # 2 vCPU, 8 GB of RAM
disk_type_id = "network-hdd"
disk_size = 20 # GB
}
subnet_id = yandex_vpc_subnet.dataproc_subnet-a.id
hosts_count = 1
assign_public_ip = true
}
subcluster_spec {
name = "data"
role = "DATANODE"
resources {
resource_preset_id = "s2.micro" # 2 vCPU, 8 GB of RAM
disk_type_id = "network-hdd"
disk_size = 20 # GB
}
subnet_id = yandex_vpc_subnet.dataproc_subnet-a.id
hosts_count = 1
}
}
}
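
# Optional convenience outputs: the cluster IDs and the bucket name, e.g. for
# follow-up steps with the yc CLI. The output names are arbitrary additions;
# the referenced attributes (`id`, `bucket`) are part of the provider schema.
output "dataproc_source_cluster_id" {
  description = "ID of the Yandex Data Processing source cluster"
  value       = yandex_dataproc_cluster.dataproc-source-cluster.id
}

output "dataproc_target_cluster_id" {
  description = "ID of the Yandex Data Processing target cluster"
  value       = yandex_dataproc_cluster.dataproc-target-cluster.id
}

output "dataproc_bucket_name" {
  description = "Name of the Object Storage bucket shared by both clusters"
  value       = yandex_storage_bucket.dataproc-bucket.bucket
}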