# 99-clickhouseinstallation-max.yaml
# (from a fork of Altinity/clickhouse-operator)
# We may need to label nodes with the clickhouse=allow label for this example to run
# See ./label_nodes.sh for this purpose
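#
# A typical way to apply such a label is (a sketch; node names are environment-specific
# and ./label_nodes.sh remains the canonical helper):
#   kubectl label nodes <node-name> clickhouse=allow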
apiVersion: "clickhouse.altinity.com/v1"
kind: "ClickHouseInstallation"
metadata:
  name: "clickhouse-installation-max"
  labels:
    label1: label1_value
    label2: label2_value
  annotations:
    annotation1: annotation1_value
    annotation2: annotation2_value
spec:
  stop: "no"
  # List of templates used by a CHI
  useTemplates:
    - name: template1
      namespace: ns1
      useType: merge
    - name: template2
      # No namespace specified - use CHI namespace
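  # Note: template1/template2 above are references to ClickHouseInstallationTemplate
  # objects that are assumed to be defined separately; they are not part of this file.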
  defaults:
    replicasUseFQDN: "no"
    distributedDDL:
      profile: default
    templates:
      podTemplate: clickhouse-v18.16.1
      dataVolumeClaimTemplate: default-volume-claim
      logVolumeClaimTemplate: default-volume-claim
      serviceTemplate: chi-service-template
  configuration:
    zookeeper:
      nodes:
        - host: zookeeper-0.zookeepers.zoo3ns.svc.cluster.local
          port: 2181
        - host: zookeeper-1.zookeepers.zoo3ns.svc.cluster.local
          port: 2181
        - host: zookeeper-2.zookeepers.zoo3ns.svc.cluster.local
          port: 2181
      session_timeout_ms: 30000
      operation_timeout_ms: 10000
      root: "/path/to/zookeeper/root/node"
      identity: "user:password"
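      # A sketch of the <zookeeper> server config section these settings correspond to
      # (node list shortened; the exact XML is generated by the operator):
      # <zookeeper>
      #     <node>
      #         <host>zookeeper-0.zookeepers.zoo3ns.svc.cluster.local</host>
      #         <port>2181</port>
      #     </node>
      #     ...
      #     <session_timeout_ms>30000</session_timeout_ms>
      #     <operation_timeout_ms>10000</operation_timeout_ms>
      #     <root>/path/to/zookeeper/root/node</root>
      #     <identity>user:password</identity>
      # </zookeeper>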
    users:
      readonly/profile: readonly
      # <users>
      #   <readonly>
      #     <profile>readonly</profile>
      #   </readonly>
      # </users>
      test/networks/ip:
        - "127.0.0.1"
        - "::/0"
      # <users>
      #   <test>
      #     <networks>
      #       <ip>127.0.0.1</ip>
      #       <ip>::/0</ip>
      #     </networks>
      #   </test>
      # </users>
      test/profile: default
      test/quotas: default
    profiles:
      readonly/readonly: "1"
      # <profiles>
      #   <readonly>
      #     <readonly>1</readonly>
      #   </readonly>
      # </profiles>
      default/max_memory_usage: "1000000000"
    quotas:
      default/interval/duration: "3600"
      # <quotas>
      #   <default>
      #     <interval>
      #       <duration>3600</duration>
      #     </interval>
      #   </default>
      # </quotas>
    settings:
      compression/case/method: zstd
      # <compression>
      #   <case>
      #     <method>zstd</method>
      #   </case>
      # </compression>
      disable_internal_dns_cache: 1
      # <disable_internal_dns_cache>1</disable_internal_dns_cache>
    files:
      dict1.xml: |
        <yandex>
          <!-- ref to file /etc/clickhouse-data/config.d/source1.csv -->
        </yandex>
      source1.csv: |
        a1,b1,c1,d1
        a2,b2,c2,d2
    clusters:
      - name: all-counts
        templates:
          podTemplate: clickhouse-v18.16.1
          dataVolumeClaimTemplate: default-volume-claim
          logVolumeClaimTemplate: default-volume-claim
        layout:
          shardsCount: 3
          replicasCount: 2
      - name: shards-only
        templates:
          podTemplate: clickhouse-v18.16.1
          dataVolumeClaimTemplate: default-volume-claim
          logVolumeClaimTemplate: default-volume-claim
        layout:
          shardsCount: 3
          # replicasCount not specified, assumed = 1, by default
      - name: replicas-only
        templates:
          podTemplate: clickhouse-v18.16.1
          dataVolumeClaimTemplate: default-volume-claim
          logVolumeClaimTemplate: default-volume-claim
        layout:
          # shardsCount not specified, assumed = 1, by default
          replicasCount: 3
      - name: customized
        templates:
          podTemplate: clickhouse-v18.16.1
          dataVolumeClaimTemplate: default-volume-claim
          logVolumeClaimTemplate: default-volume-claim
        layout:
          shards:
            - name: shard0
              replicasCount: 3
              weight: 1
              internalReplication: Disabled
              templates:
                podTemplate: clickhouse-v18.16.1
                dataVolumeClaimTemplate: default-volume-claim
                logVolumeClaimTemplate: default-volume-claim
            - name: shard1
              templates:
                podTemplate: clickhouse-v18.16.1
                dataVolumeClaimTemplate: default-volume-claim
                logVolumeClaimTemplate: default-volume-claim
              replicas:
                - name: replica0
                - name: replica1
                - name: replica2
            - name: shard2
              replicasCount: 3
              templates:
                podTemplate: clickhouse-v18.16.1
                dataVolumeClaimTemplate: default-volume-claim
                logVolumeClaimTemplate: default-volume-claim
                replicaServiceTemplate: replica-service-template
              replicas:
                - name: replica0
                  tcpPort: 9000
                  httpPort: 8123
                  interserverHTTPPort: 9009
                  templates:
                    podTemplate: clickhouse-v19.11.3.11
                    dataVolumeClaimTemplate: default-volume-claim
                    logVolumeClaimTemplate: default-volume-claim
                    replicaServiceTemplate: replica-service-template
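    # For orientation, a rough tally of the ClickHouse hosts defined by the layouts above:
    #   all-counts:    3 shards x 2 replicas = 6 hosts
    #   shards-only:   3 shards x 1 replica  = 3 hosts
    #   replicas-only: 1 shard  x 3 replicas = 3 hosts
    #   customized:    3 + 3 + 3 replicas    = 9 hosts
    #   total:                                 21 hosts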
  templates:
    serviceTemplates:
      - name: chi-service-template
        # generateName understands different sets of macros,
        # depending on the level of the object for which the Service is being created:
        #
        # For CHI-level Service:
        # 1. {chi} - ClickHouseInstallation name
        # 2. {chiID} - short hashed ClickHouseInstallation name (BEWARE, this is an experimental feature)
        #
        # For Cluster-level Service:
        # 1. {chi} - ClickHouseInstallation name
        # 2. {chiID} - short hashed ClickHouseInstallation name (BEWARE, this is an experimental feature)
        # 3. {cluster} - cluster name
        # 4. {clusterID} - short hashed cluster name (BEWARE, this is an experimental feature)
        # 5. {clusterIndex} - 0-based index of the cluster in the CHI (BEWARE, this is an experimental feature)
        #
        # For Shard-level Service:
        # 1. {chi} - ClickHouseInstallation name
        # 2. {chiID} - short hashed ClickHouseInstallation name (BEWARE, this is an experimental feature)
        # 3. {cluster} - cluster name
        # 4. {clusterID} - short hashed cluster name (BEWARE, this is an experimental feature)
        # 5. {clusterIndex} - 0-based index of the cluster in the CHI (BEWARE, this is an experimental feature)
        # 6. {shard} - shard name
        # 7. {shardID} - short hashed shard name (BEWARE, this is an experimental feature)
        # 8. {shardIndex} - 0-based index of the shard in the cluster (BEWARE, this is an experimental feature)
        #
        # For Replica-level Service:
        # 1. {chi} - ClickHouseInstallation name
        # 2. {chiID} - short hashed ClickHouseInstallation name (BEWARE, this is an experimental feature)
        # 3. {cluster} - cluster name
        # 4. {clusterID} - short hashed cluster name (BEWARE, this is an experimental feature)
        # 5. {clusterIndex} - 0-based index of the cluster in the CHI (BEWARE, this is an experimental feature)
        # 6. {shard} - shard name
        # 7. {shardID} - short hashed shard name (BEWARE, this is an experimental feature)
        # 8. {shardIndex} - 0-based index of the shard in the cluster (BEWARE, this is an experimental feature)
        # 9. {replica} - replica name
        # 10. {replicaID} - short hashed replica name (BEWARE, this is an experimental feature)
        # 11. {replicaIndex} - 0-based index of the replica in the shard (BEWARE, this is an experimental feature)
        # 12. {chiScopeIndex} - 0-based index of the host in the CHI (BEWARE, this is an experimental feature)
        # 13. {chiScopeCycleIndex} - 0-based index of the host's cycle in the CHI scope (BEWARE, this is an experimental feature)
        # 14. {chiScopeCycleOffset} - 0-based offset of the host in the CHI-scope cycle (BEWARE, this is an experimental feature)
        # 15. {clusterScopeIndex} - 0-based index of the host in the cluster (BEWARE, this is an experimental feature)
        # 16. {clusterScopeCycleIndex} - 0-based index of the host's cycle in the cluster scope (BEWARE, this is an experimental feature)
        # 17. {clusterScopeCycleOffset} - 0-based offset of the host in the cluster-scope cycle (BEWARE, this is an experimental feature)
        # 18. {shardScopeIndex} - 0-based index of the host in the shard (BEWARE, this is an experimental feature)
        # 19. {replicaScopeIndex} - 0-based index of the host in the replica (BEWARE, this is an experimental feature)
        # 20. {clusterScopeCycleHeadPointsToPreviousCycleTail} - 0-based cluster-scope index of the previous cycle's tail
        generateName: "service-{chi}"
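        # With this CHI's metadata.name ("clickhouse-installation-max"), the pattern above
        # would resolve to a Service named "service-clickhouse-installation-max".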
        # type ObjectMeta struct from k8s.io/meta/v1
        metadata:
          labels:
            custom.label: "custom.value"
          annotations:
            # For more details on Internal Load Balancer check
            # https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
            cloud.google.com/load-balancer-type: "Internal"
            service.beta.kubernetes.io/aws-load-balancer-internal: "true"
            service.beta.kubernetes.io/azure-load-balancer-internal: "true"
            service.beta.kubernetes.io/openstack-internal-load-balancer: "true"
            service.beta.kubernetes.io/cce-load-balancer-internal-vpc: "true"
            # NLB Load Balancer
            service.beta.kubernetes.io/aws-load-balancer-type: "nlb"
        # type ServiceSpec struct from k8s.io/core/v1
        spec:
          ports:
            - name: http
              port: 8123
            - name: tcp
              port: 9000
          type: LoadBalancer
      - name: replica-service-template
        # type ServiceSpec struct from k8s.io/core/v1
        spec:
          ports:
            - name: http
              port: 8123
            - name: tcp
              port: 9000
            - name: interserver
              port: 9009
          type: ClusterIP
          clusterIP: None
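          # Note: type ClusterIP together with clusterIP: None makes this a headless Service,
          # so each replica gets a stable per-pod DNS name instead of a load-balanced virtual IP.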
      - name: preserve-client-source-ip
        # For more details on Preserving Client Source IP check
        # https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
        spec:
          selector:
            app: example
          ports:
            - name: http
              port: 8123
            - name: tcp
              port: 9000
            - name: interserver
              port: 9009
          externalTrafficPolicy: Local
          type: LoadBalancer
    volumeClaimTemplates:
      - name: default-volume-claim
        # type PersistentVolumeClaimSpec struct from k8s.io/core/v1
        spec:
          # 1. If storageClassName is not specified, the default StorageClass
          #    (which must be set up by the cluster administrator) is used for provisioning.
          # 2. If storageClassName is set to an empty string (''), no storage class is used and
          #    dynamic provisioning is disabled for this PVC. Existing "Available" PVs
          #    (that do not have a storageClassName specified) are considered for binding to the PVC.
          #storageClassName: gold
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: 1Gi
      - name: volume-claim-retain-pvc
        # Keep the PVC from being deleted.
        # Retaining the PVC also keeps the backing PV from deletion. This is useful when we need to keep data intact.
        reclaimPolicy: Retain
        # type PersistentVolumeClaimSpec struct from k8s.io/core/v1
        spec:
          # 1. If storageClassName is not specified, the default StorageClass
          #    (which must be set up by the cluster administrator) is used for provisioning.
          # 2. If storageClassName is set to an empty string (''), no storage class is used and
          #    dynamic provisioning is disabled for this PVC. Existing "Available" PVs
          #    (that do not have a storageClassName specified) are considered for binding to the PVC.
          #storageClassName: gold
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: 1Gi
    podTemplates:
      # Multiple pod templates make it possible to update versions smoothly
      # pod template for ClickHouse v18.16.1
      - name: clickhouse-v18.16.1
        # We may need to label nodes with the clickhouse=allow label for this example to run
        # See ./label_nodes.sh for this purpose
        zone:
          key: "clickhouse"
          values:
            - "allow"
        # Shortcut version for AWS installations
        #zone:
        #  values:
        #    - "us-east-1a"
        # Possible values for podDistribution are:
        # Unspecified - empty value
        # ClickHouseAntiAffinity - AntiAffinity by ClickHouse instances.
        #   Pod pushes away other ClickHouse pods, which allows one ClickHouse instance per node:
        #   CH - (push away) - CH - (push away) - CH
        # ShardAntiAffinity - AntiAffinity by shard name.
        #   Pod pushes away other pods of the same shard (replicas of this shard), which allows one replica of a shard per node.
        #   Other shards are allowed - it does not push all shards away, only the same one.
        #   Used for data loss avoidance - keeps all copies of the shard on different nodes:
        #   shard1,replica1 - (push away) - shard1,replica2 - (push away) - shard1,replica3
        # ReplicaAntiAffinity - AntiAffinity by replica name.
        #   Pod pushes away other pods of the same replica (shards of this replica), which allows one shard of a replica per node.
        #   Other replicas are allowed - it does not push all replicas away, only the same one.
        #   Used to evenly distribute load from "full cluster scan" queries:
        #   shard1,replica1 - (push away) - shard2,replica1 - (push away) - shard3,replica1
        # AnotherNamespaceAntiAffinity - AntiAffinity by "another" namespace.
        #   Pod pushes away pods from another namespace, which allows only same-namespace pods per node:
        #   ns1 - (push away) - ns2 - (push away) - ns3
        # AnotherClickHouseInstallationAntiAffinity - AntiAffinity by "another" ClickHouseInstallation name.
        #   Pod pushes away pods from another ClickHouseInstallation, which allows only same-ClickHouseInstallation pods per node:
        #   CHI1 - (push away) - CHI2 - (push away) - CHI3
        # AnotherClusterAntiAffinity - AntiAffinity by "another" cluster name.
        #   Pod pushes away pods from another cluster, which allows only same-cluster pods per node:
        #   cluster1 - (push away) - cluster2 - (push away) - cluster3
        # MaxNumberPerNode - AntiAffinity by cycle index.
        #   Pod pushes away pods from the same cycle, which makes it possible to specify the maximum number of ClickHouse instances per node.
        #   Used to set up circular replication.
        # NamespaceAffinity - Affinity by namespace.
        #   Pod attracts pods from the same namespace, which allows pods from the same namespace per node:
        #   ns1 + (attracts) + ns1
        # ClickHouseInstallationAffinity - Affinity by ClickHouseInstallation name.
        #   Pod attracts pods from the same ClickHouseInstallation, which allows pods from the same CHI per node:
        #   CHI1 + (attracts) + CHI1
        # ClusterAffinity - Affinity by cluster name.
        #   Pod attracts pods from the same cluster, which allows pods from the same Cluster per node:
        #   cluster1 + (attracts) + cluster1
        # ShardAffinity - Affinity by shard name.
        #   Pod attracts pods from the same shard, which allows pods from the same Shard per node:
        #   shard1 + (attracts) + shard1
        # ReplicaAffinity - Affinity by replica name.
        #   Pod attracts pods from the same replica, which allows pods from the same Replica per node:
        #   replica1 + (attracts) + replica1
        # PreviousTailAffinity - Affinity to overlap cycles. Used to make cyclic pod distribution:
        #   cycle head + (attracts to) + previous cycle tail
        podDistribution:
          - type: ShardAntiAffinity
          - type: MaxNumberPerNode
            number: 2
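        # Taken together, the two entries above mean: at most 2 ClickHouse instances are
        # scheduled per node, and replicas of the same shard never share a node.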
        # type PodSpec struct {} from k8s.io/core/v1
        spec:
          containers:
            - name: clickhouse
              image: yandex/clickhouse-server:19.16
              volumeMounts:
                - name: default-volume-claim
                  mountPath: /var/lib/clickhouse
              resources:
                requests:
                  memory: "64Mi"
                  cpu: "100m"
                limits:
                  memory: "64Mi"
                  cpu: "100m"
      # pod template for ClickHouse v19.11.3.11
      - name: clickhouse-v19.11.3.11
        # type PodSpec struct {} from k8s.io/core/v1
        spec:
          containers:
            - name: clickhouse
              image: yandex/clickhouse-server:19.11.3.11
              volumeMounts:
                - name: default-volume-claim
                  mountPath: /var/lib/clickhouse
              resources:
                requests:
                  memory: "64Mi"
                  cpu: "100m"
                limits:
                  memory: "64Mi"
                  cpu: "100m"
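
# A minimal sketch of applying and inspecting this installation
# (assumes the clickhouse-operator is already running in the target cluster):
#   kubectl apply -n <namespace> -f 99-clickhouseinstallation-max.yaml
#   kubectl -n <namespace> get clickhouseinstallations.clickhouse.altinity.com
#   kubectl -n <namespace> get pods,services,pvc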