#
# Whether to deploy sylva-core
#
deploy_sylva_core: false
########################################################
# Sylva Configurations #
# #
# NOTE: these configurations are effective only if #
# "deploy_sylva_core" is set to "true" #
########################################################
#
# Repo which contains the workload cluster manifests
#
# workload_cluster_manifest_repo: https://gitlab.suse.de/cloud-solutions-sys-eng/metal3-clusters
#
# The workload cluster manifest repo branch to use. By default,
# it is set to the "main" branch.
#
# workload_cluster_manifest_repo_branch: main
#
# The path to the chart (in the workload cluster manifests repo above) to use
#
# workload_cluster_chart_path: ./hlm006
#
# If the repo is private and a trusted CA certificate is needed in
# order to validate the repo's HTTPS certificate, specify the name of the
# secret which contains that CA certificate.
#
# NOTE: the secret is expected to be created beforehand
#
# workload_cluster_secret_ref_name: suse-root-ca
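#
# A sketch of creating such a secret beforehand (the "ca.crt" key name,
# certificate path, and namespace are assumptions; adjust for your setup):
#
#   kubectl create secret generic suse-root-ca \
#     --from-file=ca.crt=/path/to/suse-root-ca.pem \
#     --namespace <target namespace>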
##########################
# General Configurations #
##########################
#
# RKE2 channel to install a specific version of RKE2 server.
# This is to ensure that the given RKE2 version is compatible with the latest
# version of Rancher.
#
rke2_channel_version: v1.24
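#
# NOTE: as an illustration, the RKE2 install script consumes a channel
# value like this via its INSTALL_RKE2_CHANNEL environment variable:
#
#   curl -sfL https://get.rke2.io | INSTALL_RKE2_CHANNEL=v1.24 sh -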
#
# *Your* GitHub personal access token, used to interact with GitHub APIs
# during provisioning of the Metal3 stack.
#
# NOTE: this is *optional*. You only need to specify your GitHub personal
# access token if you consistently run into GitHub API rate limiting problems.
#
#github_token: <your github personal access token here>
#
# Optional: specify the pull request to use for the
# https://github.com/rancher-sandbox/baremetal.git repo. By default, the
# latest main branch is used.
#
#baremetal_pull_request: 63
#
# VM user account. This is the default user for all the VMs.
# This is also the Ansible user, used to set up the Metal3 stack
# on the VMs. This user will also have sudo access on the VMs
# per Ansible and Metal3 requirements.
#
# VM user name
vm_user: metal
# VM user plain text password (not hash)
vm_user_plain_text_password: metal
# NOTE: this should be *your* (local user) SSH public key since *you*
# will be using it to log in to the VMs. The SSH public keys listed
# here will be appended to the VM user's authorized_keys file.
#
vm_authorized_ssh_keys:
- <Your ssh public key here>
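#
# For example, a key entry looks like the following (an illustrative
# placeholder; use the output of e.g. "cat ~/.ssh/id_ed25519.pub"):
#
# - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAA... user@workstation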
# OS image
opensuse_leap_image_url: https://download.opensuse.org/repositories/Cloud:/Images:/Leap_15.5/images/openSUSE-Leap-15.5.x86_64-NoCloud.qcow2
opensuse_leap_image_checksum: sha256:https://download.opensuse.org/repositories/Cloud:/Images:/Leap_15.5/images/openSUSE-Leap-15.5.x86_64-NoCloud.qcow2.sha256
opensuse_leap_image_name: openSUSE-Leap-15.5.x86_64-NoCloud.qcow2
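#
# NOTE: the "sha256:<url>" checksum format above matches Ansible's
# get_url convention, where the checksum value is fetched from that URL
# at download time (assuming the image is downloaded via get_url).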
#########################################
# Network Infrastructure Configurations #
#########################################
# NOTE: the Metal3 demo environment consists of two VMs,
# metal3-network-infra and metal3-core. The metal3-network-infra
# VM contains all the necessary networking infrastructure components that
# the Metal3 stack depends on, which are expected to be available in a
# typical production environment, e.g. a DNS server, a DHCP server, etc.
# The metal3-core VM, on the other hand, has only the essential CAPI and
# Metal3 components, i.e. CAPI, the RKE2 bootstrap and control plane
# providers, the Metal3 infrastructure provider, the baremetal operator,
# and Ironic.
# The external-dns in metal3-core is configured to utilize the DNS server
# running in metal3-network-infra. Likewise, metal3-core uses the
# DNS server in metal3-network-infra for all DNS resolution.
#
# The Metal3 demo environment is expected to have two networks: a
# provisioning network and a public network. The provisioning network is
# used for provisioning, which means it must be able to reach the bare
# metal machines' baseboard management controllers (BMCs). For all intents
# and purposes, the provisioning network is the same as the iLO network
# (or has a route to the iLO network).
# The public network, as the name implies, should have access (egress)
# to the internet. It is used to download bits from public repos
# or endpoints during cluster provisioning, and it can also be used
# to access the workload cluster API endpoints.
#
# For some environments, though not recommended for obvious reasons, the
# provisioning and public networks can be the same, as long as that
# network has public access.
#
# DNS domain for the Metal3 stack. All the public endpoints created in DNS
# will have this domain, e.g. boot.ironic.suse.baremetal, media.suse.baremetal, etc.
#
dns_domain: suse.baremetal
#
# NIC for the provisioning network
#
# NOTE: please configure the NIC name after the "&metal3_provisioning_nic"
# anchor, because this anchor is referenced (as a YAML alias) in the VM
# network configurations below.
#
metal3_provisioning_nic: &metal3_provisioning_nic eth0
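#
# For example, to use eth2 as the provisioning NIC instead (illustrative):
#
# metal3_provisioning_nic: &metal3_provisioning_nic eth2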
#
# Host-to-VM NIC mapping; determines how to map host NICs to
# VM NICs. These are the "--network" parameters passed to virt-install
# when creating the VMs.
#
metal3_vm_libvirt_network_params: '--network bridge=virbr0,model=virtio --network bridge=br-eth3,model=virtio'
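#
# For example, if the host's public bridge were named br0 instead of
# br-eth3 (a hypothetical host layout):
#
# metal3_vm_libvirt_network_params: '--network bridge=virbr0,model=virtio --network bridge=br0,model=virtio'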
###################################################################
# Configurations specific to the Metal3 Network Infrastructure VM #
###################################################################
#
# Provisioning IP
#
metal3_network_infra_provisioning_ip: 192.168.122.8
#
# Public IP
#
metal3_network_infra_public_ip: 10.84.56.112
#
# Whether to enable the DHCP server. It is enabled by default.
#
enable_dhcp: true
#
# The following must be configured if the DHCP server is enabled.
#
#dhcp_router: 192.168.122.1
#dhcp_range: 192.168.122.100,192.168.122.130
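#
# NOTE: the DHCP range should fall within the provisioning subnet and must
# not overlap the static provisioning IPs assigned to the VMs below
# (192.168.122.8 and 192.168.122.9 in this example).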
#
# DNS provider. Valid values are "pdns" or "cloudflare".
# By default, it is set to "pdns".
#
# NOTE: if it is set to "cloudflare", PowerDNS will NOT be installed
# on the network infra VM.
#
dns_provider: pdns
#
# Cloudflare DNS configuration. This configuration is required if
# dns_provider is set to "cloudflare".
#
# cloudflare:
# apiToken: <cf api token>
# proxied: false
#
# DNS server IP. If dns_provider is set to "pdns",
# this must be the provisioning IP of the network infra VM.
# If dns_provider is set to "cloudflare" and the provisioning network
# has access to public DNS, it should be set to
# 1.1.1.1, which is Cloudflare's public DNS server. Otherwise,
# if the provisioning network has no route to public DNS, it must be set
# to the provisioning IP of the network infra VM.
#
dns_server: "{{ metal3_network_infra_provisioning_ip }}"
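#
# For example, with dns_provider set to "cloudflare" and public DNS
# reachable from the provisioning network:
#
# dns_server: 1.1.1.1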
#
# PowerDNS configurations. This configuration is required if
# dns_provider is set to "pdns".
#
pdns_ip: "{{ metal3_network_infra_provisioning_ip }}"
pdns_api_port: 8081
pdns_web_password: IveG0tDaPower8
pdns_api_key: IveG0tDaPower8
#
# Networking configurations for the Metal3 Network Infrastructure VM
#
# NOTE: the network configuration is expected to be specified in
# cloud-init network-config format as the data will be passed into
# cloud-init as is when setting up the VM.
# See https://cloudinit.readthedocs.io/en/latest/reference/network-config-format-v2.html
#
metal3_network_infra_vm_network:
  version: 2
  ethernets:
    *metal3_provisioning_nic:
      dhcp4: false
      addresses: ["{{ metal3_network_infra_provisioning_ip }}/24"]
      nameservers:
        addresses: [8.8.8.8]
        search:
          - "{{ dns_domain }}"
      routes:
        - to: 192.168.122.0/24
          via: 192.168.122.1
    eth1:
      dhcp4: false
  vlans:
    eth1.824:
      id: 824
      link: eth1
      addresses: ["{{ metal3_network_infra_public_ip }}/21"]
      nameservers:
        addresses: [8.8.8.8]
        search:
          - "{{ dns_domain }}"
      routes:
        - to: default
          via: 10.84.63.254
#################################################
# Configurations specific to the Metal3 Core VM #
#################################################
#
# Provisioning IP
#
metal3_core_provisioning_ip: 192.168.122.9
#
# Public IP
#
metal3_core_public_ip: 10.84.56.113
#
# Networking configurations for the Metal3 Core VM
#
# NOTE: the network configuration is expected to be specified in
# cloud-init network-config format as the data will be passed into
# cloud-init as is when setting up the VM.
# See https://cloudinit.readthedocs.io/en/latest/reference/network-config-format-v2.html
#
metal3_core_vm_network:
  version: 2
  ethernets:
    *metal3_provisioning_nic:
      dhcp4: false
      addresses: ["{{ metal3_core_provisioning_ip }}/24"]
      nameservers:
        addresses: ["{{ dns_server }}"]
        search:
          - "{{ dns_domain }}"
      routes:
        - to: 192.168.122.0/24
          via: 192.168.122.1
    eth1:
      dhcp4: false
  vlans:
    eth1.824:
      id: 824
      link: eth1
      addresses: ["{{ metal3_core_public_ip }}/21"]
      nameservers:
        addresses: ["{{ dns_server }}"]
        search:
          - "{{ dns_domain }}"
      routes:
        - to: default
          via: 10.84.63.254
# Storage setup on the Metal3 Core VM
storage:
  class_name: dynamic
  access_mode: ReadWriteMany
  nfs:
    create: true
    path: "/nfs/share"