@@ -17,6 +17,10 @@ HA_NETWORK_V6_ARRAY[1]="2001:db8:20::/64"
17
17
# Directory holding the behave feature files, resolved relative to this script.
# Quoted so paths containing whitespace do not word-split.
BEHAVE_CASE_DIR="$(dirname "$0")/features/"
# Test case names to skip — presumably consumed as an alternation pattern by a
# grep/egrep filter elsewhere; verify against the caller.
BEHAVE_CASE_EXCLUDE="sbd|ocfs2"

# Node-name bookkeeping, populated by get_cluster_new_nodes():
declare -a hanode_list_to_form_cluster   # every hanode* container that should end up in the cluster
declare -a hanode_list_new_members       # hanode* containers not yet joined to the running cluster
declare -a hanode_list_current_cluster   # nodes the running cluster already reports
20
24
read -r -d ' ' SSHD_CONFIG_AZURE << EOM
21
25
PermitRootLogin no
22
26
AuthorizedKeysFile .ssh/authorized_keys
@@ -105,7 +109,7 @@ check_docker_env() {
105
109
for network in ${HA_NETWORK_ARRAY[@]} ; do
106
110
docker network ls| grep -q " $network "
107
111
if [ " $? " -eq 0 ]; then
108
- fatal " HA specific network \" $network \" already exists"
112
+ warning " HA specific network \" $network \" already exists"
109
113
fi
110
114
done
111
115
}
@@ -140,22 +144,29 @@ Users can make the code change under crmsh.git including test cases. This tool w
140
144
141
145
OPTIONS:
142
146
-h, --help Show this help message and exit
143
- -l List existing functional test cases and exit
144
- -n NUM Only setup a cluster with NUM nodes(containers)
147
+ -l List existing functional test cases and exit
148
+ -n NUM NUM of nodes(containers) from hanode1 to hanode $NUM
145
149
-x Don't config corosync on containers(with -n option)
146
150
-d Cleanup the cluster containers
147
- -u Create normal users, and Azure like ssh environment
151
+ -u Run tests as a normal user and enforce an sshd_config close to that of a Public Cloud, e.g. Azure
148
152
-q Create a qnetd node(with -n and -x option)
149
153
150
154
EXAMPLES:
155
+
151
156
To launch 2 nodes with the running cluster with the very basic corosync.conf
152
157
# crmsh.git/test/run-functional-tests -n 2
153
158
154
- To launch 2 nodes without the cluster stack running to play with "crm cluster init/join"
155
- # crmsh.git/run-functional-tests -n 2 -x
159
+ To grow more nodes with a bigger number than '2' in the above example, and skip existing nodes
160
+ # crmsh.git/test/run-functional-tests -n 5
161
+
162
+ To launch 2 nodes without the running cluster, for use cases to play with "crm cluster init/join"
163
+ # crmsh.git/test/run-functional-tests -n 2 -x
164
+
165
+ To grow more nodes without configuring the cluster stack
166
+ # crmsh.git/test/run-functional-tests -n 7 -x
156
167
157
- To launch 2 nodes without the cluster stack running, and a qnetd node(named 'qnetd-node')
158
- # crmsh.git/run-functional-tests -n 2 -x -q
168
+ To launch 2 nodes without the running cluster, and a qnetd node (named 'qnetd-node')
169
+ # crmsh.git/test/run-functional-tests -n 2 -x -q
159
170
160
171
To list the existing test cases. Users could add his own new test cases.
161
172
# crmsh.git/test/run-functional-tests -l
@@ -212,8 +223,12 @@ deploy_ha_node() {
212
223
213
224
info " Deploying \" $node_name \" ..."
214
225
docker run --restart always $docker_options $DOCKER_IMAGE & > /dev/null
226
+ if [ $? -ne 0 ]; then
227
+ warning Likely $node_name already exists.
228
+ return
229
+ fi
215
230
for network in ${HA_NETWORK_ARRAY[@]} ; do
216
- docker network connect $network $node_name
231
+ docker network connect $network $node_name & > /dev/null
217
232
done
218
233
219
234
if [ " $node_name " != " qnetd-node" ]; then
@@ -224,29 +239,26 @@ deploy_ha_node() {
224
239
docker_exec $node_name " echo 'StrictHostKeyChecking no' >> /etc/ssh/ssh_config"
225
240
226
241
if [ " $node_name " != " qnetd-node" ]; then
227
- docker cp $PROJECT_PATH $node_name :/opt/crmsh
228
- info " Building crmsh on \" $node_name \" ..."
229
- docker_exec $node_name " $make_cmd " 1> /dev/null || \
242
+ docker cp $PROJECT_PATH $node_name :/opt/crmsh
243
+ info " Building crmsh on \" $node_name \" ..."
244
+ docker_exec $node_name " $make_cmd " 1> /dev/null || \
230
245
fatal " Building failed on $node_name !"
231
- docker_exec $node_name " chown hacluster:haclient -R /var/log/crmsh"
232
- docker_exec $node_name " chmod g+w -R /var/log/crmsh"
233
- create_alice_bob_carol
234
- if [ " $NORMAL_USER_FLAG " -eq 1 ]; then
235
- set_sshd_config_like_in_azure $node_name
236
- fi
246
+ docker_exec $node_name " chown hacluster:haclient -R /var/log/crmsh"
247
+ docker_exec $node_name " chmod g+w -R /var/log/crmsh"
248
+ create_alice_bob_carol
237
249
else
238
- docker_exec $node_name " useradd -m -s /bin/bash alice 2>/dev/null"
239
- docker_exec $node_name " echo \" alice ALL=(ALL) NOPASSWD:ALL\" > /etc/sudoers.d/alice"
240
- docker_exec $node_name " cp -r /root/.ssh ~alice/ && chown alice:users -R ~alice/.ssh"
241
- info " Create user 'alice' on $node_name "
242
- [ " $NORMAL_USER_FLAG " -eq 1 ] && set_sshd_config_like_in_azure $node_name
250
+ docker_exec $node_name " useradd -m -s /bin/bash alice 2>/dev/null"
251
+ docker_exec $node_name " echo \" alice ALL=(ALL) NOPASSWD:ALL\" > /etc/sudoers.d/alice"
252
+ docker_exec $node_name " cp -r /root/.ssh ~alice/ && chown alice:users -R ~alice/.ssh"
253
+ info " Create user 'alice' on $node_name "
243
254
fi
255
+ [ " $NORMAL_USER_FLAG " -eq 1 ] && set_sshd_config_like_in_azure $node_name
244
256
}
245
257
246
258
247
259
create_node () {
248
260
info " Loading docker image $DOCKER_IMAGE ..."
249
- docker pull $DOCKER_IMAGE & > /dev/null
261
+ docker pull $DOCKER_IMAGE & > /dev/null
250
262
251
263
for index in ${! HA_NETWORK_ARRAY[@]} ; do
252
264
network=${HA_NETWORK_ARRAY[$index]}
@@ -260,40 +272,67 @@ create_node() {
260
272
wait
261
273
}
262
274
275
# Populate the three hanode_list_* globals from the current docker state:
#   hanode_list_to_form_cluster: all hanode* containers, sorted by the numeric
#                                suffix (sort key starts at character 7 of "hanodeN")
#   hanode_list_current_cluster: nodes the running cluster already reports
#                                (empty when hanode1 is absent or not clustered)
#   hanode_list_new_members:     containers present but not yet cluster members
get_cluster_new_nodes() {
	hanode_list_to_form_cluster=($(docker ps -a --format '{{.Names}}' | grep hanode | sort -n -k1.7 | tr '\r' ' '))
	# Errors are discarded on purpose: hanode1 may not exist yet on a fresh run.
	hanode_list_current_cluster=($(docker_exec hanode1 "crm node server 2>/dev/null" 2>/dev/null | sort -n -k1.7 | tr '\r' ' '))
	hanode_list_new_members=()
	local element
	for element in "${hanode_list_to_form_cluster[@]}"; do
		# Whole-word containment test: pad both sides with spaces so that
		# "hanode1" does not match inside "hanode10".
		if ! [[ " ${hanode_list_current_cluster[*]} " == *" $element "* ]]; then
			hanode_list_new_members+=("$element")
		fi
	done
}
263
285
264
286
# Render corosync.conf from COROSYNC_CONF_TEMPLATE for every hanode* container
# and distribute it (plus the authkey generated on hanode1) to all nodes.
# Returns early when get_cluster_new_nodes finds no new members to add.
config_cluster() {
	get_cluster_new_nodes

	if [ ${#hanode_list_new_members[@]} -eq 0 ]; then
		return
	fi
	info "${#hanode_list_new_members[@]} new node(s) '${hanode_list_new_members[*]}'"

	# Build one "node { ... }" nodelist entry per container; nodeid is the
	# 1-based position in the sorted node list.
	insert_str=""
	local i node ip
	for i in $(seq 1 ${#hanode_list_to_form_cluster[@]}); do
		node=${hanode_list_to_form_cluster[$((i-1))]}
		# Ask docker for this container's IPv4 on the HA network and strip the /prefix.
		ip=$(docker network inspect "$HA_NETWORK_ARRAY" --format '{{range .Containers}}{{if eq .Name "'"${node}"'"}}{{.IPv4Address}}{{end}}{{end}}' | awk -F/ '{print $1}')
		insert_str+="\\n\\tnode {\n\t\tring0_addr: $ip\n\t\tnodeid: $i\n\t}"
	done

	corosync_conf_str=$(sed "/nodelist/a \\${insert_str}" <(echo "$COROSYNC_CONF_TEMPLATE"))

	# A two-node cluster needs "two_node: 1" so votequorum can reach quorum.
	if [ ${#hanode_list_to_form_cluster[@]} -eq 2 ]; then
		corosync_conf_str=$(sed "/corosync_votequorum/a \\\\ttwo_node: 1" <(echo "$corosync_conf_str"))
	fi
	docker_exec "hanode1" "echo \"$corosync_conf_str\" > $COROSYNC_CONF"
	if is_container_existing "qnetd-node"; then
		info "Generate corosync.conf without qdevice/qnetd for the cluster hanode{1..${#hanode_list_to_form_cluster[@]}}"
	else
		info "Generate corosync.conf for the cluster hanode{1..${#hanode_list_to_form_cluster[@]}}"
	fi

	info "Copy corosync.conf to all cluster nodes hanode{1..${#hanode_list_to_form_cluster[@]}}"
	for node in "${hanode_list_to_form_cluster[@]}"; do
		if [ "$node" = "hanode1" ]; then
			# The authkey is generated once on hanode1 and copied out below.
			docker_exec "hanode1" "corosync-keygen -l -k $COROSYNC_AUTH &> /dev/null"
		else
			# Wait until passwordless ssh from hanode1 to this node works.
			while :; do
				docker_exec "hanode1" "ssh -T -o Batchmode=yes $node true &> /dev/null" && break
				sleep 1
			done
			docker_exec "hanode1" "scp -p $COROSYNC_CONF $COROSYNC_AUTH $node:/etc/corosync &> /dev/null"
		fi
	done
}
293
328
294
-
295
329
start_cluster () {
296
- for node in $* ; do
330
+ if [ ${# hanode_list_current_cluster[@]} -ne 0 ] && [ ${# hanode_list_new_members[@]} -ne 0 ]; then
331
+ docker_exec hanode1 " corosync-cfgtool -R > /dev/null"
332
+ info On the existing cluster hanode{1..${# hanode_list_current_cluster[@]} }: Reloading corosync.conf... Done
333
+ fi
334
+
335
+ for node in ${hanode_list_new_members[@]} ; do
297
336
docker_exec $node " crm cluster enable && crm cluster start" 1> /dev/null
298
337
if [ " $? " -eq 0 ]; then
299
338
info " Cluster service started on \" $node \" "
@@ -303,35 +342,46 @@ start_cluster() {
303
342
done
304
343
}
305
344
306
-
307
# Return 0 when a container named exactly $1 exists (running or stopped).
is_container_existing() {
	# -x -F: fixed-string whole-line match, so the name cannot act as a regex
	# and "hanode1" does not match "hanode10"; -- guards names starting with '-'.
	docker ps -a --format '{{.Names}}' | grep -qxF -- "$1"
}
313
348
314
349
315
350
# Create the requested containers and (unless corosync config is disabled)
# configure and start the cluster on them.
# $1 is either the desired total node count, or "$@" is an explicit list of
# node names.  Existing containers are skipped, so this can grow a running
# cluster in place.
setup_cluster() {
	get_cluster_new_nodes

	hanodes_array=()
	if is_number "$1"; then
		# Add more nodes after the last existing one, ordered by node name.
		if [ ${#hanode_list_to_form_cluster[@]} -gt 0 ]; then
			# "hanodeN" -> N: strip the 6-character "hanode" prefix.
			last_node_num="${hanode_list_to_form_cluster[-1]:6}"
			warning "Skip creating the existing cluster nodes: hanode{1..${#hanode_list_to_form_cluster[@]}}"
		else
			last_node_num=0
		fi
		num_of_new_nodes=$(($1 - ${#hanode_list_to_form_cluster[@]}))
		if [ "$num_of_new_nodes" -gt 0 ]; then
			for i in $(seq $((last_node_num + 1)) $((last_node_num + num_of_new_nodes))); do
				hanodes_array+=("hanode$i")
			done
		elif [ "$WITH_QNETD_NODE" -eq 0 ]; then
			# Nothing new to create and no qnetd node requested.
			return
		fi
	else
		# Explicit node names were passed instead of a count.
		hanodes_array=("$@")
		num_of_new_nodes=$#
	fi

	if [ "$WITH_QNETD_NODE" -eq 1 ] && ! is_container_existing "qnetd-node"; then
		create_node "${hanodes_array[@]}" "qnetd-node"
	else
		[ "$WITH_QNETD_NODE" -eq 1 ] && warning "Skip creating the existing qnetd-node"
		[ "$num_of_new_nodes" -eq 0 ] && return
		create_node "${hanodes_array[@]}"
	fi

	[ "$CONFIG_COROSYNC_FLAG" -eq 0 ] && return
	config_cluster
	start_cluster
	docker_exec "hanode1" "crm configure property stonith-enabled=false"
}
337
387
@@ -410,7 +460,7 @@ run_origin_regression_test() {
410
460
411
461
# Prepend coverage bootstrapping to /usr/sbin/crm on each given container, so
# every crm invocation records python coverage data (saved at interpreter exit
# via atexit).  Arguments: one or more container names.
prepare_coverage_env() {
	local node
	for node in "$@"; do
		docker exec -t "$node" /bin/sh -c 'sed -i '\''1a\import coverage\nimport atexit\ncov=coverage.Coverage(config_file="/opt/crmsh/test/features/coveragerc")\natexit.register(lambda:(cov.stop(),cov.save()))\ncov.start()'\'' /usr/sbin/crm'
	done
}
416
466
@@ -517,13 +567,13 @@ for case_num in $*;do
517
567
setup_cluster ${node_arry[@]}
518
568
adjust_test_case ${node_arry[0]} $case_file_in_container
519
569
echo
520
- prepare_coverage_env " ${node_arry[@]} "
570
+ prepare_coverage_env " ${node_arry[@]} "
521
571
if [ " $NORMAL_USER_FLAG " -eq 0 ]; then
522
572
info " Running \" $case_file_in_container \" under 'root'..."
523
573
docker_exec ${node_arry[0]} " behave --no-logcapture $case_file_in_container || exit 1" || exit 1
524
574
else
525
575
info " Running \" $case_file_in_container \" under normal user 'alice'..."
526
- docker_exec ${node_arry[0]} " su - alice -c 'sudo behave --no-logcapture $case_file_in_container || exit 1'" || exit 1
576
+ docker_exec ${node_arry[0]} " su - alice -c 'sudo behave --no-logcapture $case_file_in_container || exit 1'" || exit 1
527
577
fi
528
578
fetch_coverage_report " ${node_arry[@]} "
529
579
echo
0 commit comments