
Commit 14bfe4a

Dev: run-functional-tests: "-n" option can grow more nodes
1 parent 31ab66c commit 14bfe4a
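
With this change the "-n NUM" option becomes incremental: instead of failing when HA containers or networks already exist, the script warns, skips the nodes that are already up, and only creates and joins the missing ones up to hanode$NUM. A minimal usage sketch of that workflow, taken from the updated help text in the diff below (paths relative to a crmsh.git checkout):

    # first run: bring up a 2-node cluster with the basic corosync.conf
    crmsh.git/test/run-functional-tests -n 2

    # later run: grow the same cluster to 5 nodes; hanode1/hanode2 are kept,
    # hanode3..hanode5 are created, added to corosync.conf and started
    crmsh.git/test/run-functional-tests -n 5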


test/run-functional-tests

Lines changed: 110 additions & 60 deletions
@@ -17,6 +17,10 @@ HA_NETWORK_V6_ARRAY[1]="2001:db8:20::/64"
 BEHAVE_CASE_DIR="$(dirname $0)/features/"
 BEHAVE_CASE_EXCLUDE="sbd|ocfs2"
 
+declare -a hanode_list_to_form_cluster
+declare -a hanode_list_new_members
+declare -a hanode_list_current_cluster
+
 read -r -d '' SSHD_CONFIG_AZURE << EOM
 PermitRootLogin no
 AuthorizedKeysFile .ssh/authorized_keys
@@ -105,7 +109,7 @@ check_docker_env() {
     for network in ${HA_NETWORK_ARRAY[@]};do
         docker network ls|grep -q "$network"
         if [ "$?" -eq 0 ];then
-            fatal "HA specific network \"$network\" already exists"
+            warning "HA specific network \"$network\" already exists"
         fi
     done
 }
@@ -140,22 +144,29 @@ Users can make the code change under crmsh.git including test cases. This tool w
 
 OPTIONS:
 -h, --help Show this help message and exit
--l List existing functional test cases and exit
--n NUM Only setup a cluster with NUM nodes(containers)
+-l List existing functional test cases and exit
+-n NUM NUM of nodes(containers) from hanode1 to hanode$NUM
 -x Don't config corosync on containers(with -n option)
 -d Cleanup the cluster containers
--u Create normal users, and Azure like ssh environment
+-u Run test as a normal user and enforce sshd_config to be close as Public Cloud, eg. Azure
 -q Create a qnetd node(with -n and -x option)
 
 EXAMPLES:
+
 To launch 2 nodes with the running cluster with the very basic corosync.conf
 # crmsh.git/test/run-functional-tests -n 2
 
-To launch 2 nodes without the cluster stack running to play with "crm cluster init/join"
-# crmsh.git/run-functional-tests -n 2 -x
+To grow more nodes with a bigger number than '2' in the above example, and skip existing nodes
+# crmsh.git/test/run-functional-tests -n 5
+
+To launch 2 nodes without the running cluster, for use cases to play with "crm cluster init/join"
+# crmsh.git/test/run-functional-tests -n 2 -x
+
+To grow more nodes without configure the cluster stack
+# crmsh.git/test/run-functional-tests -n 7 -x
 
-To launch 2 nodes without the cluster stack running, and a qnetd node(named 'qnetd-node')
-# crmsh.git/run-functional-tests -n 2 -x -q
+To launch 2 nodes without the running cluster, and a qnetd node(named 'qnetd-node')
+# crmsh.git/test/run-functional-tests -n 2 -x -q
 
 To list the existing test cases. Users could add his own new test cases.
 # crmsh.git/test/run-functional-tests -l
@@ -212,8 +223,12 @@ deploy_ha_node() {
 
     info "Deploying \"$node_name\"..."
     docker run --restart always $docker_options $DOCKER_IMAGE &> /dev/null
+    if [ $? -ne 0 ]; then
+        warning Likely $node_name already exists.
+        return
+    fi
     for network in ${HA_NETWORK_ARRAY[@]};do
-        docker network connect $network $node_name
+        docker network connect $network $node_name &> /dev/null
     done
 
     if [ "$node_name" != "qnetd-node" ];then
@@ -224,29 +239,26 @@ deploy_ha_node() {
     docker_exec $node_name "echo 'StrictHostKeyChecking no' >> /etc/ssh/ssh_config"
 
     if [ "$node_name" != "qnetd-node" ];then
-        docker cp $PROJECT_PATH $node_name:/opt/crmsh
-        info "Building crmsh on \"$node_name\"..."
-        docker_exec $node_name "$make_cmd" 1> /dev/null || \
+        docker cp $PROJECT_PATH $node_name:/opt/crmsh
+        info "Building crmsh on \"$node_name\"..."
+        docker_exec $node_name "$make_cmd" 1> /dev/null || \
             fatal "Building failed on $node_name!"
-        docker_exec $node_name "chown hacluster:haclient -R /var/log/crmsh"
-        docker_exec $node_name "chmod g+w -R /var/log/crmsh"
-        create_alice_bob_carol
-        if [ "$NORMAL_USER_FLAG" -eq 1 ];then
-            set_sshd_config_like_in_azure $node_name
-        fi
+        docker_exec $node_name "chown hacluster:haclient -R /var/log/crmsh"
+        docker_exec $node_name "chmod g+w -R /var/log/crmsh"
+        create_alice_bob_carol
     else
-        docker_exec $node_name "useradd -m -s /bin/bash alice 2>/dev/null"
-        docker_exec $node_name "echo \"alice ALL=(ALL) NOPASSWD:ALL\" > /etc/sudoers.d/alice"
-        docker_exec $node_name "cp -r /root/.ssh ~alice/ && chown alice:users -R ~alice/.ssh"
-        info "Create user 'alice' on $node_name"
-        [ "$NORMAL_USER_FLAG" -eq 1 ] && set_sshd_config_like_in_azure $node_name
+        docker_exec $node_name "useradd -m -s /bin/bash alice 2>/dev/null"
+        docker_exec $node_name "echo \"alice ALL=(ALL) NOPASSWD:ALL\" > /etc/sudoers.d/alice"
+        docker_exec $node_name "cp -r /root/.ssh ~alice/ && chown alice:users -R ~alice/.ssh"
+        info "Create user 'alice' on $node_name"
     fi
+    [ "$NORMAL_USER_FLAG" -eq 1 ] && set_sshd_config_like_in_azure $node_name
 }
 
 
 create_node() {
     info "Loading docker image $DOCKER_IMAGE..."
-    docker pull $DOCKER_IMAGE &> /dev/null
+    docker pull $DOCKER_IMAGE &> /dev/null
 
     for index in ${!HA_NETWORK_ARRAY[@]};do
         network=${HA_NETWORK_ARRAY[$index]}
@@ -260,40 +272,67 @@ create_node() {
     wait
 }
 
+get_cluster_new_nodes() {
+    hanode_list_to_form_cluster=($(docker ps -a --format '{{.Names}}'|grep hanode|sort -n -k1.7|tr '\r' ' '))
+    hanode_list_current_cluster=($(docker_exec hanode1 "crm node server 2>/dev/null" 2>/dev/null|sort -n -k1.7|tr '\r' ' '))
+    hanode_list_new_members=()
+    for element in "${hanode_list_to_form_cluster[@]}"; do
+        if ! [[ " ${hanode_list_current_cluster[@]} " =~ " $element " ]]; then
+            hanode_list_new_members+=("$element")
+        fi
+    done
+}
 
 config_cluster() {
-    node_num=$#
-    insert_str=""
-    container_ip_array=(`docker network inspect $HA_NETWORK_ARRAY -f '{{range .Containers}}{{printf "%s " .IPv4Address}}{{end}}'`)
+    get_cluster_new_nodes
 
-    for i in $(seq $node_num -1 1);do
-        ip=`echo ${container_ip_array[$((i-1))]}|awk -F/ '{print $1}'`
+    if [ ${#hanode_list_new_members[@]} -eq 0 ]; then
+        return
+    else
+        info ${#hanode_list_new_members[@]} new node\(s\) "'${hanode_list_new_members[@]}'"
+    fi
+
+    insert_str=""
+    for i in $(seq 1 ${#hanode_list_to_form_cluster[@]});do
+        node=${hanode_list_to_form_cluster[$((i-1))]}
+        ip=$(docker network inspect "$HA_NETWORK_ARRAY" --format '{{range .Containers}}{{if eq .Name "'"${node}"'"}}{{.IPv4Address}}{{end}}{{end}}'|awk -F/ '{print $1}')
         insert_str+="\\n\\tnode {\n\t\tring0_addr: $ip\n\t\tnodeid: $i\n\t}"
     done
+
     corosync_conf_str=$(sed "/nodelist/a \\${insert_str}" <(echo "$COROSYNC_CONF_TEMPLATE"))
-    if [ $node_num -eq 2 ];then
+
+    if [ ${#hanode_list_to_form_cluster[@]} -eq 2 ];then
         corosync_conf_str=$(sed "/corosync_votequorum/a \\\\ttwo_node: 1" <(echo "$corosync_conf_str"))
     fi
+    docker_exec "hanode1" "echo \"$corosync_conf_str\" > $COROSYNC_CONF"
+    if is_container_existing "qnetd-node";then
+        info "Generate corosync.conf without qdevice/qnetd for the cluster hanode{1..${#hanode_list_to_form_cluster[@]}}"
+    else
+        info "Generate corosync.conf for the cluster hanode{1..${#hanode_list_to_form_cluster[@]}}"
+    fi
 
-    info "Copy corosync.conf to $*"
-    for node in $*;do
-        if [ $node == $1 ];then
-            docker_exec $1 "echo \"$corosync_conf_str\" >> $COROSYNC_CONF"
-            docker_exec $1 "corosync-keygen -l -k $COROSYNC_AUTH &> /dev/null"
+    info "Copy corosync.conf to all cluster nodes hanode{1..${#hanode_list_to_form_cluster[@]}} "
+    for node in ${hanode_list_to_form_cluster[@]};do
+        if [ $node == "hanode1" ];then
+            docker_exec "hanode1" "corosync-keygen -l -k $COROSYNC_AUTH &> /dev/null"
         else
             while :
             do
-                docker_exec $1 "ssh -T -o Batchmode=yes $node true &> /dev/null" && break
+                docker_exec "hanode1" "ssh -T -o Batchmode=yes $node true &> /dev/null" && break
                 sleep 1
             done
-            docker_exec $1 "scp -p $COROSYNC_CONF $COROSYNC_AUTH $node:/etc/corosync &> /dev/null"
+            docker_exec "hanode1" "scp -p $COROSYNC_CONF $COROSYNC_AUTH $node:/etc/corosync &> /dev/null"
         fi
     done
 }
 
-
 start_cluster() {
-    for node in $*;do
+    if [ ${#hanode_list_current_cluster[@]} -ne 0 ] && [ ${#hanode_list_new_members[@]} -ne 0 ]; then
+        docker_exec hanode1 "corosync-cfgtool -R > /dev/null"
+        info On the existing cluster hanode{1..${#hanode_list_current_cluster[@]}}: Reloading corosync.conf... Done
+    fi
+
+    for node in ${hanode_list_new_members[@]};do
         docker_exec $node "crm cluster enable && crm cluster start" 1> /dev/null
         if [ "$?" -eq 0 ];then
             info "Cluster service started on \"$node\""
@@ -303,35 +342,46 @@ start_cluster() {
     done
 }
 
-
-container_already_exists() {
-    docker ps -a|grep -q "$1"
-    if [ "$?" -eq 0 ];then
-        fatal "Container \"$1\" already running"
-    fi
+is_container_existing() {
+    docker ps -a --format '{{.Names}}' | grep -q "^$1$"
 }
 
 
 setup_cluster() {
-    hanodes_arry=()
-    is_number $1
-    if [ "$?" -eq 0 ];then
-        for i in $(seq 1 $1);do
-            hanodes_arry+=("hanode$i")
-        done
+    get_cluster_new_nodes
+
+    hanodes_array=()
+    if is_number "$1"; then
+        # add more nodes after the last node, ordered by the node name
+        if [ ${#hanode_list_to_form_cluster[@]} -gt 0 ]; then
+            last_node_num="${hanode_list_to_form_cluster[-1]:6}"
+            warning Skip creating the existing cluster nodes: hanode{1..${#hanode_list_to_form_cluster[@]}}
+        else
+            last_node_num=0
+        fi
+        num_of_new_nodes=$(( $1 - ${#hanode_list_to_form_cluster[@]} ))
+        if [ "$num_of_new_nodes" -gt 0 ]; then
+            for i in $(seq $(( last_node_num + 1 )) $(( last_node_num + num_of_new_nodes )) ); do
+                hanodes_array+=("hanode$i")
+            done
+        elif [ $WITH_QNETD_NODE -eq 0 ];then
+            return
+        fi
     else
-        hanodes_arry=($*)
+        hanodes_array=($*)
     fi
 
-    if [ $WITH_QNETD_NODE -eq 1 ];then
-        create_node ${hanodes_arry[@]} "qnetd-node"
+    if [ $WITH_QNETD_NODE -eq 1 ] && ! is_container_existing "qnetd-node";then
+        create_node ${hanodes_array[@]} "qnetd-node"
     else
-        create_node ${hanodes_arry[@]}
+        [ $WITH_QNETD_NODE -eq 1 ] && warning Skip creating the existing qnetd-node
+        [ "$num_of_new_nodes" -eq 0 ] && return
+        create_node ${hanodes_array[@]}
     fi
 
     [ "$CONFIG_COROSYNC_FLAG" -eq 0 ] && return
-    config_cluster ${hanodes_arry[@]}
-    start_cluster ${hanodes_arry[@]}
+    config_cluster
+    start_cluster
     docker_exec "hanode1" "crm configure property stonith-enabled=false"
 }
 
@@ -410,7 +460,7 @@ run_origin_regression_test() {
 
 prepare_coverage_env() {
     for node in $*; do
-        docker exec -t $node /bin/sh -c 'sed -i '\''1a\import coverage\nimport atexit\ncov=coverage.Coverage(config_file="/opt/crmsh/test/features/coveragerc")\natexit.register(lambda:(cov.stop(),cov.save()))\ncov.start()'\'' /usr/sbin/crm'
+        docker exec -t $node /bin/sh -c 'sed -i '\''1a\import coverage\nimport atexit\ncov=coverage.Coverage(config_file="/opt/crmsh/test/features/coveragerc")\natexit.register(lambda:(cov.stop(),cov.save()))\ncov.start()'\'' /usr/sbin/crm'
     done
 }
 
517567
setup_cluster ${node_arry[@]}
518568
adjust_test_case ${node_arry[0]} $case_file_in_container
519569
echo
520-
prepare_coverage_env "${node_arry[@]}"
570+
prepare_coverage_env "${node_arry[@]}"
521571
if [ "$NORMAL_USER_FLAG" -eq 0 ];then
522572
info "Running \"$case_file_in_container\" under 'root'..."
523573
docker_exec ${node_arry[0]} "behave --no-logcapture $case_file_in_container || exit 1" || exit 1
524574
else
525575
info "Running \"$case_file_in_container\" under normal user 'alice'..."
526-
docker_exec ${node_arry[0]} "su - alice -c 'sudo behave --no-logcapture $case_file_in_container || exit 1'" || exit 1
576+
docker_exec ${node_arry[0]} "su - alice -c 'sudo behave --no-logcapture $case_file_in_container || exit 1'" || exit 1
527577
fi
528578
fetch_coverage_report "${node_arry[@]}"
529579
echo
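
The heart of the grow logic is the list comparison in the new get_cluster_new_nodes(): every container named hanode* (from `docker ps -a`) that is not reported by `crm node server` on hanode1 is treated as a new member that still needs corosync.conf and `crm cluster start`. A condensed, standalone sketch of that comparison with made-up node lists (the real function fills the arrays from docker and crmsh instead):

    # hypothetical inputs, for illustration only
    all_hanode_containers=(hanode1 hanode2 hanode3 hanode4 hanode5)  # docker ps -a, filtered on "hanode"
    current_cluster_members=(hanode1 hanode2)                        # "crm node server" run on hanode1
    new_members=()
    for node in "${all_hanode_containers[@]}"; do
        # membership test mirrors the [[ " ... " =~ " $node " ]] check in the script
        [[ " ${current_cluster_members[*]} " == *" $node "* ]] || new_members+=("$node")
    done
    echo "${new_members[@]}"   # -> hanode3 hanode4 hanode5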
