Commit 7a712e6

feat(storage/linstor): Enhance multi-disk support and provisioning flexibility in Linstor SR tests
- Added a `sr_disks_for_all_hosts` fixture to support multiple disks, ensuring availability across all hosts and handling "auto" selection dynamically.
- Renamed the `lvm_disk` fixture to `lvm_disks` and updated it to provision multiple disks collectively, refining the vgcreate and pvcreate logic.
- Introduced `provisioning_type` and `storage_pool_name` fixtures to dynamically configure storage provisioning (thin or thick).
- Refactored the Linstor SR test cases to use the new fixtures, improving test coverage across provisioning types.
- Optimized Linstor installation and cleanup using concurrent execution, reducing setup time.
- Enhanced validation and logging for disk selection.

Signed-off-by: Rushikesh Jadhav <[email protected]>
1 parent 5285223 commit 7a712e6
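As a quick illustration of how the new `sr_disks_for_all_hosts` fixture interprets the existing `--sr-disk` option, here is a minimal sketch of the parsing step only; the sample values are hypothetical, and the option itself is defined elsewhere in the test runner:

```python
import itertools

# Hypothetical values: e.g. "--sr-disk=sdb --sr-disk=sdc,sdd" would yield
# pytestconfig.getoption("sr_disk") == ['sdb', 'sdc,sdd']
disks_args = ['sdb', 'sdc,sdd']

# Same flattening as the fixture: split each value on commas, then chain the lists.
disks = list(itertools.chain(*[disklist.split(',') for disklist in disks_args]))
print(disks)  # ['sdb', 'sdc', 'sdd']
```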

File tree: 3 files changed (+100 −33 lines)

conftest.py

Lines changed: 38 additions & 1 deletion
@@ -307,7 +307,7 @@ def sr_disk_for_all_hosts(pytestconfig, request, host):
         pytest.fail("This test requires exactly one --sr-disk parameter")
     disk = disks[0]
     master_disks = host.available_disks()
-    assert len(master_disks) > 0, "a free disk device is required on the master host"
+    assert len(master_disks) > 0, "A free disk device is required on the master host"
 
     if disk != "auto":
         assert disk in master_disks, \
@@ -329,6 +329,43 @@ def sr_disk_for_all_hosts(pytestconfig, request, host):
     logging.info(f">> Disk or block device {disk} is present and free on all pool members")
     yield candidates[0]
 
+@pytest.fixture(scope='session')
+def sr_disks_for_all_hosts(pytestconfig, request, host):
+    disks_args = pytestconfig.getoption("sr_disk")
+    disks_split = [disklist.split(',') for disklist in disks_args]
+    disks = list(itertools.chain(*disks_split))
+    assert len(disks) > 0, "This test requires at least one --sr-disk parameter"
+    # Fetch available disks on the master host
+    master_disks = host.available_disks()
+    assert len(master_disks) > 0, "A free disk device is required on the master host"
+
+    if "auto" not in disks:
+        # Validate that all specified disks exist on the master host
+        for disk in disks:
+            assert disk in master_disks, \
+                f"Disk or block device {disk} is either not present or already used on the master host"
+        master_disks = [disk for disk in disks if disk in master_disks]
+
+    candidates = list(master_disks)
+
+    # Check if all disks are available on all hosts in the pool
+    for h in host.pool.hosts[1:]:
+        other_disks = h.available_disks()
+        candidates = [d for d in candidates if d in other_disks]
+
+    if "auto" in disks:
+        # Automatically select disks if "auto" is passed
+        assert len(candidates) > 0, \
+            f"Free disk devices are required on all pool members. Pool master has: {' '.join(master_disks)}."
+        logging.info(f">> Found free disk device(s) on all pool hosts: {' '.join(candidates)}. "
+                     f"Using: {', '.join(candidates)}")
+    else:
+        # Ensure specified disks are free on all hosts
+        assert len(candidates) == len(disks), \
+            f"Some specified disks ({', '.join(disks)}) are not free or available on all hosts."
+        logging.info(f">> Disk(s) {', '.join(candidates)} are present and free on all pool members")
+    yield candidates
+
 @pytest.fixture(scope='module')
 def vm_ref(request):
     ref = request.param
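A minimal sketch of how a test could consume the new fixture (a hypothetical test, not part of this commit); it assumes `sr_disks_for_all_hosts` yields a list of device names such as `['sdb', 'sdc']`, each present and free on every pool member:

```python
# Hypothetical consumer of the session-scoped fixture added above.
def test_disks_available_on_all_hosts(host, sr_disks_for_all_hosts):
    for disk in sr_disks_for_all_hosts:
        for member in host.pool.hosts:
            # Each yielded disk should still be reported as free on every pool member.
            assert disk in member.available_disks()
```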

tests/storage/linstor/conftest.py

Lines changed: 50 additions & 22 deletions
@@ -12,57 +12,85 @@
 LINSTOR_PACKAGE = 'xcp-ng-linstor'
 
 @pytest.fixture(scope='package')
-def lvm_disk(host, sr_disk_for_all_hosts):
-    device = '/dev/' + sr_disk_for_all_hosts
+def lvm_disks(host, sr_disks_for_all_hosts, provisioning_type):
+    devices = [f"/dev/{disk}" for disk in sr_disks_for_all_hosts]
     hosts = host.pool.hosts
 
     for host in hosts:
-        try:
-            host.ssh(['pvcreate', '-ff', '-y', device])
-        except commands.SSHCommandFailed as e:
-            if e.stdout.endswith('Mounted filesystem?'):
-                host.ssh(['vgremove', '-f', GROUP_NAME, '-y'])
+        for device in devices:
+            try:
                 host.ssh(['pvcreate', '-ff', '-y', device])
-            elif e.stdout.endswith('excluded by a filter.'):
-                host.ssh(['wipefs', '-a', device])
-                host.ssh(['pvcreate', '-ff', '-y', device])
-            else:
-                raise e
+            except commands.SSHCommandFailed as e:
+                if e.stdout.endswith('Mounted filesystem?'):
+                    host.ssh(['vgremove', '-f', GROUP_NAME, '-y'])
+                    host.ssh(['pvcreate', '-ff', '-y', device])
+                elif e.stdout.endswith('excluded by a filter.'):
+                    host.ssh(['wipefs', '-a', device])
+                    host.ssh(['pvcreate', '-ff', '-y', device])
+                else:
+                    raise e
 
-    host.ssh(['vgcreate', GROUP_NAME, device])
-    host.ssh(['lvcreate', '-l', '100%FREE', '-T', STORAGE_POOL_NAME])
+    device_list = " ".join(devices)
+    host.ssh(['vgcreate', GROUP_NAME] + devices)
+    if provisioning_type == 'thin':
+        host.ssh(['lvcreate', '-l', '100%FREE', '-T', STORAGE_POOL_NAME])
 
-    yield device
+    yield devices
 
     for host in hosts:
         host.ssh(['vgremove', '-f', GROUP_NAME])
-        host.ssh(['pvremove', device])
+        for device in devices:
+            host.ssh(['pvremove', device])
+
+@pytest.fixture(scope="package")
+def storage_pool_name(provisioning_type):
+    return GROUP_NAME if provisioning_type == "thick" else STORAGE_POOL_NAME
+
+@pytest.fixture(params=["thin", "thick"], scope="session")
+def provisioning_type(request):
+    return request.param
 
 @pytest.fixture(scope='package')
-def pool_with_linstor(hostA2, lvm_disk, pool_with_saved_yum_state):
+def pool_with_linstor(hostA2, lvm_disks, pool_with_saved_yum_state):
+    import concurrent.futures
     pool = pool_with_saved_yum_state
-    for host in pool.hosts:
+
+    def is_linstor_installed(host):
         if host.is_package_installed(LINSTOR_PACKAGE):
             raise Exception(
                 f'{LINSTOR_PACKAGE} is already installed on host {host}. This should not be the case.'
             )
 
-    for host in pool.hosts:
+    with concurrent.futures.ThreadPoolExecutor() as executor:
+        executor.map(is_linstor_installed, pool.hosts)
+
+    def install_linstor(host):
+        logging.info(f"Installing {LINSTOR_PACKAGE} on host {host}...")
         host.yum_install([LINSTOR_RELEASE_PACKAGE])
         host.yum_install([LINSTOR_PACKAGE], enablerepo="xcp-ng-linstor-testing")
         # Needed because the linstor driver is not in the xapi sm-plugins list
         # before installing the LINSTOR packages.
         host.ssh(["systemctl", "restart", "multipathd"])
         host.restart_toolstack(verify=True)
 
+    with concurrent.futures.ThreadPoolExecutor() as executor:
+        executor.map(install_linstor, pool.hosts)
+
     yield pool
 
+    def remove_linstor(host):
+        logging.info(f"Cleaning up {LINSTOR_PACKAGE} from host {host}...")
+        host.yum_remove([LINSTOR_PACKAGE])
+
+    with concurrent.futures.ThreadPoolExecutor() as executor:
+        executor.map(remove_linstor, pool.hosts)
+
 @pytest.fixture(scope='package')
-def linstor_sr(pool_with_linstor):
+def linstor_sr(pool_with_linstor, provisioning_type, storage_pool_name):
     sr = pool_with_linstor.master.sr_create('linstor', 'LINSTOR-SR-test', {
-        'group-name': STORAGE_POOL_NAME,
+        'group-name': storage_pool_name,
         'redundancy': str(min(len(pool_with_linstor.hosts), 3)),
-        'provisioning': 'thin'
+        'provisioning': provisioning_type
     }, shared=True)
     yield sr
     sr.destroy()
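A side note on the concurrent helpers above: `concurrent.futures.Executor.map` only re-raises a worker's exception when its results are consumed, so a pattern like the following (a hypothetical wrapper, not part of the commit) would make a failure from `is_linstor_installed` or `install_linstor` surface in the fixture rather than being silently dropped:

```python
import concurrent.futures

def run_on_all_hosts(fn, hosts):
    # Exhausting the iterator returned by executor.map (e.g. via list()) re-raises
    # the first exception thrown by fn in any worker thread.
    with concurrent.futures.ThreadPoolExecutor() as executor:
        return list(executor.map(fn, hosts))

# e.g. run_on_all_hosts(install_linstor, pool.hosts)
```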

tests/storage/linstor/test_linstor_sr.py

Lines changed: 12 additions & 10 deletions
@@ -2,13 +2,13 @@
 import pytest
 import time
 
-from .conftest import STORAGE_POOL_NAME, LINSTOR_PACKAGE
+from .conftest import LINSTOR_PACKAGE
 from lib.commands import SSHCommandFailed
 from lib.common import wait_for, vm_image
 from tests.storage import vdi_is_open
 
 # Requirements:
-# - one XCP-ng host >= 8.2 with an additional unused disk for the SR
+# - two or more XCP-ng hosts >= 8.2 with additional unused disk(s) for the SR
 # - access to XCP-ng RPM repository from the host
 
 class TestLinstorSRCreateDestroy:
@@ -18,15 +18,15 @@ class TestLinstorSRCreateDestroy:
     and VM import.
     """
 
-    def test_create_sr_without_linstor(self, host, lvm_disk):
+    def test_create_sr_without_linstor(self, host, lvm_disks, provisioning_type, storage_pool_name):
         # This test must be the first in the series in this module
         assert not host.is_package_installed('python-linstor'), \
             "linstor must not be installed on the host at the beginning of the tests"
         try:
             sr = host.sr_create('linstor', 'LINSTOR-SR-test', {
-                'group-name': STORAGE_POOL_NAME,
+                'group-name': storage_pool_name,
                 'redundancy': '1',
-                'provisioning': 'thin'
+                'provisioning': provisioning_type
             }, shared=True)
             try:
                 sr.destroy()
@@ -36,13 +36,13 @@ def test_create_sr_without_linstor(self, host, lvm_disk):
         except SSHCommandFailed as e:
             logging.info("SR creation failed, as expected: {}".format(e))
 
-    def test_create_and_destroy_sr(self, pool_with_linstor):
+    def test_create_and_destroy_sr(self, pool_with_linstor, provisioning_type, storage_pool_name):
         # Create and destroy tested in the same test to leave the host as unchanged as possible
         master = pool_with_linstor.master
         sr = master.sr_create('linstor', 'LINSTOR-SR-test', {
-            'group-name': STORAGE_POOL_NAME,
+            'group-name': storage_pool_name,
             'redundancy': '1',
-            'provisioning': 'thin'
+            'provisioning': provisioning_type
         }, shared=True)
         # import a VM in order to detect vm import issues here rather than in the vm_on_linstor_sr fixture used in
         # the next tests, because errors in fixtures break teardown
@@ -147,7 +147,7 @@ def _ensure_resource_remain_diskless(host, controller_option, volume_name, diskl
 
 class TestLinstorDisklessResource:
     @pytest.mark.small_vm
-    def test_diskless_kept(self, host, linstor_sr, vm_on_linstor_sr):
+    def test_diskless_kept(self, host, linstor_sr, vm_on_linstor_sr, storage_pool_name):
         vm = vm_on_linstor_sr
         vdi_uuids = vm.vdi_uuids(sr_uuid=linstor_sr.uuid)
         vdi_uuid = vdi_uuids[0]
@@ -157,10 +157,12 @@ def test_diskless_kept(self, host, linstor_sr, vm_on_linstor_sr):
         for member in host.pool.hosts:
             controller_option += f"{member.hostname_or_ip},"
 
+        sr_group_name = "xcp-sr-" + storage_pool_name.replace("/", "_")
+
         # Get volume name from VDI uuid
         # "xcp/volume/{vdi_uuid}/volume-name": "{volume_name}"
         output = host.ssh([
-            "linstor-kv-tool", "--dump-volumes", "-g", "xcp-sr-linstor_group_thin_device",
+            "linstor-kv-tool", "--dump-volumes", "-g", sr_group_name,
             "|", "grep", "volume-name", "|", "grep", vdi_uuid
         ])
         volume_name = output.split(': ')[1].split('"')[1]
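For reference, the group name passed to `linstor-kv-tool` is now derived from the provisioning mode instead of being hard-coded. A small sketch of the mapping, assuming `GROUP_NAME` is `linstor_group` and `STORAGE_POOL_NAME` is `linstor_group/thin_device` (values inferred from the previously hard-coded `xcp-sr-linstor_group_thin_device`; check `conftest.py` for the actual constants):

```python
# Assumed constants, inferred from the old hard-coded group name.
GROUP_NAME = 'linstor_group'
STORAGE_POOL_NAME = 'linstor_group/thin_device'

def sr_group_name(provisioning_type):
    # Mirrors the storage_pool_name fixture plus the replace() done in the test.
    storage_pool_name = GROUP_NAME if provisioning_type == "thick" else STORAGE_POOL_NAME
    return "xcp-sr-" + storage_pool_name.replace("/", "_")

print(sr_group_name("thin"))   # xcp-sr-linstor_group_thin_device
print(sr_group_name("thick"))  # xcp-sr-linstor_group
```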
