[tox]
envlist =
    flake8,mypy,unittests
    {el8,el9}-{functional}
skipsdist = True
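
# Illustrative invocations (assumption, not part of the upstream file):
#   tox                              # run every env in envlist
#   tox -e flake8,mypy,unittests     # lint, type-check, and unit-test only
#   tox -e el9-functional            # Vagrant-based functional suite on CentOS Stream 9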

[testenv:mypy]
basepython = python3
deps =
    mypy
commands = mypy {toxinidir}/library {toxinidir}/module_utils

[testenv:flake8]
basepython = python3
deps =
    flake8
commands = flake8 --max-line-length 160 {toxinidir}/library/ {toxinidir}/module_utils/ {toxinidir}/tests/library/ {toxinidir}/tests/module_utils

[testenv:unittests]
basepython = python3
deps =
    pytest-xdist
    pytest
    mock
    ansible
setenv =
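    # Make library/, module_utils/, and the test doubles under tests/library
    # importable without installing them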
    PYTHONPATH = {env:PYTHONPATH:}:{toxinidir}/library:{toxinidir}/module_utils:{toxinidir}/tests/library
commands = pytest -vvv -n=auto {toxinidir}/tests/library/ {toxinidir}/tests/module_utils

[testenv:{el8,el9}-functional]
allowlist_externals =
    vagrant
    bash
    pip
passenv = *
sitepackages = True
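# passenv=* forwards the caller's whole environment (notably the QUAY_IO_*
# credentials read below); sitepackages=True lets the env reuse system packages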
setenv =
    # Set the vagrant box image to use
    el8: CEPH_ANSIBLE_VAGRANT_BOX = centos/stream8
    el9: CEPH_ANSIBLE_VAGRANT_BOX = centos/stream9
    ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey
    ANSIBLE_CONFIG = {toxinidir}/ansible.cfg
    ANSIBLE_CALLBACK_WHITELIST = profile_tasks
    ANSIBLE_KEEP_REMOTE_FILES = 1
    ANSIBLE_CACHE_PLUGIN = memory
    ANSIBLE_GATHERING = implicit
    # only available for ansible >= 2.5
    ANSIBLE_STDOUT_CALLBACK = yaml
deps = -r{toxinidir}/tests/requirements.txt
changedir = {toxinidir}/tests/functional
commands =
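    # Boot the Vagrant VMs unprovisioned, then generate the SSH config Ansible uses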
    bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
    bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
    # Bring the systems up to date before deploying
    ansible -vv -i {changedir}/hosts all -b -m command -a 'dnf update -y'
    # Install prerequisites (ceph_origin=shaman pulls development builds from Ceph's shaman repos)
    ansible-playbook -vv -i {changedir}/hosts {toxinidir}/cephadm-preflight.yml --extra-vars "\
        ceph_origin=shaman \
        client_group=clients \
    "
    pytest -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts --ssh-config={changedir}/vagrant_ssh_config {changedir}/tests/test_preflight.py
    # Deploy a Ceph cluster
    ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/deploy-cluster.yml --extra-vars "\
        monitor_address=192.168.9.12 \
        ceph_container_registry_auth=true \
        ceph_container_registry_username={env:QUAY_IO_USERNAME} \
        ceph_container_registry_password={env:QUAY_IO_PASSWORD} \
        fsid=4217f198-b8b7-11eb-941d-5254004b7a69 \
    "
    # Deploy clients using cephadm-clients.yml
    ansible-playbook -vv -i {changedir}/hosts {toxinidir}/cephadm-clients.yml --extra-vars "\
        keyring=/etc/ceph/ceph.client.admin.keyring \
        client_group=clients \
        fsid=4217f198-b8b7-11eb-941d-5254004b7a69 \
    "
    pytest -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts --ssh-config={changedir}/vagrant_ssh_config {changedir}/tests/test_cluster.py {changedir}/tests/test_clients.py
    # Wait for all OSDs to be up before resharding the RocksDB database (osd.0 only)
    ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/wait_all_osd_are_up.yml
    # Reshard the RocksDB database of osd.0
    ansible-playbook -vv -i {changedir}/hosts {toxinidir}/rocksdb-resharding.yml -e fsid=4217f198-b8b7-11eb-941d-5254004b7a69 -e osd_id=0 -e admin_node=ceph-node0
    # Purge the cluster
    ansible-playbook -vv -i {changedir}/hosts {toxinidir}/cephadm-purge-cluster.yml -e ireallymeanit=yes -e fsid=4217f198-b8b7-11eb-941d-5254004b7a69
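    # Check that the purge left the nodes clean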
    pytest -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts --ssh-config={changedir}/vagrant_ssh_config {changedir}/tests/test_purge.py
    vagrant destroy -f
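
# Illustrative invocation of the functional suite (assumption, not part of the
# upstream file). QUAY_IO_USERNAME/QUAY_IO_PASSWORD are consumed via {env:...}
# above, and arguments after "--" replace the {posargs} default of
# "--provider=virtualbox":
#   QUAY_IO_USERNAME=... QUAY_IO_PASSWORD=... tox -e el9-functional -- --provider=libvirt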