diff --git a/cfme/fixtures/cli.py b/cfme/fixtures/cli.py index c5fd956f52..c9d5c0f3ed 100644 --- a/cfme/fixtures/cli.py +++ b/cfme/fixtures/cli.py @@ -1,18 +1,26 @@ from collections import namedtuple +from contextlib import contextmanager +import fauxfactory import pytest from six import iteritems import cfme.utils.auth as authutil +from cfme.cloud.provider.ec2 import EC2Provider +from cfme.configure.configuration.region_settings import RedHatUpdates from cfme.fixtures.appliance import sprout_appliances -from cfme.utils.conf import credentials, auth_data +from cfme.infrastructure.provider.virtualcenter import VMwareProvider +from cfme.test_framework.sprout.client import SproutClient, SproutException +from cfme.utils import conf +from cfme.utils.appliance.implementations.ui import navigate_to +from cfme.utils.conf import credentials, auth_data, cfme_data from cfme.utils.log import logger +from cfme.utils.providers import list_providers_by_class +from cfme.utils.version import Version from cfme.utils.wait import wait_for -TimedCommand = namedtuple('TimedCommand', ['command', 'timeout']) - -""" The Following fixtures are for provisioning one preconfigured or unconfigured appliance for - testing from an FQDN provider unless there are no provisions available""" +REPOSITORIES = ["https://github.com/lcouzens/ansible_playbooks"] +TimedCommand = namedtuple("TimedCommand", ["command", "timeout"]) @pytest.fixture() @@ -65,12 +73,20 @@ def configured_appliance(appliance, pytestconfig): @pytest.fixture(scope="function") def dedicated_db_appliance(app_creds, unconfigured_appliance): - """'ap' launch appliance_console, '' clear info screen, '5' setup db, '1' Creates v2_key, - '1' selects internal db, '1' use partition, 'y' create dedicated db, 'pwd' - db password, 'pwd' confirm db password + wait 360 secs and '' finish.""" + """Commands: + 1. 'ap' launch appliance_console, + 2. '' clear info screen, + 3. '5' setup db, + 4. '1' Creates v2_key, + 5. '1' selects internal db, + 6. '1' use partition, + 7. 'y' create dedicated db, + 8. 'pwd' db password, + 9. 'pwd' confirm db password + wait 360 secs and + 10. 
'' finish.""" app = unconfigured_appliance - pwd = app_creds['password'] - command_set = ('ap', '', '5', '1', '1', '1', 'y', pwd, TimedCommand(pwd, 360), '') + pwd = app_creds["password"] + command_set = ("ap", "", "5", "1", "1", "1", "y", pwd, TimedCommand(pwd, 360), "") app.appliance_console.run_commands(command_set) wait_for(lambda: app.db.is_dedicated_active) yield app @@ -79,12 +95,14 @@ def dedicated_db_appliance(app_creds, unconfigured_appliance): @pytest.fixture(scope="function") def appliance_with_preset_time(temp_appliance_preconfig_funcscope): """Grabs fresh appliance and sets time and date prior to running tests""" - command_set = ('ap', '', '3', 'y', '2020-10-20', '09:58:00', 'y', '') + command_set = ("ap", "", "3", "y", "2020-10-20", "09:58:00", "y", "") temp_appliance_preconfig_funcscope.appliance_console.run_commands(command_set) def date_changed(): return temp_appliance_preconfig_funcscope.ssh_client.run_command( - "date +%F-%T | grep 2020-10-20-09").success + "date +%F-%T | grep 2020-10-20-09" + ).success + wait_for(date_changed) return temp_appliance_preconfig_funcscope @@ -92,15 +110,17 @@ def date_changed(): @pytest.fixture() def ipa_crud(): try: - ipa_keys = [key - for key, yaml in iteritems(auth_data.auth_providers) - if yaml.type == authutil.FreeIPAAuthProvider.auth_type] + ipa_keys = [ + key + for key, yaml in iteritems(auth_data.auth_providers) + if yaml.type == authutil.FreeIPAAuthProvider.auth_type + ] ipa_provider = authutil.get_auth_crud(ipa_keys[0]) except AttributeError: - pytest.skip('Unable to parse auth_data.yaml for freeipa server') + pytest.skip("Unable to parse auth_data.yaml for freeipa server") except IndexError: - pytest.skip('No freeipa server available for testing') - logger.info('Configuring first available freeipa auth provider %s', ipa_provider) + pytest.skip("No freeipa server available for testing") + logger.info("Configuring first available freeipa auth provider %s", ipa_provider) return ipa_provider @@ -108,18 +128,303 @@ def ipa_crud(): @pytest.fixture() def app_creds(): return { - 'username': credentials['database']['username'], - 'password': credentials['database']['password'], - 'sshlogin': credentials['ssh']['username'], - 'sshpass': credentials['ssh']['password'] + "username": credentials["database"]["username"], + "password": credentials["database"]["password"], + "sshlogin": credentials["ssh"]["username"], + "sshpass": credentials["ssh"]["password"], } @pytest.fixture(scope="module") def app_creds_modscope(): return { - 'username': credentials['database']['username'], - 'password': credentials['database']['password'], - 'sshlogin': credentials['ssh']['username'], - 'sshpass': credentials['ssh']['password'] + "username": credentials["database"]["username"], + "password": credentials["database"]["password"], + "sshlogin": credentials["ssh"]["username"], + "sshpass": credentials["ssh"]["password"], } + + +@contextmanager +def get_apps(appliance, old_version, count, preconfigured): + """Requests appliance from sprout based on old_versions, edits partitions and adds + repo file for update""" + series = appliance.version.series() + update_url = "update_url_{}".format(series.replace(".", "")) + usable = [] + sp = SproutClient.from_config() + available_versions = set(sp.call_method("available_cfme_versions")) + for a in available_versions: + if a.startswith(old_version): + usable.append(Version(a)) + usable_sorted = sorted(usable, key=lambda o: o.version) + apps = [] + pool_id = None + try: + apps, pool_id = sp.provision_appliances( + 
count=count,
+            preconfigured=preconfigured,
+            provider_type="rhevm",
+            lease_time=180,
+            version=str(usable_sorted[-1]),
+        )
+        urls = cfme_data["basic_info"][update_url]
+        for app in apps:
+            app.db.extend_partition()
+            app.ssh_client.run_command(
+                "curl {} -o /etc/yum.repos.d/update.repo".format(urls)
+            )
+        yield apps
+    except Exception as e:
+        logger.error("Couldn't provision appliance with following error:{}".format(e))
+        raise SproutException("Not able to configure provision request")
+    finally:
+        for app in apps:
+            app.ssh_client.close()
+        if pool_id:
+            sp.destroy_pool(pool_id)
+
+
+@pytest.fixture
+def appliance_preupdate(appliance, old_version):
+    """Requests a single preconfigured appliance from sprout."""
+    with get_apps(appliance, old_version, count=1, preconfigured=True) as apps:
+        yield apps[0]
+
+
+@pytest.fixture
+def multiple_preupdate_appliances(appliance, old_version):
+    """Requests two unconfigured appliances from sprout."""
+    with get_apps(appliance, old_version, count=2, preconfigured=False) as apps:
+        yield apps
+
+
+@pytest.fixture
+def ha_multiple_preupdate_appliances(appliance, old_version):
+    """Requests three unconfigured appliances from sprout for the HA setup."""
+    with get_apps(appliance, old_version, count=3, preconfigured=False) as apps:
+        yield apps
+
+
+@pytest.fixture
+def ha_appliances_with_providers(ha_multiple_preupdate_appliances, app_creds):
+    """Configure HA environment
+
+    In the code below apps0 is the dedicated database node, apps2 is the EVM/web UI appliance
+    (called 'appliance two' here) and apps1 is the standby database node ('appliance three').
+
+    Appliance one configuring dedicated database, 'ap' launch appliance_console,
+    '' clear info screen, '5' setup db, '1' Creates v2_key, '1' selects internal db,
+    '1' use partition, 'y' create dedicated db, 'pwd' db password, 'pwd' confirm db password +
+    wait 360 secs and '' finish.
+
+    Appliance two creating region in dedicated database, 'ap' launch appliance_console, '' clear
+    info screen, '5' setup db, '2' fetch v2_key, 'app0_ip' appliance ip address, '' default user,
+    'pwd' appliance password, '' default v2_key location, '2' create region in external db, '0' db
+    region number, 'y' confirm create region in external db, 'app0_ip' and '' ip and default port
+    for dedicated db, '' use default db name, '' default username, 'pwd' db password + wait 360
+    seconds and '' finish.
+
+    Appliance one configuring primary node for replication, 'ap' launch appliance_console, '' clear
+    info screen, '6' configure db replication, '1' configure node as primary, '1' cluster node
+    number set to 1, '' default dbname, '' default user, 'pwd' password, 'pwd' confirm password,
+    'app0_ip' primary appliance ip, 'y' confirm settings and wait 60 seconds to configure,
+    '' finish.
+
+    Appliance three configuring standby node for replication, 'ap' launch appliance_console, ''
+    clear info screen, '6' configure db replication, '2' configure node as standby, '2' cluster
+    node number set to 2, '' default dbname, '' default user, 'pwd' password, 'pwd' confirm
+    password, 'app0_ip' primary appliance ip, 'app1_ip' standby appliance ip, 'y' confirm settings
+    and wait 90 seconds to configure, '' finish.
+
+    Appliance two configuring automatic failover of database nodes, 'ap' launch appliance_console,
+    '' clear info screen, '8' configure application database failover monitor, '1' start failover
+    monitor, wait 30 seconds for service to start and '' finish.
+
+    """
+    apps0, apps1, apps2 = ha_multiple_preupdate_appliances
+    app0_ip = apps0.hostname
+    app1_ip = apps1.hostname
+    pwd = app_creds["password"]
+    # Configure first appliance as dedicated database
+    command_set = ("ap", "", "5", "1", "1", "1", "y", pwd, TimedCommand(pwd, 360), "")
+    apps0.appliance_console.run_commands(command_set)
+    wait_for(lambda: apps0.db.is_dedicated_active)
+    # Configure EVM webui appliance, creating a region in the dedicated database
+    command_set = ("ap", "", "5", "2", app0_ip, "", pwd, "", "2", "0", "y", app0_ip, "", "", "",
+                   TimedCommand(pwd, 360), "")
+    apps2.appliance_console.run_commands(command_set)
+    apps2.wait_for_evm_service()
+    apps2.wait_for_web_ui()
+    # Configure primary replication node
+    command_set = ("ap", "", "6", "1", "1", "", "", pwd, pwd, app0_ip, TimedCommand("y", 60),
+                   "")
+    apps0.appliance_console.run_commands(command_set)
+    # Configure secondary (standby) replication node
+    command_set = ("ap", "", "6", "2", "1", "2", "", "", pwd, pwd, app0_ip, app1_ip, "y",
+                   TimedCommand("y", 90), "")
+    apps1.appliance_console.run_commands(command_set)
+    # Configure automatic failover on EVM appliance
+    command_set = ("ap", "", "8", TimedCommand("1", 30), "")
+    apps2.appliance_console.run_commands(command_set)
+
+    def is_ha_monitor_started(appliance):
+        return appliance.ssh_client.run_command(
+            "grep {} /var/www/miq/vmdb/config/failover_databases.yml".format(app1_ip)
+        ).success
+
+    wait_for(
+        is_ha_monitor_started, func_args=[apps2], timeout=300, handle_exception=True
+    )
+    # Add infra/cloud providers
+    provider_app_crud(VMwareProvider, apps2).setup()
+    provider_app_crud(EC2Provider, apps2).setup()
+    return ha_multiple_preupdate_appliances
+
+
+@pytest.fixture
+def replicated_appliances_with_providers(multiple_preupdate_appliances):
+    """Returns two replicated appliances, each owning a database; the first gets the providers."""
+    appl1, appl2 = multiple_preupdate_appliances
+    # configure appliances
+    appl1.configure(region=0)
+    appl1.wait_for_web_ui()
+    appl2.configure(region=99)
+    appl2.wait_for_web_ui()
+    # configure replication between appliances
+    appl1.set_pglogical_replication(replication_type=":remote")
+    appl2.set_pglogical_replication(replication_type=":global")
+    appl2.add_pglogical_replication_subscription(appl1.hostname)
+    # Add infra/cloud providers
+    provider_app_crud(VMwareProvider, appl1).setup()
+    provider_app_crud(EC2Provider, appl1).setup()
+    return multiple_preupdate_appliances
+
+
+@pytest.fixture
+def ext_appliances_with_providers(multiple_preupdate_appliances, app_creds_modscope):
+    """Returns two distributed appliances; the first owns the database and gets the providers."""
+    appl1, appl2 = multiple_preupdate_appliances
+    app_ip = appl1.hostname
+    # configure appliances
+    appl1.configure(region=0)
+    appl1.wait_for_web_ui()
+    appl2.appliance_console_cli.configure_appliance_external_join(
+        app_ip,
+        app_creds_modscope["username"],
+        app_creds_modscope["password"],
+        "vmdb_production",
+        app_ip,
+        app_creds_modscope["sshlogin"],
+        app_creds_modscope["sshpass"],
+    )
+    appl2.wait_for_web_ui()
+    # Add infra/cloud providers
+    provider_app_crud(VMwareProvider, appl1).setup()
+    provider_app_crud(EC2Provider, appl1).setup()
+    return multiple_preupdate_appliances
+
+
+@pytest.fixture
+def enabled_embedded_appliance(appliance_preupdate):
+    """Takes a preconfigured appliance and enables the embedded ansible role"""
+    appliance_preupdate.enable_embedded_ansible_role()
+    assert appliance_preupdate.is_embedded_ansible_running
+    return 
appliance_preupdate + + +@pytest.fixture +def appliance_with_providers(appliance_preupdate): + """Adds providers to appliance""" + appl1 = appliance_preupdate + # Add infra/cloud providers + provider_app_crud(VMwareProvider, appl1).setup() + provider_app_crud(EC2Provider, appl1).setup() + return appliance_preupdate + + +@pytest.fixture(scope="module") +def ansible_repository(appliance): + repositories = appliance.collections.ansible_repositories + repository = repositories.create( + fauxfactory.gen_alpha(), REPOSITORIES[0], description=fauxfactory.gen_alpha() + ) + view = navigate_to(repository, "Details") + refresh = view.toolbar.refresh.click + wait_for( + lambda: view.entities.summary("Properties").get_text_of("Status") == "successful", + timeout=60, + fail_func=refresh, + ) + yield repository + + if repository.exists: + repository.delete() + + +def provider_app_crud(provider_class, appliance): + try: + prov = list_providers_by_class(provider_class)[0] + logger.info("using provider {}".format(prov.name)) + prov.appliance = appliance + return prov + except IndexError: + pytest.skip("No {} providers available (required)".format(provider_class.type)) + + +def provision_vm(request, provider): + """Function to provision appliance to the provider being tested""" + vm_name = "test_rest_db_{}".format(fauxfactory.gen_alphanumeric()) + coll = provider.appliance.provider_based_collection(provider, coll_type="vms") + vm = coll.instantiate(vm_name, provider) + request.addfinalizer(vm.delete) + if not provider.mgmt.does_vm_exist(vm_name): + logger.info("deploying %s on provider %s", vm_name, provider.key) + vm.create_on_provider(allow_skip="default") + else: + logger.info("recycling deployed vm %s on provider %s", vm_name, provider.key) + vm.provider.refresh_provider_relationships() + return vm + + +def update_appliance(appliance): + with appliance: + red_hat_updates = RedHatUpdates( + service="rhsm", + url=conf.cfme_data["redhat_updates"]["registration"]["rhsm"]["url"], + username=conf.credentials["rhsm"]["username"], + password=conf.credentials["rhsm"]["password"], + set_default_repository=True, + ) + red_hat_updates.update_registration(validate=False) + red_hat_updates.check_updates() + wait_for( + func=red_hat_updates.checked_updates, + func_args=[appliance.server.name], + delay=10, + num_sec=100, + fail_func=red_hat_updates.refresh, + ) + if red_hat_updates.platform_updates_available(): + red_hat_updates.update_appliances() + return appliance + + +def upgrade_appliances(appliances): + for appliance in appliances: + result = appliance.ssh_client.run_command("yum update -y", timeout=3600) + assert result.success, "update failed {}".format(result.output) + + +def do_appliance_versions_match(appliance1, appliance2): + """Checks if cfme-appliance has been updated by clearing the cache and checking the versions""" + try: + appliance2.rest_api._load_data() + except Exception: + logger.info("Couldn't reload the REST_API data - does server have REST?") + pass + try: + del appliance2.version + del appliance2.ssh_client.vmdb_version + except AttributeError: + logger.info( + "Couldn't clear one or more cache - best guess it has already been cleared." 
+ ) + assert appliance1.version == appliance2.version diff --git a/cfme/tests/cli/test_appliance_update.py b/cfme/tests/cli/test_appliance_update.py index 9100d6bcdc..054af1018d 100644 --- a/cfme/tests/cli/test_appliance_update.py +++ b/cfme/tests/cli/test_appliance_update.py @@ -1,15 +1,21 @@ +from collections import namedtuple + +import fauxfactory import pytest -from cfme.utils.conf import cfme_data -from cfme.configure.configuration.region_settings import RedHatUpdates -from cfme.fixtures.pytest_store import store +from cfme.fixtures.cli import (provider_app_crud, provision_vm, update_appliance, + do_appliance_versions_match) +from cfme.infrastructure.provider.virtualcenter import VMwareProvider from cfme.test_framework.sprout.client import SproutClient, SproutException -from cfme.utils import conf from cfme.utils.appliance import find_appliance +from cfme.utils.appliance.implementations.ui import navigate_to +from cfme.utils.conf import cfme_data from cfme.utils.log import logger from cfme.utils.version import Version from cfme.utils.wait import wait_for +TimedCommand = namedtuple('TimedCommand', ['command', 'timeout']) +REPOSITORIES = ["https://github.com/lcouzens/ansible_playbooks"] pytestmark = [ pytest.mark.uncollectif(lambda appliance: appliance.is_pod, reason="pod appliance should be updated thru openshift mechanism") @@ -36,7 +42,9 @@ def pytest_generate_tests(metafunc): ))) else: for i in range(int(minor_build) - 1, -1, -1): - versions.append("{}.{}.{}".format(split_ver[0], split_ver[1], i)) + # removing older 5.9 builds due to sprout limitation. + if version < '5.10' and i > 1: + versions.append("{}.{}.{}".format(split_ver[0], split_ver[1], i)) metafunc.parametrize('old_version', versions, indirect=True) @@ -63,7 +71,7 @@ def appliance_preupdate(old_version, appliance): usable.sort(reverse=True) try: apps, pool_id = sp.provision_appliances(count=1, preconfigured=True, - lease_time=180, version=str(usable[0])) + lease_time=180, version=str(usable[0])) except Exception as e: logger.exception("Couldn't provision appliance with following error:{}".format(e)) raise SproutException('No provision available') @@ -81,10 +89,8 @@ def appliance_preupdate(old_version, appliance): @pytest.mark.rhel_testing -@pytest.mark.uncollectif(lambda: not store.current_appliance.is_downstream) def test_update_yum(appliance_preupdate, appliance): """Tests appliance update between versions""" - appliance_preupdate.evmserverd.stop() with appliance_preupdate.ssh_client as ssh: result = ssh.run_command('yum update -y', timeout=3600) @@ -95,50 +101,141 @@ def test_update_yum(appliance_preupdate, appliance): assert result.output in appliance.version -@pytest.fixture(scope="function") -def enabled_embedded_appliance(appliance_preupdate): - """Takes a preconfigured appliance and enables the embedded ansible role""" - appliance_preupdate.enable_embedded_ansible_role() - assert appliance_preupdate.is_embedded_ansible_running - return appliance_preupdate - - -@pytest.fixture(scope="function") -def update_embedded_appliance(enabled_embedded_appliance, appliance): - with enabled_embedded_appliance: - red_hat_updates = RedHatUpdates( - service='rhsm', - url=conf.cfme_data['redhat_updates']['registration']['rhsm']['url'], - username=conf.credentials['rhsm']['username'], - password=conf.credentials['rhsm']['password'], - set_default_repository=True - ) - red_hat_updates.update_registration(validate=False) - red_hat_updates.check_updates() - wait_for( - func=red_hat_updates.checked_updates, - 
func_args=[appliance.server.name], - delay=10, - num_sec=100, - fail_func=red_hat_updates.refresh - ) - if red_hat_updates.platform_updates_available(): - red_hat_updates.update_appliances() - return enabled_embedded_appliance +@pytest.mark.ignore_stream("upstream") +def test_update_webui(appliance_with_providers, appliance, request, old_version): + """ Tests updating an appliance with providers, also confirms that the + provisioning continues to function correctly after the update has completed""" + update_appliance(appliance_with_providers) + + wait_for(do_appliance_versions_match, func_args=(appliance, appliance_with_providers), + num_sec=900, delay=20, handle_exception=True, + message='Waiting for appliance to update') + # Verify that existing provider can detect new VMs on the second appliance + virtual_crud = provider_app_crud(VMwareProvider, appliance_with_providers) + vm = provision_vm(request, virtual_crud) + assert vm.provider.mgmt.does_vm_exist(vm.name), "vm not provisioned" @pytest.mark.ignore_stream("upstream") -def test_embedded_ansible_update(update_embedded_appliance, appliance, old_version): +def test_update_scap_webui(appliance_with_providers, appliance, request, old_version): + """ Tests updating an appliance with providers and scap hardened, also confirms that the + provisioning continues to function correctly after the update has completed""" + appliance_with_providers.appliance_console.scap_harden_appliance() + rules_failures = appliance_with_providers.appliance_console.scap_check_rules() + assert not rules_failures, "Some rules have failed, check log" + update_appliance(appliance_with_providers) + + wait_for(do_appliance_versions_match, func_args=(appliance, appliance_with_providers), + num_sec=900, delay=20, handle_exception=True, + message='Waiting for appliance to update') + # Re-harden appliance and confirm rules are applied. 
+ rules_failures = appliance_with_providers.appliance_console.scap_check_rules() + assert not rules_failures, "Some rules have failed, check log" + # Verify that existing provider can detect new VMs on the second appliance + virtual_crud = provider_app_crud(VMwareProvider, appliance_with_providers) + vm = provision_vm(request, virtual_crud) + assert vm.provider.mgmt.does_vm_exist(vm.name), "vm not provisioned" + + +@pytest.mark.ignore_stream("upstream") +def test_update_embedded_ansible_webui(enabled_embedded_appliance, appliance, old_version): """ Tests updating an appliance which has embedded ansible role enabled, also confirms that the role continues to function correctly after the update has completed""" - def is_appliance_updated(appliance): - """Checks if cfme-appliance has been updated""" - result = update_embedded_appliance.ssh_client.run_command('cat /var/www/miq/vmdb/VERSION') - assert result.output in appliance.version - - wait_for(is_appliance_updated, func_args=[update_embedded_appliance], num_sec=900) - assert wait_for(func=lambda: update_embedded_appliance.is_embedded_ansible_running, num_sec=30) - assert wait_for(func=lambda: update_embedded_appliance.is_rabbitmq_running, num_sec=30) - assert wait_for(func=lambda: update_embedded_appliance.is_nginx_running, num_sec=30) - assert update_embedded_appliance.ssh_client.run_command( - 'curl -kL https://localhost/ansibleapi | grep "AWX REST API"') + update_appliance(enabled_embedded_appliance) + wait_for(do_appliance_versions_match, func_args=(appliance, enabled_embedded_appliance), + num_sec=900, delay=20, handle_exception=True, + message='Waiting for appliance to update') + assert wait_for(func=lambda: enabled_embedded_appliance.is_embedded_ansible_running, num_sec=90) + assert wait_for(func=lambda: enabled_embedded_appliance.is_rabbitmq_running, num_sec=60) + assert wait_for(func=lambda: enabled_embedded_appliance.is_nginx_running, num_sec=60) + repositories = enabled_embedded_appliance.collections.ansible_repositories + name = "example_{}".format(fauxfactory.gen_alpha()) + description = "edited_{}".format(fauxfactory.gen_alpha()) + repository = repositories.create( + name, + REPOSITORIES[0], + description=description) + view = navigate_to(repository, "Details") + refresh = view.toolbar.refresh.click + wait_for( + lambda: view.entities.summary("Properties").get_text_of("Status").lower() == "successful", + timeout=60, + fail_func=refresh + ) + + +@pytest.mark.ignore_stream("upstream") +def test_update_distributed_webui(ext_appliances_with_providers, appliance, request, old_version, + soft_assert): + """ Tests updating an appliance with providers, also confirms that the + provisioning continues to function correctly after the update has completed""" + update_appliance(ext_appliances_with_providers[0]) + wait_for(do_appliance_versions_match, func_args=(appliance, ext_appliances_with_providers[0]), + num_sec=900, delay=20, handle_exception=True, + message='Waiting for appliance to update') + wait_for(do_appliance_versions_match, func_args=(appliance, ext_appliances_with_providers[1]), + num_sec=900, delay=20, handle_exception=True, + message='Waiting for appliance to update') + # Verify that existing provider can detect new VMs on both apps + virtual_crud_appl1 = provider_app_crud(VMwareProvider, ext_appliances_with_providers[0]) + virtual_crud_appl2 = provider_app_crud(VMwareProvider, ext_appliances_with_providers[1]) + vm1 = provision_vm(request, virtual_crud_appl1) + vm2 = provision_vm(request, virtual_crud_appl2) + 
soft_assert(vm1.provider.mgmt.does_vm_exist(vm1.name), "vm not provisioned") + soft_assert(vm2.provider.mgmt.does_vm_exist(vm2.name), "vm not provisioned") + + +@pytest.mark.ignore_stream("upstream") +def test_update_replicated_webui(replicated_appliances_with_providers, appliance, request, + old_version, soft_assert): + """ Tests updating an appliance with providers, also confirms that the + provisioning continues to function correctly after the update has completed""" + providers_before_upgrade = set(replicated_appliances_with_providers[0].managed_provider_names) + update_appliance(replicated_appliances_with_providers[0]) + update_appliance(replicated_appliances_with_providers[1]) + wait_for(do_appliance_versions_match, + func_args=(appliance, replicated_appliances_with_providers[0]), + num_sec=900, delay=20, handle_exception=True, + message='Waiting for appliance to update') + wait_for(do_appliance_versions_match, + func_args=(appliance, replicated_appliances_with_providers[1]), + num_sec=900, delay=20, handle_exception=True, + message='Waiting for appliance to update') + + # Assert providers exist after upgrade and replicated to second appliances + assert providers_before_upgrade == set( + replicated_appliances_with_providers[1].managed_provider_names), 'Providers are missing' + # Verify that existing provider can detect new VMs on both apps + virtual_crud_appl1 = provider_app_crud(VMwareProvider, replicated_appliances_with_providers[0]) + virtual_crud_appl2 = provider_app_crud(VMwareProvider, replicated_appliances_with_providers[1]) + vm1 = provision_vm(request, virtual_crud_appl1) + vm2 = provision_vm(request, virtual_crud_appl2) + soft_assert(vm1.provider.mgmt.does_vm_exist(vm1.name), "vm not provisioned") + soft_assert(vm2.provider.mgmt.does_vm_exist(vm2.name), "vm not provisioned") + + +@pytest.mark.ignore_stream("upstream") +def test_update_ha_webui(ha_appliances_with_providers, appliance, request, old_version): + """ Tests updating an appliance with providers, also confirms that the + provisioning continues to function correctly after the update has completed""" + update_appliance(ha_appliances_with_providers[2]) + wait_for(do_appliance_versions_match, func_args=(appliance, ha_appliances_with_providers[2]), + num_sec=900, delay=20, handle_exception=True, + message='Waiting for appliance to update') + # Cause failover to occur + result = ha_appliances_with_providers[0].ssh_client.run_command( + 'systemctl stop $APPLIANCE_PG_SERVICE', timeout=15) + assert result.success, "Failed to stop APPLIANCE_PG_SERVICE: {}".format(result.output) + + def is_failover_started(): + return ha_appliances_with_providers[2].ssh_client.run_command( + "grep 'Starting to execute failover' /var/www/miq/vmdb/log/ha_admin.log").success + + wait_for(is_failover_started, timeout=450, handle_exception=True, + message='Waiting for HA failover') + ha_appliances_with_providers[2].wait_for_evm_service() + ha_appliances_with_providers[2].wait_for_web_ui() + # Verify that existing provider can detect new VMs + virtual_crud = provider_app_crud(VMwareProvider, ha_appliances_with_providers[2]) + vm = provision_vm(request, virtual_crud) + assert vm.provider.mgmt.does_vm_exist(vm.name), "vm not provisioned" diff --git a/cfme/utils/appliance/__init__.py b/cfme/utils/appliance/__init__.py index 3ee5f42dc7..22c626fcec 100644 --- a/cfme/utils/appliance/__init__.py +++ b/cfme/utils/appliance/__init__.py @@ -1,7 +1,11 @@ import json import logging +import os +import re import socket +import tempfile import traceback +import 
warnings from copy import copy from datetime import datetime from tempfile import NamedTemporaryFile @@ -10,12 +14,10 @@ import attr import dateutil.parser import fauxfactory -import os -import re +import lxml.etree import requests import sentaku import six -import warnings import yaml from cached_property import cached_property from debtcollector import removals @@ -30,6 +32,7 @@ from cfme.utils import clear_property_cache from cfme.utils import conf, ssh, ports from cfme.utils.blockers import BZ +from cfme.utils.conf import hidden from cfme.utils.datafile import load_data_file from cfme.utils.log import logger, create_sublogger, logger_wrap from cfme.utils.net import net_check @@ -131,6 +134,56 @@ def run_commands(self, commands, autoreturn=True, timeout=10, channel=None): pass logger.debug(result) + def scap_harden_appliance(self): + """Commands: + 1. 'ap' launches appliance_console, + 2. '' clears info screen, + 3. '14' Hardens appliance using SCAP configuration, + 4. '' complete.""" + command_set = ('ap', '', '13', '') + self.appliance.appliance_console.run_commands(command_set) + + def scap_check_rules(self): + """Check that rules have been applied correctly.""" + rules_failures = [] + with tempfile.NamedTemporaryFile('w') as f: + f.write(hidden['scap.rb']) + f.flush() + os.fsync(f.fileno()) + self.appliance.ssh_client.put_file( + f.name, '/tmp/scap.rb') + if self.appliance.version >= "5.8": + rules = '/var/www/miq/vmdb/productization/appliance_console/config/scap_rules.yml' + else: + rules = '/var/www/miq/vmdb/gems/pending/appliance_console/config/scap_rules.yml' + self.appliance.ssh_client.run_command( + 'cd /tmp/ && ruby scap.rb --rulesfile={rules}'.format(rules=rules)) + self.appliance.ssh_client.get_file( + '/tmp/scap-results.xccdf.xml', '/tmp/scap-results.xccdf.xml') + self.appliance.ssh_client.get_file( + '{rules}'.format(rules=rules), '/tmp/scap_rules.yml') # Get the scap rules + + with open('/tmp/scap_rules.yml') as f: + yml = yaml.load(f.read()) + rules = yml['rules'] + + tree = lxml.etree.parse('/tmp/scap-results.xccdf.xml') + root = tree.getroot() + for rule in rules: + elements = root.findall( + './/{{http://checklists.nist.gov/xccdf/1.1}}rule-result[@idref="{}"]'.format(rule)) + if elements: + result = elements[0].findall('./{http://checklists.nist.gov/xccdf/1.1}result') + if result: + if result[0].text != 'pass': + rules_failures.append(rule) + logger.info("{}: {}".format(rule, result[0].text)) + else: + logger.info("{}: no result".format(rule)) + else: + logger.info("{}: rule not found".format(rule)) + return rules_failures + class ApplianceConsoleCli(object):
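
Note on the XCCDF lookup in scap_check_rules() above: the rule-result elements live in the
http://checklists.nist.gov/xccdf/1.1 namespace, which is why the findall path uses doubled braces
inside the format string. Below is a minimal standalone sketch of the same lookup pattern; the XML
fragment and rule id are invented purely for illustration and are not part of this change.

import lxml.etree

# Invented XCCDF fragment; the real file is fetched from /tmp/scap-results.xccdf.xml
# on the appliance by scap_check_rules().
XCCDF_SAMPLE = b"""<TestResult xmlns="http://checklists.nist.gov/xccdf/1.1">
  <rule-result idref="example_rule_id">
    <result>fail</result>
  </rule-result>
</TestResult>"""

NS = "{http://checklists.nist.gov/xccdf/1.1}"
root = lxml.etree.fromstring(XCCDF_SAMPLE)

# Same pattern as scap_check_rules(): find the rule-result for a rule id, read the
# nested <result> text and treat anything other than 'pass' as a failure.
rule = "example_rule_id"
elements = root.findall('.//{ns}rule-result[@idref="{rule}"]'.format(ns=NS, rule=rule))
result = elements[0].findall("./{ns}result".format(ns=NS)) if elements else []
if result and result[0].text != "pass":
    print("{}: {}".format(rule, result[0].text))  # rule failed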