diff --git a/.gitignore b/.gitignore index 464e916..e47e765 100644 --- a/.gitignore +++ b/.gitignore @@ -96,4 +96,5 @@ ENV/ # Generated files by framework examples/baseruntime/generated.py -examples/memcached/generated.py \ No newline at end of file +examples/memcached/generated.py + diff --git a/.gitmodules b/.gitmodules index 7d49d0b..459aded 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,7 @@ [submodule "tools/check_modulemd"] path = tools/check_modulemd url = https://github.com/fedora-modularity/check_modulemd + +[submodule "build_manpages"] + path = build_manpages + url = https://github.com/praiskup/build_manpages diff --git a/.tito/packages/meta-test-family b/.tito/packages/meta-test-family index 5955df4..64108fe 100644 --- a/.tito/packages/meta-test-family +++ b/.tito/packages/meta-test-family @@ -1 +1 @@ -0.7.3-1 ./ +0.7.8-1 ./ diff --git a/.travis.yml b/.travis.yml index 162adf6..50433fe 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,4 +1,5 @@ language: python + python: - "2.7" @@ -8,21 +9,22 @@ services: - docker before_install: - - sudo apt-get -y install curl python-software-properties software-properties-common python-pip python-dev build-essential git make - - sudo apt-get -y install libkrb5-dev netcat mysql-client-5.5 - - sudo pip install avocado-framework - - pip install avocado-framework + - sudo ./requirements.sh + - ./requirements.sh -install: sudo make install +install: sudo make install_pip script: + - sudo make travis - sudo make -C examples/testing-module check-inheritance - sudo make -C examples/testing-module check-default-config - sudo make -C examples/testing-module check-pure-docker - sudo make -C examples/testing-module check-exceptions - sudo make -C examples/testing-module check-test-mtf-bin-modulelint - - sudo make travis - sudo make -C examples/testing-module check-real-rpm-destructive - sudo make -C examples/testing-module check-docker-scl-multi-travis + - sudo make -C examples/testing-module 
check-mtf-pdc-module-info-reader + - sudo make -C mtf/metadata check + - sudo make -C examples/testing-module check-mtf-metadata after_script: sudo cat ~/avocado/job-results/latest/job.log diff --git a/MANIFEST.in b/MANIFEST.in index 22efa61..d380ac3 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -10,3 +10,5 @@ recursive-include docs * recursive-include examples * recursive-include tools * recursive-include distro * +recursive-include man * +recursive-include build_manpages *.py diff --git a/Makefile b/Makefile index 7950cad..a589226 100644 --- a/Makefile +++ b/Makefile @@ -2,20 +2,36 @@ NAME=moduleframework INSTALLPATH=/usr/share/$(NAME) PYTHONSITE=/usr/lib/python2.7/site-packages -all: install check +all: install_pip check check: make -C examples/testing-module check +check-linter: + @# don't use $(shell ) -- it messes out output + cd examples/linter/tools && PYTHONPATH=${PWD} MODULE=docker ${PWD}/tools/mtf -l + cd examples/linter/rhscl-postgresql && PYTHONPATH=${PWD} MODULE=docker ${PWD}/tools/mtf -l + cd examples/linter/rhscl-nginx && PYTHONPATH=${PWD} MODULE=docker ${PWD}/tools/mtf -l + cd examples/linter/f26-etcd && PYTHONPATH=${PWD} MODULE=docker ${PWD}/tools/mtf -l + cd examples/linter/f26-flannel && PYTHONPATH=${PWD} MODULE=docker ${PWD}/tools/mtf -l + travis: make -C examples/testing-module travis + cd examples/linter/tools && PYTHONPATH=${PWD} MODULE=docker mtf -l .PHONY: clean +clean_pip: + pip uninstall . + rm -rf build/* dist/* + + +install_pip: clean_pip + pip install -U . + clean: @python setup.py clean - git clean -fd - rm -rf build/html + rm -rf build/* dist/* install: clean @python setup.py install diff --git a/Makefile.docs b/Makefile.docs index 0fed393..262d472 100644 --- a/Makefile.docs +++ b/Makefile.docs @@ -3,7 +3,7 @@ # You can set these variables from the command line. 
SPHINXOPTS = -SPHINXBUILD = sphinx-build +SPHINXBUILD = sphinx-build-2 PAPER = BUILDDIR = build diff --git a/Vagrantfile b/Vagrantfile index 506d27e..b31dd2e 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -45,11 +45,10 @@ Vagrant.configure(2) do |config| set -x TARGET=#{ENV['TARGET']} test -z "$TARGET" && TARGET=check-docker - - dnf install -y make docker httpd git python2-avocado python2-avocado-plugins-output-html \ - python-netifaces redhat-rpm-config python2-devel python-gssapi krb5-devel cd /opt/meta-test-family - make install + git submodule update --init + ./requirements.sh + make install_pip make -C examples/testing-module $TARGET cp -r /root/avocado /var/www/html/ chmod -R a+x /var/www/html/ diff --git a/build_manpages b/build_manpages new file mode 160000 index 0000000..44d1548 --- /dev/null +++ b/build_manpages @@ -0,0 +1 @@ +Subproject commit 44d1548b21490de18af92c59fa29ccf7ce19d87b diff --git a/docs/conf.py b/docs/conf.py index a918447..9da5ab8 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -22,8 +22,6 @@ mtf_dir = os.path.abspath('..') sys.path.insert(0, mtf_dir) -#from moduleframework import version as mtf_version - # Determine if this script is running inside RTD build environment on_rtd = os.environ.get('READTHEDOCS', None) == 'True' diff --git a/docs/example-config.yaml b/docs/example-config.yaml index b41f0e6..2988937 100644 --- a/docs/example-config.yaml +++ b/docs/example-config.yaml @@ -14,8 +14,10 @@ name: memcached # MANDATORY (or compose-url) modulemd-url: https://src.fedoraproject.org/modules/memcached/raw/master/f/memcached.yaml # final compose done by pungi (contain also modulemd files for modules) can suppy also previous part +# if you use compose, url,repo,repos in module->rpm will be ignored, and there will not be added more repos +# be carefull when using compose url # env var: COMPOSEURL -compose-url: url_to_compose_in done in fedora +compose-url: 
https://kojipkgs.fedoraproject.org/compose/latest-Fedora-Modular-26/compose/Server/x86_64/os/ # variables what could be used in test service: port: 11211 diff --git a/docs/howitworks.dia b/docs/howitworks.dia index 82aef4a..e77b66b 100644 Binary files a/docs/howitworks.dia and b/docs/howitworks.dia differ diff --git a/docs/howitworks.png b/docs/howitworks.png index baa6ece..8ba2ceb 100644 Binary files a/docs/howitworks.png and b/docs/howitworks.png differ diff --git a/docs/user_guide/environment_setup.rst b/docs/user_guide/environment_setup.rst index 890e289..3416d4b 100644 --- a/docs/user_guide/environment_setup.rst +++ b/docs/user_guide/environment_setup.rst @@ -21,12 +21,24 @@ Manual Setup - No any configuration needed +**OpenShift** + + - Install OpenShift if not installed and if environment variable ``OPENSHIFT_LOCAL`` is specified. + - if ``OPENSHIFT_LOCAL`` variable is specified, then it starts an OpenShift by command ``oc cluster up`` or stops it by command ``oc cluster down``. + Automated Setup ~~~~~~~~~~~~~~~ The environment configuration scripts should be executed in the same directory where the tests are, otherwise the environment variable **CONFIG** should be set. - to setup environment run ``MODULE=docker mtf-env-set`` - - to execute tests run ``MODULE=docker avocado run your.test.py`` + - to execute tests run ``MODULE=docker mtf your.test.py`` - to cleanup environment ``MODULE=docker mtf-env-clean`` +Test Creation +~~~~~~~~~~~~~~~ + +There is a script called mtf-init which generates easy template of test for module docker as example. 
+ + - to create template for module docker ``mtf-init --name your_name --container path_to_your_container`` + diff --git a/docs/user_guide/environment_variables.rst b/docs/user_guide/environment_variables.rst index 7d12672..3ac7139 100644 --- a/docs/user_guide/environment_variables.rst +++ b/docs/user_guide/environment_variables.rst @@ -24,6 +24,12 @@ Environment variables allow to overwrite some values of a module configuration f - **MTF_REUSE=yes** uses the same module between tests. It speeds up test execution. It can cause side effects. - **MTF_REMOTE_REPOS=yes** disables downloading of Koji packages and creating a local repo, and speeds up test execution. - **MTF_DISABLE_MODULE=yes** disables module handling to use nonmodular test mode (see `multihost tests`_ as an example). +- **DOCKERFILE=" +LABEL summary="High Performance, Distributed Memory Object Cache" \ + name="$FGC/$NAME" \ + version="0" \ + release="1.$DISTTAG" \ + architecture="$ARCH" \ + com.redhat.component=$NAME \ + usage="docker run -p 11211:11211 f26/memcached" \ + help="Runs memcached, which listens on port 11211. No dependencies. See Help File below for more details." \ + description="memcached is a high-performance, distributed memory object caching system, generic in nature, but intended for use in speeding up dynamic web applications by alleviating database load." \ + io.k8s.description="memcached is a high-performance, distributed memory object caching system, generic in nature, but intended for use in speeding up dynamic web applications by alleviating database load." 
\ + io.k8s.diplay-name="Memcached 1.4 " \ + io.openshift.expose-services="11211:memcached" \ + io.openshift.tags="memcached" + +COPY repos/* /etc/yum.repos.d/ +RUN microdnf --nodocs --enablerepo memcached install memcached && \ + microdnf -y clean all + + +ADD files /files +ADD README.md / + +EXPOSE 11211 + +# memcached will be run under standard user on Fedora +USER 1000 + +CMD ["/files/memcached.sh"] diff --git a/examples/mtf-linters/config.yaml b/examples/mtf-linters/config.yaml new file mode 100644 index 0000000..d551115 --- /dev/null +++ b/examples/mtf-linters/config.yaml @@ -0,0 +1,37 @@ +document: meta-test +version: 1 +name: memcached +modulemd-url: https://src.fedoraproject.org/modules/memcached/raw/master/f/memcached.yaml +service: + port: 11211 +packages: + rpms: + - memcached + - perl-Carp +testdependencies: + rpms: + - nc +module: + docker: + start: "docker run -it -e CACHE_SIZE=128 -p 11211:11211" + labels: + description: "memcached is a high-performance, distributed memory" + io.k8s.description: "memcached is a high-performance, distributed memory" + source: https://github.com/container-images/memcached.git + container: docker.io/modularitycontainers/memcached + rpm: + start: systemctl start memcached + stop: systemctl stop memcached + status: systemctl status memcached + url: https://kojipkgs.fedoraproject.org/compose/latest-Fedora-Modular-26/compose/Server/x86_64/os/ +test: + processrunning: + - 'ls /proc/*/exe -alh | grep memcached' +testhost: + selfcheck: + - 'echo errr | nc localhost 11211' + - 'echo set AAA 0 4 2 | nc localhost 11211' + - 'echo get AAA | nc localhost 11211' + selcheckError: + - 'echo errr | nc localhost 11211 |grep ERROR' + diff --git a/examples/mtf-linters/help.md b/examples/mtf-linters/help.md new file mode 100644 index 0000000..b79f79e --- /dev/null +++ b/examples/mtf-linters/help.md @@ -0,0 +1,31 @@ +% MEMCACHED(1) Container Image Pages +% Petr Hracek +% February 6, 2017 + +# NAME +{{ spec.envvars.name }} - {{ 
spec.description }} + +# DESCRIPTION +Memcached is a high-performance, distributed memory object caching system, generic in nature, but intended for use in speeding up dynamic web applications by alleviating database load. + +The container itself consists of: + - fedora/{{ config.os.version }} base image + - {{ spec.envvars.name }} RPM package + +Files added to the container during docker build include: /files/memcached.sh + +# USAGE +To get the memcached container image on your local system, run the following: + + docker pull docker.io/modularitycontainers/{{ spec.envvars.name }} + + +# SECURITY IMPLICATIONS +Lists of security-related attributes that are opened to the host. + +-p 11211:11211 + Opens container port 11211 and maps it to the same port on the host. + +# SEE ALSO +Memcached page + diff --git a/examples/mtf_systemd_test/Makefile b/examples/mtf_systemd_test/Makefile new file mode 100644 index 0000000..4e33995 --- /dev/null +++ b/examples/mtf_systemd_test/Makefile @@ -0,0 +1,9 @@ +test: + avocado run example1.py + rm -rf /tmp/dddd1 + REPOS=http://ftp.fi.muni.cz/pub/linux/centos/7/os/x86_64/ avocado run example1.py + rm -rf /tmp/dddd1 + +all: test + +.PHONY: all \ No newline at end of file diff --git a/examples/mtf_systemd_test/example1.py b/examples/mtf_systemd_test/example1.py new file mode 100644 index 0000000..482ebfc --- /dev/null +++ b/examples/mtf_systemd_test/example1.py @@ -0,0 +1,100 @@ +from avocado import Test +from mtf.backend.nspawn import Image, Container +from avocado.utils import process +import os + +# Centos Hack. it does not support wait athought fedora support it () +# [stderr] Failed to start transient service unit: Cannot set property AddRef, or unknown property. 
+# force disable wait support +Container._run_systemdrun_decide = lambda x:False + +if os.environ.get("REPOS"): + repo = os.environ.get("REPOS").split(";") +else: + repo = ["http://ftp.fi.muni.cz/pub/linux/fedora/linux/releases/26/Everything/x86_64/os/"] +packages = ["bash", "iproute", "passwd"] + +class testSystemd1(Test): + c1 = None + cname = "contA" + sname = "nonexistingservice" + exitcode = 2 + def setUp(self): + loc1 = "/tmp/dddd1" + self.i1 = Image(repos=repo, packageset=packages, location=loc1, ignore_installed=True) + self.c1 = Container(image=self.i1, name=self.cname) + self.c1.boot_machine() + + def test_basic(self): + self.assertIn("sbin",self.c1.execute(command="ls /").stdout) + + def test_status(self): + self.assertIn("systemd-logind", self.c1.execute(command="systemctl status").stdout) + self.assertNotIn("gnome",self.c1.execute(command="systemctl status").stdout) + + def test_exception(self): + self.assertRaises(process.CmdError, self.c1.execute, "badcommand") + self.assertRaises(process.CmdError, self.c1.execute, "exit %s" % self.exitcode) + self.assertEqual(self.exitcode,self.c1.execute(command = "exit %s" % self.exitcode, ignore_status=True).exit_status) + + def test_nonexisting_service_start(self): + self.assertEqual(5, self.c1.execute(command="systemctl start %s" % self.sname, ignore_status=True).exit_status) + + def test_nonexisting_service_status(self): + self.assertEqual(4, self.c1.execute(command="systemctl status %s" % self.sname, ignore_status=True).exit_status) + + def test_nonexisting_service_stop(self): + self.assertEqual(5, self.c1.execute(command="systemctl stop %s" % self.sname, ignore_status=True).exit_status) + + def test_nonexisting_action(self): + self.assertEqual(1, self.c1.execute(command="systemctl %s" % self.sname, ignore_status=True).exit_status) + + def tearDown(self): + self.c1.stop() + +class testSystemd2(Test): + """ + It tests Container object and his abilities to run various commands + """ + c1 = None + cname = 
"contA" + def setUp(self): + loc1 = "/tmp/dddd1" + self.i1 = Image(repos=repo, packageset=packages, location=loc1, ignore_installed=True) + + def test_basic(self): + self.c1 = Container(image=self.i1, name=self.cname) + self.assertIn("sbin", self.c1.boot_machine(boot_cmd="ls /", wait_finish=True).get_stdout()) + self.c1.boot_machine(boot_cmd="""bash -c "echo redhat | passwd --stdin" """, wait_finish=True) + self.c1.boot_machine() + self.assertIn("sbin",self.c1.execute(command="ls /").stdout) + + def tearDown(self): + self.c1.stop() + + +class testSystemdMultihost(Test): + c1 = None + c2 = None + loc1 = "/tmp/dddd1" + loc2 = "/tmp/dddd2" + loc3 = "/tmp/dddd3" + + def setUp(self): + self.i1 = Image(repos=repo, packageset=packages, location=self.loc1, ignore_installed=True) + self.c1 = Container(image=self.i1.create_snapshot(destination=self.loc2), name=self.loc2.split('/')[-1]) + self.c1.boot_machine() + self.c2 = Container(image=self.i1.create_snapshot(destination=self.loc3), name=self.loc3.split('/')[-1]) + self.c2.boot_machine() + + def test_basic(self): + process.run("machinectl status %s" % self.loc2.split('/')[-1]) + process.run("machinectl status %s" % self.loc3.split('/')[-1]) + self.c1.stop() + self.c2.stop() + self.assertRaises(process.CmdError, process.run, "machinectl status %s" % self.loc2.split('/')[-1]) + self.assertRaises(process.CmdError, process.run, "machinectl status %s" % self.loc3.split('/')[-1]) + + def tearDown(self): + self.c1.rm() + self.c2.rm() diff --git a/examples/rhscl_maria/example1.py b/examples/rhscl_maria/example1.py index a171865..a1cecca 100644 --- a/examples/rhscl_maria/example1.py +++ b/examples/rhscl_maria/example1.py @@ -5,7 +5,7 @@ from avocado import Test import time -WAIT_TIME=10 +WAIT_TIME=15 class OneMachine(module_framework.ContainerAvocadoTest): """ diff --git a/examples/template/test.py b/examples/template/test.py new file mode 100644 index 0000000..b7e4d44 --- /dev/null +++ b/examples/template/test.py @@ -0,0 +1,25 @@ 
+#!/usr/bin/python + +# this is template showed in tool mtf-init +# try mtf-init to create basic config.yaml +# start test by: "sudo mtf" + +from avocado import main +from avocado.core import exceptions +from moduleframework import module_framework + +class Smoke1(module_framework.AvocadoTest): + """ + :avocado: enable + """ + + def test_uname(self): + self.start() + self.run("uname | grep Linux") + + def test_echo(self): + self.start() + self.runHost("echo test | grep test") + +if __name__ == '__main__': + main() diff --git a/examples/test_metadata_loader/Makefile b/examples/test_metadata_loader/Makefile new file mode 100644 index 0000000..c9a2e86 --- /dev/null +++ b/examples/test_metadata_loader/Makefile @@ -0,0 +1,16 @@ +LOGFILE=test_output.log + +prepare-docker: + MODULE=docker URL=fedora mtf-env-set + +test: prepare-docker + MODULE=docker URL=fedora mtf --metadata 2>&1 | tee $(LOGFILE) + grep DockerLint $(LOGFILE) + grep Add1 $(LOGFILE) + grep /bin/true $(LOGFILE) + grep "3/3" $(LOGFILE) + grep -v not_schedule $(LOGFILE) + grep -v rpmvalidation $(LOGFILE) + MODULE=docker URL=fedora mtf --metadata *.sh 2>&1 | tee $(LOGFILE) && true + grep "4/4" $(LOGFILE) + diff --git a/examples/test_metadata_loader/metadata.yaml b/examples/test_metadata_loader/metadata.yaml new file mode 100644 index 0000000..91ef523 --- /dev/null +++ b/examples/test_metadata_loader/metadata.yaml @@ -0,0 +1,8 @@ +document: test-metadata +subtype: general +enable_lint: True +tag_filters: + - "docker_labels_inspect_test" +import_tests: + - "*.py" + - "/bin/true" diff --git a/examples/test_metadata_loader/not_schedule.sh b/examples/test_metadata_loader/not_schedule.sh new file mode 100755 index 0000000..d87f29e --- /dev/null +++ b/examples/test_metadata_loader/not_schedule.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +exit 1 diff --git a/examples/test_metadata_loader/simple.py b/examples/test_metadata_loader/simple.py new file mode 100644 index 0000000..1a6fa1c --- /dev/null +++ 
b/examples/test_metadata_loader/simple.py @@ -0,0 +1,16 @@ +from mtf.metatest import AvocadoTest + + +class Add1(AvocadoTest): + """ + :avocado: enable + """ + + def setUp(self): + pass + + def tearDown(self, *args, **kwargs): + pass + + def test(self): + pass diff --git a/examples/testing-module/Dockerfile b/examples/testing-module/Dockerfile index 14872d4..c81c311 100644 --- a/examples/testing-module/Dockerfile +++ b/examples/testing-module/Dockerfile @@ -8,7 +8,7 @@ FROM baseruntime/baseruntime:latest # * 11211 ENV NAME=memcached ARCH=x86_64 -LABEL MAINTAINER "Petr Hracek" +LABEL maintainer "Petr Hracek" LABEL summary="High Performance, Distributed Memory Object Cache" \ name="$FGC/$NAME" \ version="0" \ @@ -24,8 +24,8 @@ LABEL summary="High Performance, Distributed Memory Object Cache" \ io.openshift.tags="memcached" COPY repos/* /etc/yum.repos.d/ -RUN microdnf --nodocs --enablerepo memcached install memcached && \ - microdnf -y clean all +RUN dnf --nodocs --enablerepo memcached install memcached && \ + dnf -y clean all ADD files /files @@ -36,4 +36,4 @@ EXPOSE 11211 # memcached will be run under standard user on Fedora USER 1000 -CMD ["/files/memcached.sh"] \ No newline at end of file +CMD ["/files/memcached.sh"] diff --git a/examples/testing-module/Makefile b/examples/testing-module/Makefile index 892da8f..6f11916 100644 --- a/examples/testing-module/Makefile +++ b/examples/testing-module/Makefile @@ -1,4 +1,4 @@ -CMD=avocado run +CMD=mtf TESTS=$(shell ls *.py *.sh ../../moduleframework/tools/*.py) SIMPLE=simpleTest.py export MTF_REMOTE_REPOS=yes @@ -40,8 +40,8 @@ check-run-them-fedmsg-testmodule: prepare-nspawn ../../tools/run-them.sh testmodule ../../tools/example_message_module.yaml fedmsg check-exceptions: prepare-docker - URL=NOT_VALID_URL MODULE=docker $(CMD) --show-job-log simpleTest.py | grep 'raise CmdError' - CONFIG=NOT_VALID_CONFIG MODULE=docker $(CMD) --show-job-log simpleTest.py | grep 'raise ConfigExc' + URL=NOT_VALID_URL MODULE=docker $(CMD) 
--show-job-log simpleTest.py 2>&1 | grep 'raise CmdError' + CONFIG=NOT_VALID_CONFIG MODULE=docker $(CMD) --show-job-log simpleTest.py 2>&1 | grep 'raise ConfigExc' check-pure-rpm: prepare-nspawn CONFIG=config-pure-rpm.yaml MODULE=nspawn $(CMD) $(SIMPLE) @@ -66,7 +66,6 @@ check-docker-scl-multi: cd ../rhscl_maria; MODULE=docker mtf-env-set cd ../rhscl_maria; MODULE=docker $(CMD) *.py - travis: MODULE=docker $(CMD) $(TESTS) @@ -78,6 +77,42 @@ check-memcached-both: prepare-docker prepare-nspawn cd ../memcached; MODULE=nspawn $(CMD) sanity1.py +check-minimal-config-rpm-noenvvar: prepare-nspawn + MTF_REMOTE_REPOS= DOCKERFILE= MODULE=nspawn $(CMD) $(TESTS) + +check-mtf-init: + $(eval TEMPDIR := $(shell mktemp -d)) + cd $(TEMPDIR); mtf-init --name NAME1 --container docker.io/modularitycontainers/memcached + cd $(TEMPDIR); grep 'class Smoke1' test.py ; grep 'default_module' config.yaml; sudo mtf test.py + rm -rf "$(TEMPDIR)" + +check-mtf-linters: + cd ../mtf-linters; MODULE=docker mtf-env-set + cd ../mtf-linters; MODULE=docker $(CMD) -l + +check-mtf-list: + $(CMD) --action=list + +check-mtf-options: + $(eval MYPWD := $(shell pwd)) + $(eval TEMPDIR := $(shell mktemp -d)) + cd $(TEMPDIR); MODULE=docker $(CMD) --module=nspawn --debug --config=$(MYPWD)/config.yaml $(MYPWD)/simpleTest.py &> log + cd $(TEMPDIR); grep 'MODULE=docker' log + rm -rf "$(TEMPDIR)" + +check-mtf-pdc-module-info-reader: + mtf-pdc-module-info-reader -r testmodule-master-20170926102903 + mtf-pdc-module-info-reader -r testmodule-master-20170926102903 --commit | grep 9107dcf53f6201a01b8c8d18493aae0175bcfb19 + mtf-pdc-module-info-reader -r testmodule-master-20170926102903 | grep 'MODULE=nspawn' + mtf-pdc-module-info-reader -r testmodule-master-20170926102903 | grep 'MODULEMDURL=file:/' + mtf-pdc-module-info-reader -r testmodule-master-20170926102903 | grep 'URL=https://koj' + +check-mtf-metadata: + make -C ../test_metadata_loader test + +check-memcached-metadata-loader: prepare-docker + make -C ../memcached 
test + check: check-docker diff --git a/examples/testing-module/help.md b/examples/testing-module/help.md new file mode 100644 index 0000000..b79f79e --- /dev/null +++ b/examples/testing-module/help.md @@ -0,0 +1,31 @@ +% MEMCACHED(1) Container Image Pages +% Petr Hracek +% February 6, 2017 + +# NAME +{{ spec.envvars.name }} - {{ spec.description }} + +# DESCRIPTION +Memcached is a high-performance, distributed memory object caching system, generic in nature, but intended for use in speeding up dynamic web applications by alleviating database load. + +The container itself consists of: + - fedora/{{ config.os.version }} base image + - {{ spec.envvars.name }} RPM package + +Files added to the container during docker build include: /files/memcached.sh + +# USAGE +To get the memcached container image on your local system, run the following: + + docker pull docker.io/modularitycontainers/{{ spec.envvars.name }} + + +# SECURITY IMPLICATIONS +Lists of security-related attributes that are opened to the host. + +-p 11211:11211 + Opens container port 11211 and maps it to the same port on the host. + +# SEE ALSO +Memcached page + diff --git a/examples/testing-module/run_remote_script.py b/examples/testing-module/run_remote_script.py new file mode 100644 index 0000000..335f65d --- /dev/null +++ b/examples/testing-module/run_remote_script.py @@ -0,0 +1,65 @@ +# -*- coding: utf-8 -*- +# +# Meta test family (MTF) is a tool to test components of a modular Fedora: +# https://docs.pagure.org/modularity/ +# Copyright (C) 2017 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# he Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Authors: Jan Scotka +# + +from moduleframework import module_framework +from avocado.utils import process +from tempfile import mktemp +import os + +class remoteBinary(module_framework.AvocadoTest): + """ + :avocado: enable + """ + + def test_outputs(self): + self.start() + scriptfile = mktemp() + script="""#!/bin/bash +echo stdoutput +echo erroutput >&2 +echo $@ + """ + with open(scriptfile, "w") as text_file: + text_file.write(script) + outputprocess = self.run_script(scriptfile) + self.assertIn("stdoutput", outputprocess.stdout) + self.assertIn("erroutput", outputprocess.stderr) + outputprocess = self.run_script(scriptfile, "Hallo", "World") + self.assertIn("stdoutput", outputprocess.stdout) + self.assertIn("Hallo World", outputprocess.stdout) + os.remove(scriptfile) + + def test_exit_code(self): + self.start() + scriptfile = mktemp() + ecode = 15 + script = """#!/bin/bash +exit {} +""".format(ecode) + with open(scriptfile, "w") as text_file: + text_file.write(script) + outputprocess = self.run_script(scriptfile, ignore_status=True) + self.assertEqual(ecode, outputprocess.exit_status) + + self.assertRaises(process.CmdError, self.run_script, scriptfile) + os.remove(scriptfile) diff --git a/examples/testing-module/skipTest.py b/examples/testing-module/skipTest.py index 195c76a..a849133 100644 --- a/examples/testing-module/skipTest.py +++ b/examples/testing-module/skipTest.py @@ -35,17 +35,17 @@ class SkipTest(module_framework.AvocadoTest): def testGccSkippedInsideTest(self): # rewrite it to calling cancell, it was not in production of avocado, # but it is fixed. 
- if "gcc" not in self.getActualProfile(): + if True: self.cancel() self.start() self.run("gcc -v") - @skipIf(common.get_profile() == "default") + @skipIf(False) def testDecoratorNotSkippedForDefault(self): self.start() self.run("echo for default profile") - @skipUnless(common.get_profile() == "gcc") + @skipUnless(False) def testDecoratorSkip(self): self.start() self.run("gcc -v") diff --git a/man/mtf-env-clean.1 b/man/mtf-env-clean.1 new file mode 100644 index 0000000..202e793 --- /dev/null +++ b/man/mtf-env-clean.1 @@ -0,0 +1,41 @@ +.\" Copyright Petr Hracek, 2017 +.\" +.\" This page is distributed under GPL. +.\" +.TH MTF-ENV-CLEAN 1 2017-11-01 "" "Linux User's Manual" +.SH NAME +mtf-env-clean \- cleans environment for testing containers. + +.SH SYNOPSIS +\fIMODULE=docker\/\fR +.B mtf-env-clean + +\fIMODULE=rpm\/\fR +.B mtf-env-clean + +\fIMODULE=nspawn\/\fR +.B mtf-env-clean + +.SH DESCRIPTION +.PP +\fBmtf-env-clean\fP is a binary file used for cleaning environment after usage of Meta-Test-Family. + +.PP +\fIMODULE=docker\/\fR stops docker service. + +.PP +\fIMODULE=rpm\/\fR does not do any cleanup, +as that could potentially uninstall essential packages and therefore damage this machine. + +.PP +\fIMODULE=nspawn\/\fR switches SELinux back to original state. + +.SH NOTES +\fBmtf-env-clean\fP mtf-env-set is useful for people who don't want to clean the environment +manually and wish to use this executable to do the job. + +.SH AUTHORS +Petr Hracek, (man page) + +.SH "SEE ALSO" +Full documentation at: \ No newline at end of file diff --git a/man/mtf-env-set.1 b/man/mtf-env-set.1 new file mode 100644 index 0000000..fecd1d9 --- /dev/null +++ b/man/mtf-env-set.1 @@ -0,0 +1,44 @@ +.\" Copyright Petr Hracek, 2017 +.\" +.\" This page is distributed under GPL. +.\" +.TH MTF-ENV-SET 1 2017-11-01 "" "Linux User's Manual" +.SH NAME +mtf-env-set \- prepares environment for testing containers. 
+ +.SH SYNOPSIS +\fIMODULE=docker\/\fR +.B mtf-env-set + +\fIMODULE=rpm\/\fR +.B mtf-env-set + +\fIMODULE=nspawn\/\fR +.B mtf-env-set + + +.SH DESCRIPTION +.PP +\fBmtf-env-set\fP is a binary file used for setting environment before usage of Meta-Test-Family. + +.PP +\fIMODULE=docker\/\fR It installs tests dependencies, docker service +and starts docker service for testing containers + +.PP +\fIMODULE=rpm\/\fR installs RPM dependencies on this machine. + +.PP +\fIMODULE=nspawn\/\fR installs RPM dependencies. If environment variable MTF_SKIP_DISABLING_SELINUX is +set, it disables SELinux. It installs systemd-container package on this machine. + + +.SH NOTES +\fBmtf-env-set\fP mtf-env-set is useful for people who don't want to set the environment manually +and wish to use this executable to do the job. + +.SH AUTHORS +Petr Hracek, (man page) + +.SH "SEE ALSO" +Full documentation at: \ No newline at end of file diff --git a/man/mtf-generator.1 b/man/mtf-generator.1 new file mode 100644 index 0000000..5fb9e73 --- /dev/null +++ b/man/mtf-generator.1 @@ -0,0 +1,19 @@ +.\" Copyright Petr Hracek, 2017 +.\" +.\" This page is distributed under GPL. +.\" +.TH mtf-generator 1 2017-11-01 "" "Linux User's Manual" +.SH NAME +mtf-generator \- generates code for tests written in \fBconfig.yaml\fP file. + +.SH SYNOPSIS +.B +mtf + +.SH DESCRIPTION +\fBmtf-generator\fP is a binary file included in Meta-Test-Family package. +It generates code for tests written in \fBconfig.yaml\fP file for usage by \fBmtf\fP binary. 
+ +.SH AUTHORS +Petr Hracek, (man page) + diff --git a/meta-test-family.spec b/meta-test-family.spec index e86b602..1d3f2d3 100644 --- a/meta-test-family.spec +++ b/meta-test-family.spec @@ -1,13 +1,13 @@ %global framework_name moduleframework Name: meta-test-family -Version: 0.7.4 -Release: 2%{?dist} +Version: 0.7.8 +Release: 1%{?dist} Summary: Tool to test components of a modular Fedora License: GPLv2+ URL: https://github.com/fedora-modularity/meta-test-family -Source0: https://codeload.github.com/fedora-modularity/%{name}/tar.gz/%{name}-%{version}.tar.gz +Source0: %{url}/archive/%{version}/%{name}-%{version}.tar.gz BuildArch: noarch # Exlcude ppc64: there is no docker package on ppc64 # https://bugzilla.redhat.com/show_bug.cgi?id=1465176 @@ -22,6 +22,8 @@ Requires: docker Requires: python2-pdc-client Requires: python2-modulemd Requires: python2-dockerfile-parse +Requires: python-mistune +Requires: python2-odcs-client Provides: modularity-testing-framework = %{version}-%{release} Obsoletes: modularity-testing-framework < 0.5.18-2 @@ -42,18 +44,207 @@ install -d -p -m 755 %{buildroot}%{_datadir}/%{framework_name} %files %license LICENSE +%{_mandir}/man1/mtf-env-clean.1* +%{_mandir}/man1/mtf-env-set.1* +%{_mandir}/man1/mtf-generator.1* %{_bindir}/mtf %{_bindir}/mtf-cmd %{_bindir}/mtf-generator %{_bindir}/mtf-env-set %{_bindir}/mtf-env-clean -%{_bindir}/mtf-log-parser +%{_bindir}/mtf-init +%{_bindir}/mtf-pdc-module-info-reader %{python2_sitelib}/moduleframework/ +%{python2_sitelib}/mtf/ %{python2_sitelib}/meta_test_family-*.egg-info/ %{_datadir}/moduleframework/ %changelog +* Wed Dec 06 2017 Jan Scotka 0.7.8-1 +- remove mtf-env-clean from runthem script, it is on not good place and cleanup + of env is not important to have it ther (jscotka@redhat.com) +- add vagrant file (jscotka@redhat.com) +- add vagrant file for metadata (jscotka@redhat.com) +- support for metadata to mtf tool (jscotka@redhat.com) +- metadata: fix generic test filters (jscotka@redhat.com) +- 
dependencies for testing in nicer and cleaner format for various distros + (jscotka@redhat.com) +- remove mtf manpage (jscotka@redhat.com) +- revert generated man pages code (jscotka@redhat.com) +- Revert "remove all manpages" (scottyh@post.cz) +- run also without sudo, to improve pip for avocado (jscotka@redhat.com) +- dependencies in vagrant, travis and for local installation to file + (jscotka@redhat.com) +- trying to clean man-page-generator (psklenar@redhat.com) +- add test for mtf-pdc-module-info-reader tool and enable it in travis + (jscotka@redhat.com) +- trying to clean man-page-generator (psklenar@redhat.com) +- trying to clean man-page-generator (psklenar@redhat.com) +- remove regression, pdc_data needs to import BASEPATHDIR (jscotka@redhat.com) +- fix vagrant file, fix odcs format of repo, expected dir not repofile + (jscotka@redhat.com) +- remove print function, typo (jscotka@redhat.com) +- create clean commit based on PR#172 (jscotka@redhat.com) +- add cool comments (psklenar@redhat.com) +- man page is generated now (psklenar@redhat.com) +- man page is generated now (psklenar@redhat.com) +- man page is generated now (psklenar@redhat.com) +- remove mtf-log-parser from specfile (jscotka@redhat.com) +- add all variables (psklenar@redhat.com) +- fix copr builds (jscotka@redhat.com) +- removed unused imports (psklenar@redhat.com) +- fixing all issues (jscotka@redhat.com) +- add version as its needed for man page generator (psklenar@redhat.com) +- script to run containers in taskotron (jscotka@redhat.com) +- trying travis (psklenar@redhat.com) +- some info about VARIABLES (psklenar@redhat.com) +- empty commit to start tests (psklenar@redhat.com) +- delete file (psklenar@redhat.com) +- not needed (psklenar@redhat.com) +- new line (psklenar@redhat.com) +- needed setup, parser in function (psklenar@redhat.com) +- have parser in function (psklenar@redhat.com) +- man mtf page is generated (psklenar@redhat.com) +- revert back to using python setup.py for package 
installation + (jscotka@redhat.com) +- remove avocado html plugin from python dependencies, it is not important for + mtf anyhow (jscotka@redhat.com) +- test metadata support tool for MTF (jscotka@redhat.com) +- fix profile handling (jscotka@redhat.com) +- add some tags to modulelint tests, to be able to filter them + (jscotka@redhat.com) +- Updating docstring and adding pod functions (phracek@redhat.com) +- Support run command. (phracek@redhat.com) +- Better check if application exists (phracek@redhat.com) +- Fixes based on the PR comments. (phracek@redhat.com) +- Use command oc get and stdout instead of parsing json. (phracek@redhat.com) +- Add more docu stuff and some fixes. (phracek@redhat.com) +- testing containers in OpenShift (phracek@redhat.com) +- Update setup.py (phracek@redhat.com) +- Fix problem with paramters (phracek@redhat.com) +- Rewrite dnf/yum clean all functions (phracek@redhat.com) +- Check specific file extensions in /var/cache/yum|dnf directories + (phracek@redhat.com) +- doc test is splitted into two tests. One is for whole image and second one is + related only for install RPMs by RUN command (phracek@redhat.com) +- Add suport for check nodocs and clean_all (phracek@redhat.com) +- Update docstring (phracek@redhat.com) +- Update RTD. Use sphinx-build-2 (phracek@redhat.com) +- Update name classes (phracek@redhat.com) +- Update setup.py (phracek@redhat.com) +- Bump version to 0.7.7 (phracek@redhat.com) +- Documentation about linters (phracek@redhat.com) +- Split linters into more classes (phracek@redhat.com) +- Couple updates based on PR. (phracek@redhat.com) +- Rename function to assert_to_warn (phracek@redhat.com) +- Package man pages (phracek@redhat.com) +- man page updates based on #151 PR (phracek@redhat.com) +- Implement func mark_as_warn (phracek@redhat.com) +- Fix error in case help_md does not exist (phracek@redhat.com) +- Couple updates. 
(phracek@redhat.com) +- remove workarounds and add rpm to base package set workaround + (jscotka@redhat.com) +- typos (psklenar@redhat.com) +- modify how it works images and store rendered output (jscotka@redhat.com) +- docs into RTD (psklenar@redhat.com) +- testsuite for mtf-init (psklenar@redhat.com) +- Bump version to 0.7.6 (phracek@redhat.com) +- Add mtf-generator man page. (phracek@redhat.com) +- Manual page for Meta-Test-Family (phracek@redhat.com) +- Fix error in case help_md does not exist (phracek@redhat.com) +- Fixes #142 Fix tracebacks for COPY and ADD directives (phracek@redhat.com) +- systemd test examples - testing fedora or centos via nspawn + (jscotka@redhat.com) +- fix multihost regression, caused by code cleanup (jscotka@redhat.com) +- change test for decorators to generic one, and change self.skip to + self.cancel() (jscotka@redhat.com) +- mistake in os.path.exist (there were makedirs by mistake) + (jscotka@redhat.com) +- there is sometimes problem to do chmod, so run it via bash + (jscotka@redhat.com) +- test for exception return in case of failed command, check ret code and + raised exception (jscotka@redhat.com) +- add function to run script on remote machine (jscotka@redhat.com) +- nspawn operation moved to low level library not depenedent on mtf structure + (jscotka@redhat.com) +- add argparse, move test.py into templates (psklenar@redhat.com) +- Bump new release (phracek@redhat.com) +- Update documentation and use absolute path (phracek@redhat.com) +- Fix some logging issues and yum checks (phracek@redhat.com) +- raise error in case of compatibility (error has to be raised explicitly) + (jscotka@redhat.com) +- script which generate easy template (psklenar@redhat.com) +- create snapshot before calling setup from config, because machine does not + have root directory (jscotka@redhat.com) +- Skip help.md for now if it does not exist (phracek@redhat.com) +- add tests for RUN instructions. 
One for dnf part and the other one for the + rest (phracek@redhat.com) +- Use WARNING in case of ENVIRONMENT VARIABLES are not set in help.md + (phracek@redhat.com) +- Add check for presence help.md (phracek@redhat.com) +- several fixes based on comment from PR. (phracek@redhat.com) +- New help.md fixes (phracek@redhat.com) +- Fix problems found during review (phracek@redhat.com) +- help.md sanity checker (phracek@redhat.com) +- Linter for help.md file (phracek@redhat.com) +- Check for is FROM first (phracek@redhat.com) +- linter: check Red Hat's and Fedora's images (ttomecek@redhat.com) +- add comment and link to bugzilla (jscotka@redhat.com) +- partial change of backward compatibility (jscotka@redhat.com) +- fix issue with bad exit code of mtf command (jscotka@redhat.com) +- Hidden feature for install packages from default module via ENVVAR, for + further purposes, should not be used now (jscotka@redhat.com) +- pep8 change (jscotka@redhat.com) +- test module uses this config, after fixing composeurl handling, if there is + bad link, causes error (jscotka@redhat.com) +- back to original timeout library (jscotka@redhat.com) +- spec: fix URL (phracek@redhat.com) +- fix compose handling and fix container issue with using container instead of + url (jscotka@redhat.com) +- Remove shebang from two python files (phracek@redhat.com) +- Fix shebangs and so (phracek@redhat.com) +- avocado could say 'FAIL' too (psklenar@redhat.com) +- typo (jscotka@redhat.com) +- repair typo in config.yaml and add call of mtf-set-env to makefile + (jscotka@redhat.com) +- better name of the file (psklenar@redhat.com) +- move main into site package (psklenar@redhat.com) +- new line fix (psklenar@redhat.com) +- function add, not so many spaces (psklenar@redhat.com) +- new line (psklenar@redhat.com) +- new tool avocado_log_json.py (psklenar@redhat.com) +- mtf summary (psklenar@redhat.com) +- add sample output, to see what you can expect (jscotka@redhat.com) +- add internal usage test as class of 
simpleTest.py (jscotka@redhat.com) +- add usage tests and improve doc (jscotka@redhat.com) +- Revert "add usage tests and improve doc" (jscotka@redhat.com) +- add usage tests and improve doc (jscotka@redhat.com) +- improv base avocado class to not skip modules with proper backend (parent) + (jscotka@redhat.com) +- repaired submodule for check_modulemd (jscotka@redhat.com) +- revert back submodule (jscotka@redhat.com) +- example how S2I image can be tested with build process (jscotka@redhat.com) +- Update dockerlint a bit according to Container:Guidelines + (phracek@redhat.com) +- remov baseruntime from Makefile (jscotka@redhat.com) +- remove python docker requirements, cause trouble in taskotron for shell test: + (jscotka@redhat.com) +- move this important testcase to the end, cause sometimes error + (jscotka@redhat.com) +- function removed, have to remove from nspawn helper (jscotka@redhat.com) +- taskotron - fix issue with missing base compose repo, when disabled local + koji cloning (jscotka@redhat.com) + +* Tue Oct 31 2017 Petr Hracek 0.7.7-1 +- new upstream release + +* Tue Oct 24 2017 Petr Hracek 0.7.6-1 +- new upstream release + +* Tue Oct 17 2017 Petr Hracek 0.7.5-1 +- new upstream release + * Wed Oct 04 2017 Petr Hracek 0.7.4-2 - fix shebang from two python files diff --git a/moduleframework/avocado_testers/avocado_test.py b/moduleframework/avocado_testers/avocado_test.py index 8e8b549..e0b1d9d 100644 --- a/moduleframework/avocado_testers/avocado_test.py +++ b/moduleframework/avocado_testers/avocado_test.py @@ -34,6 +34,7 @@ from moduleframework.helpers.container_helper import ContainerHelper from moduleframework.helpers.nspawn_helper import NspawnHelper from moduleframework.helpers.rpm_helper import RpmHelper +from moduleframework.helpers.openshift_helper import OpenShiftHelper # INTERFACE CLASS FOR GENERAL TESTS OF MODULES @@ -252,6 +253,31 @@ def getModuleDependencies(self): """ return self.backend.getModuleDependencies() + def run_script(self, *args, 
**kwargs): + """ + run script or binary inside module + + :param filename: filename to copy to module + :param args: pass this args as cmdline args to run binary + :param kwargs: pass thru to avocado process.run + :return: avocado process.run object + """ + return self.backend.run_script(*args, **kwargs) + + def assert_to_warn(self, func, *args, **kwargs): + """ + run function which you would like to mark as WARN + :param func: function for run + :param args: pass this args to run function + :param kwargs: pass this args to run function + :return: returns either PASS or WARN + """ + try: + func(*args, **kwargs) + except AssertionError as e: + self.log.warn("Warning raised: %s" % e) + + def get_backend(): """ Return proper module backend, set by config by default_module section, or defined via @@ -267,6 +293,9 @@ def get_backend(): return RpmHelper() elif parent == 'nspawn': return NspawnHelper() + elif parent == 'openshift': + return OpenShiftHelper() + # To keep backward compatibility. 
This method could be used by pure avocado tests and is already used get_correct_backend = get_backend diff --git a/moduleframework/avocado_testers/container_avocado_test.py b/moduleframework/avocado_testers/container_avocado_test.py index 000fe02..13ba293 100644 --- a/moduleframework/avocado_testers/container_avocado_test.py +++ b/moduleframework/avocado_testers/container_avocado_test.py @@ -35,7 +35,7 @@ class ContainerAvocadoTest(AvocadoTest): def setUp(self): if get_module_type_base() != "docker": - self.skip("Docker specific test") + self.cancel("Docker specific test") super(ContainerAvocadoTest, self).setUp() def checkLabel(self, key, value): diff --git a/moduleframework/avocado_testers/nspawn_avocado_test.py b/moduleframework/avocado_testers/nspawn_avocado_test.py index bbbae3b..ecb3f46 100644 --- a/moduleframework/avocado_testers/nspawn_avocado_test.py +++ b/moduleframework/avocado_testers/nspawn_avocado_test.py @@ -32,7 +32,7 @@ class NspawnAvocadoTest(AvocadoTest): def setUp(self): if get_module_type_base() != "nspawn": - self.skip("Nspawn specific test") + self.cancel("Nspawn specific test") super(NspawnAvocadoTest, self).setUp() diff --git a/moduleframework/avocado_testers/openshift_avocado_test.py b/moduleframework/avocado_testers/openshift_avocado_test.py new file mode 100644 index 0000000..f7dfa47 --- /dev/null +++ b/moduleframework/avocado_testers/openshift_avocado_test.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- +# +# This Modularity Testing Framework helps you to write tests for modules +# Copyright (C) 2017 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# he Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Authors: Petr Hracek +# + +from moduleframework.module_framework import AvocadoTest +from moduleframework.common import get_module_type_base + + +# INTERFACE CLASSES FOR SPECIFIC MODULE TESTS +class OpenShiftAvocadoTest(AvocadoTest): + """ + Class for writing tests specific just for OpenShift + derived from AvocadoTest class. + + :avocado: disable + """ + + def setUp(self): + if get_module_type_base() != "openshift": + self.cancel("OpenShift specific test") + super(OpenShiftAvocadoTest, self).setUp() diff --git a/moduleframework/avocado_testers/rpm_avocado_test.py b/moduleframework/avocado_testers/rpm_avocado_test.py index a6d1698..2c4cc4b 100644 --- a/moduleframework/avocado_testers/rpm_avocado_test.py +++ b/moduleframework/avocado_testers/rpm_avocado_test.py @@ -34,7 +34,7 @@ class RpmAvocadoTest(AvocadoTest): def setUp(self): if get_module_type_base() != "rpm": - self.skip("Rpm specific test") + self.cancel("Rpm specific test") super(RpmAvocadoTest, self).setUp() diff --git a/moduleframework/common.py b/moduleframework/common.py index 30ffc54..95233b1 100644 --- a/moduleframework/common.py +++ b/moduleframework/common.py @@ -24,6 +24,8 @@ Custom configuration and debugging library. 
""" +from __future__ import print_function + import netifaces import socket import os @@ -31,12 +33,12 @@ import yaml import subprocess import copy -import warnings - +import sys +import random +import string +import requests from avocado.utils import process - -from moduleframework.exceptions import * -from moduleframework.compose_info import ComposeParser +from moduleframework.mtfexceptions import ModuleFrameworkException, ConfigExc, CmdExc defroutedev = netifaces.gateways().get('default').values( )[0][1] if netifaces.gateways().get('default') else "lo" @@ -53,6 +55,7 @@ hostpackager = subprocess.check_output([PACKAGER_COMMAND], shell=True).strip() guestpackager = hostpackager ARCH = "x86_64" +DOCKERFILE = "Dockerfile" __persistent_config = None @@ -82,15 +85,12 @@ # time in seconds DEFAULTRETRYTIMEOUT = 30 DEFAULTNSPAWNTIMEOUT = 10 +MODULE_DEFAULT_PROFILE = "default" +TRUE_VALUES_DICT = ['yes', 'YES', 'yes', 'True', 'true', 'ok', 'OK'] + +def generate_unique_name(size=10): + return ''.join(random.choice(string.ascii_lowercase) for _ in range(size)) -def get_compose_url_modular_release(): - release = os.environ.get("MTF_FEDORA_RELEASE") or "26" - if release == "master": - release = "26" - compose_url = os.environ.get("MTF_BASE_COMPOSE_URL") or \ - "https://kojipkgs.fedoraproject.org/compose/latest-Fedora-Modular-%s/compose/Server/%s/os" \ - % (release, ARCH) - return compose_url def is_debug(): """ @@ -110,6 +110,47 @@ def is_not_silent(): return is_debug() +def get_openshift_local(): + """ + Return the **OPENSHIFT_LOCAL** envvar. + :return: bool + """ + return bool(os.environ.get('OPENSHIFT_LOCAL')) + + +def get_openshift_ip(): + """ + Return the **OPENSHIFT_IP** envvar or None. + :return: OpenShift IP or None + """ + try: + return os.environ.get('OPENSHIFT_IP') + except KeyError: + return None + + +def get_openshift_user(): + """ + Return the **OPENSHIFT_USER** envvar or None. 
+ :return: OpenShift User or None + """ + try: + return os.environ.get('OPENSHIFT_USER') + except KeyError: + return None + + +def get_openshift_passwd(): + """ + Return the **OPENSHIFT_PASSWORD** envvar or None. + :return: OpenShift password or None + """ + try: + return os.environ.get('OPENSHIFT_PASSWORD') + except KeyError: + return None + + def print_info(*args): """ Print information from the expected stdout and @@ -123,17 +164,7 @@ def print_info(*args): :return: None """ for arg in args: - result = arg - if isinstance(arg, basestring): - try: - result = arg.format(**trans_dict) - except KeyError: - raise ModuleFrameworkException( - "String is formatted by using trans_dict. If you want to use " - "brackets { } in your code, please use double brackets {{ }}." - "Possible values in trans_dict are: %s" - % trans_dict) - print >> sys.stderr, result + print(arg, file=sys.stderr) def print_debug(*args): @@ -152,6 +183,17 @@ def print_debug(*args): if is_debug(): print_info(*args) + +def get_if_install_default_profile(): + """ + Return the **MTF_INSTALL_DEFAULT** envvar. + + :return: bool + """ + envvar = os.environ.get('MTF_INSTALL_DEFAULT') + return bool(envvar) + + def is_recursive_download(): """ Return the **MTF_RECURSIVE_DOWNLOAD** envvar. @@ -160,6 +202,7 @@ def is_recursive_download(): """ return bool(os.environ.get("MTF_RECURSIVE_DOWNLOAD")) + def get_if_do_cleanup(): """ Return the **MTF_DO_NOT_CLEANUP** envvar. @@ -169,6 +212,7 @@ def get_if_do_cleanup(): cleanup = os.environ.get('MTF_DO_NOT_CLEANUP') return not bool(cleanup) + def get_if_reuse(): """ Return the **MTF_REUSE** envvar. @@ -178,6 +222,7 @@ def get_if_reuse(): reuse = os.environ.get('MTF_REUSE') return bool(reuse) + def get_if_remoterepos(): """ Return the **MTF_REMOTE_REPOS** envvar. 
@@ -188,6 +233,49 @@ def get_if_remoterepos(): return bool(remote_repos) +def get_odcs_auth(): + """ + use ODCS for creating composes as URL parameter + It enables this feature in case MTF_ODCS envvar is set + MTF_ODCS=yes -- use openidc and token for your user + MTF_ODCS=OIDC_token_string -- use this token for authentication + + :envvar MTF_ODCS: yes or token + :return: + """ + odcstoken = os.environ.get('MTF_ODCS') + + # in case you dont have token enabled, try to ask for openidc via web browser + if odcstoken in TRUE_VALUES_DICT: + # to not have hard dependency on openidc (use just when using ODCS without defined token) + import openidc_client + id_provider = 'https://id.fedoraproject.org/openidc/' + # Get the auth token using the OpenID client. + oidc = openidc_client.OpenIDCClient( + 'odcs', + id_provider, + {'Token': 'Token', 'Authorization': 'Authorization'}, + 'odcs-authorizer', + 'notsecret', + ) + + scopes = [ + 'openid', + 'https://id.fedoraproject.org/scope/groups', + 'https://pagure.io/odcs/new-compose', + 'https://pagure.io/odcs/renew-compose', + 'https://pagure.io/odcs/delete-compose', + ] + try: + odcstoken = oidc.get_token(scopes, new_token=True) + except requests.exceptions.HTTPError as e: + print_info(e.response.text) + raise ModuleFrameworkException("Unable to get token via OpenIDC for your user") + if odcstoken and len(odcstoken)<10: + raise ModuleFrameworkException("Unable to parse token for ODCS, token is too short: %s" % odcstoken) + return odcstoken + + def get_if_module(): """ Return the **MTF_DISABLE_MODULE** envvar. 
@@ -205,8 +293,8 @@ def sanitize_text(text, replacement="_", invalid_chars=["/", ";", "&", ">", "<", invalid_chars=["/", ";", "&", ">", "<", "|"] - :param (str): text to sanitize - :param (str): replacement char, default: "_" + :param replacement: text to sanitize + :param invalid_chars: replacement char, default: "_" :return: str """ for char in invalid_chars: @@ -219,7 +307,7 @@ def sanitize_cmd(cmd): """ Escape apostrophes in a command line. - :param (str): command to sanitize + :param cmd: command to sanitize :return: str """ if '"' in cmd: @@ -227,6 +315,20 @@ def sanitize_cmd(cmd): return cmd +def translate_cmd(cmd, translation_dict=None): + if not translation_dict: + return cmd + try: + formattedcommand = cmd.format(**translation_dict) + except KeyError: + raise ModuleFrameworkException( + "Command is formatted by using trans_dict. If you want to use " + "brackets { } in your code, please use {{ }}. Possible values " + "in trans_dict are: %s. \nBAD COMMAND: %s" + % (translation_dict, cmd)) + return formattedcommand + + def get_profile(): """ Return a profile name. 
@@ -236,10 +338,8 @@ def get_profile(): :return: str """ - profile = os.environ.get('PROFILE') - if not profile: - profile = "default" - return profile + + return os.environ.get('PROFILE') or MODULE_DEFAULT_PROFILE def get_url(): @@ -260,20 +360,9 @@ def get_compose_url(): :return: str """ - compose_url = os.environ.get('COMPOSEURL') - if not compose_url: - readconfig = CommonFunctions() - readconfig.loadconfig() - try: - if readconfig.config.get("compose-url"): - compose_url = readconfig.config.get("compose-url") - elif readconfig.config['module']['rpm'].get("repo"): - compose_url = readconfig.config['module']['rpm'].get("repo") - else: - compose_url = readconfig.config['module']['rpm'].get("repos")[0] - except AttributeError: - return None - return compose_url + readconfig = get_config() + compose_url = os.environ.get('COMPOSEURL') or readconfig.get("compose-url") + return [compose_url] if compose_url else [] def get_modulemdurl(): @@ -296,20 +385,17 @@ class CommonFunctions(object): """ config = None modulemdConf = None + component_name = None + source = None + arch = None + sys_arch = None + dependencylist = {} + is_it_module = False + packager = None + # general use case is to have forwarded services to host (so thats why it is same) + ipaddr = trans_dict["HOSTIPADDR"] def __init__(self, *args, **kwargs): - self.config = None - self.modulemdConf = None - self.moduleName = None - self.source = None - self.arch = None - self.sys_arch = None - self.dependencylist = {} - self.moduledeps = {} - self.is_it_module = False - self.packager = None - # general use case is to have forwarded services to host (so thats why it is same) - self.ipaddr = trans_dict["HOSTIPADDR"] trans_dict["GUESTARCH"] = self.getArch() self.loadconfig() @@ -321,9 +407,9 @@ def loadconfig(self): """ # we have to copy object. 
because there is just one global object, to improve performance self.config = copy.deepcopy(get_config()) - self.info = self.config.get("module",{}).get(get_module_type_base()) + self.info = self.config.get("module", {}).get(get_module_type_base()) # if there is inheritance join both dictionary - self.info.update(self.config.get("module",{}).get(get_module_type())) + self.info.update(self.config.get("module", {}).get(get_module_type())) if not self.info: raise ConfigExc("There is no section for (module: -> %s:) in the configuration file." % get_module_type_base()) @@ -333,7 +419,7 @@ def loadconfig(self): else: pass - self.moduleName = sanitize_text(self.config['name']) + self.component_name = sanitize_text(self.config['name']) self.source = self.config.get('source') self.set_url() @@ -368,7 +454,6 @@ def get_url(self): """ return self.info.get("url") - def getArch(self): """ Get system architecture. @@ -383,20 +468,12 @@ def runHost(self, command="ls /", **kwargs): """ Run commands on a host. - :param (str): command to exectute + :param common: command to exectute ** kwargs: avocado process.run params like: shell, ignore_status, verbose :return: avocado.process.run """ - try: - formattedcommand = command.format(**trans_dict) - except KeyError: - raise ModuleFrameworkException( - "Command is formatted by using trans_dict. If you want to use " - "brackets { } in your code, please use {{ }}. Possible values " - "in trans_dict are: %s. 
\nBAD COMMAND: %s" - % (trans_dict, command)) - return process.run("%s" % formattedcommand, **kwargs) + return process.run("%s" % translate_cmd(command, translation_dict=trans_dict), **kwargs) def get_test_dependencies(self): """ @@ -428,7 +505,6 @@ def installTestDependencies(self, packages=None): except process.CmdError as e: raise CmdExc("Installation failed; Do you have permission to do that?", e) - def getPackageList(self, profile=None): """ Return list of packages what has to be installed inside module @@ -437,15 +513,20 @@ def getPackageList(self, profile=None): :return: list of packages (rpms) """ package_list = [] + mddata = self.getModulemdYamlconfig() if not profile: if 'packages' in self.config: packages_rpm = self.config.get('packages',{}).get('rpms', []) packages_profiles = [] - for profile_in_conf in self.config.get('packages',{}).get('profiles',[]): - packages_profiles += self.getModulemdYamlconfig()['data']['profiles'][profile_in_conf]['rpms'] + for profile_in_conf in self.config.get('packages', {}).get('profiles', []): + packages_profiles += mddata['data']['profiles'][profile_in_conf]['rpms'] package_list += packages_rpm + packages_profiles + if get_if_install_default_profile(): + profile_append = mddata.get('data', {})\ + .get('profiles', {}).get(get_profile(), {}).get('rpms', []) + package_list += profile_append else: - package_list += self.getModulemdYamlconfig()['data']['profiles'][profile]['rpms'] + package_list += mddata['data']['profiles'][profile].get('rpms', []) print_info("PCKGs to install inside module:", package_list) return package_list @@ -490,7 +571,6 @@ def getModulemdYamlconfig(self, urllink=None): self.modulemdConf = link return link - def getIPaddr(self): """ Return protocol (IP or IPv6) address on a guest machine. 
@@ -573,7 +653,6 @@ def stop(self, command="/bin/true"): command = self.info.get('stop') or command self.run(command, shell=True, ignore_bg_processes=True, verbose=is_not_silent()) - def install_packages(self, packages=None): """ Install packages in config (by config or via parameter) @@ -607,6 +686,45 @@ def tearDown(self): else: print_info("TearDown phase skipped.") + def copyTo(self, src, dest): + """ + Copy file to module from host + + :param src: source file on host + :param dest: destination file on module + :return: None + """ + if src is not dest: + self.run("cp -rf %s %s" % (src, dest)) + + def copyFrom(self, src, dest): + """ + Copy file from module to host + + :param src: source file on module + :param dest: destination file on host + :return: None + """ + if src is not dest: + self.run("cp -rf %s %s" % (src, dest)) + + def run_script(self, filename, *args, **kwargs): + """ + run script or binary inside module + :param filename: filename to copy to module + :param args: pass this args as cmdline args to run binary + :param kwargs: pass thru to avocado process.run + :return: avocado process.run object + """ + dest = "/tmp/%s" % generate_unique_name() + self.copyTo(filename, dest) + #self.run("bash %s" % dest) + parameters = "" + if args: + parameters = " " + " ".join(args) + return self.run("bash " + dest + parameters, **kwargs) + + def get_config(): """ Read the module's configuration file. 
@@ -647,8 +765,8 @@ def get_config(): raise ConfigExc("No module in yaml config defined") # copy rpm section to nspawn, in case not defined explicitly # make it backward compatible - if xcfg.get("module",{}).get("rpm") and not xcfg.get("module",{}).get("nspawn"): - xcfg["module"]["nspawn"] = copy.deepcopy(xcfg.get("module",{}).get("rpm")) + if xcfg.get("module", {}).get("rpm") and not xcfg.get("module", {}).get("nspawn"): + xcfg["module"]["nspawn"] = copy.deepcopy(xcfg.get("module", {}).get("rpm")) __persistent_config = xcfg return xcfg except IOError: @@ -669,15 +787,17 @@ def list_modules_from_config(): modulelist = get_config().get("module").keys() return modulelist + def get_backend_list(): """ Get backends :return: list """ - base_module_list = ["rpm", "nspawn", "docker"] + base_module_list = ["rpm", "nspawn", "docker", "openshift"] return base_module_list + def get_module_type(): """ Get which module are you actually using. @@ -705,10 +825,31 @@ def get_module_type_base(): module_type = get_module_type() parent = module_type if module_type not in get_backend_list(): - parent = get_config().get("module",{}).get(module_type, {}).get("parent") + parent = get_config().get("module", {}).get(module_type, {}).get("parent") if not parent: raise ModuleFrameworkException("Module (%s) does not provide parent backend parameter (there are: %s)" % (module_type, get_backend_list())) if parent not in get_backend_list(): raise ModuleFrameworkException("As parent is allowed just base type: %s" % get_backend_list) return parent + + +def get_docker_file(dir_name="../"): + """ + Function returns full path to dockerfile. 
+ :param dir_name: dir_name, where should be Dockerfile located + :return: full_path to Dockerfile + """ + fromenv = os.environ.get("DOCKERFILE") + if fromenv: + dockerfile = fromenv + dir_name = os.getcwd() + else: + dir_name = os.path.abspath(dir_name) + dockerfile = DOCKERFILE + dockerfile = os.path.join(dir_name, dockerfile) + + if not os.path.exists(dockerfile): + dockerfile = None + print_debug("Dockerfile should exists in the %s directory." % os.path.abspath(dir_name)) + return dockerfile diff --git a/moduleframework/compose_info.py b/moduleframework/compose_info.py index c1d8f13..119cc2d 100644 --- a/moduleframework/compose_info.py +++ b/moduleframework/compose_info.py @@ -27,8 +27,6 @@ module files """ -import yaml -import urllib import xml.etree.ElementTree import gzip import tempfile diff --git a/moduleframework/dockerlinter.py b/moduleframework/dockerlinter.py index 9a9d456..c456d1c 100644 --- a/moduleframework/dockerlinter.py +++ b/moduleframework/dockerlinter.py @@ -1,13 +1,10 @@ -from __future__ import print_function - -import os import re import ast + from dockerfile_parse import DockerfileParser -import common +from moduleframework.common import get_docker_file, print_info # Dockerfile path -DOCKERFILE = "Dockerfile" EXPOSE = "EXPOSE" VOLUME = "VOLUME" LABEL = "LABEL" @@ -15,28 +12,16 @@ PORTS = "PORTS" FROM = "FROM" RUN = "RUN" +USER = "USER" +COPY = "COPY" +ADD = "ADD" +INSTRUCT = "instruction" def get_string(value): return ast.literal_eval(value) -def get_docker_file(dir_name): - fromenv = os.environ.get("DOCKERFILE") - if fromenv: - dockerfile = fromenv - dir_name = os.getcwd() - else: - dir_name = os.path.abspath(dir_name) - dockerfile = DOCKERFILE - dockerfile = os.path.join(dir_name, dockerfile) - - if not os.path.exists(dockerfile): - dockerfile = None - common.print_debug("Dockerfile should exists in the %s directory." 
% dir_name) - return dockerfile - - class DockerfileLinter(object): """ Class checks a Dockerfile @@ -45,15 +30,17 @@ class DockerfileLinter(object): dockerfile = None oc_template = None - dfp = {} + dfp_structure = {} docker_dict = {} def __init__(self, dir_name="../"): dockerfile = get_docker_file(dir_name) if dockerfile: - self.dfp = DockerfileParser(path=os.path.dirname(dockerfile)) self.dockerfile = dockerfile - self._get_structure_as_dict() + with open(self.dockerfile, "r") as f: + self.dfp = DockerfileParser(fileobj=f) + self.dfp_structure = self.dfp.structure + self._get_structure_as_dict() else: self.dfp = None self.dockerfile = None @@ -61,7 +48,7 @@ def __init__(self, dir_name="../"): def _get_general(self, value): """ Function returns exposes as field. - It is used for RUN, EXPOSE and FROM + It is used for RUN, EXPOSE, USER, COPY, ADD and FROM :param value: :return: """ @@ -99,31 +86,30 @@ def _get_structure_as_dict(self): VOLUME: self._get_volume, LABEL: self._get_label, FROM: self._get_general, - RUN: self._get_general} + RUN: self._get_general, + USER: self._get_general, + COPY: self._get_general, + ADD: self._get_general, + } + self.docker_dict[LABEL] = {} + for label in self.dfp.labels: + self.docker_dict[LABEL][label] = self.dfp.labels[label] for struct in self.dfp.structure: - key = struct["instruction"] + key = struct[INSTRUCT] val = struct["value"] - if key == LABEL: - if key not in self.docker_dict: - self.docker_dict[key] = {} - value = functions[key](val) - if value is not None: - self.docker_dict[key].update(value) - else: + if key != LABEL: if key not in self.docker_dict: self.docker_dict[key] = [] try: ret_val = functions[key](val) for v in ret_val: - if v not in self.docker_dict[key]: - self.docker_dict[key].append(v) + self.docker_dict[key].append(v) except KeyError: - print("Dockerfile tag %s is not parsed by MTF" % key) + print_info("Dockerfile tag %s is not parsed by MTF" % key) def get_docker_env(self): - if ENV in self.docker_dict 
and self.docker_dict[ENV]: - return self.docker_dict[ENV] + return self.docker_dict.get(ENV) def get_docker_specific_env(self, env_name=None): """ @@ -134,7 +120,7 @@ def get_docker_specific_env(self, env_name=None): if env_name is None: return [] env_list = self.get_docker_env() - return [env_name in env_list] + return [x for x in env_list if env_name in x] def get_docker_expose(self): """ @@ -142,9 +128,8 @@ def get_docker_expose(self): :return: list of PORTS """ ports_list = [] - if EXPOSE in self.docker_dict and self.docker_dict[EXPOSE]: - for p in self.docker_dict[EXPOSE]: - ports_list.append(int(p)) + for p in self.docker_dict.get(EXPOSE, []): + ports_list.append(int(p)) return ports_list def get_docker_labels(self): @@ -152,9 +137,7 @@ def get_docker_labels(self): Function returns docker labels :return: label dictionary """ - if LABEL in self.docker_dict and self.docker_dict[LABEL]: - return self.docker_dict[LABEL] - return None + return self.docker_dict.get(LABEL, {}) def get_specific_label(self, label_name=None): """ @@ -165,12 +148,117 @@ def get_specific_label(self, label_name=None): if label_name is None: return [] label_list = self.get_docker_labels() - return [label_name in label_list] + return [label_list[key] for key in label_list.keys() if label_name == key] - def check_baseruntime(self): + def check_from_is_first(self): """ - Function returns docker labels - :return: label dictionary + Function checks if FROM directive is really first directive. 
+ :return: True if FROM is first, False if FROM is not first directive + """ + if self.dfp_structure[0].get('instruction') == 'FROM': + return True + else: + return False + + def check_from_directive_is_valid(self): + """ + Function checks if FROM directive contains valid format like is specified here + http://docs.projectatomic.io/container-best-practices/#_line_rule_section + Regular expression is: ^[a-z0-9.]+(\/[a-z0-9\D.]+)+$ + Example registry: + registry.fedoraproject.org/f26/etcd + registry.fedoraproject.org/f26/flannel + registry.access.redhat.com/rhscl/nginx-18-rhel7 + registry.access.redhat.com/rhel7/rhel-tools + registry.access.redhat.com/rhscl/postgresql-95-rhel7 + + :return: + """ + correct_format = False + struct = self.dfp_structure[0] + if struct.get(INSTRUCT) == 'FROM': + p = re.compile("^[a-z0-9.]+(\/[a-z0-9\D.]+)+$") + if p.search(struct.get('value')) is not None: + correct_format = True + return correct_format + + def check_chained_run_dnf_commands(self): + """ + This function checks that there are no consecutive + RUN commands executing dnf/yum in the Dockerfile, + as these need to be chained. + + BAD examples: + ~~~~~~~~~~~~ + FROM fedora + RUN dnf install foobar1 + RUN dnf clean all + + GOOD example: + ~~~~~~~~~~~~ + FROM fedora + RUN dnf install foobar1 && dnf clean all + :return: True if Dockerfile contains RUN dnf instructions in one row + False if Dockerfile contains RUN dnf instructions in more rows + """ + value = 0 + for struct in self.dfp_structure: + if struct.get(INSTRUCT) == RUN: + if "dnf" in struct.get("value") or "yum" in struct.get("value"): + value += 1 + if int(value) > 1: + return False + return True + + def check_chained_run_rest_commands(self): + """ + Function checks if Dockerfile does not contain more `RUN` commands, + except RUN dnf, in more then one row. 
+ BAD examples: + FROM fedora + RUN ls / + RUN cd / + GOOD example: + FROM fedora + RUN ls / && cd / + :return: True if Dockerfile contains RUN instructions, except dnf, in one row + False if Dockerfile contains RUN instructions, except dnf, in more rows + """ + value = 0 + for struct in self.dfp_structure: + if struct.get(INSTRUCT) == RUN: + if "dnf" not in struct.get("value") and "yum" not in struct.get("value"): + value += 1 + if int(value) > 1: + return False + return True + + def check_clean_all_command(self): + """ + This function checks whether every RUN instruction containing a dnf/yum operation ends with a "dnf/yum clean all". + + :return: True if every dnf/yum instruction contains a cleanup step + False otherwise + """ + for struct in self.dfp_structure: + if struct.get(INSTRUCT) == RUN: + if "dnf" in struct.get("value") or "yum" in struct.get("value"): + if "clean all" in struct.get("value"): + return True + return False + + def check_helpmd_is_present(self): + """ + Function checks if helpmd. 
is present in COPY or ADD directives + :return: True if help.md is present + False if help.md is not specified in Dockerfile """ - if FROM in self.docker_dict: - return [x for x in self.docker_dict[FROM] if "baseruntime/baseruntime" in x] + helpmd_present = False + for instruction in [COPY, ADD]: + try: + helpmd = [help for help in self.docker_dict[instruction] if "help.md" in help] + if helpmd: + helpmd_present = True + except KeyError: + print_info("Instruction %s is not present in Dockerfile", instruction) + return helpmd_present diff --git a/moduleframework/environment_prepare/docker_prepare.py b/moduleframework/environment_prepare/docker_prepare.py index b5488d9..4e98d95 100644 --- a/moduleframework/environment_prepare/docker_prepare.py +++ b/moduleframework/environment_prepare/docker_prepare.py @@ -26,7 +26,8 @@ """ from avocado.utils import service -from moduleframework.common import * +from moduleframework.common import print_info, CommonFunctions +import os class EnvDocker(CommonFunctions): diff --git a/moduleframework/environment_prepare/nspawn_prepare.py b/moduleframework/environment_prepare/nspawn_prepare.py index 0013e33..29d24f7 100644 --- a/moduleframework/environment_prepare/nspawn_prepare.py +++ b/moduleframework/environment_prepare/nspawn_prepare.py @@ -25,7 +25,8 @@ module for environment setup and cleanup, to be able to split action for ansible, more steps instead of one complex """ -from moduleframework.common import * +import os +from moduleframework.common import CommonFunctions, print_info, is_not_silent selinux_state_file="/var/tmp/mtf_selinux_state" setseto = "Permissive" diff --git a/moduleframework/environment_prepare/openshift_prepare.py b/moduleframework/environment_prepare/openshift_prepare.py new file mode 100644 index 0000000..90ba6c6 --- /dev/null +++ b/moduleframework/environment_prepare/openshift_prepare.py @@ -0,0 +1,87 @@ +# -*- coding: utf-8 -*- +# +# Meta test family (MTF) is a tool to test components of a modular Fedora: +# 
https://docs.pagure.org/modularity/ +# Copyright (C) 2017 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Authors: Petr Hracek +# + +""" +module for OpenShift environment setup and cleanup +""" + +import os +from moduleframework.common import CommonFunctions +from moduleframework import common + +selinux_state_file="/var/tmp/mtf_selinux_state" +setseto = "Permissive" + + +class EnvOpenShift(CommonFunctions): + + def prepare_env(self): + common.print_info('Loaded config for name: {}'.format(self.config['name'])) + self.__start_openshift_cluster() + + def cleanup_env(self): + self.__stop_openshift_cluster() + + def __oc_status(self): + oc_status = self.runHost("oc status", ignore_status=True, verbose=common.is_not_silent()) + common.print_debug(oc_status.stdout) + common.print_debug(oc_status.stderr) + return oc_status.exit_status + + def __install_env(self): + """ + Internal method, do not use it anyhow + + :return: None + """ + if common.get_openshift_local(): + if not os.path.exists('/usr/bin/oc'): + self.installTestDependencies(['origin', 'origin-clients']) + + def __start_openshift_cluster(self): + """ + Internal method, do not use it anyhow. 
It starts OpenShift cluster + + :return: None + """ + + if common.get_openshift_local(): + if int(self.__oc_status()) == 0: + common.print_info("Seems like OpenShift is already started.") + else: + common.print_info("Starting OpenShift") + self.runHost("oc cluster up", verbose=common.is_not_silent()) + + def __stop_openshift_cluster(self): + """ + Internal method, do not use it anyhow. It stops OpenShift cluster + + :return: None + """ + if common.get_openshift_local(): + if int(self.__oc_status()) == 0: + common.print_info("Stopping OpenShift") + self.runHost("oc cluster down", verbose=common.is_not_silent()) + else: + common.print_info("OpenShift is already stopped.") + diff --git a/moduleframework/environment_prepare/rpm_prepare.py b/moduleframework/environment_prepare/rpm_prepare.py index 77fa1e1..75ffaba 100644 --- a/moduleframework/environment_prepare/rpm_prepare.py +++ b/moduleframework/environment_prepare/rpm_prepare.py @@ -25,7 +25,7 @@ module for environment setup and cleanup, to be able to split action for ansible, more steps instead of one complex """ -from moduleframework.common import * +from moduleframework.common import CommonFunctions, print_info class EnvRpm(CommonFunctions): diff --git a/moduleframework/helpers/container_helper.py b/moduleframework/helpers/container_helper.py index 993517d..c39203b 100644 --- a/moduleframework/helpers/container_helper.py +++ b/moduleframework/helpers/container_helper.py @@ -22,6 +22,7 @@ import json from moduleframework.common import * +from moduleframework.mtfexceptions import ContainerExc class ContainerHelper(CommonFunctions): @@ -49,10 +50,6 @@ def __init__(self): if "docker=" in self.icontainer: self.jmeno = self.icontainer[7:] self.tarbased = False - elif "docker.io" in self.info['container']: - # Trusted source - self.tarbased = False - self.jmeno = self.icontainer else: # untrusted source self.tarbased = False @@ -98,7 +95,7 @@ def tearDown(self): :return: None """ - super(ContainerHelper,self).tearDown() 
+ super(ContainerHelper, self).tearDown() if get_if_do_cleanup(): print_info("To run a command inside a container execute: ", "docker exec %s /bin/bash" % self.docker_id) @@ -129,7 +126,6 @@ def __load_inspect_json(self): "docker inspect %s" % self.jmeno, verbose=is_not_silent()).stdout)[0]["Config"] - def start(self, args="-it -d", command="/bin/bash"): """ start the docker container @@ -155,7 +151,7 @@ def start(self, args="-it -d", command="/bin/bash"): if self.status() is False: raise ContainerExc( "Container %s (for module %s) is not running, probably DEAD immediately after start (ID: %s)" % ( - self.jmeno, self.moduleName, self.docker_id)) + self.jmeno, self.component_name, self.docker_id)) trans_dict["GUESTPACKAGER"] = self.get_packager() def stop(self): diff --git a/moduleframework/helpers/nspawn_helper.py b/moduleframework/helpers/nspawn_helper.py index 66664e8..d1243b1 100644 --- a/moduleframework/helpers/nspawn_helper.py +++ b/moduleframework/helpers/nspawn_helper.py @@ -20,16 +20,14 @@ # Authors: Petr Hracek # -import shutil -import re -import glob import time import hashlib +import os -from moduleframework.timeoutlib import Retry -from moduleframework.common import * -from moduleframework.exceptions import * +from moduleframework.common import BASEPATHDIR, translate_cmd, \ + get_if_reuse, trans_dict, print_info, is_debug, get_if_do_cleanup from moduleframework.helpers.rpm_helper import RpmHelper +from mtf.backend.nspawn import Image, Container class NspawnHelper(RpmHelper): @@ -48,26 +46,16 @@ def __init__(self): """ super(NspawnHelper, self).__init__() self.baseprefix = os.path.join(BASEPATHDIR, "chroot_") - self.__selinuxState = None time.time() actualtime = time.time() self.chrootpath_baseimage = "" if not get_if_reuse(): - self.jmeno = "%s_%r" % (self.moduleName, actualtime) + self.jmeno = "%s_%r" % (self.component_name, actualtime) else: - self.jmeno = self.moduleName + self.jmeno = self.component_name self.chrootpath = 
os.path.abspath(self.baseprefix + self.jmeno) - self.__default_command_sleep = 2 - def __machined_restart(self): - """ - Machined is not reliable well, restart it whenever you want. - :return: None - """ - #return self.runHost("systemctl restart systemd-machined", verbose=is_debug(), ignore_status=True) - # remove restarting when used systemd-run - pass def setUp(self): """ @@ -82,140 +70,19 @@ def setUp(self): trans_dict["ROOT"] = self.chrootpath print_info("name of CHROOT directory:", self.chrootpath) self.setRepositoriesAndWhatToInstall() - self.__prepareSetup() - self._callSetupFromConfig() - self.__bootMachine() - - def __is_killed(self): - for foo in range(DEFAULTRETRYTIMEOUT): - time.sleep(1) - out = self.runHost("machinectl status %s" % self.jmeno, verbose=is_debug(), ignore_status=True) - if out.exit_status != 0: - print_debug("NSPAWN machine %s stopped" % self.jmeno) - return True - raise NspawnExc("Unable to stop machine %s within %d" % (self.jmeno, DEFAULTRETRYTIMEOUT)) - - def __is_booted(self): - for foo in range(DEFAULTRETRYTIMEOUT): - time.sleep(1) - out = self.runHost("machinectl status %s" % self.jmeno, verbose=is_debug(), ignore_status=True) - if "systemd-logind" in out.stdout: - time.sleep(2) - print_debug("NSPAWN machine %s booted" % self.jmeno) - return True - raise NspawnExc("Unable to start machine %s within %d" % (self.jmeno, DEFAULTRETRYTIMEOUT)) - - def __create_snaphot(self): - """ - Internal method, do not use it anyhow - - :return: None - """ - - if get_if_do_cleanup(): - # delete directory with same same (in case used option DO NOT CLEANUP) - if os.path.exists(self.chrootpath): - shutil.rmtree(self.chrootpath, ignore_errors=True) - # copy files from base image directory to working copy (instead of overlay) - if self.chrootpath_baseimage != self.chrootpath and \ - not os.path.exists(os.path.join(self.chrootpath, "usr")): - self.runHost("cp -rf %s %s" % (self.chrootpath_baseimage, self.chrootpath)) - - def __prepareSetup(self): - """ - 
Internal method, do not use it anyhow - - :return: None - """ + # never move this line to __init__ this localtion can change before setUp (set repositories) self.chrootpath_baseimage = os.path.abspath(self.baseprefix + - self.moduleName + + self.component_name + "_image_" + hashlib.md5(" ".join(self.repos)).hexdigest()) - if not os.path.exists(os.path.join(self.chrootpath_baseimage, "usr")): - repos_to_use = "" - counter = 0 - for repo in self.repos: - counter = counter + 1 - repos_to_use += " --repofrompath %s%d,%s" % ( - self.moduleName, counter, repo) - try: - self.runHost( - ("%s install --nogpgcheck --setopt=install_weak_deps=False " - "--installroot %s --allowerasing --disablerepo=* --enablerepo=%s* %s %s") % - (trans_dict["HOSTPACKAGER"], self.chrootpath_baseimage, self.moduleName, repos_to_use, self.whattoinstallrpm), verbose=is_not_silent()) - except Exception as e: - raise NspawnExc( - "ERROR: Unable to install packages %s\n original exeption:\n%s\n" % - (self.whattoinstallrpm, str(e))) - # COPY yum repository inside NSPAW, to be able to do installations - insiderepopath = os.path.join(self.chrootpath_baseimage, self.yumrepo[1:]) - try: - os.makedirs(os.path.dirname(insiderepopath)) - except: - pass - counter = 0 - f = open(insiderepopath, 'w') - for repo in self.repos: - counter = counter + 1 - add = """[%s%d] -name=%s%d -baseurl=%s -enabled=1 -gpgcheck=0 - -""" % (self.moduleName, counter, self.moduleName, counter, repo) - f.write(add) - f.close() - - # shutil.copy(self.yumrepo, insiderepopath) - # self.runHost("sed s/enabled=0/enabled=1/ -i %s" % insiderepopath, ignore_status=True) - for repo in self.repos: - if "file:///" in repo: - src = repo[7:] - srcto = os.path.join(self.chrootpath_baseimage, src[1:]) - try: - os.makedirs(os.path.dirname(srcto)) - except Exception as e: - print_debug(e, "Unable to create DIR (already created)", srcto) - pass - try: - shutil.copytree(src, srcto) - except Exception as e: - print_debug(e, "Unable to copy files 
from:", src, "to:", srcto) - pass - pkipath = "/etc/pki/rpm-gpg" - pkipath_ch = os.path.join(self.chrootpath_baseimage, pkipath[1:]) - try: - os.makedirs(pkipath_ch) - except BaseException: - pass - for filename in glob.glob(os.path.join(pkipath, '*')): - shutil.copy(filename, pkipath_ch) - print_info("repo prepared:", insiderepopath, open(insiderepopath, 'r').read()) - else: - print_info("Base image for NSPAWN already exist: %s" % self.chrootpath_baseimage) - - def __bootMachine(self): - """ - Internal function. - Start machine via nspawn and wait untill booted. - - :return: None - """ - self.__create_snaphot() - print_debug("starting NSPAWN") - nspawncont = process.SubProcess( - "systemd-nspawn --machine=%s -bD %s" % - (self.jmeno, self.chrootpath), verbose=is_debug()) - nspawncont.start() - self.__is_booted() - print_info("machine: %s started" % self.jmeno) - - trans_dict["GUESTIPADDR"] = trans_dict["HOSTIPADDR"] - self.ipaddr = trans_dict["GUESTIPADDR"] + self.__image_base = Image(location=self.chrootpath_baseimage, packageset=self.whattoinstallrpm, repos=self.repos, ignore_installed=True) + self.__image = self.__image_base.create_snapshot(self.chrootpath) + self.__container = Container(image=self.__image, name=self.jmeno) + self._callSetupFromConfig() + self.__container.boot_machine() def run (self, command, **kwargs): - return self.__run_systemdrun(command, **kwargs) + return self.__container.execute(command=translate_cmd(command, translation_dict=trans_dict), **kwargs) def start(self, command="/bin/true"): """ @@ -226,90 +93,10 @@ def start(self, command="/bin/true"): :return: None """ command = self.info.get('start') or command - self.__run_systemdrun(command, internal_background=False, ignore_bg_processes=True, verbose=is_debug()) + self.run(command, internal_background=False, ignore_bg_processes=True, verbose=is_debug()) self.status() trans_dict["GUESTPACKAGER"] = self.get_packager() - def __run_systemdrun(self, command, internal_background=False, 
**kwargs): - """ - Run command inside nspawn module type. It uses systemd-run. - since Fedora 26 there is important --wait option - - :param command: str command to be executed - :param kwargs: dict parameters passed to avocado.process.run - :return: avocado.process.run - """ - self.__machined_restart() - lpath = "/var/tmp" - add_wait_var = "--wait" - add_sleep_infinite = "" - if internal_background: - add_wait_var="" - add_sleep_infinite = "&& sleep infinity" - try: - comout = self.runHost("""systemd-run {wait} -M {machine} /bin/bash -c "({comm})>{pin}/stdout 2>{pin}/stderr {sleep}" """.format( - wait=add_wait_var, - machine=self.jmeno, - comm=sanitize_cmd(command), - pin=lpath, - sleep=add_sleep_infinite), - **kwargs) - if not internal_background: - with open("{chroot}{pin}/stdout".format(chroot=self.chrootpath, pin=lpath), 'r') as content_file: - comout.stdout = content_file.read() - with open("{chroot}{pin}/stderr".format(chroot=self.chrootpath, pin=lpath), 'r') as content_file: - comout.stderr = content_file.read() - comout.command = command - print_debug(comout) - return comout - except process.CmdError as e: - raise CmdExc("Command in SYSTEMD-RUN failed: %s" % command, e) - - def __run_machinectl(self, command, **kwargs): - """ - Run command inside nspawn module type. It uses machinectl shell command. - It need few workarounds, that's why it the code seems so strange - - TODO: workaround because machinedctl is unable to behave like ssh. 
It is bug - systemd-run should be used, but in F-25 it does not contain --wait option - - :param command: str command to be executed - :param kwargs: dict parameters passed to avocado.process.run - :return: avocado.process.run - """ - self.__machined_restart() - lpath = "/var/tmp" - if not kwargs: - kwargs = {} - should_ignore = kwargs.get("ignore_status") - kwargs["ignore_status"] = True - comout = self.runHost("""machinectl shell root@{machine} /bin/bash -c "({comm})>{pin}/stdout 2>{pin}/stderr; echo $?>{pin}/retcode; sleep {defaultsleep}" """.format( - machine=self.jmeno, - comm=sanitize_cmd(command), - pin=lpath, - defaultsleep=self.__default_command_sleep ), - **kwargs) - if comout.exit_status != 0: - raise NspawnExc("This command should not fail anyhow inside NSPAWN:", sanitize_cmd(command)) - try: - kwargs["verbose"] = is_not_silent() - b = self.runHost( - 'bash -c "cat {chroot}{pin}/stdout; cat {chroot}{pin}/stderr > /dev/stderr; exit `cat {chroot}{pin}/retcode`"'.format( - chroot=self.chrootpath, - pin=lpath), - **kwargs) - finally: - comout.stdout = b.stdout - comout.stderr = b.stderr - comout.exit_status = b.exit_status - removesworkaround = re.search('[^(]*\((.*)\)[^)]*', comout.command) - if removesworkaround: - comout.command = removesworkaround.group(1) - if comout.exit_status == 0 or should_ignore: - return comout - else: - raise process.CmdError(comout.command, comout) - def selfcheck(self): """ Test if default command will pass, it is more important for nspawn, because it happens that @@ -317,7 +104,7 @@ def selfcheck(self): :return: avocado.process.run """ - return self.run().stdout + return self.run(command="/bin/true").stdout def copyTo(self, src, dest): """ @@ -327,9 +114,7 @@ def copyTo(self, src, dest): :param dest: destination file on module :return: None """ - self.runHost( - " machinectl copy-to %s %s %s" % - (self.jmeno, src, dest), timeout=DEFAULTPROCESSTIMEOUT, ignore_bg_processes=True, verbose=is_not_silent()) + 
self.__container.copy_to(src, dest) def copyFrom(self, src, dest): """ @@ -339,9 +124,7 @@ def copyFrom(self, src, dest): :param dest: destination file on host :return: None """ - self.runHost( - " machinectl copy-from %s %s %s" % - (self.jmeno, src, dest), timeout=DEFAULTPROCESSTIMEOUT, ignore_bg_processes=True, verbose=is_not_silent()) + self.__container.copy_from(src, dest) def tearDown(self): """ @@ -351,28 +134,13 @@ def tearDown(self): """ if get_if_do_cleanup() and not get_if_reuse(): try: - self.stop() - except Exception as stopexception: - print_info("Stop action caused exception. It should not happen.", - stopexception) + self.__container.stop() + except: pass - self.__machined_restart() try: - self.runHost("machinectl poweroff %s" % self.jmeno, verbose=is_not_silent()) - self.__is_killed() - except Exception as poweroffex: - print_info("Unable to stop machine via poweroff, terminating", poweroffex) - try: - self.runHost("machinectl terminate %s" % self.jmeno, ignore_status=True) - self.__is_killed() - except Exception as poweroffexterm: - print_info("Unable to stop machine via terminate, STRANGE", poweroffexterm) - time.sleep(DEFAULTRETRYTIMEOUT) - pass + self.__container.rm() + except: pass - self._callCleanupFromConfig() - if os.path.exists(self.chrootpath): - shutil.rmtree(self.chrootpath, ignore_errors=True) else: print_info("tearDown skipped", "running nspawn: %s" % self.jmeno) print_info("To connect to a machine use:", diff --git a/moduleframework/helpers/openshift_helper.py b/moduleframework/helpers/openshift_helper.py new file mode 100644 index 0000000..ae158a4 --- /dev/null +++ b/moduleframework/helpers/openshift_helper.py @@ -0,0 +1,310 @@ +# -*- coding: utf-8 -*- +# +# This Modularity Testing Framework helps you to write tests for modules +# Copyright (C) 2017 Red Hat, Inc. 
+# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Authors: Petr Hracek +# + +import json +import os +import time +from moduleframework import common +from moduleframework.helpers.container_helper import ContainerHelper +from moduleframework.mtfexceptions import ConfigExc + + +class OpenShiftHelper(ContainerHelper): + """ + Basic Helper class for OpenShift container module type + + :avocado: disable + """ + + def __init__(self): + """ + set basic object variables + """ + super(OpenShiftHelper, self).__init__() + self.name = None + self.icontainer = self.get_url() + self.pod_id = None + self._pod_status = None + if not self.icontainer: + raise ConfigExc("No container image specified in the configuration file or environment variable.") + if "docker=" in self.icontainer: + self.container_name = self.icontainer[7:] + else: + # untrusted source + self.container_name = self.icontainer + # application name is taken from docker.io/modularitycontainer/memcached + self.app_name = self.container_name.split('/')[-1] + self.app_ip = None + common.print_debug(self.icontainer, self.app_name) + + def _app_exists(self): + """ + It checks if an application already exists in OpenShift environment + :return: True, application exists + False, application does not exist + """ + oc_status = self.runHost("oc get dc %s -o json" % 
self.app_name, ignore_status=True) + if int(oc_status.exit_status) == 0: + common.print_info("Application already exists.") + return True + oc_services = self.runHost("oc get services -o json", ignore_status=True).stdout + oc_services = self._convert_string_to_json(oc_services) + # Check if 'items' in json output is empty or not + if not oc_services: + return False + # check if 'items', which is not empty, in json output contains app_name + if not self._check_app_in_json(oc_services, self.app_name): + return False + return True + + def _check_app_in_json(self, json_output, app_name): + """ + Function checks if json_output contains container with specified name + + + :param json_output: json output from an OpenShift command + :param app_name: an application which should be checked + :return: True if the application exists + False if the application does not exist + """ + try: + labels = json_output.get('metadata').get('labels') + if labels.get('app') == app_name: + # In metadata dictionary and name is stored pod_name + self.pod_id = json_output.get('metadata').get('name') + return True + except KeyError: + return False + + def _convert_string_to_json(self, inp_string): + """ + It converts a string to json format and returns first item in items. + :param inp_string: String to format to json + :return: items from OpenShift output + """ + try: + items = json.loads(inp_string) + return items.get('items') + except TypeError: + return None + + def _remove_apps_from_openshift_namespaces(self, oc_service="svc"): + """ + It removes an application from specific "namespace" like svc, dc, is. 
+ :param oc_service: Service from which we would like to remove application + """ + # Check status of svc/dc/is + oc_get = self.runHost("oc get %s -o json" % oc_service, ignore_status=True).stdout + oc_get = self._convert_string_to_json(oc_get) + # The output is like + # dovecot 172.30.1.1:5000/myproject/dovecot latest 15 minutes ago + # memcached 172.30.1.1:5000/myproject/memcached latest 13 minutes ago + + for item in oc_get: + if self._check_app_in_json(item, self.app_name): + # If application exists in svc / dc / is namespace, then remove it + oc_delete = self.runHost("oc delete %s %s" % (oc_service, self.app_name), + ignore_status=True, + verbose=common.is_not_silent()) + + def _app_remove(self): + """ + Function removes an application from all OpenShift namespaces like 'svc', 'dc', 'is' + """ + if self._app_exists(): + # TODO get info from oc status and delete relevat svc/dc/is + for ns in ['svc', 'dc', 'is']: + self._remove_apps_from_openshift_namespaces(ns) + + def _create_app(self): + """ + It creates an application in OpenShift environment + """ + # Switching to system user + oc_new_app = self.runHost("oc new-app -l mtf_testing=true %s --name=%s" % (self.container_name, + self.app_name), + ignore_status=True) + common.print_info(oc_new_app.stdout) + time.sleep(1) + + def _get_pod_status(self): + """ + This method checks if the POD is running within OpenShift environment. 
+ :return: True if POD is running with status "Running" + False all other statuses + """ + pod_initiated = False + pod_state = self.runHost("oc get pods -o json", + ignore_status=True, + verbose=common.is_not_silent()) + + pod_state = self._convert_string_to_json(pod_state.stdout) + for pod in pod_state: + if self._check_app_in_json(pod, self.app_name): + self._pod_status = pod.get('status').get('phase') + if self._pod_status == "Running": + pod_initiated = True + break + return pod_initiated + + def _verify_pod(self): + """ + It verifies if an application POD is initiated and ready for testing + :return: False, application is not initiated during 10 seconds + True, application is initiated and ready for testing + """ + pod_initiated = False + for x in range(0, 20): + # We need wait a second before pod is really initiated. + time.sleep(1) + if self._get_pod_status(): + break + return pod_initiated + + def setUp(self): + """ + It is called by child class and it is same methof as Avocado/Unittest has. It prepares environment + for OpenShift testing + * setup environment from config + + :return: None + """ + self._callSetupFromConfig() + self.icontainer = self.get_url() + + def _openshift_login(self, oc_ip="127.0.0.1", oc_user='developer', oc_passwd='developer', env=False): + """ + It logins to an OpenShift environment on specific IP and under user and his password. 
+ :param oc_ip: an IP where is an OpenShift environment running + :param oc_user: an username under which we can login to OpenShift environment + :param oc_passwd: a password for specific username + :param env: is used for specification OpenShift IP, user and password, otherwise defaults are used + :return: + """ + if env: + oc_ip = common.get_openshift_ip() + oc_user = common.get_openshift_user() + oc_passwd = common.get_openshift_passwd() + oc_output = self.runHost("oc login %s:8443 --username=%s --password=%s" % (oc_ip, + oc_user, + oc_passwd), + verbose=common.is_not_silent()) + return oc_output.exit_status + + def tearDown(self): + """ + Cleanup environment and call also cleanup from config + + :return: None + """ + super(OpenShiftHelper, self).tearDown() + try: + self._app_remove() + except Exception as e: + common.print_info(e, "OpenShift application already removed") + pass + + def _get_ip_instance(self): + """ + This method verifies that we can obtain an IP address of the application + deployed within OpenShift. + :return: True: getting IP address was successful + False: getting IP address was not successful + """ + oc_get_service = self.runHost("oc get service -o json") + service = self._convert_string_to_json(oc_get_service.stdout) + try: + for svc in service: + if svc.get('metadata').get('labels').get('app') == self.app_name: + self.ipaddr = svc.get('spec').get("clusterIP") + common.trans_dict['GUESTIPADDR'] = self.ipaddr + return True + except KeyError as e: + common.print_info(e.message) + return False + except IndexError as e: + common.print_info(e.message) + return False + + def getIPaddr(self): + """ + Return protocol (IP or IPv6) address on a POD OpenShift instance. 
+ + It returns IP address of POD instance + + :return: str + """ + return self.ipaddr + + def start(self): + """ + starts the OpenShift application + + :param command: Do not use it directly (It is defined in config.yaml) + :return: None + """ + if not self._app_exists(): + self._create_app() + # Verify application is really deploy and prepared for testing. + self._verify_pod() + self._get_ip_instance() + + def stop(self): + """ + This method checks if the application is deployed within OpenShift environment + and removes service, deployment config and imagestream from OpenShift. + + :return: None + """ + if self._app_exists(): + try: + self._app_remove() + except Exception as e: + common.print_info(e, "OpenShift application already removed") + pass + + def status(self): + """ + Function returns whether the application exists + and is Running in OpenShift environment + + :return: True application exists + False application does not exist. + """ + status = False + if self._app_exists(): + if self._get_pod_status(): + status = True + return status + + def run(self, command="ls /", **kwargs): + """ + Run command inside OpenShift POD, all params what allows avocado are passed inside shell,ignore_status, etc. 
+ https://docs.openshift.com/container-platform/3.6/dev_guide/executing_remote_commands.html + + :param command: str + :param kwargs: dict + :return: avocado.process.run + """ + return self.runHost("oc exec %s %s" % (self.pod_id, + common.sanitize_cmd(command)), + **kwargs) diff --git a/moduleframework/helpers/rpm_helper.py b/moduleframework/helpers/rpm_helper.py index f16d9ad..e62550b 100644 --- a/moduleframework/helpers/rpm_helper.py +++ b/moduleframework/helpers/rpm_helper.py @@ -22,7 +22,6 @@ from moduleframework import pdc_data from moduleframework.common import * -from moduleframework.exceptions import * class RpmHelper(CommonFunctions): @@ -42,27 +41,11 @@ def __init__(self): # allow to fake environment in ubuntu (for Travis) if not os.path.exists(baserepodir): baserepodir="/var/tmp" - self.yumrepo = os.path.join(baserepodir, "%s.repo" % self.moduleName) - self.whattoinstallrpm = "" + self.yumrepo = os.path.join(baserepodir, "%s.repo" % self.component_name) + self.whattoinstallrpm = [] self.bootstrappackages = [] self.repos = [] - def __setModuleDependencies(self): - if self.is_it_module: - if not get_if_remoterepos(): - temprepositories = self.getModulemdYamlconfig()\ - .get("data",{}).get("dependencies",{}).get("requires",{}) - temprepositories_cycle = dict(temprepositories) - for x in temprepositories_cycle: - pdc = pdc_data.PDCParser() - pdc.setLatestPDC(x, temprepositories_cycle[x]) - temprepositories.update(pdc.generateDepModules()) - self.moduledeps = temprepositories - print_info("Detected module dependencies:", self.moduledeps) - else: - self.moduledeps = {"base-runtime": "master"} - print_info("Remote repositories are enabled", self.moduledeps) - def getURL(self): """ Return semicolon separated string of repositories what will be used, could be simialr to URL param, @@ -81,13 +64,12 @@ def setUp(self): :return: None """ - #self.setModuleDependencies() self.setRepositoriesAndWhatToInstall() self._callSetupFromConfig() self.__prepare() def 
__addModuleDependency(self, url, name=None, stream="master"): - name = name if name else self.moduleName + name = name if name else self.component_name if name in self.dependencylist: self.dependencylist[name]['urls'].append(url) else: @@ -105,24 +87,21 @@ def setRepositoriesAndWhatToInstall(self, repos=[], whattooinstall=None): if repos: self.repos = repos else: - self.repos += self.get_url() + self.repos += get_compose_url() or self.get_url() # add also all dependent modules repositories if it is module # TODO: removed this dependency search - if self.is_it_module: - depend_repos = [get_compose_url_modular_release()] - #for dep in self.moduledeps: - # latesturl = pdc_data.get_repo_url(dep, self.moduledeps[dep]) - # depend_repos.append(latesturl) - # self.__addModuleDependency(url=latesturl, name = dep, stream = self.moduledeps[dep]) - #map(self.__addModuleDependency, depend_repos) - self.repos += depend_repos - #map(self.__addModuleDependency, self.repos) + if self.is_it_module and not get_compose_url(): + # inside this code we don't know anything about modules, this leads to + # generic repositories in pdc_data.PDCParserGeneral + pdcsolver = pdc_data.PDCParserGeneral(self.component_name) + self.repos += [pdcsolver.get_repo()] + self.repos = list(set(self.repos)) if whattooinstall: - self.whattoinstallrpm = " ".join(set(whattooinstall)) + self.whattoinstallrpm = list(set(whattooinstall)) else: - self.bootstrappackages = pdc_data.getBasePackageSet(modulesDict=self.moduledeps, + self.bootstrappackages = pdc_data.getBasePackageSet(modulesDict=None, isModule=self.is_it_module, isContainer=False) - self.whattoinstallrpm = " ".join(set(self.getPackageList() + self.bootstrappackages)) + self.whattoinstallrpm = list(set(self.getPackageList() + self.bootstrappackages)) def __prepare(self): """ @@ -140,7 +119,7 @@ def __prepare(self): enabled=1 gpgcheck=0 -""" % (self.moduleName, counter, self.moduleName, counter, repo) +""" % (self.component_name, counter, 
self.component_name, counter, repo) f.write(add) f.close() self.install_packages() diff --git a/moduleframework/helpfile_linter.py b/moduleframework/helpfile_linter.py new file mode 100644 index 0000000..1ecdc7b --- /dev/null +++ b/moduleframework/helpfile_linter.py @@ -0,0 +1,58 @@ +from __future__ import print_function + +import os +import common + +HELP_MD = "help.md" + + +class HelpMDLinter(object): + """ + Class checks a Help.md file + + It requires only directory with help.md file. + """ + + help_md = None + + def __init__(self, dockerfile=None): + if dockerfile is None: + dir_name = os.getcwd() + else: + dir_name = os.path.dirname(dockerfile) + help_md_file = os.path.join(dir_name, HELP_MD) + common.print_debug("help.md path is %s." % help_md_file) + if os.path.exists(help_md_file): + with open(help_md_file, 'r') as f: + lines = f.readlines() + # Count with all lines which begins with # + self.help_md = [x.strip() for x in lines if x.startswith('#')] + # Count with all lines which begins with % + self.help_md.extend([x.strip() for x in lines if x.startswith('%')]) + else: + common.print_debug("help.md should exists in the %s directory." 
% dir_name) + self.help_md = None + + def get_image_name(self, name): + name = '%% %s' % name + if not self.help_md: + return False + tag_exists = [x for x in self.help_md if name.upper() in x] + return tag_exists + + def get_maintainer_name(self, name): + name = '%% %s' % name + if not self.help_md: + return False + tag_exists = [x for x in self.help_md if name.startswith(x)] + return tag_exists + + def get_tag(self, name): + name = '# %s' % name + tag_found = True + if not self.help_md: + common.print_info("help md does not exist.") + return False + if not [x for x in self.help_md if name.upper() in x]: + tag_found = False + return tag_found diff --git a/moduleframework/module_framework.py b/moduleframework/module_framework.py index 8839627..78be9aa 100644 --- a/moduleframework/module_framework.py +++ b/moduleframework/module_framework.py @@ -26,10 +26,12 @@ what you should use for your tests (inherited) """ -from moduleframework.avocado_testers.avocado_test import * +from moduleframework.avocado_testers.avocado_test import AvocadoTest, get_backend from moduleframework.avocado_testers.container_avocado_test import ContainerAvocadoTest from moduleframework.avocado_testers.nspawn_avocado_test import NspawnAvocadoTest from moduleframework.avocado_testers.rpm_avocado_test import RpmAvocadoTest +from moduleframework.avocado_testers.openshift_avocado_test import OpenShiftAvocadoTest +from moduleframework.mtfexceptions import ModuleFrameworkException PROFILE = None @@ -46,6 +48,3 @@ def skipTestIf(value, text="Test not intended for this module profile"): if value: raise ModuleFrameworkException( "DEPRECATED, don't use this skip, use self.cancel() inside test function, or self.skip() in setUp()") - - - diff --git a/moduleframework/mtf_environment.py b/moduleframework/mtf_environment.py index e9d3c85..3d9e42a 100644 --- a/moduleframework/mtf_environment.py +++ b/moduleframework/mtf_environment.py @@ -24,10 +24,11 @@ """ Module to setup and cleanup the test environment. 
""" -from moduleframework.module_framework import * +from moduleframework.common import get_module_type_base, print_info from moduleframework.environment_prepare.docker_prepare import EnvDocker from moduleframework.environment_prepare.rpm_prepare import EnvRpm from moduleframework.environment_prepare.nspawn_prepare import EnvNspawn +from moduleframework.environment_prepare.openshift_prepare import EnvOpenShift module_name = get_module_type_base() @@ -39,6 +40,8 @@ env = EnvRpm() elif module_name == "nspawn": env = EnvNspawn() +elif module_name == "openshift": + env = EnvOpenShift() def mtfenvset(): diff --git a/moduleframework/mtf_generator.py b/moduleframework/mtf_generator.py index ba98895..755757e 100644 --- a/moduleframework/mtf_generator.py +++ b/moduleframework/mtf_generator.py @@ -33,8 +33,7 @@ .. _Multiline Bash snippet tests: ../user_guide/how_to_write_conf_file#multiline-bash-snippet-tests """ -from __future__ import print_function -from moduleframework.common import CommonFunctions +from common import print_info, CommonFunctions class TestGenerator(CommonFunctions): @@ -89,7 +88,7 @@ def test_%s(self): self.output = self.output + \ ' self.%s(""" %s """, shell=%r)\n' % ( method, line, method == "runHost") - print("Added test (runmethod: %s): %s" % (method, testname)) + print_info("Added test (runmethod: %s): %s" % (method, testname)) def main(): diff --git a/moduleframework/mtf_init.py b/moduleframework/mtf_init.py new file mode 100644 index 0000000..0ee9e29 --- /dev/null +++ b/moduleframework/mtf_init.py @@ -0,0 +1,142 @@ +# -*- coding: utf-8 -*- +# Script generates super easy template of test for module docker +# Purpose of this script is to generate needed files to start testing +# Author Petr Sklenar psklenar@gmail.com +# +# Meta test family (MTF) is a tool to test components of a modular Fedora: +# https://docs.pagure.org/modularity/ +# Copyright (C) 2017 Red Hat, Inc. 
+# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# he Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# + +import argparse +import os.path +import logging +import sys +import yaml + +logger = logging.getLogger("mtf-init") + +# path fot templates test.py: +TEMPLATE_TEST = '/usr/share/moduleframework/examples/template/test.py' + + +def set_logging(level=logging.INFO): + global logger + logger.setLevel(level) + + handler = logging.StreamHandler(sys.stderr) + handler.setLevel(logging.DEBUG) + + formatter = logging.Formatter( + '%(asctime)s %(levelname)-6s %(message)s', '%H:%M:%S') + handler.setFormatter(formatter) + logger.addHandler(handler) + + mekk_logger = logging.getLogger("mekk.xmind") + null_handler = logging.NullHandler() + mekk_logger.addHandler(null_handler) + + +def cli(): + parser = argparse.ArgumentParser( + description="Create template of your first test!", + ) + parser.add_argument('--verbose', '-v', action='store_true', default=False) + parser.add_argument("--name", "-n", action="store", default="name not given", help='Name of module for testing') + parser.add_argument("--container", "-c", action="store", required=True, + help='Specify container path, example: docker.io/modularitycontainers/memcached') + + args = parser.parse_args() + + set_logging(level=logging.DEBUG if args.verbose else logging.INFO) + + return args + + +class Template(object): + 
def __init__(self, name, container): + self.name = name + self.container = container + + def set_content_config_yaml(self): + self.filePathConfig = 'config.yaml' + data = {"document" : "meta-test", + "version" : "1", + "name" : "xxx", + "default_module" : "docker", + "module" : {"docker" : {"container" : "xxx"}}} + data['name'] = self.name + data['module']['docker']['container'] = self.container + self.configYaml = yaml.dump(data) + logger.debug("{0}\n{1}".format(self.filePathConfig, self.configYaml)) + + def set_content_test_py(self): + # local name of the file: + self.filePathTest = 'test.py' + # use it from examples/template directory + with open(TEMPLATE_TEST,'r') as file: + self.test = file.read() + file.close() + + logger.debug("{0}\n{1}".format(self.filePathTest, self.test)) + + def confirm(self): + gogo = raw_input("Continue? yes/no\n") + if gogo.lower() == 'yes': + exit_condition = 0 + return exit_condition + elif gogo.lower() == "no": + exit_condition = 1 + exit(1) + return exit_condition + else: + print "Please answer with yes or no." + return 2 + + def check_file(self): + if os.path.isfile(self.filePathConfig) and os.path.isfile(self.filePathTest): + print("!!! 
File exists, rewrite?") + continue1=2 + while continue1 is 2: + continue1=self.confirm() + if continue1 is 1: + return False + return True + + def save(self): + with open(self.filePathConfig,'w') as f1: + f1.write(self.configYaml) + logger.debug("{0} was changed".format(self.filePathConfig)) + f1.close() + + with open(self.filePathTest,'w') as f2: + f2.write(self.test) + logger.debug("{0} was changed".format(self.filePathTest)) + f2.close() + + +def main(): + args = cli() + logger.debug("Options: name={0}, container={1}".format(args.name, args.container)) + resobj = Template(args.name, args.container) + resobj.set_content_config_yaml() + resobj.set_content_test_py() + if not resobj.check_file(): + print("do nothing") + exit(1) + resobj.save() + print("Done, to run test:\n\tsudo mtf test.py") diff --git a/moduleframework/mtf_log_parser.py b/moduleframework/mtf_log_parser.py deleted file mode 100644 index f199cf8..0000000 --- a/moduleframework/mtf_log_parser.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Meta test family (MTF) is a tool to test components of a modular Fedora: -# https://docs.pagure.org/modularity/ -# Copyright (C) 2017 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# he Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Authors: Petr Sklenar -# - -""" -Utility for reading avocado json files. 
-""" - -import sys -import json - -def main(): - try: - json_data=open(sys.argv[1]).read() - data = json.loads(json_data) - except (IOError, ValueError) as e: - # file is not readable as json: No JSON object could be decoded - print(e) - exit(1) - except: - print("no file: specify 1 argument as existing json file") - exit(3) - delimiter="" - for i in data['tests']: - if i.get('status') in ['ERROR','FAIL']: - print(delimiter) - print("TEST: {0}".format(i.get('id'))) - print("ERROR: {0}".format(i.get('fail_reason'))) - print(" {0}".format(i.get('logfile'))) - delimiter = "-------------------------" - diff --git a/moduleframework/mtf_scheduler.py b/moduleframework/mtf_scheduler.py new file mode 100755 index 0000000..c24b54b --- /dev/null +++ b/moduleframework/mtf_scheduler.py @@ -0,0 +1,295 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Tool to start MTF tests +# Author Petr Sklenar psklenar@gmail.com +# +# Meta test family (MTF) is a tool to test components of a modular Fedora: +# https://docs.pagure.org/modularity/ +# Copyright (C) 2017 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# he Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +import argparse +import os +import moduleframework +import tempfile +import json + +from avocado.utils import process +from moduleframework import common +from mtf.metadata.tmet.filter import filtertests +from mtf.metadata.tmet import common as metadata_common + + +def mtfparser(): + # for right name of man; man page generator needs it: script_name differs, its defined in setup.py + script_name = "mtf" + description = \ +""" +VARIABLES + + AVOCADO_LOG_DEBUG=yes enables avocado debug output. + + DEBUG=yes enables debugging mode to test output. + + CONFIG defines the module configuration file. It defaults to config.yaml. + + MODULE defines tested module type, if default-module is not set in config.yaml. + + =docker uses the docker section of config.yaml. + =rpm uses the rpm section of config.yaml and tests RPMs directly on a host. + =nspawn tests RPMs in a virtual environment with systemd-nspawn. + + URL overrides the value of module.docker.container or module.rpm.repo. + The URL should correspond to the MODULE variable, for example: + URL=docker.io/modularitycontainers/haproxy if MODULE=docker + URL=https://phracek.fedorapeople.org/haproxy-module-repo # if MODULE=nspawn or MODULE=rpm + + MODULEMDURL overwrites the location of a moduleMD file. + + COMPOSEURL overwrites the location of a compose Pungi build. + + MTF_SKIP_DISABLING_SELINUX=yes + does not disable SELinux. In nspawn type on Fedora 25 SELinux should be disabled, + because it does not work well with SELinux enabled. + + MTF_DO_NOT_CLEANUP=yes does not clean up module after tests execution. + + MTF_REUSE=yes uses the same module between tests. It speeds up test execution. + + MTF_REMOTE_REPOS=yes disables downloading of Koji packages and creating a local repo. + + MTF_DISABLE_MODULE=yes disables module handling to use nonmodular test mode. 
+""" + parser = argparse.ArgumentParser( + # TODO + prog="{0}".format(script_name), + description=description, + formatter_class=argparse.RawTextHelpFormatter, + epilog="see http://meta-test-family.readthedocs.io for more info", + usage="[VARIABLES] {0} [options] local_tests".format(script_name), + ) + parser.add_argument("--linter", "-l", action="store_true", + default=False, help='adds additional compose checks') + parser.add_argument("--setup", action="store_true", + default=False, help='Setup by mtfenvset') + parser.add_argument("--action", action="store", default='run', + help='Action for avocado, see avocado --help for subcommands') + parser.add_argument("--version", action="store_true", + default=False, help='show version and exit') + parser.add_argument("--metadata", action="store_true", + default=False, help="""load configuration for test sets from metadata file + (https://github.com/fedora-modularity/meta-test-family/blob/devel/mtf/metadata/README.md)""") + + + # Solely for the purpose of manpage generator, copy&paste from setup.py + parser.man_short_description = \ +""" +tool to test components for a modular Fedora. + +mtf is a main binary file of Meta-Test-Family. + +It tests container images and/or modules with user defined tests using avocado framework as test runner. +""" + + # parameters tights to avocado + group_avocado = parser.add_argument_group( + 'arguments forwarded to avocado') + + group_avocado.add_argument( + "--xunit", action="store", help='Enable xUnit result format and write it to FILE. 
Use - to redirect to the standard output.') + # some useful bash variables + # there are more possible variables up to the doc, not sure what could be like options too + group = parser.add_argument_group( + 'additional arguments are like environment variables up to the http://meta-test-family.readthedocs.io/en/latest/user_guide/environment_variables.html ') + group.add_argument("--module", action="store", + help='Module type, like: docker, nspawn or rpm') + group.add_argument("--debug", action="store_true", help='more logging') + group.add_argument("--config", action="store", + help='defines the module configuration file') + group.add_argument("--url", action="store", + help='URL overrides the value of module.docker.container or module.rpm.repo.') + group.add_argument("--modulemdurl", action="store", + help='overwrites the location of a moduleMD file') + return parser + + +def cli(): + # unknown options are forwarded to avocado run + args, unknown = mtfparser().parse_known_args() + + if args.version: + print "0.7.7" + exit(0) + + # uses additional arguments, set up variable asap, its used afterwards: + if args.debug: + os.environ['DEBUG'] = 'yes' + os.environ['AVOCADO_LOG_DEBUG'] = 'yes' + if args.config: + os.environ['CONFIG'] = args.config + if args.url: + os.environ['URL'] = args.url + if args.modulemdurl: + os.environ['MODULEMDURL'] = args.modulemdurl + + common.print_debug("Options: linter={0}, setup={1}, action={2}, module={3}".format( + args.linter, args.setup, args.action, args.module)) + common.print_debug( + "remaining options for avocado or test files: {0}".format(unknown)) + + # environment usage: + # read: os.environ.get('MODULE') + # write: os.environ['MODULE'] + + # MODULE could be from: + # 1. common.get_module_type() ... it reads config.yaml and treceback if it doesn't exist + # 2. environment ... MODULE=docker etc + # 3. argument ... 
--module=docker + try: + args.module = common.get_module_type() + # TODO it wrongly writes: 'Using default minimal config ...', change in common.py + except moduleframework.mtfexceptions.ModuleFrameworkException: + pass + + if os.environ.get('MODULE') is not None: + # environment is the highest priority because mtf uses environment (too much) + args.module = os.environ['MODULE'] + + if args.module: + # setup also environment + os.environ['MODULE'] = args.module + + if args.module in common.get_backend_list(): + # for debug purposes, to be sure about module variables or options + common.print_debug("MODULE={0}, options={1}".format( + os.environ.get('MODULE'), args.module)) + else: + # TODO: what to do here? whats the defaults value for MODULE, do I know it? + common.print_info("MODULE={0} ; we support {1} \n === expecting your magic, enjoy! === ".format( + os.environ.get('MODULE'), common.get_backend_list())) + + common.print_debug("MODULE={0}".format(os.environ.get('MODULE'))) + return args, unknown + + +class AvocadoStart(object): + tests = [] + json_tmppath = None + additionalAvocadoArg = '' + + def __init__(self, args, unknown): + # choose between TESTS and ADDITIONAL ENVIRONMENT from options + if args.linter: + self.tests.append( + "{MTF_TOOLS}/*.py".format(MTF_TOOLS=metadata_common.MetadataLoaderMTF.MTF_LINTER_PATH)) + self.args = args + + for param in unknown: + # take care of this, see tags for safe/unsafe: + # http://avocado-framework.readthedocs.io/en/52.0/WritingTests.html#categorizing-tests + if os.path.exists(param): + # this is list of tests in local file + self.tests.append(param) + else: + # this is additional avocado param + self.additionalAvocadoArg += " {0} ".format(param) + if self.args.metadata: + common.print_info("Using Metadata loader for tests and filtering") + metadata_tests = filtertests(backend="mtf", location=os.getcwd(), linters=False, tests=[], tags=[], relevancy="") + tests_dict = [x[metadata_common.SOURCE] for x in metadata_tests] + 
self.tests += tests_dict + common.print_debug("Loaded tests via metadata file: %s" % tests_dict) + common.print_debug("tests = {0}".format(self.tests)) + common.print_debug("additionalAvocadoArg = {0}".format( + self.additionalAvocadoArg)) + + + def avocado_run(self): + self.json_tmppath = tempfile.mktemp() + avocado_args = "--json {JSON_LOG}".format( + JSON_LOG=self.json_tmppath) + if self.args.xunit: + avocado_args += " --xunit {XUNIT} ".format(XUNIT=self.args.xunit) + avocadoAction = "avocado {ACTION} {AVOCADO_ARGS}".format( + ACTION=self.args.action, AVOCADO_ARGS=avocado_args) + + # run avocado with right cmd arguments + bash = process.run("{AVOCADO} {a} {b}".format( + AVOCADO=avocadoAction, a=self.additionalAvocadoArg, b=" ".join(self.tests)), shell=True, ignore_status=True) + common.print_info(bash.stdout, bash.stderr) + common.print_debug("Command used: ", bash.command) + return bash.exit_status + + def avocado_general(self): + # additional parameters + # self.additionalAvocadoArg: its from cmd line, whats unknown to this tool + avocado_args = "" # when needed => specify HERE your additional stuff + avocadoAction = "avocado {ACTION} {AVOCADO_ARGS}".format( + ACTION=self.args.action, AVOCADO_ARGS=avocado_args) + bash = process.run("{AVOCADO} {a} {b}".format( + AVOCADO=avocadoAction, a=self.additionalAvocadoArg, b=" ".join(self.tests)), shell=True, ignore_status=True) + common.print_info(bash.stdout, bash.stderr) + common.print_debug("Command used: ", bash.command) + return bash.exit_status + + def show_error(self): + if os.path.exists(self.json_tmppath): + try: + # file has to by json, otherwise it fails + json_data = open(self.json_tmppath).read() + data = json.loads(json_data) + except (IOError, ValueError) as e: + # file is not readable as json: No JSON object could be decoded + print(e) + # remove file as its not readable + os.remove(self.json_tmppath) + # fatal error when this command fails, its unexpected + exit(127) + # errors follow after 'normal' 
output with no delimiter, then with ------- + delimiter = "" + for testcase in data['tests']: + if testcase.get('status') in ['ERROR', 'FAIL']: + common.print_info(delimiter) + common.print_info("TEST: {0}".format(testcase.get('id'))) + common.print_info("ERROR: {0}".format( + testcase.get('fail_reason'))) + common.print_info(" {0}".format( + testcase.get('logfile'))) + delimiter = "-------------------------" + os.remove(self.json_tmppath) + + +def main(): + common.print_debug('verbose/debug mode') + args, unknown = cli() + + if args.setup: + # mtfenvset need bash environment! + from moduleframework.mtf_environment import mtfenvset + mtfenvset() + + a = AvocadoStart(args, unknown) + if args.action == 'run': + returncode = a.avocado_run() + a.show_error() + else: + # when there is any need, change general method or create specific one: + returncode = a.avocado_general() + exit(returncode) + + + + diff --git a/moduleframework/exceptions.py b/moduleframework/mtfexceptions.py similarity index 84% rename from moduleframework/exceptions.py rename to moduleframework/mtfexceptions.py index 38775e7..996c21b 100644 --- a/moduleframework/exceptions.py +++ b/moduleframework/mtfexceptions.py @@ -24,10 +24,6 @@ Custom exceptions library. 
""" -from __future__ import print_function -import sys -import linecache - class ModuleFrameworkException(Exception): """ @@ -36,15 +32,7 @@ class ModuleFrameworkException(Exception): """ def __init__(self, *args, **kwargs): super(ModuleFrameworkException, self).__init__( - 'EXCEPTION MTF: ', *args, **kwargs) - exc_type, exc_obj, tb = sys.exc_info() - if tb is not None: - f = tb.tb_frame - lineno = tb.tb_lineno - filename = f.f_code.co_filename - linecache.checkcache(filename) - line = linecache.getline(filename, lineno, f.f_globals) - print("-----------\n| EXCEPTION IN: {} \n| LINE: {}, {} \n| ERROR: {}\n-----------".format(filename, lineno, line.strip(), exc_obj)) + 'EXCEPTION MTF: ' + str(args), **kwargs) class NspawnExc(ModuleFrameworkException): diff --git a/moduleframework/pdc_data.py b/moduleframework/pdc_data.py index ab10765..567633d 100644 --- a/moduleframework/pdc_data.py +++ b/moduleframework/pdc_data.py @@ -29,156 +29,100 @@ """ import yaml -import re -from avocado import utils -from common import * +import os +import sys +from avocado.utils import process +from common import print_info, DEFAULTRETRYCOUNT, DEFAULTRETRYTIMEOUT, \ + get_if_remoterepos, BASEPATHDIR, MODULEFILE, print_debug,\ + is_debug, ARCH, is_recursive_download, trans_dict, get_odcs_auth +from moduleframework import mtfexceptions from pdc_client import PDCClient from timeoutlib import Retry +try: + from odcs.client.odcs import ODCS, AuthMech +except: + print_info("ODCS library cannot be imported. ODCS is not supported") -PDC_SERVER = "https://pdc.fedoraproject.org/rest_api/v1/unreleasedvariants" - -def getBasePackageSet(modulesDict=None, isModule=True, isContainer=False): - """ - Get list of base packages (for bootstrapping of various module types) - It is used internally, you should not use it in case you don't know where to use it. - - :param modulesDict: dictionary of dependent modules - :param isModule: bool is module - :param isContainer: bool is contaner? 
- :return: list of packages to install - """ - # nspawn container need to install also systemd to be able to boot - out = [] - brmod = "base-runtime" - brmod_profiles = ["container", "baseimage"] - BASEPACKAGESET_WORKAROUND = ["systemd"] - BASEPACKAGESET_WORKAROUND_NOMODULE = BASEPACKAGESET_WORKAROUND + ["dnf"] - pdc = None - basepackageset = [] - if isModule: - # TODO: workaround, when disabled local compose repos - if not modulesDict: - modulesDict[brmod] = "master" - if modulesDict.get(brmod): - print_info("Searching for packages base package set inside %s" % brmod) - pdc = PDCParser() - pdc.setLatestPDC(brmod, modulesDict[brmod]) - for pr in brmod_profiles: - if pdc.getmoduleMD()['data']['profiles'].get(pr): - basepackageset = pdc.getmoduleMD( - )['data']['profiles'][pr]['rpms'] - break - if isContainer: - out = basepackageset - else: - out = basepackageset + BASEPACKAGESET_WORKAROUND - else: - if isContainer: - out = basepackageset - else: - out = basepackageset + BASEPACKAGESET_WORKAROUND_NOMODULE - print_info("ALL packages to install:", out) - return out - -def get_repo_url(wmodule="base-runtime", wstream="master", fake=False): - """ - Return URL location of rpm repository. - It reads data from PDC and construct url locator. - It is used to solve repos for dependent modules (eg. 
memcached is dependent on perl and baseruntime) - :param wmodule: module name - :param wstream: module stream - :param fake: - :return: str - """ - if fake: - return "http://mirror.vutbr.cz/fedora/releases/25/Everything/x86_64/os/" - else: - tmp_pdc = PDCParser() - tmp_pdc.setLatestPDC(wmodule, wstream) - return tmp_pdc.generateRepoUrl() - - -class PDCParser(): +PDC_SERVER = "https://pdc.fedoraproject.org/rest_api/v1/unreleasedvariants" +ODCS_URL = "https://odcs.fedoraproject.org" +DEFAULT_MODULE_STREAM = "master" +BASE_REPO_URL = "https://kojipkgs.fedoraproject.org/compose/latest-Fedora-Modular-{}/compose/Server/{}/os" + +def get_module_nsv(name=None, stream=None, version=None): + name = name or os.environ.get('MODULE_NAME') + stream = stream or os.environ.get('MODULE_STREAM') or DEFAULT_MODULE_STREAM + version = version or os.environ.get('MODULE_VERSION') + return {'name':name, 'stream':stream, 'version':version} + + +def get_base_compose(): + default_release = "27" + release = os.environ.get("MTF_FEDORA_RELEASE") or default_release + if release == "master": + release = default_release + compose_url = os.environ.get("MTF_COMPOSE_BASE") or BASE_REPO_URL.format(release, ARCH) + return compose_url + +class PDCParserGeneral(): """ - Class for parsing PDC data via some setters line setFullVersion, setViaFedMsg, setLatestPDC + Generic class for parsing PDC data (get repo leads to fedora official composes) """ - - def __getDataFromPdc(self): - """ - Internal method, do not use it - - :return: None - """ - pdc_query = { 'variant_id' : self.name, 'active': True } - if self.stream: - pdc_query['variant_version'] = self.stream - if self.version: - pdc_query['variant_release'] = self.version - @Retry(attempts=DEFAULTRETRYCOUNT,timeout=DEFAULTRETRYTIMEOUT,error=PDCExc("Could not query PDC server")) - def retry_tmpfunc(): - # Using develop=True to not authenticate to the server - pdc_session = PDCClient(PDC_SERVER, ssl_verify=True, develop=True) - return 
pdc_session(**pdc_query) - mod_info = retry_tmpfunc() - if not mod_info or "results" not in mod_info.keys() or not mod_info["results"]: - raise PDCExc("QUERY: %s is not available on PDC" % pdc_query) - self.pdcdata = mod_info["results"][-1] - self.modulemd = yaml.load(self.pdcdata["modulemd"]) - - def setFullVersion(self, nvr): - """ - Set parameters of class via name-stream-version string - Taskotron uses this format - - :param nvr: - :return: None - """ - self.name, self.stream, self.version = re.search( - "(.*)-(.*)-(.*)", nvr).groups() - self.__getDataFromPdc() - - def setViaFedMsg(self, yamlinp): - """ - Sets parameters via RAW fedora message from message bus - used by internal CI - - :param yamlinp: yaml input string - :return: - """ - raw = yaml.load(yamlinp) - self.name = raw["msg"]["name"] - self.stream = raw["msg"]["stream"] - self.version = raw["msg"]["version"] - self.__getDataFromPdc() - - def setLatestPDC(self, name, stream="master", version=""): + name = None + stream = None + version = None + pdcdata = None + modulemd = None + moduledeps = None + + def __init__(self, name, stream=None, version=None): """ - Most flexible method how to set name stream version for search + Set basic parametrs, module names, streams, versions :param name: name of module :param stream: optional :param version: optional :return: """ - self.name = name - self.stream = stream - self.version = version - self.__getDataFromPdc() + modulensv = get_module_nsv(name=name, stream=stream, version=version) + self.name = modulensv['name'] + self.stream = modulensv['stream'] + self.version = modulensv['version'] + + def __getDataFromPdc(self): + """ + Internal method, do not use it - def generateRepoUrl(self): + :return: None + """ + if not self.pdcdata: + pdc_query = { 'variant_id' : self.name, 'active': True } + if self.stream: + pdc_query['variant_version'] = self.stream + if self.version: + pdc_query['variant_release'] = self.version + @Retry(attempts=DEFAULTRETRYCOUNT, 
timeout=DEFAULTRETRYTIMEOUT, error=mtfexceptions.PDCExc("Could not query PDC server")) + def retry_tmpfunc(): + # Using develop=True to not authenticate to the server + pdc_session = PDCClient(PDC_SERVER, ssl_verify=True, develop=True) + print_debug(pdc_session, pdc_query) + return pdc_session(**pdc_query) + mod_info = retry_tmpfunc() + if not mod_info or "results" not in mod_info.keys() or not mod_info["results"]: + raise mtfexceptions.PDCExc("QUERY: %s is not available on PDC" % pdc_query) + self.pdcdata = mod_info["results"][-1] + self.modulemd = yaml.load(self.pdcdata["modulemd"]) + return self.pdcdata + + + def get_repo(self): """ Return string of generated repository located on fedora koji :return: str """ - # rpmrepo = "http://kojipkgs.fedoraproject.org/repos/%s/latest/%s" % ( - # self.pdcdata["koji_tag"] + "-build", ARCH) - if get_if_remoterepos(): - rpmrepo = get_compose_url_modular_release() - return rpmrepo - else: - return self.createLocalRepoFromKoji() + return get_base_compose() + def generateGitHash(self): """ @@ -189,8 +133,12 @@ def generateGitHash(self): return self.getmoduleMD()['data']['xmd']['mbs']['commit'] def getmoduleMD(self): + self.__getDataFromPdc() return self.modulemd + def get_pdc_info(self): + return self.__getDataFromPdc() + def generateModuleMDFile(self): """ Store moduleMD file locally from PDC to tempmodule.yaml file @@ -212,25 +160,40 @@ def generateParams(self): :return: list """ output = [] - output.append("URL=%s" % self.generateRepoUrl()) + output.append("URL=%s" % self.get_repo()) output.append("MODULEMDURL=%s" % self.generateModuleMDFile()) output.append("MODULE=%s" % "nspawn") return output + def __get_module_requires(self): + return self.getmoduleMD().get("data", {}).get("dependencies", {}).get("requires", {}) + def generateDepModules(self): - x = self.getmoduleMD() - out = {} - if x["data"].get("dependencies") and x["data"]["dependencies"].get("requires"): - deps = x["data"]["dependencies"]["requires"] - for dep in 
deps: - a = PDCParser() - a.setLatestPDC(dep, deps[dep]) - out.update(a.generateDepModules()) - out.update(deps) + if self.moduledeps is None: + rootdepdict = {} + self.__generateDepModules_solver(parentdict=rootdepdict) + self.moduledeps = rootdepdict + return self.moduledeps + + def __generateDepModules_solver(self, parentdict): + deps = self.__get_module_requires() + print_debug("tree traverse from %s: %s"% (self.name, deps)) + for dep in deps: + if dep not in parentdict: + parentdict[dep] = deps[dep] + a = PDCParser(dep, deps[dep]) + a.__generateDepModules_solver(parentdict=parentdict) + + def get_module_identifier(self): + if self.version: + return "%s-%s-%s" % (self.name, self.stream, self.version) + elif self.stream: + return "%s-%s" % (self.name, self.stream) else: - out = {} - return out + return "%s-%s" % (self.name, "master") + +class PDCParserKoji(PDCParserGeneral): def download_tagged(self,dirname): """ Downloads packages to directory, based on koji tags @@ -240,16 +203,16 @@ def download_tagged(self,dirname): :return: None """ print_info("DOWNLOADING ALL packages for %s_%s_%s" % (self.name, self.stream, self.version)) - for foo in utils.process.run("koji list-tagged --quiet %s" % self.pdcdata["koji_tag"], verbose=is_debug()).stdout.split("\n"): + for foo in process.run("koji list-tagged --quiet %s" % self.get_pdc_info()["koji_tag"], verbose=is_debug()).stdout.split("\n"): pkgbouid = foo.strip().split(" ")[0] if len(pkgbouid) > 4: print_debug("DOWNLOADING: %s" % foo) @Retry(attempts=DEFAULTRETRYCOUNT * 10, timeout=DEFAULTRETRYTIMEOUT * 60, delay=DEFAULTRETRYTIMEOUT, - error=KojiExc( + error=mtfexceptions.KojiExc( "RETRY: Unbale to fetch package from koji after %d attempts" % (DEFAULTRETRYCOUNT * 10))) def tmpfunc(): - a = utils.process.run( + a = process.run( "cd %s; koji download-build %s -a %s -a noarch" % (dirname, pkgbouid, ARCH), shell=True, verbose=is_debug(), ignore_status=True) if a.exit_status == 1: @@ -257,13 +220,13 @@ def tmpfunc(): 
print_debug( 'UNABLE TO DOWNLOAD package (intended for other architectures, GOOD):', a.command) else: - raise KojiExc( + raise mtfexceptions.KojiExc( 'UNABLE TO DOWNLOAD package (KOJI issue, BAD):', a.command) tmpfunc() print_info("DOWNLOADING finished") - def createLocalRepoFromKoji(self): + def get_repo(self): """ Return string of generated repository located LOCALLY It downloads all tagged packages and creates repo via createrepo @@ -271,7 +234,7 @@ def createLocalRepoFromKoji(self): :return: str """ dir_prefix = BASEPATHDIR - utils.process.run("{HOSTPACKAGER} install createrepo koji".format( + process.run("{HOSTPACKAGER} install createrepo koji".format( **trans_dict), ignore_status=True) if is_recursive_download(): dirname = os.path.join(dir_prefix,"localrepo_recursive") @@ -282,16 +245,151 @@ def createLocalRepoFromKoji(self): if os.path.exists(os.path.join(absdir,"repodata","repomd.xml")): pass else: - os.mkdir(absdir) + if not os.path.exists(absdir): + os.mkdir(absdir) self.download_tagged(absdir) if is_recursive_download(): allmodules = self.generateDepModules() for mo in allmodules: - localrepo = PDCParser() - localrepo.setLatestPDC(mo, allmodules[mo]) + localrepo = PDCParserKoji(mo, allmodules[mo]) localrepo.download_tagged(dirname) - utils.process.run( + process.run( "cd %s; createrepo -v %s" % (absdir, absdir), shell=True, verbose=is_debug()) return "file://%s" % absdir + + +class PDCParserODCS(PDCParserGeneral): + compose_type = "module" + auth_token = get_odcs_auth() + + def get_repo(self): + odcs = ODCS(ODCS_URL, auth_mech=AuthMech.OpenIDC, openidc_token=self.auth_token) + print_debug("ODCS Starting module composing: %s" % odcs, + "%s compose for: %s" % (self.compose_type, self.get_module_identifier())) + compose_builder = odcs.new_compose(self.get_module_identifier(), self.compose_type) + timeout_time=600 + print_debug("ODCS Module compose started, timeout set to %ss" % timeout_time) + compose_state = odcs.wait_for_compose(compose_builder["id"], 
timeout=timeout_time) + if compose_state["state_name"] == "done": + compose = "{compose}/{arch}/os".format(compose=compose_state["result_repo"], arch=ARCH) + print_info("ODCS Compose done, URL with repo file", compose) + return compose + else: + raise mtfexceptions.PDCExc("ODCS: Failed to generate compose for module: %s" % + self.get_module_identifier()) + +def getBasePackageSet(modulesDict=None, isModule=True, isContainer=False): + """ + Get list of base packages (for bootstrapping of various module types) + It is used internally, you should not use it in case you don't know where to use it. + + :param modulesDict: dictionary of dependent modules + :param isModule: bool is module + :param isContainer: bool is contaner? + :return: list of packages to install + """ + # nspawn container need to install also systemd to be able to boot + out = [] + BASEPACKAGESET_WORKAROUND = ["systemd"] + BASEPACKAGESET_WORKAROUND_NOMODULE = BASEPACKAGESET_WORKAROUND + ["dnf"] + # https://pagure.io/fedora-kickstarts/blob/f27/f/fedora-modular-container-base.ks + BASE_MODULAR_CONTAINER = ["rootfiles", "tar", "vim-minimal", "dnf", "dnf-yum", "sssd-client"] + # https://pagure.io/fedora-kickstarts/blob/f27/f/fedora-modular-container-common.ks + BASE_MODULAR = ["fedora-modular-release", "bash", "coreutils-single", "glibc-minimal-langpack", + "libcrypt", "rpm", "shadow-utils", "sssd-client", "util-linux"] + if isModule: + if isContainer: + out = BASE_MODULAR_CONTAINER + else: + + out = BASE_MODULAR + BASEPACKAGESET_WORKAROUND + else: + if isContainer: + out = [] + else: + out = BASEPACKAGESET_WORKAROUND_NOMODULE + print_info("Base packages to install:", out) + return out + + +def get_repo_url(wmodule="base-runtime", wstream="master"): + """ + Return URL location of rpm repository. + It reads data from PDC and construct url locator. + It is used to solve repos for dependent modules (eg. 
memcached is dependent on perl and baseruntime) + + :param wmodule: module name + :param wstream: module stream + :param fake: + :return: str + """ + + tmp_pdc = PDCParser(wmodule, wstream) + return tmp_pdc.get_repo() + + +PDCParser = PDCParserGeneral +if get_odcs_auth(): + PDCParser = PDCParserODCS +elif not get_if_remoterepos(): + PDCParser = PDCParserKoji + +def test_PDC_general_base_runtime(): + print_info(sys._getframe().f_code.co_name) + parser = PDCParserGeneral("base-runtime", "master") + assert not parser.generateDepModules() + assert "module-" in parser.get_pdc_info()["koji_tag"] + print_info(parser.get_repo()) + assert BASE_REPO_URL[:30] in parser.get_repo() + print_info(parser.generateParams()) + assert len(parser.generateParams()) == 3 + assert "MODULE=nspawn" in " ".join(parser.generateParams()) + print_info("URL=%s" % BASE_REPO_URL[:30]) + assert "URL=%s" % BASE_REPO_URL[:30] in " ".join(parser.generateParams()) + +def test_PDC_general_nodejs(): + print_info(sys._getframe().f_code.co_name) + parser = PDCParserGeneral("nodejs", "8") + deps = parser.generateDepModules() + print_info(deps) + assert 'platform' in deps + assert 'host' in deps + assert 'python2' in deps + assert 'python3' in deps + +def test_PDC_koji_nodejs(): + global BASEPATHDIR + BASEPATHDIR = "." 
+ + print_info(sys._getframe().f_code.co_name) + parser = PDCParserKoji("nodejs", "8") + deps = parser.generateDepModules() + print_info(deps) + assert 'platform' in deps + assert 'host' in deps + assert 'python2' in deps + assert 'python3' in deps + print_info(parser.get_repo()) + assert "file://" in parser.get_repo() + assert os.path.abspath(BASEPATHDIR) in parser.get_repo() + assert "MODULE=nspawn" in " ".join(parser.generateParams()) + assert "URL=file://" in " ".join(parser.generateParams()) + # TODO: this subtest is too slow, commented out + #global is_recursive_download + #is_recursive_download = lambda: True + #print_info(parser.get_repo()) + +def test_PDC_ODCS_nodejs(): + print_info(sys._getframe().f_code.co_name) + parser = PDCParserODCS("nodejs", "8") + # TODO: need to setup MTF_ODCS variable with odcs token, and ODCS version at least 0.1.2 + # or your user will be asked to for token interactively + if get_odcs_auth(): + print_info(parser.get_repo()) + + + + +#test_PDC_ODCS_nodejs() \ No newline at end of file diff --git a/tools/taskotron-msg-reader.py b/moduleframework/pdc_msg_module_info_reader.py similarity index 69% rename from tools/taskotron-msg-reader.py rename to moduleframework/pdc_msg_module_info_reader.py index 972f432..b0e4de6 100755 --- a/tools/taskotron-msg-reader.py +++ b/moduleframework/pdc_msg_module_info_reader.py @@ -23,46 +23,54 @@ # from moduleframework.pdc_data import PDCParser -from optparse import OptionParser +from argparse import ArgumentParser +import yaml +import re -if __name__ == '__main__': - parser = OptionParser() - parser.add_option( +def cli(): + parser = ArgumentParser() + parser.add_argument( "-f", "--file", dest="filename", help="file with message to read fedora message bus", default=None) - parser.add_option( + parser.add_argument( "-r", "--release", dest="release", help="Use release in format name-stream-version as input", default=None) - parser.add_option("-l", "--latest", dest="latest", + 
parser.add_argument("-l", "--latest", dest="latest", help="Use latest bits, build by MBS and stored in PDC") - parser.add_option( + parser.add_argument( "--commit", dest="commit", action="store_true", - default=False, help="print git commit hash of exact version of module") + return parser.parse_args() - a = PDCParser() - (options, args) = parser.parse_args() + +def main(): + options = cli() + name = None + stream = None + version = None if options.filename: flh = open(options.filename) stdinput = "".join(flh.readlines()).strip() + raw = yaml.load(stdinput) + name = raw["msg"]["name"] + stream = raw["msg"]["stream"] + version = raw["msg"]["version"] flh.close() - a.setViaFedMsg(stdinput) elif options.release: - a.setFullVersion(options.release) + name, stream, version = re.search( + "(.*)-(.*)-(.*)", options.release).groups() elif options.latest: - a.setLatestPDC(options.latest) - else: - raise Exception(parser.print_help()) - + name = options.latest + pdc_solver = PDCParser(name, stream, version) if options.commit: - print a.generateGitHash() + print pdc_solver.generateGitHash() else: - print " ".join(a.generateParams()) + print " ".join(pdc_solver.generateParams()) diff --git a/moduleframework/setup.py b/moduleframework/setup.py deleted file mode 100644 index 5bb56a9..0000000 --- a/moduleframework/setup.py +++ /dev/null @@ -1,78 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Meta test family (MTF) is a tool to test components of a modular Fedora: -# https://docs.pagure.org/modularity/ -# Copyright (C) 2017 Red Hat, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# he Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Authors: Jan Scotka -# - -from __future__ import print_function -import glob -from moduleframework import module_framework -from avocado import utils - - -class Module(module_framework.CommonFunctions): - - whattoinstall = None - baseruntimeyaml = None - - def __init__(self): - self.loadconfig() - self.yamlconfig = self.getModulemdYamlconfig() - self.profile = module_framework.PROFILE if module_framework.PROFILE else "default" - if self.yamlconfig: - self.whattoinstall = self.yamlconfig['data']['profiles'][self.profile] - self.rootdir = "/tmp/tmpmodule1" - self.rpmsrepo = self.rootdir + "/rpms" - self.rpmsinstalled = self.rootdir + "/installed" - utils.process.run("mkdir -p %s" % self.rootdir) - utils.process.run("mkdir -p %s" % self.rpmsrepo) - utils.process.run("mkdir -p %s" % self.rpmsinstalled) - self.baseruntimeyaml = self.getModulemdYamlconfig( - "https://raw.githubusercontent.com/fedora-modularity/check_modulemd/develop/examples-modulemd/base-runtime.yaml") - - def CreateLocalRepo(self): - allmodulerpms = None - allbasertrpms = None - if self.whattoinstall: - allmodulerpms = " ".join(self.whattoinstall['rpms']) - if self.baseruntimeyaml: - allbasertrpms = " ".join(self.baseruntimeyaml['data'][ - 'profiles']['default']['rpms']) - if allbasertrpms is not None and allmodulerpms is not None: - utils.process.run( - "yumdownloader --destdir=%s --resolve %s %s" % - (self.rpmsrepo, allmodulerpms, allbasertrpms)) - utils.process.run( - "cd %s; createrepo --database %s" % - (self.rpmsrepo, self.rpmsrepo), shell=True) - print("file://%s" % self.rpmsrepo) - - def CreateContainer(self): - localfiles = glob.glob('%s/*.rpm' % self.rpmsrepo) - if localfiles and self.rpmsinstalled: - utils.process.run( 
- "dnf -y install --disablerepo=* --allowerasing --installroot=%s %s" % - (self.rpmsinstalled, " ".join(localfiles))) - print("file://%s" % self.rpmsrepo) - - -m = Module() -m.CreateLocalRepo() -m.CreateContainer() diff --git a/moduleframework/timeoutlib.py b/moduleframework/timeoutlib.py index 3c998ed..dc330ef 100644 --- a/moduleframework/timeoutlib.py +++ b/moduleframework/timeoutlib.py @@ -21,18 +21,11 @@ import signal import time -import logging -from common import print_info - -log = logging.getLogger('avocado.test') - class Timeout(object): def __init__(self, retry, timeout): self.retry = retry self.timeout = timeout - log.debug("Started timeout period: ", timeout) - log.debug("Number of remainig retry: ", retry) def __enter__(self): def timeout_handler(signum, frame): @@ -45,7 +38,6 @@ def timeout_handler(signum, frame): signal.alarm(self.timeout) def __exit__(self, type, value, traceback): - log.debug("Time were exceeded") signal.alarm(0) signal.signal(signal.SIGALRM, self.orig_sighand) @@ -65,7 +57,7 @@ class Retry(object): def __init__(self, attempts=1, timeout=None, exceptions=(Exception,), error=None, inverse=False, delay=None): """ Try to run things ATTEMPTS times, at max, each attempt must not exceed TIMEOUT seconds. - Restart only when one of EXCEPTIONS is raised, all other exceptions will just bubble up. + Restart only when one of EXCEPTIONS is raised, all other mtfexceptions will just bubble up. When the maximal number of attempts is reached, raise ERROR. Wait DELAY seconds between attempts. When INVERSE is True, successfull return of wrapped code is considered as a failure. 
@@ -84,13 +76,12 @@ def __init__(self, attempts=1, timeout=None, exceptions=(Exception,), error=None self.failed_attempts = 0 self.timeouts_triggered = 0 - def handle_failure(self, start_time, exc): + def handle_failure(self, start_time): if __debug__: self.failed_attempts += 1 + self.attempts -= 1 - log.debug("Remaining attempts: ", self.attempts) if self.attempts == 0: - print_info("Original exeption:", exc) raise self.error # Before the next iteration sleep $delay seconds. It's the @@ -115,8 +106,7 @@ def __wrap(*args, **kwargs): while True: if delay is not None: - log.debug("Sleeping for delay:", delay) - time.sleep(delay) + time.sleep(delay) with self.timeout_wrapper(self, self.timeout): start_time = time.time() @@ -130,21 +120,18 @@ def __wrap(*args, **kwargs): if self.inverse: return True - # Handle exceptions we are expected to catch, by logging a failed + # Handle mtfexceptions we are expected to catch, by logging a failed # attempt, and checking the number of attempts. - delay = self.handle_failure(start_time, e) - log.debug("Exception were catch:", e) - log.debug("Continue to next round") + delay = self.handle_failure(start_time) continue except Exception as e: - # Handle all other exceptions, by logging a failed attempt and + # Handle all other mtfexceptions, by logging a failed attempt and # re-raising the exception, effectively killing the loop. 
if __debug__: self.failed_attempts += 1 raise e - delay = self.handle_failure(start_time, "") + delay = self.handle_failure(start_time) return __wrap - diff --git a/moduleframework/tools/__init__.py b/moduleframework/tools/__init__.py index 6d224aa..8906e36 100644 --- a/moduleframework/tools/__init__.py +++ b/moduleframework/tools/__init__.py @@ -25,9 +25,11 @@ from moduleframework import module_framework -class ModulelintSanity(module_framework.AvocadoTest): +class Basic(module_framework.AvocadoTest): """ :avocado: enable + :avocado: tags=sanity,general,fedora,rhel """ - def testPass(self): - pass \ No newline at end of file + + def test(self): + self.start() \ No newline at end of file diff --git a/moduleframework/tools/check_compose.py b/moduleframework/tools/check_compose.py index 13b5799..26773f5 100644 --- a/moduleframework/tools/check_compose.py +++ b/moduleframework/tools/check_compose.py @@ -33,6 +33,7 @@ class ComposeTest(module_framework.NspawnAvocadoTest): Validate overall module compose. :avocado: enable + :avocado: tags=sanity,rhel,fedora,compose_test,module """ def test_component_profile_installability(self): diff --git a/moduleframework/tools/dockerlint.py b/moduleframework/tools/dockerlint.py new file mode 100644 index 0000000..9315570 --- /dev/null +++ b/moduleframework/tools/dockerlint.py @@ -0,0 +1,233 @@ +# -*- coding: utf-8 -*- +# +# Meta test family (MTF) is a tool to test components of a modular Fedora: +# https://docs.pagure.org/modularity/ +# Copyright (C) 2017 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# he Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Authors: Jan Scotka +# +from __future__ import print_function + +import os +from moduleframework import module_framework +from moduleframework import dockerlinter +from moduleframework.avocado_testers import container_avocado_test + + +class DockerInstructionsTests(module_framework.AvocadoTest): + """ + :avocado: enable + :avocado: tags=sanity,rhel,fedora,docker,docker_instruction_test + + """ + + dp = None + + def setUp(self): + # it is not intended just for docker, but just docker packages are + # actually properly signed + self.dp = dockerlinter.DockerfileLinter() + if self.dp.dockerfile is None: + dir_name = os.getcwd() + self.log.info("Dockerfile was not found in %s directory." % dir_name) + self.skip() + + def test_from_is_first_directive(self): + self.assertTrue(self.dp.check_from_is_first()) + + def test_from_directive_is_valid(self): + self.assertTrue(self.dp.check_from_directive_is_valid()) + + def test_chained_run_dnf_commands(self): + self.assertTrue(self.dp.check_chained_run_dnf_commands()) + + def test_chained_run_rest_commands(self): + self.assertTrue(self.dp.check_chained_run_rest_commands()) + + def test_helpmd_is_present(self): + self.assert_to_warn(self.assertTrue, self.dp.check_helpmd_is_present()) + + +class DockerLabelsTests(module_framework.AvocadoTest): + """ + :avocado: enable + :avocado: tags=sanity,rhel,fedora,docker,docker_labels_test + + """ + + dp = None + + def setUp(self): + # it is not intended just for docker, but just docker packages are + # actually properly signed + self.dp = dockerlinter.DockerfileLinter() + if self.dp.dockerfile is None: + dir_name = os.getcwd() + self.log.info("Dockerfile was not found in %s directory." 
% dir_name) + self.skip() + + def _test_for_env_and_label(self, docker_env, docker_label, env=True): + label_found = True + if env: + label = self.dp.get_docker_specific_env(docker_env) + else: + label = self.dp.get_specific_label(docker_env) + if not label: + label_found = self.dp.get_specific_label(docker_label) + return label_found + + def test_architecture_in_env_and_label_exists(self): + self.assertTrue(self.dp.get_specific_label("architecture")) + + def test_name_in_env_and_label_exists(self): + self.assertTrue(self.dp.get_docker_specific_env("NAME=")) + self.assertTrue(self.dp.get_specific_label("name")) + + def test_maintainer_label_exists(self): + self.assertTrue(self.dp.get_specific_label("maintainer")) + + def test_release_label_exists(self): + self.assertTrue(self.dp.get_specific_label("release")) + + def test_version_label_exists(self): + self.assertTrue(self.dp.get_specific_label("version")) + + def test_com_redhat_component_label_exists(self): + self.assertTrue(self.dp.get_specific_label("com.redhat.component")) + + def test_summary_label_exists(self): + self.assertTrue(self.dp.get_specific_label("summary")) + + def test_run_or_usage_label_exists(self): + self.assertTrue(self._test_for_env_and_label("run", "usage", env=False)) + + +class DockerfileLinterInContainer(container_avocado_test.ContainerAvocadoTest): + """ + :avocado: enable + :avocado: tags=sanity,rhel,fedora,docker,docker_lint_inside_test + + """ + + def _file_to_check(self, doc_file_list): + test_failed = False + for doc in doc_file_list: + exit_status = self.run("test -e %s" % doc, ignore_status=True).exit_status + if int(exit_status) == 0: + self.log.debug("%s doc file exists in container" % doc) + test_failed = True + return test_failed + + def test_all_nodocs(self): + self.start() + all_docs = self.run("rpm -qad", verbose=False).stdout + test_failed = self._file_to_check(all_docs.split('\n')) + if test_failed: + self.log.warn("Documentation files exist in container. 
They are installed by Platform or by RUN commands.") + self.assertTrue(True) + + def test_installed_docs(self): + """ + This test checks whether no docs are installed by RUN dnf command + :return: FAILED in case we found some docs + PASS in case there is no doc file found + """ + self.start() + # Double brackets has to by used because of trans_dict. + # 'EXCEPTION MTF: ', 'Command is formatted by using trans_dict. + # If you want to use brackets { } in your code, please use {{ }}. + installed_pkgs = self.run("rpm -qa --qf '%{{NAME}}\n'", verbose=False).stdout + defined_pkgs = self.backend.getPackageList() + list_pkg = set(installed_pkgs).intersection(set(defined_pkgs)) + test_failed = False + for pkg in list_pkg: + pkg_doc = self.run("rpm -qd %s" % pkg, verbose=False).stdout + if self._file_to_check(pkg_doc.split('\n')): + test_failed = True + self.assertFalse(test_failed) + + def _check_container_files(self, exts, pkg_mgr): + found_files = False + file_list = [] + for ext in exts: + dir_with_ext = "/var/cache/{pkg_mgr}/**/*.{ext}".format(pkg_mgr=pkg_mgr, ext=ext) + # Some images does not contain find command and therefore we have to use for or ls. + ret = self.run('shopt -s globstar && for i in {dir}; do printf "%s\\n" "$i" ; done'.format( + dir=dir_with_ext), + ignore_status=True) + # we did not find any file with an extension. + # TODO I don't how to detect failure or empty files. 
+ if ret.stdout.strip() == dir_with_ext: + continue + file_list.extend(ret.stdout.split('\n')) + if self._file_to_check(file_list): + found_files = True + return found_files + + def _dnf_clean_all(self): + """ + Function checks if files with relevant extensions exist in /var/cache/dnf directory + :return: True if at least one file exists + False if no file exists + """ + exts = ["solv", "solvx", "xml.gz", "rpm"] + return self._check_container_files(exts, "dnf") + + def _yum_clean_all(self): + """ + Function checks if files with relevant extensions exist in /var/cache/dnf directory + :return: True if at least one file exists + False if no file exists + """ + # extensions are taken from https://github.com/rpm-software-management/yum/blob/master/yum/__init__.py#L2854 + exts = ['rpm', 'sqlite', 'sqlite.bz2', 'xml.gz', 'asc', 'mirrorlist.txt', 'cachecookie', 'xml'] + return self._check_container_files(exts, "yum") + + def test_docker_clean_all(self): + """ + This test checks if `dnf/yum clean all` was called in image + + :return: return True if clean all is called + return False if clean all is not called + """ + self.start() + # Detect distro in image + distro = self.run("cat /etc/os-release").stdout + if 'Fedora' in distro: + self.assertFalse(self._dnf_clean_all()) + else: + self.assertFalse(self._yum_clean_all()) + + +class DockerLint(container_avocado_test.ContainerAvocadoTest): + """ + :avocado: enable + :avocado: tags=sanity,rhel,fedora,docker,docker_labels_inspect_test + """ + + def testLabels(self): + """ + Function tests whether labels are set in modulemd YAML file properly. 
+ :return: + """ + llabels = self.getConfigModule().get('labels') + if llabels is None or len(llabels) == 0: + self.log.info("No labels defined in config to check") + self.cancel() + for key in self.getConfigModule()['labels']: + aaa = self.checkLabel(key, self.getConfigModule()['labels'][key]) + self.assertTrue(aaa) diff --git a/moduleframework/tools/helpmd_lint.py b/moduleframework/tools/helpmd_lint.py new file mode 100644 index 0000000..964b1c8 --- /dev/null +++ b/moduleframework/tools/helpmd_lint.py @@ -0,0 +1,81 @@ +# -*- coding: utf-8 -*- +# +# Meta test family (MTF) is a tool to test components of a modular Fedora: +# https://docs.pagure.org/modularity/ +# Copyright (C) 2017 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# he Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# Authors: Jan Scotka +# +from __future__ import print_function + +import os +from moduleframework import helpfile_linter +from moduleframework import dockerlinter +from moduleframework import module_framework +from moduleframework.common import get_docker_file + + +class HelpFileSanity(module_framework.AvocadoTest): + """ + :avocado: enable + :avocado: tags=optional,rhel,fedora,docker,helpmd_sanity_test + + """ + + dp = None + + def setUp(self): + # it is not intended just for docker, but just docker packages are + # actually properly signed + self.dp = dockerlinter.DockerfileLinter() + if self.dp.dockerfile is None: + dir_name = os.getcwd() + self.log.info("Dockerfile was not found in %s directory." % dir_name) + self.skip() + self.helpmd = helpfile_linter.HelpMDLinter(dockerfile=self.dp.dockerfile) + if self.helpmd.help_md is None: + self.log.info("help.md file was not found in Dockerfile directory") + self.skip("help.md file was not found in Dockerfile directory") + + def test_helpmd_exists(self): + self.assertTrue(self.helpmd) + + def test_helpmd_image_name(self): + container_name = self.dp.get_docker_specific_env("NAME=") + if container_name: + self.assertTrue(self.helpmd.get_image_name(container_name[0].split('=')[1])) + + def test_helpmd_maintainer_name(self): + maintainer_name = self.dp.get_specific_label("maintainer") + if maintainer_name: + self.assertTrue(self.helpmd.get_maintainer_name(maintainer_name[0])) + + def test_helpmd_name(self): + self.assertTrue(self.helpmd.get_tag("NAME")) + + def test_helpmd_description(self): + self.assertTrue(self.helpmd.get_tag("DESCRIPTION")) + + def test_helpmd_usage(self): + self.assertTrue(self.helpmd.get_tag("USAGE")) + + def test_helpmd_environment_variables(self): + self.assert_to_warn(self.assertTrue, self.helpmd.get_tag("ENVIRONMENT VARIABLES")) + + def test_helpmd_security_implications(self): + if self.dp.get_docker_expose(): + self.assertTrue(self.helpmd.get_tag("SECURITY IMPLICATIONS")) diff --git 
a/moduleframework/tools/modulelint.py b/moduleframework/tools/modulelint.py index 5212a98..2402246 100644 --- a/moduleframework/tools/modulelint.py +++ b/moduleframework/tools/modulelint.py @@ -21,93 +21,14 @@ # Authors: Jan Scotka # from __future__ import print_function -import os from moduleframework import module_framework -from moduleframework import dockerlinter -from moduleframework.avocado_testers import container_avocado_test - - -class DockerFileLinter(module_framework.AvocadoTest): - """ - :avocado: enable - - """ - - dp = None - - def setUp(self): - # it is not intended just for docker, but just docker packages are - # actually properly signed - self.dp = dockerlinter.DockerfileLinter() - if self.dp.dockerfile is None: - self.skip() - - def test_architecture_in_env_and_label_exists(self): - self.assertTrue(self.dp.get_docker_specific_env("ARCH=")) - self.assertTrue(self.dp.get_specific_label("architecture")) - - def test_name_in_env_and_label_exists(self): - self.assertTrue(self.dp.get_docker_specific_env("NAME=")) - self.assertTrue(self.dp.get_specific_label("name")) - - def test_release_label_exists(self): - self.assertTrue(self.dp.get_specific_label("release")) - - def test_version_label_exists(self): - self.assertTrue(self.dp.get_specific_label("version")) - - def test_com_redhat_component_label_exists(self): - self.assertTrue(self.dp.get_specific_label("com.redhat.component")) - - def test_summary_label_exists(self): - self.assertTrue(self.dp.get_specific_label("summary")) - - def test_run_or_usage_label_exists(self): - label_found = True - run = self.dp.get_specific_label("run") - if not run: - label_found = self.dp.get_specific_label("usage") - self.assertTrue(label_found) - - - -class DockerLint(container_avocado_test.ContainerAvocadoTest): - """ - :avocado: enable - """ - - def testBasic(self): - self.start() - self.assertTrue("bin" in self.run("ls /").stdout) - - def testContainerIsRunning(self): - """ - Function tests whether container is 
running - :return: - """ - self.start() - self.assertIn(self.backend.jmeno.rsplit("/")[-1], self.runHost("docker ps").stdout) - - def testLabels(self): - """ - Function tests whether labels are set in modulemd YAML file properly. - :return: - """ - llabels = self.getConfigModule().get('labels') - if llabels is None or len(llabels) == 0: - print("No labels defined in config to check") - self.cancel() - for key in self.getConfigModule()['labels']: - aaa = self.checkLabel(key, self.getConfigModule()['labels'][key]) - print(">>>>>> ", aaa, key) - self.assertTrue(aaa) class ModuleLintSigning(module_framework.AvocadoTest): """ :avocado: disable - :avocado: tags=WIP + :avocado: tags=WIP,rhel,fedora,docker,module,package_signing_test """ def setUp(self): @@ -132,7 +53,10 @@ def test(self): class ModuleLintPackagesCheck(module_framework.AvocadoTest): """ + Check if packages what are expected to be installed all installed + :avocado: enable + :avocado: tags=sanity,rhel,fedora,docker,module,package_installed_test """ def test(self): diff --git a/moduleframework/tools/rpmvalidation.py b/moduleframework/tools/rpmvalidation.py index 9f110cf..4ec5ad1 100644 --- a/moduleframework/tools/rpmvalidation.py +++ b/moduleframework/tools/rpmvalidation.py @@ -35,6 +35,7 @@ class rpmvalidation(module_framework.AvocadoTest): http://refspecs.linuxfoundation.org/FHS_3.0/fhs/index.html :avocado: enable + :avocado: tags=sanity,rhel,fedora,docker,module,rpmvalidation_test """ fhs_base_paths_workaound = [ '/var/kerberos', @@ -93,7 +94,7 @@ def _compare_fhs(self, filepath): self.log.info("%s not found in %s" % (filepath, self.fhs_base_paths)) return False - def test(self): + def testPaths(self): self.start() allpackages = filter(bool, self.run("rpm -qa").stdout.split("\n")) common.print_debug(allpackages) diff --git a/moduleframework/version.py b/moduleframework/version.py deleted file mode 100644 index 5f42bb1..0000000 --- a/moduleframework/version.py +++ /dev/null @@ -1,21 +0,0 @@ -import os - 
-SPECFILEPATH = os.path.abspath( - # Path to SPECFILE - os.path.join( - os.path.dirname(__file__), - "..", - "meta-test-family.spec" - )) - - -def version_func(): - with open(SPECFILEPATH, 'r') as infile: - for line in infile.readlines(): - if "Version: " in line: - return line[16:].strip() - raise BaseException( - "Unable to read Version string from specfile:", SPECFILEPATH) - - -VERSION = version_func() diff --git a/mtf/__init__.py b/mtf/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/mtf/backend/__init__.py b/mtf/backend/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/mtf/backend/nspawn.py b/mtf/backend/nspawn.py new file mode 100644 index 0000000..9969874 --- /dev/null +++ b/mtf/backend/nspawn.py @@ -0,0 +1,579 @@ +# -*- coding: utf-8 -*- +# +# This Modularity Testing Framework helps you to write tests for modules +# Copyright (C) 2017 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# he Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# Authors: Jan Scotka +# + +""" +Low level library handling Systemd nspawn containers and images +""" + +import os +import logging +import shutil +import glob +import time +import re + +from avocado import Test +from avocado.utils import process +from mtf import common +from mtf import mtfexceptions + +DEFAULT_RETRYTIMEOUT = 30 +DEFAULT_SLEEP = 1 +base_package_set = ["systemd"] + +is_debug_low = common.is_debug +if is_debug_low(): + logging.basicConfig(level=logging.DEBUG) + +class Image(object): + """ + It represents image object for Nspawn virtualization + Actually it is directory + """ + logger = logging.getLogger("Image") + def __init__(self, repos, packageset, location, installed=False, packager="dnf -y", + name="unique", ignore_installed=False): + self.repos = repos + self.packageset = list(set(packageset + base_package_set)) + self.location = location + self.packager = packager + self.name = name + baserepodir=os.path.join("/etc", "yum.repos.d") + # allow to fake environment in ubuntu (for Travis) + if not os.path.exists(baserepodir): + baserepodir="/var/tmp" + self.yumrepo = os.path.join(baserepodir, "%s.repo" % self.name) + if installed: + pass + else: + try: + self.__install() + except mtfexceptions.NspawnExc as e: + if ignore_installed: + pass + else: + raise e + + def create_snapshot(self, destination): + """ + returns Image object with copyied files from base image + + :param destination: directory where to crete copy + :return: Image + """ + self.logger.debug("Create Snapshot: %s -> %s" % (self.location, destination)) + # copytree somethimes fails, it is not reliable in case of copy of system + # shutil.copytree(self.location, destination) + # cp will do better work + process.run("cp -rf %s %s" % (self.location, destination)) + return self.__class__(repos=self.repos, packageset=self.packageset, + location=destination, installed=True, + packager=self.packager, name=self.name) + + def __install(self): + """ + Internal method for installing packages 
to chroot and set repositories. + + :return: None + """ + self.logger.debug("Install system to direcory: %s" % self.location) + if not os.path.exists(os.path.join(self.location, "usr")): + if not os.path.exists(self.location): + os.makedirs(self.location) + repos_to_use = "" + counter = 0 + for repo in self.repos: + counter = counter + 1 + repos_to_use += " --repofrompath %s%d,%s" % ( + self.name, counter, repo) + self.logger.debug("Install packages: %s" % self.packageset) + self.logger.debug("Repositories: %s" % self.repos) + process.run("%s install --nogpgcheck --setopt=install_weak_deps=False " + "--installroot %s --allowerasing --disablerepo=* --enablerepo=%s* %s %s" % + (self.packager, self.location, self.name, + repos_to_use, " ".join(self.packageset)), + verbose=is_debug_low()) + insiderepopath = os.path.join(self.location, self.yumrepo[1:]) + if not os.path.exists(os.path.dirname(insiderepopath)): + os.makedirs(os.path.dirname(insiderepopath)) + counter = 0 + with open(insiderepopath, 'w') as f: + for repo in self.repos: + counter = counter + 1 + add = """[%s%d] + name=%s%d + baseurl=%s + enabled=1 + gpgcheck=0 + + """ % (self.name, counter, self.name, counter, repo) + f.write(add) + for repo in self.repos: + if "file:///" in repo: + src = repo[7:] + srcto = os.path.join(self.location, src[1:]) + if not os.path.exists(os.path.dirname(srcto)): + os.makedirs(os.path.dirname(srcto)) + shutil.copytree(src, srcto) + pkipath = "/etc/pki/rpm-gpg" + pkipath_ch = os.path.join(self.location, pkipath[1:]) + if not os.path.exists(pkipath_ch): + os.makedirs(pkipath_ch) + for filename in glob.glob(os.path.join(pkipath, '*')): + shutil.copy(filename, pkipath_ch) + else: + raise mtfexceptions.NspawnExc("Directory %s already in use" % self.location) + + def get_location(self): + """ + return directory location + + :return: str + """ + return self.location + + def rmi(self): + shutil.rmtree(self.location) + +class Container(object): + """ + It represents nspawn container 
virtualization with + methods for start/run/execute commands inside + + """ + logger = logging.getLogger("Container") + __systemd_wait_support = False + __default_command_sleep = 2 + __alternative_boot = False + + def __init__(self, image, name=None): + """ + + :param image: Image object + :param name: optional, use unique name for generating containers in case not given, some name is generated + """ + self.image = image + self.name = name or common.generate_unique_name() + self.location = self.image.get_location() + self.__systemd_wait_support = self._run_systemdrun_decide() + + def __machined_restart(self): + # this is removed, it was important for crappy machinectl shell handling + #self.logger.debug("restart systemd-machined") + #return process.run("systemctl restart systemd-machined", verbose=is_debug_low(), ignore_status=True) + pass + + def __is_killed(self): + for foo in range(DEFAULT_RETRYTIMEOUT): + time.sleep(DEFAULT_SLEEP) + out = process.run("machinectl status %s" % self.name, ignore_status=True, verbose=is_debug_low()) + if out.exit_status != 0: + return True + raise mtfexceptions.NspawnExc("Unable to stop machine %s within %d" % (self.name, DEFAULT_RETRYTIMEOUT)) + + def __is_booted(self): + for foo in range(DEFAULT_RETRYTIMEOUT): + time.sleep(DEFAULT_SLEEP) + out = process.run("machinectl status %s" % self.name, ignore_status=True, verbose=is_debug_low()) + if not self.__alternative_boot: + if "systemd-logind" in out.stdout: + time.sleep(DEFAULT_SLEEP) + return True + else: + if "Unit: machine" in out.stdout: + time.sleep(DEFAULT_SLEEP) + return True + raise mtfexceptions.NspawnExc("Unable to start machine %s within %d" % (self.name, DEFAULT_RETRYTIMEOUT)) + + def boot_machine(self, nspawn_add_option_list=[], boot_cmd="", wait_finish=False): + """ + start machine via -b option (full boot, default) or + via boot_cmd (usefull with wait_finish=True option) + + :param nspawn_add_option_list: list - additional nspawn parameters + :param boot_cmd: std - 
command with aruments for starting + :param wait_finish: - bool - wait to process finish (by default it just wait for creting systemd unit and boot) + :return: process.Subprocess object + """ + self.logger.debug("starting NSPAWN") + bootmachine = "" + bootmachine_cmd = "" + if boot_cmd: + self.__alternative_boot = True + bootmachine_cmd = boot_cmd + process.run("systemctl reset-failed machine-%s.scope" % self.name, + ignore_status=True, verbose=is_debug_low()) + else: + bootmachine = "-b" + command = "systemd-nspawn --machine=%s %s %s -D %s %s" % \ + (self.name, " ".join(nspawn_add_option_list), bootmachine, self.location, bootmachine_cmd) + self.logger.debug("Start command: %s" % command) + nspawncont = process.SubProcess(command) + self.logger.info("machine: %s starting" % self.name) + if wait_finish: + nspawncont.wait() + else: + nspawncont.start() + self.__is_booted() + self.logger.info("machine: %s starting finished" % self.name) + return nspawncont + + def execute(self, command, **kwargs): + """ + execute command inside container, it hides what method will be used + + :param command: str + :param kwargs: pass thru to avocado.process.run command + :return: process object + """ + return self.run_systemdrun(command, **kwargs) + + def _run_systemdrun_decide(self): + """ + Internal method + decide if it is possible to use --wait option to systemd + + :return: + """ + return "--wait" in process.run("systemd-run --help", verbose=is_debug_low()).stdout + + def __systemctl_wait_until_finish(self, machine, unit): + """ + Internal method + workaround for systemd-run without --wait option + + :param machine: + :param unit: + :return: + """ + while True: + output = [x.strip() for x in + process.run("systemctl show -M {} {}".format(machine, unit), + verbose=is_debug_low()).stdout.split("\n")] + retcode = int([x[-1] for x in output if "ExecMainStatus=" in x][0]) + if not ("SubState=exited" in output or "SubState=failed" in output): + time.sleep(0.1) + else: + break + 
process.run("systemctl -M {} stop {}".format(machine, unit), ignore_status=True, verbose=is_debug_low()) + return retcode + + def run_systemdrun(self, command, internal_background=False, **kwargs): + """ + execute command via systemd-run inside container + + :param command: + :param internal_background: + :param kwargs: + :return: + """ + if not kwargs: + kwargs = {} + self.__machined_restart() + add_sleep_infinite = "" + unit_name = common.generate_unique_name() + lpath = "/var/tmp/{}".format(unit_name) + if self.__systemd_wait_support: + add_wait_var = "--wait" + else: + # keep service exist after it finish, to be able to read exit code + add_wait_var = "-r" + if internal_background: + add_wait_var = "" + add_sleep_infinite = "&& sleep infinity" + opts = " --unit {unitname} {wait} -M {machine}".format(wait=add_wait_var, + machine=self.name, + unitname=unit_name + ) + try: + comout = process.run("""systemd-run {opts} /bin/bash -c "({comm})>{pin}.stdout 2>{pin}.stderr {sleep}" """.format( + opts=opts, comm=common.sanitize_cmd(command), pin=lpath, sleep=add_sleep_infinite), + **kwargs) + if not internal_background: + if not self.__systemd_wait_support: + comout.exit_status = self.__systemctl_wait_until_finish(self.name,unit_name) + with open("{chroot}{pin}.stdout".format(chroot=self.location, pin=lpath), 'r') as content_file: + comout.stdout = content_file.read() + with open("{chroot}{pin}.stderr".format(chroot=self.location, pin=lpath), 'r') as content_file: + comout.stderr = content_file.read() + comout.command = command + os.remove("{chroot}{pin}.stdout".format(chroot=self.location, pin=lpath)) + os.remove("{chroot}{pin}.stderr".format(chroot=self.location, pin=lpath)) + self.logger.debug(comout) + if not self.__systemd_wait_support and not kwargs.get("ignore_status") and comout.exit_status != 0: + raise process.CmdError(comout.command, comout) + return comout + except process.CmdError as e: + raise e + + def run_machinectl(self, command, **kwargs): + """ + 
execute command via machinectl shell inside container + + :param command: + :param kwargs: + :return: + """ + self.__machined_restart() + lpath = "/var/tmp" + if not kwargs: + kwargs = {} + should_ignore = kwargs.get("ignore_status") + kwargs["ignore_status"] = True + comout = process.run("""machinectl shell root@{machine} /bin/bash -c "({comm})>{pin}/stdout 2>{pin}/stderr; echo $?>{pin}/retcode; sleep {defaultsleep}" """.format( + machine=self.name, comm=common.sanitize_cmd(command), pin=lpath, + defaultsleep=self.__default_command_sleep ), **kwargs) + if comout.exit_status != 0: + raise mtfexceptions.NspawnExc("This command should not fail anyhow inside NSPAWN:", command) + try: + kwargs["verbose"] = False + b = process.run( + 'bash -c "cat {chroot}{pin}/stdout; cat {chroot}{pin}/stderr > /dev/stderr; exit `cat {chroot}{pin}/retcode`"'.format( + chroot=self.location, + pin=lpath), + **kwargs) + finally: + comout.stdout = b.stdout + comout.stderr = b.stderr + comout.exit_status = b.exit_status + removesworkaround = re.search('[^(]*\((.*)\)[^)]*', comout.command) + if removesworkaround: + comout.command = removesworkaround.group(1) + if comout.exit_status == 0 or should_ignore: + return comout + else: + raise process.CmdError(comout.command, comout) + + def selfcheck(self): + """ + Test if default command will pass, it is more important for nspawn, because it happens that + it does not returns anything + + :return: avocado.process.run + """ + return self.execute("true") + + def copy_to(self, src, dest): + """ + Copy file to module from host + + :param src: source file on host + :param dest: destination file on module + :return: None + """ + self.logger.debug("copy files (inside) from: %s to: %s" % (src, dest)) + process.run( + " machinectl copy-to %s %s %s" % + (self.name, src, dest), timeout=DEFAULT_RETRYTIMEOUT, verbose=is_debug_low()) + + def copy_from(self, src, dest): + """ + Copy file from module to host + + :param src: source file on module + :param dest: 
destination file on host + :return: None + """ + self.logger.debug("copy files (outside) from: %s to: %s" % (src, dest)) + process.run( + " machinectl copy-from %s %s %s" % + (self.name, src, dest), timeout=DEFAULT_RETRYTIMEOUT, verbose=is_debug_low()) + + def stop(self): + """ + Stop the nspawn container + + :return: + """ + self.logger.debug("Stop") + self.__machined_restart() + try: + if not self.__alternative_boot: + process.run("machinectl poweroff %s" % self.name, verbose=is_debug_low()) + else: + try: + process.run("systemctl kill --kill-who=all -s9 machine-%s.scope" % self.name, + ignore_status=True, verbose=is_debug_low()) + except Exception: + pass + try: + process.run("systemctl reset-failed machine-%s.scope" % self.name, + ignore_status=True, verbose=is_debug_low()) + except Exception: + pass + self.__is_killed() + except BaseException as poweroffex: + self.logger.debug("Unable to stop machine via poweroff, terminating : %s" % poweroffex) + try: + process.run("machinectl terminate %s" % self.name, ignore_status=True, verbose=is_debug_low()) + self.__is_killed() + except BaseException as poweroffexterm: + self.logger.debug("Unable to stop machine via terminate, STRANGE: %s" % poweroffexterm) + time.sleep(DEFAULT_RETRYTIMEOUT) + pass + pass + + def rm(self): + """ + Remove container image via image method + + :return: + """ + self.logger.debug("Remove") + self.image.rmi() + + +# ====================== Self Tests ====================== + +class testImage(Test): + """ + Test Image class for folders and nspawn installation to dirs + """ + loc1 = "/tmp/dddd1" + loc2 = "/tmp/dddd2" + + def setUp(self): + # cleanup dirs, to ensure that it will pass + # it raises error in case of existing and not used installed=True as option + process.run("rm -rf %s %s" % (self.loc1, self.loc2), ignore_status=True) + self.i1=Image(repos=["http://ftp.fi.muni.cz/pub/linux/fedora/linux/releases/26/Everything/x86_64/os/"], + packageset=["bash"], + location=self.loc1) + + def 
test_basic(self): + assert self.loc1 == self.i1.get_location() + assert os.path.exists(os.path.join(self.i1.get_location(),"usr")) + self.i2 = self.i1.create_snapshot(self.loc2) + assert self.loc2 == self.i2.get_location() + assert os.path.exists(os.path.join(self.i2.get_location(), "usr")) + self.i2.rmi() + + def tearDown(self): + try: + self.i1.rmi() + except: + pass + try: + self.i2.rmi() + except: + pass + + +class testContainer(Test): + """ + It tests Container object and his abilities to run various commands + """ + c1 = None + cname = "contA" + def setUp(self): + loc1 = "/tmp/dddd1" + self.i1 = Image(repos=["http://ftp.fi.muni.cz/pub/linux/fedora/linux/releases/26/Everything/x86_64/os/"], + packageset=["bash", "systemd"], location=loc1, ignore_installed=True) + + def test_basic(self): + self.c1 = Container(image=self.i1, name=self.cname) + self.c1.boot_machine() + assert "sbin" in self.c1.execute(command="ls /").stdout + + + def test_basic_noname(self): + self.c1 = Container(image=self.i1) + self.c1.boot_machine() + assert "sbin" in self.c1.execute(command="ls /").stdout + + def test_basic_systemd_run(self): + self.c1 = Container(image=self.i1, name=self.cname) + self.c1.boot_machine() + assert "sbin" in self.c1.run_systemdrun(command="ls /").stdout + + def test_basic_systemd_run_no_wait(self): + class ContainerNoWait(Container): + def _run_systemdrun_decide(self): + return False + self.c1 = ContainerNoWait(image=self.i1, name=self.cname) + self.c1.boot_machine() + assert "sbin" in self.c1.run_systemdrun(command="ls /").stdout + + + def BAD_test_basic_machinectl_shell(self): + # this test is able to break machine (lock machinectl) + self.c1 = Container(image=self.i1, name=self.cname) + self.c1.boot_machine() + assert "sbin" in self.c1.run_machinectl(command="ls /").stdout + + + def test_copy(self): + self.c1 = Container(image=self.i1, name=self.cname) + self.c1.boot_machine() + ff1 = "/tmp/ee" + ff2 = "/tmp/eee" + process.run("rm -f %s %s" % (ff1, ff2), 
ignore_status=True) + self.c1.execute("rm -f %s %s" %(ff1, ff2), ignore_status=True) + process.run("echo outside > %s" % ff1, shell=True) + assert "outside" in process.run("cat %s" % ff1).stdout + self.c1.copy_to(ff1, ff2) + assert "outside" in self.c1.execute("cat %s" % ff2).stdout + self.c1.execute("echo inside > %s" % ff1) + assert "inside" in self.c1.execute("cat %s" % ff1).stdout + self.c1.copy_from(ff1, ff2) + assert "inside" in process.run("cat %s" % ff2).stdout + + def test_boot_command(self): + self.c1 = Container(image=self.i1, name=self.cname) + self.c1.boot_machine(boot_cmd="sleep 100") + assert "sleep 100" in process.run("machinectl status %s" % self.cname).stdout + self.c1.stop() + try: + process.run("machinectl status %s" % self.cname) + except: + pass + else: + assert False + + def test_boot_command_wait(self): + self.c1 = Container(image=self.i1, name=self.cname) + sleep_time = 10 + t_before = time.time() + self.c1.boot_machine(boot_cmd="sleep %s" % sleep_time, wait_finish=True) + t_after = time.time() + assert (t_after - t_before) > sleep_time + assert (t_after - t_before) < sleep_time*1.5 + try: + process.run("machinectl status %s" % self.cname) + except: + pass + else: + assert False + + + def test_container_additional_options(self): + self.c1 = Container(image=self.i1, name=self.cname) + self.c1.boot_machine(nspawn_add_option_list=["--private-network"]) + assert "sbin" in self.c1.execute(command="ls /").stdout + ifaces = self.c1.execute(command="cat /proc/net/dev") + print ifaces + assert "lo:" in ifaces.stdout + assert len(ifaces.stdout.split("\n")) > 2 + assert len(ifaces.stdout.split("\n"))<=4 + + def tearDown(self): + self.c1.stop() \ No newline at end of file diff --git a/mtf/common/__init__.py b/mtf/common/__init__.py new file mode 100644 index 0000000..c1c5911 --- /dev/null +++ b/mtf/common/__init__.py @@ -0,0 +1 @@ +from moduleframework.common import * diff --git a/mtf/metadata/Makefile b/mtf/metadata/Makefile new file mode 100644 index 
0000000..b1d3535 --- /dev/null +++ b/mtf/metadata/Makefile @@ -0,0 +1,35 @@ +PYTHONSITE=/usr/lib/python2.7/site-packages + +all: check + +integrationtests: install + cd examples/general-component/tests; tmet-agregator + cd examples/general-component/tests; tmet-agregator |grep -o 63 + cd examples/general-component/tests; tmet-filter + cd examples/general-component/tests; tmet-filter | grep 'sanity/generaltest.py' + cd examples/general-component/tests; tmet-filter -b generic | grep 'sanity/generaltest.py' + cd examples/general-component/tests; tmet-filter -b generic | grep meta-test-family.git + cd examples/general-component/tests; tmet-filter -b generic | grep fedora_specific + cd examples/general-component/tests; tmet-filter -b generic -t nonsense | grep -v fedora_specific + cd examples/general-component/tests; tmet-filter -b generic -t nonsense | grep -v meta-test-family.git + cd examples/general-component/tests; tmet-filter -b generic -t tag1 | grep fedora_specific + cd examples/general-component/tests; tmet-filter -b generic -t tag1 | grep -v meta-test-family.git + cd examples/general-component/tests; tmet-filter -b generic -t optional | grep meta-test-family.git + cd examples/general-component/tests; tmet-filter -b generic -t optional | grep -v fedora_specific + + +unittests: + py.test tmet/selftests.py + +check: install unittests integrationtests + +clean: + pip uninstall . + +install: clean + pip install -U . + +source: clean + @python setup.py sdist + +.PHONY: check \ No newline at end of file diff --git a/mtf/metadata/README.md b/mtf/metadata/README.md new file mode 100644 index 0000000..60d6c6a --- /dev/null +++ b/mtf/metadata/README.md @@ -0,0 +1,157 @@ +# Upstream Test Metadata PoC +This project defines file strucuture and tooling to work with general test metadata. + + +## User Stories +* __US1:__ Schedule/__Filter__ testsets based on some filters because you have limited resources(eg. 
not have enough time, just tests what does not need network, because you are offline) + * I want to select some cases based on tags + * I want to select some cases based on relevancy +* __US2:__ Find/__Agregate__ uncovered parts in testsuite to write new tests, or report status of testsuite to managers + * I want to see actual coverage for component + * I want to see uncovered parts +* __US3:__ General format allows us more, Describing tests inside source code is `programming lang` specific and harder to handle + * I want to cover simple usecases in as simplest way as possible + * I want to be able to write complex structure as well + * I want to have it human readable + * I want to see some examples + +## Scope of project +* filter test set for frameworks +* create test coverage report + +### What we cover +* __Test case filtering__, based on relevancy and tags +* __Test coverage__ document and analysis. +* __Example__ [component](examples/general-component/tests) +* __Tooling__ to work with this example, to show abilities +* __Modular__ various tools can define own items for each test and parse it how you want. + +## Out of scope +* __NO schedule tests__ - it is in scope of framework: (eg. avocado, unittest, py.test, restraint) and also it is part of [Invoking test initiative](https://fedoraproject.org/wiki/Changes/InvokingTests) +* __NO dependency solving__ - each framework has own dependency solving (eg. python pypi deps, rpm dependencies in specfile) + * optional scope: it can be part of scope here after some discussion, but just as an optional feature +* __NO test linking__ - each framework has to know how to interpret filtered tests format (eg. set of: local files, python classes, URLs) + * optional scope: could transform formats to format of selected framework (backend) (eg. 
download tests from URLs and store it locally) + +## How it works +* Tree structure of metadata splitted to two types +* can use one metadata file, or split metadata file to each test or use combination of both solution. +* Two type of `metadata.yaml` + * __general__ - fully descriptive file for writing general info about component and testing and whatever you want - [metadata.yaml](examples/general-component/tests/metadata.yaml) + * __test__ - basic metadata for test, it has same value as any test in `tests` element [metadata.yaml](examples/general-component/tests/sanity/metadata.yaml) + + +## Installation +``` +sudo make install +``` + +## Self-Check +``` +sudo make check +``` + +## Usage + * Two tools: `tmet-filter` and `tmet-agregator` + * Swithc to example directory `examples/general-component/tests` and try them + +## Config examples + +### Simple config +As output there will be two tests independent on backend framework + +``` +document: test-metadata +subtype: general +import_tests: + - "/bin/true" + - "/bin/false" +``` + +### Simple Config with MTF Linters +tag filetrs with imported test and enabled MTF modulelint and import all tests (relatively to base dir) + +``` +document: test-metadata +subtype: general +enable_lint: True +tag_filters: + - "add,-rem" + - "dockerfilelint" +import_tests: + - "*.py" +``` + +### Configs just for tests +when you want to have metadata to each test put similar config to directory (coverage inclueded) +``` +document: test-metadata +subtype: test +source: generaltest.py +relevancy: + - rule 1 + - rule 2 +description: some general test doing +envvars: + ATOMIC: link to atomic container +``` + +### Complex config with coverage + see [example component](examples/general-component/tests/metadata.yaml) + +#### Example output of commands + +``` +$ tmet-agregator +50% +``` + +``` +$ tmet-agregator -a md +# Coverage for: tests + +## Description +Not given + + +## Tests +* general + * by: generaltest.py + * description: some general 
test doing +* networking/use_tcp (MISSING coverage) + * description: desc of not covered, missing source +* networking/use_udp (MISSING coverage) + * description: desc of not covered, missing source +* options/extend_test + * by: https://github.com/fedora-modularity/meta-test-family.git + * description: some general test doing verbose test +* options/fedora_test + * by: fedora_specifictest.py + * description: some general test doing verbose test +* options/new_option (MISSING coverage) + * description: desc of not covered, missing source +* sanity + * by: generaltest.py + * description: some general test doing +* sanity/SSSSSS (MISSING coverage) + * description: some general test doing + +## Overall Coverage: 50% +``` + +``` +$ tmet-filter +file://general/generaltest.py file://networking/use_tcp/ file://networking/use_udp/ https://github.com/fedora-modularity/meta-test-family.git file://options/fedora_test/fedora_specifictest.py file://options/new_option/ file://sanity/generaltest.py file://sanity/SSSSSS/ +``` + +``` +$ tmet-filter --help +usage: tmet-filter [-h] [-r RELEVANCY] [-t TAGS] + +Filter and print tests + +optional arguments: + -h, --help show this help message and exit + -r RELEVANCY apply relevancy filtering, expect environment specification + -t TAGS apply tags filtering, expect tags in DNF form +``` diff --git a/mtf/metadata/Vagrantfile b/mtf/metadata/Vagrantfile new file mode 100644 index 0000000..26459b9 --- /dev/null +++ b/mtf/metadata/Vagrantfile @@ -0,0 +1,56 @@ +# vi: set ft=ruby : +# -*- coding: utf-8 -*- +# +# Meta test family (MTF) is a tool to test components of a modular Fedora: +# https://docs.pagure.org/modularity/ +# Copyright (C) 2017 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# he Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Authors: Jan Scotka +# + +Vagrant.configure(2) do |config| + + config.vm.box = "fedora/26-cloud-base" + config.vm.synced_folder ".", "/opt/metadata" + config.vm.network "private_network", ip: "192.168.50.10" + config.vm.hostname = "metadatatesting" + config.vm.post_up_message = "Machine is prepared or you, to test PoC for metadata, examples stored in /opt/metadata" + + config.vm.provider "libvirt" do |libvirt| + libvirt.memory = 1024 + libvirt.nested = true + libvirt.cpu_mode = "host-model" + end + + config.vm.provider "virtualbox" do |virtualbox| + virtualbox.memory = 1024 + end + + config.vm.provision "shell", inline: <<-SHELL + set -ex + dnf -y install python-pip python2-pip + cd /opt/metadata + + make install + + # if you want to test linters distributed by MTF, selftest is rely on that + dnf -y copr enable phracek/meta-test-family-devel + dnf -y install meta-test-family || true + + make check + SHELL +end diff --git a/mtf/metadata/__init__.py b/mtf/metadata/__init__.py new file mode 100644 index 0000000..f6ae7a3 --- /dev/null +++ b/mtf/metadata/__init__.py @@ -0,0 +1,4 @@ +""" +tmet - Test METadata library, whole library is exported as module interface +""" +from tmet import * diff --git a/mtf/metadata/examples/general-component/tests/fedora_specifictest.sh b/mtf/metadata/examples/general-component/tests/fedora_specifictest.sh new file mode 100644 index 0000000..3395065 --- /dev/null +++ b/mtf/metadata/examples/general-component/tests/fedora_specifictest.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +true diff 
--git a/mtf/metadata/examples/general-component/tests/generaltest.py b/mtf/metadata/examples/general-component/tests/generaltest.py new file mode 100644 index 0000000..9dfc0ef --- /dev/null +++ b/mtf/metadata/examples/general-component/tests/generaltest.py @@ -0,0 +1,5 @@ +from avocado import Test + +class X1(Test): + def test(self): + pass diff --git a/mtf/metadata/examples/general-component/tests/metadata.yaml b/mtf/metadata/examples/general-component/tests/metadata.yaml new file mode 100644 index 0000000..b5849dd --- /dev/null +++ b/mtf/metadata/examples/general-component/tests/metadata.yaml @@ -0,0 +1,73 @@ +# document identifier +document: test-metadata +# there are two types: full config is "general",for simple test metadata use "test" +subtype: general +# enable linters for selected module type (not supported for generic class) +enable_lint: False +# Import tests by path glob or by name, without complex features for coverage mapping +import_tests: + - "*.py" + - "/bin/true" +# DNF form of tags: Lines are logical OR, "," is logical AND, "-" is logical NOT +tag_filters: + - tier1 + - optional,-fedora + - dockerlinter + - dockerfilelinter +# Coverage test mapping. 
Describe which part are covered and what are not covered (does not contain "source") +# important are: +# leafs what contains test +# nonleafs means coverage path mapping +tests: + general: + # path in coverage mapping + source: generaltest.py + # code of test, where it lives (filesystem or some URL depending on backend (what is supported) + relevancy: + # relevancy descriptiont (NOT implemented now) + - rule 1 + - rule 2 + description: some general test doing + # description of test + backend: mtf + # which backend to use and group tests (you can filter tests for one backend) + options: + fedora_test: + source: fedora_specifictest.sh + # how to specify tags in generic tests + tags: tier1,tier2,tag1 + description: some general test doing verbose test + backend: general + extend_test: + source: https://github.com/fedora-modularity/meta-test-family.git + tags: optional + description: some general test doing verbose test + backend: general + test_new_option: + description: desc of not covered, missing source + networking: + test_use_tcp: + description: desc of not covered, missing source + backend: general + test_use_udp: + description: desc of not covered, missing source + backend: general + +# Test Sets, +# when you would like to schedule just part of tests and have varisou relevancy tag filters, use test_sets +# Not implemented for now +test_sets: + fedora: + tests: + - general + - fedora_test + all: + tests: + - "*" + tag_filters: + test_options: + tests: + - options/* + fedora: + tests: + - general diff --git a/mtf/metadata/examples/general-component/tests/sanity/SSSSSS/metadata.yaml b/mtf/metadata/examples/general-component/tests/sanity/SSSSSS/metadata.yaml new file mode 100644 index 0000000..b0d5344 --- /dev/null +++ b/mtf/metadata/examples/general-component/tests/sanity/SSSSSS/metadata.yaml @@ -0,0 +1,3 @@ +document: test-metadata +subtype: test +description: some general test doing diff --git 
a/mtf/metadata/examples/general-component/tests/sanity/generaltest.py b/mtf/metadata/examples/general-component/tests/sanity/generaltest.py new file mode 100644 index 0000000..2ae2839 --- /dev/null +++ b/mtf/metadata/examples/general-component/tests/sanity/generaltest.py @@ -0,0 +1 @@ +pass diff --git a/mtf/metadata/examples/general-component/tests/sanity/metadata.yaml b/mtf/metadata/examples/general-component/tests/sanity/metadata.yaml new file mode 100644 index 0000000..f670a89 --- /dev/null +++ b/mtf/metadata/examples/general-component/tests/sanity/metadata.yaml @@ -0,0 +1,10 @@ +document: test-metadata +subtype: test +source: generaltest.py +relevancy: + - rule 1 + - rule 2 +description: some general test doing +envvars: + ATOMIC: link to atomic container +backend: general diff --git a/mtf/metadata/examples/general-component/tests/simple.py b/mtf/metadata/examples/general-component/tests/simple.py new file mode 100644 index 0000000..d1ee52c --- /dev/null +++ b/mtf/metadata/examples/general-component/tests/simple.py @@ -0,0 +1,5 @@ +from avocado import Test + +class X2(Test): + def test(self): + pass diff --git a/mtf/metadata/examples/general-simple/metadata.yaml b/mtf/metadata/examples/general-simple/metadata.yaml new file mode 100644 index 0000000..6d7bb5d --- /dev/null +++ b/mtf/metadata/examples/general-simple/metadata.yaml @@ -0,0 +1,5 @@ +document: test-metadata +subtype: general +import_tests: + - "/bin/true" + - "/bin/false" diff --git a/mtf/metadata/examples/mtf-clean/tests/all.py b/mtf/metadata/examples/mtf-clean/tests/all.py new file mode 100644 index 0000000..23724ea --- /dev/null +++ b/mtf/metadata/examples/mtf-clean/tests/all.py @@ -0,0 +1,32 @@ +from avocado import Test + + +class Add1(Test): + """ + :avocado: enable + :avocado: tags=add + """ + + def test(self): + pass + + +class Add2(Test): + """ + :avocado: enable + """ + + def test(self): + """ + :avocado: tags=add + """ + pass + + +class Add3(Test): + """ + :avocado: enable + """ + + def 
test(self): + pass diff --git a/mtf/metadata/examples/mtf-clean/tests/dockerlinter.py b/mtf/metadata/examples/mtf-clean/tests/dockerlinter.py new file mode 100644 index 0000000..0cf538e --- /dev/null +++ b/mtf/metadata/examples/mtf-clean/tests/dockerlinter.py @@ -0,0 +1,21 @@ +from avocado import Test + + +class DockerFileLint(Test): + """ + :avocado: enable + :avocado: tags=dockerfilelint,docker,rhel,fedora + """ + + def test(self): + pass + + +class DockerLint(Test): + """ + :avocado: enable + :avocado: tags=dockerlint,docker,rhel,fedora + """ + + def test(self): + pass diff --git a/mtf/metadata/examples/mtf-clean/tests/metadata.yaml b/mtf/metadata/examples/mtf-clean/tests/metadata.yaml new file mode 100644 index 0000000..30e1fd7 --- /dev/null +++ b/mtf/metadata/examples/mtf-clean/tests/metadata.yaml @@ -0,0 +1,2 @@ +document: test-metadata +subtype: general diff --git a/mtf/metadata/examples/mtf-clean/tests/none.py b/mtf/metadata/examples/mtf-clean/tests/none.py new file mode 100644 index 0000000..151c3bf --- /dev/null +++ b/mtf/metadata/examples/mtf-clean/tests/none.py @@ -0,0 +1,32 @@ +from avocado import Test + + +class Rem1(Test): + """ + :avocado: enable + :avocado: tags=rem + """ + + def test(self): + pass + + +class Rem2(Test): + """ + :avocado: enable + """ + + def test(self): + """ + :avocado: tags=rem + """ + pass + + +class Rem3(Test): + """ + :avocado: disable + """ + + def test(self): + pass diff --git a/mtf/metadata/examples/mtf-clean/tests/some.py b/mtf/metadata/examples/mtf-clean/tests/some.py new file mode 100644 index 0000000..b680160 --- /dev/null +++ b/mtf/metadata/examples/mtf-clean/tests/some.py @@ -0,0 +1,37 @@ +from avocado import Test + + +class Add1(Test): + """ + :avocado: enable + :avocado: tags=add + """ + + def test(self): + pass + + +class AddPart(Test): + """ + :avocado: enable + :avocado: tags=add + """ + + def testAdd(self): + pass + + def testBad(self): + """ + :avocado: tags=rem + """ + pass + + +class Rem2(Test): + """ + 
:avocado: enable + :avocado: tags=rem + """ + + def test(self): + pass diff --git a/mtf/metadata/examples/mtf-component/tests/all.py b/mtf/metadata/examples/mtf-component/tests/all.py new file mode 100644 index 0000000..23724ea --- /dev/null +++ b/mtf/metadata/examples/mtf-component/tests/all.py @@ -0,0 +1,32 @@ +from avocado import Test + + +class Add1(Test): + """ + :avocado: enable + :avocado: tags=add + """ + + def test(self): + pass + + +class Add2(Test): + """ + :avocado: enable + """ + + def test(self): + """ + :avocado: tags=add + """ + pass + + +class Add3(Test): + """ + :avocado: enable + """ + + def test(self): + pass diff --git a/mtf/metadata/examples/mtf-component/tests/dockerlinter.py b/mtf/metadata/examples/mtf-component/tests/dockerlinter.py new file mode 100644 index 0000000..0cf538e --- /dev/null +++ b/mtf/metadata/examples/mtf-component/tests/dockerlinter.py @@ -0,0 +1,21 @@ +from avocado import Test + + +class DockerFileLint(Test): + """ + :avocado: enable + :avocado: tags=dockerfilelint,docker,rhel,fedora + """ + + def test(self): + pass + + +class DockerLint(Test): + """ + :avocado: enable + :avocado: tags=dockerlint,docker,rhel,fedora + """ + + def test(self): + pass diff --git a/mtf/metadata/examples/mtf-component/tests/metadata.yaml b/mtf/metadata/examples/mtf-component/tests/metadata.yaml new file mode 100644 index 0000000..a5ec764 --- /dev/null +++ b/mtf/metadata/examples/mtf-component/tests/metadata.yaml @@ -0,0 +1,7 @@ +document: test-metadata +subtype: general +tag_filters: + - "add,-rem" + - "dockerfilelint" +import_tests: + - "*.py" diff --git a/mtf/metadata/examples/mtf-component/tests/none.py b/mtf/metadata/examples/mtf-component/tests/none.py new file mode 100644 index 0000000..151c3bf --- /dev/null +++ b/mtf/metadata/examples/mtf-component/tests/none.py @@ -0,0 +1,32 @@ +from avocado import Test + + +class Rem1(Test): + """ + :avocado: enable + :avocado: tags=rem + """ + + def test(self): + pass + + +class Rem2(Test): + """ + 
:avocado: enable + """ + + def test(self): + """ + :avocado: tags=rem + """ + pass + + +class Rem3(Test): + """ + :avocado: disable + """ + + def test(self): + pass diff --git a/mtf/metadata/examples/mtf-component/tests/some.py b/mtf/metadata/examples/mtf-component/tests/some.py new file mode 100644 index 0000000..b680160 --- /dev/null +++ b/mtf/metadata/examples/mtf-component/tests/some.py @@ -0,0 +1,37 @@ +from avocado import Test + + +class Add1(Test): + """ + :avocado: enable + :avocado: tags=add + """ + + def test(self): + pass + + +class AddPart(Test): + """ + :avocado: enable + :avocado: tags=add + """ + + def testAdd(self): + pass + + def testBad(self): + """ + :avocado: tags=rem + """ + pass + + +class Rem2(Test): + """ + :avocado: enable + :avocado: tags=rem + """ + + def test(self): + pass diff --git a/mtf/metadata/requirements.txt b/mtf/metadata/requirements.txt new file mode 100644 index 0000000..ca52ff6 --- /dev/null +++ b/mtf/metadata/requirements.txt @@ -0,0 +1,3 @@ +avocado-framework +pytest +PyYAML diff --git a/mtf/metadata/setup.py b/mtf/metadata/setup.py new file mode 100644 index 0000000..18e5819 --- /dev/null +++ b/mtf/metadata/setup.py @@ -0,0 +1,84 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2013-2014 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# he Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Authors: Jan Scotka + +import os +import sys + +try: + from setuptools import setup, find_packages +except ImportError: + from distutils.core import setup + +# copy from https://github.com/avocado-framework/avocado/blob/master/setup.py +VIRTUAL_ENV = hasattr(sys, 'real_prefix') + + +def get_dir(system_path=None, virtual_path=None): + """ + Retrieve VIRTUAL_ENV friendly path + :param system_path: Relative system path + :param virtual_path: Overrides system_path for virtual_env only + :return: VIRTUAL_ENV friendly path + """ + if virtual_path is None: + virtual_path = system_path + if VIRTUAL_ENV: + if virtual_path is None: + virtual_path = [] + return os.path.join(*virtual_path) + else: + if system_path is None: + system_path = [] + return os.path.join(*(['/'] + system_path)) + +data_files = {} + + +setup( + name='tmet', + version="0.0.1", + description='Test METadata for tests (filter, agregate metadata)', + keywords='metadata,test', + author='Jan Scotka', + author_email='jscotka@redhat.com', + url='https://None', + license='GPLv2+', + packages=find_packages(), + include_package_data=True, + data_files=data_files.items(), + entry_points={ + 'console_scripts': [ + 'tmet-filter = tmet.filter:main', + 'tmet-agregator = tmet.agregator:main', + ] + }, + setup_requires=[], + classifiers=[ + 'Development Status :: 4 - Beta', + 'Environment :: Console', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)', + 'Operating System :: POSIX :: Linux', + 'Programming Language :: Python', + 'Topic :: Software Development', + ], + install_requires=open('requirements.txt').read().splitlines() +) diff --git a/mtf/metadata/tmet/__init__.py b/mtf/metadata/tmet/__init__.py new file mode 100644 index 
0000000..e69de29 diff --git a/mtf/metadata/tmet/agregator.py b/mtf/metadata/tmet/agregator.py new file mode 100644 index 0000000..963a383 --- /dev/null +++ b/mtf/metadata/tmet/agregator.py @@ -0,0 +1,54 @@ + +""" +Create agregation report (wiki style) + +""" + +import argparse +import common +import os + + +def get_options(): + parser = argparse.ArgumentParser(description='Create Coverage reports') + parser.add_argument('-a', dest='action', default="statistic", + help='print to stdout in selected format') + args = parser.parse_args() + return args + + +def print_md_file(meta): + items = meta.get_coverage() + output = ["# Coverage for: %s" % os.path.basename(os.getcwd()), ""] + output += ["## Description", meta.base_element.get(common.DESC) or "Not given", "", ""] + output += ["## Tests"] + for key in sorted(items): + if items[key].get(common.SOURCE): + output += ["* %s" % key] + output += [" * by: %s" % items[key].get(common.SOURCE)] + output += [" * description: %s" % items[key].get(common.DESC)] + + else: + output += ["* %s (MISSING coverage)" % key] + output += [" * description: %s" % items[key].get(common.DESC)] + output += ["", "## Overall Coverage: %s" % statistic(meta)] + return output + + +def statistic(meta): + items = meta.get_coverage() + counter = 0 + all = len(items) + for key, value in items.iteritems(): + if value.get(common.SOURCE): + counter += 1 + return "%s%%" % (counter * 100 / all) + + +def main(): + options = get_options() + meta = common.MetadataLoader() + if options.action == "statistic": + print statistic(meta) + elif options.action == "md": + print "\n".join(print_md_file(meta)) diff --git a/mtf/metadata/tmet/common.py b/mtf/metadata/tmet/common.py new file mode 100644 index 0000000..cac55d4 --- /dev/null +++ b/mtf/metadata/tmet/common.py @@ -0,0 +1,390 @@ +from __future__ import print_function +import yaml +import os +import sys +import glob +from avocado.utils import process +from urlparse import urlparse +from warnings import warn + 
+""" +Basic classes for metatadata handling, it contains classes derived from general metadata parser +for example for MTF testcases + +for usage and deeper understand see examples unittests in selftest.py file +""" + +MFILENAME = "metadata.yaml" +DESC = "description" +SOURCE = "source" +TESTE = "tests" +TESTC = "tests_coverage" +DOCUMENT = "document" +DOCUMENT_TYPES = ["metadata", "test-metadata", "tmet"] +SUBTYPE = "subtype" +SUBTYPE_G = "general" +SUBTYPE_T = "test" +BACKEND = "backend" +TAGS = "tags" +RELEVANCY = "relevancy" +DEPENDENCIES = "deps" +COVPATH = "coverage_path" +MODULELINT = "enable_lint" +IMPORT_TESTS = "import_tests" +TAG_FILETERS = "tag_filters" + +def print_debug(*args): + """ + Own implementation of print_debug, to not be dependent on MTF anyhow inside metadata + """ + if os.environ.get("DEBUG"): + for arg in args: + print(arg, file=sys.stderr) + + +def logic_formula(statement, filters, op_negation="-", op_and=",", op_or=None): + """ + disjunctive normla form statement parser https://en.wikipedia.org/wiki/Disjunctive_normal_form + + :param statement: + :param filters: + :param op_negation: + :param op_and: + :param op_or: + :return: + """ + def logic_simple(simple): + key = simple + value = True + if key.startswith(op_negation): + key = key[len(op_negation):] + value = False + return key, value + + def logic_and(normalform): + dictset = {} + for one in normalform.split(op_and): + k, v = logic_simple(one) + dictset[k] = v + return dictset + + def logic_filter(actual_tag_list, tag_filter): + # TODO: try to replace this part with http://www.sympy.org + statement_or = False + # if no tags in test then add this test to set (same behaviour as avocado --tag...empty) + # print actual_tag_list, tag_filter + if not actual_tag_list: + statement_or = True + actualinput = logic_and(actual_tag_list) + for onefilter in tag_filter: + filterinput = logic_and(onefilter) + statement_and = True + for key in filterinput: + if filterinput.get(key) != 
bool(actualinput.get(key)): + statement_and = False + break + if statement_and: + statement_or = True + break + return statement_or + + if op_or: + filters = filters.split(op_or) + elif isinstance(filters,str): + filters = [filters] + return logic_filter(statement, filters) + + +class MetadataLoader(object): + """ + General class for parsing test metadata from metadata.yaml files + """ + base_element = {} + backends = ["generic", "general"] + # in filter there will be items like: {"relevancy": None, "tags": None} + filter_list = [None] + + def __init__(self, location=".", linters=False, **kwargs): + self.location = os.path.abspath(location) + self._load_recursive() + if linters or self.base_element.get(MODULELINT): + self._import_linters() + if IMPORT_TESTS in self.base_element: + for testglob in self.base_element.get(IMPORT_TESTS): + self._import_tests(os.path.join(self.location, testglob)) + if TAG_FILETERS in self.base_element: + self.add_filter(tags=self.base_element.get(TAG_FILETERS)) + + def _import_tests(self, testglob, pathlenght=0): + """ + import tests based on file path glob like "*.py" + + :param testglob: string + :param pathlenght: lenght of path for coverage usage (by default full path from glob is used) + :return: + """ + pathglob = testglob if testglob.startswith(os.pathsep) else os.path.join(self.location, testglob) + print_debug("Import tests: %s" % pathglob) + for testfile in glob.glob(pathglob): + test = {SOURCE: testfile, DESC: "Imported tests by path: %s" % pathglob} + self._insert_to_test_tree(testfile.strip(os.sep).split(os.sep)[pathlenght:], + test) + + def _import_linters(self): + """ + Import linters if any for backend type + + :return: + """ + raise NotImplementedError + + def get_metadata(self): + """ + get whole metadata loaded object + :return: dict + """ + return self.base_element + + def load_yaml(self, location): + """ + internal method for loading data from yaml file + + :param location: + :return: + """ + print_debug("Loading 
metadata from file: %s" % location) + with open(location, 'r') as ymlfile: + xcfg = yaml.load(ymlfile.read()) + if xcfg.get(DOCUMENT) not in DOCUMENT_TYPES: + raise BaseException("bad yaml file: item (%s)", xcfg.get(DOCUMENT)) + else: + return xcfg + + def _insert_to_coverage(self, path_list, test): + """ + translate test with path to coverage mapping TESTC + + :param path_list: how to store coverage + :param test: dict object representing test + :return: + """ + coverage_key = "/".join(path_list) + print_debug("insert to coverage %s to %s" % (test, coverage_key)) + # add test coverage key + # add backend key if does not exist + self.base_element[TESTC][coverage_key] = test + self.base_element[TESTC][coverage_key][COVPATH] = coverage_key + if BACKEND not in self.base_element[TESTC][coverage_key]: + self.base_element[TESTC][coverage_key][BACKEND] = self.backends[0] + + def _parse_base_coverage(self, base=None, path=[]): + """ + RECURSIVE internal method for parsing coverage in GENERAL metadata yaml tests: key in file + + :param base: + :param path: + :return: + """ + base = base or self.base_element.get(TESTE, {}) + if DESC in base: + if path: + self._insert_to_coverage(path, base) + else: + for key, value in base.iteritems(): + self._parse_base_coverage(base=value, path=path + [key]) + + def _insert_to_test_tree(self, path_list, test): + """ + Internal method to insert test to tests: dict object, used by simple metadata files + + :param path_list: where to store item (based on FS path) + :param test: test object + :return: + """ + actualelem = self.base_element[TESTE] + # sanitize testpath with test for these tests what are not fully qualified files + # append directory locaiton to source + if SOURCE in test \ + and not urlparse(test.get(SOURCE)).scheme \ + and not test.get(SOURCE).startswith(os.pathsep): + test[SOURCE] = os.path.join(*(path_list + [test[SOURCE]])) + print_debug("source testpath extended %s" % test[SOURCE]) + previous_item = None + link_previous = 
None + # Next code create full dictionary path to test if does not exist. + # like ['a','b','c'] creates {'a':{'b':{'c':{}}}} + for item in path_list: + if actualelem.get(item) is None: + actualelem[item] = dict() + link_previous = actualelem + previous_item = item + actualelem = actualelem[item] + link_previous[previous_item] = test + self._insert_to_coverage(path_list, test) + return self.base_element + + def _load_recursive(self): + """ + Internal method to parse all metadata files + It uses os.walk to find all files recursively + + :return: + """ + allfiles = [] + location = self.location + for root, sub_folders, files in os.walk(location): + if MFILENAME in files: + allfiles.append(os.path.join(root, MFILENAME)) + elem_element = {} + if allfiles: + elem_element = self.load_yaml(allfiles[0]) + if elem_element.get(SUBTYPE) == SUBTYPE_G: + # this code cannont cause traceback because default value is {} or it loads yaml + allfiles = allfiles[1:] + self.base_element = elem_element + else: + self.base_element = {} + if TESTC not in self.base_element: + self.base_element[TESTC] = dict() + if TESTE not in self.base_element: + self.base_element[TESTE] = dict() + self._parse_base_coverage() + for item in allfiles: + self._insert_to_test_tree(os.path.dirname(item)[len(location):].split("/")[1:], + self.load_yaml(item)) + + def get_coverage(self): + """ + return coverage elemetn + :return: dict + """ + return self.base_element.get(TESTC) + + def get_backends(self): + """ + List of all backends mentioned in metadata file + :return: list + """ + return set([x.get(BACKEND) for x in self.get_coverage().values()]) + + def filter_relevancy(self, tests, envrion_description): + """ + apply relevancy filtering, actually just a stub, returns everything + Not implemented + + :param tests: list of tests + :param envrion_description: enviroment description + :return: + """ + return tests + + def filter_tags(self, tests, tag_list): + """ + filter tags based on tags in metadata files 
for test + + :param tests: + :param tag_list: + :return: + """ + output = [] + for test in tests: + test_tags = test.get(TAGS, "") + if logic_formula(test_tags, tag_list): + output.append(test) + return output + + def add_filter(self, tags=[], relevancy={}): + """ + You can define multiple filters and apply them, + :param tags: + :param relevancy: + :return: + """ + addedfilter = {RELEVANCY: relevancy, TAGS: tags} + if self.filter_list[-1] is None: + self.filter_list = self.filter_list[:-1] + self.filter_list.append(addedfilter) + + def apply_filters(self): + output = self.backend_tests() + for infilter in self.filter_list: + if infilter: + if infilter.get(TAGS): + output = self.filter_tags(output, infilter.get(TAGS)) + if infilter.get(RELEVANCY): + output = self.filter_relevancy(output, infilter.get(RELEVANCY)) + return output + + def backend_tests(self): + cov = self.get_coverage() + return [cov[x] for x in cov if cov[x].get(BACKEND) in self.backends and SOURCE in cov[x]] + + def backend_passtrought_args(self): + return self.get_filters() + + def get_filters(self): + return self.filter_list + + +class MetadataLoaderMTF(MetadataLoader): + """ + metadata specific class for MTF (avocado) tests + """ + try: + import moduleframework.tools + MTF_LINTER_PATH = os.path.dirname(moduleframework.tools.__file__) + except: + warn("MTF library not installed, linters are ignored") + MTF_LINTER_PATH = None + listcmd = "avocado list" + backends = ["mtf", "avocado"] + + def _import_tests(self, testglob, pathlenght=0): + pathglob = testglob if testglob.startswith(os.pathsep) else os.path.join(self.location, testglob) + print_debug("Import by pathglob: %s" % pathglob) + tests_cmd = process.run("%s %s" % (self.listcmd, pathglob), shell=True, verbose=False, ignore_status=True) + tests = tests_cmd.stdout.splitlines() + if tests_cmd.exit_status != 0: + raise BaseException("unbale to import tests (avocado list) via location: %s" % pathglob) + for testurl in tests: + if testurl and 
len(testurl) > 1: + testlinesplitted = testurl.split(" ") + testfile = " ".join(testlinesplitted[1:]) + testtype = testlinesplitted[0] + print_debug("\t%s" % testfile) + test = {SOURCE: testfile, + DESC: "Imported (%s) tests by path: %s" % (testtype, pathglob), + "avocado_test_type": testtype + } + self._insert_to_test_tree(testfile.strip(os.sep).split(os.sep)[pathlenght:], + test) + + def _import_linters(self): + if self.MTF_LINTER_PATH: + self._import_tests(os.path.join(self.MTF_LINTER_PATH, "*.py"), pathlenght=-3) + + def __avcado_tag_args(self, tag_list, defaultparam="--filter-by-tags-include-empty"): + output = [] + for tag in tag_list: + output.append("--filter-by-tags=%s" % tag) + if output: + output.append(defaultparam) + return " ".join(output) + + def filter_tags(self, tests, tag_list): + output = [] + for test in tests: + cmd = process.run("%s %s %s" % (self.listcmd, self.__avcado_tag_args(tag_list), test[SOURCE]), + shell=True, verbose=False) + if len(cmd.stdout) > 10: + output.append(test) + return output + + +def get_backend_class(backend): + if backend == "mtf": + out = MetadataLoaderMTF + else: + out = MetadataLoader + print_debug("Backend is: %s" % out) + return out diff --git a/mtf/metadata/tmet/filter.py b/mtf/metadata/tmet/filter.py new file mode 100644 index 0000000..53d910f --- /dev/null +++ b/mtf/metadata/tmet/filter.py @@ -0,0 +1,69 @@ +""" +Filter testcases based on various parameters + +""" +import argparse +import common + + +def get_options(): + parser = argparse.ArgumentParser(description='Filter and print tests') + parser.add_argument('-r', dest='relevancy', + help='apply relevancy filtering, expect environment specification') + parser.add_argument( + '-t', + dest='tags', + action="append", + help='apply tags filtering, expect tags in DNF form (expressions in one option means AND, more -t means OR)') + parser.add_argument('-b', dest='backend', + help='output for selected backend') + parser.add_argument('--location', dest='location', 
default='.', + help='output for selected backend') + parser.add_argument('--linters', dest='linters', action='store_true', + help='output for selected backend') + parser.add_argument('--nofilters', dest='nofilters', action='store_true', + help='disable all filters in config file and show all tests for backend') + parser.add_argument('tests', nargs='*', help='import tests for selected backed') + + args = parser.parse_args() + return args + + +def main(): + options = get_options() + output = filtertests(backend=options.backend, + location=options.location, + linters=options.linters, + tests=options.tests, + tags=options.tags, + relevancy=options.relevancy, + applyfilters=not options.nofilters + ) + print " ".join([x[common.SOURCE] for x in output]) + + +def filtertests(backend, location, linters, tests, tags, relevancy, applyfilters=True): + """ + Basic method to use it for wrapping inside another python code, + allows apply tag filters and relevancy + + :param backend: + :param location: + :param linters: + :param tests: + :param tags: + :param relevancy: + :return: + """ + meta = common.get_backend_class(backend)(location=location, + linters=linters, + backend=backend) + if tests: + for test in tests: + meta._import_tests(test) + + if applyfilters: + meta.add_filter(tags=tags, relevancy=relevancy) + return meta.apply_filters() + else: + return meta.backend_tests() diff --git a/mtf/metadata/tmet/selftests.py b/mtf/metadata/tmet/selftests.py new file mode 100644 index 0000000..6f7266e --- /dev/null +++ b/mtf/metadata/tmet/selftests.py @@ -0,0 +1,207 @@ +from common import MetadataLoader, MetadataLoaderMTF, SOURCE, print_debug, logic_formula +from filter import filtertests +import yaml + +__TC_GENERAL_COMPONENT = "examples/general-component/tests" +__TC_MTF_COMPOMENT = "examples/mtf-clean/tests" +__TC_MTF_CONF = "examples/mtf-component/tests" +__TC_GENERAL_CONF = "examples/general-simple" + + +def test_loader(): + """ + Test general backend loader for complex case + 
:return: + """ + mt = MetadataLoader(location=__TC_GENERAL_COMPONENT) + print_debug(yaml.dump(mt.get_metadata())) + print_debug(mt.get_backends()) + assert 'sanity/generaltest.py' in [x[SOURCE] for x in mt.backend_tests()] + + +def test_mtf_metadata_linters_and_tests_noconfig(): + """ + Test linter only for MTF loader, using no config + :return: + """ + mt = MetadataLoaderMTF(location=__TC_MTF_COMPOMENT, linters=True) + # print yaml.dump(mt.get_metadata()) + # print mt.backend_passtrought_args() + # print mt.apply_filters() + case_justlinters_nofilter = mt.apply_filters() + print_debug(case_justlinters_nofilter) + mt._import_tests("*.py") + case_lintersanstests_nofilter = mt.apply_filters() + print_debug(case_lintersanstests_nofilter) + mt.add_filter(tags=["add"]) + case_lintersanstests_filter1 = mt.apply_filters() + print_debug(case_lintersanstests_filter1) + mt.add_filter(tags=["-add"]) + case_lintersanstests_filter2 = mt.apply_filters() + print_debug(case_lintersanstests_filter2) + + assert len(case_justlinters_nofilter) > 20 + assert len(case_lintersanstests_nofilter) > len(case_justlinters_nofilter) + assert len(case_lintersanstests_filter1) < len(case_justlinters_nofilter) + assert len(case_lintersanstests_filter1) < len(case_lintersanstests_nofilter) + assert len(case_lintersanstests_filter1) > len(case_lintersanstests_filter2) + assert len(case_lintersanstests_filter2) < len(case_justlinters_nofilter) + # print [v[SOURCE] for v in mt.backend_tests()] + # mt.apply_filters() + + +def test_filter_mtf_justlintes(): + """ + test load just linter with simple config + :return: + """ + out = filtertests(backend="mtf", + location=__TC_MTF_COMPOMENT, + linters=True, + tests=[], + tags=[], + relevancy="") + print_debug(out) + assert len(out) > 20 + + +def test_filter_mtf_nothing(): + """ + use config what loads no tests + :return: + """ + out = filtertests(backend="mtf", + location=__TC_MTF_COMPOMENT, + linters=False, + tests=[], + tags=[], + relevancy="") + 
print_debug(out) + assert len(out) == 0 + + +def test_filter_mtf_justtests(): + """ + tests load configu and just tests there + """ + out = filtertests(backend="mtf", + location=__TC_MTF_COMPOMENT, + linters=False, + tests=["*.py"], + tags=[], + relevancy="") + print_debug(out) + assert len(out) == 11 + + +def test_filter_mtf_filtered_tests_add(): + """ + tests load config with filters + :return: + """ + out = filtertests(backend="mtf", + location=__TC_MTF_COMPOMENT, + linters=False, + tests=["*.py"], + tags=["add"], + relevancy="") + print_debug(out) + assert len(out) == 6 + + +def test_filter_mtf_filtered_notadd(): + """ + tests load config with filters + :return: + """ + out = filtertests(backend="mtf", + location=__TC_MTF_COMPOMENT, + linters=False, + tests=["*.py"], + tags=["-add"], + relevancy="") + print_debug(out) + assert len(out) == 6 + + +def test_filter_mtf_filtered_rem(): + """ + tests load config with filters + + :return: + """ + out = filtertests(backend="mtf", + location=__TC_MTF_COMPOMENT, + linters=False, + tests=["*.py"], + tags=["rem"], + relevancy="") + print_debug(out) + assert len(out) == 5 + + +def test_filter_general(): + """ + tests load config with filters, for general module + :return: + """ + out = filtertests(backend=None, + location=__TC_GENERAL_COMPONENT, + linters=False, + tests=[], + tags=[], + relevancy="") + print_debug(out) + assert len(out) == 6 + + +def test_mtf_config(): + """ + test real life example and check if proper tests were filtered based on config file + :return: + """ + out = filtertests(backend="mtf", + location=__TC_MTF_CONF, + linters=False, + tests=[], + tags=[], + relevancy="") + tests = [x[SOURCE] for x in out] + print_debug(tests) + assert len(tests) == 6 + assert "Rem" not in " ".join(tests) + assert "Add" in " ".join(tests) + assert "DockerFileLint" in " ".join(tests) + + +def test_general_config(): + """ + test loading general config and check number of tests, linters disabled + :return: + """ + out = 
(filtertests(backend=None, + location=__TC_GENERAL_CONF, + linters=False, + tests=[], + tags=[], + relevancy="")) + print_debug(out) + assert len(out) == 2 + +def test_logic_formula_parser(): + assert logic_formula('tag1', ['tag1']) + assert not logic_formula('tag2', ['tag1']) + assert logic_formula('tag1,tag2', ['tag1']) + assert logic_formula('', ['tag1']) + assert logic_formula('tag1', ['tag1','tag2']) + assert not logic_formula('tag3', ['tag1', 'tag2']) + assert logic_formula('tag3,tag1', ['tag1', 'tag2']) + assert logic_formula('tag3,tag2,tag1', ['tag1', 'tag2']) + +# test_loader() +# test_mtf_metadata_linters_only() +# test_filter_mtf() +# test_filter_general() +# test_mtf_config() +# test_general_config() +# test_logic_formula_parser() diff --git a/mtf/metatest/__init__.py b/mtf/metatest/__init__.py new file mode 100644 index 0000000..80135ca --- /dev/null +++ b/mtf/metatest/__init__.py @@ -0,0 +1 @@ +from moduleframework.module_framework import * diff --git a/mtf/mtfexceptions/__init__.py b/mtf/mtfexceptions/__init__.py new file mode 100644 index 0000000..ad65883 --- /dev/null +++ b/mtf/mtfexceptions/__init__.py @@ -0,0 +1 @@ +from moduleframework.mtfexceptions import * diff --git a/requirements.sh b/requirements.sh new file mode 100755 index 0000000..0c4d59a --- /dev/null +++ b/requirements.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +GENERICPACKAGES=" +curl +git +make +python-pip +" +RPMPACKAGES=" +fedpkg +httpd +koji +krb5-devel +nc +pdc-client +python-devel +python-gssapi +python-netifaces +python2-avocado +python2-avocado-plugins-output-html +python2-devel +python2-dockerfile-parse +python2-modulemd +python2-odcs-client +python2-pytest +redhat-rpm-config +" + +APTPACKAGES=" +build-essential +libkrb5-dev +mysql-client-5.5 +netcat +python-dev +python-pytest +python-software-properties +software-properties-common +" + +PIPPACKAGES=" +avocado-framework +" + +if [ -e /usr/bin/dnf ]; then + dnf -y install $GENERICPACKAGES $RPMPACKAGES +elif [ -e /usr/bin/yum ]; then 
+ yum -y install $GENERICPACKAGES $RPMPACKAGES +else + apt-get -y install $GENERICPACKAGES $APTPACKAGES + pip install $PIPPACKAGES +fi diff --git a/requirements.txt b/requirements.txt index 4167436..64b479b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,5 @@ PyYAML avocado-framework -avocado-framework-plugin-result-html dockerfile-parse modulemd netifaces diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index b88034e..0000000 --- a/setup.cfg +++ /dev/null @@ -1,2 +0,0 @@ -[metadata] -description-file = README.md diff --git a/setup.py b/setup.py index e160e0d..fa947d7 100755 --- a/setup.py +++ b/setup.py @@ -23,10 +23,7 @@ import os import sys -try: - from setuptools import setup, find_packages -except ImportError: - from distutils.core import setup +from setuptools import setup, find_packages # copy from https://github.com/avocado-framework/avocado/blob/master/setup.py VIRTUAL_ENV = hasattr(sys, 'real_prefix') @@ -50,6 +47,7 @@ def get_dir(system_path=None, virtual_path=None): system_path = [] return os.path.join(*(['/'] + system_path)) + data_files = {} paths = ['docs', 'examples', 'tools'] @@ -61,10 +59,19 @@ def get_dir(system_path=None, virtual_path=None): ['usr', 'share', 'moduleframework', root])] = [ os.path.join(root, f) for f in files] +paths = ['man'] + +for path in paths: + for root, dirs, files in os.walk(path, followlinks=True): + data_files[ + get_dir( + ['usr', 'share', 'man', 'man1'])] = [ + os.path.join(root, f) for f in files] + setup( name='meta-test-family', - version="0.7.3", - description='Tool to test components fo a modular Fedora.', + version="0.7.8", + description='Tool to test components for a modular Fedora.', keywords='modules,containers,testing,framework', author='Jan Scotka', author_email='jscotka@redhat.com', @@ -73,14 +80,16 @@ def get_dir(system_path=None, virtual_path=None): packages=find_packages(exclude=['docs', 'examples', 'tools']), include_package_data=True, data_files=data_files.items(), - 
scripts=['tools/mtf'], + scripts=[], entry_points={ 'console_scripts': [ 'mtf-cmd = moduleframework.bashhelper:main', 'mtf-generator = moduleframework.mtf_generator:main', 'mtf-env-set = moduleframework.mtf_environment:mtfenvset', 'mtf-env-clean = moduleframework.mtf_environment:mtfenvclean', - 'mtf-log-parser = moduleframework.mtf_log_parser:main', + 'mtf-init = moduleframework.mtf_init:main', + 'mtf = moduleframework.mtf_scheduler:main', + 'mtf-pdc-module-info-reader = moduleframework.pdc_msg_module_info_reader:main', ] }, setup_requires=[], diff --git a/tools/mtf b/tools/mtf deleted file mode 100755 index 8e540d9..0000000 --- a/tools/mtf +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash - -SITE_LIB=$(python -c "import moduleframework, os; print os.path.dirname(moduleframework.__file__)") -MTF_TOOLS="tools" -JSON_LOG=$(mktemp) -AVOCADO_ARGS="--json $JSON_LOG" - -function print_help { - cat < +# + + +function inst_env(){ + dnf install -y python-pip make docker httpd git python2-avocado fedpkg python2-avocado-plugins-output-html \ + pdc-client python2-modulemd python-netifaces python2-dockerfile-parse + pip install PyYAML behave +# it should not fail anyhow + true +# pip install --upgrade avocado-framework avocado-framework-plugin-result-html + +} + +function installdeps(){ + DEPS="requirements.sh" + echo "INSTALL TEST DEPENDENCY IF ANY FILE: $DEPS exist" + if [ -e $DEPS ]; then + sh $DEPS + fi +} + +function runtests(){ + echo "RUN MAKE TEST" + make test +} + +function schedule(){ + set -x + local RESULTTOOLS=0 + local RESULT=0 + + inst_env + RESULTTOOLS=$(($RESULTTOOLS+$?)) + + installdeps + RESULT=$(($RESULT+$?)) + runtests + RESULT=$(($RESULT+$?)) + + if [ "$RESULTTOOLS" -ne 0 ]; then + return 2 + fi + + if [ "$RESULT" -eq 0 ]; then + # return code what means PASS + return 0 + else + # return code what means that some part of infra failed + return 125 + fi + set +x +} + +schedule +exit $?