diff --git a/Makefile b/Makefile index 6770d1cd15..7a2dd5829b 100644 --- a/Makefile +++ b/Makefile @@ -47,7 +47,7 @@ build-dev: # same as build, but sets up locust as well macbuild-dev: # same as macbuild, but sets up locust as well make macbuild - pip install locust + make build-locust build-locust: # just pip installs locust - may cause instability pip install locust @@ -102,7 +102,7 @@ help: @make info info: - @: $(info Printing some info on how to use make) + @: $(info Here are some 'make' options:) $(info - Use 'make aws-ip-ranges' to download latest ip range information. Invoked automatically when needed.) $(info - Use 'make build' (or 'make macbuild' on OSX Catalina) to build only application dependencies.) $(info - Use 'make build-dev' (or 'make macbuild-dev' on OSX Catalina) to build all dependencies, even locust.) diff --git a/bin/test b/bin/test index 59675651c4..745c421244 100755 --- a/bin/test +++ b/bin/test @@ -32,9 +32,12 @@ else echo " You may need '--setup-moto' if moto server doesn't start." fi +echo "TRAVIS_JOB_ID = ${TRAVIS_JOB_ID}" if [ "${TRAVIS_JOB_ID}" = "" -o "$(echo "$TRAVIS_JOB_ID" | cut -c '1-4')" = "4dn-" ]; then export TRAVIS_JOB_ID=4dn-`date "+%m%d%H%M%Y%S"` - echo "NOTE: Simulating TRAVIS_JOB_ID=$TRAVIS_JOB_ID" + echo "NOTE: Simulating fresh TRAVIS_JOB_ID=${TRAVIS_JOB_ID}" +else + echo "NOTE: Using existing TRAVIS_JOB_ID." fi python $(dirname $0)/test.py "$@" diff --git a/conftest.py b/conftest.py index 571f801334..8ff004ce07 100644 --- a/conftest.py +++ b/conftest.py @@ -1,4 +1,6 @@ import pytest +import tempfile + def pytest_addoption(parser): parser.addoption("--es", action="store", default="", dest='es', @@ -15,3 +17,10 @@ def remote_es(request): @pytest.fixture(scope='session') def aws_auth(request): return request.config.getoption("--aws-auth") + + +def pytest_configure(): + # This adjustment is important to set the default choice of temporary filenames to a nice short name + # because without it some of the filenames we generate end up being too long, and critical functionality + # ends up failing. Some socket-related filenames, for example, seem to have length limits. 
-kmp 5-Jun-2020 + tempfile.tempdir = '/tmp' diff --git a/deploy/travis_after_all.py b/deploy/travis_after_all.py index 7c380bdb0d..282a249109 100644 --- a/deploy/travis_after_all.py +++ b/deploy/travis_after_all.py @@ -121,7 +121,7 @@ def get_token(): log.error("Others Failed") os.environ[BUILD_AGGREGATE_STATUS] = "others_failed" else: - log.warn("Others Unknown") + log.warning("Others Unknown") os.environ[BUILD_AGGREGATE_STATUS] = "unknown" # since python is subprocess, env variables are exported back via file with open(".to_export_back", "w") as export_var: diff --git a/development.ini b/development.ini index ac6702c8a8..076b60bf2c 100644 --- a/development.ini +++ b/development.ini @@ -11,7 +11,6 @@ load_test_only = true create_tables = true testing = true postgresql.statement_timeout = 20 -# indexer.processes = mpindexer = true indexer = true elasticsearch.aws_auth = false @@ -24,6 +23,8 @@ pyramid.default_locale_name = en # most deployments use: "load_test_data = encoded.loadxl:load_test_data" load_test_data = encoded.loadxl:load_local_data encoded_version = 100.200.300 +snovault_version = 200.300.400 +utils_version = 300.400.500 eb_app_version = app-v-development-simulation [pipeline:debug] diff --git a/poetry.lock b/poetry.lock index deaae037c6..74056ecd4a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -418,6 +418,23 @@ version = "1.4.1" [package.dependencies] apipkg = ">=1.4" +[[package]] +category = "dev" +description = "the modular source code checker: pep8 pyflakes and co" +name = "flake8" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" +version = "3.8.3" + +[package.dependencies] +mccabe = ">=0.6.0,<0.7.0" +pycodestyle = ">=2.6.0a1,<2.7.0" +pyflakes = ">=2.2.0,<2.3.0" + +[package.dependencies.importlib-metadata] +python = "<3.8" +version = "*" + [[package]] category = "dev" description = "Plugin for nose or pytest that automatically reruns flaky tests." 
@@ -648,6 +665,14 @@ optional = false python-versions = "*" version = "0.23" +[[package]] +category = "dev" +description = "McCabe checker, plugin for flake8" +name = "mccabe" +optional = false +python-versions = "*" +version = "0.6.1" + [[package]] category = "dev" description = "Rolling backport of unittest.mock for all Pythons" @@ -832,6 +857,14 @@ version = "0.10.0" [package.dependencies] requests = "*" +[[package]] +category = "dev" +description = "Python style guide checker" +name = "pycodestyle" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "2.6.0" + [[package]] category = "main" description = "C parser in Python" @@ -848,6 +881,14 @@ optional = false python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" version = "3.9.8" +[[package]] +category = "dev" +description = "passive checker of Python programs" +name = "pyflakes" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "2.2.0" + [[package]] category = "main" description = "JSON Web Token implementation in Python" @@ -1277,10 +1318,11 @@ category = "main" description = "Database Abstraction Library" name = "sqlalchemy" optional = false -python-versions = "*" -version = "1.2.16" +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "1.3.16" [package.extras] +mssql = ["pyodbc"] mssql_pymssql = ["pymssql"] mssql_pyodbc = ["pyodbc"] mysql = ["mysqlclient"] @@ -1553,6 +1595,7 @@ version = "0.12.0" [[package]] category = "main" description = "Backport of pathlib-compatible object wrapper for zip files" +marker = "python_version < \"3.8\"" name = "zipp" optional = false python-versions = ">=3.6" @@ -1611,7 +1654,7 @@ transaction = ">=1.6.0" test = ["zope.testing"] [metadata] -content-hash = "c98233211908adc53b5166ef693ff6c53db02b8b736c1e6610e93b3bc717b04b" +content-hash = "f480337cad8012fe1536aef687bdbce1f60139b9b9887feeed77fc59be1db485" python-versions = ">=3.6,<3.7" [metadata.files] @@ -1804,6 +1847,10 @@ execnet = [ {file = "execnet-1.4.1-py2.py3-none-any.whl", hash = "sha256:d2b909c7945832e1c19cfacd96e78da68bdadc656440cfc7dfe59b766744eb8c"}, {file = "execnet-1.4.1.tar.gz", hash = "sha256:f66dd4a7519725a1b7e14ad9ae7d3df8e09b2da88062386e08e941cafc0ef3e6"}, ] +flake8 = [ + {file = "flake8-3.8.3-py2.py3-none-any.whl", hash = "sha256:15e351d19611c887e482fb960eae4d44845013cc142d42896e9862f775d8cf5c"}, + {file = "flake8-3.8.3.tar.gz", hash = "sha256:f04b9fcbac03b0a3e58c0ab3a0ecc462e023a9faf046d57794184028123aa208"}, +] flaky = [ {file = "flaky-3.6.1-py2.py3-none-any.whl", hash = "sha256:5471615b32b0f8086573de924475b1f0d31e0e8655a089eb9c38a0fbff3f11aa"}, {file = "flaky-3.6.1.tar.gz", hash = "sha256:8cd5455bb00c677f787da424eaf8c4a58a922d0e97126d3085db5b279a98b698"}, @@ -1889,6 +1936,10 @@ loremipsum = [ markupsafe = [ {file = "MarkupSafe-0.23.tar.gz", hash = "sha256:a4ec1aff59b95a14b45eb2e23761a0179e98319da5a7eb76b56ea8cdc7b871c3"}, ] +mccabe = [ + {file = "mccabe-0.6.1-py2.py3-none-any.whl", hash = "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42"}, + {file = "mccabe-0.6.1.tar.gz", hash = "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"}, +] mock = [ {file = "mock-4.0.2-py3-none-any.whl", hash = "sha256:3f9b2c0196c60d21838f307f5825a7b86b678cedc58ab9e50a8988187b4d81e0"}, {file = "mock-4.0.2.tar.gz", hash = "sha256:dd33eb70232b6118298d516bbcecd26704689c386594f0f3c4f13867b2c56f72"}, @@ -2054,6 +2105,10 @@ pyasn1 = [ pybrowserid = [ {file = "PyBrowserID-0.10.0.tar.gz", hash = 
"sha256:e540cfe54c2c3cfb8cc7e5c33fe19d9e7c2ad267063afe1f699a8e1b03d940d7"}, ] +pycodestyle = [ + {file = "pycodestyle-2.6.0-py2.py3-none-any.whl", hash = "sha256:2295e7b2f6b5bd100585ebcb1f616591b652db8a741695b3d8f5d28bdc934367"}, + {file = "pycodestyle-2.6.0.tar.gz", hash = "sha256:c58a7d2815e0e8d7972bf1803331fb0152f867bd89adf8a01dfd55085434192e"}, +] pycparser = [ {file = "pycparser-2.14.tar.gz", hash = "sha256:7959b4a74abdc27b312fed1c21e6caf9309ce0b29ea86b591fd2e99ecdf27f73"}, ] @@ -2088,6 +2143,10 @@ pycryptodome = [ {file = "pycryptodome-3.9.8-cp39-cp39-manylinux1_i686.whl", hash = "sha256:39ef9fb52d6ec7728fce1f1693cb99d60ce302aeebd59bcedea70ca3203fda60"}, {file = "pycryptodome-3.9.8-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:de6e1cd75677423ff64712c337521e62e3a7a4fc84caabbd93207752e831a85a"}, ] +pyflakes = [ + {file = "pyflakes-2.2.0-py2.py3-none-any.whl", hash = "sha256:0d94e0e05a19e57a99444b6ddcf9a6eb2e5c68d3ca1e98e90707af8152c90a92"}, + {file = "pyflakes-2.2.0.tar.gz", hash = "sha256:35b2d75ee967ea93b55750aa9edbbf72813e06a66ba54438df2cfac9e3c27fc8"}, +] pyjwt = [ {file = "PyJWT-1.5.3-py2.py3-none-any.whl", hash = "sha256:a4e5f1441e3ca7b382fd0c0b416777ced1f97c64ef0c33bfa39daf38505cfd2f"}, {file = "PyJWT-1.5.3.tar.gz", hash = "sha256:500be75b17a63f70072416843dc80c8821109030be824f4d14758f114978bae7"}, @@ -2280,7 +2339,25 @@ sparqlwrapper = [ {file = "SPARQLWrapper-1.7.6.zip", hash = "sha256:052c5b99ede756e227ff2db61a6664633a135d99fa4a8d728459480f5bc9a540"}, ] sqlalchemy = [ - {file = "SQLAlchemy-1.2.16.tar.gz", hash = "sha256:6af3ca2f7f00844465ab4fa78337d487b39e53f516c51328aed4ed3a719d4264"}, + {file = "SQLAlchemy-1.3.16-cp27-cp27m-macosx_10_13_x86_64.whl", hash = "sha256:8d8c21e9d4efef01351bf28513648ceb988031be4159745a7ad1b3e28c8ff68a"}, + {file = "SQLAlchemy-1.3.16-cp27-cp27m-win32.whl", hash = "sha256:083e383a1dca8384d0ea6378bd182d83c600ed4ff4ec8247d3b2442cf70db1ad"}, + {file = "SQLAlchemy-1.3.16-cp27-cp27m-win_amd64.whl", hash = "sha256:128f6179325f7597a46403dde0bf148478f868df44841348dfc8d158e00db1f9"}, + {file = "SQLAlchemy-1.3.16-cp36-cp36m-macosx_10_13_x86_64.whl", hash = "sha256:6056b671aeda3fc451382e52ab8a753c0d5f66ef2a5ccc8fa5ba7abd20988b4d"}, + {file = "SQLAlchemy-1.3.16-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:7025c639ce7e170db845e94006cf5f404e243e6fc00d6c86fa19e8ad8d411880"}, + {file = "SQLAlchemy-1.3.16-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:e18752cecaef61031252ca72031d4d6247b3212ebb84748fc5d1a0d2029c23ea"}, + {file = "SQLAlchemy-1.3.16-cp36-cp36m-win32.whl", hash = "sha256:0a690a6486658d03cc6a73536d46e796b6570ac1f8a7ec133f9e28c448b69828"}, + {file = "SQLAlchemy-1.3.16-cp36-cp36m-win_amd64.whl", hash = "sha256:d00b393f05dbd4ecd65c989b7f5a81110eae4baea7a6a4cdd94c20a908d1456e"}, + {file = "SQLAlchemy-1.3.16-cp37-cp37m-macosx_10_13_x86_64.whl", hash = "sha256:114b6ace30001f056e944cebd46daef38fdb41ebb98f5e5940241a03ed6cad43"}, + {file = "SQLAlchemy-1.3.16-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:13d48cd8b925b6893a4e59b2dfb3e59a5204fd8c98289aad353af78bd214db49"}, + {file = "SQLAlchemy-1.3.16-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:211a1ce7e825f7142121144bac76f53ac28b12172716a710f4bf3eab477e730b"}, + {file = "SQLAlchemy-1.3.16-cp37-cp37m-win32.whl", hash = "sha256:68d78cf4a9dfade2e6cf57c4be19f7b82ed66e67dacf93b32bb390c9bed12749"}, + {file = "SQLAlchemy-1.3.16-cp37-cp37m-win_amd64.whl", hash = "sha256:2dc57ee80b76813759cccd1a7affedf9c4dbe5b065a91fb6092c9d8151d66078"}, + {file = 
"SQLAlchemy-1.3.16-cp38-cp38-macosx_10_13_x86_64.whl", hash = "sha256:43078c7ec0457387c79b8d52fff90a7ad352ca4c7aa841c366238c3e2cf52fdf"}, + {file = "SQLAlchemy-1.3.16-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:bbb545da054e6297242a1bb1ba88e7a8ffb679f518258d66798ec712b82e4e07"}, + {file = "SQLAlchemy-1.3.16-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:5b1bf3c2c2dca738235ce08079783ef04f1a7fc5b21cf24adaae77f2da4e73c3"}, + {file = "SQLAlchemy-1.3.16-cp38-cp38-win32.whl", hash = "sha256:3e625e283eecc15aee5b1ef77203bfb542563fa4a9aa622c7643c7b55438ff49"}, + {file = "SQLAlchemy-1.3.16-cp38-cp38-win_amd64.whl", hash = "sha256:7d98e0785c4cd7ae30b4a451416db71f5724a1839025544b4edbd92e00b91f0f"}, + {file = "SQLAlchemy-1.3.16.tar.gz", hash = "sha256:7224e126c00b8178dfd227bc337ba5e754b197a3867d33b9f30dc0208f773d70"}, ] strict-rfc3339 = [ {file = "strict-rfc3339-0.7.tar.gz", hash = "sha256:5cad17bedfc3af57b399db0fed32771f18fc54bbd917e85546088607ac5e1277"}, diff --git a/pyproject.toml b/pyproject.toml index f792af5b14..a9c94c029e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,7 +1,7 @@ [tool.poetry] # Note: Various modules refer to this system as "encoded", not "cgap-portal". name = "encoded" -version = "2.1.10" +version = "2.1.11" description = "Clinical Genomics Analysis Platform" authors = ["4DN-DCIC Team "] license = "MIT" @@ -42,7 +42,7 @@ certifi = ">=2020.4.5.2" chardet = "3.0.4" colorama = "0.3.3" dcicpyvcf = "1.0.0" -dcicsnovault = ">=3.1.1,<4" +dcicsnovault = ">=3.1.4,<4" dcicutils = ">=0.31.1,<1" docutils = "0.12" elasticsearch = "5.5.3" @@ -91,7 +91,7 @@ rutter = ">=0.2,<1" s3transfer = "^0.2.0" simplejson = "^3.17.0" SPARQLWrapper = "1.7.6" -SQLAlchemy = "1.2.16" +SQLAlchemy = "1.3.16" structlog = ">=18.1.0,<20" submit4dn = "0.9.7" subprocess-middleware = ">=0.3,<1" @@ -103,7 +103,7 @@ translationstring = "1.3" uptime = ">=3.0.1,<4" urllib3 = "^1.24.3" venusian = "^1.2.0" -waitress = "1.2.0" +waitress = "1.2.0" # 1.4.3 had lots of problems, so pin this -kmp 18-May-2020 WebOb = "1.8.5" WebTest = "^2.0.21" wheel = "0.29.0" @@ -112,13 +112,13 @@ xlrd = "^1.0.0" xlwt = "1.2.0" "zope.deprecation" = "4.4.0" "zope.interface" = "4.6.0" -"zope.sqlalchemy" = "^1.2" +"zope.sqlalchemy" = "1.3" [tool.poetry.dev-dependencies] coverage = ">=5.2" codacy-coverage = ">=1.3.11" coveralls = ">=2.1.1" -# flake8 = "3.7.8" +flake8 = "^3.7.8" flaky = "3.6.1" # flask only for moto[server] flask = ">=1.1.1" diff --git a/pytest.ini b/pytest.ini index 6c83b5dd29..9adf37d891 100644 --- a/pytest.ini +++ b/pytest.ini @@ -9,6 +9,7 @@ markers = indexing: mark a test as an indexing test (deselect with '-m "not indexing"') ingestion: mark a test as an ingestion test (deselect with '-m "not ingestion"') performance: mark a test as a performance test (deselect with '-m "not performance"') + schema: mark a test as a schema-related test (deselect with '-m "not schema"') slow: mark a test as slow (deselect with '-m "not slow"') storage: mark a test as about storage (deselect with '-m "not storage"') working: mark a test as working (deselect with '-m "not working"') diff --git a/setup_eb.py b/setup_eb.py index b1fb0fc58e..b22b99df61 100644 --- a/setup_eb.py +++ b/setup_eb.py @@ -10,7 +10,7 @@ PYPROJECT_TOML = toml.decoder.load(os.path.join(ROOT_DIR, 'pyproject.toml')) POETRY_DATA = PYPROJECT_TOML['tool']['poetry'] -_CARET_MATCH = re.compile(r"[\^]([0-9]+)([.].*)$") +_CARET_MATCH = re.compile(r"[\^]([0-9]+)([.].*)?$") def fix_requirement(requirement): diff --git a/src/encoded/authentication.py 
b/src/encoded/authentication.py index cd403a2653..7b32a7c1a7 100644 --- a/src/encoded/authentication.py +++ b/src/encoded/authentication.py @@ -46,6 +46,20 @@ CRYPT_CONTEXT = __name__ + ':crypt_context' +JWT_ENCODING_ALGORITHM = 'HS256' + +# Might need to keep a list of previously used algorithms here, not just the one we use now. +# Decryption algorithm used to default to a long list, +# but more recent versions of jwt library say we should stop assuming that. +# +# In case it goes away, as far as I can tell, the default for decoding from their +# default_algorithms() method used to be: ['ES512', 'RS384', 'HS512', 'ES256', 'none', +# 'RS256', 'PS512', 'ES384', 'HS384', 'ES521', 'PS384', 'HS256', 'PS256', 'RS512'] +# -kmp 15-May-2020 + +# TODO: JWT_DECODING_ALGORITHMS = [JWT_ENCODING_ALGORITHM] + + def includeme(config): config.include('.edw_hash') setting_prefix = 'passlib.' @@ -222,6 +236,7 @@ def get_token_info(self, token, request): if auth0_client and auth0_secret: # leeway accounts for clock drift between us and auth0 payload = jwt.decode(token, b64decode(auth0_secret, '-_'), + # algorithms=JWT_DECODING_ALGORITHMS audience=auth0_client, leeway=30) if 'email' in payload and self.email_is_partners_or_hms(payload): request.set_property(lambda r: False, 'auth0_expired') @@ -427,7 +442,9 @@ def impersonate_user(context, request): 'aud': auth0_client, } - id_token = jwt.encode(jwt_contents, b64decode(auth0_secret, '-_'), algorithm='HS256') + id_token = jwt.encode(jwt_contents, b64decode(auth0_secret, '-_'), + algorithm=JWT_ENCODING_ALGORITHM + ) user_properties['id_token'] = id_token.decode('utf-8') return user_properties diff --git a/src/encoded/commands/add_date_created.py b/src/encoded/commands/add_date_created.py index 3a7b8d6f9b..065a89c22b 100644 --- a/src/encoded/commands/add_date_created.py +++ b/src/encoded/commands/add_date_created.py @@ -12,11 +12,17 @@ %(prog)s development.ini --app-name app """ -from future.utils import iteritems -from pyramid.traversal import resource_path + +import argparse import logging import pytz +from future.utils import iteritems +from pyramid import paster +from pyramid.traversal import resource_path +from webtest import TestApp + + pacific = pytz.timezone('US/Pacific') EPILOG = __doc__ @@ -25,8 +31,6 @@ def internal_app(configfile, app_name=None, username=None): - from webtest import TestApp - from pyramid import paster app = paster.get_app(configfile, app_name) if not username: username = 'IMPORT' @@ -86,8 +90,7 @@ def run(testapp, collections=None, exclude=None, dry_run=False): def main(): - import argparse - parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is specified wrong here. 
+ parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is invalid description="Fix date_created", epilog=EPILOG, formatter_class=argparse.RawDescriptionHelpFormatter, ) diff --git a/src/encoded/commands/check_rendering.py b/src/encoded/commands/check_rendering.py index 82ddb9694b..6e68aee47d 100644 --- a/src/encoded/commands/check_rendering.py +++ b/src/encoded/commands/check_rendering.py @@ -10,10 +10,15 @@ %(prog)s development.ini --app-name app """ +import argparse import json import logging + from future.utils import itervalues +from pyramid import paster from pyramid.traversal import resource_path +from webtest import TestApp + EPILOG = __doc__ @@ -62,8 +67,6 @@ def run(testapp, collections=None): def internal_app(configfile, app_name=None, username='TEST', accept='text/html'): - from pyramid import paster - from webtest import TestApp app = paster.get_app(configfile, app_name) environ = { 'HTTP_ACCEPT': accept, @@ -73,8 +76,7 @@ def internal_app(configfile, app_name=None, username='TEST', accept='text/html') def main(): - import argparse - parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is specified wrong here. + parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is invalid description="Check rendering of items", epilog=EPILOG, formatter_class=argparse.RawDescriptionHelpFormatter, ) diff --git a/src/encoded/commands/clear_db_es_contents.py b/src/encoded/commands/clear_db_es_contents.py index 6bf4633a55..81dddb0233 100644 --- a/src/encoded/commands/clear_db_es_contents.py +++ b/src/encoded/commands/clear_db_es_contents.py @@ -31,7 +31,8 @@ def clear_db_tables(app): """ success = False session = app.registry[DBSESSION] - meta = MetaData(bind=session.connection(), reflect=True) + meta = MetaData(bind=session.connection()) + meta.reflect() connection = session.connection().connect() try: # truncate tables by only deleting contents @@ -103,7 +104,7 @@ def main(): # Loading app will have configured from config file. Reconfigure here: logging.getLogger('encoded').setLevel(logging.DEBUG) - parser = argparse.ArgumentParser( + parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is specified wrong here. description='Clear DB and ES Contents', epilog=EPILOG, formatter_class=argparse.RawDescriptionHelpFormatter, ) diff --git a/src/encoded/commands/configure_kibana_index.py b/src/encoded/commands/configure_kibana_index.py index 1dc594ca1f..7a0f3c50c7 100644 --- a/src/encoded/commands/configure_kibana_index.py +++ b/src/encoded/commands/configure_kibana_index.py @@ -16,7 +16,7 @@ def main(): # Loading app will have configured from config file. Reconfigure here: logging.getLogger('encoded').setLevel(logging.INFO) - parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is invalid + parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is specified wrong here. 
description="Configure Kibana Index", epilog=EPILOG, formatter_class=argparse.RawDescriptionHelpFormatter, ) diff --git a/src/encoded/commands/export_data.py b/src/encoded/commands/export_data.py index 776167c1bb..7069a11518 100644 --- a/src/encoded/commands/export_data.py +++ b/src/encoded/commands/export_data.py @@ -97,7 +97,7 @@ def perform_request(uri, attempt = 1): def main(): import argparse - parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is invalid + parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is specified wrong here. description="Export Data", epilog=EPILOG, formatter_class=argparse.RawDescriptionHelpFormatter, ) diff --git a/src/encoded/commands/extract_test_data.py b/src/encoded/commands/extract_test_data.py index 13b17bbe8a..f8abc333f5 100644 --- a/src/encoded/commands/extract_test_data.py +++ b/src/encoded/commands/extract_test_data.py @@ -1,7 +1,6 @@ import argparse import csv import loremipsum -import os import random import re import sys diff --git a/src/encoded/commands/import_data.py b/src/encoded/commands/import_data.py index ad20a36e07..9e4de995a4 100644 --- a/src/encoded/commands/import_data.py +++ b/src/encoded/commands/import_data.py @@ -20,18 +20,23 @@ --patch ../updates/ http://localhost:6543 """ +import argparse +import logging +import os.path + +from base64 import b64encode +from pyramid.compat import ascii_native_ +from pyramid import paster from webtest import TestApp +from wsgiproxy.proxies import ALLOWED_METHODS from urllib.parse import urlparse from .. import loadxl -import logging -import os.path + EPILOG = __doc__ def basic_auth(username, password): - from base64 import b64encode - from pyramid.compat import ascii_native_ return 'Basic ' + ascii_native_(b64encode(('%s:%s' % (username, password)).encode('utf-8'))) @@ -44,7 +49,6 @@ def remote_app(base, username='', password=''): def internal_app(configfile, app_name=None, username=''): - from pyramid import paster app = paster.get_app(configfile, app_name) if not username: username = 'IMPORT' @@ -67,12 +71,10 @@ def run(testapp, filename, docsdir, method, item_type, test=False): def main(): # https://github.com/gawel/WSGIProxy2/pull/3 (and change to WebTest) - from wsgiproxy.proxies import ALLOWED_METHODS if 'PATCH' not in ALLOWED_METHODS: ALLOWED_METHODS.append('PATCH') - import argparse - parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is invalid + parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is specified wrong here. 
description="Import data", epilog=EPILOG, formatter_class=argparse.RawDescriptionHelpFormatter, ) diff --git a/src/encoded/commands/jsonld_rdf.py b/src/encoded/commands/jsonld_rdf.py index fd338b396a..ae3a9f6168 100644 --- a/src/encoded/commands/jsonld_rdf.py +++ b/src/encoded/commands/jsonld_rdf.py @@ -4,9 +4,13 @@ %(prog)s "https://www.encodeproject.org/search/?type=organism&frame=object" """ -EPILOG = __doc__ +import argparse import rdflib +import sys + + +EPILOG = __doc__ def run(sources, output, parser='json-ld', serializer='xml', base=None): @@ -17,8 +21,6 @@ def run(sources, output, parser='json-ld', serializer='xml', base=None): def main(): - import argparse - import sys stdout = sys.stdout if sys.version_info.major > 2: stdout = stdout.buffer @@ -29,7 +31,7 @@ def main(): rdflib_serializers = sorted( p.name for p in rdflib.plugin.plugins(kind=rdflib.serializer.Serializer) if '/' not in p.name) - parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is invalid + parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is specified wrong here. description="Convert JSON-LD from source URLs to RDF", epilog=EPILOG, formatter_class=argparse.RawDescriptionHelpFormatter, ) diff --git a/src/encoded/commands/load_access_keys.py b/src/encoded/commands/load_access_keys.py index 4725562652..425ca47c26 100644 --- a/src/encoded/commands/load_access_keys.py +++ b/src/encoded/commands/load_access_keys.py @@ -77,7 +77,7 @@ def main(): # Loading app will have configured from config file. Reconfigure here: logging.getLogger('encoded').setLevel(logging.INFO) - parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is invalid + parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is specified wrong here. description="Load Access Keys", epilog=EPILOG, formatter_class=argparse.RawDescriptionHelpFormatter, ) diff --git a/src/encoded/commands/migrate_attachments_aws.py b/src/encoded/commands/migrate_attachments_aws.py index 92612ffd63..64259e298e 100644 --- a/src/encoded/commands/migrate_attachments_aws.py +++ b/src/encoded/commands/migrate_attachments_aws.py @@ -48,7 +48,7 @@ def run(app): def main(): import argparse - parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is invalid + parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is specified wrong here. description="Move attachment blobs to S3", epilog=EPILOG, formatter_class=argparse.RawDescriptionHelpFormatter, ) diff --git a/src/encoded/commands/migrate_dataset_type.py b/src/encoded/commands/migrate_dataset_type.py index d0ee980c46..e4883251b6 100644 --- a/src/encoded/commands/migrate_dataset_type.py +++ b/src/encoded/commands/migrate_dataset_type.py @@ -37,7 +37,7 @@ def run(app): def main(): import argparse - parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is invalid + parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is specified wrong here. 
description="Migrate dataset type", epilog=EPILOG, formatter_class=argparse.RawDescriptionHelpFormatter, ) diff --git a/src/encoded/commands/migrate_files_aws.py b/src/encoded/commands/migrate_files_aws.py index 1a8c97850b..8e6b658db9 100644 --- a/src/encoded/commands/migrate_files_aws.py +++ b/src/encoded/commands/migrate_files_aws.py @@ -41,7 +41,7 @@ def run(app, files): def main(): import argparse - parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is invalid + parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is specified wrong here. description="Migrate files to AWS", epilog=EPILOG, formatter_class=argparse.RawDescriptionHelpFormatter, ) diff --git a/src/encoded/commands/purge_item_type.py b/src/encoded/commands/purge_item_type.py index 2e3740c10d..a22468cb5e 100644 --- a/src/encoded/commands/purge_item_type.py +++ b/src/encoded/commands/purge_item_type.py @@ -62,7 +62,7 @@ def main(): """ Entry point for this command """ logging.basicConfig() - parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is invalid + parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is specified wrong here. description='Clear an item type out of metadata storage', epilog=EPILOG, formatter_class=argparse.RawDescriptionHelpFormatter diff --git a/src/encoded/commands/run_upgrader_on_inserts.py b/src/encoded/commands/run_upgrader_on_inserts.py index 4ccc04aac3..c1af99cbbe 100644 --- a/src/encoded/commands/run_upgrader_on_inserts.py +++ b/src/encoded/commands/run_upgrader_on_inserts.py @@ -21,7 +21,7 @@ def main(): # Loading app will have configured from config file. Reconfigure here: logging.getLogger('encoded').setLevel(logging.DEBUG) - parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is invalid + parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is specified wrong here. description="Run inserts through an upgrader", epilog=EPILOG, formatter_class=argparse.RawDescriptionHelpFormatter, ) diff --git a/src/encoded/commands/spreadsheet_to_json.py b/src/encoded/commands/spreadsheet_to_json.py index bd46009a42..3455f2930d 100644 --- a/src/encoded/commands/spreadsheet_to_json.py +++ b/src/encoded/commands/spreadsheet_to_json.py @@ -46,7 +46,7 @@ def convert(filename, sheetname=None, outputdir=None, skip_blanks=False): def main(): import argparse - parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is invalid + parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is specified wrong here. description="Convert spreadsheet to json list", epilog=EPILOG, formatter_class=argparse.RawDescriptionHelpFormatter, ) diff --git a/src/encoded/commands/update_inserts_from_server.py b/src/encoded/commands/update_inserts_from_server.py index 446d7257c8..6aced3f354 100644 --- a/src/encoded/commands/update_inserts_from_server.py +++ b/src/encoded/commands/update_inserts_from_server.py @@ -58,7 +58,7 @@ def main(): # Loading app will have configured from config file. Reconfigure here: logging.getLogger('encoded').setLevel(logging.DEBUG) - parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is invalid + parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is specified wrong here. 
description="Update Inserts", epilog=EPILOG, formatter_class=argparse.RawDescriptionHelpFormatter, ) diff --git a/src/encoded/commands/verify_item.py b/src/encoded/commands/verify_item.py index fb7bcccbdc..ee589849aa 100755 --- a/src/encoded/commands/verify_item.py +++ b/src/encoded/commands/verify_item.py @@ -30,7 +30,7 @@ def run(app, uuids=None): def main(): ''' Verifies and item against database / ES and checks embeds ''' - parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is invalid + parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is specified wrong here. description="Verifies and item against database / ES and checks embeds", epilog=EPILOG, formatter_class=argparse.RawDescriptionHelpFormatter, ) diff --git a/src/encoded/loadxl.py b/src/encoded/loadxl.py index 9a6157264e..7de9845717 100644 --- a/src/encoded/loadxl.py +++ b/src/encoded/loadxl.py @@ -1,10 +1,12 @@ # -*- coding: utf-8 -*- """Load collections and determine the order.""" -import mimetypes -import structlog -import magic + import json +import magic +import mimetypes import os +import structlog +import webtest from base64 import b64encode from past.builtins import basestring @@ -14,7 +16,6 @@ from pyramid.response import Response from pyramid.view import view_config from snovault.util import debug_log -from webtest import TestApp from .server_defaults import add_last_modified @@ -98,7 +99,7 @@ def load_data_view(context, request): post_only = request.json.get('post_only', False) app = get_app(config_uri, 'app') environ = {'HTTP_ACCEPT': 'application/json', 'REMOTE_USER': 'TEST'} - testapp = TestApp(app, environ) + testapp = webtest.TestApp(app, environ) # expected response request.response.status = 200 result = { @@ -472,7 +473,7 @@ def load_data(app, indir='inserts', docsdir=None, overwrite=False, 'HTTP_ACCEPT': 'application/json', 'REMOTE_USER': 'TEST', } - testapp = TestApp(app, environ) + testapp = webtest.TestApp(app, environ) # load master-inserts by default if indir != 'master-inserts' and use_master_inserts: master_inserts = resource_filename('encoded', 'tests/data/master-inserts/') diff --git a/src/encoded/memlimit.py b/src/encoded/memlimit.py index 3e0c915108..dca99351fd 100644 --- a/src/encoded/memlimit.py +++ b/src/encoded/memlimit.py @@ -32,7 +32,7 @@ def __init__(self, application, callback): def __call__(self, environ, start_response): try: result = self.__application(environ, start_response) - except: + except BaseException: self.__callback(environ) raise return Generator2(result, self.__callback, environ) diff --git a/src/encoded/renderers.py b/src/encoded/renderers.py index a07098003a..728b819461 100644 --- a/src/encoded/renderers.py +++ b/src/encoded/renderers.py @@ -314,6 +314,46 @@ def canonical_redirect(event): raise HTTPMovedPermanently(location=location, detail="Redirected from " + str(request.path_info)) +# Web browsers send an Accept request header for initial (e.g. non-AJAX) page requests +# which should contain 'text/html' +MIME_TYPES_SUPPORTED = ['text/html', 'application/json', 'application/ld+json'] +MIME_TYPE_DEFAULT = 'application/json' +MIME_TYPE_TRIAGE_MODE = 'legacy' # 'modern' # if this doesn't work, fall back to 'legacy' + + +def best_mime_type(request, mode=MIME_TYPE_TRIAGE_MODE): + """ + Given a request, tries to figure out the best kind of MIME type to use in response + based on what kinds of responses we support and what was requested. 
+ + In the case we can't comply, we just use application/json whether or not that's what was asked for. + """ + if mode == 'legacy': + # See: https://tedboy.github.io/flask/generated/generated/werkzeug.Accept.best_match.html#werkzeug-accept-best-match + # Note that this is now deprecated, or will be. The message is oddly worded ("will be deprecated"), + # which presumably means "will be removed". Deprecation IS the warning of a coming action, not the action itself. + # "This is currently maintained for backward compatibility, and will be deprecated in the future. + # AcceptValidHeader.best_match() uses its own algorithm (one not specified in RFC 7231) to determine + # what is a best match. The algorithm has many issues, and does not conform to RFC 7231." + # Anyway, we were getting this warning during testing: + # DeprecationWarning: The behavior of AcceptValidHeader.best_match is currently + # being maintained for backward compatibility, but it will be deprecated in the future, + # as it does not conform to the RFC. + # TODO: Once the modern replacement is shown to work, we should remove this conditional branch. + return request.accept.best_match(MIME_TYPES_SUPPORTED, MIME_TYPE_DEFAULT) + else: + options = request.accept.acceptable_offers(MIME_TYPES_SUPPORTED) + if not options: + # TODO: Probably we should return a 406 response by raising HTTPNotAcceptable if + # no acceptable types are available. (Certainly returning JSON in this case is + # not some kind of friendly help to a naive user with an old browser.) + # Ref: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status + return MIME_TYPE_DEFAULT + else: + mime_type, score = options[0] + return mime_type + + @lru_cache(maxsize=16) def should_transform(request, response): ''' @@ -345,6 +385,7 @@ def should_transform(request, response): # Web browsers send an Accept request header for initial (e.g. non-AJAX) page requests # which should contain 'text/html' # See: https://tedboy.github.io/flask/generated/generated/werkzeug.Accept.best_match.html#werkzeug-accept-best-match + # TODO: Maybe use mime_type = best_mime_type(request) instead.
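+ # (Illustrative example, not part of the original change: with the 'modern' mode above, a request sent with + # "Accept: text/html;q=0.9, application/json" would make request.accept.acceptable_offers(MIME_TYPES_SUPPORTED) + # return quality-sorted pairs, here [('application/json', 1.0), ('text/html', 0.9)], so best_mime_type(request) + # would pick 'application/json' instead of relying on the deprecated best_match heuristics.)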
mime_type = request.accept.best_match(['text/html', 'application/json', 'application/ld+json'], 'application/json') format = mime_type.split('/', 1)[1] # Will be 1 of 'html', 'json', 'json-ld' diff --git a/src/encoded/tests/conftest.py b/src/encoded/tests/conftest.py index 08573b6f34..ecd7562056 100644 --- a/src/encoded/tests/conftest.py +++ b/src/encoded/tests/conftest.py @@ -4,7 +4,6 @@ ''' import os import logging -import pkg_resources import pytest import webtest diff --git a/src/encoded/tests/test_access_key.py b/src/encoded/tests/test_access_key.py index 4126604583..682ec51422 100644 --- a/src/encoded/tests/test_access_key.py +++ b/src/encoded/tests/test_access_key.py @@ -151,12 +151,12 @@ def test_access_key_user_disable_login(anontestapp, no_login_access_key): def test_access_key_edit(anontestapp, access_key): headers = {'Authorization': auth_header(access_key)} - NEW_DESCRIPTION = 'new description' - properties = {'description': NEW_DESCRIPTION} + new_description = 'new description' + properties = {'description': new_description} anontestapp.put_json(access_key['@id'], properties, headers=headers) res = anontestapp.get(access_key['@id'], properties, headers=headers) - assert res.json['description'] == NEW_DESCRIPTION + assert res.json['description'] == new_description @pytest.mark.parametrize('frame', ['', 'raw', 'object', 'embedded', 'page']) diff --git a/src/encoded/tests/test_aggregation.py b/src/encoded/tests/test_aggregation.py index 52d134e6d2..a5d97a35f6 100644 --- a/src/encoded/tests/test_aggregation.py +++ b/src/encoded/tests/test_aggregation.py @@ -1,9 +1,14 @@ -import pytest -from .workbook_fixtures import workbook +# import pytest +# from dcicutils.qa_utils import notice_pytest_fixtures +# from .workbook_fixtures import app_settings, workbook # XXX: All need refactor -pytestmark = [pytest.mark.working, pytest.mark.indexing] - +# pytestmark = [pytest.mark.working, pytest.mark.indexing] +# +# +# notice_pytest_fixtures(app_settings, workbook) +# +# # def test_aggregation_facet(workbook, testapp): # res = testapp.get('/search/?type=ExperimentSetReplicate').json # badge_facets = [facet for facet in res['facets'] if facet['title'] in diff --git a/src/encoded/tests/test_auth0.py b/src/encoded/tests/test_auth0.py index 41fcbf5e3b..d69129c887 100644 --- a/src/encoded/tests/test_auth0.py +++ b/src/encoded/tests/test_auth0.py @@ -25,7 +25,7 @@ def auth0_access_token(): except Exception as e: pytest.skip("Error retrieving auth0 test user access token: %r" % e) - data = res.json() + data = res.json() # noqa - PyCharm doesn't know pytest.skip will unconditionally raise, so fears 'res' undefined if 'id_token' not in data: pytest.skip("Missing 'id_token' in auth0 test user access token: %r" % data) @@ -49,7 +49,7 @@ def auth0_access_token_no_email(): except Exception as e: pytest.skip("Error retrieving auth0 test user access token: %r" % e) - data = res.json() + data = res.json() # noqa - PyCharm doesn't know pytest.skip will unconditionally raise, so fears 'res' undefined if 'id_token' not in data: pytest.skip("Missing 'id_token' in auth0 test user access token: %r" % data) @@ -68,15 +68,19 @@ def auth0_4dn_user_profile(): @pytest.fixture(scope='session') def headers(auth0_access_token): - return {'Accept': 'application/json', 'Content-Type': 'application/json', 'Authorization': 'Bearer ' + - auth0_access_token} + return { + 'Accept': 'application/json', + 'Content-Type': 'application/json', + 'Authorization': 'Bearer ' + auth0_access_token + } @pytest.fixture(scope='session') def 
fake_request(headers): class FakeRequest(object): - '''TODO: See if could/should use or subclass from DummyRequest''' + """Mocked Request class""" + # TODO: See if could/should use or subclass from DummyRequest def __init__(self): self.headers = headers self.cookies = {} @@ -115,10 +119,10 @@ def test_login_token_no_email(anontestapp, auth0_access_token_no_email, headers) def test_invalid_login(anontestapp, headers): - headers1=headers.copy() + headers1 = headers.copy() headers1['Authorization'] = 'Bearer invalid token' # Log in without headers - res = anontestapp.post_json('/login', headers=headers1, status=401) + anontestapp.post_json('/login', headers=headers1, status=401) # TODO (C4-173): This is intentionally disabled for now. It requires additional security that we need to reconsider. @@ -170,16 +174,12 @@ def test_404_keeps_auth_info(testapp, anontestapp, headers, # X-User-Info header is only set for text/html -formatted Responses. page_view_request_headers.update({ - "Accept" : "text/html", - "Content-Type" : "text/html", - "Cookie" : "jwtToken=" + headers['Authorization'][7:] + "Accept": "text/html", + "Content-Type": "text/html", + "Cookie": "jwtToken=" + headers['Authorization'][7:] }) # Log in - res = anontestapp.get( - '/not_found_url', - headers=page_view_request_headers, - status = 404 - ) + res = anontestapp.get('/not_found_url', headers=page_view_request_headers, status=404) assert str(res.status_int) == "404" try: @@ -221,7 +221,7 @@ def test_404_keeps_auth_info(testapp, anontestapp, headers, def test_jwt_is_stateless_so_doesnt_actually_need_login(testapp, anontestapp, auth0_4dn_user_token, - auth0_4dn_user_profile, headers): + auth0_4dn_user_profile, headers): # Create a user with the proper email url = '/users/' email = auth0_4dn_user_profile['email'] @@ -237,7 +237,7 @@ def test_jwt_is_stateless_so_doesnt_actually_need_login(testapp, anontestapp, au def test_jwt_works_without_keys(testapp, anontestapp, auth0_4dn_user_token, - auth0_4dn_user_profile, headers): + auth0_4dn_user_profile, headers): # Create a user with the proper email url = '/users/' @@ -249,7 +249,7 @@ def test_jwt_works_without_keys(testapp, anontestapp, auth0_4dn_user_token, } testapp.post_json(url, item, status=201) - #clear out keys + # clear out keys old_key = anontestapp.app.registry.settings['auth0.secret'] anontestapp.app.registry.settings['auth0.secret'] = None res2 = anontestapp.get('/users/', headers=headers, status=200) @@ -258,7 +258,6 @@ def test_jwt_works_without_keys(testapp, anontestapp, auth0_4dn_user_token, assert '@id' in res2.json['@graph'][0] - def test_impersonate_invalid_user(anontestapp, admin): anontestapp.post_json( '/impersonate-user', {'userid': 'not@here.usr'}, @@ -276,13 +275,15 @@ def test_impersonate_user(anontestapp, admin, submitter): extra_environ={'REMOTE_USER': str(admin['email'])}) - #we should get back a new token + # we should get back a new token assert 'user_actions' in res.json assert 'id_token' in res.json - # and we should be able to use that token as the new user - headers = {'Accept': 'applicatin/json', 'Content-Type': 'application/json', 'Authorization': 'Bearer ' + - res.json['id_token']} + headers = { + 'Accept': 'application/json', + 'Content-Type': 'application/json', + 'Authorization': 'Bearer ' + res.json['id_token'] + } res2 = anontestapp.get('/users/', headers=headers) assert '@id' in res2.json['@graph'][0] diff --git a/src/encoded/tests/test_embedding.py b/src/encoded/tests/test_embedding.py index d2c2401534..525a85382e 100644 --- 
a/src/encoded/tests/test_embedding.py +++ b/src/encoded/tests/test_embedding.py @@ -143,10 +143,10 @@ def test_fictitous_embed(registry): assert error is None -def test_get_item_if_you_can(content, dummy_request, threadlocals): +def test_get_item_or_none(content, dummy_request, threadlocals): """ Not necessarily the best place for this test, but test that the - `get_item_if_you_can` function works with multiple inputs + `get_item_or_none` function works with multiple inputs """ used_item = sources[0] # all of these should get the full item diff --git a/src/encoded/tests/test_generate_item_from_owl.py b/src/encoded/tests/test_generate_item_from_owl.py index 229276a330..3df658f7b7 100644 --- a/src/encoded/tests/test_generate_item_from_owl.py +++ b/src/encoded/tests/test_generate_item_from_owl.py @@ -1,4 +1,3 @@ -import contextlib import copy import io import json @@ -28,9 +27,10 @@ def test_gifo_get_args_defaults(): assert args.full is False -@pytest.fixture -def owler(mocker): - return mocker.patch.object(gifo, 'Owler') +@pytest.yield_fixture +def owler(): + with mock.patch.object(gifo, 'Owler') as mocked: + yield mocked @pytest.fixture @@ -75,11 +75,14 @@ def mkd_class(): class MockS3UtilsForAccessKeys: + def __init__(self, env): assert env == TEST_KEYS_ENV + def get_access_keys(self): return json.loads(TEST_KEYS_STORED) + def test_connect2server_w_env(connection): non_dictionary = object() with mock.patch.object(s3_utils, "s3Utils", MockS3UtilsForAccessKeys): @@ -120,11 +123,11 @@ def test_prompt_check_for_output_options_w_load_y_and_no_file(monkeypatch, mock_ assert loadit -def test_prompt_check_for_output_options_w_load_y_and_no_file_but_wantit(mocker, mock_logger): - mocker.patch('builtins.input', side_effect=['y', 'n']) - ofile, loadit = gifo.prompt_check_for_output_options(True, None, 'Disorder', 'test_server', mock_logger) - assert ofile == 'Disorder.json' - assert loadit +def test_prompt_check_for_output_options_w_load_y_and_no_file_but_wantit(mock_logger): + with mock.patch('builtins.input', side_effect=['y', 'n']): + ofile, loadit = gifo.prompt_check_for_output_options(True, None, 'Disorder', 'test_server', mock_logger) + assert ofile == 'Disorder.json' + assert loadit def test_prompt_check_for_output_options_wo_load_nofile(mock_logger): @@ -149,44 +152,44 @@ def delobs_disorder_gen(delobs_disorders): return iter(delobs_disorders) -def test_get_existing_items(mocker, connection, rel_disorders, delobs_disorders): - ''' Currently this test passes but is not really a unit test and also does not - quite mock the correct things so should be refactored - ''' +def test_get_existing_items(connection, rel_disorders, delobs_disorders): + # TODO: Currently this test passes but is not really a unit test and also does not + # quite mock the correct things so should be refactored disorder_ids = [d.get('disorder_id') for d in rel_disorders + delobs_disorders] - mocker.patch('encoded.commands.generate_items_from_owl.search_metadata', side_effect=[rel_disorders, delobs_disorders]) - dbdiseases = gifo.get_existing_items(connection, 'Disorder') - assert len(dbdiseases) == len(rel_disorders) + len(delobs_disorders) - assert all([d in dbdiseases for d in disorder_ids]) + with mock.patch('encoded.commands.generate_items_from_owl.search_metadata', side_effect=[rel_disorders, delobs_disorders]): + dbdiseases = gifo.get_existing_items(connection, 'Disorder') + assert len(dbdiseases) == len(rel_disorders) + len(delobs_disorders) + assert all([d in dbdiseases for d in disorder_ids]) -def 
test_get_existing_items_from_db_w_deleted(mocker, connection, disorder_gen, delobs_disorder_gen, rel_disorders, delobs_disorders): +def test_get_existing_items_from_db_w_deleted(connection, disorder_gen, delobs_disorder_gen, rel_disorders, delobs_disorders): disorder_ids = [d.get('disorder_id') for d in rel_disorders + delobs_disorders] - mocker.patch('encoded.commands.generate_items_from_owl.search_metadata', side_effect=[disorder_gen, delobs_disorder_gen]) - dbdiseases = list(gifo.get_existing_items_from_db(connection, 'Disorder')) - assert len(dbdiseases) == len(rel_disorders) + len(delobs_disorders) - assert all([dis.get('disorder_id') in disorder_ids for dis in dbdiseases]) + with mock.patch('encoded.commands.generate_items_from_owl.search_metadata', side_effect=[disorder_gen, delobs_disorder_gen]): + dbdiseases = list(gifo.get_existing_items_from_db(connection, 'Disorder')) + assert len(dbdiseases) == len(rel_disorders) + len(delobs_disorders) + assert all([dis.get('disorder_id') in disorder_ids for dis in dbdiseases]) -def test_get_existing_items_from_db_wo_deleted(mocker, connection, disorder_gen, rel_disorders): +def test_get_existing_items_from_db_wo_deleted(connection, disorder_gen, rel_disorders): disorder_ids = [d.get('disorder_id') for d in rel_disorders] - mocker.patch('encoded.commands.generate_items_from_owl.search_metadata', side_effect=[disorder_gen]) - dbdiseases = list(gifo.get_existing_items_from_db(connection, 'Disorder', include_invisible=False)) - assert len(dbdiseases) == len(rel_disorders) - assert all([dis.get('disorder_id') in disorder_ids for dis in dbdiseases]) + with mock.patch('encoded.commands.generate_items_from_owl.search_metadata', side_effect=[disorder_gen]): + dbdiseases = list(gifo.get_existing_items_from_db(connection, 'Disorder', include_invisible=False)) + assert len(dbdiseases) == len(rel_disorders) + assert all([dis.get('disorder_id') in disorder_ids for dis in dbdiseases]) -def test_get_existing_items_from_db_w_duplicates(mocker, connection, rel_disorders): - ''' The tested function is agnostic to duplicates so testing to make sure - if duplicates are present they are returned - ''' +def test_get_existing_items_from_db_w_duplicates(connection, rel_disorders): + """ + Tests to make sure that if duplicates are present, they are returned + because the tested function is agnostic to duplicates. 
+ """ rel_disorders.append(rel_disorders[0]) # add the duplicate item dgen = iter(rel_disorders) disorder_ids = [d.get('disorder_id') for d in rel_disorders] - mocker.patch('encoded.commands.generate_items_from_owl.search_metadata', side_effect=[dgen]) - dbdiseases = list(gifo.get_existing_items_from_db(connection, 'Disorder', include_invisible=False)) - assert len(dbdiseases) == len(rel_disorders) - assert all([dis.get('disorder_id') in disorder_ids for dis in dbdiseases]) + with mock.patch('encoded.commands.generate_items_from_owl.search_metadata', side_effect=[dgen]): + dbdiseases = list(gifo.get_existing_items_from_db(connection, 'Disorder', include_invisible=False)) + assert len(dbdiseases) == len(rel_disorders) + assert all([dis.get('disorder_id') in disorder_ids for dis in dbdiseases]) def test_create_dict_keyed_by_field_from_items_valid_field_for_all(rel_disorders): @@ -334,11 +337,11 @@ def test_is_deprecated_not_deprecated(uberon_owler5): assert not gifo._is_deprecated(class_, uberon_owler5) -def test_create_term_dict(mocker, mkd_class, uberon_owler5): - mocker.patch('encoded.commands.generate_items_from_owl.get_term_name_from_rdf', - return_value='Multicystic kidney dysplasia') - term = gifo.create_term_dict(mkd_class, 'HP:0000003', uberon_owler5, 'Phenotype') - assert term == {'hpo_id': 'HP:0000003', 'hpo_url': 'http://purl.obolibrary.org/obo/HP_0000003', 'phenotype_name': 'Multicystic kidney dysplasia'} +def test_create_term_dict(mkd_class, uberon_owler5): + with mock.patch('encoded.commands.generate_items_from_owl.get_term_name_from_rdf', + return_value='Multicystic kidney dysplasia'): + term = gifo.create_term_dict(mkd_class, 'HP:0000003', uberon_owler5, 'Phenotype') + assert term == {'hpo_id': 'HP:0000003', 'hpo_url': 'http://purl.obolibrary.org/obo/HP_0000003', 'phenotype_name': 'Multicystic kidney dysplasia'} def test_process_parents(uberon_owler4): @@ -366,26 +369,26 @@ def simple_terms(): return OrderedDict(sorted(terms.items(), key=lambda t: t[0])) -def test_add_additional_term_info(mocker, simple_terms): +def test_add_additional_term_info(simple_terms): val_lists = [[], ['val1'], ['val1', 'val2']] fields = ['definition', 'synonyms', 'dbxrefs', 'alternative_ids'] - mocker.patch('encoded.commands.generate_items_from_owl.convert2URIRef', return_value='blah') - mocker.patch('encoded.commands.generate_items_from_owl.get_synonyms', side_effect=val_lists) - mocker.patch('encoded.commands.generate_items_from_owl.get_definitions', side_effect=val_lists) - mocker.patch('encoded.commands.generate_items_from_owl.get_dbxrefs', side_effect=val_lists) - mocker.patch('encoded.commands.generate_items_from_owl.get_alternative_ids', side_effect=val_lists) - result = gifo.add_additional_term_info(simple_terms, 'data', 'synterms', 'defterms', 'Phenotype') - for tid, term in result.items(): - for f in fields: - if tid == 't1': - assert f not in term - else: - if f == 'definition': # only one added - assert term[f] == 'val1' - elif tid == 't2': - assert term[f] == val_lists[1] - else: - assert term[f] == val_lists[2] + with mock.patch('encoded.commands.generate_items_from_owl.convert2URIRef', return_value='blah'): + with mock.patch('encoded.commands.generate_items_from_owl.get_synonyms', side_effect=val_lists): + with mock.patch('encoded.commands.generate_items_from_owl.get_definitions', side_effect=val_lists): + with mock.patch('encoded.commands.generate_items_from_owl.get_dbxrefs', side_effect=val_lists): + with mock.patch('encoded.commands.generate_items_from_owl.get_alternative_ids', 
side_effect=val_lists): + result = gifo.add_additional_term_info(simple_terms, 'data', 'synterms', 'defterms', 'Phenotype') + for tid, term in result.items(): + for f in fields: + if tid == 't1': + assert f not in term + else: + if f == 'definition': # only one added + assert term[f] == 'val1' + elif tid == 't2': + assert term[f] == val_lists[1] + else: + assert term[f] == val_lists[2] @pytest.fixture @@ -398,24 +401,24 @@ def returned_synonyms(): return copies -def test_get_syn_def_dbxref_altid(mocker, owler, returned_synonyms): - mocker.patch('encoded.commands.generate_items_from_owl.getObjectLiteralsOfType', - side_effect=returned_synonyms) - checks = ['test_val1', 'test_val2'] - class_ = 'test_class' - terms = ['1'] - for i in range(int(len(returned_synonyms) / 4)): - synonyms = gifo.get_synonyms(class_, owler, terms) - definitions = gifo.get_definitions(class_, owler, terms) - dbxrefs = gifo.get_dbxrefs(class_, owler) - altids = gifo.get_alternative_ids(class_, owler) - assert synonyms == definitions == dbxrefs == altids - if i == 0: - assert not synonyms - else: - assert len(synonyms) == i - for syn in synonyms: - assert syn in checks +def test_get_syn_def_dbxref_altid(owler, returned_synonyms): + with mock.patch('encoded.commands.generate_items_from_owl.getObjectLiteralsOfType', + side_effect=returned_synonyms): + checks = ['test_val1', 'test_val2'] + class_ = 'test_class' + terms = ['1'] + for i in range(int(len(returned_synonyms) / 4)): + synonyms = gifo.get_synonyms(class_, owler, terms) + definitions = gifo.get_definitions(class_, owler, terms) + dbxrefs = gifo.get_dbxrefs(class_, owler) + altids = gifo.get_alternative_ids(class_, owler) + assert synonyms == definitions == dbxrefs == altids + if i == 0: + assert not synonyms + else: + assert len(synonyms) == i + for syn in synonyms: + assert syn in checks @pytest.fixture @@ -578,9 +581,9 @@ def test_get_raw_form_raw_and_embedded(raw_item_dict, embedded_item_dict): def test_get_raw_form_does_not_convert_object(raw_item_dict, object_item_dict): - ''' This may not be necessary but shows object frame of item is not + """ This may not be necessary but shows object frame of item is not converted - and is in fact unchanged - ''' + """ rawresult = gifo.get_raw_form(raw_item_dict) objresult = gifo.get_raw_form(object_item_dict) assert rawresult != objresult @@ -594,8 +597,8 @@ def test_compare_terms_no_diff(raw_item_dict): def test_compare_terms_extra_field_in_t2(raw_item_dict): - ''' should ignore any extra fields in t2 - ''' + """ should ignore any extra fields in t2 + """ t1 = raw_item_dict t2 = raw_item_dict.copy() t2['extra_field'] = 'extra_val' @@ -603,8 +606,8 @@ def test_compare_terms_extra_field_in_t2(raw_item_dict): def test_compare_terms_extra_fields_in_t1(raw_item_dict): - ''' should ignore any extra fields in t2 - ''' + """ should ignore any extra fields in t2 + """ extra_fields = { 'extra_field1': 'extra_val1', 'extra_list_field': ['v1', 'v2', 'v3'], @@ -632,27 +635,27 @@ def test_check_for_fields_to_keep(raw_item_dict): assert result == fields_to_keep -def test_id_fields2patch_unchanged(mocker, raw_item_dict): - mocker.patch('encoded.commands.generate_items_from_owl.get_raw_form', return_value=raw_item_dict) - mocker.patch('encoded.commands.generate_items_from_owl.compare_terms', return_value=None) - assert not gifo.id_fields2patch(raw_item_dict, raw_item_dict, True) +def test_id_fields2patch_unchanged(raw_item_dict): + with mock.patch('encoded.commands.generate_items_from_owl.get_raw_form', return_value=raw_item_dict): + 
with mock.patch('encoded.commands.generate_items_from_owl.compare_terms', return_value=None): + assert not gifo.id_fields2patch(raw_item_dict, raw_item_dict, True) -def test_id_fields2patch_keep_term(mocker, raw_item_dict): - ''' case when remove unchanged (rm_unch) param is False just returns term - ''' - mocker.patch('encoded.commands.generate_items_from_owl.get_raw_form', return_value=raw_item_dict) - mocker.patch('encoded.commands.generate_items_from_owl.compare_terms', return_value=None) - assert gifo.id_fields2patch(raw_item_dict, raw_item_dict, False) == raw_item_dict +def test_id_fields2patch_keep_term(raw_item_dict): + """ case when remove unchanged (rm_unch) param is False just returns term + """ + with mock.patch('encoded.commands.generate_items_from_owl.get_raw_form', return_value=raw_item_dict): + with mock.patch('encoded.commands.generate_items_from_owl.compare_terms', return_value=None): + assert gifo.id_fields2patch(raw_item_dict, raw_item_dict, False) == raw_item_dict -def test_id_fields2patch_find_some_fields(mocker, raw_item_dict): - ''' case when remove unchanged (rm_unch) param is False just returns term - ''' +def test_id_fields2patch_find_some_fields(raw_item_dict): + """ case when some fields differ - the patch of changed fields is returned + """ patch = {'uuid': 'uuid1', 'field1': 'val1', 'field2': ['a', 'b']} - mocker.patch('encoded.commands.generate_items_from_owl.get_raw_form', return_value=raw_item_dict) - mocker.patch('encoded.commands.generate_items_from_owl.compare_terms', return_value=patch) - assert gifo.id_fields2patch(raw_item_dict, raw_item_dict, True) == patch + with mock.patch('encoded.commands.generate_items_from_owl.get_raw_form', return_value=raw_item_dict): + with mock.patch('encoded.commands.generate_items_from_owl.compare_terms', return_value=patch): + assert gifo.id_fields2patch(raw_item_dict, raw_item_dict, True) == patch @pytest.fixture @@ -692,16 +695,16 @@ def test_get_uuids_for_linked_one_missing(term_w_slims_and_parents, mock_logger, assert out == 'WARNING: HP0000002 - MISSING FROM IDMAP\n' -def test_identify_item_updates_no_changes(mocker, terms, mock_logger): +def test_identify_item_updates_no_changes(terms, mock_logger): dbterms = terms.copy() for i, tid in enumerate(dbterms.keys()): dbterms[tid].update({'uuid': 'uuid' + str(i + 1)}) - mocker.patch('encoded.commands.generate_items_from_owl._get_uuids_for_linked', return_value={}) - mocker.patch('encoded.commands.generate_items_from_owl.id_fields2patch', return_value=None) - assert not gifo.identify_item_updates(terms, dbterms, 'Phenotype', logger=mock_logger) + with mock.patch('encoded.commands.generate_items_from_owl._get_uuids_for_linked', return_value={}): + with mock.patch('encoded.commands.generate_items_from_owl.id_fields2patch', return_value=None): + assert not gifo.identify_item_updates(terms, dbterms, 'Phenotype', logger=mock_logger) -def test_identify_item_updates_w_new_term(mocker, terms, mock_logger): +def test_identify_item_updates_w_new_term(terms, mock_logger): dbterms = copy.deepcopy(terms) for i, tid in enumerate(dbterms.keys()): dbterms[tid].update({'uuid': 'uuid' + str(i + 1)}) @@ -709,15 +712,15 @@ terms['hp:11'] = new_term side_effect = [None] * 9 side_effect.append(new_term) - mocker.patch('encoded.commands.generate_items_from_owl.uuid4', return_value='uuid11') - mocker.patch('encoded.commands.generate_items_from_owl._get_uuids_for_linked', return_value={}) - 
-def test_identify_item_updates_w_new_term(mocker, terms, mock_logger):
+def test_identify_item_updates_w_new_term(terms, mock_logger):
     dbterms = copy.deepcopy(terms)
     for i, tid in enumerate(dbterms.keys()):
         dbterms[tid].update({'uuid': 'uuid' + str(i + 1)})
@@ -709,15 +712,15 @@
     terms['hp:11'] = new_term
     side_effect = [None] * 9
     side_effect.append(new_term)
-    mocker.patch('encoded.commands.generate_items_from_owl.uuid4', return_value='uuid11')
-    mocker.patch('encoded.commands.generate_items_from_owl._get_uuids_for_linked', return_value={})
-    mocker.patch('encoded.commands.generate_items_from_owl.id_fields2patch', side_effect=side_effect)
-    to_update = gifo.identify_item_updates(terms, dbterms, 'Phenotype', logger=mock_logger)
-    new_term.update({'uuid': 'uuid11'})
-    assert to_update[0] == new_term
+    with mock.patch('encoded.commands.generate_items_from_owl.uuid4', return_value='uuid11'):
+        with mock.patch('encoded.commands.generate_items_from_owl._get_uuids_for_linked', return_value={}):
+            with mock.patch('encoded.commands.generate_items_from_owl.id_fields2patch', side_effect=side_effect):
+                to_update = gifo.identify_item_updates(terms, dbterms, 'Phenotype', logger=mock_logger)
+                new_term.update({'uuid': 'uuid11'})
+                assert to_update[0] == new_term


-def test_identify_item_updates_w_patch_term(mocker, terms, mock_logger):
+def test_identify_item_updates_w_patch_term(terms, mock_logger):
     dbterms = copy.deepcopy(terms)
     added_field = {'definition': 'this is what it means'}
     for i, tid in enumerate(dbterms.keys()):
@@ -729,16 +732,16 @@
         se = copy.deepcopy(added_field)
         se.update({'uuid': 'uuid{}'.format(n)})
         side_effect.append(se)
-    mocker.patch('encoded.commands.generate_items_from_owl._get_uuids_for_linked', return_value={})
-    mocker.patch('encoded.commands.generate_items_from_owl.id_fields2patch', side_effect=side_effect)
-    to_update = gifo.identify_item_updates(terms, dbterms, 'Phenotype', logger=mock_logger)
-    assert len(to_update) == 2
-    for upd in to_update:
-        assert 'uuid' in upd
-        assert upd['definition'] == 'this is what it means'
+    with mock.patch('encoded.commands.generate_items_from_owl._get_uuids_for_linked', return_value={}):
+        with mock.patch('encoded.commands.generate_items_from_owl.id_fields2patch', side_effect=side_effect):
+            to_update = gifo.identify_item_updates(terms, dbterms, 'Phenotype', logger=mock_logger)
+            assert len(to_update) == 2
+            for upd in to_update:
+                assert 'uuid' in upd
+                assert upd['definition'] == 'this is what it means'


-def test_identify_item_updates_set_obsolete_true_obsolete(mocker, terms, mock_logger):
+def test_identify_item_updates_set_obsolete_true_obsolete(terms, mock_logger):
     """ if set_obsolete is true (the default) then the extra dbterm should
         be added to patches as a term to set to obsolete
     """
@@ -748,16 +751,16 @@
     for tid in dbterms.keys():
         uid = tid.replace('hp:', 'uuid')
         dbterms[tid].update({'uuid': uid})
-    mocker.patch('encoded.commands.generate_items_from_owl._get_uuids_for_linked', return_value={})
-    mocker.patch('encoded.commands.generate_items_from_owl.id_fields2patch', return_value=None)
-    to_update = gifo.identify_item_updates(terms, dbterms, 'Phenotype', logger=mock_logger)
-    assert len(to_update) == 1
-    obsterm = to_update[0]
-    assert obsterm['uuid'] == 'uuid10'
-    assert obsterm['status'] == 'obsolete'
+    with mock.patch('encoded.commands.generate_items_from_owl._get_uuids_for_linked', return_value={}):
+        with mock.patch('encoded.commands.generate_items_from_owl.id_fields2patch', return_value=None):
+            to_update = gifo.identify_item_updates(terms, dbterms, 'Phenotype', logger=mock_logger)
+            assert len(to_update) == 1
+            obsterm = to_update[0]
+            assert obsterm['uuid'] == 'uuid10'
+            assert obsterm['status'] == 'obsolete'

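The two copying styles above are not interchangeable: terms.copy() is shallow, so the nested per-term dicts stay shared with the original, while copy.deepcopy(terms) detaches them. That matters whenever a test then mutates dbterms[tid] in place:

    import copy

    terms = {'hp:1': {'uuid': None}}

    shallow = terms.copy()
    shallow['hp:1']['uuid'] = 'u1'
    assert terms['hp:1']['uuid'] == 'u1'   # inner dict is shared

    deep = copy.deepcopy(terms)
    deep['hp:1']['uuid'] = 'u2'
    assert terms['hp:1']['uuid'] == 'u1'   # original left untouched
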
-def test_identify_item_updates_set_obsolete_false_do_not_obsolete_live_term(mocker, terms, mock_logger):
+def test_identify_item_updates_set_obsolete_false_do_not_obsolete_live_term(terms, mock_logger):
     """ if set_obsolete is false then the extra dbterm should not be added to patches
         as a term to set to obsolete as long as its status is not obsolete or deleted
     """
@@ -766,13 +769,13 @@
     dbterms.update({added_obs['hpo_id']: added_obs})
     for i, tid in enumerate(dbterms.keys()):
         dbterms[tid].update({'uuid': 'uuid' + str(i + 1)})
-    mocker.patch('encoded.commands.generate_items_from_owl._get_uuids_for_linked', return_value={})
-    mocker.patch('encoded.commands.generate_items_from_owl.id_fields2patch', return_value=None)
-    to_update = gifo.identify_item_updates(terms, dbterms, 'Phenotype', set_obsoletes=False, logger=mock_logger)
-    assert not to_update
+    with mock.patch('encoded.commands.generate_items_from_owl._get_uuids_for_linked', return_value={}):
+        with mock.patch('encoded.commands.generate_items_from_owl.id_fields2patch', return_value=None):
+            to_update = gifo.identify_item_updates(terms, dbterms, 'Phenotype', set_obsoletes=False, logger=mock_logger)
+            assert not to_update


-def test_identify_item_updates_set_obsolete_true_do_not_patch_obsolete_term(mocker, terms, mock_logger):
+def test_identify_item_updates_set_obsolete_true_do_not_patch_obsolete_term(terms, mock_logger):
     """ if set_obsolete is True then the extra dbterm should not be added to patches
         if its status is already obsolete
     """
@@ -781,10 +784,10 @@
     dbterms.update({added_obs['hpo_id']: added_obs})
     for i, tid in enumerate(dbterms.keys()):
         dbterms[tid].update({'uuid': 'uuid' + str(i + 1)})
-    mocker.patch('encoded.commands.generate_items_from_owl._get_uuids_for_linked', return_value={})
-    mocker.patch('encoded.commands.generate_items_from_owl.id_fields2patch', return_value=None)
-    to_update = gifo.identify_item_updates(terms, dbterms, 'Phenotype', logger=mock_logger)
-    assert not to_update
+    with mock.patch('encoded.commands.generate_items_from_owl._get_uuids_for_linked', return_value={}):
+        with mock.patch('encoded.commands.generate_items_from_owl.id_fields2patch', return_value=None):
+            to_update = gifo.identify_item_updates(terms, dbterms, 'Phenotype', logger=mock_logger)
+            assert not to_update


 def test_write_outfile(simple_terms):
diff --git a/src/encoded/tests/test_indexing.py b/src/encoded/tests/test_indexing.py
index 77796d1427..207160e127 100644
--- a/src/encoded/tests/test_indexing.py
+++ b/src/encoded/tests/test_indexing.py
@@ -77,7 +77,8 @@ def setup_and_teardown(app):
     # AFTER THE TEST
     session = app.registry[DBSESSION]
     connection = session.connection().connect()
-    meta = MetaData(bind=session.connection(), reflect=True)
+    meta = MetaData(bind=session.connection())
+    meta.reflect()
     for table in meta.sorted_tables:
         print('Clear table %s' % table)
         print('Count before -->', str(connection.scalar("SELECT COUNT(*) FROM %s" % table)))
@@ -257,7 +258,7 @@ def test_real_validation_error(app, indexer_testapp, testapp, institution,
     assert val_err_view['validation_errors'] == es_res['_source']['validation_errors']


-# @pytest.mark.performance
+@pytest.mark.performance
 @pytest.mark.skip(reason="need to update perf-testing inserts")
 def test_load_and_index_perf_data(testapp, indexer_testapp):
     '''
diff --git a/src/encoded/tests/test_key.py b/src/encoded/tests/test_key.py
index f9e7fcfb38..dfd5f914bb 100644
--- a/src/encoded/tests/test_key.py
+++ b/src/encoded/tests/test_key.py
@@ -1,4 +1,6 @@
 import pytest
+
+
 pytestmark = [pytest.mark.setone, pytest.mark.working]

 items = [
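The MetaData change in test_indexing.py above swaps the deprecated reflect=True constructor flag for an explicit reflect() call. In isolation the two-step form looks like this (a sketch assuming a reachable engine; the URL is hypothetical):

    from sqlalchemy import MetaData, create_engine

    engine = create_engine('postgresql://localhost/some_test_db')  # hypothetical URL
    meta = MetaData(bind=engine)
    meta.reflect()  # explicit call replaces MetaData(bind=..., reflect=True)
    for table in meta.sorted_tables:
        print(table.name)
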
diff --git a/src/encoded/tests/test_link.py b/src/encoded/tests/test_link.py
index 0094739e3b..6e2b1938ec 100644
--- a/src/encoded/tests/test_link.py
+++ b/src/encoded/tests/test_link.py
@@ -1,6 +1,11 @@
 import pytest
+
+from snovault.storage import Link
+
+
 pytestmark = [pytest.mark.setone, pytest.mark.working]
+
 targets = [
     {'name': 'one', 'uuid': '775795d3-4410-4114-836b-8eeecf1d0c2f'},
     {'name': 'two', 'uuid': 'd6784f5e-48a1-4b40-9b11-c8aefb6e1377'},
@@ -35,7 +40,6 @@ def content(testapp):


 def test_links_add(content, session):
-    from snovault.storage import Link
     links = sorted([
         (str(link.source_rid), link.rel, str(link.target_rid))
         for link in session.query(Link).all()
@@ -48,8 +52,6 @@ def test_links_add(content, session):


 def test_links_update(content, testapp, session):
-    from snovault.storage import Link
-
     url = '/testing-link-sources/' + sources[1]['uuid']
     new_item = {'name': 'B updated', 'target': targets[0]['name']}
     testapp.put_json(url, new_item, status=200)
diff --git a/src/encoded/tests/test_load_access_key.py b/src/encoded/tests/test_load_access_key.py
index 6caf3e1004..b3636ded98 100644
--- a/src/encoded/tests/test_load_access_key.py
+++ b/src/encoded/tests/test_load_access_key.py
@@ -1,13 +1,14 @@
 import pytest
+
 from unittest import mock

 from ..commands.load_access_keys import generate_access_key

 pytestmark = [pytest.mark.setone, pytest.mark.working]

-
 # TODO: test load_access_keys.get_existing_key_ids, which would use ES

+
 def test_gen_access_keys(testapp, admin):
     with mock.patch('encoded.commands.load_access_keys.get_beanstalk_real_url') as mocked_url:
         mocked_url.return_value = 'http://fourfront-hotseat'
diff --git a/src/encoded/tests/test_owltools.py b/src/encoded/tests/test_owltools.py
index 01c2322747..bbd25a663c 100644
--- a/src/encoded/tests/test_owltools.py
+++ b/src/encoded/tests/test_owltools.py
@@ -1,6 +1,7 @@
 import pytest

-from rdflib import RDFS, BNode, URIRef, Literal
+from rdflib import BNode, Literal
+from unittest import mock

 from ..commands import owltools as ot

@@ -8,8 +9,9 @@


 @pytest.fixture
-def owler(mocker):
-    return mocker.patch.object(ot, 'Owler')
+def owler():
+    with mock.patch.object(ot, 'Owler') as mocked_owler:
+        yield mocked_owler


 @pytest.fixture
@@ -30,9 +31,9 @@ def rdf_objects_2_3():
     return [Literal(rdfobj) for rdfobj in rdfobjs]


-def test_get_rdfobjects_one_type_two_rdfobjs(mocker, owler, rdf_objects):
+def test_get_rdfobjects_one_type_two_rdfobjs(owler, rdf_objects):
     checks = ['testrdfobj1', 'testrdfobj2']
-    with mocker.patch('encoded.commands.owltools.ConjunctiveGraph') as graph:
+    with mock.patch('encoded.commands.owltools.ConjunctiveGraph') as graph:
         graph.objects.return_value = rdf_objects
         owler = ot.Owler('http://test.com')
         owler.rdfGraph = graph
@@ -44,9 +45,9 @@ def test_get_rdfobjects_one_type_two_rdfobjs(mocker, owler, rdf_objects):
         assert rdfobj in checks


-def test_get_rdfobjects_two_types_one_rdfobj(mocker, owler, rdf_objects_2_1):
+def test_get_rdfobjects_two_types_one_rdfobj(owler, rdf_objects_2_1):
     check = 'testrdfobj1'
-    with mocker.patch('encoded.commands.owltools.ConjunctiveGraph') as graph:
+    with mock.patch('encoded.commands.owltools.ConjunctiveGraph') as graph:
         graph.objects.return_value = rdf_objects_2_1
         owler = ot.Owler('http://test.com')
         owler.rdfGraph = graph
@@ -56,9 +57,9 @@ def test_get_rdfobjects_two_types_one_rdfobj(mocker, owler, rdf_objects_2_1):
     assert rdfobjects[0] == check

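On the owler fixture above: mocker.patch.object() returns an already-started mock, while bare mock.patch.object() returns an unstarted patcher, so the conversion has to start the patcher and keep it active for the test's duration via a yield fixture. The general shape of that pattern (module and class names hypothetical):

    import pytest
    from unittest import mock

    import some_module  # hypothetical module under test

    @pytest.fixture
    def patched_thing():
        # the patcher starts here and stays active until the test finishes
        with mock.patch.object(some_module, 'SomeClass') as mocked:
            yield mocked
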
-def test_get_rdfobjects_two_types_three_rdfobj(mocker, rdf_objects_2_3):
+def test_get_rdfobjects_two_types_three_rdfobj(rdf_objects_2_3):
     checks = ['testrdfobj1', 'testrdfobj2', 'testrdfobj3']
-    with mocker.patch('encoded.commands.owltools.ConjunctiveGraph') as graph:
+    with mock.patch('encoded.commands.owltools.ConjunctiveGraph') as graph:
         graph.objects.return_value = rdf_objects_2_3
         owler = ot.Owler('http://test.com')
         owler.rdfGraph = graph
@@ -70,8 +71,8 @@ def test_get_rdfobjects_two_types_three_rdfobj(mocker, rdf_objects_2_3):
         assert rdfobj in checks


-def test_get_rdfobjects_none_there(mocker, owler):
-    with mocker.patch('encoded.commands.owltools.ConjunctiveGraph') as graph:
+def test_get_rdfobjects_none_there(owler):
+    with mock.patch('encoded.commands.owltools.ConjunctiveGraph') as graph:
         graph.objects.return_value = []
         owler = ot.Owler('http://test.com')
         owler.rdfGraph = graph
diff --git a/src/encoded/tests/test_parse_hpoa.py b/src/encoded/tests/test_parse_hpoa.py
index a4339fab35..a5d8265ed8 100644
--- a/src/encoded/tests/test_parse_hpoa.py
+++ b/src/encoded/tests/test_parse_hpoa.py
@@ -3,6 +3,7 @@
 import pytest
 import copy
 from io import StringIO
+from unittest import mock
 from collections import OrderedDict

 from ..commands import parse_hpoa as ph
@@ -80,33 +81,33 @@ def mini_hpoa_lines():
     ]


-def test_get_header_info_and_field_names(mocker, capsys, mock_logger, mini_hpoa_lines):
-    mocker.patch('encoded.commands.parse_hpoa.has_unexpected_fields', return_value=False)
-    lfields = ph.line2list(mini_hpoa_lines[4])
-    fields, lines = ph.get_header_info_and_field_names(iter(mini_hpoa_lines), mock_logger)
-    assert fields == lfields
-    assert next(lines).startswith('OMIM:210100')
-    out = capsys.readouterr()[0]
-    assert out == 'INFO: Annotation file info:\n\tdate: 2019-11-08\n\tdescription: HPO annotations for rare diseases [7623: OMIM; 47: DECIPHER; 3771 ORPHANET]\n'
+def test_get_header_info_and_field_names(capsys, mock_logger, mini_hpoa_lines):
+    with mock.patch('encoded.commands.parse_hpoa.has_unexpected_fields', return_value=False):
+        lfields = ph.line2list(mini_hpoa_lines[4])
+        fields, lines = ph.get_header_info_and_field_names(iter(mini_hpoa_lines), mock_logger)
+        assert fields == lfields
+        assert next(lines).startswith('OMIM:210100')
+        out = capsys.readouterr()[0]
+        assert out == 'INFO: Annotation file info:\n\tdate: 2019-11-08\n\tdescription: HPO annotations for rare diseases [7623: OMIM; 47: DECIPHER; 3771 ORPHANET]\n'


-def test_get_header_info_and_field_names_no_comments(mocker, capsys, mock_logger, mini_hpoa_lines):
-    mocker.patch('encoded.commands.parse_hpoa.has_unexpected_fields', return_value=False)
-    lfields = ph.line2list(mini_hpoa_lines[4])
-    fields, lines = ph.get_header_info_and_field_names(iter(mini_hpoa_lines[4:]), mock_logger)
-    assert fields == lfields
-    assert next(lines).startswith('OMIM:210100')
-    out = capsys.readouterr()[0]
-    assert out == 'INFO: Annotation file info:\n\tdate: unknown\n\tdescription: unknown\n'
+def test_get_header_info_and_field_names_no_comments(capsys, mock_logger, mini_hpoa_lines):
+    with mock.patch('encoded.commands.parse_hpoa.has_unexpected_fields', return_value=False):
+        lfields = ph.line2list(mini_hpoa_lines[4])
+        fields, lines = ph.get_header_info_and_field_names(iter(mini_hpoa_lines[4:]), mock_logger)
+        assert fields == lfields
+        assert next(lines).startswith('OMIM:210100')
+        out = capsys.readouterr()[0]
+        assert out == 'INFO: Annotation file info:\n\tdate: unknown\n\tdescription: unknown\n'

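These header tests pair a patched collaborator with pytest's built-in capsys fixture, which captures whatever the code under test writes to stdout/stderr. Reduced to essentials:

    def emit(msg):
        print(msg)

    def test_emit(capsys):
        emit('INFO: hello')
        out, err = capsys.readouterr()
        assert out == 'INFO: hello\n'
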
-def test_get_header_info_and_field_names_misformatted(mocker, capsys, mock_logger, mini_hpoa_lines):
+def test_get_header_info_and_field_names_misformatted(capsys, mock_logger, mini_hpoa_lines):
     mini_hpoa_lines.insert(2, 'bad stuff')
-    mocker.patch('encoded.commands.parse_hpoa.has_unexpected_fields', return_value=['bad'])
-    with pytest.raises(SystemExit):
-        fields, lines = ph.get_header_info_and_field_names(iter(mini_hpoa_lines[4:]), mock_logger)
-        out = capsys.readouterr()[0]
-        assert out == 'INFO: Annotation file info:\n\tdate: unknown\n\tdescription: unknown\nERROR: UNKNOWN FIELDS FOUND: bad\n'
+    with mock.patch('encoded.commands.parse_hpoa.has_unexpected_fields', return_value=['bad']):
+        with pytest.raises(SystemExit):
+            fields, lines = ph.get_header_info_and_field_names(iter(mini_hpoa_lines[4:]), mock_logger)
+            out = capsys.readouterr()[0]
+            assert out == 'INFO: Annotation file info:\n\tdate: unknown\n\tdescription: unknown\nERROR: UNKNOWN FIELDS FOUND: bad\n'


 @pytest.fixture
@@ -246,24 +247,24 @@ def test_create_evi_annotation_with_freq_str(hpoa_data, hpo2uid_map):
     assert evi.get('frequency_value') == freq


-def test_create_evi_annotation_with_hp_modifier(mocker, hpoa_data, hpo2uid_map):
+def test_create_evi_annotation_with_hp_modifier(hpoa_data, hpo2uid_map):
     mod_phe = 'HP:0500252'
     phe_uuid = '05648474-44de-4cdb-b35b-18f5362b8281'
     hpoa_data['Modifier'] = mod_phe
-    mocker.patch('encoded.commands.parse_hpoa.check_hpo_id_and_note_problems', return_value=phe_uuid)
-    evi = ph.create_evi_annotation(hpoa_data, hpo2uid_map, {})
-    assert evi.get('modifier') == phe_uuid
+    with mock.patch('encoded.commands.parse_hpoa.check_hpo_id_and_note_problems', return_value=phe_uuid):
+        evi = ph.create_evi_annotation(hpoa_data, hpo2uid_map, {})
+        assert evi.get('modifier') == phe_uuid


-def test_create_evi_annotation_with_unknown_hp_modifier(mocker, hpoa_data, hpo2uid_map):
+def test_create_evi_annotation_with_unknown_hp_modifier(hpoa_data, hpo2uid_map):
     mod_phe = 'HP:0000002'
     hpoa_data['Modifier'] = mod_phe
-    mocker.patch('encoded.commands.parse_hpoa.check_hpo_id_and_note_problems', return_value=None)
-    evi = ph.create_evi_annotation(hpoa_data, hpo2uid_map, {})
-    assert 'modifier' not in evi
+    with mock.patch('encoded.commands.parse_hpoa.check_hpo_id_and_note_problems', return_value=None):
+        evi = ph.create_evi_annotation(hpoa_data, hpo2uid_map, {})
+        assert 'modifier' not in evi


-def test_convert2raw(mocker, embedded_item_dict, raw_item_dict):
+def test_convert2raw(embedded_item_dict, raw_item_dict):
     # this is not really testing much as the mocked return value is what is being
     # checked so no way to know if fields are really being stripped as expected
     # first add some fields that should be ignored when getting raw form
     embedded_item_dict['date_created'] = "2020-03-03T20:08:10.690526+00:00"
     embedded_item_dict['institution'] = '/institution/bwh'
     embedded_item_dict["principals_allowed"] = {"view": ["system.Everyone"], "edit": ["group.admin"]}
-    mocker.patch('encoded.commands.parse_hpoa.get_raw_form', return_value=raw_item_dict)
-    raw_item = ph.convert2raw(embedded_item_dict)
-    assert raw_item == raw_item_dict
+    with mock.patch('encoded.commands.parse_hpoa.get_raw_form', return_value=raw_item_dict):
+        raw_item = ph.convert2raw(embedded_item_dict)
+        assert raw_item == raw_item_dict


 @pytest.fixture
@@ -300,36 +301,36 @@ def evi_items():
     ]

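The misformatted-header test above keeps its output assertions inside the pytest.raises(SystemExit) block, where they will not execute if the call itself raises; checks that must run belong after the block. A minimal illustration:

    import pytest

    def bail_out():
        raise SystemExit(1)

    def test_bail_out(capsys):
        with pytest.raises(SystemExit):
            bail_out()
            assert False  # never reached; code after the raise is skipped
        out = capsys.readouterr()[0]  # output checks belong out here instead
        assert out == ''
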
-def test_compare_existing_to_newly_generated_all_new(mocker, mock_logger, connection, evi_items):
+def test_compare_existing_to_newly_generated_all_new(mock_logger, connection, evi_items):
     itemcnt = len(evi_items)
-    mocker.patch('encoded.commands.parse_hpoa.search_metadata', return_value=[])
-    evi, exist, to_obs = ph.compare_existing_to_newly_generated(mock_logger, connection, evi_items, 'EvidenceDisPheno')
-    assert evi == evi_items
-    assert not to_obs
-    assert exist == 0
+    with mock.patch('encoded.commands.parse_hpoa.search_metadata', return_value=[]):
+        evi, exist, to_obs = ph.compare_existing_to_newly_generated(mock_logger, connection, evi_items, 'EvidenceDisPheno')
+        assert evi == evi_items
+        assert not to_obs
+        assert exist == 0


-def test_compare_existing_to_newly_generated_all_same(mocker, mock_logger, connection, evi_items):
+def test_compare_existing_to_newly_generated_all_same(mock_logger, connection, evi_items):
     itemcnt = len(evi_items)
-    mocker.patch('encoded.commands.parse_hpoa.search_metadata', return_value=evi_items[:])
-    mocker.patch('encoded.commands.parse_hpoa.get_raw_form', side_effect=evi_items[:])
-    evi, exist, to_obs = ph.compare_existing_to_newly_generated(mock_logger, connection, evi_items, 'EvidenceDisPheno')
-    assert not evi
-    assert not to_obs
-    assert itemcnt == exist
+    with mock.patch('encoded.commands.parse_hpoa.search_metadata', return_value=evi_items[:]):
+        with mock.patch('encoded.commands.parse_hpoa.get_raw_form', side_effect=evi_items[:]):
+            evi, exist, to_obs = ph.compare_existing_to_newly_generated(mock_logger, connection, evi_items, 'EvidenceDisPheno')
+            assert not evi
+            assert not to_obs
+            assert itemcnt == exist


-def test_compare_existing_to_newly_generated_none_same(mocker, mock_logger, connection, evi_items):
+def test_compare_existing_to_newly_generated_none_same(mock_logger, connection, evi_items):
     dbitems = []
     for e in evi_items:
         dbitems.append({k: v + '9' for k, v in e.items()})
     dbuuids = [d.get('uuid') for d in dbitems]
-    mocker.patch('encoded.commands.parse_hpoa.search_metadata', return_value=dbitems)
-    mocker.patch('encoded.commands.parse_hpoa.get_raw_form', side_effect=dbitems)
-    evi, exist, to_obs = ph.compare_existing_to_newly_generated(mock_logger, connection, evi_items, 'EvidenceDisPheno')
-    assert evi == evi_items
-    assert to_obs == dbuuids
-    assert exist == 0
+    with mock.patch('encoded.commands.parse_hpoa.search_metadata', return_value=dbitems):
+        with mock.patch('encoded.commands.parse_hpoa.get_raw_form', side_effect=dbitems):
+            evi, exist, to_obs = ph.compare_existing_to_newly_generated(mock_logger, connection, evi_items, 'EvidenceDisPheno')
+            assert evi == evi_items
+            assert to_obs == dbuuids
+            assert exist == 0


 @pytest.fixture
diff --git a/src/encoded/tests/test_schemas.py b/src/encoded/tests/test_schemas.py
index a9b76a8103..d94995eb40 100644
--- a/src/encoded/tests/test_schemas.py
+++ b/src/encoded/tests/test_schemas.py
@@ -83,6 +83,10 @@ def pattern_fields():


 @pytest.fixture(scope='module')
 def master_mixins():
+    return compute_master_mixins()
+
+
+def compute_master_mixins():
     mixins = load_schema('encoded:schemas/mixins.json')
     mixin_keys = [
         'schema_version',
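The test_schemas.py change above is the usual workaround for non-test code that wants fixture logic: since pytest 4, fixtures cannot be called as plain functions, so the body moves into an ordinary function and the fixture delegates to it. Schematically:

    import pytest

    def compute_master_mixins():
        # ordinary function: callable from non-test code such as verifier.py
        return {'mixins': '...'}  # placeholder body

    @pytest.fixture(scope='module')
    def master_mixins():
        return compute_master_mixins()
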
diff --git a/src/encoded/tests/test_search.py b/src/encoded/tests/test_search.py
index d1b6c2b4d8..582db8ff97 100644
--- a/src/encoded/tests/test_search.py
+++ b/src/encoded/tests/test_search.py
@@ -503,9 +503,15 @@ def test_index_data_workbook(app, workbook, testapp, indexer_testapp, htmltestap
     create_mapping.run(app, sync_index=True)
     # check counts and ensure they're equal
     testapp_counts = testapp.get('/counts')
-    split_counts = testapp_counts.json['db_es_total'].split()
-    assert(int(split_counts[1]) == int(split_counts[3]))  # 2nd is db, 4th is es
+    # e.g., {"db_es_total": "DB: 748 ES: 748 ", ...}
+    db_es_total = testapp_counts.json['db_es_total']
+    split_counts = db_es_total.split()
+    db_total = int(split_counts[1])
+    es_total = int(split_counts[3])
+    assert db_total == es_total
+    # e.g., {..., "db_es_compare": {"AnalysisStep": "DB: 26 ES: 26 ", ...}, ...}
     for item_name, item_counts in testapp_counts.json['db_es_compare'].items():
+        print("item_name=", item_name, "item_counts=", item_counts)
         # make sure counts for each item match ES counts
         split_item_counts = item_counts.split()
         db_item_count = int(split_item_counts[1])
@@ -522,8 +528,11 @@ def test_index_data_workbook(app, workbook, testapp, indexer_testapp, htmltestap
         if es_item_count == 0:
             continue

+        # check items in search result individually
-        res = testapp.get('/%s?limit=all' % item_type, status=[200, 301]).follow()
+        search_url = '/%s?limit=all' % item_type
+        print("search_url=", search_url)
+        res = testapp.get(search_url, status=[200, 301]).follow()
         for item_res in res.json.get('@graph', []):
             index_view_res = es.get(index=namespaced_index,
                                     doc_type=item_type, id=item_res['uuid'])['_source']
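The /counts payload being picked apart above has the shape shown in the new comments ('DB: 748 ES: 748 '), so the two integers sit at whitespace-split positions 1 and 3:

    def parse_db_es_counts(counts_string):
        # "DB: 748 ES: 748 " -> (748, 748); tokens 1 and 3 are the numbers
        parts = counts_string.split()
        return int(parts[1]), int(parts[3])

    assert parse_db_es_counts("DB: 748 ES: 748 ") == (748, 748)
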
""" + notice_pytest_fixtures(request, test_accession_app, external_tx, zsa_savepoints) environ = { 'HTTP_ACCEPT': 'application/json', } @@ -43,6 +47,7 @@ def test_accession_anontestapp(request, test_accession_app, external_tx, zsa_sav def test_test_accession_server_defaults(admin, test_accession_anontestapp): + notice_pytest_fixtures(admin, test_accession_anontestapp) email = admin['email'] extra_environ = {'REMOTE_USER': str(email)} res = test_accession_anontestapp.post_json( diff --git a/src/encoded/tests/test_static_page.py b/src/encoded/tests/test_static_page.py index 4d9a56c675..296e4ff533 100644 --- a/src/encoded/tests/test_static_page.py +++ b/src/encoded/tests/test_static_page.py @@ -1,7 +1,11 @@ import pytest -import time +import webtest + +from dcicutils.qa_utils import notice_pytest_fixtures from .workbook_fixtures import app -from webtest import AppError + + +notice_pytest_fixtures(app) pytestmark = [pytest.mark.indexing, pytest.mark.working] @@ -71,7 +75,7 @@ def help_page(testapp, posted_help_page_section, help_page_json): try: res = testapp.post_json('/pages/', help_page_json, status=201) val = res.json['@graph'][0] - except AppError: + except webtest.AppError: res = testapp.get('/' + help_page_json['uuid'], status=301).follow() val = res.json return val @@ -82,7 +86,7 @@ def help_page_deleted(testapp, posted_help_page_section, help_page_json_draft): try: res = testapp.post_json('/pages/', help_page_json_draft, status=201) val = res.json['@graph'][0] - except AppError: + except webtest.AppError: res = testapp.get('/' + help_page_json_draft['uuid'], status=301).follow() val = res.json return val @@ -93,7 +97,7 @@ def help_page_restricted(testapp, posted_help_page_section, help_page_json_delet try: res = testapp.post_json('/pages/', help_page_json_deleted, status=201) val = res.json['@graph'][0] - except AppError: + except webtest.AppError: res = testapp.get('/' + help_page_json_deleted['uuid'], status=301).follow() val = res.json return val @@ -102,10 +106,6 @@ def help_page_restricted(testapp, posted_help_page_section, help_page_json_delet def test_get_help_page(testapp, help_page): help_page_url = "/" + help_page['name'] res = testapp.get(help_page_url, status=200) - - #import pdb - #pdb.set_trace() - assert res.json['@id'] == help_page_url assert res.json['@context'] == help_page_url assert 'HelpPage' in res.json['@type'] @@ -131,7 +131,10 @@ def test_page_unique_name(testapp, help_page, help_page_deleted): new_page = {'name': help_page['name']} res = testapp.post_json('/page', new_page, status=422) expected_val_err = "%s already exists with name '%s'" % (help_page['uuid'], new_page['name']) - assert expected_val_err in res.json['errors'][0]['description'] + actual_error_description = res.json['errors'][0]['description'] + print("expected:", expected_val_err) + print("actual:", actual_error_description) + assert expected_val_err in actual_error_description # also test PATCH of an existing page with another name res = testapp.patch_json(help_page_deleted['@id'], {'name': new_page['name']}, status=422) diff --git a/src/encoded/tests/test_types_cohort.py b/src/encoded/tests/test_types_cohort.py index 1443d2f35c..0b7ee6ff6b 100644 --- a/src/encoded/tests/test_types_cohort.py +++ b/src/encoded/tests/test_types_cohort.py @@ -1,5 +1,5 @@ import pytest -pytestmark = [pytest.mark.work, pytest.mark.schema] +pytestmark = [pytest.mark.working, pytest.mark.schema] @pytest.fixture diff --git a/src/encoded/tests/test_types_family.py b/src/encoded/tests/test_types_family.py index 
diff --git a/src/encoded/tests/test_types_family.py b/src/encoded/tests/test_types_family.py
index af3ef7a457..b23f7d6d2f 100644
--- a/src/encoded/tests/test_types_family.py
+++ b/src/encoded/tests/test_types_family.py
@@ -3,7 +3,7 @@
 from datetime import datetime
 from xml.etree.ElementTree import fromstring
 from encoded.types.family import *

-pytestmark = [pytest.mark.work, pytest.mark.schema]
+pytestmark = [pytest.mark.working, pytest.mark.schema]


 @pytest.fixture
diff --git a/src/encoded/tests/test_types_file.py b/src/encoded/tests/test_types_file.py
index 4187120b2c..98d93e5f0a 100644
--- a/src/encoded/tests/test_types_file.py
+++ b/src/encoded/tests/test_types_file.py
@@ -4,6 +4,7 @@
 import tempfile

 from pyramid.httpexceptions import HTTPForbidden
+from unittest import mock

 from .. import source_beanstalk_env_vars
 from ..types.file import FileFastq, post_upload, external_creds
@@ -30,17 +31,18 @@ def file(testapp, project, experiment, institution, file_formats):
     return res.json['@graph'][0]


-def test_external_creds(mocker):
-    mocker.patch('encoded.types.file.boto3', autospec=True)
-    ret = external_creds('test-wfout-bucket', 'test-key', 'name')
-    assert ret['key'] == 'test-key'
-    assert ret['bucket'] == 'test-wfout-bucket'
-    assert ret['service'] == 's3'
-    assert 'upload_credentials' in ret.keys()
+def test_external_creds():
+    with mock.patch('encoded.types.file.boto3', autospec=True):
+        ret = external_creds('test-wfout-bucket', 'test-key', 'name')
+        assert ret['key'] == 'test-key'
+        assert ret['bucket'] == 'test-wfout-bucket'
+        assert ret['service'] == 's3'
+        assert 'upload_credentials' in ret.keys()


-def test_force_beanstalk_env(mocker):
+
+def test_force_beanstalk_env():
     """
     This test is a bit outdated, since env variable loading has moved to
     application __init__ from file.py. But let's keep the test...
@@ -57,19 +59,20 @@ def test_force_beanstalk_env(mocker):
     test_cfg.close()

     # mock_boto
-    mock_boto = mocker.patch('encoded.tests.test_types_file.boto3', autospec=True)
-
-    source_beanstalk_env_vars(test_cfg_name)
-    boto3.client('sts', aws_access_key_id=os.environ.get("AWS_ACCESS_KEY_ID"),
-                 aws_secret_access_key=os.environ.get("AWS_SECRET_ACCESS_KEY"))
-    # reset
-    os.environ["AWS_SECRET_ACCESS_KEY"] = secret
-    os.environ["AWS_ACCESS_KEY_ID"] = key
-    # os.remove(test_cfg.delete)
-
-    # ensure boto called with correct arguments
-    mock_boto.client.assert_called_once_with('sts', aws_access_key_id='its a secret id',
-                                             aws_secret_access_key='its a secret')
+    with mock.patch('encoded.tests.test_types_file.boto3', autospec=True) as mock_boto:
+
+        source_beanstalk_env_vars(test_cfg_name)
+        boto3.client('sts', aws_access_key_id=os.environ.get("AWS_ACCESS_KEY_ID"),
+                     aws_secret_access_key=os.environ.get("AWS_SECRET_ACCESS_KEY"))
+        # reset
+        os.environ["AWS_SECRET_ACCESS_KEY"] = secret
+        os.environ["AWS_ACCESS_KEY_ID"] = key
+        # os.remove(test_cfg.delete)
+
+        # ensure boto called with correct arguments
+        mock_boto.client.assert_called_once_with('sts',
+                                                 aws_access_key_id='its a secret id',
+                                                 aws_secret_access_key='its a secret')


 @pytest.fixture
diff --git a/src/encoded/tests/test_types_individual.py b/src/encoded/tests/test_types_individual.py
index d1af00dd6f..b43a51b284 100644
--- a/src/encoded/tests/test_types_individual.py
+++ b/src/encoded/tests/test_types_individual.py
@@ -1,5 +1,5 @@
 import pytest

-pytestmark = [pytest.mark.work, pytest.mark.schema]
+pytestmark = [pytest.mark.working, pytest.mark.schema]


 @pytest.fixture
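Several files in this patch fix pytest.mark.work to pytest.mark.working, a typo pytest will happily collect unless markers are registered and --strict-markers is enabled. One way to register them (a hypothetical conftest.py snippet, not part of this patch):

    # conftest.py (hypothetical)
    def pytest_configure(config):
        for marker in ('working', 'setone', 'schema', 'indexing', 'performance'):
            config.addinivalue_line('markers', marker + ': project marker')
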
--- a/src/encoded/tests/test_types_institution.py
+++ b/src/encoded/tests/test_types_institution.py
@@ -1,5 +1,5 @@
 import pytest

-pytestmark = [pytest.mark.work, pytest.mark.schema]
+pytestmark = [pytest.mark.working, pytest.mark.schema]


 @pytest.fixture
diff --git a/src/encoded/tests/test_types_phenotype.py b/src/encoded/tests/test_types_phenotype.py
index 2e45a5f475..61b1f5a39d 100644
--- a/src/encoded/tests/test_types_phenotype.py
+++ b/src/encoded/tests/test_types_phenotype.py
@@ -1,5 +1,5 @@
 import pytest

-pytestmark = [pytest.mark.work, pytest.mark.schema]
+pytestmark = [pytest.mark.working, pytest.mark.schema]


 @pytest.fixture
diff --git a/src/encoded/tests/test_types_project.py b/src/encoded/tests/test_types_project.py
index a2d2d223ff..409971c9c1 100644
--- a/src/encoded/tests/test_types_project.py
+++ b/src/encoded/tests/test_types_project.py
@@ -1,5 +1,5 @@
 import pytest

-pytestmark = [pytest.mark.work, pytest.mark.schema]
+pytestmark = [pytest.mark.working, pytest.mark.schema]


 @pytest.fixture
diff --git a/src/encoded/tests/test_types_tracking_item.py b/src/encoded/tests/test_types_tracking_item.py
index 786858710c..db70bf7978 100644
--- a/src/encoded/tests/test_types_tracking_item.py
+++ b/src/encoded/tests/test_types_tracking_item.py
@@ -1,13 +1,12 @@
 import pytest

-# Supports commented-out code below.
+# Code that uses this is commented-out below.
 # from ..types import TrackingItem

 pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema]

-
 @pytest.fixture
 def tracking_item():
     return {"tracking_type": "other", "other_tracking": {"extra_field": "extra_value"}}
diff --git a/src/encoded/tests/test_types_variant_consequence.py b/src/encoded/tests/test_types_variant_consequence.py
index a83864f32d..85fa7a03bc 100644
--- a/src/encoded/tests/test_types_variant_consequence.py
+++ b/src/encoded/tests/test_types_variant_consequence.py
@@ -1,5 +1,5 @@
 import pytest

-pytestmark = [pytest.mark.work, pytest.mark.schema]
+pytestmark = [pytest.mark.working, pytest.mark.schema]


 def test_calculated_variant_consequence_display_title(testapp, project, institution):
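The page.py change below wraps a deprecated Pyramid call in filtered_warnings from dcicutils.misc_utils. Assuming it behaves like a scoped warnings.simplefilter, an equivalent context manager can be sketched as:

    import warnings
    from contextlib import contextmanager

    @contextmanager
    def filtered_warnings(action, category=Warning):
        # scoped filter; the previous warning state is restored on exit
        with warnings.catch_warnings():
            warnings.simplefilter(action, category)
            yield
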
diff --git a/src/encoded/types/page.py b/src/encoded/types/page.py
index 5627533c73..bef0fd1141 100644
--- a/src/encoded/types/page.py
+++ b/src/encoded/types/page.py
@@ -36,6 +36,7 @@
     urlparse,
     urlencode
 )
+from dcicutils.misc_utils import filtered_warnings
 from ..search.search import get_iterable_search_results
 from .base import Item
 from .user_content import (
@@ -163,16 +164,27 @@ def is_static_page(info, request):


 def includeme(config):
-    config.add_route(
-        'staticpage',
-        '/*subpath',
-        custom_predicates=[is_static_page],
-        request_method="GET"
-    )
+    with filtered_warnings("ignore", category=DeprecationWarning):
+        config.add_route(
+            'staticpage',
+            '/*subpath',
+            # TODO: Replace custom_predicates=[is_static_page] with something more modern.
+            # The custom_predicates needs to be rewritten.
+            # Although there is a complex rewrite using .add_route_predicate,
+            # the simpler case of just using .add_static_view may bypass a lot of complexity.
+            # But this needs more study to get right. For now this code will work and
+            # we're just going to suppress the warning. -kmp 16-May-2020
+            # Refs:
+            # - https://stackoverflow.com/questions/30102767/custom-route-predicates-in-pyramid
+            # - https://docs.pylonsproject.org/projects/pyramid/en/latest/_modules/pyramid/config/routes.html
+            # - https://docs.pylonsproject.org/projects/pyramid/en/master/narr/hooks.html#view-and-route-predicates
+            # - https://docs.pylonsproject.org/projects/pyramid/en/latest/api/config.html
+            custom_predicates=[is_static_page],
+            request_method="GET"
+        )
     config.add_view(static_page, route_name='staticpage')


 @collection(
     name='pages',
     lookup_key='name',
diff --git a/src/encoded/types/workflow.py b/src/encoded/types/workflow.py
index 3d8ae6d995..fa9ab6284e 100644
--- a/src/encoded/types/workflow.py
+++ b/src/encoded/types/workflow.py
@@ -777,7 +777,7 @@ def get_global_source_or_target(all_io_source_targets):
     # is done against step step.[inputs | output].[target | source].name.
     global_pointing_source_target = [
         source_target for source_target in all_io_source_targets
-        if source_target.get('step') == None
+        if source_target.get('step') is None
     ]
     if len(global_pointing_source_target) > 1:
         raise Exception('Found more than one source or target without a step.')
@@ -795,8 +795,8 @@ def map_run_data_to_io_arg(step_io_arg, wfr_runtime_inputs, io_type):
     :param wfr_runtime_inputs: List of Step inputs or outputs, such as 'input_files', 'output_files', 'quality_metric', or 'parameters'.
     :returns: True if found and added run_data property to analysis_step.input or analysis_step.output (param inputOrOutput).
     '''
-    #is_global_arg = step_io_arg.get('meta', {}).get('global', False) == True
-    #if not is_global_arg:
+    # is_global_arg = step_io_arg.get('meta', {}).get('global', False) == True
+    # if not is_global_arg:
     #     return False  # Skip. We only care about global arguments.

     value_field_name = 'value' if io_type == 'parameter' else 'file'
@@ -859,9 +859,9 @@ def mergeArgumentsWithSameArgumentName(args):
         return resultArgs


-    output_files = mergeArgumentsWithSameArgumentName(self.properties.get('output_files',[]))
-    input_files = mergeArgumentsWithSameArgumentName(self.properties.get('input_files',[]))
-    input_params = mergeArgumentsWithSameArgumentName(self.properties.get('parameters',[]))
+    output_files = mergeArgumentsWithSameArgumentName(self.properties.get('output_files', []))
+    input_files = mergeArgumentsWithSameArgumentName(self.properties.get('input_files', []))
+    input_params = mergeArgumentsWithSameArgumentName(self.properties.get('parameters', []))

     for step in analysis_steps:
         # Add output file metadata to step outputs & inputs, based on workflow_argument_name v step output target name.
diff --git a/src/encoded/verifier.py b/src/encoded/verifier.py
index 3d28a8f3da..df756a0fa6 100644
--- a/src/encoded/verifier.py
+++ b/src/encoded/verifier.py
@@ -3,7 +3,7 @@
 # TODO: Production code should not rely on tests.
 from .tests.test_create_mapping import test_create_mapping
 from .tests.test_embedding import test_add_default_embeds, test_manual_embeds
-from .tests.test_schemas import master_mixins, test_load_schema
+from .tests.test_schemas import compute_master_mixins, test_load_schema


 def verifier(func):
@@ -57,7 +57,7 @@ def verify_profile(item_type, indexer_testapp):
 @verifier
 def verify_schema(item_type_camel, registry):
     # test schema
-    test_load_schema(item_type_camel + ".json", master_mixins(), registry)
+    test_load_schema(item_type_camel + ".json", compute_master_mixins(), registry)


 @verifier
diff --git a/test.ini b/test.ini
index 537c428a54..e70cb502ef 100644
--- a/test.ini
+++ b/test.ini
@@ -5,11 +5,12 @@ file_upload_bucket = elasticbeanstalk-encoded-4dn-files
 blob_bucket = elasticbeanstalk-encoded-4dn-blobs
 #blob_store_profile_name = encoded-4dn-files
 accession_factory = encoded.server_defaults.test_accession
-# indexer.processes =
 elasticsearch.server = 172.31.49.128:9872
 snovault.app_version = 1.3.0
 ga_config_location = ./src/encoded/static/ga_config.json
 encoded_version = 111.222.333
+snovault_version = 222.333.444
+utils_version = 333.444.555
 eb_app_version = app-v-test-simulation
 create_tables = true
 load_test_data = encoded.loadxl:load_test_data