From a08b6e65d9028c6cd759cc86fef1ae98cf22c33c Mon Sep 17 00:00:00 2001
From: Marco Enrico Piras
Date: Sat, 30 Oct 2021 14:44:29 +0000
Subject: [PATCH 001/162] Use mainEntity or root dataset name from RO-Crate as default name

---
 lifemonitor/api/models/rocrate.py   | 4 ++++
 lifemonitor/api/models/workflows.py | 4 ++++
 lifemonitor/api/services.py         | 4 ++--
 3 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/lifemonitor/api/models/rocrate.py b/lifemonitor/api/models/rocrate.py
index e7d2e2563..e166a6b81 100644
--- a/lifemonitor/api/models/rocrate.py
+++ b/lifemonitor/api/models/rocrate.py
@@ -83,6 +83,10 @@ def get_roc_suite(self, roc_suite_identifier):
     def dataset_name(self):
         return self._roc_helper.name
 
+    @property
+    def main_entity_name(self):
+        return self._roc_helper.mainEntity['name']
+
     @property
     def _roc_helper(self):
         if not self.__roc_helper:
diff --git a/lifemonitor/api/models/workflows.py b/lifemonitor/api/models/workflows.py
index 574ac275b..daa42543b 100644
--- a/lifemonitor/api/models/workflows.py
+++ b/lifemonitor/api/models/workflows.py
@@ -237,6 +237,10 @@ def authorizations(self):
     def roc_link(self) -> str:
         return self.uri
 
+    @property
+    def workflow_name(self) -> str:
+        return self.name or self.main_entity_name or self.dataset_name
+
     @property
     def is_latest(self) -> bool:
         return self.workflow.latest_version.version == self.version
diff --git a/lifemonitor/api/services.py b/lifemonitor/api/services.py
index e895145d5..58806e660 100644
--- a/lifemonitor/api/services.py
+++ b/lifemonitor/api/services.py
@@ -137,8 +137,8 @@ def register_workflow(cls, roc_link, workflow_submitter: User, workflow_version,
         auth = ExternalServiceAuthorizationHeader(workflow_submitter, header=authorization)
         auth.resources.append(wv)
         if name is None:
-            w.name = wv.dataset_name
-            wv.name = wv.dataset_name
+            w.name = wv.workflow_name
+            wv.name = wv.workflow_name
 
         # set workflow visibility
         w.public = public

From 9b3e2954dd38c8a15b57f5586e7ef074255e0c5f Mon Sep 17 00:00:00 2001
From: Marco Enrico Piras
Date: Sat, 30 Oct 2021 15:15:32 +0000
Subject: [PATCH 002/162] Raise a proper error when no workflow name can be set

---
 lifemonitor/api/services.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/lifemonitor/api/services.py b/lifemonitor/api/services.py
index 58806e660..4c37af45e 100644
--- a/lifemonitor/api/services.py
+++ b/lifemonitor/api/services.py
@@ -136,7 +136,12 @@ def register_workflow(cls, roc_link, workflow_submitter: User, workflow_version,
         if authorization:
             auth = ExternalServiceAuthorizationHeader(workflow_submitter, header=authorization)
             auth.resources.append(wv)
+
         if name is None:
+            if wv.workflow_name is None:
+                raise lm_exceptions.LifeMonitorException(title="Missing attribute 'name'",
+                                                         detail="Attribute 'name' is not defined and it cannot be retrieved "
+                                                                "from the workflow RO-Crate (name of 'mainEntity' and '/' dataset not set)", status=400)
             w.name = wv.workflow_name
             wv.name = wv.workflow_name
 

From 8cea24d9c96af5f3978506a00d4787c40d6d11ee Mon Sep 17 00:00:00 2001
From: Marco Enrico Piras
Date: Sat, 30 Oct 2021 15:16:25 +0000
Subject: [PATCH 003/162] Add tests

---
 tests/config/data/make-test-rocrates.py       | 12 ++++
 tests/conftest.py                             | 14 ++++-
 .../integration/api/controllers/test_users.py | 58 +++++++++++++++++++
 3 files changed, 83 insertions(+), 1 deletion(-)

diff --git a/tests/config/data/make-test-rocrates.py b/tests/config/data/make-test-rocrates.py
index 86d30e7f3..47245f7ec 100644
--- a/tests/config/data/make-test-rocrates.py
+++ 
b/tests/config/data/make-test-rocrates.py @@ -49,6 +49,7 @@ test_crates.append(('ro-crate-galaxy-sortchangecase', 'ro-crate-galaxy-sortchangecase-invalid-service-type')) test_crates.append(('ro-crate-galaxy-sortchangecase', 'ro-crate-galaxy-sortchangecase-invalid-service-url')) test_crates.append(('ro-crate-galaxy-sortchangecase', 'ro-crate-galaxy-sortchangecase-github-actions')) +test_crates.append(('ro-crate-galaxy-sortchangecase', 'ro-crate-galaxy-sortchangecase-no-name')) # clean up RO-Crates folder if os.path.exists(crates_target_path): @@ -168,6 +169,17 @@ def patch_metadata_graph_node(metadata_file, node, properties): "url": {"@id": "https://github.com"} }) +patch_metadata_graph_node('crates/ro-crate-galaxy-sortchangecase-no-name/ro-crate-metadata.json', + node=("@type", "Dataset"), + properties={ + 'name': None + }) +patch_metadata_graph_node('crates/ro-crate-galaxy-sortchangecase-no-name/ro-crate-metadata.json', + node=("@id", "sort-and-change-case.ga"), + properties={ + 'name': None + }) + # create zip archives print("Creating RO-Crate archives:") for c in test_crates: diff --git a/tests/conftest.py b/tests/conftest.py index 6c3e733bf..c2e1bd66f 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -248,7 +248,19 @@ def generic_workflow(app_client): 'uuid': str(uuid.uuid4()), 'version': '1', 'roc_link': "http://webserver:5000/download?file=ro-crate-galaxy-sortchangecase.crate.zip", - 'name': 'Galaxy workflow from Generic Link', + 'name': 'sort-and-change-case', + 'testing_service_type': 'jenkins', + 'authorization': app_client.application.config['WEB_SERVER_AUTH_TOKEN'] + } + + +@pytest.fixture +def workflow_no_name(app_client): + return { + 'uuid': str(uuid.uuid4()), + 'version': '1', + 'roc_link': "http://webserver:5000/download?file=ro-crate-galaxy-sortchangecase-no-name.crate.zip", + 'name': 'Galaxy workflow from Generic Link (no name)', 'testing_service_type': 'jenkins', 'authorization': app_client.application.config['WEB_SERVER_AUTH_TOKEN'] } diff --git a/tests/integration/api/controllers/test_users.py b/tests/integration/api/controllers/test_users.py index 6c312b0f7..ee0357283 100644 --- a/tests/integration/api/controllers/test_users.py +++ b/tests/integration/api/controllers/test_users.py @@ -155,6 +155,64 @@ def test_generic_workflow_registration_wo_uuid(app_client, client_auth_method, assert data['uuid'], "Workflow UUID was not generated or returned" +@pytest.mark.parametrize("client_auth_method", [ + ClientAuthenticationMethod.API_KEY, + ClientAuthenticationMethod.AUTHORIZATION_CODE, +], indirect=True) +def test_generic_workflow_registration_no_name_exception(app_client, client_auth_method, + user1, user1_auth, client_credentials_registry, workflow_no_name): + logger.debug("User: %r", user1) + logger.debug("headers: %r", user1_auth) + workflow = workflow_no_name + logger.debug("Selected workflow: %r", workflow) + logger.debug("Using oauth2 user: %r", user1) + # prepare body + body = {'roc_link': workflow['roc_link'], + 'version': workflow['version'], + 'authorization': workflow['authorization']} + logger.debug("The BODY: %r", body) + response = app_client.post('/users/current/workflows', json=body, headers=user1_auth) + logger.debug("The actual response: %r", response.data) + + utils.assert_status_code(400, response.status_code) + data = json.loads(response.data) + logger.debug("Response data: %r", data) + assert data['title'] == 'Missing attribute \'name\'', "Unexpected error" + + +@pytest.mark.parametrize("client_auth_method", [ + 
ClientAuthenticationMethod.API_KEY,
+    ClientAuthenticationMethod.AUTHORIZATION_CODE,
+], indirect=True)
+def test_generic_workflow_registration_no_name(app_client, client_auth_method,
+                                               user1, user1_auth, client_credentials_registry, generic_workflow):
+    logger.debug("User: %r", user1)
+    logger.debug("headers: %r", user1_auth)
+    workflow = generic_workflow
+    logger.debug("Selected workflow: %r", workflow)
+    logger.debug("Using oauth2 user: %r", user1)
+    # prepare body
+    body = {'roc_link': workflow['roc_link'],
+            'version': workflow['version'],
+            'authorization': workflow['authorization']}
+    logger.debug("The BODY: %r", body)
+    response = app_client.post('/users/current/workflows', json=body, headers=user1_auth)
+    logger.debug("The actual response: %r", response.data)
+    utils.assert_status_code(201, response.status_code)
+    data = json.loads(response.data)
+    logger.debug("Response data: %r", data)
+    assert data['wf_version'] == workflow['version'], \
+        "Unexpected workflow version"
+    assert data['uuid'], "Workflow UUID was not generated or returned"
+
+    response = app_client.get(f'/workflows/{data["uuid"]}', headers=user1_auth)
+    logger.debug("The actual response: %r", response.data)
+    utils.assert_status_code(200, response.status_code)
+    data = json.loads(response.data)
+    logger.debug("Response data: %r", data)
+    assert data['name'] == workflow['name'], "Unexpected workflow name"
+
+
 @pytest.mark.parametrize("client_auth_method", [
     ClientAuthenticationMethod.API_KEY,
     ClientAuthenticationMethod.AUTHORIZATION_CODE,

From 7afb5de554211602ecce049b856f32bae9d65d3e Mon Sep 17 00:00:00 2001
From: Marco Enrico Piras
Date: Sat, 30 Oct 2021 16:14:29 +0000
Subject: [PATCH 004/162] Add migration to set to 'unknown' workflows with no name

---
 ...861eca55901d_fix_workflows_with_no_name.py | 24 +++++++++++++++++++
 1 file changed, 24 insertions(+)
 create mode 100644 migrations/versions/861eca55901d_fix_workflows_with_no_name.py

diff --git a/migrations/versions/861eca55901d_fix_workflows_with_no_name.py b/migrations/versions/861eca55901d_fix_workflows_with_no_name.py
new file mode 100644
index 000000000..6319fd8fd
--- /dev/null
+++ b/migrations/versions/861eca55901d_fix_workflows_with_no_name.py
@@ -0,0 +1,24 @@
+"""Fix workflows with no name
+
+Revision ID: 861eca55901d
+Revises: 01684f92a380
+Create Date: 2021-10-30 15:51:52.296778
+
+"""
+from alembic import op
+
+
+# revision identifiers, used by Alembic.
+revision = '861eca55901d' +down_revision = '01684f92a380' +branch_labels = None +depends_on = None + + +def upgrade(): + bind = op.get_bind() + bind.execute("update resource set name='unknown' where id in (select id from workflow natural join resource where name='')") + + +def downgrade(): + pass From da5ccf3565d6d7812f5322917d25b2fb82379d39 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Tue, 2 Nov 2021 13:55:09 +0000 Subject: [PATCH 005/162] Allow to register a TestingService class at runtime --- lifemonitor/utils.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lifemonitor/utils.py b/lifemonitor/utils.py index 79898e6ab..a2bfc3fba 100644 --- a/lifemonitor/utils.py +++ b/lifemonitor/utils.py @@ -453,6 +453,9 @@ def _load_concrete_types(self): logger.exception(e) return self.__concrete_types__ + def add_class(self, type_name, type_class): + self.__concrete_types__[type_name] = (type_class,) + def get_class(self, concrete_type): return self._load_concrete_types()[concrete_type][0] From 9b58ed8e740e4576ad4d7b704acafcbd2d6488ba Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Tue, 2 Nov 2021 13:55:49 +0000 Subject: [PATCH 006/162] Update exception title --- lifemonitor/exceptions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lifemonitor/exceptions.py b/lifemonitor/exceptions.py index a4470023f..df642394e 100644 --- a/lifemonitor/exceptions.py +++ b/lifemonitor/exceptions.py @@ -176,7 +176,7 @@ def __init__(self, title="Testing service error", detail="", class RateLimitExceededException(TestingServiceException): def __init__(self, detail=None, type="about:blank", status=403, instance=None, **kwargs): - super().__init__(title="RateLimitExceededException", + super().__init__(title="Rate Limit Exceeded", detail=detail, status=status, **kwargs) From df5ce10a94818c21e40224c14034abe96c391528 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Tue, 2 Nov 2021 21:14:27 +0000 Subject: [PATCH 007/162] Fix __repr__ of LifeMonitorException when 'detail' is empty --- lifemonitor/exceptions.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lifemonitor/exceptions.py b/lifemonitor/exceptions.py index df642394e..f0cda481b 100644 --- a/lifemonitor/exceptions.py +++ b/lifemonitor/exceptions.py @@ -50,7 +50,8 @@ def __init__(self, title=None, detail=None, pass def __repr__(self): - return f"[{self.status}] {self.title}: {self.detail}" + detail = f": {self.detail}" if self.detail else "" + return f"[{self.status}] {self.title}{detail}" def __str__(self): return self.__repr__() From 6c6d1b45e4d7b7ebdbb41852fdcf4664ce6defc2 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Tue, 2 Nov 2021 21:20:55 +0000 Subject: [PATCH 008/162] Report RateLimitException on endpoints related to instances --- lifemonitor/api/controllers.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lifemonitor/api/controllers.py b/lifemonitor/api/controllers.py index 6417f9fd1..8ac3b1b93 100644 --- a/lifemonitor/api/controllers.py +++ b/lifemonitor/api/controllers.py @@ -635,6 +635,8 @@ def instances_builds_get_by_id(instance_uuid, build_id): return lm_exceptions\ .report_problem(404, "Not Found", detail=messages.instance_build_not_found.format(build_id, instance_uuid)) + except lm_exceptions.RateLimitExceededException as e: + return lm_exceptions.report_problem(403, e.title, detail=e.detail) except Exception as e: return lm_exceptions.report_problem(500, "Internal Error", extra_info={"exception": str(e)}) @@ -660,6 +662,8 @@ def 
instances_builds_get_logs(instance_uuid, build_id, offset_bytes=0, limit_byt return lm_exceptions\ .report_problem(404, "Not Found", detail=messages.instance_build_not_found.format(build_id, instance_uuid)) + except lm_exceptions.RateLimitExceededException as e: + return lm_exceptions.report_problem(403, e.title, detail=e.detail) except ValueError as e: return lm_exceptions.report_problem(400, "Bad Request", detail=str(e)) except Exception as e: From 7002a64681a76fcd8ce4323c914569dc083f3325 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Tue, 2 Nov 2021 21:25:03 +0000 Subject: [PATCH 009/162] Add property 'reason ' when 'status' is unavailable --- lifemonitor/api/controllers.py | 14 ++------- lifemonitor/api/serializers.py | 52 ++++++++++++++++++++++++++++++++-- specs/api.yaml | 12 ++++++++ 3 files changed, 64 insertions(+), 14 deletions(-) diff --git a/lifemonitor/api/controllers.py b/lifemonitor/api/controllers.py index 8ac3b1b93..791286aa9 100644 --- a/lifemonitor/api/controllers.py +++ b/lifemonitor/api/controllers.py @@ -475,17 +475,9 @@ def suites_get_by_uuid(suite_uuid): @cached() def suites_get_status(suite_uuid): - try: - response = _get_suite_or_problem(suite_uuid) - return response if isinstance(response, Response) \ - else serializers.SuiteStatusSchema().dump(response.status) - except lm_exceptions.RateLimitExceededException as e: - logger.debug(e) - return { - "status": "not_available", - "latest_build": [], - "reason": str(e) - } + response = _get_suite_or_problem(suite_uuid) + return response if isinstance(response, Response) \ + else serializers.SuiteStatusSchema().dump(response.status) @cached() diff --git a/lifemonitor/api/serializers.py b/lifemonitor/api/serializers.py index 7796cfb46..119bf08db 100644 --- a/lifemonitor/api/serializers.py +++ b/lifemonitor/api/serializers.py @@ -24,10 +24,10 @@ from typing import List from urllib.parse import urljoin -from lifemonitor import utils as lm_utils from lifemonitor import exceptions as lm_exceptions +from lifemonitor import utils as lm_utils from lifemonitor.auth import models as auth_models -from lifemonitor.auth.serializers import UserSchema, SubscriptionSchema +from lifemonitor.auth.serializers import SubscriptionSchema, UserSchema from lifemonitor.serializers import (BaseSchema, ListOfItems, ResourceMetadataSchema, ResourceSchema, ma) @@ -267,6 +267,37 @@ def get_links(self, obj): return links +def format_availability_issues(status: models.WorkflowStatus): + issues = status.availability_issues + logger.info(issues) + if 'not_available' == status.aggregated_status and len(issues) > 0: + return ', '.join([f"{i['issue']}: Unable to get resource '{i['resource']}' from service '{i['service']}'" if 'service' in i else i['issue'] for i in issues]) + return None + + +class WorkflowStatusSchema(WorkflowVersionSchema): + __envelope__ = {"single": None, "many": "items"} + __model__ = models.WorkflowStatus + + class Meta: + model = models.WorkflowStatus + + aggregate_test_status = fields.String(attribute="status.aggregated_status") + latest_builds = ma.Nested(BuildSummarySchema(exclude=('meta', 'links')), + attribute="status.latest_builds", many=True) + reason = fields.Method("get_reason") + + def get_reason(self, workflow_version): + return format_availability_issues(workflow_version.status) + + @post_dump + def remove_skip_values(self, data, **kwargs): + return { + key: value for key, value in data.items() + if value is not None + } + + class WorkflowVersionListItem(WorkflowSchema): subscriptionsOf: List[auth_models.User] = 
None @@ -281,10 +312,14 @@ def __init__(self, *args, self_link: bool = True, subscriptionsOf: List[auth_mod def get_status(self, workflow): try: - return { + result = { "aggregate_test_status": workflow.latest_version.status.aggregated_status, "latest_build": self.get_latest_build(workflow) } + reason = format_availability_issues(workflow.latest_version.status) + if reason: + result['reason'] = reason + return result except lm_exceptions.RateLimitExceededException as e: logger.debug(e) return { @@ -381,6 +416,17 @@ class Meta: suite_uuid = fields.String(attribute="suite.uuid") status = fields.String(attribute="aggregated_status") latest_builds = fields.Nested(BuildSummarySchema(exclude=('meta', 'links')), many=True) + reason = fields.Method("get_reason") + + def get_reason(self, status): + return format_availability_issues(status) + + @post_dump + def remove_skip_values(self, data, **kwargs): + return { + key: value for key, value in data.items() + if value is not None + } class ListOfTestInstancesSchema(ListOfItems): diff --git a/specs/api.yaml b/specs/api.yaml index 737e18da5..b727c6b41 100644 --- a/specs/api.yaml +++ b/specs/api.yaml @@ -1786,6 +1786,10 @@ components: type: array items: $ref: "#/components/schemas/BuildSummary" + reason: + description: "Reason why the status is unavailable" + type: string + nullable: true required: - aggregate_test_status - version @@ -1805,6 +1809,10 @@ components: type: array items: $ref: "#/components/schemas/BuildSummary" + reason: + description: "Reason why the status is unavailable" + type: string + nullable: true required: - version - workflow @@ -1890,6 +1898,10 @@ components: type: array items: $ref: "#/components/schemas/BuildSummary" + reason: + description: Reason why the status is unavailable + type: string + nullable: true required: - suite_uuid - status From 552b5d446dc5ac45a4080bbef433bd4eb29a89d9 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Tue, 2 Nov 2021 21:27:23 +0000 Subject: [PATCH 010/162] Set 'origin' link to 'null' when it cannot be calculated --- lifemonitor/api/serializers.py | 8 +++++--- specs/api.yaml | 1 + 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/lifemonitor/api/serializers.py b/lifemonitor/api/serializers.py index 119bf08db..9d2647b55 100644 --- a/lifemonitor/api/serializers.py +++ b/lifemonitor/api/serializers.py @@ -219,9 +219,11 @@ class Meta: links = fields.Method('get_links') def get_links(self, obj): - links = { - 'origin': obj.external_link - } + links = {} + try: + links['origin'] = obj.external_link + except lm_exceptions.RateLimitExceededException: + links['origin'] = None if self._self_link: links['self'] = self.self_link return links diff --git a/specs/api.yaml b/specs/api.yaml index b727c6b41..527088b43 100644 --- a/specs/api.yaml +++ b/specs/api.yaml @@ -2066,6 +2066,7 @@ components: type: string description: Link to the test instance on the testing service example: "https://github.com/crs4/life_monitor/workflows/docs.yaml" + nullable: true required: - service - resource From 2678ca800bb76f5ccee7c3387ec19ed96b386602 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Tue, 2 Nov 2021 21:30:23 +0000 Subject: [PATCH 011/162] Clean up --- lifemonitor/api/serializers.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/lifemonitor/api/serializers.py b/lifemonitor/api/serializers.py index 9d2647b55..9e508050a 100644 --- a/lifemonitor/api/serializers.py +++ b/lifemonitor/api/serializers.py @@ -367,18 +367,6 @@ def get_items(self, obj): if self.__item_scheme__ else 
None -class WorkflowStatusSchema(WorkflowVersionSchema): - __envelope__ = {"single": None, "many": "items"} - __model__ = models.WorkflowStatus - - class Meta: - model = models.WorkflowStatus - - aggregate_test_status = fields.String(attribute="status.aggregated_status") - latest_builds = ma.Nested(BuildSummarySchema(exclude=('meta', 'links')), - attribute="status.latest_builds", many=True) - - class SuiteSchema(ResourceMetadataSchema): __envelope__ = {"single": None, "many": "items"} __model__ = models.TestSuite From 39c493044ce02c4c10fa30d74096a597b5388fb1 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Tue, 2 Nov 2021 21:31:56 +0000 Subject: [PATCH 012/162] Refactor error messages --- lifemonitor/api/models/status.py | 7 ++++--- lifemonitor/lang/messages.py | 3 +++ 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/lifemonitor/api/models/status.py b/lifemonitor/api/models/status.py index 4792bf6c4..a4854497a 100644 --- a/lifemonitor/api/models/status.py +++ b/lifemonitor/api/models/status.py @@ -23,6 +23,7 @@ import logging import lifemonitor.exceptions as lm_exceptions +from lifemonitor.lang import messages # set module level logger logger = logging.getLogger(__name__) @@ -78,13 +79,13 @@ def check_status(suites): if len(suites) == 0: availability_issues.append({ - "issue": "No test suite configured for this workflow" + "issue": messages.no_test_suite }) for suite in suites: if len(suite.test_instances) == 0: availability_issues.append({ - "issue": f"No test instances configured for suite {suite}" + "issue": messages.no_test_instance_for_suite.format(suite) }) for test_instance in suite.test_instances: try: @@ -93,7 +94,7 @@ def check_status(suites): availability_issues.append({ "service": test_instance.testing_service.url, "test_instance": test_instance, - "issue": "No build found" + "issue": messages.no_build_found_for_instance.format(test_instance) }) else: latest_builds.append(latest_build) diff --git a/lifemonitor/lang/messages.py b/lifemonitor/lang/messages.py index 4f060e5e9..3d53ca304 100644 --- a/lifemonitor/lang/messages.py +++ b/lifemonitor/lang/messages.py @@ -25,6 +25,9 @@ no_registry_found = "Unable to find the registry {}" no_submitter_id_provided = "The registry client should provide a 'submitter_id'" no_user_oauth_identity_on_registry = "Unable to link the identity of user '{}' on the registry '{}' (not authorized yet)" +no_test_suite = "No test suite configured for this workflow" +no_build_found_for_instance = "No build found for instance {}" +no_test_instance_for_suite = "No test instances configured for suite {}" not_authorized_registry_access = "User not authorized to access the registry '{}'" not_authorized_workflow_access = "User not authorized to get workflow data" input_data_missing = "One or more input data are missing" From 6f6f0ed7d148003b5e325adccad107cdf49047f0 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Tue, 2 Nov 2021 21:36:47 +0000 Subject: [PATCH 013/162] Add workflow to test service in "rate limit exceeded" status --- tests/config/data/make-test-rocrates.py | 23 ++++++++++++++++++++ tests/conftest.py | 28 ++++++++++++++++++++++++- 2 files changed, 50 insertions(+), 1 deletion(-) diff --git a/tests/config/data/make-test-rocrates.py b/tests/config/data/make-test-rocrates.py index 86d30e7f3..94629c7c1 100644 --- a/tests/config/data/make-test-rocrates.py +++ b/tests/config/data/make-test-rocrates.py @@ -50,6 +50,8 @@ test_crates.append(('ro-crate-galaxy-sortchangecase', 'ro-crate-galaxy-sortchangecase-invalid-service-url')) 
test_crates.append(('ro-crate-galaxy-sortchangecase', 'ro-crate-galaxy-sortchangecase-github-actions')) +test_crates.append(('ro-crate-galaxy-sortchangecase', 'ro-crate-galaxy-sortchangecase-rate-limit-exceeded')) + # clean up RO-Crates folder if os.path.exists(crates_target_path): shutil.rmtree(crates_target_path) @@ -94,6 +96,27 @@ def patch_metadata_graph_node(metadata_file, node, properties): } }) + +patch_metadata_graph_node('crates/ro-crate-galaxy-sortchangecase-rate-limit-exceeded/ro-crate-metadata.json', + node=("@type", "TestInstance"), + properties={ + 'url': 'http://ratelimit:8080/', + 'resource': 'job/test/', + "runsOn": { + "@id": "https://w3id.org/ro/terms/test#RateLimitExceededService" + } + }) + +patch_metadata_graph_node('crates/ro-crate-galaxy-sortchangecase-rate-limit-exceeded/ro-crate-metadata.json', + node=("@type", "TestService"), + properties={ + "@id": "https://w3id.org/ro/terms/test#RateLimitExceededService", + "name": "RateLimit", + "url": { + "@id": "http://ratelimit:8080" + } + }) + patch_metadata_graph_node('crates/ro-crate-galaxy-sortchangecase-travis/ro-crate-metadata.json', node=("name", "sort-and-change-case"), properties={ diff --git a/tests/conftest.py b/tests/conftest.py index 6c3e733bf..ff594cb7b 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -30,11 +30,15 @@ import lifemonitor.db as lm_db import pytest from lifemonitor import auth -from lifemonitor.api.models import TestSuite, User +from lifemonitor.api.models import (TestingService, TestingServiceTokenManager, + TestSuite, User) from lifemonitor.api.services import LifeMonitor +from lifemonitor.utils import ClassManager + from . import conftest_helpers as helpers from .conftest_types import ClientAuthenticationMethod, RegistryType +from .rate_limit_exceeded import RateLimitExceededTestingService # set the module level logger logger = logging.getLogger(__name__) @@ -254,6 +258,28 @@ def generic_workflow(app_client): } +@pytest.fixture +def rate_limit_exceeded_workflow(app_client, service_registry: ClassManager, user1): + service_registry.add_class("unknown", RateLimitExceededTestingService) + wfdata = { + 'uuid': str(uuid.uuid4()), + 'version': '1', + 'roc_link': "http://webserver:5000/download?file=ro-crate-galaxy-sortchangecase-rate-limit-exceeded.crate.zip", + 'name': 'Galaxy workflow (rate limit exceeded)', + 'testing_service_type': 'unknown', + 'authorization': app_client.application.config['WEB_SERVER_AUTH_TOKEN'] + } + wfdata, workflow_version = register_workflow(user1, wfdata) + logger.info(wfdata) + logger.info(workflow_version) + assert workflow_version, "Workflows not found" + workflow = workflow_version.workflow + workflow.public = True + workflow.save() + assert workflow.public == True, "Workflow should be public" + return workflow + + @pytest.fixture def unmanaged_test_instance(app_client): return { From b666a02db78fbe85662d315f0d664b787cc5e5cc Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Tue, 2 Nov 2021 21:37:53 +0000 Subject: [PATCH 014/162] Add mock of TestingService in "rate limit exceeded" status --- tests/rate_limit_exceeded.py | 87 ++++++++++++++++++++++++++++++++++++ 1 file changed, 87 insertions(+) create mode 100644 tests/rate_limit_exceeded.py diff --git a/tests/rate_limit_exceeded.py b/tests/rate_limit_exceeded.py new file mode 100644 index 000000000..529ad6d4a --- /dev/null +++ b/tests/rate_limit_exceeded.py @@ -0,0 +1,87 @@ +# Copyright (c) 2020-2021 CRS4 +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this 
software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + + +from typing import Any, Dict, List + +from lifemonitor import exceptions as lm_exceptions +from lifemonitor.api import models + + +class RateLimitExceededTestingService(models.TestingService): + + __mapper_args__ = { + 'polymorphic_identity': 'unknown' + } + + def initialize(self): + pass + + @property + def token(self) -> models.TestingServiceToken: + return None + + def initialize(self): + pass + + def check_connection(self) -> bool: + raise lm_exceptions.RateLimitExceededException() + + def is_workflow_healthy(self, test_instance: models.TestInstance) -> bool: + raise lm_exceptions.RateLimitExceededException() + + def get_instance_external_link(self, test_instance: models.TestInstance) -> str: + raise lm_exceptions.RateLimitExceededException() + + def get_last_test_build(self, test_instance: models.TestInstance) -> models.TestBuild: + raise lm_exceptions.RateLimitExceededException() + + def get_last_passed_test_build(self, test_instance: models.TestInstance) -> models.TestBuild: + raise lm_exceptions.RateLimitExceededException() + + def get_last_failed_test_build(self, test_instance: models.TestInstance) -> models.TestBuild: + raise lm_exceptions.RateLimitExceededException() + + def get_test_build(self, test_instance: models.TestInstance, build_number: int) -> models.TestBuild: + raise lm_exceptions.RateLimitExceededException() + + def get_test_build_external_link(self, test_build: models.TestBuild) -> str: + raise lm_exceptions.RateLimitExceededException() + + def get_test_builds(self, test_instance: models.TestInstance, limit: int = 10) -> list: + raise lm_exceptions.RateLimitExceededException() + + def get_test_builds_as_dict(self, test_instance: models.TestInstance, test_output) -> Dict[str, Any]: + raise lm_exceptions.RateLimitExceededException() + + def to_dict(self, test_builds: bool = False, test_output: bool = False) -> dict: + raise lm_exceptions.RateLimitExceededException() + + @classmethod + def all(cls) -> List[models.TestingService]: + raise lm_exceptions.RateLimitExceededException() + + @classmethod + def find_by_uuid(cls, uuid) -> models.TestingService: + raise lm_exceptions.RateLimitExceededException() + + @classmethod + def find_by_url(cls, url) -> models.TestingService: + raise lm_exceptions.RateLimitExceededException() From 37db8e5ba1a6cc7cace0bb02f1a6caf36e894b32 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Tue, 2 Nov 2021 21:40:51 +0000 Subject: [PATCH 015/162] Add tests for rate limit exceeded --- tests/conftest.py | 11 +++ .../api/controllers/test_instances.py | 45 +++++++++++- 
.../api/controllers/test_workflows.py | 26 ++++++- tests/unit/api/controllers/test_instances.py | 50 +++++++++++++ tests/unit/api/controllers/test_suites.py | 48 ++++++++++++- tests/unit/api/controllers/test_workflows.py | 70 ++++++++++++++++++- 6 files changed, 244 insertions(+), 6 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index ff594cb7b..98580b97e 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -35,6 +35,7 @@ from lifemonitor.api.services import LifeMonitor from lifemonitor.utils import ClassManager +from tests.utils import register_workflow from . import conftest_helpers as helpers from .conftest_types import ClientAuthenticationMethod, RegistryType @@ -158,6 +159,16 @@ def lm() -> LifeMonitor: return LifeMonitor.get_instance() +@pytest.fixture +def service_registry() -> ClassManager: + return TestingService.service_type_registry + + +@pytest.fixture +def token_manager() -> TestingServiceTokenManager: + return TestingServiceTokenManager.get_instance() + + @pytest.fixture() def user1(app_context, provider_type, client_credentials_registry, request): register_workflows = False diff --git a/tests/integration/api/controllers/test_instances.py b/tests/integration/api/controllers/test_instances.py index 1037f0c00..44e1922fe 100644 --- a/tests/integration/api/controllers/test_instances.py +++ b/tests/integration/api/controllers/test_instances.py @@ -19,13 +19,13 @@ # SOFTWARE. import json -import pytest import logging +import pytest +from lifemonitor.api import models from tests import utils from tests.conftest_types import ClientAuthenticationMethod - logger = logging.getLogger() @@ -185,6 +185,22 @@ def test_get_instance_builds(app_client, client_auth_method, user1, user1_auth, utils.assert_properties_exist(["build_id", "instance"], item) +def test_get_instance_builds_rate_limit_exceeded(app_client, client_auth_method, user1, user1_auth, rate_limit_exceeded_workflow: models.Workflow): + workflow = rate_limit_exceeded_workflow.latest_version + assert len(workflow.test_suites) > 0, "Unexpected number of test suites" + suite = workflow.test_suites[0] + logger.debug("The test suite: %r", suite) + assert len(suite.test_instances) > 0, "Unexpected number of test instances" + instance = suite.test_instances[0] + logger.debug("The test instance: %r", instance) + response = app_client.get(f"{utils.build_instances_path(instance.uuid)}/latest-builds?limit=2", headers=user1_auth) + logger.debug(response) + utils.assert_status_code(403, response.status_code) + data = json.loads(response.data) + logger.debug("Response data: %r", data) + assert data['title'] == 'Rate Limit Exceeded', "Unexpected error title" + + @pytest.mark.parametrize("client_auth_method", [ # ClientAuthenticationMethod.BASIC, ClientAuthenticationMethod.API_KEY, @@ -247,6 +263,31 @@ def test_get_instance_build(app_client, client_auth_method, user1, user1_auth, v utils.assert_properties_exist(["build_id", "instance"], data) +@pytest.mark.parametrize("client_auth_method", [ + # ClientAuthenticationMethod.BASIC, + ClientAuthenticationMethod.NOAUTH, + ClientAuthenticationMethod.API_KEY, + ClientAuthenticationMethod.AUTHORIZATION_CODE, + ClientAuthenticationMethod.CLIENT_CREDENTIALS, + ClientAuthenticationMethod.REGISTRY_CODE_FLOW +], indirect=True) +def test_get_instance_build_rate_limit_exceeded(app_client, client_auth_method, user1, user1_auth, rate_limit_exceeded_workflow: models.Workflow): + workflow = rate_limit_exceeded_workflow.latest_version + assert len(workflow.test_suites) > 0, "Unexpected number 
of test suites" + suite = workflow.test_suites[0] + logger.debug("The test suite: %r", suite) + assert len(suite.test_instances) > 0, "Unexpected number of test instances" + instance = suite.test_instances[0] + logger.debug("The test instance: %r", instance) + + response = app_client.get(f"{utils.build_instances_path(instance.uuid)}/builds/0", headers=user1_auth) + logger.debug(response) + utils.assert_status_code(response.status_code, 403) + data = json.loads(response.data) + logger.debug("Response data: %r", data) + assert data['title'] == 'Rate Limit Exceeded', "Unexpected error title" + + @pytest.mark.parametrize("client_auth_method", [ # ClientAuthenticationMethod.BASIC, ClientAuthenticationMethod.API_KEY, diff --git a/tests/integration/api/controllers/test_workflows.py b/tests/integration/api/controllers/test_workflows.py index 8e67fa010..6b31b06d4 100644 --- a/tests/integration/api/controllers/test_workflows.py +++ b/tests/integration/api/controllers/test_workflows.py @@ -23,7 +23,7 @@ import uuid import pytest -from lifemonitor.api.models import WorkflowVersion +from lifemonitor.api.models import WorkflowVersion, Workflow from lifemonitor.auth import current_user from lifemonitor.auth.models import ApiKey from lifemonitor.auth.oauth2.server.models import Token @@ -139,6 +139,30 @@ def test_get_workflows_public(app_client, client_auth_method, user1): assert len(workflows) == 1, "Unexpected number of public workflows" +@pytest.mark.parametrize("client_auth_method", [ + ClientAuthenticationMethod.NOAUTH, +], indirect=True) +@pytest.mark.parametrize("user1", [True], indirect=True) +def test_get_workflows_public_with_rate_limit_exceeded_workflow(app_client, client_auth_method, user1, rate_limit_exceeded_workflow: Workflow): + # get workflows registered by user1 + response = app_client.get(f"{utils.build_workflow_path()}?status=true") + assert response.status_code == 200, "Error getting public workflows" + workflows = json.loads(response.data)['items'] + assert len(workflows) == 2, "Unexpected number of public workflows" + logger.debug("Got workflows: %r", workflows) + for w in workflows: + logger.debug("Checking workflow %r", w) + assert 'status' in w, f"Unable to find the status for the workflow {w['uuid']}" + assert 'aggregate_test_status' in w['status'], f"Unable to find the aggregate_test_status for the workflow {w['uuid']}" + if w['uuid'] == str(rate_limit_exceeded_workflow.uuid): + logger.debug("Checking workflow with rate limit exceeded %r", w['uuid']) + assert w['status']["aggregate_test_status"] == 'not_available', "Unexpected status for workflow with rate limit exceeded" + assert "reason" in w['status'], f"Unable to find the 'reason' property for the workflow {w['uuid']}" + assert "Rate Limit Exceeded" in w['status']['reason'], f"Invalid 'reason' value for the workflow {w['uuid']}" + else: + assert "reason" not in w['status'], f"The 'reason' property should not be set for the workflow {w['uuid']}" + + @pytest.mark.parametrize("client_auth_method", [ # ClientAuthenticationMethod.BASIC, ClientAuthenticationMethod.API_KEY, diff --git a/tests/unit/api/controllers/test_instances.py b/tests/unit/api/controllers/test_instances.py index dfec2163d..dee8ce0a8 100644 --- a/tests/unit/api/controllers/test_instances.py +++ b/tests/unit/api/controllers/test_instances.py @@ -135,6 +135,31 @@ def test_get_instance_build_by_user(m, request_context, mock_user): assert isinstance(response, dict), "Unexpected response type" +@patch("lifemonitor.api.controllers.lm") +def 
test_get_instance_build_by_user_rate_limit_exceeded(lm, request_context, mock_user, rate_limit_exceeded_workflow: models.Workflow): + assert not auth.current_user.is_anonymous, "Unexpected user in session" + assert auth.current_registry is not None, "Unexpected registry in session" + # set workflow + workflow = rate_limit_exceeded_workflow + lm.get_public_workflows.return_value = [] + lm.get_user_workflows.return_value = [rate_limit_exceeded_workflow] + # set suite + suite: models.TestSuite = workflow.latest_version.test_suites[0] + lm.get_suite.return_value = suite + # set instance + instance: models.TestInstance = suite.test_instances[0] + lm.get_test_instance.return_value = instance + # get and check suite status + response = controllers.instances_builds_get_by_id(instance.uuid, "123") + logger.debug(response.data) + lm.get_test_instance.assert_called_once() + lm.get_suite.assert_called_once() + data = json.loads(response.data) + assert isinstance(data, dict), "Unexpected response type" + assert 403 == int(data["status"]), "Unexpected status code" + assert "Rate Limit Exceeded" == data["title"], "Unexpected error title" + + @patch("lifemonitor.api.controllers.lm") def test_get_instance_build_last_logs_by_user(m, request_context, mock_user): assert not auth.current_user.is_anonymous, "Unexpected user in session" @@ -331,3 +356,28 @@ def test_get_instance_build_by_registry(m, request_context, mock_registry): response = controllers.instances_builds_get_by_id(instance['uuid'], build.id) m.get_test_instance.assert_called_once() assert isinstance(response, dict), "Unexpected response type" + + +@patch("lifemonitor.api.controllers.lm") +def test_get_instance_build_by_registry_rate_limit_exceeded(lm, request_context, mock_registry, rate_limit_exceeded_workflow: models.Workflow): + assert auth.current_user.is_anonymous, "Unexpected user in session" + assert auth.current_registry, "Unexpected registry in session" + # set workflow + workflow = rate_limit_exceeded_workflow + lm.get_public_workflows.return_value = [] + lm.get_user_workflows.return_value = [rate_limit_exceeded_workflow] + # set suite + suite: models.TestSuite = workflow.latest_version.test_suites[0] + lm.get_suite.return_value = suite + # set instance + instance: models.TestInstance = suite.test_instances[0] + lm.get_test_instance.return_value = instance + # get and check suite status + response = controllers.instances_builds_get_by_id(instance.uuid, "123") + logger.debug(response.data) + lm.get_test_instance.assert_called_once() + lm.get_suite.assert_called_once() + data = json.loads(response.data) + assert isinstance(data, dict), "Unexpected response type" + assert 403 == int(data["status"]), "Unexpected status code" + assert "Rate Limit Exceeded" == data["title"], "Unexpected error title" diff --git a/tests/unit/api/controllers/test_suites.py b/tests/unit/api/controllers/test_suites.py index 9360143f0..4d0cfb26d 100644 --- a/tests/unit/api/controllers/test_suites.py +++ b/tests/unit/api/controllers/test_suites.py @@ -21,6 +21,7 @@ import logging from unittest.mock import MagicMock, patch +import lifemonitor.api.models as models import lifemonitor.api.controllers as controllers import lifemonitor.auth as auth import lifemonitor.exceptions as lm_exceptions @@ -154,6 +155,29 @@ def test_get_suite_status_by_user(m, request_context, mock_user): assert p in response, f"Property {p} not found on response" +@patch("lifemonitor.api.controllers.lm") +def test_get_suite_status_by_user_rate_limit_exceeded(lm, mock_user, 
rate_limit_exceeded_workflow: models.Workflow):
+    # add one user to the current session
+    assert not auth.current_user.is_anonymous, "Unexpected user in session"
+    assert auth.current_user == mock_user, "Unexpected user in session"
+    logger.debug("Current registry: %r", auth.current_registry)
+    assert not auth.current_registry, "Unexpected registry in session"
+    # set workflow
+    workflow = rate_limit_exceeded_workflow
+    lm.get_public_workflows.return_value = []
+    lm.get_user_workflows.return_value = [rate_limit_exceeded_workflow]
+    # set suite
+    suite: models.TestSuite = workflow.latest_version.test_suites[0]
+    lm.get_suite.return_value = suite
+    # get and check suite status
+    response = controllers.suites_get_status(suite.uuid)
+    lm.get_suite.assert_called_once()
+    logger.info(response)
+    for p in ["latest_builds", "suite_uuid", "status"]:
+        assert p in response, f"Property {p} not found on response"
+    assert response['status'] == 'not_available'
+
+
 @patch("lifemonitor.api.controllers.lm")
 def test_get_suite_status_by_registry(m, request_context, mock_registry):
     # add one user to the current session
@@ -169,7 +193,7 @@ def test_get_suite_status_by_registry(m, request_context, mock_registry):
     suite.workflow = workflow
     m.get_suite.return_value = suite
     m.get_public_workflow_version.return_value = None
-    m.get_registry_workflow_version.return_value = suite
+    m.get_registry_workflow_version.return_value = workflow
     response = controllers.suites_get_status(suite.suite)
     m.get_suite.assert_called_once()
     m.get_registry_workflow_version.assert_called_once()
@@ -179,6 +203,28 @@ def test_get_suite_status_by_registry(m, request_context, mock_registry):
         assert p in response, f"Property {p} not found on response"
 
 
+@patch("lifemonitor.api.controllers.lm")
+def test_get_suite_status_by_registry_rate_limit_exceeded(lm, request_context, mock_registry, rate_limit_exceeded_workflow: models.Workflow):
+    # add one user to the current session
+    assert auth.current_user.is_anonymous, "Unexpected user in session"
+    logger.debug("Current registry: %r", auth.current_registry)
+    assert auth.current_registry, "Unexpected registry in session"
+    # set workflow
+    workflow = rate_limit_exceeded_workflow
+    lm.get_public_workflows.return_value = []
+    # set suite
+    suite: models.TestSuite = workflow.latest_version.test_suites[0]
+    lm.get_suite.return_value = suite
+    lm.get_registry_workflow_version.return_value = workflow.latest_version
+    # get and check suite status
+    response = controllers.suites_get_status(suite.uuid)
+    lm.get_suite.assert_called_once()
+    logger.info(response)
+    for p in ["latest_builds", "suite_uuid", "status"]:
+        assert p in response, f"Property {p} not found on response"
+    assert response['status'] == 'not_available'
+
+
 @patch("lifemonitor.api.controllers.lm")
 def test_get_suite_instances_by_user(m, request_context, mock_user):
     # add one user to the current session
diff --git a/tests/unit/api/controllers/test_workflows.py b/tests/unit/api/controllers/test_workflows.py
index 061bf4950..615d3416c 100644
--- a/tests/unit/api/controllers/test_workflows.py
+++ b/tests/unit/api/controllers/test_workflows.py
@@ -18,11 +18,13 @@
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 # SOFTWARE.
+ import logging from unittest.mock import MagicMock, patch import pytest + import lifemonitor.api.controllers as controllers import lifemonitor.api.models as models import lifemonitor.api.serializers as serializers @@ -46,9 +48,9 @@ def test_get_workflows_with_user(m, request_context, mock_user, fake_uri): # make empty the list of public workflows m.get_public_workflows.return_value = [] # add one fake workflow - data = {"uuid": "123456", "version": "1.0", "uri": fake_uri} + data = {"uuid": "123456", "version": "1.0", "name": "Fake workflow", "uri": fake_uri} w = models.Workflow(uuid=data['uuid']) - w.add_version(data["version"], data['uri'], MagicMock()) + w.add_version(data["version"], data['uri'], MagicMock(), name="Prova") m.get_user_workflows.return_value = [w] response = controllers.workflows_get(status=True) m.get_public_workflows.assert_called_once() @@ -58,6 +60,46 @@ def test_get_workflows_with_user(m, request_context, mock_user, fake_uri): assert response == serializers.ListOfWorkflows(workflow_status=True).dump([w]) +@patch("lifemonitor.api.controllers.lm") +def test_get_public_workflows_rate_limit_exceeded(lm, rate_limit_exceeded_workflow): + # set workflow as public + lm.get_public_workflows.return_value = [rate_limit_exceeded_workflow] + # get workflows + data = controllers.workflows_get(status=True) + logger.info(data) + # check number of items + assert len(data['items']) == 1, "Unexpected number of items" + # inspect item + item = data['items'][0] + assert 'status' in item, "Workflow status should be set" + assert 'aggregate_test_status' in item['status'], "AggregateStatus Workflow status should be set" + assert item['status']['aggregate_test_status'] == 'not_available' + assert "Rate Limit Exceeded" in item['status']['reason'], "Unexpected reason for unavailability" + + +@patch("lifemonitor.api.controllers.lm") +def test_get_workflows_with_user_rate_limit_exceeded(lm, mock_user, rate_limit_exceeded_workflow): + # add one user to the current session + assert not auth.current_user.is_anonymous, "Unexpected user in session" + assert auth.current_user == mock_user, "Unexpected user in session" + logger.debug("Current registry: %r", auth.current_registry) + assert not auth.current_registry, "Unexpected registry in session" + # set workflows + lm.get_public_workflows.return_value = [] + lm.get_user_workflows.return_value = [rate_limit_exceeded_workflow] + # get workflows + data = controllers.workflows_get(status=True) + logger.info(data) + # check number of items + assert len(data['items']) == 1, "Unexpected number of items" + # inspect item + item = data['items'][0] + assert 'status' in item, "Workflow status should be set" + assert 'aggregate_test_status' in item['status'], "AggregateStatus Workflow status should be set" + assert item['status']['aggregate_test_status'] == 'not_available' + assert "Rate Limit Exceeded" in item['status']['reason'], "Unexpected reason for unavailability" + + @patch("lifemonitor.api.controllers.lm") def test_get_workflows_with_registry(m, request_context, mock_registry, fake_uri): assert auth.current_user.is_anonymous, "Unexpected user in session" @@ -75,6 +117,30 @@ def test_get_workflows_with_registry(m, request_context, mock_registry, fake_uri assert response == serializers.ListOfWorkflows(workflow_status=True).dump([w]) +@patch("lifemonitor.api.controllers.lm") +def test_get_workflows_with_registry_rate_limit_exceeded(m, request_context, mock_registry, fake_uri, rate_limit_exceeded_workflow): + assert auth.current_user.is_anonymous, 
"Unexpected user in session" + assert auth.current_registry, "Unexpected registry in session" + # make empty the list of public workflows + m.get_public_workflows.return_value = [] + m.get_registry_workflows.return_value = [rate_limit_exceeded_workflow] + response = controllers.workflows_get(status=True) + m.get_registry_workflows.assert_called_once() + assert isinstance(response, dict), "Unexpected result type" + assert response == serializers.ListOfWorkflows(workflow_status=True).dump([rate_limit_exceeded_workflow]) + # check number of items + assert len(response['items']) == 1, "Unexpected number of items" + # inspect item + item = response['items'][0] + assert 'status' in item, "Workflow status should be set" + assert 'aggregate_test_status' in item['status'], "AggregateStatus Workflow status should be set" + assert item['status']['aggregate_test_status'] == 'not_available' + assert 'status' in item, "Workflow status should be set" + assert 'aggregate_test_status' in item['status'], "AggregateStatus Workflow status should be set" + assert item['status']['aggregate_test_status'] == 'not_available' + assert "Rate Limit Exceeded" in item['status']['reason'], "Unexpected reason for unavailability" + + @patch("lifemonitor.api.controllers.lm") def test_post_workflows_no_authorization(m, request_context): assert auth.current_user.is_anonymous, "Unexpected user in session" From 8d1441b957a308a2a6fb2e8a586d527c069c0047 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Tue, 2 Nov 2021 21:45:43 +0000 Subject: [PATCH 016/162] Fix flake8 issues --- tests/conftest.py | 2 +- tests/rate_limit_exceeded.py | 3 --- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index 98580b97e..246c12b15 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -287,7 +287,7 @@ def rate_limit_exceeded_workflow(app_client, service_registry: ClassManager, use workflow = workflow_version.workflow workflow.public = True workflow.save() - assert workflow.public == True, "Workflow should be public" + assert workflow.public is True, "Workflow should be public" return workflow diff --git a/tests/rate_limit_exceeded.py b/tests/rate_limit_exceeded.py index 529ad6d4a..458b05c20 100644 --- a/tests/rate_limit_exceeded.py +++ b/tests/rate_limit_exceeded.py @@ -31,9 +31,6 @@ class RateLimitExceededTestingService(models.TestingService): 'polymorphic_identity': 'unknown' } - def initialize(self): - pass - @property def token(self) -> models.TestingServiceToken: return None From 6f610b5938872bfab550b257c8f64e25ee3c1ed4 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Tue, 2 Nov 2021 22:27:38 +0000 Subject: [PATCH 017/162] Fix test config --- lifemonitor/utils.py | 3 +++ tests/conftest.py | 33 +++++++++++++++++---------------- 2 files changed, 20 insertions(+), 16 deletions(-) diff --git a/lifemonitor/utils.py b/lifemonitor/utils.py index a2bfc3fba..8f0f8ec1c 100644 --- a/lifemonitor/utils.py +++ b/lifemonitor/utils.py @@ -456,6 +456,9 @@ def _load_concrete_types(self): def add_class(self, type_name, type_class): self.__concrete_types__[type_name] = (type_class,) + def remove_class(self, type_name): + return self.__concrete_types__.pop(type_name, None) + def get_class(self, concrete_type): return self._load_concrete_types()[concrete_type][0] diff --git a/tests/conftest.py b/tests/conftest.py index 246c12b15..6e05e8f52 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -69,8 +69,24 @@ def headers(): return helpers.get_headers() +@pytest.fixture +def lm() -> LifeMonitor: 
+ return LifeMonitor.get_instance() + + +@pytest.fixture +def service_registry() -> ClassManager: + return TestingService.service_type_registry + + +@pytest.fixture +def token_manager() -> TestingServiceTokenManager: + return TestingServiceTokenManager.get_instance() + + @pytest.fixture(autouse=True) -def initialize(app_settings, request_context): +def initialize(app_settings, request_context, service_registry: ClassManager): + service_registry.remove_class("unknown") helpers.clean_db() helpers.init_db(app_settings) helpers.disable_auto_login() @@ -154,21 +170,6 @@ def app_context(app_settings): yield from helpers.app_context(app_settings, init_db=True, clean_db=False, drop_db=False) -@pytest.fixture -def lm() -> LifeMonitor: - return LifeMonitor.get_instance() - - -@pytest.fixture -def service_registry() -> ClassManager: - return TestingService.service_type_registry - - -@pytest.fixture -def token_manager() -> TestingServiceTokenManager: - return TestingServiceTokenManager.get_instance() - - @pytest.fixture() def user1(app_context, provider_type, client_credentials_registry, request): register_workflows = False From 5383d1a001ac7d0f147824a4b3224d41f7213fae Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Wed, 3 Nov 2021 14:36:47 +0000 Subject: [PATCH 018/162] Remove logs endpoint from specs --- specs/api.yaml | 57 -------------------------------------------------- 1 file changed, 57 deletions(-) diff --git a/specs/api.yaml b/specs/api.yaml index 527088b43..b298789b0 100644 --- a/specs/api.yaml +++ b/specs/api.yaml @@ -1101,47 +1101,6 @@ paths: "404": $ref: "#/components/responses/NotFound" - /instances/{instance_uuid}/builds/{build_id}/logs: - get: - summary: "Get test instance build logs" - description: "Get the build logs for the specified test instance and build" - x-openapi-router-controller: lifemonitor.api.controllers - operationId: "instances_builds_get_logs" - tags: ["Test Instances"] - deprecated: true - security: - - apiKey: ["workflow.read"] - - RegistryClientCredentials: ["workflow.read"] - - RegistryCodeFlow: ["workflow.read"] - - AuthorizationCodeFlow: ["workflow.read"] - - {} - parameters: - - $ref: "#/components/parameters/instance_uuid" - - $ref: "#/components/parameters/build_id" - - $ref: "#/components/parameters/offset_bytes" - - $ref: "#/components/parameters/limit_bytes" - responses: - "200": - description: "Log data" - content: - application/json: - schema: - type: string - description: | - Log messages from the test build - example: | - [15/April/2021:13:55:36 -0700] "GET /workflow.cwl HTTP/1.0" 200 2326 - [15/April/2021:13:55:36 -0700] "GET /workflow.cwl HTTP/1.0" 200 2326 - [15/April/2021:13:55:36 -0700] "GET /workflow.cwl HTTP/1.0" 200 2326 - "400": - $ref: "#/components/responses/BadRequest" - "401": - $ref: "#/components/responses/Unauthorized" - "403": - $ref: "#/components/responses/Forbidden" - "404": - $ref: "#/components/responses/NotFound" - components: parameters: registry_uuid: @@ -1260,22 +1219,6 @@ components: minimum: 1 default: 10 description: "Maximum number of items to retrieve" - limit_bytes: - name: "limit_bytes" - description: "Maximum number of log bytes to retrieve" - in: query - schema: - type: integer - minimum: 1 - default: 131072 # 128 kB - offset_bytes: - name: "offset_bytes" - description: "Number of bytes to skip while fetching the log" - in: query - schema: - type: integer - minimum: 0 - default: 0 responses: NotFound: From 593030e935a7254a95ba5fb848b4249ca9985f6a Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Wed, 3 
Nov 2021 14:37:53 +0000 Subject: [PATCH 019/162] Remove integration tests for the logs endpoint --- .../api/controllers/test_instances.py | 28 ------------------- 1 file changed, 28 deletions(-) diff --git a/tests/integration/api/controllers/test_instances.py b/tests/integration/api/controllers/test_instances.py index 44e1922fe..9d2ea2b4a 100644 --- a/tests/integration/api/controllers/test_instances.py +++ b/tests/integration/api/controllers/test_instances.py @@ -286,31 +286,3 @@ def test_get_instance_build_rate_limit_exceeded(app_client, client_auth_method, data = json.loads(response.data) logger.debug("Response data: %r", data) assert data['title'] == 'Rate Limit Exceeded', "Unexpected error title" - - -@pytest.mark.parametrize("client_auth_method", [ - # ClientAuthenticationMethod.BASIC, - ClientAuthenticationMethod.API_KEY, - ClientAuthenticationMethod.AUTHORIZATION_CODE, - ClientAuthenticationMethod.CLIENT_CREDENTIALS, - ClientAuthenticationMethod.REGISTRY_CODE_FLOW -], indirect=True) -def test_get_instance_build_logs(app_client, client_auth_method, user1, user1_auth, valid_workflow): - w, workflow = utils.pick_and_register_workflow(user1, valid_workflow) - assert len(workflow.test_suites) > 0, "Unexpected number of test suites" - suite = workflow.test_suites[0] - logger.debug("The test suite: %r", suite) - assert len(suite.test_instances) > 0, "Unexpected number of test instances" - instance = suite.test_instances[0] - logger.debug("The test instance: %r", instance) - assert len(instance.get_test_builds()) > 0, "Unexpected number of test builds" - build = instance.get_test_builds()[0] - - response = app_client.get(f"{utils.build_instances_path(instance.uuid)}/builds/{build.id}/logs", - headers=user1_auth) - logger.debug(response.data) - utils.assert_status_code(response.status_code, 200) - data = json.loads(response.data) - logger.debug("Response data: %r", data) - # redundant check: the validation is performed by the connexion framework - assert isinstance(data, str), "Unexpected result type" From e8e8e6e35589c9904f27924a93a41deff4697c16 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Wed, 3 Nov 2021 14:49:38 +0000 Subject: [PATCH 020/162] Fix missing interface method on Github service --- lifemonitor/api/models/services/github.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lifemonitor/api/models/services/github.py b/lifemonitor/api/models/services/github.py index 259885b4a..d98d2ef5e 100644 --- a/lifemonitor/api/models/services/github.py +++ b/lifemonitor/api/models/services/github.py @@ -212,6 +212,9 @@ def get_test_build_external_link(self, test_build: models.TestBuild) -> str: repo = test_build.test_instance.testing_service._get_repo(test_build.test_instance) return f'https://github.com/{repo.full_name}/actions/runs/{test_build.id}' + def get_test_build_output(self, test_instance: models.TestInstance, build_number, offset_bytes=0, limit_bytes=131072): + raise lm_exceptions.NotImplementedException(detail="not supported for GitHub test builds") + @classmethod def _parse_workflow_url(cls, resource: str) -> Tuple[str, str, str]: """ From d52bc8cdd26d814384bfb9d27c4a9afbe13499cb Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Wed, 3 Nov 2021 15:58:14 +0000 Subject: [PATCH 021/162] Fix specs: add missing response when POSTing a test instance --- specs/api.yaml | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/specs/api.yaml b/specs/api.yaml index b298789b0..df6db116e 100644 --- a/specs/api.yaml +++ b/specs/api.yaml @@ -969,7 
+969,7 @@ paths: # valueB: "#/components/schemas/ManagedTestInstanceCreationData" responses: "201": - description: Instance created by this operation + $ref: "#/components/responses/TestInstanceRegistered" "400": $ref: "#/components/responses/BadRequest" "401": @@ -1270,6 +1270,20 @@ components: schema: $ref: "#/components/schemas/Workflow" + TestInstanceRegistered: + description: A new test instance has been registered. + content: + application/json: + schema: + type: object + properties: + uuid: + type: string + description: | + Universally unique identifier of the test instance + readOnly: true + example: ba5bbdc3-d9fb-4381-a1d8-96a8ac5594d7 + schemas: User: type: object From e726d6886ccd7ecb97f48373e4ca1b48bb4c8004 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Wed, 3 Nov 2021 16:00:05 +0000 Subject: [PATCH 022/162] Fix name of property returned when POSTing an instance --- lifemonitor/api/controllers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lifemonitor/api/controllers.py b/lifemonitor/api/controllers.py index 791286aa9..8ce01857f 100644 --- a/lifemonitor/api/controllers.py +++ b/lifemonitor/api/controllers.py @@ -539,7 +539,7 @@ def suites_post_instance(suite_uuid): data['service']['url'], data['resource']) clear_cache(suites_get_instances, suite_uuid) - return {'test_instance_uuid': str(test_instance.uuid)}, 201 + return {'uuid': str(test_instance.uuid)}, 201 except KeyError as e: return lm_exceptions.report_problem(400, "Bad Request", extra_info={"exception": str(e)}, detail=messages.input_data_missing) From bcabdfc65070408902c93572fdf49d405aa68078 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Wed, 3 Nov 2021 16:27:10 +0000 Subject: [PATCH 023/162] Fix tests --- tests/integration/api/controllers/test_instances.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/api/controllers/test_instances.py b/tests/integration/api/controllers/test_instances.py index 9d2ea2b4a..9544cd6f7 100644 --- a/tests/integration/api/controllers/test_instances.py +++ b/tests/integration/api/controllers/test_instances.py @@ -51,7 +51,7 @@ def test_add_unmanaged_instance(app_client, client_auth_method, user1, user1_aut logger.debug(response) utils.assert_status_code(201, response.status_code) response_data = json.loads(response.data) - assert "test_instance_uuid" in response_data, "Unexpcted response: missing 'test_instance_uuid'" + assert "uuid" in response_data, "Unexpected response: missing 'uuid'" # check number of instances after assert len(suite.test_instances) == num_of_instances + 1, "Unexpected number of instances" From 6f8d14bff996890535262c1029a345e49b268a21 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Thu, 4 Nov 2021 20:31:18 +0000 Subject: [PATCH 024/162] Add request timeout --- lifemonitor/cache.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index ab47fa8eb..709205e9d 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -31,7 +31,9 @@ class Timeout: + NONE = 0 DEFAULT = os.environ.get('CACHE_DEFAULT_TIMEOUT', 300) + REQUEST = os.environ.get('CACHE_REQUEST_TIMEOUT', 60) SESSION = os.environ.get('CACHE_SESSION_TIMEOUT', 3600) BUILDS = os.environ.get('CACHE_SESSION_TIMEOUT', 84600) From 4e07e96c26c495faf9f8420f04b0684b4d9edb48 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Thu, 4 Nov 2021 20:32:40 +0000 Subject: [PATCH 025/162] Update default timeout on cached decorator --- lifemonitor/cache.py | 2 +- 1 file changed, 1 insertion(+), 1
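One caveat about the Timeout values above: os.environ.get returns a string whenever the variable is set, so an environment override hands Flask-Caching a str timeout instead of an int. A defensive variant (a sketch, not part of the patch):

    import os

    def _env_int(name: str, default: int) -> int:
        # os.environ.get yields str when the variable is set;
        # coerce so the cache backend always receives seconds as int
        try:
            return int(os.environ.get(name, default))
        except (TypeError, ValueError):
            return default

    REQUEST = _env_int('CACHE_REQUEST_TIMEOUT', 60)
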
deletion(-) diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index 709205e9d..d591dabe1 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -90,7 +90,7 @@ def clear_cache(func=None, *args, **kwargs): logger.error("Error deleting cache: %r", e) -def cached(timeout=Timeout.DEFAULT, unless=False): +def cached(timeout=Timeout.REQUEST, unless=False): def decorator(function): @cache.memoize(timeout=timeout, unless=unless, make_name=_make_name) From d47e79d2b8c0f7e44c77fe2cb14546dd316bb1ce Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Thu, 4 Nov 2021 20:34:44 +0000 Subject: [PATCH 026/162] Set default timeout to REQUEST on api controller --- lifemonitor/api/controllers.py | 44 ++++++++++++++++------------------ 1 file changed, 21 insertions(+), 23 deletions(-) diff --git a/lifemonitor/api/controllers.py b/lifemonitor/api/controllers.py index 8ce01857f..9f8cc7dcc 100644 --- a/lifemonitor/api/controllers.py +++ b/lifemonitor/api/controllers.py @@ -48,16 +48,14 @@ def _row_to_dict(row): return d -# @authorized -@cached() +@cached(timeout=Timeout.REQUEST) def workflow_registries_get(): registries = lm.get_workflow_registries() logger.debug("registries_get. Got %s registries", len(registries)) return serializers.ListOfWorkflowRegistriesSchema().dump(registries) -# @authorized -@cached() +@cached(timeout=Timeout.REQUEST) def workflow_registries_get_by_uuid(registry_uuid): registry = lm.get_workflow_registry_by_uuid(registry_uuid) logger.debug("registries_get. Got %s registry", registry) @@ -65,7 +63,7 @@ def workflow_registries_get_by_uuid(registry_uuid): @authorized -@cached() +@cached(timeout=Timeout.REQUEST) def workflow_registries_get_current(): if current_registry: registry = current_registry @@ -74,7 +72,7 @@ def workflow_registries_get_current(): return lm_exceptions.report_problem(401, "Unauthorized") -@cached(timeout=Timeout.SESSION) +@cached(timeout=Timeout.REQUEST) def workflows_get(status=False): workflows = lm.get_public_workflows() if current_user and not current_user.is_anonymous: @@ -114,7 +112,7 @@ def _get_workflow_or_problem(wf_uuid, wf_version=None): detail=messages.unauthorized_workflow_access.format(wf_uuid)) -@cached() +@cached(timeout=Timeout.REQUEST) def workflows_get_by_id(wf_uuid, wf_version): response = _get_workflow_or_problem(wf_uuid, wf_version) return response if isinstance(response, Response) \ @@ -123,7 +121,7 @@ def workflows_get_by_id(wf_uuid, wf_version): else None).dump(response) -@cached() +@cached(timeout=Timeout.REQUEST) def workflows_get_latest_version_by_id(wf_uuid, previous_versions=False, ro_crate=False): response = _get_workflow_or_problem(wf_uuid, None) exclude = ['previous_versions'] if not previous_versions else [] @@ -134,14 +132,14 @@ def workflows_get_latest_version_by_id(wf_uuid, previous_versions=False, ro_crat subscriptionsOf=[current_user] if not current_user.is_anonymous else None).dump(response) -@cached() +@cached(timeout=Timeout.REQUEST) def workflows_get_versions_by_id(wf_uuid): response = _get_workflow_or_problem(wf_uuid, None) return response if isinstance(response, Response) \ else serializers.ListOfWorkflowVersions().dump(response.workflow) -@cached() +@cached(timeout=Timeout.REQUEST) def workflows_get_status(wf_uuid): wf_version = request.args.get('version', 'latest').lower() response = _get_workflow_or_problem(wf_uuid, wf_version) @@ -149,7 +147,7 @@ def workflows_get_status(wf_uuid): else serializers.WorkflowStatusSchema().dump(response) -@cached() +@cached(timeout=Timeout.REQUEST) def 
workflows_rocrate_metadata(wf_uuid, wf_version): response = _get_workflow_or_problem(wf_uuid, wf_version) if isinstance(response, Response): @@ -157,7 +155,7 @@ def workflows_rocrate_metadata(wf_uuid, wf_version): return response.crate_metadata -@cached() +@cached(timeout=Timeout.REQUEST) def workflows_rocrate_download(wf_uuid, wf_version): response = _get_workflow_or_problem(wf_uuid, wf_version) if isinstance(response, Response): @@ -177,7 +175,7 @@ def workflows_rocrate_download(wf_uuid, wf_version): @authorized -@cached() +@cached(timeout=Timeout.REQUEST) def registry_workflows_get(status=False): workflows = lm.get_registry_workflows(current_registry) logger.debug("workflows_get. Got %s workflows (registry: %s)", len(workflows), current_registry) @@ -193,7 +191,7 @@ def registry_workflows_post(body): @authorized -@cached() +@cached(timeout=Timeout.REQUEST) def registry_user_workflows_get(user_id, status=False): if not current_registry: return lm_exceptions.report_problem(401, "Unauthorized", detail=messages.no_registry_found) @@ -217,7 +215,7 @@ def registry_user_workflows_post(user_id, body): @authorized -@cached() +@cached(timeout=Timeout.REQUEST) def user_workflows_get(status=False, subscriptions=False): if not current_user or current_user.is_anonymous: return lm_exceptions.report_problem(401, "Unauthorized", detail=messages.no_user_in_session) @@ -261,7 +259,7 @@ def user_workflow_unsubscribe(wf_uuid): @authorized -@cached() +@cached(timeout=Timeout.REQUEST) def user_registry_workflows_get(registry_uuid, status=False): if not current_user or current_user.is_anonymous: return lm_exceptions.report_problem(401, "Unauthorized", detail=messages.no_user_in_session) @@ -431,7 +429,7 @@ def workflows_delete(wf_uuid, wf_version): raise lm_exceptions.LifeMonitorException(title="Internal Error", detail=str(e)) -@cached() +@cached(timeout=Timeout.REQUEST) def workflows_get_suites(wf_uuid, version='latest'): response = _get_workflow_or_problem(wf_uuid, version) return response if isinstance(response, Response) \ @@ -466,21 +464,21 @@ def _get_suite_or_problem(suite_uuid): return lm_exceptions.report_problem(404, "Not Found", detail=messages.suite_not_found.format(suite_uuid)) -@cached() +@cached(timeout=Timeout.REQUEST) def suites_get_by_uuid(suite_uuid): response = _get_suite_or_problem(suite_uuid) return response if isinstance(response, Response) \ else serializers.SuiteSchema().dump(response) -@cached() +@cached(timeout=Timeout.REQUEST) def suites_get_status(suite_uuid): response = _get_suite_or_problem(suite_uuid) return response if isinstance(response, Response) \ else serializers.SuiteStatusSchema().dump(response.status) -@cached() +@cached(timeout=Timeout.REQUEST) def suites_get_instances(suite_uuid): response = _get_suite_or_problem(suite_uuid) return response if isinstance(response, Response) \ @@ -574,7 +572,7 @@ def _get_instances_or_problem(instance_uuid): detail=messages.instance_not_found.format(instance_uuid)) -@cached() +@cached(timeout=Timeout.REQUEST) def instances_get_by_id(instance_uuid): response = _get_instances_or_problem(instance_uuid) return response if isinstance(response, Response) \ @@ -601,7 +599,7 @@ def instances_delete_by_id(instance_uuid): raise lm_exceptions.LifeMonitorException(title="Internal Error", detail=str(e)) -@cached() +@cached(timeout=Timeout.REQUEST) def instances_get_builds(instance_uuid, limit): response = _get_instances_or_problem(instance_uuid) logger.info("Number of builds to load: %r", limit) @@ -609,7 +607,7 @@ def 
instances_get_builds(instance_uuid, limit): else serializers.ListOfTestBuildsSchema().dump(response.get_test_builds(limit=limit)) -@cached() +@cached(timeout=Timeout.REQUEST) def instances_builds_get_by_id(instance_uuid, build_id): response = _get_instances_or_problem(instance_uuid) if isinstance(response, Response): From cfdce35e169463be2d08deb1b03c9dbca652c71a Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Thu, 4 Nov 2021 20:39:17 +0000 Subject: [PATCH 027/162] Add cache support to TestInstance --- .../api/models/testsuites/testinstance.py | 23 +++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/lifemonitor/api/models/testsuites/testinstance.py b/lifemonitor/api/models/testsuites/testinstance.py index 8c8664b2a..6147d6abf 100644 --- a/lifemonitor/api/models/testsuites/testinstance.py +++ b/lifemonitor/api/models/testsuites/testinstance.py @@ -26,6 +26,7 @@ import lifemonitor.api.models as models from lifemonitor.api.models import db +from lifemonitor.cache import Timeout, cache from lifemonitor.models import JSON, UUID, ModelMixin from .testsuite import TestSuite @@ -84,17 +85,26 @@ def managed(self): @property def external_link(self): + logger.debug("Getting external link...") return self.testing_service.get_instance_external_link(self) @property + @cache.memoize(timeout=Timeout.REQUEST) def last_test_build(self): - return self.testing_service.get_last_test_build(self) + # return self.testing_service.get_last_test_build(self) + builds = self.get_test_builds() + return builds[0] if builds and len(builds) > 0 else None + @cache.memoize(timeout=Timeout.REQUEST) def get_test_builds(self, limit=10): + logger.debug("Getting test builds...") return self.testing_service.get_test_builds(self, limit=limit) + @cache.memoize(timeout=Timeout.REQUEST) def get_test_build(self, build_number): - return self.testing_service.get_test_build(self, build_number) + logger.debug("Getting test build...") + # return self.testing_service.get_test_build(self, build_number) + return next((b for b in self.get_test_builds() if b.number == build_number), None) def to_dict(self, test_build=False, test_output=False): data = { @@ -107,6 +117,15 @@ def to_dict(self, test_build=False, test_output=False): data.update(self.testing_service.get_test_builds_as_dict(test_output=test_output)) return data + def refresh(self): + try: + import lifemonitor + cache.delete_memoized(lifemonitor.api.models.testsuites.testinstance.TestInstance.get_test_build) + cache.delete_memoized(lifemonitor.api.models.testsuites.testinstance.TestInstance.get_test_builds) + except Exception as e: + logger.debug(e) + self.get_test_builds() + @classmethod def all(cls) -> List[TestInstance]: return cls.query.all() From 14b0e8664cd0f6685f247afb14fc55e2a6d64859 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Thu, 4 Nov 2021 20:40:35 +0000 Subject: [PATCH 028/162] Fix logger name --- lifemonitor/api/models/services/github.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lifemonitor/api/models/services/github.py b/lifemonitor/api/models/services/github.py index d98d2ef5e..329b94bad 100644 --- a/lifemonitor/api/models/services/github.py +++ b/lifemonitor/api/models/services/github.py @@ -39,7 +39,7 @@ from .service import TestingService # set module level logger -logger = logging.getLogger() +logger = logging.getLogger(__name__) class GithubTestingService(TestingService): From c494d98fa7f8839c00975420a7c0a25febbc5395 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Thu, 4 Nov 2021 
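The TestInstance caching introduced in PATCH 027 leans on two Flask-Caching behaviours: memoize keys entries by function and arguments (self included), and delete_memoized called with just the function drops the entries for every instance, which is what refresh() relies on before repopulating the builds. The pattern in isolation (minimal sketch with an in-memory cache):

    from flask import Flask
    from flask_caching import Cache

    app = Flask(__name__)
    cache = Cache(app, config={"CACHE_TYPE": "SimpleCache"})

    class Instance:
        @cache.memoize(timeout=300)
        def get_test_builds(self, limit=10):
            print("hitting the testing service...")
            return ["build-1", "build-2"][:limit]

    with app.app_context():
        i = Instance()
        i.get_test_builds()   # computed and cached
        i.get_test_builds()   # served from the cache
        cache.delete_memoized(Instance.get_test_builds)   # drop all entries
        i.get_test_builds()   # recomputed
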
20:41:44 +0000 Subject: [PATCH 029/162] Add initial cache support to Github service --- lifemonitor/api/models/services/github.py | 43 +++++++++++++++++------ 1 file changed, 33 insertions(+), 10 deletions(-) diff --git a/lifemonitor/api/models/services/github.py b/lifemonitor/api/models/services/github.py index 329b94bad..42cefb1fd 100644 --- a/lifemonitor/api/models/services/github.py +++ b/lifemonitor/api/models/services/github.py @@ -104,10 +104,14 @@ def _gh_service(self) -> Github: self.initialize() return self._gh_obj - @cache.memoize(timeout=Timeout.BUILDS) + @cache.memoize(timeout=Timeout.NONE) + def _get_workflow_info(self, resource): + return self._parse_workflow_url(resource) + + @cache.memoize(timeout=Timeout.NONE) def _get_repo(self, test_instance: models.TestInstance): - _, repo_full_name, _ = self._parse_workflow_url(test_instance.resource) - repository = self._gh_obj.get_repo(repo_full_name) + _, repo_full_name, _ = self._get_workflow_info(test_instance.resource) + repository = self._gh_service.get_repo(repo_full_name) logger.debug("Repo ID: %s", repository.id) logger.debug("Repo full name: %s", repository.full_name) logger.debug("Repo URL: %s", f'https://github.com/{repository.full_name}') @@ -134,13 +138,19 @@ def check_connection(self) -> bool: logger.info("Caught exception from Github GET /rate_limit: %s. Connection not working?", e) return False + @cache.memoize(timeout=Timeout.SESSION) + def _get_gh_workflow(self, repository, workflow_id): + return self._gh_service.get_repo(repository).get_workflow(workflow_id) + def _iter_runs(self, test_instance: models.TestInstance, status: str = None) -> Generator[github.WorkflowRun.WorkflowRun]: - _, repository, workflow_id = self._parse_workflow_url(test_instance.resource) + _, repository, workflow_id = self._get_workflow_info(test_instance.resource) logger.debug("iterating over runs -- wf id: %s; repository: %s; status: %s", workflow_id, repository, status) - workflow = self._gh_service.get_repo(repository).get_workflow(workflow_id) + workflow = self._get_gh_workflow(repository, workflow_id) logger.debug("Retrieved workflow %s from github", workflow_id) + for run in workflow.get_runs(): + logger.debug("Loading Github run ID %r", run.id) # The Workflow.get_runs method in the PyGithub API has a status argument # which in theory we could use to filter the runs that are retrieved to # only the ones with the status that interests us. This worked in the past, @@ -152,20 +162,21 @@ def _iter_runs(self, test_instance: models.TestInstance, status: str = None) -> if status is None or run.status == status: yield run - def get_instance_external_link(self, test_instance: models.TestInstance) -> str: - _, repo_full_name, workflow_id = self._parse_workflow_url(test_instance.resource) - return f'https://github.com/{repo_full_name}/actions/workflows/{workflow_id}' - + @cache.memoize(timeout=Timeout.REQUEST) def get_last_test_build(self, test_instance: models.TestInstance) -> Optional[GithubTestBuild]: try: + logger.debug("Getting latest build...") for run in self._iter_runs(test_instance, status=self.GithubStatus.COMPLETED): return GithubTestBuild(self, test_instance, run) + logger.debug("Getting latest build... 
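A note for the get_test_builds implementation further below: because _iter_runs above is a generator, itertools.islice stops consuming it after limit items, so at most limit workflow runs are pulled from GitHub's paginated results even though get_runs() could yield many more. Reduced to its essentials:

    import itertools as it

    def iter_runs():
        # stands in for the paginated GitHub workflow runs
        for n in it.count():
            yield f"run-{n}"

    first_ten = list(it.islice(iter_runs(), 10))   # pulls exactly 10 items
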
DONE") return None except GithubRateLimitExceededException as e: raise lm_exceptions.RateLimitExceededException(detail=str(e), instance=test_instance) + @cache.memoize(timeout=Timeout.REQUEST) def get_last_passed_test_build(self, test_instance: models.TestInstance) -> Optional[GithubTestBuild]: try: + logger.debug("Getting last passed build...") for run in self._iter_runs(test_instance, status=self.GithubStatus.COMPLETED): if run.conclusion == self.GithubConclusion.SUCCESS: return GithubTestBuild(self, test_instance, run) @@ -173,8 +184,10 @@ def get_last_passed_test_build(self, test_instance: models.TestInstance) -> Opti except GithubRateLimitExceededException as e: raise lm_exceptions.RateLimitExceededException(detail=str(e), instance=test_instance) + @cache.memoize(timeout=Timeout.REQUEST) def get_last_failed_test_build(self, test_instance: models.TestInstance) -> Optional[GithubTestBuild]: try: + logger.debug("Getting last failed build...") for run in self._iter_runs(test_instance, status=self.GithubStatus.COMPLETED): if run.conclusion == self.GithubConclusion.FAILURE: return GithubTestBuild(self, test_instance, run) @@ -182,13 +195,16 @@ def get_last_failed_test_build(self, test_instance: models.TestInstance) -> Opti except GithubRateLimitExceededException as e: raise lm_exceptions.RateLimitExceededException(detail=str(e), instance=test_instance) + @cache.memoize(timeout=Timeout.REQUEST) def get_test_builds(self, test_instance: models.TestInstance, limit=10) -> list: try: + logger.debug("Getting test builds...") return list(GithubTestBuild(self, test_instance, run) for run in it.islice(self._iter_runs(test_instance), limit)) except GithubRateLimitExceededException as e: raise lm_exceptions.RateLimitExceededException(detail=str(e), instance=test_instance) + @cache.memoize(timeout=Timeout.REQUEST) def get_test_build(self, test_instance: models.TestInstance, build_number: int) -> GithubTestBuild: try: logger.debug("Inefficient get_test_build implementation. Rewrite me!") @@ -208,14 +224,21 @@ def get_test_build(self, test_instance: models.TestInstance, build_number: int) except GithubRateLimitExceededException as e: raise lm_exceptions.RateLimitExceededException(detail=str(e), instance=test_instance) + @cache.memoize(timeout=Timeout.NONE) + def get_instance_external_link(self, test_instance: models.TestInstance) -> str: + _, repo_full_name, workflow_id = self._get_workflow_info(test_instance.resource) + return f'https://github.com/{repo_full_name}/actions/workflows/{workflow_id}' + + @cache.memoize(timeout=Timeout.NONE) def get_test_build_external_link(self, test_build: models.TestBuild) -> str: - repo = test_build.test_instance.testing_service._get_repo(test_build.test_instance) + repo = self._get_repo(test_build.test_instance) return f'https://github.com/{repo.full_name}/actions/runs/{test_build.id}' def get_test_build_output(self, test_instance: models.TestInstance, build_number, offset_bytes=0, limit_bytes=131072): raise lm_exceptions.NotImplementedException(detail="not supported for GitHub test builds") @classmethod + @cache.memoize(timeout=Timeout.NONE) def _parse_workflow_url(cls, resource: str) -> Tuple[str, str, str]: """ Utility method to parse github workflow URIs. 
Given a URL to the testing From 26525945e8b44f06cde67615430dbda1a587e759 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Thu, 4 Nov 2021 20:50:24 +0000 Subject: [PATCH 030/162] Update cache of Travis and Jenkins services --- lifemonitor/api/models/services/jenkins.py | 27 +++++++---------- lifemonitor/api/models/services/travis.py | 35 +++++++++++----------- 2 files changed, 29 insertions(+), 33 deletions(-) diff --git a/lifemonitor/api/models/services/jenkins.py b/lifemonitor/api/models/services/jenkins.py index 64648733b..af9528d12 100644 --- a/lifemonitor/api/models/services/jenkins.py +++ b/lifemonitor/api/models/services/jenkins.py @@ -69,6 +69,7 @@ def server(self) -> jenkins.Jenkins: return self._server @staticmethod + @cache.memoize(timeout=Timeout.NONE) def get_job_name(resource): # extract the job name from the resource path logger.debug(f"Getting project metadata - resource: {resource}") @@ -79,32 +80,32 @@ def get_job_name(resource): f"Unable to get the Jenkins job from the resource {job_name}") return job_name - @cache.memoize() + @cache.memoize(timeout=Timeout.NONE) def get_instance_external_link(self, test_instance: models.TestInstance) -> str: return self.get_project_metadata(test_instance)['url'] - @cache.memoize() + @cache.memoize(timeout=Timeout.REQUEST) def get_last_test_build(self, test_instance: models.TestInstance) -> Optional[JenkinsTestBuild]: metadata = self.get_project_metadata(test_instance) if 'lastBuild' in metadata and metadata['lastBuild']: return self.get_test_build(test_instance, metadata['lastBuild']['number']) return None - @cache.memoize() + @cache.memoize(timeout=Timeout.REQUEST) def get_last_passed_test_build(self, test_instance: models.TestInstance) -> Optional[JenkinsTestBuild]: metadata = self.get_project_metadata(test_instance) if 'lastSuccessfulBuild' in metadata and metadata['lastSuccessfulBuild']: return self.get_test_build(test_instance, metadata['lastSuccessfulBuild']['number']) return None - @cache.memoize() + @cache.memoize(timeout=Timeout.REQUEST) def get_last_failed_test_build(self, test_instance: models.TestInstance) -> Optional[JenkinsTestBuild]: metadata = self.get_project_metadata(test_instance) if 'lastFailedBuild' in metadata and metadata['lastFailedBuild']: return self.get_test_build(test_instance, metadata['lastFailedBuild']['number']) return None - @cache.memoize() + @cache.memoize(timeout=Timeout.REQUEST) def test_builds(self, test_instance: models.TestInstance) -> list: builds = [] metadata = self.get_project_metadata(test_instance) @@ -112,7 +113,7 @@ def test_builds(self, test_instance: models.TestInstance) -> list: builds.append(self.get_test_build(test_instance, build_info['number'])) return builds - @cache.memoize() + @cache.memoize(timeout=Timeout.REQUEST) def get_project_metadata(self, test_instance: models.TestInstance, fetch_all_builds=False): if not hasattr(test_instance, "_raw_metadata") or test_instance._raw_metadata is None: try: @@ -122,7 +123,7 @@ def get_project_metadata(self, test_instance: models.TestInstance, fetch_all_bui raise lm_exceptions.TestingServiceException(f"{self}: {e}") return test_instance._raw_metadata - @cache.memoize() + @cache.memoize(timeout=Timeout.REQUEST) def get_test_builds(self, test_instance: models.TestInstance, limit=10) -> list: builds = [] project_metadata = self.get_project_metadata(test_instance, fetch_all_builds=(limit > 100)) @@ -132,17 +133,11 @@ def get_test_builds(self, test_instance: models.TestInstance, limit=10) -> list: 
builds.append(self.get_test_build(test_instance, build_info['number'])) return builds - @cache.memoize() + @cache.memoize(timeout=Timeout.REQUEST) def _get_build_info(self, test_instance: models.TestInstance, build_number: int): return self.server.get_build_info(self.get_job_name(test_instance.resource), int(build_number)) - def _disable_build_cache(func, obj: JenkinsTestingService, - test_instance: models.TestInstance, build_number: int, - *args, **kwargs): - build = JenkinsTestBuild(obj, test_instance, obj._get_build_info(test_instance, build_number)) - return build.is_running() - - @cache.memoize(timeout=Timeout.BUILDS, unless=_disable_build_cache) + @cache.memoize(timeout=Timeout.REQUEST) def get_test_build(self, test_instance: models.TestInstance, build_number: int) -> JenkinsTestBuild: try: build_metadata = self._get_build_info(test_instance, build_number) @@ -152,7 +147,7 @@ def get_test_build(self, test_instance: models.TestInstance, build_number: int) except jenkins.JenkinsException as e: raise lm_exceptions.TestingServiceException(e) - @cache.memoize() + @cache.memoize(timeout=Timeout.NONE) def get_test_build_external_link(self, test_build: models.TestBuild) -> str: return urllib.parse.urljoin(test_build.url, "console") diff --git a/lifemonitor/api/models/services/travis.py b/lifemonitor/api/models/services/travis.py index aa85bb0e3..ef8b5c1e3 100644 --- a/lifemonitor/api/models/services/travis.py +++ b/lifemonitor/api/models/services/travis.py @@ -28,12 +28,11 @@ import lifemonitor.api.models as models import requests +from lifemonitor.api.models.services.service import TestingService from lifemonitor.cache import Timeout, cache from lifemonitor.exceptions import (EntityNotFoundException, TestingServiceException) -from .service import TestingService - # set module level logger logger = logging.getLogger(__name__) @@ -86,13 +85,14 @@ def _build_url(self, path, params=None): query = "?" 
+ urllib.parse.urlencode(params) if params else "" return urllib.parse.urljoin(self.api_base_url, path + query) - @cache.memoize() + @cache.memoize(Timeout.REQUEST) def _get(self, path, token: models.TestingServiceToken = None, params=None) -> object: logger.debug("Getting resource: %r", self._build_url(path, params)) response = requests.get(self._build_url(path, params), headers=self._build_headers(token)) return response.json() if response.status_code == 200 else response @staticmethod + # @cache.memoize(Timeout.NONE) def get_repo_id(test_instance: models.TestInstance, quote=True): # extract the job name from the resource path logger.debug(f"Getting project metadata - resource: {test_instance.resource}") @@ -104,7 +104,7 @@ def get_repo_id(test_instance: models.TestInstance, quote=True): f"Unable to get the Travis job from the resource {test_instance.resource}") return repo_id - @cache.memoize() + @cache.memoize(Timeout.NONE) def get_repo_slug(self, test_instance: models.TestInstance): metadata = self.get_project_metadata(test_instance) return metadata['slug'] @@ -128,32 +128,26 @@ def _get_last_test_build(self, test_instance: models.TestInstance, state=None) - except Exception as e: raise TestingServiceException(e) - @cache.memoize() - def get_instance_external_link(self, test_instance: models.TestInstance) -> str: - testing_service = test_instance.testing_service - repo_slug = testing_service.get_repo_slug(test_instance) - return urllib.parse.urljoin(testing_service.base_url, f'{repo_slug}/builds') - - @cache.memoize() + @cache.memoize(Timeout.REQUEST) def get_last_test_build(self, test_instance: models.TestInstance) -> Optional[models.TravisTestBuild]: return self._get_last_test_build(test_instance) - @cache.memoize() + # @cache.memoize() def get_last_passed_test_build(self, test_instance: models.TestInstance) -> Optional[models.TravisTestBuild]: return self._get_last_test_build(test_instance, state='passed') - @cache.memoize() + # @cache.memoize() def get_last_failed_test_build(self, test_instance: models.TestInstance) -> Optional[models.TravisTestBuild]: return self._get_last_test_build(test_instance, state='failed') - @cache.memoize() + # @cache.memoize(Timeout.REQUEST) def get_project_metadata(self, test_instance: models.TestInstance): try: return self._get("/repo/{}".format(self.get_repo_id(test_instance))) except Exception as e: raise TestingServiceException(f"{self}: {e}") - @cache.memoize() + @cache.memoize(timeout=Timeout.REQUEST) def get_test_builds(self, test_instance: models.TestInstance, limit=10) -> list: try: repo_id = self.get_repo_id(test_instance) @@ -172,6 +166,7 @@ def get_test_builds(self, test_instance: models.TestInstance, limit=10) -> list: except Exception as e: raise TestingServiceException(details=f"{e}") + @cache.memoize(timeout=Timeout.REQUEST) def _get_test_build(self, test_instance: models.TestInstance, build_number: int) -> models.TravisTestBuild: try: response = self._get("/build/{}".format(build_number)) @@ -191,11 +186,17 @@ def _disable_build_cache(func, obj: TravisTestingService, build = obj._get_test_build(test_instance, build_number) return build.is_running() - @cache.memoize(timeout=Timeout.BUILDS, unless=_disable_build_cache) + @cache.memoize(timeout=Timeout.REQUEST) def get_test_build(self, test_instance: models.TestInstance, build_number: int) -> models.TravisTestBuild: return self._get_test_build(test_instance, build_number) - @cache.memoize() + @cache.memoize(Timeout.NONE) + def get_instance_external_link(self, test_instance: 
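For reference, the URL construction in _build_url above reduces to a simple join (illustrative base URL and repository id; the real api_base_url comes from the service configuration):

    from urllib.parse import urlencode, urljoin

    path, params = "/repo/123/builds", {"limit": 10}
    url = urljoin("https://api.travis-ci.com", path + "?" + urlencode(params))
    # -> https://api.travis-ci.com/repo/123/builds?limit=10
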
models.TestInstance) -> str: + testing_service = test_instance.testing_service + repo_slug = testing_service.get_repo_slug(test_instance) + return urllib.parse.urljoin(testing_service.base_url, f'{repo_slug}/builds') + + @cache.memoize(Timeout.NONE) def get_test_build_external_link(self, test_build: models.TestBuild) -> str: testing_service = test_build.test_instance.testing_service repo_slug = testing_service.get_repo_slug(test_build.test_instance) From 6a7de5e09f104272578d8ab4e426b9553b4ef83a Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Thu, 4 Nov 2021 21:12:50 +0000 Subject: [PATCH 031/162] Update defaults --- lifemonitor/api/controllers.py | 42 +++++++++++++++++----------------- lifemonitor/cache.py | 6 ++--- settings.conf | 5 ++-- 3 files changed, 27 insertions(+), 26 deletions(-) diff --git a/lifemonitor/api/controllers.py b/lifemonitor/api/controllers.py index 9f8cc7dcc..991d73fcc 100644 --- a/lifemonitor/api/controllers.py +++ b/lifemonitor/api/controllers.py @@ -48,14 +48,14 @@ def _row_to_dict(row): return d -@cached(timeout=Timeout.REQUEST) +@cached(timeout=Timeout.SESSION) def workflow_registries_get(): registries = lm.get_workflow_registries() logger.debug("registries_get. Got %s registries", len(registries)) return serializers.ListOfWorkflowRegistriesSchema().dump(registries) -@cached(timeout=Timeout.REQUEST) +@cached(timeout=Timeout.SESSION) def workflow_registries_get_by_uuid(registry_uuid): registry = lm.get_workflow_registry_by_uuid(registry_uuid) logger.debug("registries_get. Got %s registry", registry) @@ -63,7 +63,7 @@ def workflow_registries_get_by_uuid(registry_uuid): @authorized -@cached(timeout=Timeout.REQUEST) +@cached(timeout=Timeout.SESSION) def workflow_registries_get_current(): if current_registry: registry = current_registry @@ -72,7 +72,7 @@ def workflow_registries_get_current(): return lm_exceptions.report_problem(401, "Unauthorized") -@cached(timeout=Timeout.REQUEST) +@cached(timeout=Timeout.SESSION) def workflows_get(status=False): workflows = lm.get_public_workflows() if current_user and not current_user.is_anonymous: @@ -112,7 +112,7 @@ def _get_workflow_or_problem(wf_uuid, wf_version=None): detail=messages.unauthorized_workflow_access.format(wf_uuid)) -@cached(timeout=Timeout.REQUEST) +@cached(timeout=Timeout.SESSION) def workflows_get_by_id(wf_uuid, wf_version): response = _get_workflow_or_problem(wf_uuid, wf_version) return response if isinstance(response, Response) \ @@ -121,7 +121,7 @@ def workflows_get_by_id(wf_uuid, wf_version): else None).dump(response) -@cached(timeout=Timeout.REQUEST) +@cached(timeout=Timeout.SESSION) def workflows_get_latest_version_by_id(wf_uuid, previous_versions=False, ro_crate=False): response = _get_workflow_or_problem(wf_uuid, None) exclude = ['previous_versions'] if not previous_versions else [] @@ -132,14 +132,14 @@ def workflows_get_latest_version_by_id(wf_uuid, previous_versions=False, ro_crat subscriptionsOf=[current_user] if not current_user.is_anonymous else None).dump(response) -@cached(timeout=Timeout.REQUEST) +@cached(timeout=Timeout.SESSION) def workflows_get_versions_by_id(wf_uuid): response = _get_workflow_or_problem(wf_uuid, None) return response if isinstance(response, Response) \ else serializers.ListOfWorkflowVersions().dump(response.workflow) -@cached(timeout=Timeout.REQUEST) +@cached(timeout=Timeout.SESSION) def workflows_get_status(wf_uuid): wf_version = request.args.get('version', 'latest').lower() response = _get_workflow_or_problem(wf_uuid, wf_version) @@ -147,7 +147,7 @@ def 
workflows_get_status(wf_uuid): else serializers.WorkflowStatusSchema().dump(response) -@cached(timeout=Timeout.REQUEST) +@cached(timeout=Timeout.SESSION) def workflows_rocrate_metadata(wf_uuid, wf_version): response = _get_workflow_or_problem(wf_uuid, wf_version) if isinstance(response, Response): @@ -155,7 +155,7 @@ def workflows_rocrate_metadata(wf_uuid, wf_version): return response.crate_metadata -@cached(timeout=Timeout.REQUEST) +@cached(timeout=Timeout.SESSION) def workflows_rocrate_download(wf_uuid, wf_version): response = _get_workflow_or_problem(wf_uuid, wf_version) if isinstance(response, Response): @@ -175,7 +175,7 @@ def workflows_rocrate_download(wf_uuid, wf_version): @authorized -@cached(timeout=Timeout.REQUEST) +@cached(timeout=Timeout.SESSION) def registry_workflows_get(status=False): workflows = lm.get_registry_workflows(current_registry) logger.debug("workflows_get. Got %s workflows (registry: %s)", len(workflows), current_registry) @@ -191,7 +191,7 @@ def registry_workflows_post(body): @authorized -@cached(timeout=Timeout.REQUEST) +@cached(timeout=Timeout.SESSION) def registry_user_workflows_get(user_id, status=False): if not current_registry: return lm_exceptions.report_problem(401, "Unauthorized", detail=messages.no_registry_found) @@ -215,7 +215,7 @@ def registry_user_workflows_post(user_id, body): @authorized -@cached(timeout=Timeout.REQUEST) +@cached(timeout=Timeout.SESSION) def user_workflows_get(status=False, subscriptions=False): if not current_user or current_user.is_anonymous: return lm_exceptions.report_problem(401, "Unauthorized", detail=messages.no_user_in_session) @@ -259,7 +259,7 @@ def user_workflow_unsubscribe(wf_uuid): @authorized -@cached(timeout=Timeout.REQUEST) +@cached(timeout=Timeout.SESSION) def user_registry_workflows_get(registry_uuid, status=False): if not current_user or current_user.is_anonymous: return lm_exceptions.report_problem(401, "Unauthorized", detail=messages.no_user_in_session) @@ -429,7 +429,7 @@ def workflows_delete(wf_uuid, wf_version): raise lm_exceptions.LifeMonitorException(title="Internal Error", detail=str(e)) -@cached(timeout=Timeout.REQUEST) +@cached(timeout=Timeout.SESSION) def workflows_get_suites(wf_uuid, version='latest'): response = _get_workflow_or_problem(wf_uuid, version) return response if isinstance(response, Response) \ @@ -464,21 +464,21 @@ def _get_suite_or_problem(suite_uuid): return lm_exceptions.report_problem(404, "Not Found", detail=messages.suite_not_found.format(suite_uuid)) -@cached(timeout=Timeout.REQUEST) +@cached(timeout=Timeout.SESSION) def suites_get_by_uuid(suite_uuid): response = _get_suite_or_problem(suite_uuid) return response if isinstance(response, Response) \ else serializers.SuiteSchema().dump(response) -@cached(timeout=Timeout.REQUEST) +@cached(timeout=Timeout.SESSION) def suites_get_status(suite_uuid): response = _get_suite_or_problem(suite_uuid) return response if isinstance(response, Response) \ else serializers.SuiteStatusSchema().dump(response.status) -@cached(timeout=Timeout.REQUEST) +@cached(timeout=Timeout.SESSION) def suites_get_instances(suite_uuid): response = _get_suite_or_problem(suite_uuid) return response if isinstance(response, Response) \ @@ -572,7 +572,7 @@ def _get_instances_or_problem(instance_uuid): detail=messages.instance_not_found.format(instance_uuid)) -@cached(timeout=Timeout.REQUEST) +@cached(timeout=Timeout.SESSION) def instances_get_by_id(instance_uuid): response = _get_instances_or_problem(instance_uuid) return response if isinstance(response, Response) \ @@ 
-599,7 +599,7 @@ def instances_delete_by_id(instance_uuid): raise lm_exceptions.LifeMonitorException(title="Internal Error", detail=str(e)) -@cached(timeout=Timeout.REQUEST) +@cached(timeout=Timeout.SESSION) def instances_get_builds(instance_uuid, limit): response = _get_instances_or_problem(instance_uuid) logger.info("Number of builds to load: %r", limit) @@ -607,7 +607,7 @@ def instances_get_builds(instance_uuid, limit): else serializers.ListOfTestBuildsSchema().dump(response.get_test_builds(limit=limit)) -@cached(timeout=Timeout.REQUEST) +@cached(timeout=Timeout.SESSION) def instances_builds_get_by_id(instance_uuid, build_id): response = _get_instances_or_problem(instance_uuid) if isinstance(response, Response): diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index d591dabe1..58e0de99b 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -32,9 +32,9 @@ class Timeout: NONE = 0 - DEFAULT = os.environ.get('CACHE_DEFAULT_TIMEOUT', 300) - REQUEST = os.environ.get('CACHE_REQUEST_TIMEOUT', 60) - SESSION = os.environ.get('CACHE_SESSION_TIMEOUT', 3600) + DEFAULT = os.environ.get('CACHE_DEFAULT_TIMEOUT', 60) + REQUEST = os.environ.get('CACHE_REQUEST_TIMEOUT', 300) + SESSION = os.environ.get('CACHE_SESSION_TIMEOUT', 600) BUILDS = os.environ.get('CACHE_SESSION_TIMEOUT', 84600) diff --git a/settings.conf b/settings.conf index 56fd419a8..22a01de1d 100644 --- a/settings.conf +++ b/settings.conf @@ -55,8 +55,9 @@ REDIS_PORT_NUMBER=6379 # Cache settings CACHE_REDIS_DB=0 -CACHE_DEFAULT_TIMEOUT=300 -CACHE_SESSION_TIMEOUT=3600 +CACHE_DEFAULT_TIMEOUT=60 +CACHE_REQUEST_TIMEOUT=300 +CACHE_SESSION_TIMEOUT=600 CACHE_BUILDS_TIMEOUT=84600 # Github OAuth2 settings From 7d137e59e3b3aeb95c372edd72e38cd53b2d4731 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Thu, 4 Nov 2021 21:14:21 +0000 Subject: [PATCH 032/162] Add simple task to update cache --- lifemonitor/tasks/tasks.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/lifemonitor/tasks/tasks.py b/lifemonitor/tasks/tasks.py index 47756a929..27ee1444f 100644 --- a/lifemonitor/tasks/tasks.py +++ b/lifemonitor/tasks/tasks.py @@ -39,3 +39,18 @@ def decorator(actor): @dramatiq.actor def heartbeat(): logger.info("Heartbeat!") + + +@schedule(CronTrigger(minute="*/5")) +@dramatiq.actor +def check_last_build(): + logger.info("Checking last build....") + from lifemonitor.api.models import Workflow + + for w in Workflow.all(): + for s in w.latest_version.test_suites: + logger.info("Updating workflow: %r", w) + for i in s.test_instances: + i.refresh() + + logger.info("Checking last build: DONE!") From cbe59e11a285f57501c3c0b4636c5911af07ad88 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Fri, 5 Nov 2021 12:07:42 +0000 Subject: [PATCH 033/162] Fix get test build --- lifemonitor/api/models/testsuites/testinstance.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/lifemonitor/api/models/testsuites/testinstance.py b/lifemonitor/api/models/testsuites/testinstance.py index 6147d6abf..084028168 100644 --- a/lifemonitor/api/models/testsuites/testinstance.py +++ b/lifemonitor/api/models/testsuites/testinstance.py @@ -91,7 +91,6 @@ def external_link(self): @property @cache.memoize(timeout=Timeout.REQUEST) def last_test_build(self): - # return self.testing_service.get_last_test_build(self) builds = self.get_test_builds() return builds[0] if builds and len(builds) > 0 else None @@ -103,8 +102,7 @@ def get_test_builds(self, limit=10): @cache.memoize(timeout=Timeout.REQUEST) def get_test_build(self, 
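A mismatch in PATCH 031 worth flagging: settings.conf documents CACHE_BUILDS_TIMEOUT, but lifemonitor/cache.py still reads CACHE_SESSION_TIMEOUT for Timeout.BUILDS, so the builds timeout can only be overridden through the session variable; the 84600 default also looks like a transposition of 86400 (24 hours). If that reading is right, the corrected line would presumably be:

    BUILDS = int(os.environ.get('CACHE_BUILDS_TIMEOUT', 86400))  # 24 h, coerced to int
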
build_number): logger.debug("Getting test build...") - # return self.testing_service.get_test_build(self, build_number) - return next((b for b in self.get_test_builds() if b.number == build_number), None) + return self.testing_service.get_test_build(self, build_number) def to_dict(self, test_build=False, test_output=False): data = { From 575ec402acd0f40159e6b748a5e07388e8ba6795 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Fri, 5 Nov 2021 12:08:04 +0000 Subject: [PATCH 034/162] Fix tests --- tests/integration/api/controllers/test_instances.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/api/controllers/test_instances.py b/tests/integration/api/controllers/test_instances.py index 9544cd6f7..34cdaa684 100644 --- a/tests/integration/api/controllers/test_instances.py +++ b/tests/integration/api/controllers/test_instances.py @@ -256,7 +256,7 @@ def test_get_instance_build(app_client, client_auth_method, user1, user1_auth, v response = app_client.get(f"{utils.build_instances_path(instance.uuid)}/builds/{build.id}", headers=user1_auth) logger.debug(response) - utils.assert_status_code(response.status_code, 200) + utils.assert_status_code(200, response.status_code) data = json.loads(response.data) logger.debug("Response data: %r", data) # redundant check: the validation is performed by the connexion framework @@ -282,7 +282,7 @@ def test_get_instance_build_rate_limit_exceeded(app_client, client_auth_method, response = app_client.get(f"{utils.build_instances_path(instance.uuid)}/builds/0", headers=user1_auth) logger.debug(response) - utils.assert_status_code(response.status_code, 403) + utils.assert_status_code(403, response.status_code) data = json.loads(response.data) logger.debug("Response data: %r", data) assert data['title'] == 'Rate Limit Exceeded', "Unexpected error title" From 2e4f092fd7b6d983fce51047911595cc469da07b Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Fri, 5 Nov 2021 17:12:47 +0000 Subject: [PATCH 035/162] Disable task to update cache --- lifemonitor/tasks/tasks.py | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/lifemonitor/tasks/tasks.py b/lifemonitor/tasks/tasks.py index 27ee1444f..e880b300d 100644 --- a/lifemonitor/tasks/tasks.py +++ b/lifemonitor/tasks/tasks.py @@ -41,16 +41,16 @@ def heartbeat(): logger.info("Heartbeat!") -@schedule(CronTrigger(minute="*/5")) -@dramatiq.actor -def check_last_build(): - logger.info("Checking last build....") - from lifemonitor.api.models import Workflow - - for w in Workflow.all(): - for s in w.latest_version.test_suites: - logger.info("Updating workflow: %r", w) - for i in s.test_instances: - i.refresh() - - logger.info("Checking last build: DONE!") +# @schedule(CronTrigger(minute="*/5")) +# @dramatiq.actor +# def check_last_build(): +# logger.info("Checking last build....") +# from lifemonitor.api.models import Workflow + +# for w in Workflow.all(): +# for s in w.latest_version.test_suites: +# #logger.info("Updating workflow: %r", w) +# for i in s.test_instances: +# i.refresh() + +# logger.info("Checking last build: DONE!") From eb46e5dcc05fc7db424bf055d4bb8d628a63594f Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Mon, 8 Nov 2021 16:56:21 +0000 Subject: [PATCH 036/162] Set timeout of controller cache to request --- lifemonitor/api/controllers.py | 42 +++++++++++++++++----------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/lifemonitor/api/controllers.py b/lifemonitor/api/controllers.py index 
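For context on the scheduling machinery behind the now-disabled check_last_build task: CronTrigger(minute="*/5") fires at every fifth minute. A self-contained sketch of wiring an actor to such a trigger, assuming the project's schedule decorator does something equivalent with APScheduler:

    import dramatiq
    from apscheduler.schedulers.blocking import BlockingScheduler
    from apscheduler.triggers.cron import CronTrigger

    @dramatiq.actor
    def check_last_build():
        ...  # refresh cached builds, as in the commented-out body above

    scheduler = BlockingScheduler()
    # enqueue the actor at :00, :05, :10, ... of every hour
    scheduler.add_job(check_last_build.send, CronTrigger(minute="*/5"))
    scheduler.start()
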
991d73fcc..9f8cc7dcc 100644 --- a/lifemonitor/api/controllers.py +++ b/lifemonitor/api/controllers.py @@ -48,14 +48,14 @@ def _row_to_dict(row): return d -@cached(timeout=Timeout.SESSION) +@cached(timeout=Timeout.REQUEST) def workflow_registries_get(): registries = lm.get_workflow_registries() logger.debug("registries_get. Got %s registries", len(registries)) return serializers.ListOfWorkflowRegistriesSchema().dump(registries) -@cached(timeout=Timeout.SESSION) +@cached(timeout=Timeout.REQUEST) def workflow_registries_get_by_uuid(registry_uuid): registry = lm.get_workflow_registry_by_uuid(registry_uuid) logger.debug("registries_get. Got %s registry", registry) @@ -63,7 +63,7 @@ def workflow_registries_get_by_uuid(registry_uuid): @authorized -@cached(timeout=Timeout.SESSION) +@cached(timeout=Timeout.REQUEST) def workflow_registries_get_current(): if current_registry: registry = current_registry @@ -72,7 +72,7 @@ def workflow_registries_get_current(): return lm_exceptions.report_problem(401, "Unauthorized") -@cached(timeout=Timeout.SESSION) +@cached(timeout=Timeout.REQUEST) def workflows_get(status=False): workflows = lm.get_public_workflows() if current_user and not current_user.is_anonymous: @@ -112,7 +112,7 @@ def _get_workflow_or_problem(wf_uuid, wf_version=None): detail=messages.unauthorized_workflow_access.format(wf_uuid)) -@cached(timeout=Timeout.SESSION) +@cached(timeout=Timeout.REQUEST) def workflows_get_by_id(wf_uuid, wf_version): response = _get_workflow_or_problem(wf_uuid, wf_version) return response if isinstance(response, Response) \ @@ -121,7 +121,7 @@ def workflows_get_by_id(wf_uuid, wf_version): else None).dump(response) -@cached(timeout=Timeout.SESSION) +@cached(timeout=Timeout.REQUEST) def workflows_get_latest_version_by_id(wf_uuid, previous_versions=False, ro_crate=False): response = _get_workflow_or_problem(wf_uuid, None) exclude = ['previous_versions'] if not previous_versions else [] @@ -132,14 +132,14 @@ def workflows_get_latest_version_by_id(wf_uuid, previous_versions=False, ro_crat subscriptionsOf=[current_user] if not current_user.is_anonymous else None).dump(response) -@cached(timeout=Timeout.SESSION) +@cached(timeout=Timeout.REQUEST) def workflows_get_versions_by_id(wf_uuid): response = _get_workflow_or_problem(wf_uuid, None) return response if isinstance(response, Response) \ else serializers.ListOfWorkflowVersions().dump(response.workflow) -@cached(timeout=Timeout.SESSION) +@cached(timeout=Timeout.REQUEST) def workflows_get_status(wf_uuid): wf_version = request.args.get('version', 'latest').lower() response = _get_workflow_or_problem(wf_uuid, wf_version) @@ -147,7 +147,7 @@ def workflows_get_status(wf_uuid): else serializers.WorkflowStatusSchema().dump(response) -@cached(timeout=Timeout.SESSION) +@cached(timeout=Timeout.REQUEST) def workflows_rocrate_metadata(wf_uuid, wf_version): response = _get_workflow_or_problem(wf_uuid, wf_version) if isinstance(response, Response): @@ -155,7 +155,7 @@ def workflows_rocrate_metadata(wf_uuid, wf_version): return response.crate_metadata -@cached(timeout=Timeout.SESSION) +@cached(timeout=Timeout.REQUEST) def workflows_rocrate_download(wf_uuid, wf_version): response = _get_workflow_or_problem(wf_uuid, wf_version) if isinstance(response, Response): @@ -175,7 +175,7 @@ def workflows_rocrate_download(wf_uuid, wf_version): @authorized -@cached(timeout=Timeout.SESSION) +@cached(timeout=Timeout.REQUEST) def registry_workflows_get(status=False): workflows = lm.get_registry_workflows(current_registry) logger.debug("workflows_get. 
Got %s workflows (registry: %s)", len(workflows), current_registry) @@ -191,7 +191,7 @@ def registry_workflows_post(body): @authorized -@cached(timeout=Timeout.SESSION) +@cached(timeout=Timeout.REQUEST) def registry_user_workflows_get(user_id, status=False): if not current_registry: return lm_exceptions.report_problem(401, "Unauthorized", detail=messages.no_registry_found) @@ -215,7 +215,7 @@ def registry_user_workflows_post(user_id, body): @authorized -@cached(timeout=Timeout.SESSION) +@cached(timeout=Timeout.REQUEST) def user_workflows_get(status=False, subscriptions=False): if not current_user or current_user.is_anonymous: return lm_exceptions.report_problem(401, "Unauthorized", detail=messages.no_user_in_session) @@ -259,7 +259,7 @@ def user_workflow_unsubscribe(wf_uuid): @authorized -@cached(timeout=Timeout.SESSION) +@cached(timeout=Timeout.REQUEST) def user_registry_workflows_get(registry_uuid, status=False): if not current_user or current_user.is_anonymous: return lm_exceptions.report_problem(401, "Unauthorized", detail=messages.no_user_in_session) @@ -429,7 +429,7 @@ def workflows_delete(wf_uuid, wf_version): raise lm_exceptions.LifeMonitorException(title="Internal Error", detail=str(e)) -@cached(timeout=Timeout.SESSION) +@cached(timeout=Timeout.REQUEST) def workflows_get_suites(wf_uuid, version='latest'): response = _get_workflow_or_problem(wf_uuid, version) return response if isinstance(response, Response) \ @@ -464,21 +464,21 @@ def _get_suite_or_problem(suite_uuid): return lm_exceptions.report_problem(404, "Not Found", detail=messages.suite_not_found.format(suite_uuid)) -@cached(timeout=Timeout.SESSION) +@cached(timeout=Timeout.REQUEST) def suites_get_by_uuid(suite_uuid): response = _get_suite_or_problem(suite_uuid) return response if isinstance(response, Response) \ else serializers.SuiteSchema().dump(response) -@cached(timeout=Timeout.SESSION) +@cached(timeout=Timeout.REQUEST) def suites_get_status(suite_uuid): response = _get_suite_or_problem(suite_uuid) return response if isinstance(response, Response) \ else serializers.SuiteStatusSchema().dump(response.status) -@cached(timeout=Timeout.SESSION) +@cached(timeout=Timeout.REQUEST) def suites_get_instances(suite_uuid): response = _get_suite_or_problem(suite_uuid) return response if isinstance(response, Response) \ @@ -572,7 +572,7 @@ def _get_instances_or_problem(instance_uuid): detail=messages.instance_not_found.format(instance_uuid)) -@cached(timeout=Timeout.SESSION) +@cached(timeout=Timeout.REQUEST) def instances_get_by_id(instance_uuid): response = _get_instances_or_problem(instance_uuid) return response if isinstance(response, Response) \ @@ -599,7 +599,7 @@ def instances_delete_by_id(instance_uuid): raise lm_exceptions.LifeMonitorException(title="Internal Error", detail=str(e)) -@cached(timeout=Timeout.SESSION) +@cached(timeout=Timeout.REQUEST) def instances_get_builds(instance_uuid, limit): response = _get_instances_or_problem(instance_uuid) logger.info("Number of builds to load: %r", limit) @@ -607,7 +607,7 @@ def instances_get_builds(instance_uuid, limit): else serializers.ListOfTestBuildsSchema().dump(response.get_test_builds(limit=limit)) -@cached(timeout=Timeout.SESSION) +@cached(timeout=Timeout.REQUEST) def instances_builds_get_by_id(instance_uuid, build_id): response = _get_instances_or_problem(instance_uuid) if isinstance(response, Response): From 885864761e54b3258c620cd84f8e35e747234496 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Mon, 8 Nov 2021 16:57:41 +0000 Subject: [PATCH 037/162] Add cache 
helper to model classes --- lifemonitor/cache.py | 51 +++++++++++++++++++++++++++++++++++++++++-- lifemonitor/models.py | 3 ++- requirements.txt | 1 + 3 files changed, 52 insertions(+), 3 deletions(-) diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index 58e0de99b..55eaf2ae0 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -23,14 +23,14 @@ import functools import logging import os +import redis_lock from flask.app import Flask from flask_caching import Cache -# Set default timeouts - class Timeout: + # Set default timeouts NONE = 0 DEFAULT = os.environ.get('CACHE_DEFAULT_TIMEOUT', 60) REQUEST = os.environ.get('CACHE_REQUEST_TIMEOUT', 300) @@ -122,3 +122,50 @@ def wrapper(*args, **kwargs): return wrapper return decorator + + +class CacheMixin(object): + + _helper: CacheHelper = None + + @property + def cache(self) -> CacheHelper: + if self._helper is None: + self._helper = CacheHelper() + return self._helper + + +class CacheHelper(object): + + # Enable/Disable cache + cache_enabled = True + # Ignore cache values even if cache is enabled + ignore_cache_values = False + + @staticmethod + def size(): + return len(cache.get_dict()) + + @staticmethod + def to_dict(): + return cache.get_dict() + + @staticmethod + def lock(key: str): + return redis_lock.Lock(cache.cache._read_clients, key) + + def set(self, key: str, value, timeout: int = Timeout.NONE): + val = None + if key is not None and self.cache_enabled: + lock = self.lock(key) + if lock.acquire(blocking=True): + try: + val = cache.get(key) + if not val: + cache.set(key, value, timeout=timeout) + finally: + lock.release() + return val + + def get(self, key: str): + return cache.get(key) if self.cache_enabled and not self.ignore_cache_values else None diff --git a/lifemonitor/models.py b/lifemonitor/models.py index b67532368..b38ac3863 100644 --- a/lifemonitor/models.py +++ b/lifemonitor/models.py @@ -24,10 +24,11 @@ from typing import List from lifemonitor.db import db +from lifemonitor.cache import CacheMixin from sqlalchemy import types -class ModelMixin(object): +class ModelMixin(CacheMixin): def refresh(self, **kwargs): db.session.refresh(self, **kwargs) diff --git a/requirements.txt b/requirements.txt index 462a06a40..aad751f3a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -34,3 +34,4 @@ requests~=2.26.0 rocrate~=0.4.0 SQLAlchemy~=1.3.23 wheel~=0.37.0 +python-redis-lock~=3.7.0 From 3a43fc16572b58383a7171c78cd4aa296df54b6a Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Mon, 8 Nov 2021 17:01:31 +0000 Subject: [PATCH 038/162] Update cache of testing services --- lifemonitor/api/models/services/github.py | 40 ++++++++++++---------- lifemonitor/api/models/services/jenkins.py | 13 +------ lifemonitor/api/models/services/travis.py | 25 ++++++-------- 3 files changed, 34 insertions(+), 44 deletions(-) diff --git a/lifemonitor/api/models/services/github.py b/lifemonitor/api/models/services/github.py index 42cefb1fd..1e5a2917a 100644 --- a/lifemonitor/api/models/services/github.py +++ b/lifemonitor/api/models/services/github.py @@ -29,7 +29,7 @@ import lifemonitor.api.models as models import lifemonitor.exceptions as lm_exceptions -from lifemonitor.cache import Timeout, cache + import github from github import Github, GithubException @@ -104,17 +104,23 @@ def _gh_service(self) -> Github: self.initialize() return self._gh_obj - @cache.memoize(timeout=Timeout.NONE) def _get_workflow_info(self, resource): return self._parse_workflow_url(resource) - @cache.memoize(timeout=Timeout.NONE) def _get_repo(self, 
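The repository lookup in _get_repo (continued below) follows the get-or-fetch-and-store idiom that the new CacheHelper enables; reduced to its essentials (a sketch: helper stands for the cache property from CacheMixin, fetch for the remote PyGithub call):

    def get_or_fetch(helper, key, fetch, timeout=Timeout.NONE):
        # try the cache first; on a miss, compute once and store.
        # CacheHelper.set acquires a per-key redis lock, so concurrent
        # workers do not race to recompute the same entry.
        value = helper.get(key)
        if value is None:
            value = fetch()
            helper.set(key, value, timeout=timeout)
        return value
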
test_instance: models.TestInstance): - _, repo_full_name, _ = self._get_workflow_info(test_instance.resource) - repository = self._gh_service.get_repo(repo_full_name) - logger.debug("Repo ID: %s", repository.id) - logger.debug("Repo full name: %s", repository.full_name) - logger.debug("Repo URL: %s", f'https://github.com/{repository.full_name}') + logger.debug("Getting github repository...") + key = f"github_repo_{test_instance.uuid}" + repository = self.cache.get(key) + if repository is None: + logger.debug("Getting github repository from remote service...") + _, repo_full_name, _ = self._get_workflow_info(test_instance.resource) + repository = self._gh_service.get_repo(repo_full_name) + logger.debug("Repo ID: %s", repository.id) + logger.debug("Repo full name: %s", repository.full_name) + logger.debug("Repo URL: %s", f'https://github.com/{repository.full_name}') + self.cache.set(key, repository) + else: + logger.debug("Reusing github repository from cache...") return repository @staticmethod @@ -138,9 +144,15 @@ def check_connection(self) -> bool: logger.info("Caught exception from Github GET /rate_limit: %s. Connection not working?", e) return False - @cache.memoize(timeout=Timeout.SESSION) def _get_gh_workflow(self, repository, workflow_id): - return self._gh_service.get_repo(repository).get_workflow(workflow_id) + logger.debug("Getting github workflow...") + key = f"github_workflow_{repository}_{workflow_id}" + workflow = self.cache.get(key) + if workflow is None: + logger.debug("Getting github workflow from remote service...") + workflow = self._gh_service.get_repo(repository).get_workflow(workflow_id) + self.cache.set(key, workflow) + return workflow def _iter_runs(self, test_instance: models.TestInstance, status: str = None) -> Generator[github.WorkflowRun.WorkflowRun]: _, repository, workflow_id = self._get_workflow_info(test_instance.resource) @@ -162,7 +174,6 @@ def _iter_runs(self, test_instance: models.TestInstance, status: str = None) -> if status is None or run.status == status: yield run - @cache.memoize(timeout=Timeout.REQUEST) def get_last_test_build(self, test_instance: models.TestInstance) -> Optional[GithubTestBuild]: try: logger.debug("Getting latest build...") @@ -173,7 +184,6 @@ def get_last_test_build(self, test_instance: models.TestInstance) -> Optional[Gi except GithubRateLimitExceededException as e: raise lm_exceptions.RateLimitExceededException(detail=str(e), instance=test_instance) - @cache.memoize(timeout=Timeout.REQUEST) def get_last_passed_test_build(self, test_instance: models.TestInstance) -> Optional[GithubTestBuild]: try: logger.debug("Getting last passed build...") @@ -184,7 +194,6 @@ def get_last_passed_test_build(self, test_instance: models.TestInstance) -> Opti except GithubRateLimitExceededException as e: raise lm_exceptions.RateLimitExceededException(detail=str(e), instance=test_instance) - @cache.memoize(timeout=Timeout.REQUEST) def get_last_failed_test_build(self, test_instance: models.TestInstance) -> Optional[GithubTestBuild]: try: logger.debug("Getting last failed build...") @@ -195,7 +204,6 @@ def get_last_failed_test_build(self, test_instance: models.TestInstance) -> Opti except GithubRateLimitExceededException as e: raise lm_exceptions.RateLimitExceededException(detail=str(e), instance=test_instance) - @cache.memoize(timeout=Timeout.REQUEST) def get_test_builds(self, test_instance: models.TestInstance, limit=10) -> list: try: logger.debug("Getting test builds...") @@ -204,7 +212,6 @@ def get_test_builds(self, test_instance: 
models.TestInstance, limit=10) -> list: except GithubRateLimitExceededException as e: raise lm_exceptions.RateLimitExceededException(detail=str(e), instance=test_instance) - @cache.memoize(timeout=Timeout.REQUEST) def get_test_build(self, test_instance: models.TestInstance, build_number: int) -> GithubTestBuild: try: logger.debug("Inefficient get_test_build implementation. Rewrite me!") @@ -224,12 +231,10 @@ def get_test_build(self, test_instance: models.TestInstance, build_number: int) except GithubRateLimitExceededException as e: raise lm_exceptions.RateLimitExceededException(detail=str(e), instance=test_instance) - @cache.memoize(timeout=Timeout.NONE) def get_instance_external_link(self, test_instance: models.TestInstance) -> str: _, repo_full_name, workflow_id = self._get_workflow_info(test_instance.resource) return f'https://github.com/{repo_full_name}/actions/workflows/{workflow_id}' - @cache.memoize(timeout=Timeout.NONE) def get_test_build_external_link(self, test_build: models.TestBuild) -> str: repo = self._get_repo(test_build.test_instance) return f'https://github.com/{repo.full_name}/actions/runs/{test_build.id}' @@ -238,7 +243,6 @@ def get_test_build_output(self, test_instance: models.TestInstance, build_number raise lm_exceptions.NotImplementedException(detail="not supported for GitHub test builds") @classmethod - @cache.memoize(timeout=Timeout.NONE) def _parse_workflow_url(cls, resource: str) -> Tuple[str, str, str]: """ Utility method to parse github workflow URIs. Given a URL to the testing diff --git a/lifemonitor/api/models/services/jenkins.py b/lifemonitor/api/models/services/jenkins.py index af9528d12..e439fa47a 100644 --- a/lifemonitor/api/models/services/jenkins.py +++ b/lifemonitor/api/models/services/jenkins.py @@ -27,7 +27,7 @@ import lifemonitor.api.models as models import lifemonitor.exceptions as lm_exceptions -from lifemonitor.cache import Timeout, cache + from lifemonitor.lang import messages import jenkins @@ -69,7 +69,6 @@ def server(self) -> jenkins.Jenkins: return self._server @staticmethod - @cache.memoize(timeout=Timeout.NONE) def get_job_name(resource): # extract the job name from the resource path logger.debug(f"Getting project metadata - resource: {resource}") @@ -80,32 +79,27 @@ def get_job_name(resource): f"Unable to get the Jenkins job from the resource {job_name}") return job_name - @cache.memoize(timeout=Timeout.NONE) def get_instance_external_link(self, test_instance: models.TestInstance) -> str: return self.get_project_metadata(test_instance)['url'] - @cache.memoize(timeout=Timeout.REQUEST) def get_last_test_build(self, test_instance: models.TestInstance) -> Optional[JenkinsTestBuild]: metadata = self.get_project_metadata(test_instance) if 'lastBuild' in metadata and metadata['lastBuild']: return self.get_test_build(test_instance, metadata['lastBuild']['number']) return None - @cache.memoize(timeout=Timeout.REQUEST) def get_last_passed_test_build(self, test_instance: models.TestInstance) -> Optional[JenkinsTestBuild]: metadata = self.get_project_metadata(test_instance) if 'lastSuccessfulBuild' in metadata and metadata['lastSuccessfulBuild']: return self.get_test_build(test_instance, metadata['lastSuccessfulBuild']['number']) return None - @cache.memoize(timeout=Timeout.REQUEST) def get_last_failed_test_build(self, test_instance: models.TestInstance) -> Optional[JenkinsTestBuild]: metadata = self.get_project_metadata(test_instance) if 'lastFailedBuild' in metadata and metadata['lastFailedBuild']: return self.get_test_build(test_instance, 
metadata['lastFailedBuild']['number']) return None - @cache.memoize(timeout=Timeout.REQUEST) def test_builds(self, test_instance: models.TestInstance) -> list: builds = [] metadata = self.get_project_metadata(test_instance) @@ -113,7 +107,6 @@ def test_builds(self, test_instance: models.TestInstance) -> list: builds.append(self.get_test_build(test_instance, build_info['number'])) return builds - @cache.memoize(timeout=Timeout.REQUEST) def get_project_metadata(self, test_instance: models.TestInstance, fetch_all_builds=False): if not hasattr(test_instance, "_raw_metadata") or test_instance._raw_metadata is None: try: @@ -123,7 +116,6 @@ def get_project_metadata(self, test_instance: models.TestInstance, fetch_all_bui raise lm_exceptions.TestingServiceException(f"{self}: {e}") return test_instance._raw_metadata - @cache.memoize(timeout=Timeout.REQUEST) def get_test_builds(self, test_instance: models.TestInstance, limit=10) -> list: builds = [] project_metadata = self.get_project_metadata(test_instance, fetch_all_builds=(limit > 100)) @@ -133,11 +125,9 @@ def get_test_builds(self, test_instance: models.TestInstance, limit=10) -> list: builds.append(self.get_test_build(test_instance, build_info['number'])) return builds - @cache.memoize(timeout=Timeout.REQUEST) def _get_build_info(self, test_instance: models.TestInstance, build_number: int): return self.server.get_build_info(self.get_job_name(test_instance.resource), int(build_number)) - @cache.memoize(timeout=Timeout.REQUEST) def get_test_build(self, test_instance: models.TestInstance, build_number: int) -> JenkinsTestBuild: try: build_metadata = self._get_build_info(test_instance, build_number) @@ -147,7 +137,6 @@ def get_test_build(self, test_instance: models.TestInstance, build_number: int) except jenkins.JenkinsException as e: raise lm_exceptions.TestingServiceException(e) - @cache.memoize(timeout=Timeout.NONE) def get_test_build_external_link(self, test_build: models.TestBuild) -> str: return urllib.parse.urljoin(test_build.url, "console") diff --git a/lifemonitor/api/models/services/travis.py b/lifemonitor/api/models/services/travis.py index ef8b5c1e3..ffa0cbfe3 100644 --- a/lifemonitor/api/models/services/travis.py +++ b/lifemonitor/api/models/services/travis.py @@ -29,7 +29,6 @@ import lifemonitor.api.models as models import requests from lifemonitor.api.models.services.service import TestingService -from lifemonitor.cache import Timeout, cache from lifemonitor.exceptions import (EntityNotFoundException, TestingServiceException) @@ -85,14 +84,12 @@ def _build_url(self, path, params=None): query = "?" 
+ urllib.parse.urlencode(params) if params else "" return urllib.parse.urljoin(self.api_base_url, path + query) - @cache.memoize(Timeout.REQUEST) def _get(self, path, token: models.TestingServiceToken = None, params=None) -> object: logger.debug("Getting resource: %r", self._build_url(path, params)) response = requests.get(self._build_url(path, params), headers=self._build_headers(token)) return response.json() if response.status_code == 200 else response @staticmethod - # @cache.memoize(Timeout.NONE) def get_repo_id(test_instance: models.TestInstance, quote=True): # extract the job name from the resource path logger.debug(f"Getting project metadata - resource: {test_instance.resource}") @@ -104,7 +101,6 @@ def get_repo_id(test_instance: models.TestInstance, quote=True): f"Unable to get the Travis job from the resource {test_instance.resource}") return repo_id - @cache.memoize(Timeout.NONE) def get_repo_slug(self, test_instance: models.TestInstance): metadata = self.get_project_metadata(test_instance) return metadata['slug'] @@ -128,26 +124,31 @@ def _get_last_test_build(self, test_instance: models.TestInstance, state=None) - except Exception as e: raise TestingServiceException(e) - @cache.memoize(Timeout.REQUEST) def get_last_test_build(self, test_instance: models.TestInstance) -> Optional[models.TravisTestBuild]: return self._get_last_test_build(test_instance) - # @cache.memoize() def get_last_passed_test_build(self, test_instance: models.TestInstance) -> Optional[models.TravisTestBuild]: return self._get_last_test_build(test_instance, state='passed') - # @cache.memoize() def get_last_failed_test_build(self, test_instance: models.TestInstance) -> Optional[models.TravisTestBuild]: return self._get_last_test_build(test_instance, state='failed') - # @cache.memoize(Timeout.REQUEST) def get_project_metadata(self, test_instance: models.TestInstance): try: - return self._get("/repo/{}".format(self.get_repo_id(test_instance))) + logger.debug("Getting Travis project metadata...") + key = f"project_metadata_{test_instance.uuid}" + metadata = self.cache.get(key) + if metadata is None: + logger.debug("Getting project metadata from remote service...") + metadata = self._get("/repo/{}".format(self.get_repo_id(test_instance))) + if metadata is not None: + self.cache.set(key, metadata) + else: + logger.debug("Reusing travis project metadata from cache...") + return metadata except Exception as e: raise TestingServiceException(f"{self}: {e}") - @cache.memoize(timeout=Timeout.REQUEST) def get_test_builds(self, test_instance: models.TestInstance, limit=10) -> list: try: repo_id = self.get_repo_id(test_instance) @@ -166,7 +167,6 @@ def get_test_builds(self, test_instance: models.TestInstance, limit=10) -> list: except Exception as e: raise TestingServiceException(details=f"{e}") - @cache.memoize(timeout=Timeout.REQUEST) def _get_test_build(self, test_instance: models.TestInstance, build_number: int) -> models.TravisTestBuild: try: response = self._get("/build/{}".format(build_number)) @@ -186,17 +186,14 @@ def _disable_build_cache(func, obj: TravisTestingService, build = obj._get_test_build(test_instance, build_number) return build.is_running() - @cache.memoize(timeout=Timeout.REQUEST) def get_test_build(self, test_instance: models.TestInstance, build_number: int) -> models.TravisTestBuild: return self._get_test_build(test_instance, build_number) - @cache.memoize(Timeout.NONE) def get_instance_external_link(self, test_instance: models.TestInstance) -> str: testing_service = test_instance.testing_service 
repo_slug = testing_service.get_repo_slug(test_instance) return urllib.parse.urljoin(testing_service.base_url, f'{repo_slug}/builds') - @cache.memoize(Timeout.NONE) def get_test_build_external_link(self, test_build: models.TestBuild) -> str: testing_service = test_build.test_instance.testing_service repo_slug = testing_service.get_repo_slug(test_build.test_instance) From 54fbeb00cd64d45518c9f980a15b8beb7c02a8d4 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Mon, 8 Nov 2021 17:04:30 +0000 Subject: [PATCH 039/162] Direct cache access on TestInstance model --- .../api/models/testsuites/testinstance.py | 64 ++++++++++++++++--- 1 file changed, 55 insertions(+), 9 deletions(-) diff --git a/lifemonitor/api/models/testsuites/testinstance.py b/lifemonitor/api/models/testsuites/testinstance.py index 084028168..1e61b4e0b 100644 --- a/lifemonitor/api/models/testsuites/testinstance.py +++ b/lifemonitor/api/models/testsuites/testinstance.py @@ -26,7 +26,6 @@ import lifemonitor.api.models as models from lifemonitor.api.models import db -from lifemonitor.cache import Timeout, cache from lifemonitor.models import JSON, UUID, ModelMixin from .testsuite import TestSuite @@ -75,6 +74,22 @@ def __init__(self, testing_suite: TestSuite, submitter: models.User, def __repr__(self): return ''.format(self.uuid, self.test_suite.uuid) + @property + def _cache_key_prefix(self): + return str(self) + + def _get_cache_key_external_link(self): + return f"{self._cache_key_prefix}_external_link" + + def _get_cache_key_last_build(self): + return f"{self._cache_key_prefix}_last_build" + + def _get_cache_key_test_builds(self, limit=10): + return f"{self._cache_key_prefix}_test_builds_limit{limit}" + + def _get_cache_key_test_build(self, build_number): + return f"{self._cache_key_prefix}_test_build_{build_number}" + @property def is_roc_instance(self): return self.roc_instance is not None @@ -86,23 +101,54 @@ def managed(self): @property def external_link(self): logger.debug("Getting external link...") - return self.testing_service.get_instance_external_link(self) + key = self._get_cache_key_external_link() + link = self.cache.get(key) + if link is None: + logger.debug("Getting external link from testing service...") + link = self.testing_service.get_instance_external_link(self) + if link is not None: + self.cache.set(key, link) + else: + logger.debug("Reusing external link from cache...") + return link @property - @cache.memoize(timeout=Timeout.REQUEST) def last_test_build(self): - builds = self.get_test_builds() - return builds[0] if builds and len(builds) > 0 else None + key = self._get_cache_key_last_build() + build = self.cache.get(key) + if build is None: + builds = self.get_test_builds() + build = builds[0] if builds and len(builds) > 0 else None + if build is not None: + self.cache.set(key, build) + return build - @cache.memoize(timeout=Timeout.REQUEST) def get_test_builds(self, limit=10): logger.debug("Getting test builds...") - return self.testing_service.get_test_builds(self, limit=limit) + key = self._get_cache_key_test_builds(limit) + builds = self.cache.get(key) + if builds is None: + logger.debug("Getting test builds from testing service...") + builds = self.testing_service.get_test_builds(self, limit=limit) + if builds is not None: + self.cache.set(key, builds) + else: + logger.debug("Reusing test builds from cache...") + return builds - @cache.memoize(timeout=Timeout.REQUEST) def get_test_build(self, build_number): logger.debug("Getting test build...") - return self.testing_service.get_test_build(self, 
build_number) + key = self._get_cache_key_test_build(build_number) + build = self.cache.get(key) + if build is None: + logger.debug("Getting test build from testing service...") + build = self.testing_service.get_test_build(self, build_number) + if build is not None: + if build.status not in [models.BuildStatus.RUNNING, models.BuildStatus.WAITING]: + self.cache.set(key, build) + else: + logger.debug(f"Reusing test build {build} from cache...") + return build def to_dict(self, test_build=False, test_output=False): data = { From 010ec0083410cc778b20f233d79599ef9e72f4a7 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Mon, 8 Nov 2021 17:05:21 +0000 Subject: [PATCH 040/162] Fix missing __eq__ operator --- lifemonitor/api/models/testsuites/testbuild.py | 4 ++++ lifemonitor/api/models/testsuites/testinstance.py | 3 +++ 2 files changed, 7 insertions(+) diff --git a/lifemonitor/api/models/testsuites/testbuild.py b/lifemonitor/api/models/testsuites/testbuild.py index ac2892149..8c891393b 100644 --- a/lifemonitor/api/models/testsuites/testbuild.py +++ b/lifemonitor/api/models/testsuites/testbuild.py @@ -53,6 +53,10 @@ def __init__(self, testing_service: models.TestingService, test_instance: models def __repr__(self) -> str: return f"TestBuild '{self.id}' @ instance '{self.test_instance.uuid}'" + def __eq__(self, other): + return isinstance(other, TestBuild) \ + and self.id == other.id and self.test_instance == other.test_instance + def is_successful(self): return self.result == TestBuild.Result.SUCCESS diff --git a/lifemonitor/api/models/testsuites/testinstance.py b/lifemonitor/api/models/testsuites/testinstance.py index 1e61b4e0b..edf2b1200 100644 --- a/lifemonitor/api/models/testsuites/testinstance.py +++ b/lifemonitor/api/models/testsuites/testinstance.py @@ -74,6 +74,9 @@ def __init__(self, testing_suite: TestSuite, submitter: models.User, def __repr__(self): return ''.format(self.uuid, self.test_suite.uuid) + def __eq__(self, o: object) -> bool: + return isinstance(o, TestInstance) and o.uuid == self.uuid + @property def _cache_key_prefix(self): return str(self) From 77ca6a05cb404c3ad5d050a1e325d8ac65f294da Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Mon, 8 Nov 2021 17:06:17 +0000 Subject: [PATCH 041/162] Set cache timeout of user profile controller to session --- lifemonitor/auth/controllers.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/lifemonitor/auth/controllers.py b/lifemonitor/auth/controllers.py index 656b713d2..cbf544e64 100644 --- a/lifemonitor/auth/controllers.py +++ b/lifemonitor/auth/controllers.py @@ -23,7 +23,7 @@ import flask from flask import flash, redirect, render_template, request, session, url_for from flask_login import login_required, login_user, logout_user -from lifemonitor.cache import cached +from lifemonitor.cache import cached, Timeout from lifemonitor.utils import (NextRouteRegistry, next_route_aware, split_by_crlf) @@ -50,7 +50,7 @@ @authorized -@cached(timeout=3600) +@cached(timeout=Timeout.SESSION) def show_current_user_profile(): try: if current_user and not current_user.is_anonymous: @@ -66,7 +66,7 @@ def user_subscriptions_get(): @authorized -@cached() +@cached(timeout=Timeout.REQUEST) def get_registry_users(): try: if current_registry and current_user.is_anonymous: @@ -81,7 +81,7 @@ def get_registry_users(): @authorized -@cached() +@cached(timeout=Timeout.REQUEST) def get_registry_user(user_id): try: if current_registry: @@ -97,6 +97,7 @@ def index(): @blueprint.route("/profile", methods=("GET",)) 
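# NB: at this point in the series `cached` is still the memoize-based decorator
# defined in lifemonitor/cache.py (it is rewritten in PATCH 055). The
# `@cached(timeout=Timeout.SESSION)` added to the profile view below replaces
# hard-coded TTLs with the configurable session timeout: Timeout.SESSION is
# presumably read from CACHE_SESSION_TIMEOUT (600 in settings.conf at this
# point in the series, raised to 3600 by PATCH 043).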
+@cached(timeout=Timeout.SESSION) def profile(form=None, passwordForm=None, currentView=None): currentView = currentView or request.args.get("currentView", 'accountsTab') logger.debug(OpenApiSpecs.get_instance().authorization_code_scopes) From e45154929feedd138b9af5892ab64404ed06b77f Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Mon, 8 Nov 2021 17:07:07 +0000 Subject: [PATCH 042/162] Fix and restore task to update cache --- lifemonitor/tasks/tasks.py | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/lifemonitor/tasks/tasks.py b/lifemonitor/tasks/tasks.py index e880b300d..c0977f792 100644 --- a/lifemonitor/tasks/tasks.py +++ b/lifemonitor/tasks/tasks.py @@ -41,16 +41,21 @@ def heartbeat(): logger.info("Heartbeat!") -# @schedule(CronTrigger(minute="*/5")) -# @dramatiq.actor -# def check_last_build(): -# logger.info("Checking last build....") -# from lifemonitor.api.models import Workflow - -# for w in Workflow.all(): -# for s in w.latest_version.test_suites: -# #logger.info("Updating workflow: %r", w) -# for i in s.test_instances: -# i.refresh() - -# logger.info("Checking last build: DONE!") +@schedule(CronTrigger(minute="*/5")) +@dramatiq.actor +def check_last_build(): + logger.info("Checking last build....") + from lifemonitor.api.models import Workflow + + for w in Workflow.all(): + for s in w.latest_version.test_suites: + logger.info("Updating workflow: %r", w) + for i in s.test_instances: + try: + i.ignore_cache_values = True + logger.debug("Updating latest builds: %r", i.get_test_builds()) + finally: + i.ignore_cache_values = False + logger.debug("Updating latest build: %r", i.last_test_build) + + logger.info("Checking last build: DONE!") From c3fc678e5bcec0cad0f5d234ad00b434e9c08461 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Mon, 8 Nov 2021 17:53:43 +0000 Subject: [PATCH 043/162] Update defaults --- settings.conf | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/settings.conf b/settings.conf index 22a01de1d..39a25b2c7 100644 --- a/settings.conf +++ b/settings.conf @@ -55,9 +55,9 @@ REDIS_PORT_NUMBER=6379 # Cache settings CACHE_REDIS_DB=0 -CACHE_DEFAULT_TIMEOUT=60 -CACHE_REQUEST_TIMEOUT=300 -CACHE_SESSION_TIMEOUT=600 +CACHE_DEFAULT_TIMEOUT=30 +CACHE_REQUEST_TIMEOUT=30 +CACHE_SESSION_TIMEOUT=3600 CACHE_BUILDS_TIMEOUT=84600 # Github OAuth2 settings From 06c303d6c2cc978e301cdaa3cfc22dd03dcb75ab Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Mon, 8 Nov 2021 18:02:09 +0000 Subject: [PATCH 044/162] Clean up --- lifemonitor/api/models/testsuites/testinstance.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/lifemonitor/api/models/testsuites/testinstance.py b/lifemonitor/api/models/testsuites/testinstance.py index edf2b1200..82f227db6 100644 --- a/lifemonitor/api/models/testsuites/testinstance.py +++ b/lifemonitor/api/models/testsuites/testinstance.py @@ -164,15 +164,6 @@ def to_dict(self, test_build=False, test_output=False): data.update(self.testing_service.get_test_builds_as_dict(test_output=test_output)) return data - def refresh(self): - try: - import lifemonitor - cache.delete_memoized(lifemonitor.api.models.testsuites.testinstance.TestInstance.get_test_build) - cache.delete_memoized(lifemonitor.api.models.testsuites.testinstance.TestInstance.get_test_builds) - except Exception as e: - logger.debug(e) - self.get_test_builds() - @classmethod def all(cls) -> List[TestInstance]: return cls.query.all() From c060cb7852b27075beaf6d14da80b857d636c8f5 Mon Sep 17 00:00:00 2001 From: 
Marco Enrico Piras Date: Mon, 8 Nov 2021 20:37:10 +0000 Subject: [PATCH 045/162] Fix requirements --- requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index aad751f3a..1b8c6b603 100644 --- a/requirements.txt +++ b/requirements.txt @@ -14,7 +14,6 @@ Flask-APScheduler==1.12.2 Flask-SQLAlchemy==2.5.1 Flask-Migrate==3.1.0 Flask-Caching==1.10.1 -flask-wtf~=0.15.1 Flask>=1.1.4,<2.0.0 gunicorn~=20.1.0 jwt==1.2.0 @@ -27,6 +26,7 @@ pytest-mock~=3.6.1 pytest~=6.2.5 python-dotenv~=0.19.0 python-jenkins==1.7.0 +python-redis-lock~=3.7.0 PyGithub~=1.55 PyYAML~=5.4.1 redis~=3.5.3 @@ -34,4 +34,4 @@ requests~=2.26.0 rocrate~=0.4.0 SQLAlchemy~=1.3.23 wheel~=0.37.0 -python-redis-lock~=3.7.0 + From 7af97e6aa0460a68ce2ab5f6d4ffc648e63a95b4 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Mon, 8 Nov 2021 20:50:09 +0000 Subject: [PATCH 046/162] Fix URLField import --- lifemonitor/auth/forms.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lifemonitor/auth/forms.py b/lifemonitor/auth/forms.py index 76cc82a5c..5476e4506 100644 --- a/lifemonitor/auth/forms.py +++ b/lifemonitor/auth/forms.py @@ -28,8 +28,7 @@ from lifemonitor.utils import OpenApiSpecs from sqlalchemy.exc import IntegrityError from wtforms import (BooleanField, HiddenField, PasswordField, SelectField, - SelectMultipleField, StringField) -from wtforms.fields.html5 import URLField + SelectMultipleField, StringField, URLField) from wtforms.validators import URL, DataRequired, EqualTo, Optional from .models import User, db From 8b34c630f9a58c7d76eededad6708f9a07ce4fea Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Mon, 8 Nov 2021 21:19:10 +0000 Subject: [PATCH 047/162] Add tests --- lifemonitor/config.py | 3 +- tests/unit/cache/test_cache.py | 86 ++++++++++++++++++++++++++++++++++ 2 files changed, 88 insertions(+), 1 deletion(-) create mode 100644 tests/unit/cache/test_cache.py diff --git a/lifemonitor/config.py b/lifemonitor/config.py index 226853883..59546b948 100644 --- a/lifemonitor/config.py +++ b/lifemonitor/config.py @@ -104,7 +104,8 @@ class TestingConfig(BaseConfig): TESTING = True LOG_LEVEL = "DEBUG" # SQLALCHEMY_DATABASE_URI = "sqlite:///{0}/app-test.db".format(basedir) - CACHE_TYPE = "flask_caching.backends.nullcache.NullCache" + # CACHE_TYPE = "flask_caching.backends.nullcache.NullCache" + CACHE_TYPE = "flask_caching.backends.rediscache.RedisCache" class TestingSupportConfig(TestingConfig): diff --git a/tests/unit/cache/test_cache.py b/tests/unit/cache/test_cache.py new file mode 100644 index 000000000..a898b6c04 --- /dev/null +++ b/tests/unit/cache/test_cache.py @@ -0,0 +1,86 @@ +# Copyright (c) 2020-2021 CRS4 +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import logging +from unittest.mock import MagicMock + +import lifemonitor.api.models as models +from tests import utils +from lifemonitor.cache import CacheHelper + +logger = logging.getLogger(__name__) + + +def test_cache_last_build(app_client, user1): + valid_workflow = 'sort-and-change-case' + assert CacheHelper.size() == 0, "Cache should be empty" + _, workflow = utils.pick_and_register_workflow(user1, valid_workflow) + assert workflow, "Workflow should be set" + assert len(workflow.test_suites) > 0, "The workflow should have at least one suite" + suite: models.TestSuite = workflow.test_suites[0] + assert len(suite.test_instances) > 0, "The suite should have at least one test instance" + instance: models.TestInstance = suite.test_instances[0] + + assert instance._cache_key_prefix == str(instance), "Invalid cache key prefix" + + assert instance.cache.get(instance._get_cache_key_last_build()) is None, "Cache should be empty" + build = instance.last_test_build + assert build, "Last build should not be empty" + cached_build = instance.cache.get(instance._get_cache_key_last_build()) + assert cached_build is not None, "Cache should not be empty" + assert build == cached_build, "Build should be equal to the cached build" + + instance.get_test_builds = MagicMock(return_value=None) + + build = instance.last_test_build + assert build, "Last build should not be empty" + assert instance.get_test_builds.assert_not_called, "instance.get_test_builds should not be used" + assert build == cached_build, "Build should be equal to the cached build" + + +def test_cache_test_builds(app_client, user1): + valid_workflow = 'sort-and-change-case' + assert CacheHelper.size() == 0, "Cache should be empty" + _, workflow = utils.pick_and_register_workflow(user1, valid_workflow) + assert workflow, "Workflow should be set" + assert len(workflow.test_suites) > 0, "The workflow should have at least one suite" + suite: models.TestSuite = workflow.test_suites[0] + assert len(suite.test_instances) > 0, "The suite should have at least one test instance" + instance: models.TestInstance = suite.test_instances[0] + + limit = 10 + cache_key = instance._get_cache_key_test_builds(limit=limit) + assert instance.cache.get(cache_key) is None, "Cache should be empty" + builds = instance.get_test_builds(limit=limit) + assert builds and len(builds) > 0, "Invalid number of builds" + + cached_builds = instance.cache.get(cache_key) + assert cached_builds is not None and len(cached_builds) > 0, "Cache should not be empty" + assert len(builds) == len(cached_builds), "Unexpected number of cached builds" + + instance.testing_service.get_test_builds = MagicMock(return_value=None) + builds = instance.get_test_builds(limit=limit) + assert builds and len(builds) > 0, "Invalid number of builds" + assert instance.testing_service.get_test_builds.assert_not_called, "instance.get_test_builds should not be used" + assert len(builds) == len(cached_builds), "Unexpected number of cached builds" + + limit = 20 + cache_key = instance._get_cache_key_test_builds(limit=limit) + assert instance.cache.get(cache_key) is None, "Cache should be empty" From ed4c18bebeef7420bbf2c84bc4a5964988f74b0c Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Mon, 8 Nov 2021 22:03:12 +0000 Subject: 
[PATCH 048/162] Disable cache when CACHE_TYPE is NullCache --- lifemonitor/cache.py | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index 55eaf2ae0..4bd8913aa 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -23,10 +23,11 @@ import functools import logging import os -import redis_lock +import redis_lock from flask.app import Flask from flask_caching import Cache +from flask_caching.backends.nullcache import NullCache class Timeout: @@ -156,16 +157,21 @@ def lock(key: str): def set(self, key: str, value, timeout: int = Timeout.NONE): val = None - if key is not None and self.cache_enabled: - lock = self.lock(key) - if lock.acquire(blocking=True): - try: - val = cache.get(key) - if not val: - cache.set(key, value, timeout=timeout) - finally: - lock.release() + if not isinstance(cache, NullCache): + if key is not None and self.cache_enabled: + lock = self.lock(key) + if lock.acquire(blocking=True): + try: + val = cache.get(key) + if not val: + cache.set(key, value, timeout=timeout) + finally: + lock.release() return val def get(self, key: str): - return cache.get(key) if self.cache_enabled and not self.ignore_cache_values else None + return cache.get(key) \ + if not isinstance(cache, NullCache) \ + and self.cache_enabled \ + and not self.ignore_cache_values \ + else None From 4285b1ed5355a136ec0d5e5f22942de4aa80e53e Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Mon, 8 Nov 2021 22:03:56 +0000 Subject: [PATCH 049/162] Disable cache on some unit tests --- tests/conftest.py | 5 +++++ tests/unit/api/controllers/test_instances.py | 10 +++++----- tests/unit/api/controllers/test_workflows.py | 2 +- 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index cf66b550b..3ad2ca893 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -27,6 +27,7 @@ import uuid from unittest.mock import MagicMock +from lifemonitor.cache import init_cache import lifemonitor.db as lm_db import pytest from lifemonitor import auth @@ -83,6 +84,10 @@ def service_registry() -> ClassManager: def token_manager() -> TestingServiceTokenManager: return TestingServiceTokenManager.get_instance() +@pytest.fixture +def no_cache(app_context): + app_context.app.config['CACHE_TYPE'] = "flask_caching.backends.nullcache.NullCache" + init_cache(app_context.app) @pytest.fixture(autouse=True) def initialize(app_settings, request_context, service_registry: ClassManager): diff --git a/tests/unit/api/controllers/test_instances.py b/tests/unit/api/controllers/test_instances.py index dee8ce0a8..98798fe4e 100644 --- a/tests/unit/api/controllers/test_instances.py +++ b/tests/unit/api/controllers/test_instances.py @@ -71,7 +71,7 @@ def test_get_instance_by_user_error_forbidden(m, request_context, mock_user): @patch("lifemonitor.api.controllers.lm") -def test_get_instance_by_user(m, request_context, mock_user): +def test_get_instance_by_user(m, request_context, no_cache, mock_user): assert not auth.current_user.is_anonymous, "Unexpected user in session" assert auth.current_registry is not None, "Unexpected registry in session" workflow = MagicMock() @@ -95,7 +95,7 @@ def test_get_instance_by_user(m, request_context, mock_user): @patch("lifemonitor.api.controllers.lm") -def test_get_instance_build_by_user_error_not_found(m, request_context, mock_user): +def test_get_instance_build_by_user_error_not_found(m, request_context, no_cache, mock_user): assert not auth.current_user.is_anonymous, 
"Unexpected user in session" assert auth.current_registry is not None, "Unexpected registry in session" instance = MagicMock() @@ -161,7 +161,7 @@ def test_get_instance_build_by_user_rate_limit_exceeded(lm, request_context, moc @patch("lifemonitor.api.controllers.lm") -def test_get_instance_build_last_logs_by_user(m, request_context, mock_user): +def test_get_instance_build_last_logs_by_user(m, request_context, no_cache, mock_user): assert not auth.current_user.is_anonymous, "Unexpected user in session" assert auth.current_registry is not None, "Unexpected registry in session" workflow = {"uuid": "1111-222"} @@ -307,7 +307,7 @@ def test_get_instance_by_registry_error_forbidden(m, request_context, mock_regis @patch("lifemonitor.api.controllers.lm") -def test_get_instance_by_registry_error_not_found(m, request_context, mock_registry): +def test_get_instance_by_registry_error_not_found(m, request_context, no_cache, mock_registry): assert auth.current_user.is_anonymous, "Unexpected user in session" assert auth.current_registry, "Unexpected registry in session" workflow = {"uuid": "1111-222"} @@ -322,7 +322,7 @@ def test_get_instance_by_registry_error_not_found(m, request_context, mock_regis @patch("lifemonitor.api.controllers.lm") -def test_get_instance_build_by_registry_error_not_found(m, request_context, mock_registry): +def test_get_instance_build_by_registry_error_not_found(m, request_context, no_cache, mock_registry): assert auth.current_user.is_anonymous, "Unexpected user in session" assert auth.current_registry, "Unexpected registry in session" build = MagicMock() diff --git a/tests/unit/api/controllers/test_workflows.py b/tests/unit/api/controllers/test_workflows.py index 615d3416c..3576f63e5 100644 --- a/tests/unit/api/controllers/test_workflows.py +++ b/tests/unit/api/controllers/test_workflows.py @@ -78,7 +78,7 @@ def test_get_public_workflows_rate_limit_exceeded(lm, rate_limit_exceeded_workfl @patch("lifemonitor.api.controllers.lm") -def test_get_workflows_with_user_rate_limit_exceeded(lm, mock_user, rate_limit_exceeded_workflow): +def test_get_workflows_with_user_rate_limit_exceeded(lm, mock_user, no_cache, rate_limit_exceeded_workflow): # add one user to the current session assert not auth.current_user.is_anonymous, "Unexpected user in session" assert auth.current_user == mock_user, "Unexpected user in session" From b4a4fce00c107554452fc394fa1bf68e369d6f57 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Tue, 9 Nov 2021 01:01:29 +0000 Subject: [PATCH 050/162] Fix missing blank lines --- tests/conftest.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/conftest.py b/tests/conftest.py index 3ad2ca893..83c894178 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -84,11 +84,13 @@ def service_registry() -> ClassManager: def token_manager() -> TestingServiceTokenManager: return TestingServiceTokenManager.get_instance() + @pytest.fixture def no_cache(app_context): app_context.app.config['CACHE_TYPE'] = "flask_caching.backends.nullcache.NullCache" init_cache(app_context.app) + @pytest.fixture(autouse=True) def initialize(app_settings, request_context, service_registry: ClassManager): service_registry.remove_class("unknown") From c34ee7999c2ee89dc02e07d1603c0b0164e213dc Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Tue, 9 Nov 2021 09:46:57 +0000 Subject: [PATCH 051/162] Fix check of cache type --- lifemonitor/cache.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index 
4bd8913aa..cd70b723c 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -157,7 +157,7 @@ def lock(key: str): def set(self, key: str, value, timeout: int = Timeout.NONE): val = None - if not isinstance(cache, NullCache): + if not isinstance(cache.cache, NullCache): if key is not None and self.cache_enabled: lock = self.lock(key) if lock.acquire(blocking=True): @@ -171,7 +171,7 @@ def set(self, key: str, value, timeout: int = Timeout.NONE): def get(self, key: str): return cache.get(key) \ - if not isinstance(cache, NullCache) \ + if not isinstance(cache.cache, NullCache) \ and self.cache_enabled \ and not self.ignore_cache_values \ else None From 7de62921b40b7c2ea35e9f43c46b38e1cc1fea8d Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Tue, 9 Nov 2021 10:57:15 +0000 Subject: [PATCH 052/162] Fix tests --- tests/integration/api/controllers/test_registries.py | 2 +- tests/unit/api/controllers/test_suites.py | 12 ++++++------ tests/unit/cache/test_cache.py | 4 ++-- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/tests/integration/api/controllers/test_registries.py b/tests/integration/api/controllers/test_registries.py index e347ab65f..98767f8d9 100644 --- a/tests/integration/api/controllers/test_registries.py +++ b/tests/integration/api/controllers/test_registries.py @@ -45,7 +45,7 @@ def test_get_registries(app_client, client_auth_method, user1, user1_auth): assert len(data['items']) == 1, "Invalid number of registries" -def test_get_registries_no_authorization(app_client, fake_registry): +def test_get_registries_no_authorization(app_client, no_cache, fake_registry): response = app_client.get(utils.build_registries_path()) utils.assert_status_code(200, response.status_code) assert response.data, "Empty response" diff --git a/tests/unit/api/controllers/test_suites.py b/tests/unit/api/controllers/test_suites.py index 4d0cfb26d..08c6bbcf2 100644 --- a/tests/unit/api/controllers/test_suites.py +++ b/tests/unit/api/controllers/test_suites.py @@ -42,7 +42,7 @@ def test_get_suite_error_not_found(m, request_context, mock_user): @patch("lifemonitor.api.controllers.lm") -def test_get_suite_by_user_without_auth_access_to_workflow(m, request_context, mock_user): +def test_get_suite_by_user_without_auth_access_to_workflow(m, request_context, no_cache, mock_user): # add one user to the current session assert not auth.current_user.is_anonymous, "Unexpected user in session" assert auth.current_user == mock_user, "Unexpected user in session" @@ -65,7 +65,7 @@ def test_get_suite_by_user_without_auth_access_to_workflow(m, request_context, m @patch("lifemonitor.api.controllers.lm") -def test_get_suite_by_registry_without_auth_access_to_workflow(m, request_context, mock_registry): +def test_get_suite_by_registry_without_auth_access_to_workflow(m, request_context, no_cache, mock_registry): # add one user to the current session assert auth.current_user.is_anonymous, "Unexpected user in session" logger.debug("Current registry: %r", auth.current_registry) @@ -133,7 +133,7 @@ def test_get_suite_by_registry(m, request_context, mock_registry): @patch("lifemonitor.api.controllers.lm") -def test_get_suite_status_by_user(m, request_context, mock_user): +def test_get_suite_status_by_user(m, request_context, no_cache, mock_user): # add one user to the current session assert not auth.current_user.is_anonymous, "Unexpected user in session" assert auth.current_user == mock_user, "Unexpected user in session" @@ -179,7 +179,7 @@ def test_get_suite_status_by_user_rate_limit_exceeded(lm, mock_user, 
rate_limit_ @patch("lifemonitor.api.controllers.lm") -def test_get_suite_status_by_registry(m, request_context, mock_registry): +def test_get_suite_status_by_registry(m, request_context, no_cache, mock_registry): # add one user to the current session assert auth.current_user.is_anonymous, "Unexpected user in session" logger.debug("Current registry: %r", auth.current_registry) @@ -226,7 +226,7 @@ def test_get_suite_status_by_registry_rate_limit_exceeded(lm, request_context, m @patch("lifemonitor.api.controllers.lm") -def test_get_suite_instances_by_user(m, request_context, mock_user): +def test_get_suite_instances_by_user(m, request_context, no_cache, mock_user): # add one user to the current session assert not auth.current_user.is_anonymous, "Unexpected user in session" assert auth.current_user == mock_user, "Unexpected user in session" @@ -248,7 +248,7 @@ def test_get_suite_instances_by_user(m, request_context, mock_user): @patch("lifemonitor.api.controllers.lm") -def test_get_suite_instances_by_registry(m, request_context, mock_registry): +def test_get_suite_instances_by_registry(m, request_context, no_cache, mock_registry): # add one user to the current session assert auth.current_user.is_anonymous, "Unexpected user in session" logger.debug("Current registry: %r", auth.current_registry) diff --git a/tests/unit/cache/test_cache.py b/tests/unit/cache/test_cache.py index a898b6c04..74831f809 100644 --- a/tests/unit/cache/test_cache.py +++ b/tests/unit/cache/test_cache.py @@ -28,7 +28,7 @@ logger = logging.getLogger(__name__) -def test_cache_last_build(app_client, user1): +def test_cache_last_build(app_client, redis_cache, user1): valid_workflow = 'sort-and-change-case' assert CacheHelper.size() == 0, "Cache should be empty" _, workflow = utils.pick_and_register_workflow(user1, valid_workflow) @@ -55,7 +55,7 @@ def test_cache_last_build(app_client, user1): assert build == cached_build, "Build should be equal to the cached build" -def test_cache_test_builds(app_client, user1): +def test_cache_test_builds(app_client, redis_cache, user1): valid_workflow = 'sort-and-change-case' assert CacheHelper.size() == 0, "Cache should be empty" _, workflow = utils.pick_and_register_workflow(user1, valid_workflow) From 69999d7671f44c388e18f1599c1b7e32eff50763 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Tue, 9 Nov 2021 11:16:52 +0000 Subject: [PATCH 053/162] Fix missing fixture --- tests/conftest.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/tests/conftest.py b/tests/conftest.py index 83c894178..c26c8b308 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -27,13 +27,13 @@ import uuid from unittest.mock import MagicMock -from lifemonitor.cache import init_cache import lifemonitor.db as lm_db import pytest from lifemonitor import auth from lifemonitor.api.models import (TestingService, TestingServiceTokenManager, TestSuite, User) from lifemonitor.api.services import LifeMonitor +from lifemonitor.cache import cache, init_cache from lifemonitor.utils import ClassManager from tests.utils import register_workflow @@ -89,6 +89,14 @@ def token_manager() -> TestingServiceTokenManager: def no_cache(app_context): app_context.app.config['CACHE_TYPE'] = "flask_caching.backends.nullcache.NullCache" init_cache(app_context.app) + return cache + + +@pytest.fixture +def redis_cache(app_context): + app_context.app.config['CACHE_TYPE'] = "flask_caching.backends.rediscache.RedisCache" + init_cache(app_context.app) + return cache @pytest.fixture(autouse=True) From 
c050ada5640f90fa64fdf0ea7efd08624a72f652 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Tue, 9 Nov 2021 13:20:37 +0000 Subject: [PATCH 054/162] Set alternative import for URLField --- lifemonitor/auth/forms.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/lifemonitor/auth/forms.py b/lifemonitor/auth/forms.py index 5476e4506..4ae88053a 100644 --- a/lifemonitor/auth/forms.py +++ b/lifemonitor/auth/forms.py @@ -28,11 +28,17 @@ from lifemonitor.utils import OpenApiSpecs from sqlalchemy.exc import IntegrityError from wtforms import (BooleanField, HiddenField, PasswordField, SelectField, - SelectMultipleField, StringField, URLField) + SelectMultipleField, StringField) from wtforms.validators import URL, DataRequired, EqualTo, Optional from .models import User, db +try: + from wtforms import URLField +except ImportError: + from wtforms.fields.html5 import URLField + + # Set the module level logger logger = logging.getLogger(__name__) From bfbb8777d6ed2200ea330b7075f3a4c5cc63b589 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Tue, 9 Nov 2021 15:39:23 +0000 Subject: [PATCH 055/162] Update 'cached' decorator --- lifemonitor/cache.py | 135 +++++++++++++++++++++++++------------------ 1 file changed, 79 insertions(+), 56 deletions(-) diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index cd70b723c..69d69c2ff 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -29,6 +29,9 @@ from flask_caching import Cache from flask_caching.backends.nullcache import NullCache +# Set prefix +CACHE_PREFIX = "flask_cache" + class Timeout: # Set default timeouts @@ -69,62 +72,6 @@ def init_cache(app: Flask): logger.debug(f"Cache initialised (type: {cache_type})") -def _make_name(fname) -> str: - from lifemonitor.auth import current_registry, current_user - result = fname - if current_user and not current_user.is_anonymous: - result += "-{}-{}".format(current_user.username, current_user.id) - if current_registry: - result += "-{}".format(current_registry.uuid) - logger.debug("Calculated function name: %r", result) - - return result - - -def clear_cache(func=None, *args, **kwargs): - try: - if func: - cache.delete_memoized(func, *args, **kwargs) - else: - cache.clear() - except Exception as e: - logger.error("Error deleting cache: %r", e) - - -def cached(timeout=Timeout.REQUEST, unless=False): - def decorator(function): - - @cache.memoize(timeout=timeout, unless=unless, make_name=_make_name) - @functools.wraps(function) - def wrapper(*args, **kwargs): - logger.debug("Cache arguments: %r", args) - logger.debug("Caghe kwargs: %r", kwargs) - # wrap concrete function - return function(*args, **kwargs) - - return wrapper - return decorator - - -def cached_method(timeout=None, unless=False): - def decorator(function): - - def unless_wrapper(func, obj, *args, **kwargs): - f = getattr(obj, unless) - return f(obj, func, *args, **kwargs) - - @cache.memoize(timeout=timeout, unless=unless_wrapper, make_name=_make_name) - @functools.wraps(function) - def wrapper(*args, **kwargs): - logger.debug("Cache arguments: %r", args) - logger.debug("Caghe kwargs: %r", kwargs) - # wrap concrete function - return function(*args, **kwargs) - - return wrapper - return decorator - - class CacheMixin(object): _helper: CacheHelper = None @@ -143,6 +90,13 @@ class CacheHelper(object): # Ignore cache values even if cache is enabled ignore_cache_values = False + @staticmethod + def redis_client(): + try: + return cache.cache._read_clients + except Exception: + return None + @staticmethod def 
size(): return len(cache.get_dict()) @@ -175,3 +129,72 @@ def get(self, key: str): and self.cache_enabled \ and not self.ignore_cache_values \ else None + + @classmethod + def delete_keys(cls, pattern: str): + redis = cls.redis_client() + logger.debug(f"Deleting keys by pattern: {pattern}") + for key in redis.scan_iter(f"{CACHE_PREFIX}{pattern}"): + logger.debug("Delete key: %r", key) + redis.delete(key) + + +# global cache helper instance +helper: CacheHelper = CacheHelper() + + +def _make_key(func=None, *args, **kwargs) -> str: + from lifemonitor.auth import current_registry, current_user + logger.debug("Cache arguments: %r", args) + logger.debug("Caghe kwargs: %r", kwargs) + result = "" + if current_user and not current_user.is_anonymous: + result += "{}-{}_".format(current_user.username, current_user.id) + if current_registry: + result += "{}_".format(current_registry.uuid) + if func: + result += func if isinstance(func, str) else func.__name__ if callable(func) else str(func) + if args: + result += "_" + "-".join([str(_) for _ in args]) + if kwargs: + result += "_" + "-".join([f"{str(k)}={str(v)}" for k, v in kwargs.items()]) + logger.debug("Calculated key: %r", result) + return result + + +def clear_cache(func=None, *args, **kwargs): + try: + if func: + key = _make_key(func) + helper.delete_keys(f"{key}*") + if args or kwargs: + key = _make_key(func, *args, **kwargs) + helper.delete_keys(f"{key}*") + else: + key = _make_key() + helper.delete_keys(f"{key}*") + except Exception as e: + logger.error("Error deleting cache: %r", e) + + +def cached(timeout=Timeout.REQUEST, unless=False): + def decorator(function): + + @functools.wraps(function) + def wrapper(*args, **kwargs): + logger.debug("Function: %r", str(function.__name__)) + logger.debug("Cache arguments: %r", args) + logger.debug("Caghe kwargs: %r", kwargs) + key = _make_key(function.__name__, *args, **kwargs) + logger.debug("Calculated key: %r", key) + result = helper.get(key) + if result is None: + logger.debug(f"Getting value from the actual function for key {key}...") + result = function(*args, **kwargs) + helper.set(key, result, timeout=timeout) + else: + logger.debug(f"Reusing value from cache key '{key}'...") + return result + + return wrapper + return decorator From be45f371b61f21a1a724430e435eb5a193adfc5e Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Tue, 9 Nov 2021 18:39:19 +0000 Subject: [PATCH 056/162] Build external link: refactoring; cache support --- lifemonitor/api/models/services/github.py | 4 ---- lifemonitor/api/models/services/jenkins.py | 4 ---- lifemonitor/api/models/services/travis.py | 4 ---- lifemonitor/api/models/testsuites/testbuild.py | 16 +++++++++++++--- 4 files changed, 13 insertions(+), 15 deletions(-) diff --git a/lifemonitor/api/models/services/github.py b/lifemonitor/api/models/services/github.py index 1e5a2917a..718f3f7c4 100644 --- a/lifemonitor/api/models/services/github.py +++ b/lifemonitor/api/models/services/github.py @@ -346,7 +346,3 @@ def timestamp(self) -> int: @property def url(self) -> str: return self._metadata.url - - @property - def external_link(self) -> str: - return self.testing_service.get_test_build_external_link(self) diff --git a/lifemonitor/api/models/services/jenkins.py b/lifemonitor/api/models/services/jenkins.py index e439fa47a..842080c77 100644 --- a/lifemonitor/api/models/services/jenkins.py +++ b/lifemonitor/api/models/services/jenkins.py @@ -208,7 +208,3 @@ def result(self) -> models.TestBuild.Result: @property def url(self) -> str: return 
self.metadata['url'] - - @property - def external_link(self) -> str: - return self.testing_service.get_test_build_external_link(self) diff --git a/lifemonitor/api/models/services/travis.py b/lifemonitor/api/models/services/travis.py index ffa0cbfe3..55542cf5d 100644 --- a/lifemonitor/api/models/services/travis.py +++ b/lifemonitor/api/models/services/travis.py @@ -293,7 +293,3 @@ def result(self) -> models.TestBuild.Result: @property def url(self) -> str: return "{}{}".format(self.testing_service.url, self.metadata['@href']) - - @property - def external_link(self) -> str: - return self.testing_service.get_test_build_external_link(self) diff --git a/lifemonitor/api/models/testsuites/testbuild.py b/lifemonitor/api/models/testsuites/testbuild.py index 8c891393b..b6f16892b 100644 --- a/lifemonitor/api/models/testsuites/testbuild.py +++ b/lifemonitor/api/models/testsuites/testbuild.py @@ -25,6 +25,7 @@ from enum import Enum import lifemonitor.api.models as models +from lifemonitor.cache import CacheMixin # set module level logger logger = logging.getLogger(__name__) @@ -39,7 +40,7 @@ class BuildStatus: ABORTED = "aborted" -class TestBuild(ABC): +class TestBuild(ABC, CacheMixin): class Result(Enum): SUCCESS = 0 FAILED = 1 @@ -110,9 +111,18 @@ def url(self) -> str: pass @property - @abstractmethod def external_link(self) -> str: - pass + logger.debug("Getting external link...") + key = f"{str(self)}_external_link" + link = self.cache.get(key) + if link is None: + logger.debug("Getting external link from testing service...") + link = self.testing_service.get_test_build_external_link(self) + if link is not None: + self.cache.set(key, link) + else: + logger.debug("Reusing external link from cache...") + return link def get_output(self, offset_bytes=0, limit_bytes=131072): return self.testing_service.get_test_build_output(self.test_instance, self.id, offset_bytes, limit_bytes) From 4272dd7ecf25808d56ee5857250075fb1c2bc9cf Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Wed, 10 Nov 2021 10:51:25 +0000 Subject: [PATCH 057/162] Update default cache prefix --- lifemonitor/cache.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index 69d69c2ff..3588aeb1b 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -27,10 +27,10 @@ import redis_lock from flask.app import Flask from flask_caching import Cache -from flask_caching.backends.nullcache import NullCache +from flask_caching.backends.rediscache import RedisCache # Set prefix -CACHE_PREFIX = "flask_cache" +CACHE_PREFIX = "lifemonitor-api-cache:" class Timeout: @@ -61,6 +61,7 @@ def init_cache(app: Flask): app.config.setdefault('CACHE_REDIS_PORT', os.environ.get('REDIS_PORT_NUMBER', 6379)) app.config.setdefault('CACHE_REDIS_PASSWORD', os.environ.get('REDIS_PASSWORD', '')) app.config.setdefault('CACHE_REDIS_DB', int(os.environ.get('CACHE_REDIS_DB', 0))) + app.config.setdefault("CACHE_KEY_PREFIX", CACHE_PREFIX) app.config.setdefault('CACHE_REDIS_URL', "redis://:{0}@{1}:{2}/{3}".format( app.config.get('CACHE_REDIS_PASSWORD'), app.config.get('CACHE_REDIS_HOST'), From 13eac5a1a1a4d9c31caf798708beb0ed2d892abc Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Wed, 10 Nov 2021 11:07:25 +0000 Subject: [PATCH 058/162] Update cache helper --- lifemonitor/cache.py | 77 ++++++++++++++++++++++---------------------- 1 file changed, 39 insertions(+), 38 deletions(-) diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index 3588aeb1b..ec5cf76f7 100644 --- a/lifemonitor/cache.py 
+++ b/lifemonitor/cache.py @@ -24,6 +24,7 @@ import logging import os +import redis import redis_lock from flask.app import Flask from flask_caching import Cache @@ -73,17 +74,6 @@ def init_cache(app: Flask): logger.debug(f"Cache initialised (type: {cache_type})") -class CacheMixin(object): - - _helper: CacheHelper = None - - @property - def cache(self) -> CacheHelper: - if self._helper is None: - self._helper = CacheHelper() - return self._helper - - class CacheHelper(object): # Enable/Disable cache @@ -91,57 +81,57 @@ class CacheHelper(object): # Ignore cache values even if cache is enabled ignore_cache_values = False - @staticmethod - def redis_client(): - try: - return cache.cache._read_clients - except Exception: - return None + def __init__(self, cache) -> None: + self._cache = cache + + @property + def cache(self) -> RedisCache: + return self._cache.cache + + @property + def backend(self) -> redis.Redis: + return self.cache._read_clients - @staticmethod - def size(): - return len(cache.get_dict()) + def size(self): + return len(self.cache.get_dict()) - @staticmethod - def to_dict(): - return cache.get_dict() + def to_dict(self): + return self.cache.get_dict() - @staticmethod - def lock(key: str): - return redis_lock.Lock(cache.cache._read_clients, key) + def lock(self, key: str): + return redis_lock.Lock(self.backend, key) def set(self, key: str, value, timeout: int = Timeout.NONE): val = None - if not isinstance(cache.cache, NullCache): + if isinstance(self.cache, RedisCache): if key is not None and self.cache_enabled: lock = self.lock(key) if lock.acquire(blocking=True): try: - val = cache.get(key) + val = self.cache.get(key) if not val: - cache.set(key, value, timeout=timeout) + self.cache.set(key, value, timeout=timeout) finally: lock.release() return val def get(self, key: str): - return cache.get(key) \ - if not isinstance(cache.cache, NullCache) \ + return self.cache.get(key) \ + if isinstance(self.cache, RedisCache) \ and self.cache_enabled \ and not self.ignore_cache_values \ else None - @classmethod - def delete_keys(cls, pattern: str): - redis = cls.redis_client() + def delete_keys(self, pattern: str): logger.debug(f"Deleting keys by pattern: {pattern}") - for key in redis.scan_iter(f"{CACHE_PREFIX}{pattern}"): - logger.debug("Delete key: %r", key) - redis.delete(key) + if isinstance(self.cache, RedisCache): + for key in self.backend.scan_iter(f"{CACHE_PREFIX}{pattern}"): + logger.debug("Delete key: %r", key) + redis.delete(key) # global cache helper instance -helper: CacheHelper = CacheHelper() +helper: CacheHelper = CacheHelper(cache) def _make_key(func=None, *args, **kwargs) -> str: @@ -199,3 +189,14 @@ def wrapper(*args, **kwargs): return wrapper return decorator + + +class CacheMixin(object): + + _helper: CacheHelper = helper + + @property + def cache(self) -> CacheHelper: + if self._helper is None: + self._helper = CacheHelper(cache) + return self._helper From 793693b804a40a87ff4da32dff4cad4a9677f316 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Wed, 10 Nov 2021 11:09:54 +0000 Subject: [PATCH 059/162] Update function make_key: fix fname --- lifemonitor/cache.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index ec5cf76f7..98b1f516f 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -136,20 +136,24 @@ def delete_keys(self, pattern: str): def _make_key(func=None, *args, **kwargs) -> str: from lifemonitor.auth import current_registry, current_user - 
logger.debug("Cache arguments: %r", args) - logger.debug("Caghe kwargs: %r", kwargs) + fname = "" if func is None \ + else func if isinstance(func, str) \ + else f"{func.__module__}.{func.__name__}" if callable(func) else str(func) + logger.debug("make_key func: %r", fname) + logger.debug("make_key args: %r", args) + logger.debug("make_key kwargs: %r", kwargs) result = "" if current_user and not current_user.is_anonymous: result += "{}-{}_".format(current_user.username, current_user.id) if current_registry: result += "{}_".format(current_registry.uuid) if func: - result += func if isinstance(func, str) else func.__name__ if callable(func) else str(func) + result += fname if args: result += "_" + "-".join([str(_) for _ in args]) if kwargs: result += "_" + "-".join([f"{str(k)}={str(v)}" for k, v in kwargs.items()]) - logger.debug("Calculated key: %r", result) + logger.debug("make_key calculated key: %r", result) return result @@ -173,11 +177,7 @@ def decorator(function): @functools.wraps(function) def wrapper(*args, **kwargs): - logger.debug("Function: %r", str(function.__name__)) - logger.debug("Cache arguments: %r", args) - logger.debug("Caghe kwargs: %r", kwargs) - key = _make_key(function.__name__, *args, **kwargs) - logger.debug("Calculated key: %r", key) + key = _make_key(function, *args, **kwargs) result = helper.get(key) if result is None: logger.debug(f"Getting value from the actual function for key {key}...") From f743e1c108416850b7229be9985ae6c7bce06b68 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Wed, 10 Nov 2021 11:39:42 +0000 Subject: [PATCH 060/162] Fix tests --- tests/unit/cache/test_cache.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/unit/cache/test_cache.py b/tests/unit/cache/test_cache.py index 74831f809..0f7824458 100644 --- a/tests/unit/cache/test_cache.py +++ b/tests/unit/cache/test_cache.py @@ -23,14 +23,14 @@ import lifemonitor.api.models as models from tests import utils -from lifemonitor.cache import CacheHelper +from lifemonitor.cache import helper logger = logging.getLogger(__name__) def test_cache_last_build(app_client, redis_cache, user1): valid_workflow = 'sort-and-change-case' - assert CacheHelper.size() == 0, "Cache should be empty" + assert helper.size() == 0, "Cache should be empty" _, workflow = utils.pick_and_register_workflow(user1, valid_workflow) assert workflow, "Workflow should be set" assert len(workflow.test_suites) > 0, "The workflow should have at least one suite" @@ -57,7 +57,7 @@ def test_cache_last_build(app_client, redis_cache, user1): def test_cache_test_builds(app_client, redis_cache, user1): valid_workflow = 'sort-and-change-case' - assert CacheHelper.size() == 0, "Cache should be empty" + assert helper.size() == 0, "Cache should be empty" _, workflow = utils.pick_and_register_workflow(user1, valid_workflow) assert workflow, "Workflow should be set" assert len(workflow.test_suites) > 0, "The workflow should have at least one suite" From 68027e8286b07b689ed84fb6202fbdfc4d36b3af Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Wed, 10 Nov 2021 13:30:07 +0000 Subject: [PATCH 061/162] Fix cache: clear after update --- lifemonitor/api/controllers.py | 36 ++++++++++++---------------------- lifemonitor/cache.py | 6 +++++- 2 files changed, 17 insertions(+), 25 deletions(-) diff --git a/lifemonitor/api/controllers.py b/lifemonitor/api/controllers.py index 9f8cc7dcc..6c21b13a6 100644 --- a/lifemonitor/api/controllers.py +++ b/lifemonitor/api/controllers.py @@ -186,7 +186,7 @@ def 
registry_workflows_get(status=False): def registry_workflows_post(body): if not current_registry: return lm_exceptions.report_problem(401, "Unauthorized", detail=messages.no_registry_found) - clear_cache(registry_workflows_get) + clear_cache() return workflows_post(body) @@ -210,7 +210,7 @@ def registry_user_workflows_get(user_id, status=False): def registry_user_workflows_post(user_id, body): if not current_registry: return lm_exceptions.report_problem(401, "Unauthorized", detail=messages.no_registry_found) - clear_cache(registry_user_workflows_get, user_id) + clear_cache() return workflows_post(body, _submitter_id=user_id) @@ -230,7 +230,7 @@ def user_workflows_get(status=False, subscriptions=False): def user_workflows_post(body): if not current_user or current_user.is_anonymous: return lm_exceptions.report_problem(401, "Unauthorized", detail=messages.no_user_in_session) - clear_cache(user_workflows_get) + clear_cache() return workflows_post(body) @@ -241,8 +241,7 @@ def user_workflow_subscribe(wf_uuid): return response subscription = lm.subscribe_user_resource(current_user, response.workflow) logger.debug("Created new subscription: %r", subscription) - clear_cache(user_workflows_get) - clear_cache(workflows_get_latest_version_by_id) + clear_cache() return auth_serializers.SubscriptionSchema(exclude=('meta', 'links')).dump(subscription), 201 @@ -253,8 +252,7 @@ def user_workflow_unsubscribe(wf_uuid): return response subscription = lm.unsubscribe_user_resource(current_user, response.workflow) logger.debug("Delete subscription: %r", subscription) - clear_cache(user_workflows_get) - clear_cache(workflows_get_latest_version_by_id) + clear_cache() return connexion.NoContent, 204 @@ -280,7 +278,7 @@ def user_registry_workflows_post(registry_uuid, body): return lm_exceptions.report_problem(401, "Unauthorized", detail=messages.no_user_in_session) try: registry = lm.get_workflow_registry_by_uuid(registry_uuid) - clear_cache(user_registry_workflows_get, registry_uuid) + clear_cache() return workflows_post(body, _registry=registry) except lm_exceptions.EntityNotFoundException: return lm_exceptions.report_problem(404, "Not Found", @@ -342,7 +340,7 @@ def workflows_post(body, _registry=None, _submitter_id=None): public=body.get('public', False) ) logger.debug("workflows_post. 
Created workflow '%s' (ver.%s)", w.uuid, w.version) - clear_cache(workflows_get) + clear_cache() return {'uuid': str(w.workflow.uuid), 'wf_version': w.version, 'name': w.name}, 201 except KeyError as e: return lm_exceptions.report_problem(400, "Bad Request", extra_info={"exception": str(e)}, @@ -379,12 +377,7 @@ def workflows_put(wf_uuid, body): wv.workflow.name = body.get('name', wv.workflow.name) wv.workflow.public = body.get('public', wv.workflow.public) wv.workflow.save() - clear_cache(workflows_get) - clear_cache(workflows_get_by_id) - clear_cache(workflows_get_latest_version_by_id) - clear_cache(workflows_get_versions_by_id) - clear_cache(workflows_get_status) - clear_cache(registry_workflows_get) + clear_cache() return connexion.NoContent, 204 @@ -397,12 +390,7 @@ def workflows_version_put(wf_uuid, wf_version, body): wv.name = body.get('name', wv.name) wv.version = body.get('version', wv.version) wv.save() - clear_cache(workflows_get) - clear_cache(workflows_get_by_id) - clear_cache(workflows_get_latest_version_by_id) - clear_cache(workflows_get_versions_by_id) - clear_cache(workflows_get_status) - clear_cache(registry_workflows_get) + clear_cache() return connexion.NoContent, 204 @@ -416,7 +404,7 @@ def workflows_delete(wf_uuid, wf_version): else: return lm_exceptions.report_problem(403, "Forbidden", detail=messages.no_user_in_session) - clear_cache(workflows_get) + clear_cache() return connexion.NoContent, 204 except OAuthIdentityNotFoundException as e: return lm_exceptions.report_problem(401, "Unauthorized", extra_info={"exception": str(e)}) @@ -536,7 +524,7 @@ def suites_post_instance(suite_uuid): data['service']['type'], data['service']['url'], data['resource']) - clear_cache(suites_get_instances, suite_uuid) + clear_cache() return {'uuid': str(test_instance.uuid)}, 201 except KeyError as e: return lm_exceptions.report_problem(400, "Bad Request", extra_info={"exception": str(e)}, @@ -586,7 +574,7 @@ def instances_delete_by_id(instance_uuid): if isinstance(response, Response): return response lm.deregister_test_instance(response) - clear_cache(suites_get_instances, instance_uuid) + clear_cache() return connexion.NoContent, 204 except OAuthIdentityNotFoundException as e: return lm_exceptions.report_problem(401, "Unauthorized", extra_info={"exception": str(e)}) diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index 98b1f516f..e12b19c68 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -125,9 +125,11 @@ def get(self, key: str): def delete_keys(self, pattern: str): logger.debug(f"Deleting keys by pattern: {pattern}") if isinstance(self.cache, RedisCache): + logger.debug("Redis backend detected!") + logger.debug(f"Pattern: {CACHE_PREFIX}{pattern}") for key in self.backend.scan_iter(f"{CACHE_PREFIX}{pattern}"): logger.debug("Delete key: %r", key) - redis.delete(key) + self.backend.delete(key) # global cache helper instance From cc5e9f919dd23952cc2d0f2d987dd52de9ec983a Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Wed, 10 Nov 2021 14:30:43 +0000 Subject: [PATCH 062/162] Allow setting the cache scope --- lifemonitor/cache.py | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/lifemonitor/cache.py
b/lifemonitor/cache.py index e12b19c68..cb13093b9 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -136,7 +136,7 @@ def delete_keys(self, pattern: str): helper: CacheHelper = CacheHelper(cache) -def _make_key(func=None, *args, **kwargs) -> str: +def _make_key(func=None, client_scope=True, *args, **kwargs) -> str: from lifemonitor.auth import current_registry, current_user fname = "" if func is None \ else func if isinstance(func, str) \ @@ -145,12 +145,13 @@ def _make_key(func=None, *args, **kwargs) -> str: logger.debug("make_key args: %r", args) logger.debug("make_key kwargs: %r", kwargs) result = "" - if current_user and not current_user.is_anonymous: - result += "{}-{}_".format(current_user.username, current_user.id) - if current_registry: - result += "{}_".format(current_registry.uuid) - if not current_registry and current_user.is_anonymous: - result += "anonymous_" + if client_scope: + if current_user and not current_user.is_anonymous: + result += "{}-{}_".format(current_user.username, current_user.id) + if current_registry: + result += "{}_".format(current_registry.uuid) + if not current_registry and current_user.is_anonymous: + result += "anonymous_" if func: result += fname if args: @@ -161,27 +162,27 @@ def _make_key(func=None, *args, **kwargs) -> str: return result -def clear_cache(func=None, *args, **kwargs): +def clear_cache(func=None, client_scope=True, *args, **kwargs): try: if func: - key = _make_key(func) + key = _make_key(func, client_scope) helper.delete_keys(f"{key}*") if args or kwargs: - key = _make_key(func, *args, **kwargs) + key = _make_key(func, client_scope, *args, **kwargs) helper.delete_keys(f"{key}*") else: - key = _make_key() + key = _make_key(client_scope) helper.delete_keys(f"{key}*") except Exception as e: logger.error("Error deleting cache: %r", e) -def cached(timeout=Timeout.REQUEST, unless=False): +def cached(timeout=Timeout.REQUEST, client_scope=True): def decorator(function): @functools.wraps(function) def wrapper(*args, **kwargs): - key = _make_key(function, *args, **kwargs) + key = _make_key(function, client_scope, *args, **kwargs) result = helper.get(key) if result is None: logger.debug(f"Getting value from the actual function for key {key}...") From dba4d5f79a53e4ac0b2b76c55305df2209ce2a09 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Wed, 10 Nov 2021 15:12:55 +0000 Subject: [PATCH 063/162] Update default cache timeout --- lifemonitor/api/models/testsuites/testinstance.py | 3 ++- settings.conf | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/lifemonitor/api/models/testsuites/testinstance.py b/lifemonitor/api/models/testsuites/testinstance.py index 82f227db6..90f54eec2 100644 --- a/lifemonitor/api/models/testsuites/testinstance.py +++ b/lifemonitor/api/models/testsuites/testinstance.py @@ -26,6 +26,7 @@ import lifemonitor.api.models as models from lifemonitor.api.models import db +from lifemonitor.cache import Timeout from lifemonitor.models import JSON, UUID, ModelMixin from .testsuite import TestSuite @@ -148,7 +149,7 @@ def get_test_build(self, build_number): build = self.testing_service.get_test_build(self, build_number) if build is not None: if build.status not in [models.BuildStatus.RUNNING, models.BuildStatus.WAITING]: - self.cache.set(key, build) + self.cache.set(key, build, timeout=Timeout.DEFAULT) else: logger.debug(f"Reusing test build {build} from cache...") return build diff --git a/settings.conf b/settings.conf index 39a25b2c7..90bad8738 100644 --- a/settings.conf +++ b/settings.conf @@ 
-55,7 +55,7 @@ REDIS_PORT_NUMBER=6379 # Cache settings CACHE_REDIS_DB=0 -CACHE_DEFAULT_TIMEOUT=30 +CACHE_DEFAULT_TIMEOUT=300 CACHE_REQUEST_TIMEOUT=30 CACHE_SESSION_TIMEOUT=3600 CACHE_BUILDS_TIMEOUT=84600 From 8ac2536af91e42111c8c7703607a0ba755e75952 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Wed, 10 Nov 2021 17:55:30 +0000 Subject: [PATCH 064/162] Allow configuring timeouts via settings --- lifemonitor/cache.py | 44 +++++++++++++++++++++++++++++++++++--------- settings.conf | 2 +- 2 files changed, 36 insertions(+), 10 deletions(-) diff --git a/lifemonitor/cache.py index cb13093b9..281d144a3 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -34,15 +34,6 @@ CACHE_PREFIX = "lifemonitor-api-cache:" -class Timeout: - # Set default timeouts - NONE = 0 - DEFAULT = os.environ.get('CACHE_DEFAULT_TIMEOUT', 60) - REQUEST = os.environ.get('CACHE_REQUEST_TIMEOUT', 300) - SESSION = os.environ.get('CACHE_SESSION_TIMEOUT', 600) - BUILDS = os.environ.get('CACHE_SESSION_TIMEOUT', 84600) - - # Set module logger logger = logging.getLogger(__name__) @@ -50,6 +41,40 @@ class Timeout: cache = Cache() +def _get_timeout(name: str, default: int = 0, config=None) -> int: + result = None + if config is not None: + try: + result = config.get(name) + except Exception as e: + logger.debug(e) + result = result or os.environ.get(name, default) + logger.debug("Getting timeout %r: %r", name, result) + return result + + +def _get_timeout_key(n: str) -> str: + return f"CACHE_{n}_TIMEOUT" + + +class Timeout: + # Set default timeouts + NONE = 0 + DEFAULT = _get_timeout(_get_timeout_key('DEFAULT'), default=300) + REQUEST = _get_timeout(_get_timeout_key('REQUEST'), default=30) + SESSION = _get_timeout(_get_timeout_key('SESSION'), default=3600) + BUILD = _get_timeout(_get_timeout_key('BUILD'), default=300) + + @classmethod + def update(cls, config): + for t in ('DEFAULT', 'REQUEST', 'SESSION', 'BUILD'): + try: + key = _get_timeout_key(t) + setattr(cls, key, _get_timeout(key, config=config)) + except: + logger.debug("Error when updating timeout %r", t) + + def init_cache(app: Flask): cache_type = app.config.get( 'CACHE_TYPE', @@ -71,6 +96,7 @@ def init_cache(app: Flask): )) logger.debug("RedisCache connection url: %s", app.config.get('CACHE_REDIS_URL')) cache.init_app(app) + Timeout.update(app.config) logger.debug(f"Cache initialised (type: {cache_type})") diff --git a/settings.conf index 90bad8738..8e7b80cc5 100644 --- a/settings.conf +++ b/settings.conf @@ -58,7 +58,7 @@ CACHE_REDIS_DB=0 CACHE_DEFAULT_TIMEOUT=300 CACHE_REQUEST_TIMEOUT=30 CACHE_SESSION_TIMEOUT=3600 -CACHE_BUILDS_TIMEOUT=84600 +CACHE_BUILD_TIMEOUT=84600 # Github OAuth2 settings #GITHUB_CLIENT_ID="___YOUR_GITHUB_OAUTH2_CLIENT_ID___" From 6564f78a5719e0610fc51c11b926e6d860c97a7e Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Wed, 10 Nov 2021 17:57:00 +0000 Subject: [PATCH 065/162] Use the BUILD timeout for test builds --- lifemonitor/api/models/testsuites/testinstance.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lifemonitor/api/models/testsuites/testinstance.py index 90f54eec2..673c0ad3f 100644 --- a/lifemonitor/api/models/testsuites/testinstance.py +++ b/lifemonitor/api/models/testsuites/testinstance.py @@ -149,7 +149,7 @@ def get_test_build(self, build_number): build = self.testing_service.get_test_build(self, build_number) if build is not None: if build.status not in [models.BuildStatus.RUNNING,
models.BuildStatus.WAITING]: - self.cache.set(key, build, timeout=Timeout.DEFAULT) + self.cache.set(key, build, timeout=Timeout.BUILD) else: logger.debug(f"Reusing test build {build} from cache...") return build From 4ffd90cc44c69043eeee964eace343d6f2036cd6 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Wed, 10 Nov 2021 18:04:51 +0000 Subject: [PATCH 066/162] Fix bare except --- lifemonitor/cache.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index 281d144a3..9fcb4b8e8 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -71,7 +71,7 @@ def update(cls, config): try: key = _get_timeout_key(t) setattr(cls, key, _get_timeout(key, config=config)) - except: + except Exception: logger.debug("Error when updating timeout %r", t) From 68b36731e4b562b761d5686311721681e1b3ba4d Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Wed, 10 Nov 2021 19:19:04 +0000 Subject: [PATCH 067/162] Fix missing REDIS_HOST on pod env --- k8s/templates/_helpers.tpl | 2 ++ 1 file changed, 2 insertions(+) diff --git a/k8s/templates/_helpers.tpl b/k8s/templates/_helpers.tpl index ebeb4ae3e..b26ba66b2 100644 --- a/k8s/templates/_helpers.tpl +++ b/k8s/templates/_helpers.tpl @@ -77,6 +77,8 @@ Define environment variables shared by some pods. value: "{{ .Values.postgresql.postgresqlPassword }}" - name: POSTGRESQL_DATABASE value: "{{ .Values.postgresql.postgresqlDatabase }}" +- name: REDIS_HOST + value: "{{ .Release.Name }}-redis-master" - name: LIFEMONITOR_TLS_KEY value: "/lm/certs/tls.key" - name: LIFEMONITOR_TLS_CERT From 1334955d3dfc4c9114e93b6dd0842f984fd3458e Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Wed, 10 Nov 2021 19:19:26 +0000 Subject: [PATCH 068/162] Fix missing WORKER_PROCESSES on pod env --- k8s/templates/_helpers.tpl | 2 ++ 1 file changed, 2 insertions(+) diff --git a/k8s/templates/_helpers.tpl b/k8s/templates/_helpers.tpl index b26ba66b2..afaf478af 100644 --- a/k8s/templates/_helpers.tpl +++ b/k8s/templates/_helpers.tpl @@ -79,6 +79,8 @@ Define environment variables shared by some pods. value: "{{ .Values.postgresql.postgresqlDatabase }}" - name: REDIS_HOST value: "{{ .Release.Name }}-redis-master" +- name: WORKER_PROCESSES + value: "{{ .Values.lifemonitor.worker_processes }}" - name: LIFEMONITOR_TLS_KEY value: "/lm/certs/tls.key" - name: LIFEMONITOR_TLS_CERT From 44eb4a7fc0b0d3bd38dcca11cada49fa788a84f9 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Thu, 11 Nov 2021 08:21:56 +0000 Subject: [PATCH 069/162] Fix back-end selector --- k8s/templates/backend-deployment.yaml | 3 +++ k8s/templates/service.yaml | 1 + 2 files changed, 4 insertions(+) diff --git a/k8s/templates/backend-deployment.yaml b/k8s/templates/backend-deployment.yaml index 7ee59cd8f..6c2b85387 100644 --- a/k8s/templates/backend-deployment.yaml +++ b/k8s/templates/backend-deployment.yaml @@ -4,6 +4,7 @@ metadata: name: {{ include "chart.fullname" . }}-backend labels: {{- include "chart.labels" . | nindent 4 }} + app.kubernetes.io/component: backend spec: {{- if not .Values.lifemonitor.autoscaling.enabled }} replicas: {{ .Values.lifemonitor.replicaCount }} @@ -11,6 +12,7 @@ spec: selector: matchLabels: {{- include "chart.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: backend template: metadata: {{- with .Values.lifemonitor.podAnnotations }} @@ -19,6 +21,7 @@ spec: {{- end }} labels: {{- include "chart.selectorLabels" . 
| nindent 8 }} + app.kubernetes.io/component: backend spec: {{- with .Values.lifemonitor.imagePullSecrets }} imagePullSecrets: diff --git a/k8s/templates/service.yaml b/k8s/templates/service.yaml index 90cb29f12..ac8c8ccf6 100644 --- a/k8s/templates/service.yaml +++ b/k8s/templates/service.yaml @@ -13,3 +13,4 @@ spec: name: http selector: {{- include "chart.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: backend From 88501c16e63d899a1b090316503d38b05283aad2 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Thu, 11 Nov 2021 08:23:36 +0000 Subject: [PATCH 070/162] Update container name of worker process --- k8s/templates/worker-deployment.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/k8s/templates/worker-deployment.yaml b/k8s/templates/worker-deployment.yaml index cede78b95..7938674da 100644 --- a/k8s/templates/worker-deployment.yaml +++ b/k8s/templates/worker-deployment.yaml @@ -33,7 +33,7 @@ spec: imagePullPolicy: IfNotPresent args: ["job", "{{ include "chart.fullname" . }}-init"] containers: - - name: app + - name: worker securityContext: {{- toYaml .Values.lifemonitor.securityContext | nindent 12 }} image: {{ .Values.lifemonitor.image }} From 2798637f0df9c64485be343024d75b26767b9ccb Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Thu, 11 Nov 2021 08:47:31 +0000 Subject: [PATCH 071/162] Add dedicated values for workers --- k8s/templates/_helpers.tpl | 2 +- k8s/templates/secret.yaml | 2 +- k8s/templates/worker-deployment.yaml | 48 ++++++++++++------------ k8s/values.yaml | 56 ++++++++++++++++++++++++++-- 4 files changed, 78 insertions(+), 30 deletions(-) diff --git a/k8s/templates/_helpers.tpl b/k8s/templates/_helpers.tpl index afaf478af..57da81499 100644 --- a/k8s/templates/_helpers.tpl +++ b/k8s/templates/_helpers.tpl @@ -80,7 +80,7 @@ Define environment variables shared by some pods. - name: REDIS_HOST value: "{{ .Release.Name }}-redis-master" - name: WORKER_PROCESSES - value: "{{ .Values.lifemonitor.worker_processes }}" + value: "{{ .Values.worker.processes }}" - name: LIFEMONITOR_TLS_KEY value: "/lm/certs/tls.key" - name: LIFEMONITOR_TLS_CERT diff --git a/k8s/templates/secret.yaml b/k8s/templates/secret.yaml index 936cfc7e0..0ed2204cc 100644 --- a/k8s/templates/secret.yaml +++ b/k8s/templates/secret.yaml @@ -37,7 +37,7 @@ stringData: POSTGRESQL_PASSWORD={{ .Values.postgresql.postgresqlPassword }} # Dramatiq worker settings - WORKER_PROCESSES={{ .Values.lifemonitor.worker_processes }} + WORKER_PROCESSES={{ .Values.worker.processes }} # Redis settings REDIS_HOST={{ .Release.Name }}-redis-master diff --git a/k8s/templates/worker-deployment.yaml b/k8s/templates/worker-deployment.yaml index 7938674da..073390975 100644 --- a/k8s/templates/worker-deployment.yaml +++ b/k8s/templates/worker-deployment.yaml @@ -5,28 +5,28 @@ metadata: labels: {{- include "chart.labels" . | nindent 4 }} spec: - {{- if not .Values.lifemonitor.autoscaling.enabled }} - replicas: {{ .Values.lifemonitor.replicaCount }} + {{- if not .Values.worker.autoscaling.enabled }} + replicas: {{ .Values.worker.replicaCount }} {{- end }} selector: matchLabels: {{- include "chart.selectorLabels" . | nindent 6 }} template: metadata: - {{- with .Values.lifemonitor.podAnnotations }} + {{- with .Values.worker.podAnnotations }} annotations: {{- toYaml . | nindent 8 }} {{- end }} labels: {{- include "chart.selectorLabels" . | nindent 8 }} spec: - {{- with .Values.lifemonitor.imagePullSecrets }} + {{- with .Values.worker.imagePullSecrets }} imagePullSecrets: {{- toYaml . 
| nindent 8 }} {{- end }} serviceAccountName: {{ include "chart.serviceAccountName" . }} securityContext: - {{- toYaml .Values.lifemonitor.podSecurityContext | nindent 8 }} + {{- toYaml .Values.worker.podSecurityContext | nindent 8 }} initContainers: - name: init image: "crs4/k8s-wait-for:latest" @@ -35,40 +35,40 @@ spec: containers: - name: worker securityContext: - {{- toYaml .Values.lifemonitor.securityContext | nindent 12 }} - image: {{ .Values.lifemonitor.image }} - imagePullPolicy: {{ .Values.lifemonitor.imagePullPolicy }} + {{- toYaml .Values.worker.securityContext | nindent 12 }} + image: {{ .Values.worker.image }} + imagePullPolicy: {{ .Values.worker.imagePullPolicy }} command: ["/bin/sh","-c"] args: ["/usr/local/bin/worker_entrypoint.sh"] env: {{ include "lifemonitor.common-env" . | indent 12 }} volumeMounts: {{ include "lifemonitor.common-volume-mounts" . | indent 12 }} - livenessProbe: - httpGet: - scheme: HTTPS - path: /health - port: 8000 - readinessProbe: - httpGet: - scheme: HTTPS - path: /health - port: 8000 - initialDelaySeconds: 5 - periodSeconds: 3 + # livenessProbe: + # httpGet: + # scheme: HTTPS + # path: /health + # port: 8000 + # readinessProbe: + # httpGet: + # scheme: HTTPS + # path: /health + # port: 8000 + # initialDelaySeconds: 5 + # periodSeconds: 3 resources: - {{- toYaml .Values.lifemonitor.resources | nindent 12 }} + {{- toYaml .Values.worker.resources | nindent 12 }} volumes: {{ include "lifemonitor.common-volume" . | indent 12 }} - {{- with .Values.lifemonitor.nodeSelector }} + {{- with .Values.worker.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} {{- end }} - {{- with .Values.lifemonitor.affinity }} + {{- with .Values.worker.affinity }} affinity: {{- toYaml . | nindent 8 }} {{- end }} - {{- with .Values.lifemonitor.tolerations }} + {{- with .Values.worker.tolerations }} tolerations: {{- toYaml . | nindent 8 }} {{- end }} diff --git a/k8s/values.yaml b/k8s/values.yaml index e0c9fef4c..980b820c7 100644 --- a/k8s/values.yaml +++ b/k8s/values.yaml @@ -96,16 +96,64 @@ lifemonitor: type: ClusterIP port: 8000 - # Dramatiq worker settings - worker_processes: 1 - persistence: storageClass: *storageClass # Enable/Disable the pod to test connection to the LifeMonitor back-end enableTestConnection: false - resources: {} + resources: + {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + + nodeSelector: {} + + tolerations: [] + + affinity: {} + +worker: + image: *lifemonitorImage + imagePullPolicy: *lifemonitorImagePullPolicy + imagePullSecrets: [] + + processes: 1 + + podAnnotations: {} + + podSecurityContext: + {} + # fsGroup: 2000 + + securityContext: + {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + + replicaCount: 1 + + resources: + {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. 
This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following From 8a533f9295c7607afc4d53f7c7f54cc7173617fe Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Thu, 11 Nov 2021 08:57:27 +0000 Subject: [PATCH 072/162] Add values to configure cache --- k8s/templates/secret.yaml | 12 +++++++----- k8s/values.yaml | 8 ++++++++ 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/k8s/templates/secret.yaml b/k8s/templates/secret.yaml index 0ed2204cc..adb370bf3 100644 --- a/k8s/templates/secret.yaml +++ b/k8s/templates/secret.yaml @@ -45,11 +45,13 @@ stringData: REDIS_PASSWORD={{ .Values.redis.auth.password }} # Redis Cache - CACHE_REDIS_DB=0 - CACHE_REDIS_URL=redis://:{{ .Values.redis.auth.password }}@{{ .Release.Name }}-redis-master:{{ .Values.redis.master.service.port }}/0 - CACHE_DEFAULT_TIMEOUT=300 - CACHE_SESSION_TIMEOUT=3600 - CACHE_BUILDS_TIMEOUT=84600 + CACHE_REDIS_HOST={{ .Release.Name }}-redis-master + CACHE_REDIS_DB={{ .Values.cache.db }} + CACHE_REDIS_URL=redis://:{{ .Values.redis.auth.password }}@{{ .Release.Name }}-redis-master:{{ .Values.redis.master.service.port }}/{{ .Values.cachedb }} + CACHE_DEFAULT_TIMEOUT={{ .Values.cache.timeout.default }} + CACHE_REQUEST_TIMEOUT={{ .Values.cache.timeout.request }} + CACHE_SESSION_TIMEOUT={{ .Values.cache.timeout.session }} + CACHE_BUILD_TIMEOUT={{ .Values.cache.timeout.build }} # Set admin credentials LIFEMONITOR_ADMIN_PASSWORD={{ .Values.lifemonitor.administrator.password }} diff --git a/k8s/values.yaml b/k8s/values.yaml index 980b820c7..b5a61582e 100644 --- a/k8s/values.yaml +++ b/k8s/values.yaml @@ -59,6 +59,14 @@ testing_services: # token: # type: travis +cache: + db: 0 + timeout: + default: 30 + request: 15 + session: 3600 + build: 84600 + lifemonitor: replicaCount: 1 From 2a46c871599d0bf5e43ec70574697440d701c317 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Thu, 11 Nov 2021 09:46:21 +0000 Subject: [PATCH 073/162] Fix typo --- k8s/templates/secret.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/k8s/templates/secret.yaml b/k8s/templates/secret.yaml index adb370bf3..bbe1a757b 100644 --- a/k8s/templates/secret.yaml +++ b/k8s/templates/secret.yaml @@ -47,7 +47,7 @@ stringData: # Redis Cache CACHE_REDIS_HOST={{ .Release.Name }}-redis-master CACHE_REDIS_DB={{ .Values.cache.db }} - CACHE_REDIS_URL=redis://:{{ .Values.redis.auth.password }}@{{ .Release.Name }}-redis-master:{{ .Values.redis.master.service.port }}/{{ .Values.cachedb }} + CACHE_REDIS_URL=redis://:{{ .Values.redis.auth.password }}@{{ .Release.Name }}-redis-master:{{ .Values.redis.master.service.port }}/{{ .Values.cache.db }} CACHE_DEFAULT_TIMEOUT={{ .Values.cache.timeout.default }} CACHE_REQUEST_TIMEOUT={{ .Values.cache.timeout.request }} CACHE_SESSION_TIMEOUT={{ .Values.cache.timeout.session }} From 5bd22da15b440bf2b14d175758921d0d78d8705d Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Thu, 11 Nov 2021 11:34:36 +0000 Subject: [PATCH 074/162] Rename filename of init job --- k8s/templates/{init-job.yaml => job-init.yaml} | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) rename k8s/templates/{init-job.yaml => job-init.yaml} (93%) diff --git a/k8s/templates/init-job.yaml b/k8s/templates/job-init.yaml similarity index 93% rename from k8s/templates/init-job.yaml rename to k8s/templates/job-init.yaml index 243a1c8a6..07fe94ea7 100644 --- a/k8s/templates/init-job.yaml +++ b/k8s/templates/job-init.yaml @@ -13,6 +13,7 @@ spec: 
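
For reference, the CACHE_REDIS_URL rendered by the secret template in PATCH 072 (typo fixed in PATCH 073) follows the standard Redis URL scheme. A minimal sketch of the same composition outside of Helm; the function name and defaults here are illustrative, not part of the chart:

    from urllib.parse import quote

    def cache_redis_url(release: str, password: str, port: int = 6379, db: int = 0) -> str:
        # mirrors redis://:<password>@<release>-redis-master:<port>/<db>
        return f"redis://:{quote(password)}@{release}-redis-master:{port}/{db}"

    assert cache_redis_url("lm", "s3cret") == "redis://:s3cret@lm-redis-master:6379/0"
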
imagePullPolicy: {{ .Values.lifemonitor.imagePullPolicy }} command: ["/bin/sh","-c"] args: ["wait-for-postgres.sh && flask init db"] +# args: ["wait-for-postgres.sh && sleep infinity"] env: {{ include "lifemonitor.common-env" . | indent 10 }} volumeMounts: @@ -32,4 +33,4 @@ spec: tolerations: {{- toYaml . | nindent 8 }} {{- end }} - backoffLimit: 4 \ No newline at end of file + backoffLimit: 4 From 1fa66cf64e362ec8682547b0548b2d7d39a58991 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Thu, 11 Nov 2021 11:35:03 +0000 Subject: [PATCH 075/162] Add job to handle upgrades --- k8s/templates/job-upgrade.yaml | 41 ++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 k8s/templates/job-upgrade.yaml diff --git a/k8s/templates/job-upgrade.yaml b/k8s/templates/job-upgrade.yaml new file mode 100644 index 000000000..065497dfd --- /dev/null +++ b/k8s/templates/job-upgrade.yaml @@ -0,0 +1,41 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "chart.fullname" . }}-upgrade + labels: + {{- include "chart.labels" . | nindent 4 }} + annotations: + # This is what defines this resource as a hook. Without this line, the + # job is considered part of the release. + "helm.sh/hook": post-upgrade + "helm.sh/hook-weight": "-5" + "helm.sh/hook-delete-policy": hook-succeeded +spec: + template: + spec: + containers: + - name: lifemonitor-upgrade + image: "{{ .Values.lifemonitor.image }}" + imagePullPolicy: {{ .Values.lifemonitor.imagePullPolicy }} + command: ["/bin/sh","-c"] + args: ["wait-for-postgres.sh && flask init db"] + env: +{{ include "lifemonitor.common-env" . | indent 10 }} + volumeMounts: +{{ include "lifemonitor.common-volume-mounts" . | indent 10 }} + restartPolicy: Never + volumes: +{{ include "lifemonitor.common-volume" . | indent 8 }} + {{- with .Values.lifemonitor.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.lifemonitor.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.lifemonitor.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + backoffLimit: 4 From be17346699d5da314d74fbd6fd25d0c0421d3d6e Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Thu, 11 Nov 2021 11:36:09 +0000 Subject: [PATCH 076/162] Auto restart worker and back-end when settings change --- k8s/templates/backend-deployment.yaml | 7 ++++--- k8s/templates/worker-deployment.yaml | 3 ++- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/k8s/templates/backend-deployment.yaml b/k8s/templates/backend-deployment.yaml index 6c2b85387..4f6f9ce82 100644 --- a/k8s/templates/backend-deployment.yaml +++ b/k8s/templates/backend-deployment.yaml @@ -14,11 +14,12 @@ spec: {{- include "chart.selectorLabels" . | nindent 6 }} app.kubernetes.io/component: backend template: - metadata: - {{- with .Values.lifemonitor.podAnnotations }} + metadata: annotations: + checksum/settings: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- with .Values.lifemonitor.podAnnotations }} {{- toYaml . | nindent 8 }} - {{- end }} + {{- end }} labels: {{- include "chart.selectorLabels" . | nindent 8 }} app.kubernetes.io/component: backend diff --git a/k8s/templates/worker-deployment.yaml b/k8s/templates/worker-deployment.yaml index 073390975..42bd9b871 100644 --- a/k8s/templates/worker-deployment.yaml +++ b/k8s/templates/worker-deployment.yaml @@ -13,8 +13,9 @@ spec: {{- include "chart.selectorLabels" . 
| nindent 6 }} template: metadata: - {{- with .Values.worker.podAnnotations }} annotations: + checksum/settings: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- with .Values.worker.podAnnotations }} {{- toYaml . | nindent 8 }} {{- end }} labels: From 28fac5cb25cc44816c0514dcfc097465588d8415 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Thu, 11 Nov 2021 17:09:05 +0000 Subject: [PATCH 077/162] Add helper function to set the default image --- k8s/templates/_helpers.tpl | 13 +++++++++++++ k8s/templates/backend-deployment.yaml | 2 +- k8s/templates/job-init.yaml | 3 +-- k8s/templates/job-upgrade.yaml | 2 +- 4 files changed, 16 insertions(+), 4 deletions(-) diff --git a/k8s/templates/_helpers.tpl b/k8s/templates/_helpers.tpl index 57da81499..e3b010810 100644 --- a/k8s/templates/_helpers.tpl +++ b/k8s/templates/_helpers.tpl @@ -48,6 +48,19 @@ app.kubernetes.io/name: {{ include "chart.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} {{- end }} +{{/* + +Define lifemonitor image +*/}} +{{- define "chart.lifemonitor.image" -}} +{{- if .Values.lifemonitor.image }} +{{- printf "%s" .Values.lifemonitor.image }} +{{- else }} +{{- printf "crs4/lifemonitor:%s" .Chart.AppVersion }} +{{- end }} +{{- end }} + + {{/* Create the name of the service account to use */}} diff --git a/k8s/templates/backend-deployment.yaml b/k8s/templates/backend-deployment.yaml index 4f6f9ce82..77e1d1d93 100644 --- a/k8s/templates/backend-deployment.yaml +++ b/k8s/templates/backend-deployment.yaml @@ -40,7 +40,7 @@ spec: - name: app securityContext: {{- toYaml .Values.lifemonitor.securityContext | nindent 12 }} - image: {{ .Values.lifemonitor.image }} + image: {{ include "chart.lifemonitor.image" . }} imagePullPolicy: {{ .Values.lifemonitor.imagePullPolicy }} env: {{ include "lifemonitor.common-env" . | indent 12 }} diff --git a/k8s/templates/job-init.yaml b/k8s/templates/job-init.yaml index 07fe94ea7..70d08e82e 100644 --- a/k8s/templates/job-init.yaml +++ b/k8s/templates/job-init.yaml @@ -9,11 +9,10 @@ spec: spec: containers: - name: lifemonitor-init - image: "{{ .Values.lifemonitor.image }}" + image: {{ include "chart.lifemonitor.image" . }} imagePullPolicy: {{ .Values.lifemonitor.imagePullPolicy }} command: ["/bin/sh","-c"] args: ["wait-for-postgres.sh && flask init db"] -# args: ["wait-for-postgres.sh && sleep infinity"] env: {{ include "lifemonitor.common-env" . | indent 10 }} volumeMounts: diff --git a/k8s/templates/job-upgrade.yaml b/k8s/templates/job-upgrade.yaml index 065497dfd..11f3fd022 100644 --- a/k8s/templates/job-upgrade.yaml +++ b/k8s/templates/job-upgrade.yaml @@ -15,7 +15,7 @@ spec: spec: containers: - name: lifemonitor-upgrade - image: "{{ .Values.lifemonitor.image }}" + image: {{ include "chart.lifemonitor.image" . }} imagePullPolicy: {{ .Values.lifemonitor.imagePullPolicy }} command: ["/bin/sh","-c"] args: ["wait-for-postgres.sh && flask init db"] From 7a3ae56776d312547ddb817abb5b287916ffbb4a Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Thu, 11 Nov 2021 17:09:19 +0000 Subject: [PATCH 078/162] Fix labels --- k8s/templates/_helpers.tpl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/k8s/templates/_helpers.tpl b/k8s/templates/_helpers.tpl index e3b010810..40f949c38 100644 --- a/k8s/templates/_helpers.tpl +++ b/k8s/templates/_helpers.tpl @@ -35,9 +35,9 @@ Common labels */}} {{- define "chart.labels" -}} app.kubernetes.io/name: {{ include "chart.name" . }} -helm.sh/chart: {{ include "chart.chart" . 
}} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} +helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" {{- end }} {{/* From 70c07f31b377d159784e9bebff737bdd3a8f00b9 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Thu, 11 Nov 2021 17:09:48 +0000 Subject: [PATCH 079/162] Remove blanks --- k8s/templates/backend-deployment.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/k8s/templates/backend-deployment.yaml b/k8s/templates/backend-deployment.yaml index 77e1d1d93..d881a717a 100644 --- a/k8s/templates/backend-deployment.yaml +++ b/k8s/templates/backend-deployment.yaml @@ -14,7 +14,7 @@ spec: {{- include "chart.selectorLabels" . | nindent 6 }} app.kubernetes.io/component: backend template: - metadata: + metadata: annotations: checksum/settings: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} {{- with .Values.lifemonitor.podAnnotations }} From 1288f7801ba246fbe829626ebb0dbbf4b66ee9db Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Thu, 11 Nov 2021 17:11:40 +0000 Subject: [PATCH 080/162] Fix image of workers --- k8s/templates/worker-deployment.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/k8s/templates/worker-deployment.yaml b/k8s/templates/worker-deployment.yaml index 42bd9b871..73be1b14b 100644 --- a/k8s/templates/worker-deployment.yaml +++ b/k8s/templates/worker-deployment.yaml @@ -37,7 +37,7 @@ spec: - name: worker securityContext: {{- toYaml .Values.worker.securityContext | nindent 12 }} - image: {{ .Values.worker.image }} + image: {{ include "chart.lifemonitor.image" . }} imagePullPolicy: {{ .Values.worker.imagePullPolicy }} command: ["/bin/sh","-c"] args: ["/usr/local/bin/worker_entrypoint.sh"] From df956ef56a0a2302ebadede6ebc56819ea54f317 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Thu, 11 Nov 2021 17:20:36 +0000 Subject: [PATCH 081/162] Fix indentation --- k8s/templates/backend-deployment.yaml | 6 +++--- k8s/templates/job-init.yaml | 10 +++++----- k8s/templates/worker-deployment.yaml | 6 +++--- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/k8s/templates/backend-deployment.yaml b/k8s/templates/backend-deployment.yaml index d881a717a..3c9ec93df 100644 --- a/k8s/templates/backend-deployment.yaml +++ b/k8s/templates/backend-deployment.yaml @@ -43,9 +43,9 @@ spec: image: {{ include "chart.lifemonitor.image" . }} imagePullPolicy: {{ .Values.lifemonitor.imagePullPolicy }} env: -{{ include "lifemonitor.common-env" . | indent 12 }} + {{- include "lifemonitor.common-env" . | nindent 12 }} volumeMounts: -{{ include "lifemonitor.common-volume-mounts" . | indent 12 }} + {{- include "lifemonitor.common-volume-mounts" . | nindent 12 }} ports: - name: http containerPort: 8000 @@ -65,7 +65,7 @@ spec: resources: {{- toYaml .Values.lifemonitor.resources | nindent 12 }} volumes: -{{ include "lifemonitor.common-volume" . | indent 12 }} + {{- include "lifemonitor.common-volume" . | nindent 8 }} {{- with .Values.lifemonitor.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} diff --git a/k8s/templates/job-init.yaml b/k8s/templates/job-init.yaml index 70d08e82e..4ebca0339 100644 --- a/k8s/templates/job-init.yaml +++ b/k8s/templates/job-init.yaml @@ -14,12 +14,12 @@ spec: command: ["/bin/sh","-c"] args: ["wait-for-postgres.sh && flask init db"] env: -{{ include "lifemonitor.common-env" . | indent 10 }} + {{- include "lifemonitor.common-env" . | nindent 10 }} volumeMounts: -{{ include "lifemonitor.common-volume-mounts" . 
| indent 10 }} - restartPolicy: Never + {{- include "lifemonitor.common-volume-mounts" . | nindent 10 }} + restartPolicy: OnFailure volumes: -{{ include "lifemonitor.common-volume" . | indent 8 }} + {{- include "lifemonitor.common-volume" . | nindent 8 }} {{- with .Values.lifemonitor.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} @@ -30,6 +30,6 @@ spec: {{- end }} {{- with .Values.lifemonitor.tolerations }} tolerations: - {{- toYaml . | nindent 8 }} + {{- toYaml . | nindent 8 }} {{- end }} backoffLimit: 4 diff --git a/k8s/templates/worker-deployment.yaml b/k8s/templates/worker-deployment.yaml index 73be1b14b..ce99c4c61 100644 --- a/k8s/templates/worker-deployment.yaml +++ b/k8s/templates/worker-deployment.yaml @@ -42,9 +42,9 @@ spec: command: ["/bin/sh","-c"] args: ["/usr/local/bin/worker_entrypoint.sh"] env: -{{ include "lifemonitor.common-env" . | indent 12 }} + {{- include "lifemonitor.common-env" . | nindent 12 }} volumeMounts: -{{ include "lifemonitor.common-volume-mounts" . | indent 12 }} + {{- include "lifemonitor.common-volume-mounts" . | nindent 12 }} # livenessProbe: # httpGet: # scheme: HTTPS @@ -60,7 +60,7 @@ spec: resources: {{- toYaml .Values.worker.resources | nindent 12 }} volumes: -{{ include "lifemonitor.common-volume" . | indent 12 }} + {{- include "lifemonitor.common-volume" . | nindent 8 }} {{- with .Values.worker.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} From f3daf9f05245a7832901ff5aa15c23d32b61e949 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Thu, 11 Nov 2021 17:31:17 +0000 Subject: [PATCH 082/162] Bump chart version number --- k8s/Chart.yaml | 4 ++-- k8s/values.yaml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/k8s/Chart.yaml b/k8s/Chart.yaml index 208c09375..1d3baf92c 100644 --- a/k8s/Chart.yaml +++ b/k8s/Chart.yaml @@ -7,12 +7,12 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.3.0 +version: 0.4.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. 
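
Note that, combined with the "chart.lifemonitor.image" helper introduced in PATCH 077, the appVersion bumped below also becomes the tag of the default container image. A rough Python rendering of that fallback, for illustration only (the helper itself is Go template code):

    from typing import Optional

    def lifemonitor_image(values_image: Optional[str], app_version: str) -> str:
        # an explicit .Values.lifemonitor.image wins; otherwise crs4/lifemonitor:<appVersion>
        return values_image or f"crs4/lifemonitor:{app_version}"

    assert lifemonitor_image(None, "0.4.0") == "crs4/lifemonitor:0.4.0"
    assert lifemonitor_image("crs4/lifemonitor:master", "0.4.0") == "crs4/lifemonitor:master"
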
-appVersion: 0.1.0 +appVersion: 0.4.0 # Chart dependencies dependencies: diff --git a/k8s/values.yaml b/k8s/values.yaml index b5a61582e..c801d983a 100644 --- a/k8s/values.yaml +++ b/k8s/values.yaml @@ -70,7 +70,7 @@ cache: lifemonitor: replicaCount: 1 - image: &lifemonitorImage crs4/lifemonitor:master + image: &lifemonitorImage crs4/lifemonitor:0.4.0 imagePullPolicy: &lifemonitorImagePullPolicy Always imagePullSecrets: [] From 92ddd439b0ef23ef880f3f6b426357facb586f47 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Fri, 12 Nov 2021 09:14:53 +0000 Subject: [PATCH 083/162] Refactor --- lifemonitor/api/models/services/github.py | 52 +++++++----- lifemonitor/api/models/services/travis.py | 25 +++--- .../api/models/testsuites/testbuild.py | 18 ++--- .../api/models/testsuites/testinstance.py | 72 ++++------------- lifemonitor/cache.py | 81 ++++++++++++------- 5 files changed, 120 insertions(+), 128 deletions(-) diff --git a/lifemonitor/api/models/services/github.py b/lifemonitor/api/models/services/github.py index 718f3f7c4..5ed01f36d 100644 --- a/lifemonitor/api/models/services/github.py +++ b/lifemonitor/api/models/services/github.py @@ -29,7 +29,7 @@ import lifemonitor.api.models as models import lifemonitor.exceptions as lm_exceptions - +from lifemonitor.cache import Timeout, cached import github from github import Github, GithubException @@ -107,20 +107,28 @@ def _gh_service(self) -> Github: def _get_workflow_info(self, resource): return self._parse_workflow_url(resource) + @cached(timeout=Timeout.NONE, client_scope=False) def _get_repo(self, test_instance: models.TestInstance): - logger.debug("Getting github repository...") - key = f"github_repo_{test_instance.uuid}" - repository = self.cache.get(key) - if repository is None: - logger.debug("Getting github repository from remote service...") - _, repo_full_name, _ = self._get_workflow_info(test_instance.resource) - repository = self._gh_service.get_repo(repo_full_name) - logger.debug("Repo ID: %s", repository.id) - logger.debug("Repo full name: %s", repository.full_name) - logger.debug("Repo URL: %s", f'https://github.com/{repository.full_name}') - self.cache.set(key, repository) - else: - logger.debug("Reusing github repository from cache...") + # logger.debug("Getting github repository...") + # key = f"github_repo_{test_instance.uuid}" + # repository = self.cache.get(key) + # if repository is None: + # logger.debug("Getting github repository from remote service...") + # _, repo_full_name, _ = self._get_workflow_info(test_instance.resource) + # repository = self._gh_service.get_repo(repo_full_name) + # logger.debug("Repo ID: %s", repository.id) + # logger.debug("Repo full name: %s", repository.full_name) + # logger.debug("Repo URL: %s", f'https://github.com/{repository.full_name}') + # self.cache.set(key, repository) + # else: + # logger.debug("Reusing github repository from cache...") + # return repository + logger.debug("Getting github repository from remote service...") + _, repo_full_name, _ = self._get_workflow_info(test_instance.resource) + repository = self._gh_service.get_repo(repo_full_name) + logger.debug("Repo ID: %s", repository.id) + logger.debug("Repo full name: %s", repository.full_name) + logger.debug("Repo URL: %s", f'https://github.com/{repository.full_name}') return repository @staticmethod @@ -144,15 +152,17 @@ def check_connection(self) -> bool: logger.info("Caught exception from Github GET /rate_limit: %s. 
Connection not working?", e) return False + @cached(timeout=Timeout.NONE, client_scope=False) def _get_gh_workflow(self, repository, workflow_id): logger.debug("Getting github workflow...") - key = f"github_workflow_{repository}_{workflow_id}" - workflow = self.cache.get(key) - if workflow is None: - logger.debug("Getting github workflow from remote service...") - workflow = self._gh_service.get_repo(repository).get_workflow(workflow_id) - self.cache.set(key, workflow) - return workflow + return self._gh_service.get_repo(repository).get_workflow(workflow_id) + # key = f"github_workflow_{repository}_{workflow_id}" + # workflow = self.cache.get(key) + # if workflow is None: + # logger.debug("Getting github workflow from remote service...") + # workflow = self._gh_service.get_repo(repository).get_workflow(workflow_id) + # self.cache.set(key, workflow) + # return workflow def _iter_runs(self, test_instance: models.TestInstance, status: str = None) -> Generator[github.WorkflowRun.WorkflowRun]: _, repository, workflow_id = self._get_workflow_info(test_instance.resource) diff --git a/lifemonitor/api/models/services/travis.py b/lifemonitor/api/models/services/travis.py index 55542cf5d..d5d24ea15 100644 --- a/lifemonitor/api/models/services/travis.py +++ b/lifemonitor/api/models/services/travis.py @@ -29,6 +29,7 @@ import lifemonitor.api.models as models import requests from lifemonitor.api.models.services.service import TestingService +from lifemonitor.cache import Timeout, cached from lifemonitor.exceptions import (EntityNotFoundException, TestingServiceException) @@ -133,19 +134,21 @@ def get_last_passed_test_build(self, test_instance: models.TestInstance) -> Opti def get_last_failed_test_build(self, test_instance: models.TestInstance) -> Optional[models.TravisTestBuild]: return self._get_last_test_build(test_instance, state='failed') + @cached(timeout=Timeout.NONE, client_scope=False) def get_project_metadata(self, test_instance: models.TestInstance): - try: + try: logger.debug("Getting Travis project metadata...") - key = f"project_metadata_{test_instance.uuid}" - metadata = self.cache.get(key) - if metadata is None: - logger.debug("Getting project metadata from remote service...") - metadata = self._get("/repo/{}".format(self.get_repo_id(test_instance))) - if metadata is not None: - self.cache.set(key, metadata) - else: - logger.debug("Reusing travis project metadata from cache...") - return metadata + return self._get("/repo/{}".format(self.get_repo_id(test_instance))) + # key = f"project_metadata_{test_instance.uuid}" + # metadata = self.cache.get(key) + # if metadata is None: + # logger.debug("Getting project metadata from remote service...") + # metadata = self._get("/repo/{}".format(self.get_repo_id(test_instance))) + # if metadata is not None: + # self.cache.set(key, metadata) + # else: + # logger.debug("Reusing travis project metadata from cache...") + # return metadata except Exception as e: raise TestingServiceException(f"{self}: {e}") diff --git a/lifemonitor/api/models/testsuites/testbuild.py b/lifemonitor/api/models/testsuites/testbuild.py index b6f16892b..074e33e61 100644 --- a/lifemonitor/api/models/testsuites/testbuild.py +++ b/lifemonitor/api/models/testsuites/testbuild.py @@ -25,7 +25,7 @@ from enum import Enum import lifemonitor.api.models as models -from lifemonitor.cache import CacheMixin +from lifemonitor.cache import CacheMixin, Timeout, cached # set module level logger logger = logging.getLogger(__name__) @@ -112,17 +112,11 @@ def url(self) -> str: @property def 
external_link(self) -> str: - logger.debug("Getting external link...") - key = f"{str(self)}_external_link" - link = self.cache.get(key) - if link is None: - logger.debug("Getting external link from testing service...") - link = self.testing_service.get_test_build_external_link(self) - if link is not None: - self.cache.set(key, link) - else: - logger.debug("Reusing external link from cache...") - return link + return self.get_external_link() + + @cached(timeout=Timeout.NONE, client_scope=False) + def get_external_link(self): + return self.testing_service.get_test_build_external_link(self) def get_output(self, offset_bytes=0, limit_bytes=131072): return self.testing_service.get_test_build_output(self.test_instance, self.id, offset_bytes, limit_bytes) diff --git a/lifemonitor/api/models/testsuites/testinstance.py b/lifemonitor/api/models/testsuites/testinstance.py index 673c0ad3f..0b96cb4b6 100644 --- a/lifemonitor/api/models/testsuites/testinstance.py +++ b/lifemonitor/api/models/testsuites/testinstance.py @@ -26,7 +26,7 @@ import lifemonitor.api.models as models from lifemonitor.api.models import db -from lifemonitor.cache import Timeout +from lifemonitor.cache import Timeout, cached from lifemonitor.models import JSON, UUID, ModelMixin from .testsuite import TestSuite @@ -82,18 +82,6 @@ def __eq__(self, o: object) -> bool: def _cache_key_prefix(self): return str(self) - def _get_cache_key_external_link(self): - return f"{self._cache_key_prefix}_external_link" - - def _get_cache_key_last_build(self): - return f"{self._cache_key_prefix}_last_build" - - def _get_cache_key_test_builds(self, limit=10): - return f"{self._cache_key_prefix}_test_builds_limit{limit}" - - def _get_cache_key_test_build(self, build_number): - return f"{self._cache_key_prefix}_test_build_{build_number}" - @property def is_roc_instance(self): return self.roc_instance is not None @@ -104,55 +92,29 @@ def managed(self): @property def external_link(self): - logger.debug("Getting external link...") - key = self._get_cache_key_external_link() - link = self.cache.get(key) - if link is None: - logger.debug("Getting external link from testing service...") - link = self.testing_service.get_instance_external_link(self) - if link is not None: - self.cache.set(key, link) - else: - logger.debug("Reusing external link from cache...") - return link + return self.get_external_link() + + @cached(timeout=Timeout.NONE, client_scope=False) + def get_external_link(self): + return self.testing_service.get_instance_external_link(self) @property def last_test_build(self): - key = self._get_cache_key_last_build() - build = self.cache.get(key) - if build is None: - builds = self.get_test_builds() - build = builds[0] if builds and len(builds) > 0 else None - if build is not None: - self.cache.set(key, build) - return build + return self.get_last_test_build() + + @cached(timeout=Timeout.NONE, client_scope=False) + def get_last_test_build(self): + builds = self.get_test_builds() + return builds[0] if builds and len(builds) > 0 else None + @cached(timeout=Timeout.NONE, client_scope=False) def get_test_builds(self, limit=10): - logger.debug("Getting test builds...") - key = self._get_cache_key_test_builds(limit) - builds = self.cache.get(key) - if builds is None: - logger.debug("Getting test builds from testing service...") - builds = self.testing_service.get_test_builds(self, limit=limit) - if builds is not None: - self.cache.set(key, builds) - else: - logger.debug("Reusing test builds from cache...") - return builds + return 
self.testing_service.get_test_builds(self, limit=limit) + @cached(timeout=Timeout.BUILD, client_scope=False, + unless=lambda b: b.status in [models.BuildStatus.RUNNING, models.BuildStatus.WAITING]) def get_test_build(self, build_number): - logger.debug("Getting test build...") - key = self._get_cache_key_test_build(build_number) - build = self.cache.get(key) - if build is None: - logger.debug("Getting test build from testing service...") - build = self.testing_service.get_test_build(self, build_number) - if build is not None: - if build.status not in [models.BuildStatus.RUNNING, models.BuildStatus.WAITING]: - self.cache.set(key, build, timeout=Timeout.BUILD) - else: - logger.debug(f"Reusing test build {build} from cache...") - return build + return self.testing_service.get_test_build(self, build_number) def to_dict(self, test_build=False, test_output=False): data = { diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index 9fcb4b8e8..fa3ed2287 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -125,23 +125,16 @@ def to_dict(self): return self.cache.get_dict() def lock(self, key: str): + logger.debug("Getting lock for key %r...", key) return redis_lock.Lock(self.backend, key) def set(self, key: str, value, timeout: int = Timeout.NONE): - val = None - if isinstance(self.cache, RedisCache): - if key is not None and self.cache_enabled: - lock = self.lock(key) - if lock.acquire(blocking=True): - try: - val = self.cache.get(key) - if not val: - self.cache.set(key, value, timeout=timeout) - finally: - lock.release() - return val + if key is not None and self.cache_enabled and isinstance(self.cache, RedisCache): + logger.debug("Setting cache value for key %r.... ", key) + self.cache.set(key, value, timeout=timeout) def get(self, key: str): + logger.debug("Getting value from cache...") return self.cache.get(key) \ if isinstance(self.cache, RedisCache) \ and self.cache_enabled \ @@ -162,8 +155,10 @@ def delete_keys(self, pattern: str): helper: CacheHelper = CacheHelper(cache) -def _make_key(func=None, client_scope=True, *args, **kwargs) -> str: +def make_cache_key(func=None, client_scope=True, *args, **kwargs) -> str: + from flask import request from lifemonitor.auth import current_registry, current_user + hash_enabled = True fname = "" if func is None \ else func if isinstance(func, str) \ else f"{func.__module__}.{func.__name__}" if callable(func) else str(func) @@ -172,18 +167,25 @@ def _make_key(func=None, client_scope=True, *args, **kwargs) -> str: logger.debug("make_key kwargs: %r", kwargs) result = "" if client_scope: + client_id = "" + if request: + client_id += f"{request.remote_addr}" if current_user and not current_user.is_anonymous: - result += "{}-{}_".format(current_user.username, current_user.id) + client_id += "{}-{}_".format(current_user.username, current_user.id) if current_registry: - result += "{}_".format(current_registry.uuid) + client_id += "{}_".format(current_registry.uuid) if not current_registry and current_user.is_anonymous: - result += "anonymous_" + client_id += "anonymous_" + result += f"{hash(client_id) if hash_enabled else client_id}@" if func: result += fname + hash_enabled = False if args: - result += "_" + "-".join([str(_) for _ in args]) + args_str = "-".join([str(_) for _ in args]) + result += f"_{hash(args_str) if hash_enabled else args_str}" if kwargs: - result += "_" + "-".join([f"{str(k)}={str(v)}" for k, v in kwargs.items()]) + kwargs_str = "-".join([f"{k}={str(v)}" for k, v in kwargs.items()]) + result += f"_{hash(kwargs_str) if 
hash_enabled else kwargs_str}" logger.debug("make_key calculated key: %r", result) return result @@ -191,31 +193,52 @@ def _make_key(func=None, client_scope=True, *args, **kwargs) -> str: def clear_cache(func=None, client_scope=True, *args, **kwargs): try: if func: - key = _make_key(func, client_scope) + key = make_cache_key(func, client_scope) helper.delete_keys(f"{key}*") if args or kwargs: - key = _make_key(func, client_scope, *args, **kwargs) + key = make_cache_key(func, client_scope, *args, **kwargs) helper.delete_keys(f"{key}*") else: - key = _make_key(client_scope) + key = make_cache_key(client_scope) helper.delete_keys(f"{key}*") except Exception as e: logger.error("Error deleting cache: %r", e) -def cached(timeout=Timeout.REQUEST, client_scope=True): +def cached(timeout=Timeout.REQUEST, client_scope=True, unless=None): def decorator(function): @functools.wraps(function) def wrapper(*args, **kwargs): - key = _make_key(function, client_scope, *args, **kwargs) - result = helper.get(key) - if result is None: - logger.debug(f"Getting value from the actual function for key {key}...") - result = function(*args, **kwargs) - helper.set(key, result, timeout=timeout) + logger.debug("Args: %r", args) + logger.debug("KwArgs: %r", kwargs) + obj: CacheMixin = args[0] if len(args) > 0 and isinstance(args[0], CacheMixin) else None + logger.debug("Wrapping a method of a CacheMixin instance: %r", obj is not None) + hc = helper if obj is None else obj.cache + key = make_cache_key(function, client_scope, *args, **kwargs) + result = hc.get(key) + if hc.cache_enabled: + if result is None: + logger.debug(f"Value {key} not set in cache...") + lock = hc.lock(key) + try: + if lock.acquire(blocking=True): + val = hc.get(key) + if not val: + logger.debug("Cache empty: getting value from the actual function...") + result = function(*args, **kwargs) + logger.debug("Checking unless function: %r", unless) + if unless is None or unless is True or callable(unless) and not unless(result): + hc.set(key, result, timeout=timeout) + else: + logger.debug("Don't set value in cache due to unless=True") + finally: + lock.release() + else: + logger.debug(f"Reusing value from cache key '{key}'...") else: - logger.debug(f"Reusing value from cache key '{key}'...") + logger.debug("Cache disabled: getting value from the actual function...") + result = function(*args, **kwargs) return result return wrapper From 5391a1ef8e3e7ea85f66f164e44888903a9a7210 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Fri, 12 Nov 2021 09:17:34 +0000 Subject: [PATCH 084/162] Fix blanks --- lifemonitor/api/models/services/travis.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lifemonitor/api/models/services/travis.py b/lifemonitor/api/models/services/travis.py index d5d24ea15..529813336 100644 --- a/lifemonitor/api/models/services/travis.py +++ b/lifemonitor/api/models/services/travis.py @@ -136,7 +136,7 @@ def get_last_failed_test_build(self, test_instance: models.TestInstance) -> Opti @cached(timeout=Timeout.NONE, client_scope=False) def get_project_metadata(self, test_instance: models.TestInstance): - try: + try: logger.debug("Getting Travis project metadata...") return self._get("/repo/{}".format(self.get_repo_id(test_instance))) # key = f"project_metadata_{test_instance.uuid}" From cae3c5673a6f078c57419725213485c9faba9705 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Fri, 12 Nov 2021 09:57:51 +0000 Subject: [PATCH 085/162] Disable cache when back-end is not Redis --- lifemonitor/cache.py | 43 
+++++++++++++++++++++++++------------------ 1 file changed, 25 insertions(+), 18 deletions(-) diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index fa3ed2287..076cd8491 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -116,7 +116,10 @@ def cache(self) -> RedisCache: @property def backend(self) -> redis.Redis: - return self.cache._read_clients + if isinstance(self.cache, RedisCache): + return self.cache._read_clients + logger.warning("No cache backend found") + return None def size(self): return len(self.cache.get_dict()) @@ -126,7 +129,7 @@ def to_dict(self): def lock(self, key: str): logger.debug("Getting lock for key %r...", key) - return redis_lock.Lock(self.backend, key) + return redis_lock.Lock(self.backend, key) if self.backend else False def set(self, key: str, value, timeout: int = Timeout.NONE): if key is not None and self.cache_enabled and isinstance(self.cache, RedisCache): @@ -215,25 +218,29 @@ def wrapper(*args, **kwargs): obj: CacheMixin = args[0] if len(args) > 0 and isinstance(args[0], CacheMixin) else None logger.debug("Wrapping a method of a CacheMixin instance: %r", obj is not None) hc = helper if obj is None else obj.cache - key = make_cache_key(function, client_scope, *args, **kwargs) - result = hc.get(key) if hc.cache_enabled: + key = make_cache_key(function, client_scope, *args, **kwargs) + result = hc.get(key) if result is None: logger.debug(f"Value {key} not set in cache...") - lock = hc.lock(key) - try: - if lock.acquire(blocking=True): - val = hc.get(key) - if not val: - logger.debug("Cache empty: getting value from the actual function...") - result = function(*args, **kwargs) - logger.debug("Checking unless function: %r", unless) - if unless is None or unless is True or callable(unless) and not unless(result): - hc.set(key, result, timeout=timeout) - else: - logger.debug("Don't set value in cache due to unless=True") - finally: - lock.release() + if hc.backend: + lock = hc.lock(key) + try: + if lock.acquire(blocking=True): + val = hc.get(key) + if not val: + logger.debug("Cache empty: getting value from the actual function...") + result = function(*args, **kwargs) + logger.debug("Checking unless function: %r", unless) + if unless is None or unless is True or callable(unless) and not unless(result): + hc.set(key, result, timeout=timeout) + else: + logger.debug("Don't set value in cache due to unless=%r", "None" if unless is None else "True") + finally: + lock.release() + else: + logger.warning("Using unsupported cache backend: cache will not be used") + result = function(*args, **kwargs) else: logger.debug(f"Reusing value from cache key '{key}'...") else: From 7e08bdc7249740a0a19925f5498c0c4e7537e535 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Fri, 12 Nov 2021 10:18:20 +0000 Subject: [PATCH 086/162] Update cache key --- lifemonitor/cache.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index 076cd8491..ad86f6c3c 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -161,34 +161,34 @@ def delete_keys(self, pattern: str): def make_cache_key(func=None, client_scope=True, *args, **kwargs) -> str: from flask import request from lifemonitor.auth import current_registry, current_user - hash_enabled = True + hash_enabled = not logger.isEnabledFor(logging.DEBUG) fname = "" if func is None \ else func if isinstance(func, str) \ else f"{func.__module__}.{func.__name__}" if callable(func) else str(func) logger.debug("make_key func: %r", fname) 
logger.debug("make_key args: %r", args) logger.debug("make_key kwargs: %r", kwargs) + logger.debug("make_key hash enabled: %r", hash_enabled) result = "" if client_scope: - client_id = "" - if request: - client_id += f"{request.remote_addr}" + client_id = "" if current_user and not current_user.is_anonymous: client_id += "{}-{}_".format(current_user.username, current_user.id) if current_registry: client_id += "{}_".format(current_registry.uuid) if not current_registry and current_user.is_anonymous: - client_id += "anonymous_" - result += f"{hash(client_id) if hash_enabled else client_id}@" + client_id += "anonymous" + if request: + client_id += f"@{request.remote_addr}" + result += f"{hash(client_id) if hash_enabled else client_id}::" if func: result += fname - hash_enabled = False if args: args_str = "-".join([str(_) for _ in args]) - result += f"_{hash(args_str) if hash_enabled else args_str}" + result += f"#{hash(args_str) if hash_enabled else args_str}" if kwargs: kwargs_str = "-".join([f"{k}={str(v)}" for k, v in kwargs.items()]) - result += f"_{hash(kwargs_str) if hash_enabled else kwargs_str}" + result += f"#{hash(kwargs_str) if hash_enabled else kwargs_str}" logger.debug("make_key calculated key: %r", result) return result From 4497e444a12988f125f0d837fae00b4fd258dbe9 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Fri, 12 Nov 2021 10:21:42 +0000 Subject: [PATCH 087/162] Clean up --- lifemonitor/api/models/services/github.py | 21 --------------------- lifemonitor/api/models/services/travis.py | 10 ---------- lifemonitor/cache.py | 5 +++-- 3 files changed, 3 insertions(+), 33 deletions(-) diff --git a/lifemonitor/api/models/services/github.py b/lifemonitor/api/models/services/github.py index 5ed01f36d..ad445dc37 100644 --- a/lifemonitor/api/models/services/github.py +++ b/lifemonitor/api/models/services/github.py @@ -109,20 +109,6 @@ def _get_workflow_info(self, resource): @cached(timeout=Timeout.NONE, client_scope=False) def _get_repo(self, test_instance: models.TestInstance): - # logger.debug("Getting github repository...") - # key = f"github_repo_{test_instance.uuid}" - # repository = self.cache.get(key) - # if repository is None: - # logger.debug("Getting github repository from remote service...") - # _, repo_full_name, _ = self._get_workflow_info(test_instance.resource) - # repository = self._gh_service.get_repo(repo_full_name) - # logger.debug("Repo ID: %s", repository.id) - # logger.debug("Repo full name: %s", repository.full_name) - # logger.debug("Repo URL: %s", f'https://github.com/{repository.full_name}') - # self.cache.set(key, repository) - # else: - # logger.debug("Reusing github repository from cache...") - # return repository logger.debug("Getting github repository from remote service...") _, repo_full_name, _ = self._get_workflow_info(test_instance.resource) repository = self._gh_service.get_repo(repo_full_name) @@ -156,13 +142,6 @@ def check_connection(self) -> bool: def _get_gh_workflow(self, repository, workflow_id): logger.debug("Getting github workflow...") return self._gh_service.get_repo(repository).get_workflow(workflow_id) - # key = f"github_workflow_{repository}_{workflow_id}" - # workflow = self.cache.get(key) - # if workflow is None: - # logger.debug("Getting github workflow from remote service...") - # workflow = self._gh_service.get_repo(repository).get_workflow(workflow_id) - # self.cache.set(key, workflow) - # return workflow def _iter_runs(self, test_instance: models.TestInstance, status: str = None) -> 
Generator[github.WorkflowRun.WorkflowRun]: _, repository, workflow_id = self._get_workflow_info(test_instance.resource) diff --git a/lifemonitor/api/models/services/travis.py b/lifemonitor/api/models/services/travis.py index 529813336..36af5bdd6 100644 --- a/lifemonitor/api/models/services/travis.py +++ b/lifemonitor/api/models/services/travis.py @@ -139,16 +139,6 @@ def get_project_metadata(self, test_instance: models.TestInstance): try: logger.debug("Getting Travis project metadata...") return self._get("/repo/{}".format(self.get_repo_id(test_instance))) - # key = f"project_metadata_{test_instance.uuid}" - # metadata = self.cache.get(key) - # if metadata is None: - # logger.debug("Getting project metadata from remote service...") - # metadata = self._get("/repo/{}".format(self.get_repo_id(test_instance))) - # if metadata is not None: - # self.cache.set(key, metadata) - # else: - # logger.debug("Reusing travis project metadata from cache...") - # return metadata except Exception as e: raise TestingServiceException(f"{self}: {e}") diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index ad86f6c3c..62ff01bb3 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -26,6 +26,7 @@ import redis import redis_lock +from flask import request from flask.app import Flask from flask_caching import Cache from flask_caching.backends.rediscache import RedisCache @@ -159,8 +160,8 @@ def delete_keys(self, pattern: str): def make_cache_key(func=None, client_scope=True, *args, **kwargs) -> str: - from flask import request from lifemonitor.auth import current_registry, current_user + hash_enabled = not logger.isEnabledFor(logging.DEBUG) fname = "" if func is None \ else func if isinstance(func, str) \ @@ -171,7 +172,7 @@ def make_cache_key(func=None, client_scope=True, *args, **kwargs) -> str: logger.debug("make_key hash enabled: %r", hash_enabled) result = "" if client_scope: - client_id = "" + client_id = "" if current_user and not current_user.is_anonymous: client_id += "{}-{}_".format(current_user.username, current_user.id) if current_registry: From bbcd6b3a6ecb872ff4d0018c434535e84b24106d Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Fri, 12 Nov 2021 10:22:28 +0000 Subject: [PATCH 088/162] Fix unit tests --- tests/unit/cache/test_cache.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/tests/unit/cache/test_cache.py b/tests/unit/cache/test_cache.py index 0f7824458..cd15b1554 100644 --- a/tests/unit/cache/test_cache.py +++ b/tests/unit/cache/test_cache.py @@ -22,8 +22,8 @@ from unittest.mock import MagicMock import lifemonitor.api.models as models +from lifemonitor.cache import helper, make_cache_key from tests import utils -from lifemonitor.cache import helper logger = logging.getLogger(__name__) @@ -38,12 +38,11 @@ def test_cache_last_build(app_client, redis_cache, user1): assert len(suite.test_instances) > 0, "The suite should have at least one test instance" instance: models.TestInstance = suite.test_instances[0] - assert instance._cache_key_prefix == str(instance), "Invalid cache key prefix" - - assert instance.cache.get(instance._get_cache_key_last_build()) is None, "Cache should be empty" + last_build_key = make_cache_key(instance.get_last_test_build) + assert instance.cache.get(last_build_key) is None, "Cache should be empty" build = instance.last_test_build assert build, "Last build should not be empty" - cached_build = instance.cache.get(instance._get_cache_key_last_build()) + cached_build = instance.cache.get(last_build_key) assert 
cached_build is not None, "Cache should not be empty" assert build == cached_build, "Build should be equal to the cached build" @@ -66,7 +65,7 @@ def test_cache_test_builds(app_client, redis_cache, user1): instance: models.TestInstance = suite.test_instances[0] limit = 10 - cache_key = instance._get_cache_key_test_builds(limit=limit) + cache_key = make_cache_key(instance.get_test_builds, limit=limit) assert instance.cache.get(cache_key) is None, "Cache should be empty" builds = instance.get_test_builds(limit=limit) assert builds and len(builds) > 0, "Invalid number of builds" From 9e2b9fa20761c0b3a9d911424d3ebbd24430b2da Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Fri, 12 Nov 2021 10:23:45 +0000 Subject: [PATCH 089/162] Decrease default request timeout --- settings.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/settings.conf b/settings.conf index 8e7b80cc5..a2ab333aa 100644 --- a/settings.conf +++ b/settings.conf @@ -56,7 +56,7 @@ REDIS_PORT_NUMBER=6379 # Cache settings CACHE_REDIS_DB=0 CACHE_DEFAULT_TIMEOUT=300 -CACHE_REQUEST_TIMEOUT=30 +CACHE_REQUEST_TIMEOUT=15 CACHE_SESSION_TIMEOUT=3600 CACHE_BUILD_TIMEOUT=84600 From 8f3931ec58a5172319d9d807b7716f93d059b516 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Fri, 12 Nov 2021 11:25:37 +0000 Subject: [PATCH 090/162] Check if lock is set --- lifemonitor/cache.py | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index 62ff01bb3..ccfb11498 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -226,19 +226,20 @@ def wrapper(*args, **kwargs): logger.debug(f"Value {key} not set in cache...") if hc.backend: lock = hc.lock(key) - try: - if lock.acquire(blocking=True): - val = hc.get(key) - if not val: - logger.debug("Cache empty: getting value from the actual function...") - result = function(*args, **kwargs) - logger.debug("Checking unless function: %r", unless) - if unless is None or unless is True or callable(unless) and not unless(result): - hc.set(key, result, timeout=timeout) - else: - logger.debug("Don't set value in cache due to unless=%r", "None" if unless is None else "True") - finally: - lock.release() + if lock: + try: + if lock.acquire(blocking=True): + val = hc.get(key) + if not val: + logger.debug("Cache empty: getting value from the actual function...") + result = function(*args, **kwargs) + logger.debug("Checking unless function: %r", unless) + if unless is None or unless is True or callable(unless) and not unless(result): + hc.set(key, result, timeout=timeout) + else: + logger.debug("Don't set value in cache due to unless=%r", "None" if unless is None else "True") + finally: + lock.release() else: logger.warning("Using unsupported cache backend: cache will not be used") result = function(*args, **kwargs) From 101763f211254215678173d4611dd8535b6f564b Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Fri, 12 Nov 2021 11:26:04 +0000 Subject: [PATCH 091/162] Update make_cache_key signature --- lifemonitor/cache.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index ccfb11498..de398bfdb 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -159,7 +159,7 @@ def delete_keys(self, pattern: str): helper: CacheHelper = CacheHelper(cache) -def make_cache_key(func=None, client_scope=True, *args, **kwargs) -> str: +def make_cache_key(func=None, client_scope=True, args=None, kwargs=None) -> str: from lifemonitor.auth 
import current_registry, current_user hash_enabled = not logger.isEnabledFor(logging.DEBUG) @@ -200,7 +200,7 @@ def clear_cache(func=None, client_scope=True, *args, **kwargs): key = make_cache_key(func, client_scope) helper.delete_keys(f"{key}*") if args or kwargs: - key = make_cache_key(func, client_scope, *args, **kwargs) + key = make_cache_key(func, client_scope, args=args, kwargs=kwargs) helper.delete_keys(f"{key}*") else: key = make_cache_key(client_scope) @@ -220,7 +220,7 @@ def wrapper(*args, **kwargs): logger.debug("Wrapping a method of a CacheMixin instance: %r", obj is not None) hc = helper if obj is None else obj.cache if hc.cache_enabled: - key = make_cache_key(function, client_scope, *args, **kwargs) + key = make_cache_key(function, client_scope, args=args, kwargs=kwargs) result = hc.get(key) if result is None: logger.debug(f"Value {key} not set in cache...") From 2bae1079beb4cb5af6833169ca25634f25bf7f32 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Fri, 12 Nov 2021 11:26:31 +0000 Subject: [PATCH 092/162] Fix unit tests --- tests/unit/cache/test_cache.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/unit/cache/test_cache.py b/tests/unit/cache/test_cache.py index cd15b1554..ac7de728d 100644 --- a/tests/unit/cache/test_cache.py +++ b/tests/unit/cache/test_cache.py @@ -38,7 +38,7 @@ def test_cache_last_build(app_client, redis_cache, user1): assert len(suite.test_instances) > 0, "The suite should have at least one test instance" instance: models.TestInstance = suite.test_instances[0] - last_build_key = make_cache_key(instance.get_last_test_build) + last_build_key = make_cache_key(instance.get_last_test_build, client_scope=False, args=(instance,)) assert instance.cache.get(last_build_key) is None, "Cache should be empty" build = instance.last_test_build assert build, "Last build should not be empty" @@ -65,7 +65,7 @@ def test_cache_test_builds(app_client, redis_cache, user1): instance: models.TestInstance = suite.test_instances[0] limit = 10 - cache_key = make_cache_key(instance.get_test_builds, limit=limit) + cache_key = make_cache_key(instance.get_test_builds, client_scope=False, args=(instance,), kwargs={"limit": limit}) assert instance.cache.get(cache_key) is None, "Cache should be empty" builds = instance.get_test_builds(limit=limit) assert builds and len(builds) > 0, "Invalid number of builds" @@ -81,5 +81,5 @@ def test_cache_test_builds(app_client, redis_cache, user1): assert len(builds) == len(cached_builds), "Unexpected number of cached builds" limit = 20 - cache_key = instance._get_cache_key_test_builds(limit=limit) + cache_key = make_cache_key(instance.get_test_builds, client_scope=False, args=(instance,), kwargs={"limit": limit}) assert instance.cache.get(cache_key) is None, "Cache should be empty" From 7b265db211a04de2c549dbb1b29eac5e9380f6fe Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Fri, 12 Nov 2021 12:59:05 +0000 Subject: [PATCH 093/162] Update cache timeout for external links --- lifemonitor/api/models/testsuites/testbuild.py | 2 +- lifemonitor/api/models/testsuites/testinstance.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lifemonitor/api/models/testsuites/testbuild.py b/lifemonitor/api/models/testsuites/testbuild.py index 074e33e61..e55124517 100644 --- a/lifemonitor/api/models/testsuites/testbuild.py +++ b/lifemonitor/api/models/testsuites/testbuild.py @@ -114,7 +114,7 @@ def url(self) -> str: def external_link(self) -> str: return self.get_external_link() - 
@cached(timeout=Timeout.NONE, client_scope=False) + @cached(timeout=Timeout.BUILD, client_scope=False) def get_external_link(self): return self.testing_service.get_test_build_external_link(self) diff --git a/lifemonitor/api/models/testsuites/testinstance.py b/lifemonitor/api/models/testsuites/testinstance.py index 0b96cb4b6..e513e3b0e 100644 --- a/lifemonitor/api/models/testsuites/testinstance.py +++ b/lifemonitor/api/models/testsuites/testinstance.py @@ -94,7 +94,7 @@ def managed(self): def external_link(self): return self.get_external_link() - @cached(timeout=Timeout.NONE, client_scope=False) + @cached(timeout=Timeout.BUILD, client_scope=False) def get_external_link(self): return self.testing_service.get_instance_external_link(self) From caad35979b7ebc3a74c60557aa167d79fb6cab3a Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Fri, 12 Nov 2021 14:59:17 +0000 Subject: [PATCH 094/162] Fix make_cache_key on anonymous requests --- lifemonitor/cache.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index de398bfdb..c7725e2f9 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -177,7 +177,7 @@ def make_cache_key(func=None, client_scope=True, args=None, kwargs=None) -> str: client_id += "{}-{}_".format(current_user.username, current_user.id) if current_registry: client_id += "{}_".format(current_registry.uuid) - if not current_registry and current_user.is_anonymous: + if not current_registry and (not current_user or current_user.is_anonymous): client_id += "anonymous" if request: client_id += f"@{request.remote_addr}" From 750ec7f94d027e7cedd4d33de78fd4a130ea384b Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Fri, 12 Nov 2021 15:00:00 +0000 Subject: [PATCH 095/162] Update clear_cache: fix make_cache_key args --- lifemonitor/cache.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index c7725e2f9..2dbcfc28a 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -200,10 +200,10 @@ def clear_cache(func=None, client_scope=True, *args, **kwargs): key = make_cache_key(func, client_scope) helper.delete_keys(f"{key}*") if args or kwargs: - key = make_cache_key(func, client_scope, args=args, kwargs=kwargs) + key = make_cache_key(func, client_scope=client_scope, args=args, kwargs=kwargs) helper.delete_keys(f"{key}*") else: - key = make_cache_key(client_scope) + key = make_cache_key(client_scope=client_scope) helper.delete_keys(f"{key}*") except Exception as e: logger.error("Error deleting cache: %r", e) From 4de62f37bd53e800ca651747700c8b33ed511492 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Sat, 13 Nov 2021 01:56:22 +0100 Subject: [PATCH 096/162] Add additional workflow timeout --- lifemonitor/cache.py | 3 ++- settings.conf | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index 2dbcfc28a..c1f638bc3 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -64,11 +64,12 @@ class Timeout: DEFAULT = _get_timeout(_get_timeout_key('DEFAULT'), default=300) REQUEST = _get_timeout(_get_timeout_key('REQUEST'), default=30) SESSION = _get_timeout(_get_timeout_key('SESSION'), default=3600) + WORKFLOW = _get_timeout(_get_timeout_key('WORKFLOW'), default=1800) BUILD = _get_timeout(_get_timeout_key('BUILD'), default=300) @classmethod def update(cls, config): - for t in ('DEFAULT', 'REQUEST', 'SESSION', 'BUILD'): + for t in ('DEFAULT', 'REQUEST', 'SESSION', 'BUILD', 'WORKFLOW'): 
try: key = _get_timeout_key(t) setattr(cls, key, _get_timeout(key, config=config)) diff --git a/settings.conf b/settings.conf index a2ab333aa..78c557d28 100644 --- a/settings.conf +++ b/settings.conf @@ -58,6 +58,7 @@ CACHE_REDIS_DB=0 CACHE_DEFAULT_TIMEOUT=300 CACHE_REQUEST_TIMEOUT=15 CACHE_SESSION_TIMEOUT=3600 +CACHE_WORKFLOW_TIMEOUT=1800 CACHE_BUILD_TIMEOUT=84600 # Github OAuth2 settings From e36d74a962d435da776530a4285f2d1a42c28246 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Sat, 13 Nov 2021 01:57:22 +0100 Subject: [PATCH 097/162] Cache RO-Crate download --- lifemonitor/api/controllers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lifemonitor/api/controllers.py b/lifemonitor/api/controllers.py index 6c21b13a6..75e3dfcb2 100644 --- a/lifemonitor/api/controllers.py +++ b/lifemonitor/api/controllers.py @@ -155,7 +155,7 @@ def workflows_rocrate_metadata(wf_uuid, wf_version): return response.crate_metadata -@cached(timeout=Timeout.REQUEST) +@cached(timeout=Timeout.WORKFLOW, client_scope=False) def workflows_rocrate_download(wf_uuid, wf_version): response = _get_workflow_or_problem(wf_uuid, wf_version) if isinstance(response, Response): From cf103cd6ec4ed63a0b2c550375dcc21c68f0ee98 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Sat, 13 Nov 2021 01:58:14 +0100 Subject: [PATCH 098/162] Cache workflow external link --- lifemonitor/api/models/workflows.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lifemonitor/api/models/workflows.py b/lifemonitor/api/models/workflows.py index daa42543b..c48744c66 100644 --- a/lifemonitor/api/models/workflows.py +++ b/lifemonitor/api/models/workflows.py @@ -24,6 +24,7 @@ from typing import List, Union import lifemonitor.api.models as models +from lifemonitor.cache import Timeout, cached import lifemonitor.exceptions as lm_exceptions from lifemonitor import utils as lm_utils from lifemonitor.api.models import db @@ -221,6 +222,10 @@ def check_health(self) -> dict: @property def external_link(self) -> str: + return self.get_external_link() + + @cached(Timeout.WORKFLOW, client_scope=False) + def get_external_link(self) -> str: if self.hosting_service is None: return self.uri return self.hosting_service.get_external_link(self.workflow.external_id, self.version) From 991ccbaccb92ff3cd76e043712e778d2544391fb Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Sat, 13 Nov 2021 01:59:36 +0100 Subject: [PATCH 099/162] Force int value for timeouts --- lifemonitor/cache.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index c1f638bc3..b3e85297c 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -51,7 +51,7 @@ def _get_timeout(name: str, default: int = 0, config=None) -> int: logger.debug(e) result = result or os.environ.get(name, default) logger.debug("Getting timeout %r: %r", name, result) - return result + return int(result) def _get_timeout_key(n: str) -> str: From ad1dec633da37e420915832ebbc27ba9c8200288 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Sat, 13 Nov 2021 02:01:17 +0100 Subject: [PATCH 100/162] Check global "cache ignore" setting --- lifemonitor/cache.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index b3e85297c..d07152eb8 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -144,6 +144,7 @@ def get(self, key: str): if isinstance(self.cache, RedisCache) \ and self.cache_enabled \ and not self.ignore_cache_values \ + and not cache.ignore_cache_values \ 
else None def delete_keys(self, pattern: str): From e1cfc8066361d9d139f039a3d5d6fb8b496f6626 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Sat, 13 Nov 2021 02:03:12 +0100 Subject: [PATCH 101/162] Wrap FlaskCache into the Cache class --- lifemonitor/cache.py | 95 ++++++++++++++++++++++++-------------------- 1 file changed, 52 insertions(+), 43 deletions(-) diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index d07152eb8..0cb7b323d 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -28,7 +28,7 @@ import redis_lock from flask import request from flask.app import Flask -from flask_caching import Cache +from flask_caching import Cache as FlaskCache from flask_caching.backends.rediscache import RedisCache # Set prefix @@ -38,9 +38,6 @@ # Set module logger logger = logging.getLogger(__name__) -# Instantiate cache manager -cache = Cache() - def _get_timeout(name: str, default: int = 0, config=None) -> int: result = None @@ -77,40 +74,27 @@ def update(cls, config): logger.debug("Error when updating timeout %r", t) -def init_cache(app: Flask): - cache_type = app.config.get( - 'CACHE_TYPE', - 'flask_caching.backends.simplecache.SimpleCache' - ) - logger.debug("Cache type detected: %s", cache_type) - if cache_type == 'flask_caching.backends.rediscache.RedisCache': - logger.debug("Configuring cache...") - app.config.setdefault('CACHE_REDIS_HOST', os.environ.get('REDIS_HOST', 'redis')) - app.config.setdefault('CACHE_REDIS_PORT', os.environ.get('REDIS_PORT_NUMBER', 6379)) - app.config.setdefault('CACHE_REDIS_PASSWORD', os.environ.get('REDIS_PASSWORD', '')) - app.config.setdefault('CACHE_REDIS_DB', int(os.environ.get('CACHE_REDIS_DB', 0))) - app.config.setdefault("CACHE_KEY_PREFIX", CACHE_PREFIX) - app.config.setdefault('CACHE_REDIS_URL', "redis://:{0}@{1}:{2}/{3}".format( - app.config.get('CACHE_REDIS_PASSWORD'), - app.config.get('CACHE_REDIS_HOST'), - app.config.get('CACHE_REDIS_PORT'), - app.config.get('CACHE_REDIS_DB') - )) - logger.debug("RedisCache connection url: %s", app.config.get('CACHE_REDIS_URL')) - cache.init_app(app) - Timeout.update(app.config) - logger.debug(f"Cache initialised (type: {cache_type})") - - -class CacheHelper(object): +class Cache(object): # Enable/Disable cache cache_enabled = True # Ignore cache values even if cache is enabled ignore_cache_values = False + # Reference to the Flask cache manager + __cache__ = None - def __init__(self, cache) -> None: - self._cache = cache + @classmethod + def __get_flask_cache(cls): + if cls.__cache__ is None: + cls.__cache__ = FlaskCache() + return cls.__cache__ + + @classmethod + def init_app(cls, app: Flask): + cls.__get_flask_cache().init_app(app) + + def __init__(self, cache: FlaskCache = None) -> None: + self._cache = cache or self.__get_flask_cache() @property def cache(self) -> RedisCache: @@ -157,8 +141,33 @@ def delete_keys(self, pattern: str): self.backend.delete(key) -# global cache helper instance -helper: CacheHelper = CacheHelper(cache) +# global cache instance +cache: Cache = Cache() + + +def init_cache(app: Flask): + cache_type = app.config.get( + 'CACHE_TYPE', + 'flask_caching.backends.simplecache.SimpleCache' + ) + logger.debug("Cache type detected: %s", cache_type) + if cache_type == 'flask_caching.backends.rediscache.RedisCache': + logger.debug("Configuring cache...") + app.config.setdefault('CACHE_REDIS_HOST', os.environ.get('REDIS_HOST', '127.0.0.1')) + app.config.setdefault('CACHE_REDIS_PORT', os.environ.get('REDIS_PORT_NUMBER', 6379)) + app.config.setdefault('CACHE_REDIS_PASSWORD', 
os.environ.get('REDIS_PASSWORD', 'foobar')) + app.config.setdefault('CACHE_REDIS_DB', int(os.environ.get('CACHE_REDIS_DB', 0))) + app.config.setdefault("CACHE_KEY_PREFIX", CACHE_PREFIX) + app.config.setdefault('CACHE_REDIS_URL', "redis://:{0}@{1}:{2}/{3}".format( + app.config.get('CACHE_REDIS_PASSWORD'), + app.config.get('CACHE_REDIS_HOST'), + app.config.get('CACHE_REDIS_PORT'), + app.config.get('CACHE_REDIS_DB') + )) + logger.debug("RedisCache connection url: %s", app.config.get('CACHE_REDIS_URL')) + cache.init_app(app) + Timeout.update(app.config) + logger.debug(f"Cache initialised (type: {cache_type})") def make_cache_key(func=None, client_scope=True, args=None, kwargs=None) -> str: @@ -200,13 +209,13 @@ def clear_cache(func=None, client_scope=True, *args, **kwargs): try: if func: key = make_cache_key(func, client_scope) - helper.delete_keys(f"{key}*") + cache.delete_keys(f"{key}*") if args or kwargs: key = make_cache_key(func, client_scope=client_scope, args=args, kwargs=kwargs) - helper.delete_keys(f"{key}*") + cache.delete_keys(f"{key}*") else: key = make_cache_key(client_scope=client_scope) - helper.delete_keys(f"{key}*") + cache.delete_keys(f"{key}*") except Exception as e: logger.error("Error deleting cache: %r", e) @@ -220,7 +229,7 @@ def wrapper(*args, **kwargs): logger.debug("KwArgs: %r", kwargs) obj: CacheMixin = args[0] if len(args) > 0 and isinstance(args[0], CacheMixin) else None logger.debug("Wrapping a method of a CacheMixin instance: %r", obj is not None) - hc = helper if obj is None else obj.cache + hc = cache if obj is None else obj.cache if hc.cache_enabled: key = make_cache_key(function, client_scope, args=args, kwargs=kwargs) result = hc.get(key) @@ -231,8 +240,8 @@ def wrapper(*args, **kwargs): if lock: try: if lock.acquire(blocking=True): - val = hc.get(key) - if not val: + result = hc.get(key) + if not result: logger.debug("Cache empty: getting value from the actual function...") result = function(*args, **kwargs) logger.debug("Checking unless function: %r", unless) @@ -258,10 +267,10 @@ def wrapper(*args, **kwargs): class CacheMixin(object): - _helper: CacheHelper = helper + _helper: Cache = None @property - def cache(self) -> CacheHelper: + def cache(self) -> Cache: if self._helper is None: - self._helper = CacheHelper(cache) + self._helper = Cache() return self._helper From e6c50bd56cd5d627abbf796c134d34f26c6b98d3 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Sat, 13 Nov 2021 02:05:34 +0100 Subject: [PATCH 102/162] Clear "client cache" after some updates --- lifemonitor/auth/controllers.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lifemonitor/auth/controllers.py b/lifemonitor/auth/controllers.py index cbf544e64..072d29ab2 100644 --- a/lifemonitor/auth/controllers.py +++ b/lifemonitor/auth/controllers.py @@ -131,6 +131,7 @@ def register(): if user: login_user(user) flash("Account created", category="success") + clear_cache() return redirect(url_for("auth.index")) return render_template("auth/register.j2", form=form, action='/register', providers=get_providers()) @@ -153,6 +154,7 @@ def register_identity(): if user: login_user(user) flash("Account created", category="success") + clear_cache() return redirect(url_for("auth.index")) return render_template("auth/register.j2", form=form, action='/register_identity', identity=identity, user=user, providers=get_providers()) @@ -224,6 +226,7 @@ def create_apikey(): if apikey: logger.debug("Created a new API key: %r", apikey) flash("API key created!", category="success") + clear_cache() else: 
flash("API key not created!", category="error") return redirect(url_for('auth.profile', currentView='apiKeysTab')) @@ -238,6 +241,7 @@ def delete_apikey(): flash("Unable to find the API key") else: delete_api_key(current_user, apikey) + clear_cache() flash("API key removed!", category="success") return redirect(url_for('auth.profile', currentView='apiKeysTab')) @@ -285,6 +289,7 @@ def save_generic_code_flow_client(): data['auth_method']) logger.debug("Client updated: %r", client) flash("App Updated", category="success") + clear_cache() else: logger.debug("Ops... validation failed") return profile(form=form, currentView="oauth2ClientEditorPane") @@ -312,6 +317,7 @@ def edit_generic_code_flow_client(): logger.debug("AuthMethod: %r", form.auth_method.data) for scope in form.scopes: logger.debug("A scope: %r", scope.data) + clear_cache() return profile(form=form, currentView="oauth2ClientEditorPane") @@ -329,4 +335,5 @@ def delete_generic_code_flow_client(): flash("Unable to delete the OAuth App", category="error") else: flash("App removed!", category="success") + clear_cache() return redirect(url_for('auth.profile', currentView='oauth2ClientsTab')) From 0fde116dbbdfaeb4fc7625e78e4e1a589e554f59 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Sat, 13 Nov 2021 02:12:42 +0100 Subject: [PATCH 103/162] Update check_last_build task. Globally ignore cache values; schedule task with interval trigger. --- lifemonitor/tasks/tasks.py | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/lifemonitor/tasks/tasks.py b/lifemonitor/tasks/tasks.py index c0977f792..0d16e4644 100644 --- a/lifemonitor/tasks/tasks.py +++ b/lifemonitor/tasks/tasks.py @@ -41,21 +41,24 @@ def heartbeat(): logger.info("Heartbeat!") -@schedule(CronTrigger(minute="*/5")) +@schedule(IntervalTrigger(seconds=Timeout.BUILD * 3 / 4)) @dramatiq.actor def check_last_build(): - logger.info("Checking last build....") from lifemonitor.api.models import Workflow + from lifemonitor.cache import cache + logger.info("Starting 'check_last build' task...") for w in Workflow.all(): - for s in w.latest_version.test_suites: - logger.info("Updating workflow: %r", w) - for i in s.test_instances: - try: - i.ignore_cache_values = True - logger.debug("Updating latest builds: %r", i.get_test_builds()) - finally: - i.ignore_cache_values = False + try: + cache.ignore_cache_values = True + for s in w.latest_version.test_suites: + logger.info("Updating workflow: %r", w) + for i in s.test_instances: + builds = i.get_test_builds() + logger.debug("Updating latest builds: %r", builds) + for b in builds: + logger.debug("Updating build: %r", i.get_test_build(b.id)) logger.debug("Updating latest build: %r", i.last_test_build) - + finally: + cache.ignore_cache_values = False logger.info("Checking last build: DONE!") From 740e5d520961ab47ce0354a8540b4324235308d1 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Sat, 13 Nov 2021 02:13:47 +0100 Subject: [PATCH 104/162] Add 'check_workflows' task to check ro-crate availability --- lifemonitor/tasks/tasks.py | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/lifemonitor/tasks/tasks.py b/lifemonitor/tasks/tasks.py index 0d16e4644..efafe9e0a 100644 --- a/lifemonitor/tasks/tasks.py +++ b/lifemonitor/tasks/tasks.py @@ -4,6 +4,8 @@ import dramatiq import flask from apscheduler.triggers.cron import CronTrigger +from apscheduler.triggers.interval import IntervalTrigger +from lifemonitor.cache import Timeout # set module level logger logger = 
logging.getLogger(__name__) @@ -41,6 +43,39 @@ def heartbeat(): logger.info("Heartbeat!") +@schedule(IntervalTrigger(seconds=Timeout.WORKFLOW * 3 / 4)) +@dramatiq.actor +def check_workflows(): + from flask import current_app + from lifemonitor.api.controllers import workflows_rocrate_download + from lifemonitor.api.models import Workflow + from lifemonitor.auth.services import login_user, logout_user + from lifemonitor.cache import cache + + logger.info("Starting 'check_workflows' task....") + for w in Workflow.all(): + try: + cache.ignore_cache_values = True + for v in w.versions.values(): + logger.info("Updating external link: %r", v.external_link) + u = v.submitter + with current_app.test_request_context(): + try: + if u is not None: + login_user(u) + logger.info("Updating RO-Crate...") + workflows_rocrate_download(w.uuid, v.version) + logger.info("Updating RO-Crate... DONE") + finally: + try: + logout_user() + except Exception as e: + logger.debug(e) + finally: + cache.ignore_cache_values = False + logger.info("Starting 'check_workflows' task.... DONE!") + + @schedule(IntervalTrigger(seconds=Timeout.BUILD * 3 / 4)) @dramatiq.actor def check_last_build(): From 39b9b7a7f4c9d6f7c97e5f1191e528393257a5f0 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Sat, 13 Nov 2021 02:15:35 +0100 Subject: [PATCH 105/162] Fix missing import --- lifemonitor/auth/controllers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lifemonitor/auth/controllers.py b/lifemonitor/auth/controllers.py index 072d29ab2..d7459d55a 100644 --- a/lifemonitor/auth/controllers.py +++ b/lifemonitor/auth/controllers.py @@ -23,7 +23,7 @@ import flask from flask import flash, redirect, render_template, request, session, url_for from flask_login import login_required, login_user, logout_user -from lifemonitor.cache import cached, Timeout +from lifemonitor.cache import cached, Timeout, clear_cache from lifemonitor.utils import (NextRouteRegistry, next_route_aware, split_by_crlf) From 3c0a3367ad2807d85ec94fa6f831d17da44fd651 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Sat, 13 Nov 2021 02:25:41 +0100 Subject: [PATCH 106/162] Fix tests: rename helper to cache --- tests/unit/cache/test_cache.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/unit/cache/test_cache.py b/tests/unit/cache/test_cache.py index ac7de728d..b446eae70 100644 --- a/tests/unit/cache/test_cache.py +++ b/tests/unit/cache/test_cache.py @@ -22,7 +22,7 @@ from unittest.mock import MagicMock import lifemonitor.api.models as models -from lifemonitor.cache import helper, make_cache_key +from lifemonitor.cache import cache, make_cache_key from tests import utils logger = logging.getLogger(__name__) @@ -30,7 +30,7 @@ def test_cache_last_build(app_client, redis_cache, user1): valid_workflow = 'sort-and-change-case' - assert helper.size() == 0, "Cache should be empty" + assert cache.size() == 0, "Cache should be empty" _, workflow = utils.pick_and_register_workflow(user1, valid_workflow) assert workflow, "Workflow should be set" assert len(workflow.test_suites) > 0, "The workflow should have at least one suite" @@ -56,7 +56,7 @@ def test_cache_last_build(app_client, redis_cache, user1): def test_cache_test_builds(app_client, redis_cache, user1): valid_workflow = 'sort-and-change-case' - assert helper.size() == 0, "Cache should be empty" + assert cache.size() == 0, "Cache should be empty" _, workflow = utils.pick_and_register_workflow(user1, valid_workflow) assert workflow, "Workflow should be set" assert 
len(workflow.test_suites) > 0, "The workflow should have at least one suite" From 9c796339f0eec6d3aeee4eb3d7e641f431c8bfa1 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Sat, 13 Nov 2021 10:27:47 +0100 Subject: [PATCH 107/162] Add set/get state for TestBuild --- lifemonitor/api/models/testsuites/testbuild.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/lifemonitor/api/models/testsuites/testbuild.py b/lifemonitor/api/models/testsuites/testbuild.py index e55124517..a14abda66 100644 --- a/lifemonitor/api/models/testsuites/testbuild.py +++ b/lifemonitor/api/models/testsuites/testbuild.py @@ -131,3 +131,15 @@ def to_dict(self, test_output=False) -> dict: if test_output: data['output'] = self.output return data + + def __getstate__(self): + return { + "testing_service": self.testing_service.uuid, + "test_instance": self.test_instance.uuid, + "metadata": self._metadata + } + + def __setstate__(self, state): + self.testing_service = models.TestingService.find_by_uuid(state['testing_service']) + self.test_instance = models.TestInstance.find_by_uuid(state['test_instance']) + self._metadata = state['metadata'] From 608aa7af485dc3a7488c69ff00507a8b35e2010e Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Sun, 14 Nov 2021 01:01:11 +0100 Subject: [PATCH 108/162] Update settings of test env with cache defaults --- tests/settings.conf | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tests/settings.conf b/tests/settings.conf index b5215412f..77c5aa288 100644 --- a/tests/settings.conf +++ b/tests/settings.conf @@ -31,6 +31,18 @@ #TEST_SECRET_KEY="" #PROD_SECRET_KEY="" +REDIS_HOST=redis +REDIS_PASSWORD=foobar +REDIS_PORT_NUMBER=6379 + +# Cache settings +CACHE_REDIS_DB=0 +CACHE_DEFAULT_TIMEOUT=300 +CACHE_REQUEST_TIMEOUT=15 +CACHE_SESSION_TIMEOUT=3600 +CACHE_WORKFLOW_TIMEOUT=1800 +CACHE_BUILD_TIMEOUT=84600 + # PostgreSQL DBMS settings #POSTGRESQL_HOST=0.0.0.0 #POSTGRESQL_PORT=5432 From 1f5db30870bd13ce6bbba65066cc126df51c6c78 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Sun, 14 Nov 2021 01:03:29 +0100 Subject: [PATCH 109/162] Clean up cache when initialising test env --- tests/conftest.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/conftest.py b/tests/conftest.py index c26c8b308..751c7df76 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -33,7 +33,7 @@ from lifemonitor.api.models import (TestingService, TestingServiceTokenManager, TestSuite, User) from lifemonitor.api.services import LifeMonitor -from lifemonitor.cache import cache, init_cache +from lifemonitor.cache import cache, init_cache, clear_cache from lifemonitor.utils import ClassManager from tests.utils import register_workflow @@ -103,6 +103,7 @@ def redis_cache(app_context): def initialize(app_settings, request_context, service_registry: ClassManager): service_registry.remove_class("unknown") helpers.clean_db() + clear_cache(client_scope=False) helpers.init_db(app_settings) helpers.disable_auto_login() auth.logout_user() From d34fb82f734a7a2a25158ce593d8e9eb18ad4ce1 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Sun, 14 Nov 2021 12:07:55 +0100 Subject: [PATCH 110/162] Set lock timeout --- lifemonitor/cache.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index 0cb7b323d..2885a6790 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -239,7 +239,7 @@ def wrapper(*args, **kwargs): lock = hc.lock(key) if lock: try: - if lock.acquire(blocking=True): + if 
lock.acquire(blocking=True, timeout=Timeout.REQUEST): result = hc.get(key) if not result: logger.debug("Cache empty: getting value from the actual function...") From cad012559c14c8d90d1e55f1baf6e26fec6aa6c9 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Sun, 14 Nov 2021 18:57:56 +0100 Subject: [PATCH 111/162] Set external link to None when not available --- lifemonitor/api/models/testsuites/testinstance.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lifemonitor/api/models/testsuites/testinstance.py b/lifemonitor/api/models/testsuites/testinstance.py index e513e3b0e..17ce8084c 100644 --- a/lifemonitor/api/models/testsuites/testinstance.py +++ b/lifemonitor/api/models/testsuites/testinstance.py @@ -92,7 +92,10 @@ def managed(self): @property def external_link(self): - return self.get_external_link() + try: + return self.get_external_link() + except Exception: + return None @cached(timeout=Timeout.BUILD, client_scope=False) def get_external_link(self): From 0521bb81420fcc5fca0e3d94cd2a2e64a7cb6533 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Sun, 14 Nov 2021 19:04:34 +0100 Subject: [PATCH 112/162] Fix lazy loading on ClassManager --- lifemonitor/utils.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/lifemonitor/utils.py b/lifemonitor/utils.py index 8f0f8ec1c..cba1b2723 100644 --- a/lifemonitor/utils.py +++ b/lifemonitor/utils.py @@ -453,14 +453,18 @@ def _load_concrete_types(self): logger.exception(e) return self.__concrete_types__ + @property + def _concrete_types(self): + return self._load_concrete_types() + def add_class(self, type_name, type_class): - self.__concrete_types__[type_name] = (type_class,) + self._concrete_types[type_name] = (type_class,) def remove_class(self, type_name): - return self.__concrete_types__.pop(type_name, None) + return self._concrete_types.pop(type_name, None) def get_class(self, concrete_type): - return self._load_concrete_types()[concrete_type][0] + return self._concrete_types[concrete_type][0] def get_classes(self): - return [_[0] for _ in self._load_concrete_types().values()] + return [_[0] for _ in self._concrete_types.values()] From 77fd00cff5767b6b578ef4790af7e39e88ea5e33 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Mon, 15 Nov 2021 19:37:06 +0100 Subject: [PATCH 113/162] Add CLI command to delete cache --- lifemonitor/commands/cache.py | 45 +++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100644 lifemonitor/commands/cache.py diff --git a/lifemonitor/commands/cache.py b/lifemonitor/commands/cache.py new file mode 100644 index 000000000..e40dc6953 --- /dev/null +++ b/lifemonitor/commands/cache.py @@ -0,0 +1,45 @@ +# Copyright (c) 2020-2021 CRS4 +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + + +import logging +from flask.blueprints import Blueprint +from flask.cli import with_appcontext + +# set module level logger +logger = logging.getLogger() + +# define the blueprint for DB commands +blueprint = Blueprint('cache', __name__) + + +@blueprint.cli.command('clear') +@with_appcontext +def clear(): + """ + Initialize LifeMonitor App + """ + from lifemonitor.cache import clear_cache + try: + clear_cache(client_scope=False) + except Exception as e: + print("Error when deleting cache: %s" % (str(e))) + if logger.isEnabledFor(logging.DEBUG): + logger.exception(e) From cb719e366ac6a3347af700c01a7c155cc92ed845 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Mon, 15 Nov 2021 19:41:45 +0100 Subject: [PATCH 114/162] Allow to configure cache timeout for workflows --- k8s/templates/secret.yaml | 1 + k8s/values.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/k8s/templates/secret.yaml b/k8s/templates/secret.yaml index bbe1a757b..df8697f90 100644 --- a/k8s/templates/secret.yaml +++ b/k8s/templates/secret.yaml @@ -51,6 +51,7 @@ stringData: CACHE_DEFAULT_TIMEOUT={{ .Values.cache.timeout.default }} CACHE_REQUEST_TIMEOUT={{ .Values.cache.timeout.request }} CACHE_SESSION_TIMEOUT={{ .Values.cache.timeout.session }} + CACHE_WORKFLOW_TIMEOUT={{ .Values.cache.timeout.workflow }} CACHE_BUILD_TIMEOUT={{ .Values.cache.timeout.build }} # Set admin credentials diff --git a/k8s/values.yaml b/k8s/values.yaml index c801d983a..296ba6abb 100644 --- a/k8s/values.yaml +++ b/k8s/values.yaml @@ -65,6 +65,7 @@ cache: default: 30 request: 15 session: 3600 + workflow: 1800 build: 84600 lifemonitor: From 50dfee54209b2de15be64689329dacd645583bb3 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Tue, 16 Nov 2021 10:33:36 +0100 Subject: [PATCH 115/162] Don't fail when releasing lock --- lifemonitor/cache.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index 2885a6790..faf1d25a0 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -239,7 +239,7 @@ def wrapper(*args, **kwargs): lock = hc.lock(key) if lock: try: - if lock.acquire(blocking=True, timeout=Timeout.REQUEST): + if lock.acquire(blocking=True, timeout=timeout * 3 / 4): result = hc.get(key) if not result: logger.debug("Cache empty: getting value from the actual function...") @@ -250,7 +250,10 @@ def wrapper(*args, **kwargs): else: logger.debug("Don't set value in cache due to unless=%r", "None" if unless is None else "True") finally: - lock.release() + try: + lock.release() + except redis_lock.NotAcquired as e: + logger.debug(e) else: logger.warning("Using unsupported cache backend: cache will not be used") result = function(*args, **kwargs) From 7a59c7386b9753a1622d21138d2a30ee1d95038a Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Tue, 16 Nov 2021 11:31:48 +0100 Subject: [PATCH 116/162] Allow to specify a prefix when deleting cache --- lifemonitor/cache.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index faf1d25a0..18e0d329f 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -131,12 +131,12 @@ def get(self, key: str): and not cache.ignore_cache_values \ else None - def delete_keys(self, pattern: 
str): + def delete_keys(self, pattern: str, prefix: str = CACHE_PREFIX): logger.debug(f"Deleting keys by pattern: {pattern}") if isinstance(self.cache, RedisCache): logger.debug("Redis backend detected!") - logger.debug(f"Pattern: {CACHE_PREFIX}{pattern}") - for key in self.backend.scan_iter(f"{CACHE_PREFIX}{pattern}"): + logger.debug(f"Pattern: {prefix}{pattern}") + for key in self.backend.scan_iter(f"{prefix}{pattern}"): logger.debug("Delete key: %r", key) self.backend.delete(key) @@ -205,17 +205,17 @@ def make_cache_key(func=None, client_scope=True, args=None, kwargs=None) -> str: return result -def clear_cache(func=None, client_scope=True, *args, **kwargs): +def clear_cache(func=None, client_scope=True, prefix=CACHE_PREFIX, *args, **kwargs): try: if func: key = make_cache_key(func, client_scope) cache.delete_keys(f"{key}*") if args or kwargs: key = make_cache_key(func, client_scope=client_scope, args=args, kwargs=kwargs) - cache.delete_keys(f"{key}*") + cache.delete_keys(f"{key}*", prefix=prefix) else: key = make_cache_key(client_scope=client_scope) - cache.delete_keys(f"{key}*") + cache.delete_keys(f"{key}*", prefix=prefix) except Exception as e: logger.error("Error deleting cache: %r", e) From 6a8999c495abe8908868be9837e38cb779328c5b Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Tue, 16 Nov 2021 11:33:14 +0100 Subject: [PATCH 117/162] Fix CLI command description --- lifemonitor/commands/cache.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lifemonitor/commands/cache.py b/lifemonitor/commands/cache.py index e40dc6953..8f943d7e5 100644 --- a/lifemonitor/commands/cache.py +++ b/lifemonitor/commands/cache.py @@ -34,7 +34,7 @@ @with_appcontext def clear(): """ - Initialize LifeMonitor App + Delete API cache """ from lifemonitor.cache import clear_cache try: From 0e794a81c06a0b1cae80c46bb829976ad20e4a6d Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Tue, 16 Nov 2021 11:35:19 +0100 Subject: [PATCH 118/162] Configure explicitly redis namespace used by dramatiq --- lifemonitor/tasks/task_queue.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lifemonitor/tasks/task_queue.py b/lifemonitor/tasks/task_queue.py index 0b2952241..d7b3b0924 100644 --- a/lifemonitor/tasks/task_queue.py +++ b/lifemonitor/tasks/task_queue.py @@ -10,6 +10,8 @@ from dramatiq.results.backends.redis import RedisBackend from flask_apscheduler import APScheduler +REDIS_NAMESPACE = 'dramatiq' + logger = logging.getLogger(__name__) @@ -53,8 +55,8 @@ def init_task_queue(app): port=int(app.config.get("REDIS_PORT_NUMBER", 6379))) logger.info("Setting up task queue. 
Pointing to broker %s:%s", redis_connection_params['host'], redis_connection_params['port']) - redis_broker = RedisBroker(**redis_connection_params) - result_backend = RedisBackend(**redis_connection_params) + redis_broker = RedisBroker(namespace=f"{REDIS_NAMESPACE}", **redis_connection_params) + result_backend = RedisBackend(namespace=f"{REDIS_NAMESPACE}-results", **redis_connection_params) redis_broker.add_middleware(Results(backend=result_backend)) dramatiq.set_broker(redis_broker) redis_broker.add_middleware(AppContextMiddleware(app)) From 7df1db3d9636a5647b04341938c20185e8de6289 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Tue, 16 Nov 2021 11:36:24 +0100 Subject: [PATCH 119/162] Add command to reset task-queue status --- lifemonitor/commands/tasks.py | 46 +++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 lifemonitor/commands/tasks.py diff --git a/lifemonitor/commands/tasks.py b/lifemonitor/commands/tasks.py new file mode 100644 index 000000000..ef35a4a96 --- /dev/null +++ b/lifemonitor/commands/tasks.py @@ -0,0 +1,46 @@ +# Copyright (c) 2020-2021 CRS4 +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + + +import logging +from flask.blueprints import Blueprint +from flask.cli import with_appcontext + +# set module level logger +logger = logging.getLogger() + +# define the blueprint for DB commands +blueprint = Blueprint('task-queue', __name__) + + +@blueprint.cli.command('reset') +@with_appcontext +def reset(): + """ + Reset task-queue status + """ + from lifemonitor.cache import clear_cache + from lifemonitor.tasks.task_queue import REDIS_NAMESPACE + try: + clear_cache(client_scope=False, prefix=REDIS_NAMESPACE) + except Exception as e: + print("Error when deleting cache: %s" % (str(e))) + if logger.isEnabledFor(logging.DEBUG): + logger.exception(e) From 90ad3ea1fd69383b0c2c0d497c334028d6ae867b Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Tue, 16 Nov 2021 11:47:30 +0100 Subject: [PATCH 120/162] Catch task exceptions and prevent rescheduling on failure --- lifemonitor/tasks/tasks.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/lifemonitor/tasks/tasks.py b/lifemonitor/tasks/tasks.py index efafe9e0a..f3121d435 100644 --- a/lifemonitor/tasks/tasks.py +++ b/lifemonitor/tasks/tasks.py @@ -66,11 +66,19 @@ def check_workflows(): logger.info("Updating RO-Crate...") workflows_rocrate_download(w.uuid, v.version) logger.info("Updating RO-Crate... 
DONE") + except Exception as e: + logger.error(f"Error when updating the workflow {w}: {str(e)}") + if logger.isEnabledFor(logging.DEBUG): + logger.exception(e) finally: try: logout_user() except Exception as e: logger.debug(e) + except Exception as e: + logger.error("Error when executing task 'check_workflows': %s", str(e)) + if logger.isEnabledFor(logging.DEBUG): + logger.exception(e) finally: cache.ignore_cache_values = False logger.info("Starting 'check_workflows' task.... DONE!") @@ -94,6 +102,10 @@ def check_last_build(): for b in builds: logger.debug("Updating build: %r", i.get_test_build(b.id)) logger.debug("Updating latest build: %r", i.last_test_build) + except Exception as e: + logger.error("Error when executing task 'check_last_build': %s", str(e)) + if logger.isEnabledFor(logging.DEBUG): + logger.exception(e) finally: cache.ignore_cache_values = False logger.info("Checking last build: DONE!") From e40024615afd2cc7be412d96e9623117109b3745 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Wed, 17 Nov 2021 19:37:46 +0100 Subject: [PATCH 121/162] Transactional cache update --- lifemonitor/cache.py | 215 +++++++++++++++++++++++++++++++++++-------- 1 file changed, 176 insertions(+), 39 deletions(-) diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index 18e0d329f..ccc4796dd 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -23,6 +23,7 @@ import functools import logging import os +from contextlib import contextmanager import redis import redis_lock @@ -74,17 +75,60 @@ def update(cls, config): logger.debug("Error when updating timeout %r", t) +class IllegalStateException(RuntimeError): + pass + + +class CacheTransaction(object): + def __init__(self, cache: Cache): + self.__cache__ = cache + self.__locks__ = {} + self.__closed__ = False + + def get_lock(blocking: bool = True, timeout: int = Timeout.REQUEST): + pass + + def has_lock(self, lock: str) -> bool: + return lock in self.__locks__ + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + print("Exception has been handled") + self.close() + return True + + def close(self): + if self.__closed__: + logger.debug("Transaction already closed") + else: + logger.debug("Closing transaction") + try: + for k in list(self.__locks__.keys()): + logger.debug("Releasing lock %r", k) + l = self.__locks__.pop(k) + try: + l.release() + except redis_lock.NotAcquired as e: + logger.debug(e) + logger.debug("All lock released") + logger.debug("Transaction closed") + finally: + self.__closed__ = True + + class Cache(object): # Enable/Disable cache cache_enabled = True # Ignore cache values even if cache is enabled - ignore_cache_values = False + _ignore_cache_values = False # Reference to the Flask cache manager __cache__ = None @classmethod - def __get_flask_cache(cls): + def __get_flask_cache(cls) -> FlaskCache: if cls.__cache__ is None: cls.__cache__ = FlaskCache() return cls.__cache__ @@ -92,43 +136,125 @@ def __get_flask_cache(cls): @classmethod def init_app(cls, app: Flask): cls.__get_flask_cache().init_app(app) + cls.reset_locks() - def __init__(self, cache: FlaskCache = None) -> None: + def __init__(self, cache: FlaskCache = None, parent: Cache = None) -> None: self._cache = cache or self.__get_flask_cache() + self._current_transaction = None + self._parent = parent + + @property + def parent(self) -> Cache: + return self._parent + + @property + def ignore_cache_values(self): + return self._ignore_cache_values is True and \ + (self.parent and self.parent.ignore_cache_values is True) + + 
@ignore_cache_values.setter + def ignore_cache_values(self, value: bool): + self._ignore_cache_values = True if value is True else False @property def cache(self) -> RedisCache: - return self._cache.cache + return self.get_redis_cache(self._cache) + + @contextmanager + def transaction(self, name) -> CacheTransaction: + if self._current_transaction is not None: + raise IllegalStateException("Transaction already started") + t = CacheTransaction(self) + if self.parent is not None: + self.parent._current_transaction = t + else: + self._current_transaction = t + try: + yield t + except Exception as e: + logger.exception(e) + finally: + logger.debug("Finally closing transaction") + try: + t.close() + except Exception as fe: + logger.debug(fe) + if self.parent is not None: + self.parent._current_transaction = None + else: + self._current_transaction = None @property def backend(self) -> redis.Redis: - if isinstance(self.cache, RedisCache): - return self.cache._read_clients - logger.warning("No cache backend found") - return None + return self.get_backend(self.cache) - def size(self): - return len(self.cache.get_dict()) + def get_current_transaction(self): + if self._current_transaction is None and self.parent is not None: + return self.parent.get_current_transaction() + return self._current_transaction + + def keys(self, pattern: str = None): + query = f"{CACHE_PREFIX}" + if pattern is not None: + query = f"{query}{pattern}" + else: + query = f"{query}*" + return self.backend.keys(query) - def to_dict(self): - return self.cache.get_dict() + def size(self, pattern=None): + return len(self.keys(pattern=pattern)) - def lock(self, key: str): + def to_dict(self, pattern=None): + return {k: self.backend.get(k) for k in self.keys(pattern=pattern)} + + @contextmanager + def lock(self, key: str, blocking: bool = True, + timeout: int = Timeout.REQUEST, + expire=60, auto_renewal=True): logger.debug("Getting lock for key %r...", key) - return redis_lock.Lock(self.backend, key) if self.backend else False + lock = redis_lock.Lock(self.backend, key, expire=expire, auto_renewal=auto_renewal) + try: + yield lock.acquire(blocking=blocking) + finally: + try: + logger.debug("Auto release of lock for key=%s", key) + transaction = self.get_current_transaction() + if transaction is None: + logger.debug("No transaction in progress...") + lock.release() + else: + logger.debug("Transaction %r in progress...", transaction) + transaction.__locks__[key] = lock + logger.debug("Added lock to transaction: %r", transaction.has_lock(key)) + except redis_lock.NotAcquired as e: + logger.debug(e) def set(self, key: str, value, timeout: int = Timeout.NONE): - if key is not None and self.cache_enabled and isinstance(self.cache, RedisCache): - logger.debug("Setting cache value for key %r.... ", key) + if key is not None and self.cache_enabled: + logger.debug("Setting cache value for key %r.... 
(timeout: %r)", key, timeout) self.cache.set(key, value, timeout=timeout) + def has(self, key: str) -> bool: + return self.get(key) is not None + + def _get_status(self) -> dict: + return { + "self": self, + "enabled": self.cache_enabled, + "ignore_values": self.ignore_cache_values, + "current_transaction": self.get_current_transaction(), + "transaction locks": self.get_current_transaction().__locks__ if self.get_current_transaction() else None + } + def get(self, key: str): logger.debug("Getting value from cache...") + logger.debug("Cache status: %r", self._get_status()) return self.cache.get(key) \ if isinstance(self.cache, RedisCache) \ and self.cache_enabled \ and not self.ignore_cache_values \ - and not cache.ignore_cache_values \ + and (self.get_current_transaction() is None + or self.get_current_transaction().has_lock(key)) \ else None def delete_keys(self, pattern: str, prefix: str = CACHE_PREFIX): @@ -140,6 +266,25 @@ def delete_keys(self, pattern: str, prefix: str = CACHE_PREFIX): logger.debug("Delete key: %r", key) self.backend.delete(key) + def clear(self): + for key in self.backend.scan_iter(f"{CACHE_PREFIX}*"): + self.backend.delete(key) + self.reset_locks() + + @classmethod + def reset_locks(cls): + redis_lock.reset_all(cls.get_backend()) + + @classmethod + def get_redis_cache(cls, cache: FlaskCache = None) -> RedisCache: + rc = cache or cls.__get_flask_cache() + return rc.cache if isinstance(rc, FlaskCache) else None + + @classmethod + def get_backend(cls, cache: RedisCache = None) -> redis.Redis: + rc = cache or cls.get_redis_cache() + return rc._read_clients if isinstance(rc, RedisCache) else None + # global cache instance cache: Cache = Cache() @@ -236,24 +381,16 @@ def wrapper(*args, **kwargs): if result is None: logger.debug(f"Value {key} not set in cache...") if hc.backend: - lock = hc.lock(key) - if lock: - try: - if lock.acquire(blocking=True, timeout=timeout * 3 / 4): - result = hc.get(key) - if not result: - logger.debug("Cache empty: getting value from the actual function...") - result = function(*args, **kwargs) - logger.debug("Checking unless function: %r", unless) - if unless is None or unless is True or callable(unless) and not unless(result): - hc.set(key, result, timeout=timeout) - else: - logger.debug("Don't set value in cache due to unless=%r", "None" if unless is None else "True") - finally: - try: - lock.release() - except redis_lock.NotAcquired as e: - logger.debug(e) + with hc.lock(key, blocking=True): + result = hc.get(key) + if not result: + logger.debug("Cache empty: getting value from the actual function...") + result = function(*args, **kwargs) + logger.debug("Checking unless function: %r", unless) + if unless is None or unless is True or callable(unless) and not unless(result): + hc.set(key, result, timeout=timeout) + else: + logger.debug("Don't set value in cache due to unless=%r", "None" if unless is None else "True") else: logger.warning("Using unsupported cache backend: cache will not be used") result = function(*args, **kwargs) @@ -270,10 +407,10 @@ def wrapper(*args, **kwargs): class CacheMixin(object): - _helper: Cache = None + _cache: Cache = None @property def cache(self) -> Cache: - if self._helper is None: - self._helper = Cache() - return self._helper + if self._cache is None: + self._cache = Cache(parent=cache) + return self._cache From 63198f23c2c3ab428c34ca7babccdfe00740c2fa Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Wed, 17 Nov 2021 19:38:22 +0100 Subject: [PATCH 122/162] Fix CLI cache command --- 
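Note: the clear command previously went through the module-level clear_cache
helper; it now delegates to the global cache instance, whose clear() method
was introduced by the previous patch. A minimal sketch of the resulting
behaviour, assuming a configured Redis back-end:

    from lifemonitor.cache import cache

    # Scans the back-end for every key under the "lifemonitor-api-cache:"
    # prefix, deletes it, and finally resets any outstanding redis_lock locks.
    cache.clear()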
lifemonitor/commands/cache.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lifemonitor/commands/cache.py b/lifemonitor/commands/cache.py index 8f943d7e5..36180b668 100644 --- a/lifemonitor/commands/cache.py +++ b/lifemonitor/commands/cache.py @@ -36,9 +36,9 @@ def clear(): """ Delete API cache """ - from lifemonitor.cache import clear_cache + from lifemonitor.cache import cache try: - clear_cache(client_scope=False) + cache.clear() except Exception as e: print("Error when deleting cache: %s" % (str(e))) if logger.isEnabledFor(logging.DEBUG): From d43c0bd7adb32d94e229870246791577d6d6dab8 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Wed, 17 Nov 2021 19:39:13 +0100 Subject: [PATCH 123/162] Don't import tasks on testing environments --- lifemonitor/tasks/task_queue.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lifemonitor/tasks/task_queue.py b/lifemonitor/tasks/task_queue.py index d7b3b0924..cf76c29cf 100644 --- a/lifemonitor/tasks/task_queue.py +++ b/lifemonitor/tasks/task_queue.py @@ -66,7 +66,8 @@ def init_task_queue(app): logger.info("Starting job scheduler") app.scheduler = APScheduler() app.scheduler.init_app(app) - from . import tasks # noqa: F401 imported for its side effects - it defines the tasks + if app.config.get('ENV') not in ['testingSupport', 'testing']: + from . import tasks # noqa: F401 imported for its side effects - it defines the tasks app.scheduler.start() # Shut down the scheduler when exiting the app atexit.register(app.scheduler.shutdown) From 9cf48f91a5d977be9c678823bc20fd1a5e5ed9be Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Wed, 17 Nov 2021 19:59:51 +0100 Subject: [PATCH 124/162] Fix configuration of timeouts --- lifemonitor/cache.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index ccc4796dd..991249ae6 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -29,6 +29,7 @@ import redis_lock from flask import request from flask.app import Flask +from flask.globals import current_app from flask_caching import Cache as FlaskCache from flask_caching.backends.rediscache import RedisCache @@ -42,12 +43,13 @@ def _get_timeout(name: str, default: int = 0, config=None) -> int: result = None - if config is not None: - try: + try: + config = current_app.config if config is None else config + if config is not None: result = config.get(name) - except Exception as e: - logger.debug(e) - result = result or os.environ.get(name, default) + except Exception as e: + logger.debug(e) + result = result if result is not None else os.environ.get(name, default) logger.debug("Getting timeout %r: %r", name, result) return int(result) @@ -66,11 +68,11 @@ class Timeout: BUILD = _get_timeout(_get_timeout_key('BUILD'), default=300) @classmethod - def update(cls, config): + def update(cls, config=None): for t in ('DEFAULT', 'REQUEST', 'SESSION', 'BUILD', 'WORKFLOW'): try: key = _get_timeout_key(t) - setattr(cls, key, _get_timeout(key, config=config)) + setattr(cls, t, _get_timeout(key, config=config)) except Exception: logger.debug("Error when updating timeout %r", t) From abb37f8a51d14652e11b8a1fff455a96a36d3678 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Wed, 17 Nov 2021 20:02:23 +0100 Subject: [PATCH 125/162] Update async tasks to rebuild cache --- lifemonitor/tasks/tasks.py | 50 ++++++++++++++++++-------------------- 1 file changed, 23 insertions(+), 27 deletions(-) diff --git a/lifemonitor/tasks/tasks.py 
b/lifemonitor/tasks/tasks.py index f3121d435..61cbd344e 100644 --- a/lifemonitor/tasks/tasks.py +++ b/lifemonitor/tasks/tasks.py @@ -55,32 +55,30 @@ def check_workflows():
     logger.info("Starting 'check_workflows' task....")
     for w in Workflow.all():
         try:
-            cache.ignore_cache_values = True
             for v in w.versions.values():
-                logger.info("Updating external link: %r", v.external_link)
-                u = v.submitter
-                with current_app.test_request_context():
-                    try:
-                        if u is not None:
-                            login_user(u)
-                        logger.info("Updating RO-Crate...")
-                        workflows_rocrate_download(w.uuid, v.version)
-                        logger.info("Updating RO-Crate... DONE")
-                    except Exception as e:
-                        logger.error(f"Error when updating the workflow {w}: {str(e)}")
-                        if logger.isEnabledFor(logging.DEBUG):
-                            logger.exception(e)
-                    finally:
+                with v.cache.transaction(str(v)):
+                    logger.info("Updating external link: %r", v.external_link)
+                    u = v.submitter
+                    with current_app.test_request_context():
                         try:
-                            logout_user()
+                            if u is not None:
+                                login_user(u)
+                            logger.info("Updating RO-Crate...")
+                            workflows_rocrate_download(w.uuid, v.version)
+                            logger.info("Updating RO-Crate... DONE")
                         except Exception as e:
-                            logger.debug(e)
+                            logger.error(f"Error when updating the workflow {w}: {str(e)}")
+                            if logger.isEnabledFor(logging.DEBUG):
+                                logger.exception(e)
+                        finally:
+                            try:
+                                logout_user()
+                            except Exception as e:
+                                logger.debug(e)
         except Exception as e:
             logger.error("Error when executing task 'check_workflows': %s", str(e))
             if logger.isEnabledFor(logging.DEBUG):
                 logger.exception(e)
-        finally:
-            cache.ignore_cache_values = False
     logger.info("Starting 'check_workflows' task.... DONE!")


@@ -93,19 +91,17 @@ def check_last_build():
     from lifemonitor.api.models import Workflow

     logger.info("Starting 'check_last build' task...")
     for w in Workflow.all():
         try:
-            cache.ignore_cache_values = True
             for s in w.latest_version.test_suites:
                 logger.info("Updating workflow: %r", w)
                 for i in s.test_instances:
-                    builds = i.get_test_builds()
-                    logger.debug("Updating latest builds: %r", builds)
-                    for b in builds:
-                        logger.debug("Updating build: %r", i.get_test_build(b.id))
-                    logger.debug("Updating latest build: %r", i.last_test_build)
+                    with i.cache.transaction(str(i)):
+                        builds = i.get_test_builds()
+                        logger.debug("Updating latest builds: %r", builds)
+                        for b in builds:
+                            logger.debug("Updating build: %r", i.get_test_build(b.id))
+                        logger.debug("Updating latest build: %r", i.last_test_build)
         except Exception as e:
             logger.error("Error when executing task 'check_last_build': %s", str(e))
             if logger.isEnabledFor(logging.DEBUG):
                 logger.exception(e)
-        finally:
-            cache.ignore_cache_values = False
     logger.info("Checking last build: DONE!")

From 1f9620c2a02c5ce8162f4368837e2ab0db8cc709 Mon Sep 17 00:00:00 2001
From: Marco Enrico Piras
Date: Wed, 17 Nov 2021 20:10:09 +0100
Subject: [PATCH 126/162] Improve performance in retrieving GitHub builds

---
 lifemonitor/api/models/services/github.py | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/lifemonitor/api/models/services/github.py b/lifemonitor/api/models/services/github.py index ad445dc37..ab8377913 100644 --- a/lifemonitor/api/models/services/github.py +++ b/lifemonitor/api/models/services/github.py @@ -23,7 +23,7 @@ import itertools as it
 import logging
 import re
-from typing import Generator, Optional, Tuple
+from typing import Generator, List, Optional, Tuple
 from urllib.error import URLError
 from urllib.parse import urlparse

@@ -32,7 +32,7 @@ from lifemonitor.cache import Timeout, cached

 import github
-from github import Github, GithubException
+from github import 
Github, GithubException, Workflow from github import \ RateLimitExceededException as GithubRateLimitExceededException @@ -143,6 +143,10 @@ def _get_gh_workflow(self, repository, workflow_id): logger.debug("Getting github workflow...") return self._gh_service.get_repo(repository).get_workflow(workflow_id) + @cached(timeout=Timeout.NONE, client_scope=False) + def _get_gh_workflow_runs(self, workflow: Workflow.Workflow) -> List: + return list(workflow.get_runs()) + def _iter_runs(self, test_instance: models.TestInstance, status: str = None) -> Generator[github.WorkflowRun.WorkflowRun]: _, repository, workflow_id = self._get_workflow_info(test_instance.resource) logger.debug("iterating over runs -- wf id: %s; repository: %s; status: %s", workflow_id, repository, status) @@ -150,7 +154,7 @@ def _iter_runs(self, test_instance: models.TestInstance, status: str = None) -> workflow = self._get_gh_workflow(repository, workflow_id) logger.debug("Retrieved workflow %s from github", workflow_id) - for run in workflow.get_runs(): + for run in self._get_gh_workflow_runs(workflow): logger.debug("Loading Github run ID %r", run.id) # The Workflow.get_runs method in the PyGithub API has a status argument # which in theory we could use to filter the runs that are retrieved to From 2bcc2b99ba94561e1c8202a91fa43e7ebfdbe64c Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Wed, 17 Nov 2021 20:11:20 +0100 Subject: [PATCH 127/162] Add unit tests --- tests/conftest.py | 1 + tests/unit/cache/test_cache.py | 112 +++++++++++++++++++++++++++++++++ tests/unit/test_utils.py | 9 ++- 3 files changed, 120 insertions(+), 2 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index 751c7df76..208513f2a 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -96,6 +96,7 @@ def no_cache(app_context): def redis_cache(app_context): app_context.app.config['CACHE_TYPE'] = "flask_caching.backends.rediscache.RedisCache" init_cache(app_context.app) + cache.clear() return cache diff --git a/tests/unit/cache/test_cache.py b/tests/unit/cache/test_cache.py index b446eae70..5ba41f525 100644 --- a/tests/unit/cache/test_cache.py +++ b/tests/unit/cache/test_cache.py @@ -19,17 +19,20 @@ # SOFTWARE. 
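# A note on the mocking strategy used by the tests below: values returned by
# the cached methods are pickled before being stored in the Redis back-end,
# and a plain MagicMock cannot be pickled. The tests therefore stub the
# testing-service calls with PickableMock (added to tests/unit/test_utils.py
# in this same patch), whose __reduce__ collapses the instance to a bare
# Mock, presumably so that any mock reference reachable from a cached value
# stays serializable:
#
#     class PickableMock(MagicMock):
#         def __reduce__(self):
#             return (Mock, ())  # a pickle round-trip yields a plain Mock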
import logging +from time import sleep from unittest.mock import MagicMock import lifemonitor.api.models as models from lifemonitor.cache import cache, make_cache_key from tests import utils +from tests.unit.test_utils import PickableMock logger = logging.getLogger(__name__) def test_cache_last_build(app_client, redis_cache, user1): valid_workflow = 'sort-and-change-case' + cache.clear() assert cache.size() == 0, "Cache should be empty" _, workflow = utils.pick_and_register_workflow(user1, valid_workflow) assert workflow, "Workflow should be set" @@ -56,6 +59,7 @@ def test_cache_last_build(app_client, redis_cache, user1): def test_cache_test_builds(app_client, redis_cache, user1): valid_workflow = 'sort-and-change-case' + cache.clear() assert cache.size() == 0, "Cache should be empty" _, workflow = utils.pick_and_register_workflow(user1, valid_workflow) assert workflow, "Workflow should be set" @@ -83,3 +87,111 @@ def test_cache_test_builds(app_client, redis_cache, user1): limit = 20 cache_key = make_cache_key(instance.get_test_builds, client_scope=False, args=(instance,), kwargs={"limit": limit}) assert instance.cache.get(cache_key) is None, "Cache should be empty" + + +def test_cache_last_build_update(app_client, redis_cache, user1): + valid_workflow = 'sort-and-change-case' + logger.debug("Cache content: %r", cache.keys) + cache.clear() + assert cache.size() == 0, "Cache should be empty" + _, w = utils.pick_and_register_workflow(user1, valid_workflow) + assert w, "Workflow should be set" + + try: + for s in w.test_suites: + logger.info("Updating workflow: %r", w) + for i in s.test_instances: + builds_data = i.testing_service.get_test_builds(i) + i.testing_service.get_test_builds = PickableMock() + i.testing_service.get_test_builds.return_value = builds_data + with i.cache.transaction(str(i)) as t: + assert i.cache.get_current_transaction() == t, "Unexpected transaction" + + cache_key = make_cache_key(i.get_test_builds, client_scope=False, args=[i]) + logger.debug("The cache key: %r", cache_key) + assert not cache.has(cache_key), "The key should not be in cache" + + logger.debug("\n\nGetting latest builds (first call)...") + builds = i.get_test_builds() + logger.debug("Getting latest builds (first call): %r\n", builds) + i.testing_service.get_test_builds.assert_called_once(), "i.testing_service.get_test_builds should be called once" + assert cache.has(cache_key), "The key should be in cache" + cache_size = cache.size() + logger.debug("Current cache size: %r", cache_size) + assert i.cache.get_current_transaction() == t, "Unexpected transaction" + + logger.debug("\n\nGetting latest builds (second call)...") + builds = i.get_test_builds() + logger.debug("Getting latest builds (second call): %r\n", builds) + i.testing_service.get_test_builds.assert_called_once(), "i.testing_service.get_test_builds should be called once" + assert cache.has(cache_key), "The key should be in cache" + assert cache.size() == cache_size, "Unexpected cache size" + assert i.cache.get_current_transaction() == t, "Unexpected transaction" + + logger.debug("\n\nGetting latest builds (third call)...") + builds = i.get_test_builds() + logger.debug("Getting latest builds (third call): %r\n", builds) + i.testing_service.get_test_builds.assert_called_once(), "i.testing_service.get_test_builds should be called once" + assert cache.has(cache_key), "The key should be in cache" + assert cache.size() == cache_size, "Unexpected cache size" + assert i.cache.get_current_transaction() == t, "Unexpected transaction" + + b_data = [] + 
for b in builds: + b_data.append(i.testing_service.get_test_build(i, b.id)) + + i.testing_service.get_test_build = PickableMock() + for count in range(0, len(b_data)): + b = b_data[count] + i.testing_service.get_test_build.return_value = b + + cache_key = make_cache_key(i.get_test_build, client_scope=False, args=[i, b.id]) + + logger.debug("\n\nChecking build (first call): %r", i.get_test_build(b.id)) + i.testing_service.get_test_build.call_count == count + 1, "i.testing_service.get_test_build should be called once" + assert cache.has(cache_key), f"The key {cache_key} should be in cache" + cache_size = cache.size() + logger.debug("Current cache size: %r", cache_size) + + logger.debug("\n\nChecking build (second call): %r", i.get_test_build(b.id)) + i.testing_service.get_test_build.call_count == count + 1, "i.testing_service.get_test_build should be called once" + assert cache.has(cache_key), f"The key {cache_key} should be in cache" + assert cache.size() == cache_size, "Unexpected cache size" + + logger.debug("\n\nChecking build (third call): %r", i.get_test_build(b.id)) + i.testing_service.get_test_build.call_count == count + 1, "i.testing_service.get_test_build should be called once" + assert cache.has(cache_key), f"The key {cache_key} should be in cache" + assert cache.size() == cache_size, "Unexpected cache size" + + logger.debug("\n\nGetting latest build: %r", i.last_test_build) + i.testing_service.get_test_builds.assert_called_once(), "i.testing_service.get_test_builds should be called once" + logger.debug("\n\nGetting latest build... DONE\n\n") + + logger.debug("\n\nGetting latest build: %r", i.last_test_build) + + except Exception as e: + logger.error("Error when executing task 'check_last_build': %s", str(e)) + if logger.isEnabledFor(logging.DEBUG): + logger.exception(e) + + sleep(2) + assert cache.size() > 0, "Cache should not be empty" + logger.debug(cache.keys()) + assert len(cache.backend.keys("lock*")) == 0, "No lock should be set" + + +def test_cache_task_last_build(app_client, redis_cache, user1): + valid_workflow = 'sort-and-change-case' + logger.debug("Cache content: %r", cache.keys) + cache.clear() + assert cache.size() == 0, "Cache should be empty" + _, workflow = utils.pick_and_register_workflow(user1, valid_workflow) + assert workflow, "Workflow should be set" + + from lifemonitor.tasks.tasks import check_last_build + check_last_build() + + sleep(2) + assert cache.size() > 0, "Cache should not be empty" + logger.debug(cache.keys()) + assert len(cache.backend.keys("lock*")) == 0, "No lock should be set" diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py index 695f050d2..64757f5dc 100644 --- a/tests/unit/test_utils.py +++ b/tests/unit/test_utils.py @@ -20,11 +20,11 @@ import os import tempfile - -import pytest +from unittest.mock import MagicMock, Mock import lifemonitor.exceptions as lm_exceptions import lifemonitor.utils as utils +import pytest def test_download_url_404(): @@ -32,3 +32,8 @@ def test_download_url_404(): with pytest.raises(lm_exceptions.DownloadException) as excinfo: _ = utils.download_url('http://httpbin.org/status/404', os.path.join(d, 'get_404')) assert excinfo.value.status == 404 + + +class PickableMock(MagicMock): + def __reduce__(self): + return (Mock, ()) From 7d60c8845c30eef8fb997726a6ffc5a8078010ad Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Wed, 17 Nov 2021 20:23:30 +0100 Subject: [PATCH 128/162] Fix flake8 issues --- lifemonitor/cache.py | 7 +++---- lifemonitor/tasks/tasks.py | 2 -- 2 files changed, 3 insertions(+), 
6 deletions(-) diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index 991249ae6..7c3baba65 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -109,9 +109,9 @@ def close(self): try: for k in list(self.__locks__.keys()): logger.debug("Releasing lock %r", k) - l = self.__locks__.pop(k) + lk = self.__locks__.pop(k) try: - l.release() + lk.release() except redis_lock.NotAcquired as e: logger.debug(e) logger.debug("All lock released") @@ -255,8 +255,7 @@ def get(self, key: str): if isinstance(self.cache, RedisCache) \ and self.cache_enabled \ and not self.ignore_cache_values \ - and (self.get_current_transaction() is None - or self.get_current_transaction().has_lock(key)) \ + and (self.get_current_transaction() is None or self.get_current_transaction().has_lock(key)) \ else None def delete_keys(self, pattern: str, prefix: str = CACHE_PREFIX): diff --git a/lifemonitor/tasks/tasks.py b/lifemonitor/tasks/tasks.py index 61cbd344e..a054e7fa5 100644 --- a/lifemonitor/tasks/tasks.py +++ b/lifemonitor/tasks/tasks.py @@ -50,7 +50,6 @@ def check_workflows(): from lifemonitor.api.controllers import workflows_rocrate_download from lifemonitor.api.models import Workflow from lifemonitor.auth.services import login_user, logout_user - from lifemonitor.cache import cache logger.info("Starting 'check_workflows' task....") for w in Workflow.all(): @@ -86,7 +85,6 @@ def check_workflows(): @dramatiq.actor def check_last_build(): from lifemonitor.api.models import Workflow - from lifemonitor.cache import cache logger.info("Starting 'check_last build' task...") for w in Workflow.all(): From 79eb225334296a1f274b2718ccd14c9c6f9eb816 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Wed, 17 Nov 2021 20:48:09 +0100 Subject: [PATCH 129/162] Fix missing requirement --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index 1b8c6b603..958f0d202 100644 --- a/requirements.txt +++ b/requirements.txt @@ -29,6 +29,7 @@ python-jenkins==1.7.0 python-redis-lock~=3.7.0 PyGithub~=1.55 PyYAML~=5.4.1 +pika~=1.2.0 redis~=3.5.3 requests~=2.26.0 rocrate~=0.4.0 From c23fa2baad01750ad4bb2bfa9540e26f8fd85a62 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Thu, 18 Nov 2021 13:13:54 +0100 Subject: [PATCH 130/162] Use Redis pipelines to update cache through CacheTransaction --- lifemonitor/cache.py | 85 ++++++++++++++++++++++++++++++++++---------- 1 file changed, 67 insertions(+), 18 deletions(-) diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index 7c3baba65..1a6559a03 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -85,39 +85,77 @@ class CacheTransaction(object): def __init__(self, cache: Cache): self.__cache__ = cache self.__locks__ = {} + self.__data__ = {} self.__closed__ = False - def get_lock(blocking: bool = True, timeout: int = Timeout.REQUEST): - pass + def make_key(self, key: str, prefix: str = CACHE_PREFIX) -> str: + return self.__cache__._make_key(key, prefix=prefix) - def has_lock(self, lock: str) -> bool: - return lock in self.__locks__ + def set(self, key: str, value, timeout: int = Timeout.REQUEST, prefix: str = CACHE_PREFIX): + self.__data__[self.make_key(key, prefix=prefix)] = (value, timeout) + + def get(self, key: str, prefix: str = CACHE_PREFIX): + data = self.__data__.get(self.make_key(key, prefix=prefix), None) + return data[0] if data is not None else None + + def keys(self): + return list(self.__data__.keys()) + + def has(self, key: str) -> bool: + if key is None: + return False + return self.make_key(key) 
in self.keys() + + def has_lock(self, key: str) -> bool: + return key in self.__locks__ + + def size(self) -> int: + return len(self.__data__.keys()) def __enter__(self): + self.start() return self def __exit__(self, type, value, traceback): - print("Exception has been handled") self.close() - return True + + def start(self): + logger.debug("Starting transaction...") + self.__data__.clear() + self.__locks__.clear() + self.__closed__ = False def close(self): if self.__closed__: logger.debug("Transaction already closed") else: - logger.debug("Closing transaction") + logger.debug("Stopping transaction...") try: + logger.debug("Finalizing transaction...") + pipeline = self.__cache__.backend.pipeline() + for k, data in self.__data__.items(): + logger.debug(f"Setting key {k} on transaction pipeline (timeout: {data[1]}") + pipeline.set(k, pickle.dumps(data[0]), ex=data[1] if data[1] > 0 else None) + pipeline.execute() + logger.debug("Transaction finalized!") for k in list(self.__locks__.keys()): - logger.debug("Releasing lock %r", k) lk = self.__locks__.pop(k) try: - lk.release() + if lk: + logger.debug("Releasing lock %r", k) + lk.release() + else: + logger.debug("No lock for key %r", k) except redis_lock.NotAcquired as e: logger.debug(e) logger.debug("All lock released") logger.debug("Transaction closed") + except Exception as e: + logger.exception(e) finally: self.__closed__ = True + self.__cache__._set_current_transaction(None) + logger.debug("Transaction finished") class Cache(object): @@ -231,9 +269,15 @@ def lock(self, key: str, blocking: bool = True, except redis_lock.NotAcquired as e: logger.debug(e) - def set(self, key: str, value, timeout: int = Timeout.NONE): + def set(self, key: str, value, timeout: int = Timeout.NONE, prefix: str = CACHE_PREFIX): if key is not None and self.cache_enabled: - logger.debug("Setting cache value for key %r.... (timeout: %r)", key, timeout) + transaction = self.get_current_transaction() + if transaction is not None: + logger.debug("Setting transactional cache value for key %r.... (timeout: %r)", key, timeout) + transaction.set(key, value, timeout=timeout) + else: + key = self._make_key(key, prefix=prefix) + logger.debug("Setting cache value for key %r.... 
(timeout: %r)", key, timeout) self.cache.set(key, value, timeout=timeout) def has(self, key: str) -> bool: @@ -248,15 +292,20 @@ def _get_status(self) -> dict: "transaction locks": self.get_current_transaction().__locks__ if self.get_current_transaction() else None } - def get(self, key: str): + def get(self, key: str, prefix: str = CACHE_PREFIX): logger.debug("Getting value from cache...") logger.debug("Cache status: %r", self._get_status()) - return self.cache.get(key) \ - if isinstance(self.cache, RedisCache) \ - and self.cache_enabled \ - and not self.ignore_cache_values \ - and (self.get_current_transaction() is None or self.get_current_transaction().has_lock(key)) \ - else None + if not self.cache_enabled or self.ignore_cache_values: + return None + # get value from transaction + transaction = self.get_current_transaction() + logger.debug("Transaction is: %r", transaction) + if transaction is not None: + logger.debug("Getting transactional cache value for key %r....", key) + return transaction.get(key) + # get value from cache + data = self.backend.get(self._make_key(key, prefix=prefix)) + logger.debug("Current cache data: %r", data is not None) def delete_keys(self, pattern: str, prefix: str = CACHE_PREFIX): logger.debug(f"Deleting keys by pattern: {pattern}") From c9de46e2a92484533d7b48c152dd6c444036dfbe Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Thu, 18 Nov 2021 14:39:04 +0100 Subject: [PATCH 131/162] Remove FlaskCaching dependency --- lifemonitor/cache.py | 92 +++++++++++++++++++++++--------------------- requirements.txt | 1 - 2 files changed, 49 insertions(+), 44 deletions(-) diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index 1a6559a03..66b6f70bc 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -23,6 +23,7 @@ import functools import logging import os +import pickle from contextlib import contextmanager import redis @@ -30,8 +31,6 @@ from flask import request from flask.app import Flask from flask.globals import current_app -from flask_caching import Cache as FlaskCache -from flask_caching.backends.rediscache import RedisCache # Set prefix CACHE_PREFIX = "lifemonitor-api-cache:" @@ -164,25 +163,38 @@ class Cache(object): cache_enabled = True # Ignore cache values even if cache is enabled _ignore_cache_values = False - # Reference to the Flask cache manager + # Reference to Redis back-end __cache__ = None @classmethod - def __get_flask_cache(cls) -> FlaskCache: + def init_backend(cls, config): if cls.__cache__ is None: - cls.__cache__ = FlaskCache() + if config.get("CACHE_TYPE", None) == "flask_caching.backends.rediscache.RedisCache": + cls.__cache__ = redis.Redis.from_url(config.get("CACHE_REDIS_URL")) + else: + cls.cache_enabled = False + return cls.__cache__ + + @classmethod + def get_backend(cls) -> redis.Redis: + if cls.__cache__ is None: + raise IllegalStateException("Back-end not initialized!") return cls.__cache__ @classmethod def init_app(cls, app: Flask): - cls.__get_flask_cache().init_app(app) - cls.reset_locks() + cls.init_backend(app.config) + if cls.__cache__ is not None: + cls.reset_locks() - def __init__(self, cache: FlaskCache = None, parent: Cache = None) -> None: - self._cache = cache or self.__get_flask_cache() + def __init__(self, parent: Cache = None) -> None: self._current_transaction = None self._parent = parent + @staticmethod + def _make_key(key: str, prefix: str = CACHE_PREFIX) -> str: + return f"{prefix}{key}" + @property def parent(self) -> Cache: return self._parent @@ -196,19 +208,26 @@ def 
ignore_cache_values(self): def ignore_cache_values(self, value: bool): self._ignore_cache_values = True if value is True else False - @property - def cache(self) -> RedisCache: - return self.get_redis_cache(self._cache) + def _set_current_transaction(self, t: CacheTransaction): + # TODO: replace with a thread local attribute + if self.parent is not None: + self.parent._set_current_transaction(t) + else: + self._current_transaction = t + + def get_current_transaction(self) -> CacheTransaction: + # TODO: replace with a thread local attribute + if self.parent is not None: + return self.parent.get_current_transaction() + return self._current_transaction @contextmanager def transaction(self, name) -> CacheTransaction: + # TODO: replace with a thread local attribute if self._current_transaction is not None: raise IllegalStateException("Transaction already started") t = CacheTransaction(self) - if self.parent is not None: - self.parent._current_transaction = t - else: - self._current_transaction = t + self._set_current_transaction(t) try: yield t except Exception as e: @@ -219,19 +238,11 @@ def transaction(self, name) -> CacheTransaction: t.close() except Exception as fe: logger.debug(fe) - if self.parent is not None: - self.parent._current_transaction = None - else: - self._current_transaction = None + self._set_current_transaction(None) @property def backend(self) -> redis.Redis: - return self.get_backend(self.cache) - - def get_current_transaction(self): - if self._current_transaction is None and self.parent is not None: - return self.parent.get_current_transaction() - return self._current_transaction + return self.get_backend() def keys(self, pattern: str = None): query = f"{CACHE_PREFIX}" @@ -239,6 +250,10 @@ def keys(self, pattern: str = None): query = f"{query}{pattern}" else: query = f"{query}*" + logger.debug("Keys pattern: %r", query) + transaction = self.get_current_transaction() + if transaction is not None: + pass return self.backend.keys(query) def size(self, pattern=None): @@ -254,7 +269,7 @@ def lock(self, key: str, blocking: bool = True, logger.debug("Getting lock for key %r...", key) lock = redis_lock.Lock(self.backend, key, expire=expire, auto_renewal=auto_renewal) try: - yield lock.acquire(blocking=blocking) + yield lock.acquire(blocking=blocking, timeout=timeout if timeout > 0 else None) finally: try: logger.debug("Auto release of lock for key=%s", key) @@ -278,10 +293,10 @@ def set(self, key: str, value, timeout: int = Timeout.NONE, prefix: str = CACHE_ else: key = self._make_key(key, prefix=prefix) logger.debug("Setting cache value for key %r.... 
(timeout: %r)", key, timeout) - self.cache.set(key, value, timeout=timeout) + self.backend.set(key, pickle.dumps(value), ex=timeout if timeout > 0 else None) - def has(self, key: str) -> bool: - return self.get(key) is not None + def has(self, key: str, prefix: str = CACHE_PREFIX) -> bool: + return self.get(key, prefix=prefix) is not None def _get_status(self) -> dict: return { @@ -306,13 +321,14 @@ def get(self, key: str, prefix: str = CACHE_PREFIX): # get value from cache data = self.backend.get(self._make_key(key, prefix=prefix)) logger.debug("Current cache data: %r", data is not None) + return pickle.loads(data) if data is not None else data def delete_keys(self, pattern: str, prefix: str = CACHE_PREFIX): logger.debug(f"Deleting keys by pattern: {pattern}") - if isinstance(self.cache, RedisCache): + if self.cache_enabled: logger.debug("Redis backend detected!") logger.debug(f"Pattern: {prefix}{pattern}") - for key in self.backend.scan_iter(f"{prefix}{pattern}"): + for key in self.backend.scan_iter(self._make_key(pattern, prefix=prefix)): logger.debug("Delete key: %r", key) self.backend.delete(key) @@ -325,16 +341,6 @@ def clear(self): def reset_locks(cls): redis_lock.reset_all(cls.get_backend()) - @classmethod - def get_redis_cache(cls, cache: FlaskCache = None) -> RedisCache: - rc = cache or cls.__get_flask_cache() - return rc.cache if isinstance(rc, FlaskCache) else None - - @classmethod - def get_backend(cls, cache: RedisCache = None) -> redis.Redis: - rc = cache or cls.get_redis_cache() - return rc._read_clients if isinstance(rc, RedisCache) else None - # global cache instance cache: Cache = Cache() @@ -431,7 +437,7 @@ def wrapper(*args, **kwargs): if result is None: logger.debug(f"Value {key} not set in cache...") if hc.backend: - with hc.lock(key, blocking=True): + with hc.lock(key, blocking=True, timeout=Timeout.NONE): result = hc.get(key) if not result: logger.debug("Cache empty: getting value from the actual function...") diff --git a/requirements.txt b/requirements.txt index 958f0d202..edcd4c8ad 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,7 +13,6 @@ flask-wtf~=0.15.1 Flask-APScheduler==1.12.2 Flask-SQLAlchemy==2.5.1 Flask-Migrate==3.1.0 -Flask-Caching==1.10.1 Flask>=1.1.4,<2.0.0 gunicorn~=20.1.0 jwt==1.2.0 From 625cc7118e726a39be029948fc73191587d37d36 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Thu, 18 Nov 2021 14:44:17 +0100 Subject: [PATCH 132/162] Add unit tests --- tests/unit/cache/test_cache.py | 90 +++++++++++++++++++++++++++++++--- tests/unit/test_utils.py | 2 +- 2 files changed, 84 insertions(+), 8 deletions(-) diff --git a/tests/unit/cache/test_cache.py b/tests/unit/cache/test_cache.py index 5ba41f525..1926902f1 100644 --- a/tests/unit/cache/test_cache.py +++ b/tests/unit/cache/test_cache.py @@ -23,13 +23,66 @@ from unittest.mock import MagicMock import lifemonitor.api.models as models -from lifemonitor.cache import cache, make_cache_key +import pytest +from lifemonitor.cache import (IllegalStateException, cache, init_cache, + make_cache_key) from tests import utils -from tests.unit.test_utils import PickableMock +from tests.unit.test_utils import SerializableMock logger = logging.getLogger(__name__) +@pytest.mark.parametrize("app_settings", [(False, {"CACHE_TYPE": "Flask_caching.backends.simplecache.SimpleCache"})], indirect=True) +def test_cache_config(app_settings, app_context): + + logger.debug("App settings: %r", app_settings) + + app = app_context.app + logger.debug("App: %r", app) + + config = app.config + logger.debug("Config: %r", 
config) + + #config.setdefault("CACHE_TYPE", "Flask_caching.backends.simplecache.SimpleCache") + assert config.get("CACHE_TYPE") == "Flask_caching.backends.simplecache.SimpleCache", "Unexpected cache type on app config" + init_cache(app) + assert cache.cache_enabled is False, "Cache should be disabled" + with pytest.raises(IllegalStateException): + cache.backend + + +def test_cache_transaction_setup(): + + cache.clear() + key = "test" + value = "test" + assert cache.size() == 0, "Cache should be empty" + with cache.transaction("test") as t: + assert t.size() == 0, "Unexpected transaction size: it should be empty" + t.set(key, value) + assert t.size() == 1, "Unexpected transaction size: it should be equal to 1" + assert t.has(key), f"key '{key}' should be set in the current transaction" + assert cache.size() == 0, "Cache should be empty" + + assert cache.size() == 1, "Cache should contain one element" + assert cache.has(key), f"key '{key}' should be in cache" + assert cache.get_current_transaction() is None, "Unexpected transaction" + + +def test_cache_timeout(): + cache.clear() + assert cache.size() == 0, "Cache should be empty" + key = "test5" + value = 1024 + timeout = 5 + cache.set(key, value, timeout=timeout) + assert cache.size() == 1, "Cache should not be empty" + assert cache.has(key) is True, f"Key {key} should be in cache" + sleep(5) + assert cache.size() == 0, "Cache should be empty" + assert cache.has(key) is False, f"Key {key} should not be in cache after {timeout} secs" + + def test_cache_last_build(app_client, redis_cache, user1): valid_workflow = 'sort-and-change-case' cache.clear() @@ -102,8 +155,9 @@ def test_cache_last_build_update(app_client, redis_cache, user1): logger.info("Updating workflow: %r", w) for i in s.test_instances: builds_data = i.testing_service.get_test_builds(i) - i.testing_service.get_test_builds = PickableMock() + i.testing_service.get_test_builds = SerializableMock() i.testing_service.get_test_builds.return_value = builds_data + transaction_keys = None with i.cache.transaction(str(i)) as t: assert i.cache.get_current_transaction() == t, "Unexpected transaction" @@ -115,6 +169,7 @@ def test_cache_last_build_update(app_client, redis_cache, user1): builds = i.get_test_builds() logger.debug("Getting latest builds (first call): %r\n", builds) i.testing_service.get_test_builds.assert_called_once(), "i.testing_service.get_test_builds should be called once" + logger.debug(f"Checking if key {cache_key} is in cache...") assert cache.has(cache_key), "The key should be in cache" cache_size = cache.size() logger.debug("Current cache size: %r", cache_size) @@ -124,6 +179,7 @@ def test_cache_last_build_update(app_client, redis_cache, user1): builds = i.get_test_builds() logger.debug("Getting latest builds (second call): %r\n", builds) i.testing_service.get_test_builds.assert_called_once(), "i.testing_service.get_test_builds should be called once" + logger.debug(f"Checking if key {cache_key} is in cache...") assert cache.has(cache_key), "The key should be in cache" assert cache.size() == cache_size, "Unexpected cache size" assert i.cache.get_current_transaction() == t, "Unexpected transaction" @@ -132,34 +188,44 @@ def test_cache_last_build_update(app_client, redis_cache, user1): builds = i.get_test_builds() logger.debug("Getting latest builds (third call): %r\n", builds) i.testing_service.get_test_builds.assert_called_once(), "i.testing_service.get_test_builds should be called once" + logger.debug(f"Checking if key {cache_key} is in cache...") assert 
cache.has(cache_key), "The key should be in cache" assert cache.size() == cache_size, "Unexpected cache size" assert i.cache.get_current_transaction() == t, "Unexpected transaction" + logger.debug("\n\Preparing data to test builds...") b_data = [] for b in builds: b_data.append(i.testing_service.get_test_build(i, b.id)) + logger.debug("\n\Preparing data to test builds... DONE") - i.testing_service.get_test_build = PickableMock() + logger.debug("\n\nChecking test builds...") + i.testing_service.get_test_build = SerializableMock() for count in range(0, len(b_data)): b = b_data[count] i.testing_service.get_test_build.return_value = b cache_key = make_cache_key(i.get_test_build, client_scope=False, args=[i, b.id]) - logger.debug("\n\nChecking build (first call): %r", i.get_test_build(b.id)) + logger.debug("\n\nChecking build (first call): buildID=%r", b.id) + logger.debug("Build data: %r", i.get_test_build(b.id)) i.testing_service.get_test_build.call_count == count + 1, "i.testing_service.get_test_build should be called once" + logger.debug(f"Checking if key {cache_key} is in cache...") assert cache.has(cache_key), f"The key {cache_key} should be in cache" cache_size = cache.size() logger.debug("Current cache size: %r", cache_size) - logger.debug("\n\nChecking build (second call): %r", i.get_test_build(b.id)) + logger.debug("\n\nChecking build (second call): buildID=%r", b.id) + logger.debug("Build data: %r", i.get_test_build(b.id)) i.testing_service.get_test_build.call_count == count + 1, "i.testing_service.get_test_build should be called once" + logger.debug(f"Checking if key {cache_key} is in cache...") assert cache.has(cache_key), f"The key {cache_key} should be in cache" assert cache.size() == cache_size, "Unexpected cache size" - logger.debug("\n\nChecking build (third call): %r", i.get_test_build(b.id)) + logger.debug("\n\nChecking build (third call): buildID=%r", b.id) + logger.debug("Build data: %r", i.get_test_build(b.id)) i.testing_service.get_test_build.call_count == count + 1, "i.testing_service.get_test_build should be called once" + logger.debug(f"Checking if key {cache_key} is in cache...") assert cache.has(cache_key), f"The key {cache_key} should be in cache" assert cache.size() == cache_size, "Unexpected cache size" @@ -167,7 +233,17 @@ def test_cache_last_build_update(app_client, redis_cache, user1): i.testing_service.get_test_builds.assert_called_once(), "i.testing_service.get_test_builds should be called once" logger.debug("\n\nGetting latest build... 
DONE\n\n") + transaction_keys = t.keys() + logger.debug("Transaction keys (# %r): %r", len(transaction_keys), transaction_keys) + assert len(transaction_keys) == t.size(), "Unexpected transaction size" + + # check the cache after the transaction is completed + cache_size = cache.size() + assert len(transaction_keys) == cache_size, "Unpexpected cache size: it should be equal to the transaction size" + + # check latest build logger.debug("\n\nGetting latest build: %r", i.last_test_build) + assert cache.size() == cache_size, "Unexpected cache size" except Exception as e: logger.error("Error when executing task 'check_last_build': %s", str(e)) diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py index 64757f5dc..ad2b27349 100644 --- a/tests/unit/test_utils.py +++ b/tests/unit/test_utils.py @@ -34,6 +34,6 @@ def test_download_url_404(): assert excinfo.value.status == 404 -class PickableMock(MagicMock): +class SerializableMock(MagicMock): def __reduce__(self): return (Mock, ()) From f64b20e8d37facc055c98c0c497f84b505671c4c Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Thu, 18 Nov 2021 15:49:04 +0100 Subject: [PATCH 133/162] Fix cache initialisation --- lifemonitor/cache.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index 66b6f70bc..8a9c59ac2 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -168,11 +168,11 @@ class Cache(object): @classmethod def init_backend(cls, config): - if cls.__cache__ is None: - if config.get("CACHE_TYPE", None) == "flask_caching.backends.rediscache.RedisCache": - cls.__cache__ = redis.Redis.from_url(config.get("CACHE_REDIS_URL")) - else: - cls.cache_enabled = False + if config.get("CACHE_TYPE", None) == "flask_caching.backends.rediscache.RedisCache": + cls.__cache__ = redis.Redis.from_url(config.get("CACHE_REDIS_URL")) + else: + cls.__cache__ = None + cls.cache_enabled = False return cls.__cache__ @classmethod From 2d406d518f18feca08b092acee9002f110b1b539 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Thu, 18 Nov 2021 15:52:25 +0100 Subject: [PATCH 134/162] Fix flake8 issues --- tests/unit/cache/test_cache.py | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/tests/unit/cache/test_cache.py b/tests/unit/cache/test_cache.py index 1926902f1..293c306f2 100644 --- a/tests/unit/cache/test_cache.py +++ b/tests/unit/cache/test_cache.py @@ -32,18 +32,13 @@ logger = logging.getLogger(__name__) -@pytest.mark.parametrize("app_settings", [(False, {"CACHE_TYPE": "Flask_caching.backends.simplecache.SimpleCache"})], indirect=True) +@pytest.mark.parametrize("app_settings", [(False, {"CACHE_TYPE": "Flask_caching.backends.simplecache.SimpleCache"})], indirect=True) def test_cache_config(app_settings, app_context): - logger.debug("App settings: %r", app_settings) - app = app_context.app logger.debug("App: %r", app) - config = app.config logger.debug("Config: %r", config) - - #config.setdefault("CACHE_TYPE", "Flask_caching.backends.simplecache.SimpleCache") assert config.get("CACHE_TYPE") == "Flask_caching.backends.simplecache.SimpleCache", "Unexpected cache type on app config" init_cache(app) assert cache.cache_enabled is False, "Cache should be disabled" @@ -193,11 +188,11 @@ def test_cache_last_build_update(app_client, redis_cache, user1): assert cache.size() == cache_size, "Unexpected cache size" assert i.cache.get_current_transaction() == t, "Unexpected transaction" - logger.debug("\n\Preparing data to test builds...") + 
logger.debug("\n\nPreparing data to test builds...") b_data = [] for b in builds: b_data.append(i.testing_service.get_test_build(i, b.id)) - logger.debug("\n\Preparing data to test builds... DONE") + logger.debug("\n\nPreparing data to test builds... DONE") logger.debug("\n\nChecking test builds...") i.testing_service.get_test_build = SerializableMock() From 3f316122c14f71d5032da0db49cc1d518f0a11b7 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Thu, 18 Nov 2021 16:25:51 +0100 Subject: [PATCH 135/162] Fix cache reinitialisation --- lifemonitor/cache.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index 8a9c59ac2..b3fd6d215 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -168,9 +168,14 @@ class Cache(object): @classmethod def init_backend(cls, config): + logger.debug("Initialising cache back-end...") + logger.debug("Cache type detected: %r", config.get("CACHE_TYPE", None)) if config.get("CACHE_TYPE", None) == "flask_caching.backends.rediscache.RedisCache": + logger.debug("Configuring Redis back-end...") cls.__cache__ = redis.Redis.from_url(config.get("CACHE_REDIS_URL")) + cls.cache_enabled = True else: + logger.debug("No cache") cls.__cache__ = None cls.cache_enabled = False return cls.__cache__ From 2f097a400347e942d49adafeb2e5f9bb4c9050ed Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Thu, 18 Nov 2021 16:28:04 +0100 Subject: [PATCH 136/162] Fix test configuration --- tests/conftest.py | 19 ++++++++++++++----- tests/unit/cache/test_cache.py | 13 ++++++------- 2 files changed, 20 insertions(+), 12 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index 208513f2a..ad6290a8e 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -77,7 +77,9 @@ def lm() -> LifeMonitor: @pytest.fixture def service_registry() -> ClassManager: - return TestingService.service_type_registry + registry = TestingService.service_type_registry + registry._load_concrete_types() + return registry @pytest.fixture @@ -88,14 +90,18 @@ def token_manager() -> TestingServiceTokenManager: @pytest.fixture def no_cache(app_context): app_context.app.config['CACHE_TYPE'] = "flask_caching.backends.nullcache.NullCache" - init_cache(app_context.app) + assert app_context.app.config.get('CACHE_TYPE') == "flask_caching.backends.nullcache.NullCache" + cache.init_app(app_context.app) + assert cache.cache_enabled is False, "Cache should be disabled" return cache @pytest.fixture def redis_cache(app_context): app_context.app.config['CACHE_TYPE'] = "flask_caching.backends.rediscache.RedisCache" - init_cache(app_context.app) + assert app_context.app.config.get('CACHE_TYPE') == "flask_caching.backends.rediscache.RedisCache" + cache.init_app(app_context.app) + assert cache.cache_enabled is True, "Cache should not be disabled" cache.clear() return cache @@ -113,10 +119,12 @@ def initialize(app_settings, request_context, service_registry: ClassManager): os.environ.pop("FLASK_APP_CONFIG_FILE", None) -def _get_app_settings(include_env=True): +def _get_app_settings(include_env=True, extra=None): settings = env_settings.copy() if include_env else {} settings.update(helpers.load_settings(app_settings_path)) settings.update(helpers.load_settings(tests_settings_path)) + if extra: + settings.update(extra) # remove API KEYS api_keys = {} pattern = re.compile("((\\w+)_API_KEY(_\\w+)?)") @@ -133,7 +141,8 @@ def _get_app_settings(include_env=True): @pytest.fixture(scope="session") def app_settings(request): if hasattr(request, 'param'): - return 
_get_app_settings(request.param) + logger.debug("App settings param: %r", request.param) + return _get_app_settings(*request.param) return _get_app_settings() diff --git a/tests/unit/cache/test_cache.py b/tests/unit/cache/test_cache.py index 293c306f2..0c362a53d 100644 --- a/tests/unit/cache/test_cache.py +++ b/tests/unit/cache/test_cache.py @@ -46,8 +46,7 @@ def test_cache_config(app_settings, app_context): cache.backend -def test_cache_transaction_setup(): - +def test_cache_transaction_setup(app_context, redis_cache): cache.clear() key = "test" value = "test" @@ -64,7 +63,7 @@ def test_cache_transaction_setup(): assert cache.get_current_transaction() is None, "Unexpected transaction" -def test_cache_timeout(): +def test_cache_timeout(app_context, redis_cache): cache.clear() assert cache.size() == 0, "Cache should be empty" key = "test5" @@ -78,7 +77,7 @@ def test_cache_timeout(): assert cache.has(key) is False, f"Key {key} should not be in cache after {timeout} secs" -def test_cache_last_build(app_client, redis_cache, user1): +def test_cache_last_build(app_context, redis_cache, user1): valid_workflow = 'sort-and-change-case' cache.clear() assert cache.size() == 0, "Cache should be empty" @@ -105,7 +104,7 @@ def test_cache_last_build(app_client, redis_cache, user1): assert build == cached_build, "Build should be equal to the cached build" -def test_cache_test_builds(app_client, redis_cache, user1): +def test_cache_test_builds(app_context, redis_cache, user1): valid_workflow = 'sort-and-change-case' cache.clear() assert cache.size() == 0, "Cache should be empty" @@ -137,7 +136,7 @@ def test_cache_test_builds(app_client, redis_cache, user1): assert instance.cache.get(cache_key) is None, "Cache should be empty" -def test_cache_last_build_update(app_client, redis_cache, user1): +def test_cache_last_build_update(app_context, redis_cache, user1): valid_workflow = 'sort-and-change-case' logger.debug("Cache content: %r", cache.keys) cache.clear() @@ -251,7 +250,7 @@ def test_cache_last_build_update(app_client, redis_cache, user1): assert len(cache.backend.keys("lock*")) == 0, "No lock should be set" -def test_cache_task_last_build(app_client, redis_cache, user1): +def test_cache_task_last_build(app_context, redis_cache, user1): valid_workflow = 'sort-and-change-case' logger.debug("Cache content: %r", cache.keys) cache.clear() From 3ab5c50934bb9ce64079ebfc2fed621e578d4647 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Thu, 18 Nov 2021 16:46:48 +0100 Subject: [PATCH 137/162] Fix unused import --- tests/conftest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/conftest.py b/tests/conftest.py index ad6290a8e..b052fb4b9 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -33,7 +33,7 @@ from lifemonitor.api.models import (TestingService, TestingServiceTokenManager, TestSuite, User) from lifemonitor.api.services import LifeMonitor -from lifemonitor.cache import cache, init_cache, clear_cache +from lifemonitor.cache import cache, clear_cache from lifemonitor.utils import ClassManager from tests.utils import register_workflow From 21ea52c3068d42c2bacc916e1eae399b85d31505 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Thu, 18 Nov 2021 16:47:01 +0100 Subject: [PATCH 138/162] Fix fixture parameter --- tests/conftest.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/conftest.py b/tests/conftest.py index b052fb4b9..027a02985 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -25,6 +25,7 @@ import re import string import uuid 
+from collections.abc import Iterable from unittest.mock import MagicMock import lifemonitor.db as lm_db @@ -142,7 +143,10 @@ def _get_app_settings(include_env=True, extra=None): def app_settings(request): if hasattr(request, 'param'): logger.debug("App settings param: %r", request.param) - return _get_app_settings(*request.param) + if isinstance(request.param, Iterable): + return _get_app_settings(*request.param) + else: + return _get_app_settings(request.param) return _get_app_settings() From 9903ae1f54fe0b814df65b1d989a5d8b21d0984d Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Thu, 18 Nov 2021 18:11:07 +0100 Subject: [PATCH 139/162] Don't log NoResultExceptions (debug only) --- lifemonitor/api/models/workflows.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lifemonitor/api/models/workflows.py b/lifemonitor/api/models/workflows.py index c48744c66..3bece861a 100644 --- a/lifemonitor/api/models/workflows.py +++ b/lifemonitor/api/models/workflows.py @@ -316,7 +316,7 @@ def get_public_workflow_version(cls, uuid, version) -> WorkflowVersion: .filter(Workflow.public == true())\ .filter(cls.version == version).one() # noqa: E712 except NoResultFound as e: - logger.exception(e) + logger.debug(e) return None except Exception as e: raise lm_exceptions.LifeMonitorException(detail=str(e), stack=str(e)) @@ -331,7 +331,7 @@ def get_user_workflow_version(cls, owner: User, uuid, version) -> WorkflowVersio .filter(Permission.user_id == owner.id)\ .filter(cls.version == version).one() except NoResultFound as e: - logger.exception(e) + logger.debug(e) return None except Exception as e: raise lm_exceptions.LifeMonitorException(detail=str(e), stack=str(e)) From 414799d898d5be9f9c8587338ee8584ad50319da Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Fri, 19 Nov 2021 12:06:26 +0100 Subject: [PATCH 140/162] Set maximum number of retries of tasks --- lifemonitor/tasks/tasks.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lifemonitor/tasks/tasks.py b/lifemonitor/tasks/tasks.py index a054e7fa5..3c04a927b 100644 --- a/lifemonitor/tasks/tasks.py +++ b/lifemonitor/tasks/tasks.py @@ -38,13 +38,13 @@ def decorator(actor): @schedule(CronTrigger(second=0)) -@dramatiq.actor +@dramatiq.actor(max_retries=3) def heartbeat(): logger.info("Heartbeat!") @schedule(IntervalTrigger(seconds=Timeout.WORKFLOW * 3 / 4)) -@dramatiq.actor +@dramatiq.actor(max_retries=3) def check_workflows(): from flask import current_app from lifemonitor.api.controllers import workflows_rocrate_download @@ -82,7 +82,7 @@ def check_workflows(): @schedule(IntervalTrigger(seconds=Timeout.BUILD * 3 / 4)) -@dramatiq.actor +@dramatiq.actor(max_retries=3) def check_last_build(): from lifemonitor.api.models import Workflow From e01d581e054d7b328661c53c4094e79031fc674b Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Fri, 19 Nov 2021 12:08:06 +0100 Subject: [PATCH 141/162] Store current transaction on a thread.local property --- lifemonitor/cache.py | 42 +++++++++++++++++++++++------------------- 1 file changed, 23 insertions(+), 19 deletions(-) diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index b3fd6d215..a91ef61a0 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -20,6 +20,7 @@ from __future__ import annotations +import threading import functools import logging import os @@ -81,12 +82,20 @@ class IllegalStateException(RuntimeError): class CacheTransaction(object): - def __init__(self, cache: Cache): + def __init__(self, cache: Cache, name=None): + 
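# A note on this patch: the active transaction is now kept on a
# threading.local() (see self._local further down in this diff) instead of a
# plain instance attribute, so each worker thread gets an independent slot
# and concurrent requests can no longer observe or clobber each other's open
# transaction. A minimal illustration of the primitive being relied on:
#
#     import threading
#     _local = threading.local()
#     _local.transaction = t                  # visible only in this thread
#     getattr(_local, "transaction", None)    # None from any other thread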
self.__name = name or id(self) self.__cache__ = cache self.__locks__ = {} self.__data__ = {} self.__closed__ = False + def __repr__(self) -> str: + return f"CacheTransaction#{self.name}" + + @property + def name(self): + return self.__name + def make_key(self, key: str, prefix: str = CACHE_PREFIX) -> str: return self.__cache__._make_key(key, prefix=prefix) @@ -119,16 +128,16 @@ def __exit__(self, type, value, traceback): self.close() def start(self): - logger.debug("Starting transaction...") + logger.debug(f"Starting {self} ...") self.__data__.clear() self.__locks__.clear() self.__closed__ = False def close(self): if self.__closed__: - logger.debug("Transaction already closed") + logger.debug(f"{self} already closed") else: - logger.debug("Stopping transaction...") + logger.debug(f"Stopping {self}...") try: logger.debug("Finalizing transaction...") pipeline = self.__cache__.backend.pipeline() @@ -147,14 +156,14 @@ def close(self): logger.debug("No lock for key %r", k) except redis_lock.NotAcquired as e: logger.debug(e) - logger.debug("All lock released") - logger.debug("Transaction closed") + logger.debug(f"All lock of {self} released") + logger.debug(f"{self} closed") except Exception as e: logger.exception(e) finally: self.__closed__ = True self.__cache__._set_current_transaction(None) - logger.debug("Transaction finished") + logger.debug(f"{self} finished") class Cache(object): @@ -193,7 +202,7 @@ def init_app(cls, app: Flask): cls.reset_locks() def __init__(self, parent: Cache = None) -> None: - self._current_transaction = None + self._local = threading.local() self._parent = parent @staticmethod @@ -214,24 +223,22 @@ def ignore_cache_values(self, value: bool): self._ignore_cache_values = True if value is True else False def _set_current_transaction(self, t: CacheTransaction): - # TODO: replace with a thread local attribute if self.parent is not None: self.parent._set_current_transaction(t) else: - self._current_transaction = t + self._local.transaction = t def get_current_transaction(self) -> CacheTransaction: - # TODO: replace with a thread local attribute if self.parent is not None: return self.parent.get_current_transaction() - return self._current_transaction + try: + return self._local.transaction + except AttributeError: + return None @contextmanager def transaction(self, name) -> CacheTransaction: - # TODO: replace with a thread local attribute - if self._current_transaction is not None: - raise IllegalStateException("Transaction already started") - t = CacheTransaction(self) + t = CacheTransaction(self, name=name) self._set_current_transaction(t) try: yield t @@ -256,9 +263,6 @@ def keys(self, pattern: str = None): else: query = f"{query}*" logger.debug("Keys pattern: %r", query) - transaction = self.get_current_transaction() - if transaction is not None: - pass return self.backend.keys(query) def size(self, pattern=None): From a5be9d1852b5b3f70ee9c3693480dc8a9cb6a9d5 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Sat, 20 Nov 2021 18:04:06 +0100 Subject: [PATCH 142/162] Fix issues with concurrent cache updates --- lifemonitor/cache.py | 181 +++++++++++++++++++++++++++---------------- 1 file changed, 114 insertions(+), 67 deletions(-) diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index a91ef61a0..088372d93 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -20,11 +20,12 @@ from __future__ import annotations -import threading import functools import logging import os import pickle +import threading +import time from contextlib import 
contextmanager import redis @@ -82,16 +83,35 @@ class IllegalStateException(RuntimeError): class CacheTransaction(object): + + _current_transaction = threading.local() + + @classmethod + def get_current_transaction(cls) -> CacheTransaction: + try: + return cls._current_transaction.value + except AttributeError: + return None + + @classmethod + def set_current_transaction(cls, t: CacheTransaction): + cls._current_transaction.value = t + def __init__(self, cache: Cache, name=None): - self.__name = name or id(self) + self.__name = name or f"T-{id(self)}" self.__cache__ = cache self.__locks__ = {} self.__data__ = {} + self.__started__ = False self.__closed__ = False def __repr__(self) -> str: return f"CacheTransaction#{self.name}" + @property + def cache(self): + return self.__cache__ + @property def name(self): return self.__name @@ -114,6 +134,26 @@ def has(self, key: str) -> bool: return False return self.make_key(key) in self.keys() + @contextmanager + def lock(self, key: str, + timeout: int = Timeout.REQUEST, + expire=15, retry=1, auto_renewal=True): + logger.debug("Getting lock for key %r...", key) + if key in self.__locks__: + yield self.__locks__[key] + else: + lock = redis_lock.Lock(self.cache.backend, key, expire=expire, auto_renewal=auto_renewal, id=self.name) + while not lock.acquire(blocking=False, timeout=timeout if timeout > 0 else None): + logger.debug("Waiting for lock key '%r'... (retry in %r secs)", lock, retry) + time.sleep(retry) + logger.debug("Lock for key '%r' acquired: %r", key, lock.locked) + self.__locks__[key] = lock + logger.debug("Lock for key '%r' added to transaction %r: %r", key, self.name, self.has_lock(key)) + try: + yield lock + finally: + logger.debug("Releasing transactional lock context for key '%s'", key) + def has_lock(self, key: str) -> bool: return key in self.__locks__ @@ -126,11 +166,16 @@ def __enter__(self): def __exit__(self, type, value, traceback): self.close() + return True + + def is_started(self) -> bool: + return self.__started__ def start(self): logger.debug(f"Starting {self} ...") self.__data__.clear() self.__locks__.clear() + self.__started__ = True self.__closed__ = False def close(self): @@ -148,14 +193,18 @@ def close(self): logger.debug("Transaction finalized!") for k in list(self.__locks__.keys()): lk = self.__locks__.pop(k) - try: - if lk: - logger.debug("Releasing lock %r", k) - lk.release() + if lk: + if lk.locked: + logger.debug("Releasing lock for key '%r'...", k) + try: + lk.release() + logger.debug("Lock for key '%r' released: %r", k, lk.locked) + except redis_lock.NotAcquired as e: + logger.warning(e) else: - logger.debug("No lock for key %r", k) - except redis_lock.NotAcquired as e: - logger.debug(e) + logger.warning("Lock for key '%s' not acquired or expired") + else: + logger.warning("No lock for key %r", k) logger.debug(f"All lock of {self} released") logger.debug(f"{self} closed") except Exception as e: @@ -166,6 +215,9 @@ def close(self): logger.debug(f"{self} finished") +_current_transaction = threading.local() + + class Cache(object): # Enable/Disable cache @@ -202,7 +254,7 @@ def init_app(cls, app: Flask): cls.reset_locks() def __init__(self, parent: Cache = None) -> None: - self._local = threading.local() + self._local = _current_transaction self._parent = parent @staticmethod @@ -223,34 +275,36 @@ def ignore_cache_values(self, value: bool): self._ignore_cache_values = True if value is True else False def _set_current_transaction(self, t: CacheTransaction): - if self.parent is not None: - 
self.parent._set_current_transaction(t) - else: - self._local.transaction = t + CacheTransaction.set_current_transaction(t) def get_current_transaction(self) -> CacheTransaction: - if self.parent is not None: - return self.parent.get_current_transaction() - try: - return self._local.transaction - except AttributeError: - return None + return CacheTransaction.get_current_transaction() @contextmanager - def transaction(self, name) -> CacheTransaction: - t = CacheTransaction(self, name=name) - self._set_current_transaction(t) + def transaction(self, name=None) -> CacheTransaction: + new_transaction = False + t = self.get_current_transaction() + if t is None: + logger.debug("Creating a new transaction...") + t = CacheTransaction(self, name=name) + self._set_current_transaction(t) + new_transaction = True + else: + logger.debug("Reusing transaction in the current thread: %r", t) try: yield t except Exception as e: logger.exception(e) finally: logger.debug("Finally closing transaction") - try: - t.close() - except Exception as fe: - logger.debug(fe) - self._set_current_transaction(None) + if not new_transaction: + logger.debug("Transaction not initialized in this context: it should continue") + else: + try: + t.close() + except Exception as fe: + logger.debug(fe) + self._set_current_transaction(None) @property def backend(self) -> redis.Redis: @@ -272,37 +326,34 @@ def to_dict(self, pattern=None): return {k: self.backend.get(k) for k in self.keys(pattern=pattern)} @contextmanager - def lock(self, key: str, blocking: bool = True, + def lock(self, key: str, timeout: int = Timeout.REQUEST, - expire=60, auto_renewal=True): + expire=15, retry=1, auto_renewal=True): logger.debug("Getting lock for key %r...", key) lock = redis_lock.Lock(self.backend, key, expire=expire, auto_renewal=auto_renewal) try: - yield lock.acquire(blocking=blocking, timeout=timeout if timeout > 0 else None) + while not lock.acquire(blocking=False, timeout=timeout if timeout > 0 else None): + logger.debug("Waiting to acquire the lock for '%r'... (retry in %r secs)", lock, retry) + time.sleep(retry) + logger.debug(f"Lock for key '{key}' acquired: {lock.locked}") + yield lock finally: try: - logger.debug("Auto release of lock for key=%s", key) - transaction = self.get_current_transaction() - if transaction is None: - logger.debug("No transaction in progress...") - lock.release() + logger.debug("Exiting from transactional lock context for key '%s'", key) + if not lock.locked: + logger.warning("Lock for key '%s' not acquired", key) else: - logger.debug("Transaction %r in progress...", transaction) - transaction.__locks__[key] = lock - logger.debug("Added lock to transaction: %r", transaction.has_lock(key)) + logger.debug("Auto release of lock for key '%s'", key) + lock.release() + logger.debug("Lock for key='%s' released: %r", key, lock.locked) except redis_lock.NotAcquired as e: logger.debug(e) def set(self, key: str, value, timeout: int = Timeout.NONE, prefix: str = CACHE_PREFIX): if key is not None and self.cache_enabled: - transaction = self.get_current_transaction() - if transaction is not None: - logger.debug("Setting transactional cache value for key %r.... (timeout: %r)", key, timeout) - transaction.set(key, value, timeout=timeout) - else: - key = self._make_key(key, prefix=prefix) - logger.debug("Setting cache value for key %r.... 
(timeout: %r)", key, timeout) - self.backend.set(key, pickle.dumps(value), ex=timeout if timeout > 0 else None) + key = self._make_key(key, prefix=prefix) + logger.debug("Setting cache value for key %r.... (timeout: %r)", key, timeout) + self.backend.set(key, pickle.dumps(value), ex=timeout if timeout > 0 else None) def has(self, key: str, prefix: str = CACHE_PREFIX) -> bool: return self.get(key, prefix=prefix) is not None @@ -321,13 +372,6 @@ def get(self, key: str, prefix: str = CACHE_PREFIX): logger.debug("Cache status: %r", self._get_status()) if not self.cache_enabled or self.ignore_cache_values: return None - # get value from transaction - transaction = self.get_current_transaction() - logger.debug("Transaction is: %r", transaction) - if transaction is not None: - logger.debug("Getting transactional cache value for key %r....", key) - return transaction.get(key) - # get value from cache data = self.backend.get(self._make_key(key, prefix=prefix)) logger.debug("Current cache data: %r", data is not None) return pickle.loads(data) if data is not None else data @@ -440,27 +484,30 @@ def wrapper(*args, **kwargs): obj: CacheMixin = args[0] if len(args) > 0 and isinstance(args[0], CacheMixin) else None logger.debug("Wrapping a method of a CacheMixin instance: %r", obj is not None) hc = cache if obj is None else obj.cache - if hc.cache_enabled: + if hc and hc.cache_enabled: key = make_cache_key(function, client_scope, args=args, kwargs=kwargs) - result = hc.get(key) - if result is None: - logger.debug(f"Value {key} not set in cache...") - if hc.backend: - with hc.lock(key, blocking=True, timeout=Timeout.NONE): - result = hc.get(key) + if hc.get_current_transaction() is None: + value = hc.get(key) + if value is not None: + return value + + with hc.transaction() as transaction: + result = transaction.get(key) + if result is None: + logger.debug(f"Value {key} not set in cache...") + # if hc.backend: + with transaction.lock(key, timeout=Timeout.NONE): + result = transaction.get(key) if not result: logger.debug("Cache empty: getting value from the actual function...") result = function(*args, **kwargs) logger.debug("Checking unless function: %r", unless) - if unless is None or unless is True or callable(unless) and not unless(result): - hc.set(key, result, timeout=timeout) + if unless is None or unless is False or callable(unless) and not unless(result): + transaction.set(key, result, timeout=timeout) else: logger.debug("Don't set value in cache due to unless=%r", "None" if unless is None else "True") else: - logger.warning("Using unsupported cache backend: cache will not be used") - result = function(*args, **kwargs) - else: - logger.debug(f"Reusing value from cache key '{key}'...") + logger.debug(f"Reusing value from cache key '{key}'...") else: logger.debug("Cache disabled: getting value from the actual function...") result = function(*args, **kwargs) From 9a06add74b4128db4c65cec321e7fddbf3916f00 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Sun, 21 Nov 2021 10:44:15 +0100 Subject: [PATCH 143/162] Update cached decorator --- lifemonitor/api/models/services/github.py | 6 +- lifemonitor/api/models/services/travis.py | 2 +- .../api/models/testsuites/testinstance.py | 6 +- lifemonitor/cache.py | 63 ++++++++++++------- 4 files changed, 47 insertions(+), 30 deletions(-) diff --git a/lifemonitor/api/models/services/github.py b/lifemonitor/api/models/services/github.py index ab8377913..481a2002c 100644 --- a/lifemonitor/api/models/services/github.py +++ 
b/lifemonitor/api/models/services/github.py @@ -107,7 +107,7 @@ def _gh_service(self) -> Github: def _get_workflow_info(self, resource): return self._parse_workflow_url(resource) - @cached(timeout=Timeout.NONE, client_scope=False) + @cached(timeout=Timeout.NONE, client_scope=False, transactional_update=True) def _get_repo(self, test_instance: models.TestInstance): logger.debug("Getting github repository from remote service...") _, repo_full_name, _ = self._get_workflow_info(test_instance.resource) @@ -138,12 +138,12 @@ def check_connection(self) -> bool: logger.info("Caught exception from Github GET /rate_limit: %s. Connection not working?", e) return False - @cached(timeout=Timeout.NONE, client_scope=False) + @cached(timeout=Timeout.NONE, client_scope=False, transactional_update=True) def _get_gh_workflow(self, repository, workflow_id): logger.debug("Getting github workflow...") return self._gh_service.get_repo(repository).get_workflow(workflow_id) - @cached(timeout=Timeout.NONE, client_scope=False) + @cached(timeout=Timeout.NONE, client_scope=False, transactional_update=True) def _get_gh_workflow_runs(self, workflow: Workflow.Workflow) -> List: return list(workflow.get_runs()) diff --git a/lifemonitor/api/models/services/travis.py b/lifemonitor/api/models/services/travis.py index 36af5bdd6..4bd3d0a0f 100644 --- a/lifemonitor/api/models/services/travis.py +++ b/lifemonitor/api/models/services/travis.py @@ -134,7 +134,7 @@ def get_last_passed_test_build(self, test_instance: models.TestInstance) -> Opti def get_last_failed_test_build(self, test_instance: models.TestInstance) -> Optional[models.TravisTestBuild]: return self._get_last_test_build(test_instance, state='failed') - @cached(timeout=Timeout.NONE, client_scope=False) + @cached(timeout=Timeout.NONE, client_scope=False, transactional_update=True) def get_project_metadata(self, test_instance: models.TestInstance): try: logger.debug("Getting Travis project metadata...") diff --git a/lifemonitor/api/models/testsuites/testinstance.py b/lifemonitor/api/models/testsuites/testinstance.py index 17ce8084c..b49e28e23 100644 --- a/lifemonitor/api/models/testsuites/testinstance.py +++ b/lifemonitor/api/models/testsuites/testinstance.py @@ -105,16 +105,16 @@ def get_external_link(self): def last_test_build(self): return self.get_last_test_build() - @cached(timeout=Timeout.NONE, client_scope=False) + @cached(timeout=Timeout.NONE, client_scope=False, transactional_update=True) def get_last_test_build(self): builds = self.get_test_builds() return builds[0] if builds and len(builds) > 0 else None - @cached(timeout=Timeout.NONE, client_scope=False) + @cached(timeout=Timeout.NONE, client_scope=False, transactional_update=True) def get_test_builds(self, limit=10): return self.testing_service.get_test_builds(self, limit=limit) - @cached(timeout=Timeout.BUILD, client_scope=False, + @cached(timeout=Timeout.BUILD, client_scope=False, transactional_update=True, unless=lambda b: b.status in [models.BuildStatus.RUNNING, models.BuildStatus.WAITING]) def get_test_build(self, build_number): return self.testing_service.get_test_build(self, build_number) diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index 088372d93..b383bd6a0 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -474,7 +474,35 @@ def clear_cache(func=None, client_scope=True, prefix=CACHE_PREFIX, *args, **kwar logger.error("Error deleting cache: %r", e) -def cached(timeout=Timeout.REQUEST, client_scope=True, unless=None): +def _process_cache_data(cache, transaction, key, unless, 
timeout, + read_from_cache, write_to_cache, function, args, kwargs): + # check parameters + assert read_from_cache or transaction, "Unable to read from transaction: transaction is None" + assert write_to_cache or transaction, "Unable to write to transaction: transaction is None" + # set reader/writer + reader = cache if read_from_cache else transaction + writer = cache if write_to_cache else transaction + # get/set data + result = reader.get(key) + if result is None: + logger.debug(f"Value {key} not set in cache...") + with cache.lock(key, timeout=Timeout.NONE): + result = reader.get(key) + if not result: + logger.debug("Cache empty: getting value from the actual function...") + result = function(*args, **kwargs) + logger.debug("Checking unless function: %r", unless) + if unless is None or unless is False or callable(unless) and not unless(result): + writer.set(key, result, timeout=timeout) + else: + logger.debug("Don't set value in cache due to unless=%r", + "None" if unless is None else "True") + else: + logger.debug(f"Reusing value from cache key '{key}'...") + return result + + +def cached(timeout=Timeout.REQUEST, client_scope=True, unless=None, transactional_update=False): def decorator(function): @functools.wraps(function) @@ -486,28 +514,17 @@ def wrapper(*args, **kwargs): hc = cache if obj is None else obj.cache if hc and hc.cache_enabled: key = make_cache_key(function, client_scope, args=args, kwargs=kwargs) - if hc.get_current_transaction() is None: - value = hc.get(key) - if value is not None: - return value - - with hc.transaction() as transaction: - result = transaction.get(key) - if result is None: - logger.debug(f"Value {key} not set in cache...") - # if hc.backend: - with transaction.lock(key, timeout=Timeout.NONE): - result = transaction.get(key) - if not result: - logger.debug("Cache empty: getting value from the actual function...") - result = function(*args, **kwargs) - logger.debug("Checking unless function: %r", unless) - if unless is None or unless is False or callable(unless) and not unless(result): - transaction.set(key, result, timeout=timeout) - else: - logger.debug("Don't set value in cache due to unless=%r", "None" if unless is None else "True") - else: - logger.debug(f"Reusing value from cache key '{key}'...") + transaction = hc.get_current_transaction() + if transaction or transactional_update: + read_from_cache = transaction is None + with hc.transaction() as transaction: + result = _process_cache_data(cache, transaction, + key, unless, timeout, + read_from_cache, False, + function, args, kwargs) + else: + result = _process_cache_data(cache, transaction, key, unless, timeout, + True, True, function, args, kwargs) else: logger.debug("Cache disabled: getting value from the actual function...") result = function(*args, **kwargs) From 0c04d5fb74decfe6f751df9a95b036183a9422e2 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Sun, 21 Nov 2021 10:52:09 +0100 Subject: [PATCH 144/162] Add unit tests for concurrent cache updates --- tests/unit/cache/test_cache.py | 273 +++++++++++++++++++++++++++------ 1 file changed, 224 insertions(+), 49 deletions(-) diff --git a/tests/unit/cache/test_cache.py b/tests/unit/cache/test_cache.py index 0c362a53d..8892f07d6 100644 --- a/tests/unit/cache/test_cache.py +++ b/tests/unit/cache/test_cache.py @@ -19,13 +19,15 @@ # SOFTWARE. 
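[Editor's note, not part of the patch] The tests added below exercise the `transactional_update` mode introduced in PATCH 143: on a cache miss the computed value is buffered in the current `CacheTransaction` and only pipelined into Redis when the transaction closes, so other clients never observe partial updates; this also relies on the thread-local "current transaction" from PATCH 141, which keeps concurrent workers isolated from each other. A minimal usage sketch, assuming the decorator semantics above (`StatusClient`, `fetch_status` and the resource id are hypothetical names, not part of LifeMonitor):

    from lifemonitor.cache import Timeout, cache, cached

    class StatusClient:

        @cached(timeout=Timeout.NONE, client_scope=False, transactional_update=True)
        def fetch_status(self, resource_id):
            # expensive remote call, executed only on a cache/transaction miss
            return {"resource": resource_id, "status": "ok"}

    client = StatusClient()
    with cache.transaction("status-update") as t:
        client.fetch_status(42)  # miss: computed once, stored in the transaction
        client.fetch_status(42)  # hit: served from the transaction buffer
    # on close(), the transaction flushes its buffered entries into Redis

[End note]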
import logging +import threading +from multiprocessing.pool import ThreadPool from time import sleep from unittest.mock import MagicMock import lifemonitor.api.models as models import pytest -from lifemonitor.cache import (IllegalStateException, cache, init_cache, - make_cache_key) +from lifemonitor.cache import (IllegalStateException, Timeout, cache, + init_cache, make_cache_key) from tests import utils from tests.unit.test_utils import SerializableMock @@ -136,25 +138,38 @@ def test_cache_test_builds(app_context, redis_cache, user1): assert instance.cache.get(cache_key) is None, "Cache should be empty" -def test_cache_last_build_update(app_context, redis_cache, user1): +def setup_test_cache_last_build_update(app_context, redis_cache, user1): valid_workflow = 'sort-and-change-case' logger.debug("Cache content: %r", cache.keys) cache.clear() assert cache.size() == 0, "Cache should be empty" _, w = utils.pick_and_register_workflow(user1, valid_workflow) assert w, "Workflow should be set" + return w - try: - for s in w.test_suites: - logger.info("Updating workflow: %r", w) - for i in s.test_instances: - builds_data = i.testing_service.get_test_builds(i) - i.testing_service.get_test_builds = SerializableMock() - i.testing_service.get_test_builds.return_value = builds_data - transaction_keys = None - with i.cache.transaction(str(i)) as t: - assert i.cache.get_current_transaction() == t, "Unexpected transaction" +def test_cache_last_build_update(app_context, redis_cache, user1): + w = setup_test_cache_last_build_update(app_context, redis_cache, user1) + cache_last_build_update(app_context.app, w, user1, check_cache_size=True) + + +def cache_last_build_update(app, w, user1, check_cache_size=True, index=0, + multithreaded=False, results=None): + transactions = [] + + assert len(cache.backend.keys("lock*")) == 0, "No lock should be set" + with app.app_context(): + with cache.transaction(f"T{index}") as t: + transactions.append(t) + assert cache.get_current_transaction() == t, "Unexpected transaction" + for s in w.test_suites: + logger.info("[t#%r] Updating workflow (): %r", index, w) + for i in s.test_instances: + get_test_builds_method = i.testing_service.get_test_builds + builds_data = i.testing_service.get_test_builds(i) + i.testing_service.get_test_builds = SerializableMock() + i.testing_service.get_test_builds.return_value = builds_data + transaction_keys = None cache_key = make_cache_key(i.get_test_builds, client_scope=False, args=[i]) logger.debug("The cache key: %r", cache_key) assert not cache.has(cache_key), "The key should not be in cache" @@ -162,9 +177,11 @@ def test_cache_last_build_update(app_context, redis_cache, user1): logger.debug("\n\nGetting latest builds (first call)...") builds = i.get_test_builds() logger.debug("Getting latest builds (first call): %r\n", builds) - i.testing_service.get_test_builds.assert_called_once(), "i.testing_service.get_test_builds should be called once" - logger.debug(f"Checking if key {cache_key} is in cache...") - assert cache.has(cache_key), "The key should be in cache" + if not multithreaded: + i.testing_service.get_test_builds.assert_called_once(), "i.testing_service.get_test_builds should be called once" + logger.debug(f"Checking if key {cache_key} is in cache...") + assert not cache.has(cache_key), "The key should not be in cache" + assert t.has(cache_key), "The key should be in the current transaction" cache_size = cache.size() logger.debug("Current cache size: %r", cache_size) assert i.cache.get_current_transaction() == t, "Unexpected 
transaction" @@ -172,19 +189,25 @@ def test_cache_last_build_update(app_context, redis_cache, user1): logger.debug("\n\nGetting latest builds (second call)...") builds = i.get_test_builds() logger.debug("Getting latest builds (second call): %r\n", builds) - i.testing_service.get_test_builds.assert_called_once(), "i.testing_service.get_test_builds should be called once" - logger.debug(f"Checking if key {cache_key} is in cache...") - assert cache.has(cache_key), "The key should be in cache" - assert cache.size() == cache_size, "Unexpected cache size" + if not multithreaded: + i.testing_service.get_test_builds.assert_called_once(), "i.testing_service.get_test_builds should be called once" + logger.debug(f"Checking if key {cache_key} is in cache...") + assert not cache.has(cache_key), "The key should not be in cache" + assert t.has(cache_key), "The key should be in the current transaction" + if check_cache_size: + assert cache.size() == cache_size, "Unexpected cache size" assert i.cache.get_current_transaction() == t, "Unexpected transaction" logger.debug("\n\nGetting latest builds (third call)...") builds = i.get_test_builds() logger.debug("Getting latest builds (third call): %r\n", builds) - i.testing_service.get_test_builds.assert_called_once(), "i.testing_service.get_test_builds should be called once" - logger.debug(f"Checking if key {cache_key} is in cache...") - assert cache.has(cache_key), "The key should be in cache" - assert cache.size() == cache_size, "Unexpected cache size" + if not multithreaded: + i.testing_service.get_test_builds.assert_called_once(), "i.testing_service.get_test_builds should be called once" + logger.debug(f"Checking if key {cache_key} is in cache...") + assert not cache.has(cache_key), "The key should not be in cache" + assert t.has(cache_key), "The key should be in the current transaction" + if check_cache_size: + assert cache.size() == cache_size, "Unexpected cache size" assert i.cache.get_current_transaction() == t, "Unexpected transaction" logger.debug("\n\nPreparing data to test builds...") @@ -193,7 +216,10 @@ def test_cache_last_build_update(app_context, redis_cache, user1): b_data.append(i.testing_service.get_test_build(i, b.id)) logger.debug("\n\nPreparing data to test builds... 
DONE") + assert len(b_data) == 4, "Unexpected number of builds" + logger.debug("\n\nChecking test builds...") + get_test_build_method = i.testing_service.get_test_build i.testing_service.get_test_build = SerializableMock() for count in range(0, len(b_data)): b = b_data[count] @@ -204,50 +230,68 @@ def test_cache_last_build_update(app_context, redis_cache, user1): logger.debug("\n\nChecking build (first call): buildID=%r", b.id) logger.debug("Build data: %r", i.get_test_build(b.id)) i.testing_service.get_test_build.call_count == count + 1, "i.testing_service.get_test_build should be called once" - logger.debug(f"Checking if key {cache_key} is in cache...") - assert cache.has(cache_key), f"The key {cache_key} should be in cache" + if not multithreaded: + logger.debug(f"Checking if key {cache_key} is in cache...") + assert not cache.has(cache_key), "The key should not be in cache" + assert t.has(cache_key), "The key should be in the current transaction" cache_size = cache.size() logger.debug("Current cache size: %r", cache_size) logger.debug("\n\nChecking build (second call): buildID=%r", b.id) logger.debug("Build data: %r", i.get_test_build(b.id)) i.testing_service.get_test_build.call_count == count + 1, "i.testing_service.get_test_build should be called once" - logger.debug(f"Checking if key {cache_key} is in cache...") - assert cache.has(cache_key), f"The key {cache_key} should be in cache" - assert cache.size() == cache_size, "Unexpected cache size" + if not multithreaded: + logger.debug(f"Checking if key {cache_key} is in cache...") + assert not cache.has(cache_key), "The key should not be in cache" + assert t.has(cache_key), "The key should be in the current transaction" + if check_cache_size: + assert cache.size() == cache_size, "Unexpected cache size" logger.debug("\n\nChecking build (third call): buildID=%r", b.id) logger.debug("Build data: %r", i.get_test_build(b.id)) i.testing_service.get_test_build.call_count == count + 1, "i.testing_service.get_test_build should be called once" - logger.debug(f"Checking if key {cache_key} is in cache...") - assert cache.has(cache_key), f"The key {cache_key} should be in cache" - assert cache.size() == cache_size, "Unexpected cache size" - + if not multithreaded: + logger.debug(f"Checking if key {cache_key} is in cache...") + assert not cache.has(cache_key), "The key should not be in cache" + assert t.has(cache_key), "The key should be in the current transaction" + if check_cache_size: + assert cache.size() == cache_size, "Unexpected cache size" + + # check last test build logger.debug("\n\nGetting latest build: %r", i.last_test_build) - i.testing_service.get_test_builds.assert_called_once(), "i.testing_service.get_test_builds should be called once" + if not multithreaded: + i.testing_service.get_test_builds.assert_called_once(), "i.testing_service.get_test_builds should be called once" logger.debug("\n\nGetting latest build... 
DONE\n\n") + # restore original method + i.testing_service.get_test_build = get_test_build_method + i.testing_service.get_test_builds = get_test_builds_method + + # check transactions transaction_keys = t.keys() logger.debug("Transaction keys (# %r): %r", len(transaction_keys), transaction_keys) assert len(transaction_keys) == t.size(), "Unexpected transaction size" - # check the cache after the transaction is completed - cache_size = cache.size() - assert len(transaction_keys) == cache_size, "Unpexpected cache size: it should be equal to the transaction size" + # check the cache after the transaction is completed + if check_cache_size: + cache_size = cache.size() + assert len(transaction_keys) == cache_size, "Unpexpected cache size: it should be equal to the transaction size" - # check latest build - logger.debug("\n\nGetting latest build: %r", i.last_test_build) - assert cache.size() == cache_size, "Unexpected cache size" + # check latest build + logger.debug("\n\nGetting latest build: %r", i.last_test_build) + if check_cache_size: + assert cache.size() == cache_size, "Unexpected cache size" - except Exception as e: - logger.error("Error when executing task 'check_last_build': %s", str(e)) - if logger.isEnabledFor(logging.DEBUG): - logger.exception(e) - - sleep(2) - assert cache.size() > 0, "Cache should not be empty" - logger.debug(cache.keys()) - assert len(cache.backend.keys("lock*")) == 0, "No lock should be set" + sleep(2) + assert cache.size() > 0, "Cache should not be empty" + logger.debug(cache.keys()) + if not multithreaded: + assert len(cache.backend.keys("lock*")) == 0, "No lock should be set" + else: + assert results, "Results should not be none" + if results: + results[index]['result'].extend(transactions) + return transactions def test_cache_task_last_build(app_context, redis_cache, user1): @@ -265,3 +309,134 @@ def test_cache_task_last_build(app_context, redis_cache, user1): assert cache.size() > 0, "Cache should not be empty" logger.debug(cache.keys()) assert len(cache.backend.keys("lock*")) == 0, "No lock should be set" + + +def check_results(results): + logger.debug("Results: %r", results) + assert len(cache.backend.keys(pattern="locks*")) == 0, "Locks should not be in cache" + for i in range(0, len(results)): + if i == len(results) - 1: + break + r1 = results[i]['result'] + r2 = results[i + 1]['result'] + assert len(r1) == len(r2), "Transactions should be the same length" + + for tdx in range(0, len(r1)): + logger.debug("Transaction %r keys: %r", tdx, r1[tdx].keys()) + logger.debug("Transaction %r keys: %r", tdx, r2[tdx].keys()) + assert r1[tdx].size() == r2[tdx].size(), \ + f"Transactions {r1[tdx]} and {r2[tdx]} should have the same number of keys" + + +def test_cache_last_build_update_multi_thread(app_context, redis_cache, user1): + # set up a workflow + w = setup_test_cache_last_build_update(app_context, redis_cache, user1) + # set up threads + number_of_threads = 4 + results = [] + for index in range(number_of_threads): + t = threading.Thread( + target=cache_last_build_update, name=f"T{index}", args=(app_context.app, w, user1), + kwargs={ + "check_cache_size": False, + "index": index, + "multithreaded": True, + "results": results}) + results.append({ + "t": t, + "result": [] + }) + t.start() + # wait for results + for tdata in results: + t = tdata['t'] + t.join() + # check results + sleep(2) + check_results(results) + + +def test_cache_last_build_update_multi_process(app_context, redis_cache, user1): + # set up a workflow + w = 
+ + +def test_cache_last_build_update_multi_process(app_context, redis_cache, user1): + # set up a workflow + w = setup_test_cache_last_build_update(app_context, redis_cache, user1) + # set up processes + processes = 4 + pool = ThreadPool(processes=processes) + results = [] + for index in range(processes): + results.append({ + 't': pool.apply_async(cache_last_build_update, args=(app_context.app, w, user1), + kwds={"check_cache_size": False, + "index": index, + "multithreaded": True, + "results": results}), + 'result': [] + }) + # wait for results + for tdata in results: + t = tdata['t'] + result = t.get() + tdata['result'] = result + # check results + sleep(2) + check_results(results) + + +def cache_transaction(transaction, index, name, results): + sleep(5) + logger.debug(f"Cache transaction: {name}") + with cache.transaction(f"T-{index}") as t: + current_transaction = cache.get_current_transaction() + logger.debug("Current transaction: %r", current_transaction) + assert current_transaction != transaction, "Unexpected transaction: transaction should be different from that on the main process" + assert t == current_transaction, "Unexpected transaction" + + with cache.transaction() as tx: + assert tx == t, "Unexpected transaction: it should be the same started in this thread" + + key = "TEST" + result = transaction.get(key) + if result is None: + logger.debug(f"Value {key} not set in cache...") + with tx.lock(key, timeout=Timeout.NONE): + result = transaction.get(key) + if not result: + logger.debug("Cache empty: getting value from the actual function...") + sleep(5) + result = f"result-of-index: {index}" + if index != -1: + result = cache_transaction(transaction, -1, f"{index}-NONE", results) + unless = None + logger.debug("Checking unless function: %r", unless) + if unless is None or unless is False or callable(unless) and not unless(result): + transaction.set(key, result, timeout=Timeout.NONE) + else: + logger.debug("Don't set value in cache due to unless=%r", "None" if unless is None else "True") + + +def test_cache_transaction_multi_thread(app_context, redis_cache, user1): + # set up threads + logger.debug("Test cache transaction...") + number_of_threads = 4 + results = [] + with cache.transaction() as transaction: + print("The transaction: %r" % transaction) + + for index in range(number_of_threads): + t = threading.Thread( + target=cache_transaction, name=f"T{index}", args=(transaction, index, f"{index}", results), + kwargs={}) + results.append({ + "t": t, + "result": [] + }) + t.start() + # wait for results + for tdata in results: + t = tdata['t'] + t.join() + # check results + sleep(2) + # check_results(results) + + logger.debug("Test cache transaction...
DONE") From 13c38c8d65da4b0e5c1c6f5915c7c53dc2ff4920 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Sun, 21 Nov 2021 16:37:04 +0100 Subject: [PATCH 145/162] Minor fixes --- lifemonitor/cache.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index b383bd6a0..17611b538 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -293,8 +293,6 @@ def transaction(self, name=None) -> CacheTransaction: logger.debug("Reusing transaction in the current thread: %r", t) try: yield t - except Exception as e: - logger.exception(e) finally: logger.debug("Finally closing transaction") if not new_transaction: @@ -512,6 +510,7 @@ def wrapper(*args, **kwargs): obj: CacheMixin = args[0] if len(args) > 0 and isinstance(args[0], CacheMixin) else None logger.debug("Wrapping a method of a CacheMixin instance: %r", obj is not None) hc = cache if obj is None else obj.cache + result = None if hc and hc.cache_enabled: key = make_cache_key(function, client_scope, args=args, kwargs=kwargs) transaction = hc.get_current_transaction() From 7a293dda679febef2846abbd338cf8aa796ea26f Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Mon, 22 Nov 2021 01:17:55 +0100 Subject: [PATCH 146/162] Update tests --- tests/unit/cache/test_cache.py | 282 +++++++++++++++++++-------------- 1 file changed, 160 insertions(+), 122 deletions(-) diff --git a/tests/unit/cache/test_cache.py b/tests/unit/cache/test_cache.py index 8892f07d6..6c7b1fff9 100644 --- a/tests/unit/cache/test_cache.py +++ b/tests/unit/cache/test_cache.py @@ -155,143 +155,171 @@ def test_cache_last_build_update(app_context, redis_cache, user1): def cache_last_build_update(app, w, user1, check_cache_size=True, index=0, multithreaded=False, results=None): - transactions = [] - - assert len(cache.backend.keys("lock*")) == 0, "No lock should be set" - with app.app_context(): - with cache.transaction(f"T{index}") as t: - transactions.append(t) - assert cache.get_current_transaction() == t, "Unexpected transaction" - for s in w.test_suites: - logger.info("[t#%r] Updating workflow (): %r", index, w) - for i in s.test_instances: - get_test_builds_method = i.testing_service.get_test_builds - builds_data = i.testing_service.get_test_builds(i) - i.testing_service.get_test_builds = SerializableMock() - i.testing_service.get_test_builds.return_value = builds_data - transaction_keys = None - cache_key = make_cache_key(i.get_test_builds, client_scope=False, args=[i]) - logger.debug("The cache key: %r", cache_key) - assert not cache.has(cache_key), "The key should not be in cache" - - logger.debug("\n\nGetting latest builds (first call)...") - builds = i.get_test_builds() - logger.debug("Getting latest builds (first call): %r\n", builds) - if not multithreaded: - i.testing_service.get_test_builds.assert_called_once(), "i.testing_service.get_test_builds should be called once" - logger.debug(f"Checking if key {cache_key} is in cache...") - assert not cache.has(cache_key), "The key should not be in cache" - assert t.has(cache_key), "The key should be in the current transaction" - cache_size = cache.size() - logger.debug("Current cache size: %r", cache_size) - assert i.cache.get_current_transaction() == t, "Unexpected transaction" - - logger.debug("\n\nGetting latest builds (second call)...") - builds = i.get_test_builds() - logger.debug("Getting latest builds (second call): %r\n", builds) - if not multithreaded: - i.testing_service.get_test_builds.assert_called_once(), "i.testing_service.get_test_builds 
should be called once" - logger.debug(f"Checking if key {cache_key} is in cache...") - assert not cache.has(cache_key), "The key should not be in cache" - assert t.has(cache_key), "The key should be in the current transaction" - if check_cache_size: - assert cache.size() == cache_size, "Unexpected cache size" - assert i.cache.get_current_transaction() == t, "Unexpected transaction" - - logger.debug("\n\nGetting latest builds (third call)...") - builds = i.get_test_builds() - logger.debug("Getting latest builds (third call): %r\n", builds) - if not multithreaded: - i.testing_service.get_test_builds.assert_called_once(), "i.testing_service.get_test_builds should be called once" - logger.debug(f"Checking if key {cache_key} is in cache...") - assert not cache.has(cache_key), "The key should not be in cache" - assert t.has(cache_key), "The key should be in the current transaction" - if check_cache_size: - assert cache.size() == cache_size, "Unexpected cache size" - assert i.cache.get_current_transaction() == t, "Unexpected transaction" - - logger.debug("\n\nPreparing data to test builds...") - b_data = [] - for b in builds: - b_data.append(i.testing_service.get_test_build(i, b.id)) - logger.debug("\n\nPreparing data to test builds... DONE") - - assert len(b_data) == 4, "Unexpected number of builds" - - logger.debug("\n\nChecking test builds...") - get_test_build_method = i.testing_service.get_test_build - i.testing_service.get_test_build = SerializableMock() - for count in range(0, len(b_data)): - b = b_data[count] - i.testing_service.get_test_build.return_value = b - - cache_key = make_cache_key(i.get_test_build, client_scope=False, args=[i, b.id]) - - logger.debug("\n\nChecking build (first call): buildID=%r", b.id) - logger.debug("Build data: %r", i.get_test_build(b.id)) - i.testing_service.get_test_build.call_count == count + 1, "i.testing_service.get_test_build should be called once" - if not multithreaded: - logger.debug(f"Checking if key {cache_key} is in cache...") - assert not cache.has(cache_key), "The key should not be in cache" + try: + transactions = [] + logger.debug("Params of thread %r", index) + logger.debug("%r %r %r %r", check_cache_size, index, multithreaded, results) + assert len(cache.backend.keys("lock*")) == 0, "No lock should be set" + with app.app_context(): + transaction_keys = None + with cache.transaction(f"T{index}") as t: + logger.debug("Current transaction: %r", t) + logger.debug("Current workflow: %r", w) + transactions.append(t) + + assert cache.get_current_transaction() == t, "Unexpected transaction" + for s in w.test_suites: + logger.info("[t#%r] Updating workflow (): %r", index, w) + for i in s.test_instances: + get_test_builds_method = i.testing_service.get_test_builds + builds_data = i.testing_service.get_test_builds(i) + i.testing_service.get_test_builds = SerializableMock() + i.testing_service.get_test_builds.return_value = builds_data + + assert cache.get_current_transaction() == t, "Unexpected transaction" + assert i.cache.get_current_transaction() == t, "Unexpected transaction" + + cache_key = make_cache_key(i.get_test_builds, client_scope=False, args=[i]) + logger.debug("The cache key: %r", cache_key) + + ############################################################################# + # latest builds (first call) + ############################################################################# + logger.debug("\n\nGetting latest builds (first call)...") + builds = i.get_test_builds() + logger.debug("Getting latest builds (first call): %r\n", builds) 
assert t.has(cache_key), "The key should be in the current transaction" cache_size = cache.size() logger.debug("Current cache size: %r", cache_size) - - logger.debug("\n\nChecking build (second call): buildID=%r", b.id) - logger.debug("Build data: %r", i.get_test_build(b.id)) - i.testing_service.get_test_build.call_count == count + 1, "i.testing_service.get_test_build should be called once" + assert i.cache.get_current_transaction() == t, "Unexpected transaction" + # check cache if not multithreaded: + i.testing_service.get_test_builds.assert_called_once(), "i.testing_service.get_test_builds should be called once" logger.debug(f"Checking if key {cache_key} is in cache...") assert not cache.has(cache_key), "The key should not be in cache" + + ############################################################################# + # latest builds (second call) + ############################################################################# + logger.debug("\n\nGetting latest builds (second call)...") + builds = i.get_test_builds() + logger.debug("Getting latest builds (second call): %r\n", builds) + assert i.cache.get_current_transaction() == t, "Unexpected transaction" assert t.has(cache_key), "The key should be in the current transaction" + if not multithreaded: + i.testing_service.get_test_builds.assert_called_once(), "i.testing_service.get_test_builds should be called once" + logger.debug(f"Checking if key {cache_key} is in cache...") + assert not cache.has(cache_key), "The key should not be in cache" if check_cache_size: assert cache.size() == cache_size, "Unexpected cache size" - logger.debug("\n\nChecking build (third call): buildID=%r", b.id) - logger.debug("Build data: %r", i.get_test_build(b.id)) - i.testing_service.get_test_build.call_count == count + 1, "i.testing_service.get_test_build should be called once" + ############################################################################# + # latest builds (third call) + ############################################################################# + logger.debug("\n\nGetting latest builds (third call)...") + builds = i.get_test_builds() + logger.debug("Getting latest builds (third call): %r\n", builds) + assert i.cache.get_current_transaction() == t, "Unexpected transaction" + assert t.has(cache_key), "The key should be in the current transaction" if not multithreaded: + i.testing_service.get_test_builds.assert_called_once(), "i.testing_service.get_test_builds should be called once" logger.debug(f"Checking if key {cache_key} is in cache...") assert not cache.has(cache_key), "The key should not be in cache" - assert t.has(cache_key), "The key should be in the current transaction" if check_cache_size: assert cache.size() == cache_size, "Unexpected cache size" - # check last test build - logger.debug("\n\nGetting latest build: %r", i.last_test_build) - if not multithreaded: - i.testing_service.get_test_builds.assert_called_once(), "i.testing_service.get_test_builds should be called once" - logger.debug("\n\nGetting latest build... 
DONE\n\n") - - # restore original method - i.testing_service.get_test_build = get_test_build_method - i.testing_service.get_test_builds = get_test_builds_method - - # check transactions - transaction_keys = t.keys() - logger.debug("Transaction keys (# %r): %r", len(transaction_keys), transaction_keys) - assert len(transaction_keys) == t.size(), "Unexpected transaction size" + ############################################################################# + # Check builds + ############################################################################# + logger.debug("\n\nPreparing data to test builds...") + b_data = [] + for b in builds: + b_data.append(i.testing_service.get_test_build(i, b.id)) + logger.debug("\n\nPreparing data to test builds... DONE") + + assert len(b_data) == 4, "Unexpected number of builds" + + logger.debug("\n\nChecking test builds...") + get_test_build_method = i.testing_service.get_test_build + + for count in range(0, len(b_data)): + b = b_data[count] + i.testing_service.get_test_build = SerializableMock() + i.testing_service.get_test_build.return_value = b + + cache_key = make_cache_key(i.get_test_build, client_scope=False, args=[i, b.id]) + + # first call ############################################################# + logger.debug("\n\nChecking build (first call): buildID=%r", b.id) + logger.debug("Build data: %r", i.get_test_build(b.id)) + assert t.has(cache_key), "The key should be in the current transaction" + if not multithreaded: + i.testing_service.get_test_build.call_count == count + 1, "i.testing_service.get_test_build should be called once" + logger.debug(f"Checking if key {cache_key} is in cache...") + assert not cache.has(cache_key), "The key should not be in cache" + cache_size = cache.size() + logger.debug("Current cache size: %r", cache_size) + + # second call ############################################################# + logger.debug("\n\nChecking build (second call): buildID=%r", b.id) + logger.debug("Build data: %r", i.get_test_build(b.id)) + assert t.has(cache_key), "The key should be in the current transaction" + if not multithreaded: + i.testing_service.get_test_build.call_count == count + 1, "i.testing_service.get_test_build should be called once" + logger.debug(f"Checking if key {cache_key} is in cache...") + assert not cache.has(cache_key), "The key should not be in cache" + if check_cache_size: + assert cache.size() == cache_size, "Unexpected cache size" + # third call ############################################################# + logger.debug("\n\nChecking build (third call): buildID=%r", b.id) + logger.debug("Build data: %r", i.get_test_build(b.id)) + assert t.has(cache_key), "The key should be in the current transaction" + if not multithreaded: + i.testing_service.get_test_build.call_count == count + 1, "i.testing_service.get_test_build should be called once" + logger.debug(f"Checking if key {cache_key} is in cache...") + assert not cache.has(cache_key), "The key should not be in cache" + if check_cache_size: + assert cache.size() == cache_size, "Unexpected cache size" + + # check last test build + logger.debug("\n\nGetting latest build: %r", i.last_test_build) + if not multithreaded: + i.testing_service.get_test_builds.assert_called_once(), "i.testing_service.get_test_builds should be called once" + logger.debug("\n\nGetting latest build... 
DONE\n\n") - # check the cache after the transaction is completed - if check_cache_size: - cache_size = cache.size() - assert len(transaction_keys) == cache_size, "Unpexpected cache size: it should be equal to the transaction size" + # restore original method + i.testing_service.get_test_build = get_test_build_method + i.testing_service.get_test_builds = get_test_builds_method - # check latest build - logger.debug("\n\nGetting latest build: %r", i.last_test_build) - if check_cache_size: - assert cache.size() == cache_size, "Unexpected cache size" + ############################################################################ + # check latest build + ############################################################################ + logger.debug("\n\nGetting latest build: %r", i.last_test_build) + if check_cache_size: + assert cache.size() == cache_size, "Unexpected cache size" - sleep(2) - assert cache.size() > 0, "Cache should not be empty" - logger.debug(cache.keys()) - if not multithreaded: - assert len(cache.backend.keys("lock*")) == 0, "No lock should be set" - else: - assert results, "Results should not be none" - if results: - results[index]['result'].extend(transactions) - return transactions + # check transactions + transaction_keys = t.keys() + logger.debug("Transaction keys (# %r): %r", len(transaction_keys), transaction_keys) + assert len(transaction_keys) == t.size(), "Unexpected transaction size" + + # check the cache after the transaction is completed + if check_cache_size: + cache_size = cache.size() + assert len(transaction_keys) == cache_size, "Unpexpected cache size: it should be equal to the transaction size" + + sleep(2) + assert cache.size() > 0, "Cache should not be empty" + logger.debug(cache.keys()) + if not multithreaded: + assert len(cache.backend.keys("lock*")) == 0, "No lock should be set" + else: + if results: + assert results, "Results should not be none" + results[index]['result'].extend(transactions) + return transactions + except Exception as e: + logger.exception(e) def test_cache_task_last_build(app_context, redis_cache, user1): @@ -314,16 +342,23 @@ def test_cache_task_last_build(app_context, redis_cache, user1): def check_results(results): logger.debug("Results: %r", results) assert len(cache.backend.keys(pattern="locks*")) == 0, "Locks should not be in cache" + for i in range(0, len(results)): + r = results[i]['result'] + for tdx in range(0, len(r)): + logger.debug("Transaction %r keys: # %r", r[tdx], len(r[tdx].keys())) + logger.debug("Transaction %r keys: %r", r[tdx], r[tdx].keys()) + if i == len(results) - 1: break r1 = results[i]['result'] r2 = results[i + 1]['result'] + assert len(r1) == len(r2), "Transactions should be the same length" for tdx in range(0, len(r1)): - logger.debug("Transaction %r keys: %r", tdx, r1[tdx].keys()) - logger.debug("Transaction %r keys: %r", tdx, r2[tdx].keys()) + logger.debug("Transaction %r keys: %r", r1[tdx], r1[tdx].keys()) + logger.debug("Transaction %r keys: %r", r2[tdx], r2[tdx].keys()) assert r1[tdx].size() == r2[tdx].size(), \ f"Transactions {r1[tdx]} and {r2[tdx]} should have the same number of keys" @@ -331,8 +366,9 @@ def check_results(results): def test_cache_last_build_update_multi_thread(app_context, redis_cache, user1): # set up a workflow w = setup_test_cache_last_build_update(app_context, redis_cache, user1) + logger.debug("Workflow %r", w) # set up threads - number_of_threads = 4 + number_of_threads = 2 results = [] for index in range(number_of_threads): t = threading.Thread( @@ -347,6 +383,8 @@ def 
test_cache_last_build_update_multi_thread(app_context, redis_cache, user1): "result": [] }) t.start() + sleep(2) + # wait for results for tdata in results: t = tdata['t'] @@ -360,7 +398,7 @@ def test_cache_last_build_update_multi_process(app_context, redis_cache, user1): # set up a workflow w = setup_test_cache_last_build_update(app_context, redis_cache, user1) # set up processes - processes = 4 + processes = 3 pool = ThreadPool(processes=processes) results = [] for index in range(processes): From e73197ad91afa4b11f94f113590f023620561c5f Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Mon, 22 Nov 2021 14:18:04 +0100 Subject: [PATCH 147/162] Update serialization of null values --- lifemonitor/cache.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index 17611b538..7d3fd55e3 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -351,7 +351,10 @@ def set(self, key: str, value, timeout: int = Timeout.NONE, prefix: str = CACHE_ if key is not None and self.cache_enabled: key = self._make_key(key, prefix=prefix) logger.debug("Setting cache value for key %r.... (timeout: %r)", key, timeout) - self.backend.set(key, pickle.dumps(value), ex=timeout if timeout > 0 else None) + if value is None: + self.backend.delete(key) + else: + self.backend.set(key, pickle.dumps(value), ex=timeout if timeout > 0 else None) def has(self, key: str, prefix: str = CACHE_PREFIX) -> bool: return self.get(key, prefix=prefix) is not None From f5776553e2fd83176fbd4f0be0d732ad8fbca6b8 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Mon, 22 Nov 2021 14:20:37 +0100 Subject: [PATCH 148/162] Fix log level of debug messages --- lifemonitor/cache.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/lifemonitor/cache.py b/lifemonitor/cache.py index 7d3fd55e3..c69239938 100644 --- a/lifemonitor/cache.py +++ b/lifemonitor/cache.py @@ -172,7 +172,7 @@ def is_started(self) -> bool: return self.__started__ def start(self): - logger.debug(f"Starting {self} ...") + logger.debug(f"Starting transaction {self} ...") self.__data__.clear() self.__locks__.clear() self.__started__ = True @@ -202,9 +202,9 @@ def close(self): except redis_lock.NotAcquired as e: logger.warning(e) else: - logger.warning("Lock for key '%s' not acquired or expired") + logger.debug("Lock for key '%s' not acquired or expired") else: - logger.warning("No lock for key %r", k) + logger.debug("No lock for key %r", k) logger.debug(f"All lock of {self} released") logger.debug(f"{self} closed") except Exception as e: @@ -339,7 +339,7 @@ def lock(self, key: str, try: logger.debug("Exiting from transactional lock context for key '%s'", key) if not lock.locked: - logger.warning("Lock for key '%s' not acquired", key) + logger.debug("Lock for key '%s' not acquired", key) else: logger.debug("Auto release of lock for key '%s'", key) lock.release() @@ -520,11 +520,13 @@ def wrapper(*args, **kwargs): if transaction or transactional_update: read_from_cache = transaction is None with hc.transaction() as transaction: + logger.debug("Getting value using transaction: new=%r", read_from_cache) result = _process_cache_data(cache, transaction, key, unless, timeout, read_from_cache, False, function, args, kwargs) else: + logger.debug("Getting value from cache") result = _process_cache_data(cache, transaction, key, unless, timeout, True, True, function, args, kwargs) else: From 9ca9ab35f962e3af2801af01271105d90e57b4b0 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Tue, 
23 Nov 2021 12:32:31 +0100 Subject: [PATCH 149/162] Update tests --- tests/unit/cache/test_cache.py | 363 +++++++++++++++++---------------- 1 file changed, 186 insertions(+), 177 deletions(-) diff --git a/tests/unit/cache/test_cache.py b/tests/unit/cache/test_cache.py index 6c7b1fff9..88f74d375 100644 --- a/tests/unit/cache/test_cache.py +++ b/tests/unit/cache/test_cache.py @@ -20,7 +20,7 @@ import logging import threading -from multiprocessing.pool import ThreadPool +from multiprocessing import Manager, Process from time import sleep from unittest.mock import MagicMock @@ -150,176 +150,183 @@ def setup_test_cache_last_build_update(app_context, redis_cache, user1): def test_cache_last_build_update(app_context, redis_cache, user1): w = setup_test_cache_last_build_update(app_context, redis_cache, user1) + cache.reset_locks() cache_last_build_update(app_context.app, w, user1, check_cache_size=True) def cache_last_build_update(app, w, user1, check_cache_size=True, index=0, multithreaded=False, results=None): - try: - transactions = [] - logger.debug("Params of thread %r", index) - logger.debug("%r %r %r %r", check_cache_size, index, multithreaded, results) + transactions = [] + logger.debug("Params of thread %r", index) + logger.debug("%r %r %r %r", check_cache_size, index, multithreaded, results) + if not multithreaded: assert len(cache.backend.keys("lock*")) == 0, "No lock should be set" - with app.app_context(): - transaction_keys = None - with cache.transaction(f"T{index}") as t: - logger.debug("Current transaction: %r", t) - logger.debug("Current workflow: %r", w) - transactions.append(t) - - assert cache.get_current_transaction() == t, "Unexpected transaction" - for s in w.test_suites: - logger.info("[t#%r] Updating workflow (): %r", index, w) - for i in s.test_instances: - get_test_builds_method = i.testing_service.get_test_builds - builds_data = i.testing_service.get_test_builds(i) - i.testing_service.get_test_builds = SerializableMock() - i.testing_service.get_test_builds.return_value = builds_data - - assert cache.get_current_transaction() == t, "Unexpected transaction" - assert i.cache.get_current_transaction() == t, "Unexpected transaction" - - cache_key = make_cache_key(i.get_test_builds, client_scope=False, args=[i]) - logger.debug("The cache key: %r", cache_key) - - ############################################################################# - # latest builds (first call) - ############################################################################# - logger.debug("\n\nGetting latest builds (first call)...") - builds = i.get_test_builds() - logger.debug("Getting latest builds (first call): %r\n", builds) + with app.app_context(): + transaction_keys = None + with cache.transaction(f"T{index}") as t: + logger.debug("Current transaction: %r", t) + logger.debug("Current workflow: %r", w) + transactions.append(t) + + assert cache.get_current_transaction() == t, "Unexpected transaction" + for s in w.test_suites: + logger.info("[t#%r] Updating workflow (): %r", index, w) + for i in s.test_instances: + get_test_builds_method = i.testing_service.get_test_builds + builds_data = i.testing_service.get_test_builds(i) + i.testing_service.get_test_builds = SerializableMock() + i.testing_service.get_test_builds.return_value = builds_data + + assert cache.get_current_transaction() == t, "Unexpected transaction" + assert i.cache.get_current_transaction() == t, "Unexpected transaction" + + cache_key = make_cache_key(i.get_test_builds, client_scope=False, args=[i]) + logger.debug("The cache key: 
%r", cache_key) + + ############################################################################# + # latest builds (first call) + ############################################################################# + logger.debug("\n\nGetting latest builds (first call)...") + builds = i.get_test_builds() + logger.debug("Getting latest builds (first call): %r\n", builds) + assert t.has(cache_key), "The key should be in the current transaction" + cache_size = cache.size() + logger.debug("Current cache size: %r", cache_size) + assert i.cache.get_current_transaction() == t, "Unexpected transaction" + # check cache + if not multithreaded: + i.testing_service.get_test_builds.assert_called_once(), "i.testing_service.get_test_builds should be called once" + logger.debug(f"Checking if key {cache_key} is in cache...") + assert not cache.has(cache_key), "The key should not be in cache" + + ############################################################################# + # latest builds (second call) + ############################################################################# + logger.debug("\n\nGetting latest builds (second call)...") + builds = i.get_test_builds() + logger.debug("Getting latest builds (second call): %r\n", builds) + assert i.cache.get_current_transaction() == t, "Unexpected transaction" + assert t.has(cache_key), "The key should be in the current transaction" + if not multithreaded: + i.testing_service.get_test_builds.assert_called_once(), "i.testing_service.get_test_builds should be called once" + logger.debug(f"Checking if key {cache_key} is in cache...") + assert not cache.has(cache_key), "The key should not be in cache" + if check_cache_size: + assert cache.size() == cache_size, "Unexpected cache size" + + ############################################################################# + # latest builds (third call) + ############################################################################# + logger.debug("\n\nGetting latest builds (third call)...") + builds = i.get_test_builds() + logger.debug("Getting latest builds (third call): %r\n", builds) + assert i.cache.get_current_transaction() == t, "Unexpected transaction" + assert t.has(cache_key), "The key should be in the current transaction" + if not multithreaded: + i.testing_service.get_test_builds.assert_called_once(), "i.testing_service.get_test_builds should be called once" + logger.debug(f"Checking if key {cache_key} is in cache...") + assert not cache.has(cache_key), "The key should not be in cache" + if check_cache_size: + assert cache.size() == cache_size, "Unexpected cache size" + + ############################################################################# + # Check builds + ############################################################################# + logger.debug("\n\nPreparing data to test builds...") + b_data = [] + for b in builds: + b_data.append(i.testing_service.get_test_build(i, b.id)) + logger.debug("\n\nPreparing data to test builds... 
DONE") + + assert len(b_data) == 4, "Unexpected number of builds" + + logger.debug("\n\nChecking test builds...") + get_test_build_method = i.testing_service.get_test_build + + for count in range(0, len(b_data)): + b = b_data[count] + i.testing_service.get_test_build = SerializableMock() + i.testing_service.get_test_build.return_value = b + + cache_key = make_cache_key(i.get_test_build, client_scope=False, args=[i, b.id]) + + # first call ############################################################# + logger.debug("\n\nChecking build (first call): buildID=%r", b.id) + logger.debug("Build data: %r", i.get_test_build(b.id)) assert t.has(cache_key), "The key should be in the current transaction" - cache_size = cache.size() - logger.debug("Current cache size: %r", cache_size) - assert i.cache.get_current_transaction() == t, "Unexpected transaction" - # check cache if not multithreaded: - i.testing_service.get_test_builds.assert_called_once(), "i.testing_service.get_test_builds should be called once" + i.testing_service.get_test_build.call_count == count + 1, "i.testing_service.get_test_build should be called once" logger.debug(f"Checking if key {cache_key} is in cache...") assert not cache.has(cache_key), "The key should not be in cache" + cache_size = cache.size() + logger.debug("Current cache size: %r", cache_size) - ############################################################################# - # latest builds (second call) - ############################################################################# - logger.debug("\n\nGetting latest builds (second call)...") - builds = i.get_test_builds() - logger.debug("Getting latest builds (second call): %r\n", builds) - assert i.cache.get_current_transaction() == t, "Unexpected transaction" + # second call ############################################################# + logger.debug("\n\nChecking build (second call): buildID=%r", b.id) + logger.debug("Build data: %r", i.get_test_build(b.id)) assert t.has(cache_key), "The key should be in the current transaction" if not multithreaded: - i.testing_service.get_test_builds.assert_called_once(), "i.testing_service.get_test_builds should be called once" + i.testing_service.get_test_build.call_count == count + 1, "i.testing_service.get_test_build should be called once" logger.debug(f"Checking if key {cache_key} is in cache...") assert not cache.has(cache_key), "The key should not be in cache" if check_cache_size: assert cache.size() == cache_size, "Unexpected cache size" - - ############################################################################# - # latest builds (third call) - ############################################################################# - logger.debug("\n\nGetting latest builds (third call)...") - builds = i.get_test_builds() - logger.debug("Getting latest builds (third call): %r\n", builds) - assert i.cache.get_current_transaction() == t, "Unexpected transaction" + # third call ############################################################# + logger.debug("\n\nChecking build (third call): buildID=%r", b.id) + logger.debug("Build data: %r", i.get_test_build(b.id)) assert t.has(cache_key), "The key should be in the current transaction" if not multithreaded: - i.testing_service.get_test_builds.assert_called_once(), "i.testing_service.get_test_builds should be called once" + i.testing_service.get_test_build.call_count == count + 1, "i.testing_service.get_test_build should be called once" logger.debug(f"Checking if key {cache_key} is in cache...") assert not cache.has(cache_key), "The key 
should not be in cache" if check_cache_size: assert cache.size() == cache_size, "Unexpected cache size" - ############################################################################# - # Check builds - ############################################################################# - logger.debug("\n\nPreparing data to test builds...") - b_data = [] - for b in builds: - b_data.append(i.testing_service.get_test_build(i, b.id)) - logger.debug("\n\nPreparing data to test builds... DONE") - - assert len(b_data) == 4, "Unexpected number of builds" - - logger.debug("\n\nChecking test builds...") - get_test_build_method = i.testing_service.get_test_build - - for count in range(0, len(b_data)): - b = b_data[count] - i.testing_service.get_test_build = SerializableMock() - i.testing_service.get_test_build.return_value = b - - cache_key = make_cache_key(i.get_test_build, client_scope=False, args=[i, b.id]) - - # first call ############################################################# - logger.debug("\n\nChecking build (first call): buildID=%r", b.id) - logger.debug("Build data: %r", i.get_test_build(b.id)) - assert t.has(cache_key), "The key should be in the current transaction" - if not multithreaded: - i.testing_service.get_test_build.call_count == count + 1, "i.testing_service.get_test_build should be called once" - logger.debug(f"Checking if key {cache_key} is in cache...") - assert not cache.has(cache_key), "The key should not be in cache" - cache_size = cache.size() - logger.debug("Current cache size: %r", cache_size) - - # second call ############################################################# - logger.debug("\n\nChecking build (second call): buildID=%r", b.id) - logger.debug("Build data: %r", i.get_test_build(b.id)) - assert t.has(cache_key), "The key should be in the current transaction" - if not multithreaded: - i.testing_service.get_test_build.call_count == count + 1, "i.testing_service.get_test_build should be called once" - logger.debug(f"Checking if key {cache_key} is in cache...") - assert not cache.has(cache_key), "The key should not be in cache" - if check_cache_size: - assert cache.size() == cache_size, "Unexpected cache size" - # third call ############################################################# - logger.debug("\n\nChecking build (third call): buildID=%r", b.id) - logger.debug("Build data: %r", i.get_test_build(b.id)) - assert t.has(cache_key), "The key should be in the current transaction" - if not multithreaded: - i.testing_service.get_test_build.call_count == count + 1, "i.testing_service.get_test_build should be called once" - logger.debug(f"Checking if key {cache_key} is in cache...") - assert not cache.has(cache_key), "The key should not be in cache" - if check_cache_size: - assert cache.size() == cache_size, "Unexpected cache size" - - # check last test build - logger.debug("\n\nGetting latest build: %r", i.last_test_build) - if not multithreaded: - i.testing_service.get_test_builds.assert_called_once(), "i.testing_service.get_test_builds should be called once" - logger.debug("\n\nGetting latest build... 
DONE\n\n") - - # restore original method - i.testing_service.get_test_build = get_test_build_method - i.testing_service.get_test_builds = get_test_builds_method - - ############################################################################ - # check latest build - ############################################################################ - logger.debug("\n\nGetting latest build: %r", i.last_test_build) - if check_cache_size: - assert cache.size() == cache_size, "Unexpected cache size" - - # check transactions - transaction_keys = t.keys() - logger.debug("Transaction keys (# %r): %r", len(transaction_keys), transaction_keys) - assert len(transaction_keys) == t.size(), "Unexpected transaction size" - - # check the cache after the transaction is completed - if check_cache_size: - cache_size = cache.size() - assert len(transaction_keys) == cache_size, "Unexpected cache size: it should be equal to the transaction size" - - sleep(2) - assert cache.size() > 0, "Cache should not be empty" - logger.debug(cache.keys()) - if not multithreaded: - assert len(cache.backend.keys("lock*")) == 0, "No lock should be set" - else: - if results: - assert results, "Results should not be None" - results[index]['result'].extend(transactions) - return transactions - except Exception as e: - logger.exception(e) + # check last test build + logger.debug("\n\nGetting latest build: %r", i.last_test_build) + if not multithreaded: + i.testing_service.get_test_builds.assert_called_once(), "i.testing_service.get_test_builds should be called once" + logger.debug("\n\nGetting latest build... DONE\n\n") + + # restore original method + i.testing_service.get_test_build = get_test_build_method + i.testing_service.get_test_builds = get_test_builds_method + + ############################################################################ + # check latest build + ############################################################################ + logger.debug("\n\nGetting latest build: %r", i.last_test_build) + if check_cache_size: + assert cache.size() == cache_size, "Unexpected cache size" + + # check transactions + transaction_keys = t.keys() + logger.debug("Transaction keys (# %r): %r", len(transaction_keys), transaction_keys) + assert len(transaction_keys) == t.size(), "Unexpected transaction size" + + # check the cache after the transaction is completed + if check_cache_size: + cache_size = cache.size() + assert len(transaction_keys) == cache_size, "Unexpected cache size: it should be equal to the transaction size" + sleep(2) + assert cache.size() > 0, "Cache should not be empty" + logger.debug(cache.keys()) + + # prepare return value + return_value = [] + for tr in transactions: + return_value.append({ + 'transaction': str(tr), + 'keys': tr.keys() + }) + if not multithreaded: + assert len(cache.backend.keys("lock*")) == 0, "No lock should be set" + else: + if results: + assert results, "Results should not be None" + results[index]['result'].extend(return_value) + logger.debug("Return value: %r", return_value) + return return_value def test_cache_task_last_build(app_context, redis_cache, user1): @@ -340,27 +347,25 @@ def test_cache_task_last_build(app_context, redis_cache, user1): def check_results(results): - logger.debug("Results: %r", results) + logger.debug("\n\n\nResults: %r", results) assert len(cache.backend.keys(pattern="locks*")) == 0, "Locks should not be in cache" - for i in range(0, len(results)): - r = results[i]['result'] - for tdx in range(0, len(r)): - logger.debug("Transaction %r keys: # %r", r[tdx], 
len(r[tdx].keys())) - logger.debug("Transaction %r keys: %r", r[tdx], r[tdx].keys()) - if i == len(results) - 1: break - r1 = results[i]['result'] - r2 = results[i + 1]['result'] - - assert len(r1) == len(r2), "Transactions should be the same length" - + p1 = results[i] + p2 = results[i + 1] + r1 = p1['result'] + r2 = p2['result'] + processes = f"'{p1['index']}' and '{p2['index']}'" + logger.debug(f"Checking process/thread {processes}") + logger.debug("Number of transactions: %r => %r ||| %r => %r", p1['index'], len(r1), p2['index'], len(r2)) + assert len(r1) == len(r2), f"Process/thread {processes} should have the same number of transactions" for tdx in range(0, len(r1)): - logger.debug("Transaction %r keys: %r", r1[tdx], r1[tdx].keys()) - logger.debug("Transaction %r keys: %r", r2[tdx], r2[tdx].keys()) - assert r1[tdx].size() == r2[tdx].size(), \ - f"Transactions {r1[tdx]} and {r2[tdx]} should have the same number of keys" + logger.debug("Checking transactions %r and %r", + r1[tdx]['transaction'], r2[tdx]['transaction']) + assert len(r1[tdx]['keys']) == len(r2[tdx]['keys']), \ + f"Transactions {r1[tdx]['transaction']} and {r2[tdx]['transaction']} should have the same number of keys" + logger.debug("\n\nChecking Results DONE\n\n\n",) def test_cache_last_build_update_multi_thread(app_context, redis_cache, user1): @@ -368,8 +373,8 @@ def test_cache_last_build_update_multi_thread(app_context, redis_cache, user1): w = setup_test_cache_last_build_update(app_context, redis_cache, user1) logger.debug("Workflow %r", w) # set up threads - number_of_threads = 2 results = [] + number_of_threads = 3 for index in range(number_of_threads): t = threading.Thread( target=cache_last_build_update, name=f"T{index}", args=(app_context.app, w, user1), @@ -379,7 +384,8 @@ def test_cache_last_build_update_multi_thread(app_context, redis_cache, user1): "multithreaded": True, "results": results}) results.append({ - "t": t, + 't': t, + 'index': str(index), "result": [] }) t.start() @@ -399,24 +405,27 @@ def test_cache_last_build_update_multi_process(app_context, redis_cache, user1): w = setup_test_cache_last_build_update(app_context, redis_cache, user1) # set up processes processes = 3 - pool = ThreadPool(processes=processes) results = [] + manager = Manager() for index in range(processes): + p = Process(target=cache_last_build_update, args=(app_context.app, w, user1), + kwargs={"check_cache_size": False, + "index": index, + "multithreaded": True, + "results": results}) results.append({ - 't': pool.apply_async(cache_last_build_update, args=(app_context.app, w, user1), - kwds={"check_cache_size": False, - "index": index, - "multithreaded": True, - "results": results}), - 'result': [] + 'p': p, + 'index': str(index), + 'result': manager.list() }) + p.start() + sleep(1) # wait for results - for tdata in results: - t = tdata['t'] - result = t.get() - tdata['result'] = result + for pdata in results: + p = pdata['p'] + p.join() # check results - sleep(2) + sleep(4) check_results(results) From f462460dd0bcf1dec5741e30f139117386995fd3 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Tue, 23 Nov 2021 17:50:39 +0100 Subject: [PATCH 150/162] Add OpenAPI logo --- lifemonitor/static/img/logo/openapi-custom-colors.svg | 1 + lifemonitor/static/img/logo/openapi.svg | 1 + 2 files changed, 2 insertions(+) create mode 100644 lifemonitor/static/img/logo/openapi-custom-colors.svg create mode 100644 lifemonitor/static/img/logo/openapi.svg diff --git a/lifemonitor/static/img/logo/openapi-custom-colors.svg 
b/lifemonitor/static/img/logo/openapi-custom-colors.svg new file mode 100644 index 000000000..e5e11f72b --- /dev/null +++ b/lifemonitor/static/img/logo/openapi-custom-colors.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/lifemonitor/static/img/logo/openapi.svg b/lifemonitor/static/img/logo/openapi.svg new file mode 100644 index 000000000..50ee1e39f --- /dev/null +++ b/lifemonitor/static/img/logo/openapi.svg @@ -0,0 +1 @@ + \ No newline at end of file From e3e1fb94d2e20e1e2a48c46c8de7e3c1efd7fa65 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Tue, 23 Nov 2021 17:53:18 +0100 Subject: [PATCH 151/162] Add link to API explorer on "API keys" tab --- .../auth/templates/auth/apikeys_tab.j2 | 30 +++++++++++++++---- 1 file changed, 25 insertions(+), 5 deletions(-) diff --git a/lifemonitor/auth/templates/auth/apikeys_tab.j2 b/lifemonitor/auth/templates/auth/apikeys_tab.j2 index a505d83c8..c5e2eccfd 100644 --- a/lifemonitor/auth/templates/auth/apikeys_tab.j2 +++ b/lifemonitor/auth/templates/auth/apikeys_tab.j2 @@ -1,8 +1,28 @@ -
- Allow a user to interact with the LifeMonitor API. - An API key acts as a static authentication token - that can be used to quickly - try API calls via the API docs interface or tools like curl. +
+
+ Allow a user to interact with the LifeMonitor API. + An API key acts as a static authentication token + that can be used to quickly + try API calls via the API docs interface or tools like curl. +
+
From ee01fa10a54fdde402d821b796e030014300b80a Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Tue, 23 Nov 2021 17:58:23 +0100 Subject: [PATCH 152/162] Fix padding --- lifemonitor/auth/templates/auth/apikeys_tab.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lifemonitor/auth/templates/auth/apikeys_tab.j2 b/lifemonitor/auth/templates/auth/apikeys_tab.j2 index c5e2eccfd..420a30af9 100644 --- a/lifemonitor/auth/templates/auth/apikeys_tab.j2 +++ b/lifemonitor/auth/templates/auth/apikeys_tab.j2 @@ -1,5 +1,5 @@
-
+
Allow a user to interact with the LifeMonitor API. An API key acts as a static authentication token that can be used to quickly From cb8e6ed3d395fd9e6f7b65f477fb120f977921b6 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Tue, 23 Nov 2021 17:59:07 +0100 Subject: [PATCH 153/162] Add link to API explorer on "OAuth2 Apps" tab --- .../auth/templates/auth/oauth2_clients_tab.j2 | 24 ++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/lifemonitor/auth/templates/auth/oauth2_clients_tab.j2 b/lifemonitor/auth/templates/auth/oauth2_clients_tab.j2 index 45e52d049..066f3c72e 100644 --- a/lifemonitor/auth/templates/auth/oauth2_clients_tab.j2 +++ b/lifemonitor/auth/templates/auth/oauth2_clients_tab.j2 @@ -1,9 +1,27 @@ -
-
+
+
Allow an OAuth2 client to interact with LifeMonitor on behalf of a user. This authentication method is appropriate for applications that need to interact with LifeMonitor as a user. +
+ +

OAuth2 Apps

@@ -87,4 +105,4 @@
-
+ From 07926995b8b9c6a9fd26adfcac08a1bc03a351e7 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Tue, 23 Nov 2021 18:01:25 +0100 Subject: [PATCH 154/162] Fix margin --- lifemonitor/auth/templates/auth/apikeys_tab.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lifemonitor/auth/templates/auth/apikeys_tab.j2 b/lifemonitor/auth/templates/auth/apikeys_tab.j2 index 420a30af9..d70a96c38 100644 --- a/lifemonitor/auth/templates/auth/apikeys_tab.j2 +++ b/lifemonitor/auth/templates/auth/apikeys_tab.j2 @@ -1,4 +1,4 @@ -
+
Allow a user to interact with the LifeMonitor API. An API key acts as a static authentication token From a0b9a75c8f16a43da1d158b066944a93bfe30e58 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Tue, 23 Nov 2021 17:10:00 +0000 Subject: [PATCH 155/162] Reset task-queue after install/update --- k8s/templates/job-init.yaml | 2 +- k8s/templates/job-upgrade.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/k8s/templates/job-init.yaml b/k8s/templates/job-init.yaml index 4ebca0339..a20595777 100644 --- a/k8s/templates/job-init.yaml +++ b/k8s/templates/job-init.yaml @@ -12,7 +12,7 @@ spec: image: {{ include "chart.lifemonitor.image" . }} imagePullPolicy: {{ .Values.lifemonitor.imagePullPolicy }} command: ["/bin/sh","-c"] - args: ["wait-for-postgres.sh && flask init db"] + args: ["wait-for-postgres.sh && flask init db && flask task-queue reset"] env: {{- include "lifemonitor.common-env" . | nindent 10 }} volumeMounts: diff --git a/k8s/templates/job-upgrade.yaml b/k8s/templates/job-upgrade.yaml index 11f3fd022..34008f66f 100644 --- a/k8s/templates/job-upgrade.yaml +++ b/k8s/templates/job-upgrade.yaml @@ -18,7 +18,7 @@ spec: image: {{ include "chart.lifemonitor.image" . }} imagePullPolicy: {{ .Values.lifemonitor.imagePullPolicy }} command: ["/bin/sh","-c"] - args: ["wait-for-postgres.sh && flask init db"] + args: ["wait-for-postgres.sh && flask init db && flask task-queue reset"] env: {{ include "lifemonitor.common-env" . | indent 10 }} volumeMounts: From c8dff51f8b9123b696914d75b35c949d5b549037 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Tue, 23 Nov 2021 18:20:03 +0100 Subject: [PATCH 156/162] Fix test --- tests/unit/cache/test_cache.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/unit/cache/test_cache.py b/tests/unit/cache/test_cache.py index 88f74d375..84085e6be 100644 --- a/tests/unit/cache/test_cache.py +++ b/tests/unit/cache/test_cache.py @@ -151,6 +151,7 @@ def setup_test_cache_last_build_update(app_context, redis_cache, user1): def test_cache_last_build_update(app_context, redis_cache, user1): w = setup_test_cache_last_build_update(app_context, redis_cache, user1) cache.reset_locks() + sleep(2) cache_last_build_update(app_context.app, w, user1, check_cache_size=True) From 4daadfe01a0f250191fe1ed5a0e7ff7ed10a49dd Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Tue, 23 Nov 2021 17:29:26 +0000 Subject: [PATCH 157/162] Update default host of PostgreSQL service --- settings.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/settings.conf b/settings.conf index 8e7b80cc5..e817ee59e 100644 --- a/settings.conf +++ b/settings.conf @@ -35,7 +35,7 @@ FLASK_ENV=development LIFEMONITOR_ADMIN_PASSWORD=admin # PostgreSQL DBMS settings -#POSTGRESQL_HOST=0.0.0.0 +POSTGRESQL_HOST=db POSTGRESQL_PORT=5432 POSTGRESQL_DATABASE=lm POSTGRESQL_USERNAME=lm From 115f1aa9109aac9b5bdd9e1c4d64eb3dbc4c11c9 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Tue, 23 Nov 2021 18:15:27 +0000 Subject: [PATCH 158/162] Allow to configure worker threads --- docker/worker_entrypoint.sh | 8 +++++++- k8s/templates/secret.yaml | 3 +++ k8s/values.yaml | 1 + settings.conf | 1 + 4 files changed, 12 insertions(+), 1 deletion(-) diff --git a/docker/worker_entrypoint.sh b/docker/worker_entrypoint.sh index 056a20b17..875ada86d 100755 --- a/docker/worker_entrypoint.sh +++ b/docker/worker_entrypoint.sh @@ -41,11 +41,17 @@ if [[ -n "${WORKER_PROCESSES:-}" ]]; then log "Worker starting ${WORKER_PROCESSES} processes" fi +if [[ -n "${WORKER_THREADS:-}" ]]; then + 
threads="--threads ${WORKER_THREADS}" + log "Worker starting with ${WORKER_THREADS} threads per process" +fi + while : ; do - /usr/local/bin/dramatiq \ + /opt/homebrew/bin/dramatiq \ ${verbose:-} \ ${watch:-} \ ${processes:-} \ + ${threads:-} \ lifemonitor.tasks.worker:broker lifemonitor.tasks.tasks exit_code=$? if [[ $exit_code == 3 ]]; then diff --git a/k8s/templates/secret.yaml b/k8s/templates/secret.yaml index df8697f90..73af94f14 100644 --- a/k8s/templates/secret.yaml +++ b/k8s/templates/secret.yaml @@ -38,6 +38,9 @@ stringData: # Dramatiq worker settings WORKER_PROCESSES={{ .Values.worker.processes }} + {{- if .Values.worker.threads }} + WORKER_THREADS={{ .Values.worker.threads }} + {{- end }} # Redis settings REDIS_HOST={{ .Release.Name }}-redis-master diff --git a/k8s/values.yaml b/k8s/values.yaml index 296ba6abb..07683f742 100644 --- a/k8s/values.yaml +++ b/k8s/values.yaml @@ -143,6 +143,7 @@ worker: imagePullSecrets: [] processes: 1 + #threads: 1 podAnnotations: {} diff --git a/settings.conf b/settings.conf index e817ee59e..abd95c5d4 100644 --- a/settings.conf +++ b/settings.conf @@ -47,6 +47,7 @@ GUNICORN_THREADS=2 # Dramatiq worker settings WORKER_PROCESSES=1 +WORKER_THREADS=3 # Redis settings REDIS_HOST=redis From e41e13dbea1a199df691dc1a438a250bf9523d94 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Wed, 24 Nov 2021 08:12:04 +0000 Subject: [PATCH 159/162] Restore path of dramatiq --- docker/worker_entrypoint.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/worker_entrypoint.sh b/docker/worker_entrypoint.sh index 875ada86d..9e78003c1 100755 --- a/docker/worker_entrypoint.sh +++ b/docker/worker_entrypoint.sh @@ -47,7 +47,7 @@ if [[ -n "${WORKER_THREADS:-}" ]]; then fi while : ; do - /opt/homebrew/bin/dramatiq \ + /usr/local/bin/dramatiq \ ${verbose:-} \ ${watch:-} \ ${processes:-} \ From 961e3f0accaef52abcef13033686ebb0272da1d7 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Wed, 24 Nov 2021 10:45:21 +0100 Subject: [PATCH 160/162] Fix link title --- lifemonitor/auth/templates/auth/apikeys_tab.j2 | 2 +- lifemonitor/auth/templates/auth/oauth2_clients_tab.j2 | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lifemonitor/auth/templates/auth/apikeys_tab.j2 b/lifemonitor/auth/templates/auth/apikeys_tab.j2 index d70a96c38..9bf8cdd90 100644 --- a/lifemonitor/auth/templates/auth/apikeys_tab.j2 +++ b/lifemonitor/auth/templates/auth/apikeys_tab.j2 @@ -8,7 +8,7 @@
- +
diff --git a/lifemonitor/auth/templates/auth/oauth2_clients_tab.j2 b/lifemonitor/auth/templates/auth/oauth2_clients_tab.j2 index 066f3c72e..0086b0f9d 100644 --- a/lifemonitor/auth/templates/auth/oauth2_clients_tab.j2 +++ b/lifemonitor/auth/templates/auth/oauth2_clients_tab.j2 @@ -6,7 +6,7 @@
- +
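The two template patches above describe how an API key is used: it is a static token attached to each request. As a rough sketch of what that looks like from a client's side (the base URL, the "ApiKey" header name, and the /users/current path are assumptions for illustration — the API explorer linked from the tab shows the authoritative ones):

import requests

API_BASE = "https://api.lifemonitor.eu"  # assumed deployment URL
API_KEY = "<your-api-key>"  # a key generated from the "API keys" tab

# An API key is a static authentication token: it is simply attached
# to every request as a header (header name assumed here).
response = requests.get(f"{API_BASE}/users/current",
                        headers={"ApiKey": API_KEY})
response.raise_for_status()
print(response.json())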
From 7d9bf7156ed92ccfb1ad4d89802ec5c490152b06 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Wed, 24 Nov 2021 10:51:00 +0100 Subject: [PATCH 161/162] Update link layout --- .../auth/templates/auth/apikeys_tab.j2 | 14 ++++---- lifemonitor/auth/templates/auth/base.j2 | 5 ++- .../auth/templates/auth/oauth2_clients_tab.j2 | 36 ++++++++++--------- 3 files changed, 31 insertions(+), 24 deletions(-) diff --git a/lifemonitor/auth/templates/auth/apikeys_tab.j2 b/lifemonitor/auth/templates/auth/apikeys_tab.j2 index 9bf8cdd90..9ed4edba3 100644 --- a/lifemonitor/auth/templates/auth/apikeys_tab.j2 +++ b/lifemonitor/auth/templates/auth/apikeys_tab.j2 @@ -6,17 +6,19 @@ try API calls via the API docs interface or tools like curl.
-
+
-
-
- OpenAPI -
-
explorer
+
+
+
+ API +
+
+
explorer
diff --git a/lifemonitor/auth/templates/auth/base.j2 b/lifemonitor/auth/templates/auth/base.j2 index 0ea37cc2e..3c388e855 100644 --- a/lifemonitor/auth/templates/auth/base.j2 +++ b/lifemonitor/auth/templates/auth/base.j2 @@ -9,7 +9,10 @@ {% block stylesheets %} - + + + + diff --git a/lifemonitor/auth/templates/auth/oauth2_clients_tab.j2 b/lifemonitor/auth/templates/auth/oauth2_clients_tab.j2 index 0086b0f9d..e533edc44 100644 --- a/lifemonitor/auth/templates/auth/oauth2_clients_tab.j2 +++ b/lifemonitor/auth/templates/auth/oauth2_clients_tab.j2 @@ -4,23 +4,25 @@ This authentication method is appropriate for applications that need to interact with LifeMonitor as a user.
-
+
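The "OAuth2 Apps" tab reworked above covers the other authentication route: a registered client obtains an access token and acts on behalf of a user. Below is a minimal sketch of that flow, assuming a token endpoint at /oauth2/token and a client-credentials grant; the endpoint path, grant type, and the /workflows path are illustrative assumptions, not confirmed API details.

import requests

API_BASE = "https://api.lifemonitor.eu"  # assumed deployment URL

# Exchange the registered client's credentials for an access token
# (token endpoint path assumed for illustration).
token_resp = requests.post(f"{API_BASE}/oauth2/token", data={
    "grant_type": "client_credentials",
    "client_id": "<client-id>",
    "client_secret": "<client-secret>",
})
token_resp.raise_for_status()
access_token = token_resp.json()["access_token"]

# Unlike API keys, OAuth2 access tokens travel as standard Bearer tokens.
workflows = requests.get(f"{API_BASE}/workflows",
                         headers={"Authorization": f"Bearer {access_token}"})
print(workflows.json())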
From cf8e86f2e6c8bde4832a8012e2914bf1c14d6105 Mon Sep 17 00:00:00 2001 From: Marco Enrico Piras Date: Wed, 24 Nov 2021 12:19:45 +0100 Subject: [PATCH 162/162] Bump version number --- lifemonitor/static/src/package.json | 2 +- specs/api.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lifemonitor/static/src/package.json b/lifemonitor/static/src/package.json index 73682ab85..ba2afb2a3 100644 --- a/lifemonitor/static/src/package.json +++ b/lifemonitor/static/src/package.json @@ -1,7 +1,7 @@ { "name": "lifemonitor", "description": "Workflow Testing Service", - "version": "0.2.0", + "version": "0.4.0", "license": "MIT", "author": "CRS4", "main": "../dist/js/lifemonitor.min.js", diff --git a/specs/api.yaml b/specs/api.yaml index df6db116e..88d712936 100644 --- a/specs/api.yaml +++ b/specs/api.yaml @@ -3,7 +3,7 @@ openapi: "3.0.0" info: - version: "0.3.0" + version: "0.4.0" title: "Life Monitor API" description: | *Workflow sustainability service* @@ -18,7 +18,7 @@ info: servers: - url: / description: > - Version 0.3.0 of API. + Version 0.4.0 of API. tags: - name: Registries
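The bump above has to be applied by hand in two places (lifemonitor/static/src/package.json and specs/api.yaml, which also repeats the version in its description text). A small consistency check along these lines can catch the two files drifting apart; the regular expression is a naive scan of the OpenAPI header and is offered only as an illustrative sketch, not part of the patch series:

import json
import re

# Version declared by the front-end package...
with open("lifemonitor/static/src/package.json") as f:
    js_version = json.load(f)["version"]

# ...and the first `version:` entry of the OpenAPI spec (naive regex scan).
with open("specs/api.yaml") as f:
    match = re.search(r'version:\s*"([^"]+)"', f.read())
api_version = match.group(1) if match else None

assert js_version == api_version, \
    f"version mismatch: package.json={js_version!r}, api.yaml={api_version!r}"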