From 8112d8994ec67023b18c7b0f514646df492e0bdc Mon Sep 17 00:00:00 2001 From: Marco Mancini Date: Thu, 11 Jan 2024 09:09:25 +0100 Subject: [PATCH 01/15] Update api with adjust_for_intake branch --- api/Dockerfile | 20 +- api/app/api_utils.py | 73 ++++ api/app/auth/__init__.py | 0 api/app/auth/backend.py | 66 ++++ api/app/auth/manager.py | 72 ++++ api/app/auth/models.py | 38 ++ api/app/auth/scopes.py | 5 + api/app/callbacks/__init__.py | 1 + api/app/callbacks/on_startup.py | 15 + api/app/const/__init__.py | 0 api/app/const/tags.py | 5 + api/app/const/venv.py | 7 + api/app/decorators_factory.py | 37 ++ api/app/encoders.py | 41 ++ api/app/endpoint_handlers/__init__.py | 3 + api/app/endpoint_handlers/dataset.py | 430 ++++++++++++++++++++ api/app/endpoint_handlers/file.py | 66 ++++ api/app/endpoint_handlers/request.py | 144 +++++++ api/app/exceptions.py | 195 ++++++++++ api/app/main.py | 538 ++++++++++++++++++++++---- api/app/validation.py | 36 ++ api/requirements.txt | 5 +- 22 files changed, 1710 insertions(+), 87 deletions(-) create mode 100644 api/app/api_utils.py create mode 100644 api/app/auth/__init__.py create mode 100644 api/app/auth/backend.py create mode 100644 api/app/auth/manager.py create mode 100644 api/app/auth/models.py create mode 100644 api/app/auth/scopes.py create mode 100644 api/app/callbacks/__init__.py create mode 100644 api/app/callbacks/on_startup.py create mode 100644 api/app/const/__init__.py create mode 100644 api/app/const/tags.py create mode 100644 api/app/const/venv.py create mode 100644 api/app/decorators_factory.py create mode 100644 api/app/encoders.py create mode 100644 api/app/endpoint_handlers/__init__.py create mode 100644 api/app/endpoint_handlers/dataset.py create mode 100644 api/app/endpoint_handlers/file.py create mode 100644 api/app/endpoint_handlers/request.py create mode 100644 api/app/exceptions.py create mode 100644 api/app/validation.py diff --git a/api/Dockerfile b/api/Dockerfile index 6182cb1..9ee0633 100644 --- a/api/Dockerfile +++ b/api/Dockerfile @@ -1,15 +1,9 @@ -FROM continuumio/miniconda3 -WORKDIR /code -COPY ./api/requirements.txt /code/requirements.txt +ARG REGISTRY=rg.nl-ams.scw.cloud/geodds-production +ARG TAG=latest +FROM $REGISTRY/geodds-datastore:$TAG +WORKDIR /app +COPY requirements.txt /code/requirements.txt RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt -RUN conda install -c anaconda psycopg2 -COPY ./utils/wait-for-it.sh /code/wait-for-it.sh -COPY ./db/dbmanager /code/db/dbmanager -COPY ./geoquery/ /code/geoquery -COPY ./resources /code/app/resources -COPY ./api/app /code/app +COPY app /app EXPOSE 80 -# VOLUME /code -CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "80"] -# if behind a proxy use --proxy-headers -# CMD ["uvicorn", "app.main:app", "--proxy-headers", "--host", "0.0.0.0", "--port", "80"] \ No newline at end of file +CMD ["uvicorn", "app.main:app", "--proxy-headers", "--host", "0.0.0.0", "--port", "80"] diff --git a/api/app/api_utils.py b/api/app/api_utils.py new file mode 100644 index 0000000..82ea9f6 --- /dev/null +++ b/api/app/api_utils.py @@ -0,0 +1,73 @@ +"""Utils module""" + + +def convert_bytes(size_bytes: int, to: str) -> float: + """Converts size in bytes to the other unit - one out of: + ["kb", "mb", "gb"] + + Parameters + ---------- + size_bytes : int + Size in bytes + to : str + Unit to convert `size_bytes` to + + size : float + `size_bytes` converted to the given unit + """ + assert to is not None, "Expected unit cannot be `None`" + to = to.lower() + match to: + case 
"bytes": + return size_bytes + case "kb": + return size_bytes / 1024 + case "mb": + return size_bytes / 1024**2 + case "gb": + return size_bytes / 1024**3 + case _: + raise ValueError(f"unsupported units: {to}") + + +def make_bytes_readable_dict( + size_bytes: int, units: str | None = None +) -> dict: + """Prepare dictionary representing size (in bytes) in more readable unit + to keep value in the range [0,1] - if `units` is `None`. + If `units` is not None, converts `size_bytes` to the size expressed by + that argument. + + Parameters + ---------- + size_bytes : int + Size expressed in bytes + units : optional str + + Returns + ------- + result : dict + A dictionary with size and units in the form: + { + "value": ..., + "units": ... + } + """ + if units is None: + units = "bytes" + if units != "bytes": + converted_size = convert_bytes(size_bytes=size_bytes, to=units) + return {"value": converted_size, "units": units} + val = size_bytes + if val > 1024: + units = "kB" + val /= 1024 + if val > 1024: + units = "MB" + val /= 1024 + if val > 1024: + units = "GB" + val /= 1024 + if val > 0.0 and (round(val, 2) == 0.00): + val = 0.01 + return {"value": round(val, 2), "units": units} diff --git a/api/app/auth/__init__.py b/api/app/auth/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/api/app/auth/backend.py b/api/app/auth/backend.py new file mode 100644 index 0000000..c172b58 --- /dev/null +++ b/api/app/auth/backend.py @@ -0,0 +1,66 @@ +"""The module contains authentication backend""" +from uuid import UUID + +from starlette.authentication import ( + AuthCredentials, + AuthenticationBackend, + UnauthenticatedUser, +) +from dbmanager.dbmanager import DBManager + +import exceptions as exc +from auth.models import DDSUser +from auth import scopes + + +class DDSAuthenticationBackend(AuthenticationBackend): + """Class managing authentication and authorization""" + + async def authenticate(self, conn): + """Authenticate user based on `User-Token` header""" + if "User-Token" in conn.headers: + return self._manage_user_token_auth(conn.headers["User-Token"]) + return AuthCredentials([scopes.ANONYMOUS]), UnauthenticatedUser() + + def _manage_user_token_auth(self, user_token: str): + try: + user_id, api_key = self.get_authorization_scheme_param(user_token) + except exc.BaseDDSException as err: + raise err.wrap_around_http_exception() + user_dto = DBManager().get_user_details(user_id) + eligible_scopes = [scopes.AUTHENTICATED] + self._get_scopes_for_user( + user_dto=user_dto + ) + if user_dto.api_key != api_key: + raise exc.AuthenticationFailed( + user_dto + ).wrap_around_http_exception() + return AuthCredentials(eligible_scopes), DDSUser(username=user_id) + + def _get_scopes_for_user(self, user_dto) -> list[str]: + if user_dto is None: + return [] + eligible_scopes = [] + for role in user_dto.roles: + if "admin" == role.role_name: + eligible_scopes.append(scopes.ADMIN) + continue + # NOTE: Role-specific scopes + # Maybe need some more logic + eligible_scopes.append(role.role_name) + return eligible_scopes + + def get_authorization_scheme_param(self, user_token: str): + """Get `user_id` and `api_key` if authorization scheme is correct.""" + if user_token is None or user_token.strip() == "": + raise exc.EmptyUserTokenError + if ":" not in user_token: + raise exc.ImproperUserTokenError + user_id, api_key, *rest = user_token.split(":") + if len(rest) > 0: + raise exc.ImproperUserTokenError + try: + _ = UUID(user_id, version=4) + except ValueError as err: + raise exc.ImproperUserTokenError 
from err + return (user_id, api_key) diff --git a/api/app/auth/manager.py b/api/app/auth/manager.py new file mode 100644 index 0000000..02bf686 --- /dev/null +++ b/api/app/auth/manager.py @@ -0,0 +1,72 @@ +"""Module with access/authentication functions""" +from typing import Optional + +from utils.api_logging import get_dds_logger +import exceptions as exc + +log = get_dds_logger(__name__) + + +def is_role_eligible_for_product( + product_role_name: Optional[str] = None, + user_roles_names: Optional[list[str]] = None, +): + """Check if given role is eligible for the product with the provided + `product_role_name`. + + Parameters + ---------- + product_role_name : str, optional, default=None + The role which is eligible for the given product. + If `None`, product_role_name is claimed to be public + user_roles_names: list of str, optional, default=None + A list of user roles names. If `None`, user_roles_names is claimed + to be public + + Returns + ------- + is_eligible : bool + Flag which indicate if any role within the given `user_roles_names` + is eligible for the product with `product_role_name` + """ + log.debug( + "verifying eligibility of the product role '%s' against roles '%s'", + product_role_name, + user_roles_names, + ) + if product_role_name == "public" or product_role_name is None: + return True + if user_roles_names is None: + # NOTE: it means, we consider the public profile + return False + if "admin" in user_roles_names: + return True + if product_role_name in user_roles_names: + return True + return False + + +def assert_is_role_eligible( + product_role_name: Optional[str] = None, + user_roles_names: Optional[list[str]] = None, +): + """Assert that user role is eligible for the product + + Parameters + ---------- + product_role_name : str, optional, default=None + The role which is eligible for the given product. + If `None`, product_role_name is claimed to be public + user_roles_names: list of str, optional, default=None + A list of user roles names. If `None`, user_roles_names is claimed + to be public + + Raises + ------- + AuthorizationFailed + """ + if not is_role_eligible_for_product( + product_role_name=product_role_name, + user_roles_names=user_roles_names, + ): + raise exc.AuthorizationFailed diff --git a/api/app/auth/models.py b/api/app/auth/models.py new file mode 100644 index 0000000..bff896f --- /dev/null +++ b/api/app/auth/models.py @@ -0,0 +1,38 @@ +"""The module contains models related to the authentication and authorization""" +from starlette.authentication import SimpleUser + + +class DDSUser(SimpleUser): + """Immutable class containing information about the authenticated user""" + + def __init__(self, username: str) -> None: + super().__init__(username=username) + + @property + def id(self): + return self.username + + def __eq__(self, other) -> bool: + if not isinstance(other, DDSUser): + return False + if self.username == other.username: + return True + return False + + def __ne__(self, other): + return self != other + + def __repr__(self): + return f"" + + def __delattr__(self, name): + if getattr(self, name, None) is not None: + raise AttributeError("The attribute '{name}' cannot be deleted!") + super().__delattr__(name) + + def __setattr__(self, name, value): + if getattr(self, name, None) is not None: + raise AttributeError( + "The attribute '{name}' cannot modified when not None!" 
+ ) + super().__setattr__(name, value) diff --git a/api/app/auth/scopes.py b/api/app/auth/scopes.py new file mode 100644 index 0000000..75113e4 --- /dev/null +++ b/api/app/auth/scopes.py @@ -0,0 +1,5 @@ +"""This module contains predefined authorization scopes""" + +ADMIN = "admin" +AUTHENTICATED = "authenticated" +ANONYMOUS = "anonymous" diff --git a/api/app/callbacks/__init__.py b/api/app/callbacks/__init__.py new file mode 100644 index 0000000..e003acf --- /dev/null +++ b/api/app/callbacks/__init__.py @@ -0,0 +1 @@ +from .on_startup import all_onstartup_callbacks diff --git a/api/app/callbacks/on_startup.py b/api/app/callbacks/on_startup.py new file mode 100644 index 0000000..ec883d3 --- /dev/null +++ b/api/app/callbacks/on_startup.py @@ -0,0 +1,15 @@ +"""Module with functions call during API server startup""" +from utils.api_logging import get_dds_logger + +from datastore.datastore import Datastore + +log = get_dds_logger(__name__) + + +def _load_cache() -> None: + log.info("loading cache started...") + Datastore()._load_cache() + log.info("cache loaded succesfully!") + + +all_onstartup_callbacks = [_load_cache] diff --git a/api/app/const/__init__.py b/api/app/const/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/api/app/const/tags.py b/api/app/const/tags.py new file mode 100644 index 0000000..58a2213 --- /dev/null +++ b/api/app/const/tags.py @@ -0,0 +1,5 @@ +"""The module with endpoint tags definitions""" + +BASIC = "basic" +DATASET = "dataset" +REQUEST = "request" diff --git a/api/app/const/venv.py b/api/app/const/venv.py new file mode 100644 index 0000000..85c3658 --- /dev/null +++ b/api/app/const/venv.py @@ -0,0 +1,7 @@ +"""This modul contains all supported environment variables names""" + +ENDPOINT_PREFIX = "ENDPOINT_PREFIX" +ALLOWED_CORS_ORIGINS_REGEX = "ALLOWED_CORS_ORIGINS_REGEX" +LOGGING_FORMAT = "LOGGING_FORMAT" +LOGGING_LEVEL = "LOGGING_LEVEL" +WEB_COMPONENT_HOST = "WEB_COMPONENT_HOST" diff --git a/api/app/decorators_factory.py b/api/app/decorators_factory.py new file mode 100644 index 0000000..d2e4b39 --- /dev/null +++ b/api/app/decorators_factory.py @@ -0,0 +1,37 @@ +"""Modules with utils for creating decorators""" +from inspect import Signature + + +def assert_parameters_are_defined( + sig: Signature, required_parameters: list[tuple] +): + """Assert the given callable signature has parameters with + names and types indicated by `required_parameters` argument. + + Parameters + ---------- + sig : Signature + A signature object of a callable + required_parameters : list of tuples + List of two-element tuples containing a name and a type + of the parameter, e.g. 
[("dataset_id", str)] + + Raises + ------ + TypeError + If a required parameter is not defined or is of wrong type + """ + for param_name, param_type in required_parameters: + if param_name not in sig.parameters: + raise TypeError( + f"The parameter '{param_name}' annotated with the type" + f" '{param_type}' must be defined for the callable decorated" + " with 'authenticate_user' decorator" + ) + + +def bind_arguments(sig: Signature, *args, **kwargs): + """Bind arguments to the signature""" + args_bind = sig.bind_partial(*args, **kwargs) + args_bind.apply_defaults() + return args_bind.arguments diff --git a/api/app/encoders.py b/api/app/encoders.py new file mode 100644 index 0000000..9566f57 --- /dev/null +++ b/api/app/encoders.py @@ -0,0 +1,41 @@ +import numpy as np +from fastapi.encoders import encoders_by_class_tuples + + +def make_ndarray_dtypes_valid(o: np.ndarray) -> np.ndarray: + """Convert `numpy.array` dtype to the one which is serializable + to JSON. + + int32 -> int64 + float32 -> float 64 + + Parameters + ---------- + o : np.ndarray + A NumPy array object + + Returns + ------- + res : np.ndarray + A NumPy array object with dtype set properly + + Raises + ------ + AssertionError + If passed object is not of `numpy.ndarray` + """ + assert isinstance(o, np.ndarray) + if np.issubdtype(o.dtype, np.int32): + return o.astype(np.int64) + if np.issubdtype(o.dtype, np.float32): + return o.astype(np.float64) + return o + + +def extend_json_encoders(): + """Extend `encoders_by_class_tuples` module variable from `fastapi.encoders` + with auxiliary encoders necessary for proper application working.""" + encoders_by_class_tuples[lambda o: list(make_ndarray_dtypes_valid(o))] = ( + np.ndarray, + ) + encoders_by_class_tuples[str] += (np.int32, np.float32) diff --git a/api/app/endpoint_handlers/__init__.py b/api/app/endpoint_handlers/__init__.py new file mode 100644 index 0000000..c5a44be --- /dev/null +++ b/api/app/endpoint_handlers/__init__.py @@ -0,0 +1,3 @@ +from . import file as file_handler +from . import dataset as dataset_handler +from . import request as request_handler diff --git a/api/app/endpoint_handlers/dataset.py b/api/app/endpoint_handlers/dataset.py new file mode 100644 index 0000000..c03a54b --- /dev/null +++ b/api/app/endpoint_handlers/dataset.py @@ -0,0 +1,430 @@ +"""Modules realizing logic for dataset-related endpoints""" +import os +import pika +import json +from typing import Optional + +from fastapi.responses import FileResponse + +from dbmanager.dbmanager import DBManager, RequestStatus +from intake_geokube.queries.geoquery import GeoQuery +from intake_geokube.queries.workflow import Workflow +from datastore.datastore import Datastore, DEFAULT_MAX_REQUEST_SIZE_GB +from datastore import exception as datastore_exception + +from utils.metrics import log_execution_time +from utils.api_logging import get_dds_logger +from auth.manager import ( + is_role_eligible_for_product, +) +import exceptions as exc +from api_utils import make_bytes_readable_dict +from validation import assert_product_exists + +from . 
import request + +log = get_dds_logger(__name__) +data_store = Datastore() + +MESSAGE_SEPARATOR = os.environ["MESSAGE_SEPARATOR"] + +def _is_etimate_enabled(dataset_id, product_id): + if dataset_id in ("sentinel-2",): + return False + return True + + +@log_execution_time(log) +def get_datasets(user_roles_names: list[str]) -> list[dict]: + """Realize the logic for the endpoint: + + `GET /datasets` + + Get datasets names, their metadata and products names (if eligible for a user). + If no eligible products are found for a dataset, it is not included. + + Parameters + ---------- + user_roles_names : list of str + List of user's roles + + Returns + ------- + datasets : list of dict + A list of dictionaries with datasets information (including metadata and + eligible products lists) + + Raises + ------- + MissingKeyInCatalogEntryError + If the dataset catalog entry does not contain the required key + """ + log.debug( + "getting all eligible products for datasets...", + ) + datasets = [] + for dataset_id in data_store.dataset_list(): + log.debug( + "getting info and eligible products for `%s`", + dataset_id, + ) + dataset_info = data_store.dataset_info(dataset_id=dataset_id) + try: + eligible_prods = { + prod_name: prod_info + for prod_name, prod_info in dataset_info["products"].items() + if is_role_eligible_for_product( + product_role_name=prod_info.get("role"), + user_roles_names=user_roles_names, + ) + } + except KeyError as err: + log.error( + "dataset `%s` does not have products defined", + dataset_id, + exc_info=True, + ) + raise exc.MissingKeyInCatalogEntryError( + key="products", dataset=dataset_id + ) from err + else: + if len(eligible_prods) == 0: + log.debug( + "no eligible products for dataset `%s` for the role `%s`." + " dataset skipped", + dataset_id, + user_roles_names, + ) + else: + dataset_info["products"] = eligible_prods + datasets.append(dataset_info) + return datasets + + +@log_execution_time(log) +@assert_product_exists +def get_product_details( + user_roles_names: list[str], + dataset_id: str, + product_id: Optional[str] = None, +) -> dict: + """Realize the logic for the endpoint: + + `GET /datasets/{dataset_id}/{product_id}` + + Get details for the given product indicated by `dataset_id` + and `product_id` arguments. + + Parameters + ---------- + user_roles_names : list of str + List of user's roles + dataset_id : str + ID of the dataset + product_id : optional, str + ID of the product. If `None` the 1st product will be considered + + Returns + ------- + details : dict + Details for the given product + + Raises + ------- + AuthorizationFailed + If user is not authorized for the resources + """ + log.debug( + "getting details for eligible products of `%s`", + dataset_id, + ) + try: + if product_id: + return data_store.product_details( + dataset_id=dataset_id, + product_id=product_id, + role=user_roles_names, + use_cache=True, + ) + else: + return data_store.first_eligible_product_details( + dataset_id=dataset_id, role=user_roles_names, use_cache=True + ) + except datastore_exception.UnauthorizedError as err: + raise exc.AuthorizationFailed from err + + +@log_execution_time(log) +@assert_product_exists +def get_metadata(dataset_id: str, product_id: str): + """Realize the logic for the endpoint: + + `GET /datasets/{dataset_id}/{product_id}/metadata` + + Get metadata for the product. 
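The product filtering in `get_datasets` above is driven by `is_role_eligible_for_product` from `auth.manager` (added earlier in this patch). As a quick illustration of the rules it encodes — the role names below are hypothetical, and the snippet assumes it runs inside the API container where `auth.manager` is importable:

```python
# Illustrative only: the product/user role names are made up for the example.
from auth.manager import is_role_eligible_for_product

# A product with no role, or the "public" role, is visible to everyone.
assert is_role_eligible_for_product(None, None)
assert is_role_eligible_for_product("public", ["any_role"])

# "admin" users see everything; otherwise the user needs the matching role.
assert is_role_eligible_for_product("restricted", ["admin"])
assert is_role_eligible_for_product("restricted", ["restricted", "other"])
assert not is_role_eligible_for_product("restricted", ["other"])

# No roles at all is treated as the public profile, so restricted products stay hidden.
assert not is_role_eligible_for_product("restricted", None)
```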
+ + Parameters + ---------- + dataset_id : str + ID of the dataset + product_id : str + ID of the product + """ + log.debug( + "getting metadata for '{dataset_id}.{product_id}'", + ) + return data_store.product_metadata(dataset_id, product_id) + + +@log_execution_time(log) +@assert_product_exists +def estimate( + dataset_id: str, + product_id: str, + query: GeoQuery, + unit: Optional[str] = None, +): + """Realize the logic for the nedpoint: + + `POST /datasets/{dataset_id}/{product_id}/estimate` + + Estimate the size of the resulting data. + No authentication is needed for estimation query. + + Parameters + ---------- + dataset_id : str + ID of the dataset + product_id : str + ID of the product + query : GeoQuery + Query to perform + unit : str + One of unit [bytes, kB, MB, GB] to present the result. If `None`, + unit will be inferred. + + Returns + ------- + size_details : dict + Estimated size of the query in the form: + ```python + { + "value": val, + "units": units + } + ``` + """ + query_bytes_estimation = data_store.estimate(dataset_id, product_id, query) + return make_bytes_readable_dict( + size_bytes=query_bytes_estimation, units=unit + ) + + +@log_execution_time(log) +@assert_product_exists +def async_query( + user_id: str, + dataset_id: str, + product_id: str, + query: GeoQuery, +): + """Realize the logic for the endpoint: + + `POST /datasets/{dataset_id}/{product_id}/execute` + + Query the data and return the ID of the request. + + Parameters + ---------- + user_id : str + ID of the user executing the query + dataset_id : str + ID of the dataset + product_id : str + ID of the product + query : GeoQuery + Query to perform + + Returns + ------- + request_id : int + ID of the request + + Raises + ------- + MaximumAllowedSizeExceededError + if the allowed size is below the estimated one + EmptyDatasetError + if estimated size is zero + + """ + log.debug("geoquery: %s", query) + if _is_etimate_enabled(dataset_id, product_id): + estimated_size = estimate(dataset_id, product_id, query, "GB").get("value") + allowed_size = data_store.product_metadata(dataset_id, product_id).get( + "maximum_query_size_gb", DEFAULT_MAX_REQUEST_SIZE_GB + ) + if estimated_size > allowed_size: + raise exc.MaximumAllowedSizeExceededError( + dataset_id=dataset_id, + product_id=product_id, + estimated_size_gb=estimated_size, + allowed_size_gb=allowed_size, + ) + if estimated_size == 0.0: + raise exc.EmptyDatasetError( + dataset_id=dataset_id, product_id=product_id + ) + broker_conn = pika.BlockingConnection( + pika.ConnectionParameters( + host=os.getenv("BROKER_SERVICE_HOST", "broker") + ) + ) + broker_channel = broker_conn.channel() + + request_id = DBManager().create_request( + user_id=user_id, + dataset=dataset_id, + product=product_id, + query=json.dumps(query.model_dump_original()), + ) + + # TODO: find a separator; for the moment use "\" + message = MESSAGE_SEPARATOR.join( + [str(request_id), "query", dataset_id, product_id, query.json()] + ) + + broker_channel.basic_publish( + exchange="", + routing_key="query_queue", + body=message, + properties=pika.BasicProperties( + delivery_mode=2, # make message persistent + ), + ) + broker_conn.close() + return request_id + +@log_execution_time(log) +@assert_product_exists +def sync_query( + user_id: str, + dataset_id: str, + product_id: str, + query: GeoQuery, +): + """Realize the logic for the endpoint: + + `POST /datasets/{dataset_id}/{product_id}/execute` + + Query the data and return the result of the request. 
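`async_query` above only publishes work; the actual retrieval runs in a separate executor that is not part of this patch. For context, a minimal sketch of how such a consumer could unpack the separator-joined messages sent to `query_queue` — the callback body and connection setup are assumptions, only the message layout comes from `async_query` and `run_workflow`:

```python
# Hypothetical consumer sketch; only the message layout is taken from this patch.
import os
import pika

MESSAGE_SEPARATOR = os.environ["MESSAGE_SEPARATOR"]

def on_message(channel, method, properties, body):
    request_id, message_type, *rest = body.decode().split(MESSAGE_SEPARATOR)
    if message_type == "query":
        # request_id | "query" | dataset_id | product_id | GeoQuery JSON
        dataset_id, product_id, query_json = rest
    elif message_type == "workflow":
        # request_id | "workflow" | Workflow JSON
        (workflow_json,) = rest
    ...  # run the retrieval/workflow and update the request status via DBManager
    channel.basic_ack(delivery_tag=method.delivery_tag)

connection = pika.BlockingConnection(
    pika.ConnectionParameters(host=os.getenv("BROKER_SERVICE_HOST", "broker"))
)
channel = connection.channel()
channel.basic_consume(queue="query_queue", on_message_callback=on_message)
channel.start_consuming()
```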
+ + Parameters + ---------- + user_id : str + ID of the user executing the query + dataset_id : str + ID of the dataset + product_id : str + ID of the product + query : GeoQuery + Query to perform + + Returns + ------- + request_id : int + ID of the request + + Raises + ------- + MaximumAllowedSizeExceededError + if the allowed size is below the estimated one + EmptyDatasetError + if estimated size is zero + + """ + + import time + request_id = async_query(user_id, dataset_id, product_id, query) + status, _ = DBManager().get_request_status_and_reason(request_id) + log.debug("sync query: status: %s", status) + while status in (RequestStatus.RUNNING, RequestStatus.QUEUED, + RequestStatus.PENDING): + time.sleep(1) + status, _ = DBManager().get_request_status_and_reason(request_id) + log.debug("sync query: status: %s", status) + + if status is RequestStatus.DONE: + download_details = DBManager().get_download_details_for_request_id( + request_id + ) + return FileResponse( + path=download_details.location_path, + filename=download_details.location_path.split(os.sep)[-1], + ) + raise exc.ProductRetrievingError( + dataset_id=dataset_id, + product_id=product_id, + status=status.name) + + +@log_execution_time(log) +def run_workflow( + user_id: str, + workflow: Workflow, +): + """Realize the logic for the endpoint: + + `POST /datasets/workflow` + + Schedule the workflow and return the ID of the request. + + Parameters + ---------- + user_id : str + ID of the user executing the query + workflow : Workflow + Workflow to perform + + Returns + ------- + request_id : int + ID of the request + + Raises + ------- + MaximumAllowedSizeExceededError + if the allowed size is below the estimated one + EmptyDatasetError + if estimated size is zero + + """ + log.debug("geoquery: %s", workflow) + broker_conn = pika.BlockingConnection( + pika.ConnectionParameters( + host=os.getenv("BROKER_SERVICE_HOST", "broker") + ) + ) + broker_channel = broker_conn.channel() + request_id = DBManager().create_request( + user_id=user_id, + dataset=workflow.dataset_id, + product=workflow.product_id, + query=workflow.json(), + ) + + # TODO: find a separator; for the moment use "\" + message = MESSAGE_SEPARATOR.join( + [str(request_id), "workflow", workflow.json()] + ) + + broker_channel.basic_publish( + exchange="", + routing_key="query_queue", + body=message, + properties=pika.BasicProperties( + delivery_mode=2, # make message persistent + ), + ) + broker_conn.close() + return request_id diff --git a/api/app/endpoint_handlers/file.py b/api/app/endpoint_handlers/file.py new file mode 100644 index 0000000..04cf562 --- /dev/null +++ b/api/app/endpoint_handlers/file.py @@ -0,0 +1,66 @@ +"""Module with functions to handle file related endpoints""" +import os + +from fastapi.responses import FileResponse +from dbmanager.dbmanager import DBManager, RequestStatus + +from utils.api_logging import get_dds_logger +from utils.metrics import log_execution_time +import exceptions as exc + +log = get_dds_logger(__name__) + + +@log_execution_time(log) +def download_request_result(request_id: int): + """Realize the logic for the endpoint: + + `GET /download/{request_id}` + + Get location path of the file being the result of + the request with `request_id`. 
+ + Parameters + ---------- + request_id : int + ID of the request + + Returns + ------- + path : str + The location of the resulting file + + Raises + ------- + RequestNotYetAccomplished + If dds request was not yet finished + FileNotFoundError + If file was not found + """ + log.debug( + "preparing downloads for request id: %s", + request_id, + ) + ( + request_status, + _, + ) = DBManager().get_request_status_and_reason(request_id=request_id) + if request_status is not RequestStatus.DONE: + log.debug( + "request with id: '%s' does not exist or it is not finished yet!", + request_id, + ) + raise exc.RequestNotYetAccomplished(request_id=request_id) + download_details = DBManager().get_download_details_for_request( + request_id=request_id + ) + if not os.path.exists(download_details.location_path): + log.error( + "file '%s' does not exists!", + download_details.location_path, + ) + raise FileNotFoundError + return FileResponse( + path=download_details.location_path, + filename=download_details.location_path.split(os.sep)[-1], + ) diff --git a/api/app/endpoint_handlers/request.py b/api/app/endpoint_handlers/request.py new file mode 100644 index 0000000..93a0636 --- /dev/null +++ b/api/app/endpoint_handlers/request.py @@ -0,0 +1,144 @@ +"""Modules with functions realizing logic for requests-related endpoints""" +from dbmanager.dbmanager import DBManager + +from utils.api_logging import get_dds_logger +from utils.metrics import log_execution_time +import exceptions as exc + +log = get_dds_logger(__name__) + + +@log_execution_time(log) +def get_requests(user_id: str): + """Realize the logic for the endpoint: + + `GET /requests` + + Get details of all requests for the user. + + Parameters + ---------- + user_id : str + ID of the user for whom requests are taken + + Returns + ------- + requests : list + List of all requests done by the user + """ + return DBManager().get_requests_for_user_id(user_id=user_id) + + +@log_execution_time(log) +def get_request_status(user_id: str, request_id: int): + """Realize the logic for the endpoint: + + `GET /requests/{request_id}/status` + + Get request status and the reason of the eventual fail. + The second item is `None`, it status is other than failed. + + Parameters + ---------- + user_id : str + ID of the user whose request's status is about to be checed + request_id : int + ID of the request + + Returns + ------- + status : tuple + Tuple of status and fail reason. 
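Seen from a client, the request handlers here combine with the execute and download endpoints into a submit/poll/download loop. A rough client-side sketch using the `requests` library — the base URL, token, dataset/product IDs and query body are placeholders, not values defined by this patch:

```python
# Illustrative client; URL, token, dataset/product ids and query body are placeholders.
import time
import requests

API = "http://localhost/api"                            # assumed deployment prefix
HEADERS = {"User-Token": "<uuid4-user-id>:<api-key>"}   # format checked by DDSAuthenticationBackend

query = {"variable": ["some_variable"], "time": {"year": 2023, "month": 1, "day": 1}}

request_id = requests.post(
    f"{API}/datasets/some-dataset/some-product/execute", json=query, headers=HEADERS
).json()

while True:
    reply = requests.get(f"{API}/requests/{request_id}/status", headers=HEADERS).json()
    if reply["status"] not in ("PENDING", "QUEUED", "RUNNING"):
        break
    time.sleep(5)

if reply["status"] == "DONE":
    result = requests.get(f"{API}/download/{request_id}", headers=HEADERS)
    with open("result_file", "wb") as out:              # actual format depends on the query
        out.write(result.content)
```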
+ """ + # NOTE: maybe verification should be added if user checks only him\her requests + try: + status, reason = DBManager().get_request_status_and_reason(request_id) + except IndexError as err: + log.error( + "request with id: '%s' was not found!", + request_id, + ) + raise exc.RequestNotFound(request_id=request_id) from err + return {"status": status.name, "fail_reason": reason} + + +@log_execution_time(log) +def get_request_resulting_size(request_id: int): + """Realize the logic for the endpoint: + + `GET /requests/{request_id}/size` + + Get size of the file being the result of the request with `request_id` + + Parameters + ---------- + request_id : int + ID of the request + + Returns + ------- + size : int + Size in bytes + + Raises + ------- + RequestNotFound + If the request was not found + """ + if request := DBManager().get_request_details(request_id): + size = request.download.size_bytes + if not size or size == 0: + raise exc.EmptyDatasetError(dataset_id=request.dataset, + product_id=request.product) + return size + log.info( + "request with id '%s' could not be found", + request_id, + ) + raise exc.RequestNotFound(request_id=request_id) + + +@log_execution_time(log) +def get_request_uri(request_id: int): + """ + Realize the logic for the endpoint: + + `GET /requests/{request_id}/uri` + + Get URI for the request. + + Parameters + ---------- + request_id : int + ID of the request + + Returns + ------- + uri : str + URI for the download associated with the given request + """ + try: + download_details = DBManager().get_download_details_for_request_id( + request_id + ) + except IndexError as err: + log.error( + "request with id: '%s' was not found!", + request_id, + ) + raise exc.RequestNotFound(request_id=request_id) from err + if download_details is None: + ( + request_status, + _, + ) = DBManager().get_request_status_and_reason(request_id) + log.info( + "download URI not found for request id: '%s'." + " Request status is '%s'", + request_id, + request_status, + ) + raise exc.RequestStatusNotDone( + request_id=request_id, request_status=request_status + ) + return download_details.download_uri diff --git a/api/app/exceptions.py b/api/app/exceptions.py new file mode 100644 index 0000000..af4d072 --- /dev/null +++ b/api/app/exceptions.py @@ -0,0 +1,195 @@ +"""Module with DDS exceptions definitions""" +from typing import Optional + +from fastapi import HTTPException + + +class BaseDDSException(BaseException): + """Base class for DDS.api exceptions""" + + msg: str = "Bad request" + code: int = 400 + + def wrap_around_http_exception(self) -> HTTPException: + """Wrap an exception around `fastapi.HTTPExcetion`""" + return HTTPException( + status_code=self.code, + detail=self.msg, + ) + + +class EmptyUserTokenError(BaseDDSException): + """Raised if `User-Token` is empty""" + + msg: str = "User-Token cannot be empty!" + + +class ImproperUserTokenError(BaseDDSException): + """Raised if `User-Token` format is wrong""" + + msg: str = ( + "The format of the User-Token is wrong. It should be be in the format" + " :!" 
+ ) + + +class NoEligibleProductInDatasetError(BaseDDSException): + """No eligible products in the dataset Error""" + + msg: str = ( + "No eligible products for the dataset '{dataset_id}' for the user" + " with roles '{user_roles_names}'" + ) + + def __init__(self, dataset_id: str, user_roles_names: list[str]) -> None: + self.msg = self.msg.format( + dataset_id=dataset_id, user_roles_names=user_roles_names + ) + super().__init__(self.msg) + + +class MissingKeyInCatalogEntryError(BaseDDSException): + """Missing key in the catalog entry""" + + msg: str = ( + "There is missing '{key}' in the catalog for '{dataset}' dataset." + ) + + def __init__(self, key, dataset): + self.msg = self.msg.format(key=key, dataset=dataset) + super().__init__(self.msg) + + +class MaximumAllowedSizeExceededError(BaseDDSException): + """Estimated size is too big""" + + msg: str = ( + "Maximum allowed size for '{dataset_id}.{product_id}' is" + " {allowed_size_gb:.2f} GB but the estimated size is" + " {estimated_size_gb:.2f} GB" + ) + + def __init__( + self, dataset_id, product_id, estimated_size_gb, allowed_size_gb + ): + self.msg = self.msg.format( + dataset_id=dataset_id, + product_id=product_id, + allowed_size_gb=allowed_size_gb, + estimated_size_gb=estimated_size_gb, + ) + super().__init__(self.msg) + + +class RequestNotYetAccomplished(BaseDDSException): + """Raised if dds request was not finished yet""" + + msg: str = ( + "Request with id: {request_id} does not exist or it is not" + " finished yet!" + ) + + def __init__(self, request_id): + self.msg = self.msg.format(request_id=request_id) + super().__init__(self.msg) + + +class RequestNotFound(BaseDDSException): + """If the given request could not be found""" + + msg: str = "Request with ID '{request_id}' was not found" + + def __init__(self, request_id: int) -> None: + self.msg = self.msg.format(request_id=request_id) + super().__init__(self.msg) + + +class RequestStatusNotDone(BaseDDSException): + """Raised when the submitted request failed""" + + msg: str = ( + "Request with id: `{request_id}` does not have download. URI. Its" + " status is: `{request_status}`!" + ) + + def __init__(self, request_id, request_status) -> None: + self.msg = self.msg.format( + request_id=request_id, request_status=request_status + ) + super().__init__(self.msg) + + +class AuthorizationFailed(BaseDDSException): + """Raised when the user is not authorized for the given resource""" + + msg: str = "{user} is not authorized for the resource!" + code: int = 403 + + def __init__(self, user_id: Optional[str] = None): + if user_id is None: + self.msg = self.msg.format(user="User") + else: + self.msg = self.msg.format(user=f"User '{user_id}'") + super().__init__(self.msg) + + +class AuthenticationFailed(BaseDDSException): + """Raised when the key of the provided user differs from the one s + tored in the DB""" + + msg: str = "Authentication of the user '{user_id}' failed!" + code: int = 401 + + def __init__(self, user_id: str): + self.msg = self.msg.format(user_id=user_id) + super().__init__(self.msg) + + +class MissingDatasetError(BaseDDSException): + """Raied if the queried dataset is not present in the catalog""" + + msg: str = "Dataset '{dataset_id}' does not exist in the catalog!" 
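Every concrete error in this module follows the same recipe: a class-level `msg` template formatted in `__init__`, plus an optional `code` that `wrap_around_http_exception` turns into the HTTP status. A hypothetical new error (not part of this patch) would therefore look like:

```python
# Hypothetical example of the established pattern; not an exception used by this patch.
class TooManyPendingRequestsError(BaseDDSException):
    """Raised when a user exceeds the allowed number of pending requests"""

    msg: str = "User '{user_id}' already has {count} pending requests!"
    code: int = 429

    def __init__(self, user_id: str, count: int) -> None:
        self.msg = self.msg.format(user_id=user_id, count=count)
        super().__init__(self.msg)
```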
+ + def __init__(self, dataset_id: str): + self.msg = self.msg.format(dataset_id=dataset_id) + super().__init__(self.msg) + + +class MissingProductError(BaseDDSException): + """Raised if the requested product is not defined for the dataset""" + + msg: str = ( + "Product '{dataset_id}.{product_id}' does not exist in the catalog!" + ) + + def __init__(self, dataset_id: str, product_id: str): + self.msg = self.msg.format( + dataset_id=dataset_id, product_id=product_id + ) + super().__init__(self.msg) + + +class EmptyDatasetError(BaseDDSException): + """The size of the requested dataset is zero""" + + msg: str = "The resulting dataset '{dataset_id}.{product_id}' is empty" + + def __init__(self, dataset_id, product_id): + self.msg = self.msg.format( + dataset_id=dataset_id, + product_id=product_id, + ) + super().__init__(self.msg) + +class ProductRetrievingError(BaseDDSException): + """Retrieving of the product failed.""" + + msg: str = "Retrieving of the product '{dataset_id}.{product_id}' failed with the status {status}" + + def __init__(self, dataset_id, product_id, status): + self.msg = self.msg.format( + dataset_id=dataset_id, + product_id=product_id, + status=status + ) + super().__init__(self.msg) \ No newline at end of file diff --git a/api/app/main.py b/api/app/main.py index 2712586..2084394 100644 --- a/api/app/main.py +++ b/api/app/main.py @@ -1,72 +1,468 @@ -from fastapi import FastAPI -import pika -from enum import Enum -from pydantic import BaseModel -from db.dbmanager.dbmanager import DBManager -from geoquery.geoquery import GeoQuery - -app = FastAPI() -db_conn = None -## -# RabbitMQ Broker Connection -broker_conn = pika.BlockingConnection(pika.ConnectionParameters(host='broker')) -broker_chann = broker_conn.channel() - -@app.get("/") +"""Main module with dekube-dds API endpoints defined""" +__version__ = "2.0" +import os +from typing import Optional + +from datetime import datetime + +from fastapi import FastAPI, HTTPException, Request, status, Query +from fastapi.middleware.cors import CORSMiddleware +from starlette.middleware.authentication import AuthenticationMiddleware +from starlette.authentication import requires + +from aioprometheus import ( + Counter, + Summary, + timer, + MetricsMiddleware, +) +from aioprometheus.asgi.starlette import metrics + +from intake_geokube.queries.workflow import Workflow +from intake_geokube.queries.geoquery import GeoQuery + +from utils.api_logging import get_dds_logger +import exceptions as exc +from endpoint_handlers import ( + dataset_handler, + file_handler, + request_handler, +) +from auth.backend import DDSAuthenticationBackend +from callbacks import all_onstartup_callbacks +from encoders import extend_json_encoders +from const import venv, tags +from auth import scopes + +def map_to_geoquery( + variables: list[str], + format: str, + bbox: str | None = None, # minx, miny, maxx, maxy (minlon, minlat, maxlon, maxlat) + time: datetime | None = None, + **format_kwargs +) -> GeoQuery: + + bbox_ = [float(x) for x in bbox.split(',')] + area = { 'west': bbox_[0], 'south': bbox_[1], 'east': bbox_[2], 'north': bbox_[3], } + time_ = { 'year': time.year, 'month': time.month, 'day': time.day, 'hour': time.hour} + query = GeoQuery(variable=variables, time=time_, area=area, + format_args=format_kwargs, format=format) + return query + +logger = get_dds_logger(__name__) + +# ======== JSON encoders extension ========= # +extend_json_encoders() + +app = FastAPI( + title="geokube-dds API", + description="REST API for geokube-dds", + version=__version__, 
+ contact={ + "name": "geokube Contributors", + "email": "geokube@googlegroups.com", + }, + license_info={ + "name": "Apache 2.0", + "url": "https://www.apache.org/licenses/LICENSE-2.0.html", + }, + root_path=os.environ.get(venv.ENDPOINT_PREFIX, "/api"), + on_startup=all_onstartup_callbacks, +) + +# ======== Authentication backend ========= # +app.add_middleware( + AuthenticationMiddleware, backend=DDSAuthenticationBackend() +) + +# ======== CORS ========= # +cors_kwargs: dict[str, str | list[str]] +if venv.ALLOWED_CORS_ORIGINS_REGEX in os.environ: + cors_kwargs = { + "allow_origin_regex": os.environ[venv.ALLOWED_CORS_ORIGINS_REGEX] + } +else: + cors_kwargs = {"allow_origins": ["*"]} + +app.add_middleware( + CORSMiddleware, + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], + **cors_kwargs, +) + + +# ======== Prometheus metrics ========= # +app.add_middleware(MetricsMiddleware) +app.add_route("/metrics", metrics) + +app.state.api_request_duration_seconds = Summary( + "api_request_duration_seconds", "Requests duration" +) +app.state.api_http_requests_total = Counter( + "api_http_requests_total", "Total number of requests" +) + + +# ======== Endpoints definitions ========= # +@app.get("/", tags=[tags.BASIC]) async def dds_info(): - return {"DDS API 2.0"} - -@app.get("/datasets") -async def datasets(): - return {"List of Datasets"} - -@app.get("/datasets/{dataset_id}") -async def dataset(dataset_id: str): - return {f"Dataset Info {dataset_id}"} - -@app.get("/datasets/{dataset_id}/{product_id}") -async def dataset(dataset_id: str, product_id: str): - return {f"Product Info {product_id} from dataset {dataset_id}"} - -@app.post("/datasets/{dataset_id}/{product_id}/estimate") -async def estimate(dataset_id: str, product_id: str, query: GeoQuery): - return {f'estimate size for {dataset_id} {product_id} is 10GB'} - -@app.post("/datasets/{dataset_id}/{product_id}/execute") -async def query(dataset_id: str, product_id: str, format: str, query: GeoQuery): - global db_conn - if not db_conn: - db_conn = DBManager() -# -# -# TODO: Validation Query Schema -# TODO: estimate the size and will not execute if it is above the limit -# -# - request_id = db_conn.create_request(dataset=dataset_id, product=product_id, query=query.json()) - print(f"request id: {request_id}") - -# we should find a separator; for the moment use "\" - message = f'{request_id}\\{dataset_id}\\{product_id}\\{query.json()}\\{format}' - -# submit request to broker queue - broker_chann.basic_publish( - exchange='', - routing_key='query_queue', - body=message, - properties=pika.BasicProperties( - delivery_mode=2, # make message persistent - )) - return request_id - -@app.get("/requests") -async def get_requests(): - return - -@app.get("/requests/{request_id}/status") -async def get_request_status(request_id: int): - return db_conn.get_request_status(request_id) - -@app.get("/requests/{request_id}/uri") -async def get_request_uri(request_id: int): - return \ No newline at end of file + """Return current version of the DDS API""" + return f"DDS API {__version__}" + + +@app.get("/datasets", tags=[tags.DATASET]) +@timer( + app.state.api_request_duration_seconds, labels={"route": "GET /datasets"} +) +async def get_datasets(request: Request): + """List all products eligible for a user defined by user_token""" + app.state.api_http_requests_total.inc({"route": "GET /datasets"}) + try: + return dataset_handler.get_datasets( + user_roles_names=request.auth.scopes + ) + except exc.BaseDDSException as err: + raise 
err.wrap_around_http_exception() from err + + +@app.get("/datasets/{dataset_id}", tags=[tags.DATASET]) +@timer( + app.state.api_request_duration_seconds, + labels={"route": "GET /datasets/{dataset_id}"}, +) +async def get_first_product_details( + request: Request, + dataset_id: str, +): + """Get details for the 1st product of the dataset""" + app.state.api_http_requests_total.inc( + {"route": "GET /datasets/{dataset_id}"} + ) + try: + return dataset_handler.get_product_details( + user_roles_names=request.auth.scopes, + dataset_id=dataset_id, + ) + except exc.BaseDDSException as err: + raise err.wrap_around_http_exception() from err + + +@app.get("/datasets/{dataset_id}/{product_id}", tags=[tags.DATASET]) +@timer( + app.state.api_request_duration_seconds, + labels={"route": "GET /datasets/{dataset_id}/{product_id}"}, +) +async def get_product_details( + request: Request, + dataset_id: str, + product_id: str, +): + """Get details for the requested product if user is authorized""" + app.state.api_http_requests_total.inc( + {"route": "GET /datasets/{dataset_id}/{product_id}"} + ) + try: + return dataset_handler.get_product_details( + user_roles_names=request.auth.scopes, + dataset_id=dataset_id, + product_id=product_id, + ) + except exc.BaseDDSException as err: + raise err.wrap_around_http_exception() from err + +@app.get("/datasets/{dataset_id}/{product_id}/map", tags=[tags.DATASET]) +@timer( + app.state.api_request_duration_seconds, + labels={"route": "GET /datasets/{dataset_id}/{product_id}"}, +) +async def get_map( + request: Request, + dataset_id: str, + product_id: str, +# OGC WMS parameters + width: int, + height: int, + layers: str | None = None, + format: str | None = 'png', + time: datetime | None = None, + transparent: bool | None = 'true', + bgcolor: str | None = 'FFFFFF', + bbox: str | None = None, # minx, miny, maxx, maxy (minlon, minlat, maxlon, maxlat) + crs: str | None = None, +# OGC map parameters + # subset: str | None = None, + # subset_crs: str | None = Query(..., alias="subset-crs"), + # bbox_crs: str | None = Query(..., alias="bbox-crs"), +): + + app.state.api_http_requests_total.inc( + {"route": "GET /datasets/{dataset_id}/{product_id}/map"} + ) + # query should be the OGC query + # map OGC parameters to GeoQuery + # variable: Optional[Union[str, List[str]]] + # time: Optional[Union[Dict[str, str], Dict[str, List[str]]]] + # area: Optional[Dict[str, float]] + # location: Optional[Dict[str, Union[float, List[float]]]] + # vertical: Optional[Union[float, List[float], Dict[str, float]]] + # filters: Optional[Dict] + # format: Optional[str] + query = map_to_geoquery(variables=layers, bbox=bbox, time=time, + format="png", width=width, height=height, + transparent=transparent, bgcolor=bgcolor) + try: + return dataset_handler.sync_query( + user_id=request.user.id, + dataset_id=dataset_id, + product_id=product_id, + query=query + ) + except exc.BaseDDSException as err: + raise err.wrap_around_http_exception() from err + +@app.get("/datasets/{dataset_id}/{product_id}/items/{feature_id}", tags=[tags.DATASET]) +@timer( + app.state.api_request_duration_seconds, + labels={"route": "GET /datasets/{dataset_id}/{product_id}/items/{feature_id}"}, +) +async def get_feature( + request: Request, + dataset_id: str, + product_id: str, + feature_id: str, +# OGC feature parameters + time: datetime | None = None, + bbox: str | None = None, # minx, miny, maxx, maxy (minlon, minlat, maxlon, maxlat) + crs: str | None = None, +# OGC map parameters + # subset: str | None = None, + # subset_crs: str 
| None = Query(..., alias="subset-crs"), + # bbox_crs: str | None = Query(..., alias="bbox-crs"), +): + + app.state.api_http_requests_total.inc( + {"route": "GET /datasets/{dataset_id}/{product_id}/items/{feature_id}"} + ) + # query should be the OGC query + # feature OGC parameters to GeoQuery + # variable: Optional[Union[str, List[str]]] + # time: Optional[Union[Dict[str, str], Dict[str, List[str]]]] + # area: Optional[Dict[str, float]] + # location: Optional[Dict[str, Union[float, List[float]]]] + # vertical: Optional[Union[float, List[float], Dict[str, float]]] + # filters: Optional[Dict] + # format: Optional[str] + + query = map_to_geoquery(variables=[feature_id], bbox=bbox, time=time, + format="geojson") + try: + return dataset_handler.sync_query( + user_id=request.user.id, + dataset_id=dataset_id, + product_id=product_id, + query=query + ) + except exc.BaseDDSException as err: + raise err.wrap_around_http_exception() from err + +@app.get("/datasets/{dataset_id}/{product_id}/metadata", tags=[tags.DATASET]) +@timer( + app.state.api_request_duration_seconds, + labels={"route": "GET /datasets/{dataset_id}/{product_id}/metadata"}, +) +async def get_metadata( + request: Request, + dataset_id: str, + product_id: str, +): + """Get metadata of the given product""" + app.state.api_http_requests_total.inc( + {"route": "GET /datasets/{dataset_id}/{product_id}/metadata"} + ) + try: + return dataset_handler.get_metadata( + dataset_id=dataset_id, product_id=product_id + ) + except exc.BaseDDSException as err: + raise err.wrap_around_http_exception() from err + + +@app.post("/datasets/{dataset_id}/{product_id}/estimate", tags=[tags.DATASET]) +@timer( + app.state.api_request_duration_seconds, + labels={"route": "POST /datasets/{dataset_id}/{product_id}/estimate"}, +) +async def estimate( + request: Request, + dataset_id: str, + product_id: str, + query: GeoQuery, + unit: str = None, +): + """Estimate the resulting size of the query""" + app.state.api_http_requests_total.inc( + {"route": "POST /datasets/{dataset_id}/{product_id}/estimate"} + ) + try: + return dataset_handler.estimate( + dataset_id=dataset_id, + product_id=product_id, + query=query, + unit=unit, + ) + except exc.BaseDDSException as err: + raise err.wrap_around_http_exception() from err + + +@app.post("/datasets/{dataset_id}/{product_id}/execute", tags=[tags.DATASET]) +@timer( + app.state.api_request_duration_seconds, + labels={"route": "POST /datasets/{dataset_id}/{product_id}/execute"}, +) +@requires([scopes.AUTHENTICATED]) +async def query( + request: Request, + dataset_id: str, + product_id: str, + query: GeoQuery, +): + """Schedule the job of data retrieve""" + app.state.api_http_requests_total.inc( + {"route": "POST /datasets/{dataset_id}/{product_id}/execute"} + ) + try: + return dataset_handler.async_query( + user_id=request.user.id, + dataset_id=dataset_id, + product_id=product_id, + query=query, + ) + except exc.BaseDDSException as err: + raise err.wrap_around_http_exception() from err + + +@app.post("/datasets/workflow", tags=[tags.DATASET]) +@timer( + app.state.api_request_duration_seconds, + labels={"route": "POST /datasets/workflow"}, +) +@requires([scopes.AUTHENTICATED]) +async def workflow( + request: Request, + tasks: Workflow, +): + """Schedule the job of workflow processing""" + app.state.api_http_requests_total.inc({"route": "POST /datasets/workflow"}) + try: + return dataset_handler.run_workflow( + user_id=request.user.id, + workflow=tasks, + ) + except exc.BaseDDSException as err: + raise 
err.wrap_around_http_exception() from err + + +@app.get("/requests", tags=[tags.REQUEST]) +@timer( + app.state.api_request_duration_seconds, labels={"route": "GET /requests"} +) +@requires([scopes.AUTHENTICATED]) +async def get_requests( + request: Request, +): + """Get all requests for the user""" + app.state.api_http_requests_total.inc({"route": "GET /requests"}) + try: + return request_handler.get_requests(request.user.id) + except exc.BaseDDSException as err: + raise err.wrap_around_http_exception() from err + + +@app.get("/requests/{request_id}/status", tags=[tags.REQUEST]) +@timer( + app.state.api_request_duration_seconds, + labels={"route": "GET /requests/{request_id}/status"}, +) +@requires([scopes.AUTHENTICATED]) +async def get_request_status( + request: Request, + request_id: int, +): + """Get status of the request without authentication""" + app.state.api_http_requests_total.inc( + {"route": "GET /requests/{request_id}/status"} + ) + try: + return request_handler.get_request_status( + user_id=request.user.id, request_id=request_id + ) + except exc.BaseDDSException as err: + raise err.wrap_around_http_exception() from err + + +@app.get("/requests/{request_id}/size", tags=[tags.REQUEST]) +@timer( + app.state.api_request_duration_seconds, + labels={"route": "GET /requests/{request_id}/size"}, +) +@requires([scopes.AUTHENTICATED]) +async def get_request_resulting_size( + request: Request, + request_id: int, +): + """Get size of the file being the result of the request""" + app.state.api_http_requests_total.inc( + {"route": "GET /requests/{request_id}/size"} + ) + try: + return request_handler.get_request_resulting_size( + request_id=request_id + ) + except exc.BaseDDSException as err: + raise err.wrap_around_http_exception() from err + + +@app.get("/requests/{request_id}/uri", tags=[tags.REQUEST]) +@timer( + app.state.api_request_duration_seconds, + labels={"route": "GET /requests/{request_id}/uri"}, +) +@requires([scopes.AUTHENTICATED]) +async def get_request_uri( + request: Request, + request_id: int, +): + """Get download URI for the request""" + app.state.api_http_requests_total.inc( + {"route": "GET /requests/{request_id}/uri"} + ) + try: + return request_handler.get_request_uri(request_id=request_id) + except exc.BaseDDSException as err: + raise err.wrap_around_http_exception() from err + + +@app.get("/download/{request_id}", tags=[tags.REQUEST]) +@timer( + app.state.api_request_duration_seconds, + labels={"route": "GET /download/{request_id}"}, +) +# @requires([scopes.AUTHENTICATED]) # TODO: mange download auth in the web component +async def download_request_result( + request: Request, + request_id: int, +): + """Download result of the request""" + app.state.api_http_requests_total.inc( + {"route": "GET /download/{request_id}"} + ) + try: + return file_handler.download_request_result(request_id=request_id) + except exc.BaseDDSException as err: + raise err.wrap_around_http_exception() from err + except FileNotFoundError as err: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, detail="File was not found!" 
+ ) from err diff --git a/api/app/validation.py b/api/app/validation.py new file mode 100644 index 0000000..51bdbc1 --- /dev/null +++ b/api/app/validation.py @@ -0,0 +1,36 @@ +from datastore.datastore import Datastore +from utils.api_logging import get_dds_logger +from decorators_factory import assert_parameters_are_defined, bind_arguments +from functools import wraps +from inspect import signature +import exceptions as exc + + +log = get_dds_logger(__name__) + + +def assert_product_exists(func): + """Decorator for convenient checking if product is defined in the catalog + """ + sig = signature(func) + assert_parameters_are_defined( + sig, required_parameters=[("dataset_id", str), ("product_id", str)] + ) + + @wraps(func) + def assert_inner(*args, **kwargs): + args_dict = bind_arguments(sig, *args, **kwargs) + dataset_id = args_dict["dataset_id"] + product_id = args_dict["product_id"] + if dataset_id not in Datastore().dataset_list(): + raise exc.MissingDatasetError(dataset_id=dataset_id) + elif ( + product_id is not None + and product_id not in Datastore().product_list(dataset_id) + ): + raise exc.MissingProductError( + dataset_id=dataset_id, product_id=product_id + ) + return func(*args, **kwargs) + + return assert_inner diff --git a/api/requirements.txt b/api/requirements.txt index e23ebfb..97fcaf3 100644 --- a/api/requirements.txt +++ b/api/requirements.txt @@ -1,6 +1,5 @@ fastapi -pydantic uvicorn pika -intake -sqlalchemy \ No newline at end of file +sqlalchemy +aioprometheus From 3a608ef828f6e363e884834e801f491468564256 Mon Sep 17 00:00:00 2001 From: Marco Mancini Date: Thu, 11 Jan 2024 09:12:52 +0100 Subject: [PATCH 02/15] Update datastore with adjust_for_intake branch --- datastore/Dockerfile | 14 + datastore/datastore.py | 63 --- datastore/datastore/__init__.py | 0 datastore/datastore/const.py | 6 + datastore/datastore/datastore.py | 447 +++++++++++++++++++++ datastore/datastore/exception.py | 5 + datastore/datastore/singleton.py | 29 ++ datastore/datastore/util.py | 27 ++ datastore/dbmanager/__init__.py | 0 datastore/dbmanager/dbmanager.py | 349 ++++++++++++++++ datastore/dbmanager/singleton.py | 21 + datastore/requirements.txt | 2 + datastore/tests/__init__.py | 0 datastore/tests/workflow/__init__.py | 0 datastore/tests/workflow/fixtures.py | 122 ++++++ datastore/tests/workflow/test_operators.py | 20 + datastore/tests/workflow/test_workflow.py | 23 ++ datastore/utils/__init__.py | 0 datastore/utils/api_logging.py | 40 ++ datastore/utils/metrics.py | 33 ++ datastore/wait-for-it.sh | 182 +++++++++ datastore/workflow/__init__.py | 1 + datastore/workflow/workflow.py | 226 +++++++++++ 23 files changed, 1547 insertions(+), 63 deletions(-) create mode 100644 datastore/Dockerfile delete mode 100644 datastore/datastore.py create mode 100644 datastore/datastore/__init__.py create mode 100644 datastore/datastore/const.py create mode 100644 datastore/datastore/datastore.py create mode 100644 datastore/datastore/exception.py create mode 100644 datastore/datastore/singleton.py create mode 100644 datastore/datastore/util.py create mode 100644 datastore/dbmanager/__init__.py create mode 100644 datastore/dbmanager/dbmanager.py create mode 100644 datastore/dbmanager/singleton.py create mode 100644 datastore/requirements.txt create mode 100644 datastore/tests/__init__.py create mode 100644 datastore/tests/workflow/__init__.py create mode 100644 datastore/tests/workflow/fixtures.py create mode 100644 datastore/tests/workflow/test_operators.py create mode 100644 
datastore/tests/workflow/test_workflow.py create mode 100644 datastore/utils/__init__.py create mode 100644 datastore/utils/api_logging.py create mode 100644 datastore/utils/metrics.py create mode 100755 datastore/wait-for-it.sh create mode 100644 datastore/workflow/__init__.py create mode 100644 datastore/workflow/workflow.py diff --git a/datastore/Dockerfile b/datastore/Dockerfile new file mode 100644 index 0000000..9ca2496 --- /dev/null +++ b/datastore/Dockerfile @@ -0,0 +1,14 @@ +ARG REGISTRY=rg.nl-ams.scw.cloud/geokube-production +ARG TAG=latest +FROM $REGISTRY/intake-geokube:$TAG +RUN conda install -c conda-forge --yes --freeze-installed psycopg2 \ + && conda clean -afy +COPY requirements.txt /app/requirements.txt +RUN pip install --no-cache-dir -r /app/requirements.txt +COPY ./datastore /app/datastore +COPY ./workflow /app/workflow +COPY ./dbmanager /app/dbmanager +COPY ./utils /app/utils +COPY ./tests /app/tests +COPY ./wait-for-it.sh / + diff --git a/datastore/datastore.py b/datastore/datastore.py deleted file mode 100644 index 107d821..0000000 --- a/datastore/datastore.py +++ /dev/null @@ -1,63 +0,0 @@ -import intake -from geokube.core.datacube import DataCube -from geokube.core.dataset import Dataset -from typing import Union -from geoquery.geoquery import GeoQuery -import json - -class Datastore(): - - def __init__(self, cat_path: str) -> None: - self.catalog = intake.open_catalog(cat_path) - - def dataset_list(self): - return list(self.catalog) - - def product_list(self, dataset_id: str): - return list(self.catalog[dataset_id]) - - def dataset_info(self, dataset_id: str): - info = {} - entry = self.catalog[dataset_id] - if entry.metadata: - info['metadata'] = entry.metadata - info['products'] = {} - for p in self.products(): - info['products'][p] = self.product_info() - - def product_info(self, dataset_id: str, product_id: str): - info = {} - entry = self.catalog[dataset_id][product_id] - if entry.metadata: - info['metadata'] = entry.metadata - info.update(entry.read_chunked().to_dict()) - return info - - def query(self, dataset: str, product: str, query: Union[GeoQuery, dict, str], compute: bool=False): - """ - :param dataset: dasaset name - :param product: product name - :param query: subset query - :param path: path to store - :return: subsetted geokube of selected dataset product - """ - if isinstance(query, str): - query = json.loads(query) - if isinstance(query, dict): - query = GeoQuery(**query) - kube = self.catalog[dataset][product].read_chunked() - if isinstance(kube, Dataset): - kube = kube.filter(query.filters) - if query.variable: - kube = kube[query.variable] - if query.area: - kube = kube.geobbox(query.area) - if query.locations: - kube = kube.locations(**query.locations) - if query.time: - kube = kube.sel(query.time) - if query.vertical: - kube = kube.sel(query.vertical) - if compute: - kube.compute() - return kube \ No newline at end of file diff --git a/datastore/datastore/__init__.py b/datastore/datastore/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/datastore/datastore/const.py b/datastore/datastore/const.py new file mode 100644 index 0000000..22435bc --- /dev/null +++ b/datastore/datastore/const.py @@ -0,0 +1,6 @@ +"""This module contains useful constants definitions grouped into classes""" + + +class BaseRole: + PUBLIC = "public" + ADMIN = "admin" diff --git a/datastore/datastore/datastore.py b/datastore/datastore/datastore.py new file mode 100644 index 0000000..ca402fe --- /dev/null +++ b/datastore/datastore/datastore.py @@ -0,0 
+1,447 @@ +"""Module for catalog management classes and functions""" +from __future__ import annotations + +import os +import logging +import json + +import intake +from dask.delayed import Delayed + +from intake_geokube.queries.geoquery import GeoQuery + +from geokube.core.datacube import DataCube +from geokube.core.dataset import Dataset + +from .singleton import Singleton +from .util import log_execution_time +from .const import BaseRole +from .exception import UnauthorizedError + +DEFAULT_MAX_REQUEST_SIZE_GB = 10 + + +class Datastore(metaclass=Singleton): + """Singleton component for managing catalog data""" + + _LOG = logging.getLogger("geokube.Datastore") + + def __init__(self) -> None: + if "CATALOG_PATH" not in os.environ: + self._LOG.error( + "missing required environment variable: 'CATALOG_PATH'" + ) + raise KeyError( + "Missing required environment variable: 'CATALOG_PATH'" + ) + if "CACHE_PATH" not in os.environ: + self._LOG.error( + "'CACHE_PATH' environment variable was not set. catalog will" + " not be opened!" + ) + raise RuntimeError( + "'CACHE_PATH' environment variable was not set. catalog will" + " not be opened!" + ) + self.catalog = intake.open_catalog(os.environ["CATALOG_PATH"]) + self.cache_dir = os.environ["CACHE_PATH"] + self._LOG.info("cache dir set to %s", self.cache_dir) + self.cache = None + + @log_execution_time(_LOG) + def get_cached_product_or_read( + self, dataset_id: str, product_id: str, query: GeoQuery | None = None + ) -> DataCube | Dataset: + """Get product from the cache instead of loading files indicated in + the catalog if `metadata_caching` set to `True`. + If might return `geokube.DataCube` or `geokube.Dataset`. + + Parameters + ------- + dataset_id : str + ID of the dataset + product_id : str + ID of the dataset + + Returns + ------- + kube : DataCube or Dataset + """ + if self.cache is None: + self._load_cache() + if ( + dataset_id not in self.cache + or product_id not in self.cache[dataset_id] + ): + self._LOG.info( + "dataset `%s` or product `%s` not found in cache! 
Reading" + " product!", + dataset_id, + product_id, + ) + return self.catalog(CACHE_DIR=self.cache_dir)[dataset_id][ + product_id + ].process(query=query) + return self.cache[dataset_id][product_id] + + @log_execution_time(_LOG) + def _load_cache(self, datasets: list[str] | None = None): + if self.cache is None or datasets is None: + self.cache = {} + datasets = self.dataset_list() + + for i, dataset_id in enumerate(datasets): + self._LOG.info( + "loading cache for `%s` (%d/%d)", + dataset_id, + i + 1, + len(datasets), + ) + self.cache[dataset_id] = {} + for product_id in self.product_list(dataset_id): + catalog_entry = self.catalog(CACHE_DIR=self.cache_dir)[ + dataset_id + ][product_id] + if hasattr(catalog_entry, "metadata_caching") and not catalog_entry.metadata_caching: + self._LOG.info( + "`metadata_caching` for product %s.%s set to `False`", + dataset_id, + product_id, + ) + continue + try: + self.cache[dataset_id][ + product_id + ] = catalog_entry.read() + except ValueError: + self._LOG.error( + "failed to load cache for `%s.%s`", + dataset_id, + product_id, + exc_info=True, + ) + except NotImplementedError: + pass + + @log_execution_time(_LOG) + def dataset_list(self) -> list: + """Get list of datasets available in the catalog stored in `catalog` + attribute + + Returns + ------- + datasets : list + List of datasets present in the catalog + """ + datasets = set(self.catalog(CACHE_DIR=self.cache_dir)) + datasets -= { + "medsea-rea-e3r1", + } + # NOTE: medsae cmip uses cftime.DatetimeNoLeap as time + # need to think how to handle it + return sorted(list(datasets)) + + @log_execution_time(_LOG) + def product_list(self, dataset_id: str): + """Get list of products available in the catalog for dataset + indicated by `dataset_id` + + Parameters + ---------- + dataset_id : str + ID of the dataset + + Returns + ------- + products : list + List of products for the dataset + """ + return list(self.catalog(CACHE_DIR=self.cache_dir)[dataset_id]) + + @log_execution_time(_LOG) + def dataset_info(self, dataset_id: str): + """Get information about the dataset and names of all available + products (with their metadata) + + Parameters + ---------- + dataset_id : str + ID of the dataset + + Returns + ------- + info : dict + Dict of short information about the dataset + """ + info = {} + entry = self.catalog(CACHE_DIR=self.cache_dir)[dataset_id] + if entry.metadata: + info["metadata"] = entry.metadata + info["metadata"]["id"] = dataset_id + info["products"] = {} + for product_id in entry: + prod_entry = entry[product_id] + info["products"][product_id] = prod_entry.metadata + info["products"][product_id][ + "description" + ] = prod_entry.description + return info + + @log_execution_time(_LOG) + def product_metadata(self, dataset_id: str, product_id: str): + """Get product metadata directly from the catalog. + + Parameters + ---------- + dataset_id : str + ID of the dataset + product_id : str + ID of the product + + Returns + ------- + metadata : dict + DatasetMetadata of the product + """ + return self.catalog(CACHE_DIR=self.cache_dir)[dataset_id][ + product_id + ].metadata + + @log_execution_time(_LOG) + def first_eligible_product_details( + self, + dataset_id: str, + role: str | list[str] | None = None, + use_cache: bool = False, + ): + """Get details for the 1st product of the dataset eligible for the `role`. + If `role` is `None`, the `public` role is considered. 
+ + Parameters + ---------- + dataset_id : str + ID of the dataset + role : optional str or list of str, default=`None` + Role code for which the 1st eligible product of a dataset + should be selected + use_cache : bool, optional, default=False + Data will be loaded from cache if set to `True` or directly + from the catalog otherwise + + Returns + ------- + details : dict + Details of the product + + Raises + ------ + UnauthorizedError + if none of product of the requested dataset is eligible for a role + """ + info = {} + product_ids = self.product_list(dataset_id) + for prod_id in product_ids: + if not self.is_product_valid_for_role( + dataset_id, prod_id, role=role + ): + continue + entry = self.catalog(CACHE_DIR=self.cache_dir)[dataset_id][prod_id] + if entry.metadata: + info["metadata"] = entry.metadata + info["description"] = entry.description + info["id"] = prod_id + info["dataset"] = self.dataset_info(dataset_id=dataset_id) + if use_cache: + info["data"] = self.get_cached_product_or_read( + dataset_id, prod_id + ).to_dict() + else: + info["data"] = entry.read_chunked().to_dict() + return info + raise UnauthorizedError() + + @log_execution_time(_LOG) + def product_details( + self, + dataset_id: str, + product_id: str, + role: str | list[str] | None = None, + use_cache: bool = False, + ): + """Get details for the single product + + Parameters + ---------- + dataset_id : str + ID of the dataset + product_id : str + ID of the product + role : optional str or list of str, default=`None` + Role code for which the the product is requested. + use_cache : bool, optional, default=False + Data will be loaded from cache if set to `True` or directly + from the catalog otherwise + + Returns + ------- + details : dict + Details of the product + + Raises + ------ + UnauthorizedError + if the requested product is not eligible for a role + """ + info = {} + if not self.is_product_valid_for_role( + dataset_id, product_id, role=role + ): + raise UnauthorizedError() + entry = self.catalog(CACHE_DIR=self.cache_dir)[dataset_id][product_id] + if entry.metadata: + info["metadata"] = entry.metadata + info["description"] = entry.description + info["id"] = product_id + info["dataset"] = self.dataset_info(dataset_id=dataset_id) + if use_cache: + info["data"] = self.get_cached_product_or_read( + dataset_id, product_id + ).to_dict() + else: + info["data"] = entry.read_chunked().to_dict() + return info + + def product_info( + self, dataset_id: str, product_id: str, use_cache: bool = False + ): + info = {} + entry = self.catalog(CACHE_DIR=self.cache_dir)[dataset_id][product_id] + if entry.metadata: + info["metadata"] = entry.metadata + if use_cache: + info["data"] = self.get_cached_product_or_read( + dataset_id, product_id + ).to_dict() + else: + info["data"] = entry.read_chunked().to_dict() + return info + + @log_execution_time(_LOG) + def query( + self, + dataset_id: str, + product_id: str, + query: GeoQuery | dict | str, + compute: None | bool = False, + ) -> DataCube: + """Query dataset + + Parameters + ---------- + dataset_id : str + ID of the dataset + product_id : str + ID of the product + query : GeoQuery or dict or str or bytes or bytearray + Query to be executed for the given product + compute : bool, optional, default=False + If True, resulting data of DataCube will be computed, otherwise + DataCube with `dask.Delayed` object will be returned + + Returns + ------- + kube : DataCube + DataCube processed according to `query` + """ + self._LOG.debug("query: %s", query) + geoquery: GeoQuery = 
GeoQuery.parse(query) + self._LOG.debug("processing GeoQuery: %s", geoquery) + # NOTE: we always use catalog directly and single product cache + self._LOG.debug("loading product...") + kube = self.catalog(CACHE_DIR=self.cache_dir)[dataset_id][ + product_id + ].process(query=geoquery) + return kube + + @log_execution_time(_LOG) + def estimate( + self, + dataset_id: str, + product_id: str, + query: GeoQuery | dict | str, + ) -> int: + """Estimate dataset size + + Parameters + ---------- + dataset_id : str + ID of the dataset + product_id : str + ID of the product + query : GeoQuery or dict or str + Query to be executed for the given product + + Returns + ------- + size : int + Number of bytes of the estimated kube + """ + self._LOG.debug("query: %s", query) + geoquery: GeoQuery = GeoQuery.parse(query) + self._LOG.debug("processing GeoQuery: %s", geoquery) + # NOTE: we always use catalog directly and single product cache + self._LOG.debug("loading product...") + # NOTE: for estimation we use cached products + kube = self.get_cached_product_or_read(dataset_id, product_id, + query=query) + return Datastore._process_query(kube, geoquery, False).nbytes + + @log_execution_time(_LOG) + def is_product_valid_for_role( + self, + dataset_id: str, + product_id: str, + role: str | list[str] | None = None, + ): + entry = self.catalog(CACHE_DIR=self.cache_dir)[dataset_id][product_id] + product_role = BaseRole.PUBLIC + if entry.metadata: + product_role = entry.metadata.get("role", BaseRole.PUBLIC) + if product_role == BaseRole.PUBLIC: + return True + if not role: + # NOTE: it means, we consider the public profile + return False + if BaseRole.ADMIN in role: + return True + if product_role in role: + return True + return False + + @staticmethod + def _process_query(kube, query: GeoQuery, compute: None | bool = False): + if isinstance(kube, Dataset): + Datastore._LOG.debug("filtering with: %s", query.filters) + try: + kube = kube.filter(**query.filters) + except ValueError as err: + Datastore._LOG.warning("could not filter by one of the key: %s", err) + if isinstance(kube, Delayed) and compute: + kube = kube.compute() + if query.variable: + Datastore._LOG.debug("selecting fields...") + kube = kube[query.variable] + if query.area: + Datastore._LOG.debug("subsetting by geobbox...") + kube = kube.geobbox(**query.area) + if query.location: + Datastore._LOG.debug("subsetting by locations...") + kube = kube.locations(**query.location) + if query.time: + Datastore._LOG.debug("subsetting by time...") + kube = kube.sel(time=query.time) + if query.vertical: + Datastore._LOG.debug("subsetting by vertical...") + method = None if isinstance(query.vertical, slice) else "nearest" + kube = kube.sel(vertical=query.vertical, method=method) + return kube.compute() if compute else kube diff --git a/datastore/datastore/exception.py b/datastore/datastore/exception.py new file mode 100644 index 0000000..d048e83 --- /dev/null +++ b/datastore/datastore/exception.py @@ -0,0 +1,5 @@ +"""Module with exceptions definitions""" + + +class UnauthorizedError(ValueError): + """Role is not authorized""" diff --git a/datastore/datastore/singleton.py b/datastore/datastore/singleton.py new file mode 100644 index 0000000..ff6ef01 --- /dev/null +++ b/datastore/datastore/singleton.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- +"""Singleton module. + +The module contains metaclass called Singleton +for thread-safe singleton-pattern implementation. 
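+
+Example (as used by ``Datastore`` in this package)::
+
+    class Datastore(metaclass=Singleton):
+        ...  # every call to ``Datastore()`` returns the same instance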
+""" +import os +import logging +from threading import Lock +from typing import Any, Type + + +class Singleton(type): + """Thread-safe implementation of the singleton design pattern metaclass""" + + _instances: dict[Type, Any] = {} + _lock: Lock = Lock() + + def __call__(cls, *args, **kwargs): + with cls._lock: + if cls not in cls._instances: + instance = super().__call__(*args, **kwargs) + if hasattr(instance, "_LOG"): + instance._LOG.setLevel( + os.environ.get("LOGGING_LEVEL", "INFO") + ) + instance._LOG.addHandler(logging.StreamHandler()) + cls._instances[cls] = instance + return cls._instances[cls] diff --git a/datastore/datastore/util.py b/datastore/datastore/util.py new file mode 100644 index 0000000..4122d57 --- /dev/null +++ b/datastore/datastore/util.py @@ -0,0 +1,27 @@ +"""Utils module""" +from functools import wraps +import datetime +import logging + + +def log_execution_time(logger: logging.Logger): + """Decorator logging execution time of the method or function""" + + def inner(func): + @wraps(func) + def wrapper(*args, **kwds): + exec_start_time = datetime.datetime.now() + try: + return func(*args, **kwds) + finally: + exec_time = datetime.datetime.now() - exec_start_time + logger.info( + "execution of '%s' function from '%s' package took %s", + func.__name__, + func.__module__, + exec_time, + ) + + return wrapper + + return inner diff --git a/datastore/dbmanager/__init__.py b/datastore/dbmanager/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/datastore/dbmanager/dbmanager.py b/datastore/dbmanager/dbmanager.py new file mode 100644 index 0000000..d4ff293 --- /dev/null +++ b/datastore/dbmanager/dbmanager.py @@ -0,0 +1,349 @@ +from __future__ import annotations + +import os +import yaml +import logging +import uuid +import secrets +from datetime import datetime +from enum import auto, Enum as Enum_, unique + +from sqlalchemy import ( + Column, + create_engine, + DateTime, + Enum, + ForeignKey, + Integer, + JSON, + Sequence, + String, + Table, +) +from sqlalchemy.dialects.postgresql import UUID +from sqlalchemy.orm import declarative_base, sessionmaker, relationship + +from .singleton import Singleton + + +def is_true(item) -> bool: + """If `item` represents `True` value""" + if isinstance(item, str): + return item.lower() in ["y", "yes", "true", "t"] + return bool(item) + + +def generate_key() -> str: + """Generate as new api key for a user""" + return secrets.token_urlsafe(nbytes=32) + + +@unique +class RequestStatus(Enum_): + """Status of the Request""" + + PENDING = auto() + QUEUED = auto() + RUNNING = auto() + DONE = auto() + FAILED = auto() + TIMEOUT = auto() + + @classmethod + def _missing_(cls, value): + return cls.PENDING + + +class _Repr: + def __repr__(self): + cols = self.__table__.columns.keys() # pylint: disable=no-member + kwa = ", ".join(f"{col}={getattr(self, col)}" for col in cols) + return f"{type(self).__name__}({kwa})" + + +Base = declarative_base(cls=_Repr, name="Base") + + +association_table = Table( + "users_roles", + Base.metadata, + Column("user_id", ForeignKey("users.user_id")), + Column("role_id", ForeignKey("roles.role_id")), +) + + +class Role(Base): + __tablename__ = "roles" + role_id = Column(Integer, Sequence("role_id_seq"), primary_key=True) + role_name = Column(String(255), nullable=False, unique=True) + + +class User(Base): + __tablename__ = "users" + user_id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4) + # keycloak_id = Column(UUID(as_uuid=True), nullable=False, unique=True, default=uuid.uuid4) + 
api_key = Column( + String(255), nullable=False, unique=True, default=generate_key + ) + contact_name = Column(String(255)) + requests = relationship("Request", lazy="dynamic") + roles = relationship("Role", secondary=association_table, lazy="selectin") + + +class Worker(Base): + __tablename__ = "workers" + worker_id = Column(Integer, primary_key=True) + status = Column(String(255), nullable=False) + host = Column(String(255)) + dask_scheduler_port = Column(Integer) + dask_dashboard_address = Column(String(10)) + created_on = Column(DateTime, default=datetime.now) + + +class Request(Base): + __tablename__ = "requests" + request_id = Column(Integer, primary_key=True) + status = Column(Enum(RequestStatus), nullable=False) + priority = Column(Integer) + user_id = Column( + UUID(as_uuid=True), ForeignKey("users.user_id"), nullable=False + ) + worker_id = Column(Integer, ForeignKey("workers.worker_id")) + dataset = Column(String(255)) + product = Column(String(255)) + query = Column(JSON()) + estimate_size_bytes = Column(Integer) + created_on = Column(DateTime, default=datetime.now) + last_update = Column(DateTime, default=datetime.now, onupdate=datetime.now) + fail_reason = Column(String(1000)) + download = relationship("Download", uselist=False, lazy="selectin") + + +class Download(Base): + __tablename__ = "downloads" + download_id = Column(Integer, primary_key=True) + download_uri = Column(String(255)) + request_id = Column( + Integer, ForeignKey("requests.request_id"), nullable=False + ) + storage_id = Column(Integer, ForeignKey("storages.storage_id")) + location_path = Column(String(255)) + size_bytes = Column(Integer) + created_on = Column(DateTime, default=datetime.now) + + +class Storage(Base): + __tablename__ = "storages" + storage_id = Column(Integer, primary_key=True) + name = Column(String(255)) + host = Column(String(20)) + protocol = Column(String(10)) + port = Column(Integer) + + +class DBManager(metaclass=Singleton): + _LOG = logging.getLogger("geokube.DBManager") + + def __init__(self) -> None: + for venv_key in [ + "POSTGRES_DB", + "POSTGRES_USER", + "POSTGRES_PASSWORD", + "DB_SERVICE_PORT", + ]: + self._LOG.info( + "attempt to load data from environment variable: `%s`", + venv_key, + ) + if venv_key not in os.environ: + self._LOG.error( + "missing required environment variable: `%s`", venv_key + ) + raise KeyError( + f"missing required environment variable: {venv_key}" + ) + + user = os.environ["POSTGRES_USER"] + password = os.environ["POSTGRES_PASSWORD"] + host = os.environ["DB_SERVICE_HOST"] + port = os.environ["DB_SERVICE_PORT"] + database = os.environ["POSTGRES_DB"] + + url = f"postgresql://{user}:{password}@{host}:{port}/{database}" + self._LOG.info("db connection: `%s`", url) + self.__engine = create_engine( + url, echo=is_true(os.environ.get("DB_LOGGING", False)) + ) + self.__session_maker = sessionmaker(bind=self.__engine) + + def _create_database(self): + try: + Base.metadata.create_all(self.__engine) + except Exception as exception: + self._LOG.error( + "could not create a database due to an error", exc_info=True + ) + raise exception + + def add_user( + self, + contact_name: str, + user_id: UUID | None = None, + api_key: str | None = None, + roles_names: list[str] | None = None, + ): + with self.__session_maker() as session: + user = User( + user_id=user_id, api_key=api_key, contact_name=contact_name + ) + if roles_names: + user.roles.extend( + [ + session.query(Role) + .where(Role.role_name == role_name) + .all()[0] # NOTE: role_name is unique in the database + 
for role_name in roles_names + ] + ) + session.add(user) + session.commit() + return user + + def get_user_details(self, user_id: int): + with self.__session_maker() as session: + return session.query(User).get(user_id) + + def get_user_roles_names(self, user_id: int | None = None) -> list[str]: + if user_id is None: + return ["public"] + with self.__session_maker() as session: + return list( + map( + lambda role: role.role_name, + session.query(User).get(user_id).roles, + ) + ) + + def get_request_details(self, request_id: int): + with self.__session_maker() as session: + return session.query(Request).get(request_id) + + def get_download_details_for_request(self, request_id: int): + with self.__session_maker() as session: + request_details = session.query(Request).get(request_id) + if request_details is None: + raise ValueError( + f"Request with id: {request_id} doesn't exist" + ) + return request_details.download + + def create_request( + self, + user_id: int = 1, + dataset: str | None = None, + product: str | None = None, + query: str | None = None, + worker_id: int | None = None, + priority: str | None = None, + estimate_size_bytes: int | None = None, + status: RequestStatus = RequestStatus.PENDING, + ) -> int: + # TODO: Add more request-related parameters to this method. + with self.__session_maker() as session: + request = Request( + status=status, + priority=priority, + user_id=user_id, + worker_id=worker_id, + dataset=dataset, + product=product, + query=query, + estimate_size_bytes=estimate_size_bytes, + created_on=datetime.utcnow(), + ) + session.add(request) + session.commit() + return request.request_id + + def update_request( + self, + request_id: int, + worker_id: int | None = None, + status: RequestStatus | None = None, + location_path: str = None, + size_bytes: int = None, + fail_reason: str = None, + ) -> int: + with self.__session_maker() as session: + request = session.query(Request).get(request_id) + if status: + request.status = status + if worker_id: + request.worker_id = worker_id + request.last_update = datetime.utcnow() + request.fail_reason = fail_reason + session.commit() + if status is RequestStatus.DONE: + download = Download( + location_path=location_path, + storage_id=0, + request_id=request.request_id, + created_on=datetime.utcnow(), + download_uri=f"/download/{request_id}", + size_bytes=size_bytes, + ) + session.add(download) + session.commit() + return request.request_id + + def get_request_status_and_reason( + self, request_id + ) -> None | RequestStatus: + with self.__session_maker() as session: + if request := session.query(Request).get(request_id): + return RequestStatus(request.status), request.fail_reason + raise IndexError( + f"Request with id: `{request_id}` does not exist!" + ) + + def get_requests_for_user_id(self, user_id) -> list[Request]: + with self.__session_maker() as session: + return session.query(User).get(user_id).requests.all() + + def get_requests_for_user_id_and_status( + self, user_id, status: RequestStatus | tuple[RequestStatus] + ) -> list[Request]: + if isinstance(status, RequestStatus): + status = (status,) + with self.__session_maker() as session: + return session.get(User, user_id).requests.filter( + Request.status.in_(status) + ) + + def get_download_details_for_request_id(self, request_id) -> Download: + with self.__session_maker() as session: + request_details = session.query(Request).get(request_id) + if request_details is None: + raise IndexError( + f"Request with id: `{request_id}` does not exist!" 
+ ) + return request_details.download + + def create_worker( + self, + status: str, + dask_scheduler_port: int, + dask_dashboard_address: int, + host: str = "localhost", + ) -> int: + with self.__session_maker() as session: + worker = Worker( + status=status, + host=host, + dask_scheduler_port=dask_scheduler_port, + dask_dashboard_address=dask_dashboard_address, + created_on=datetime.utcnow(), + ) + session.add(worker) + session.commit() + return worker.worker_id diff --git a/datastore/dbmanager/singleton.py b/datastore/dbmanager/singleton.py new file mode 100644 index 0000000..bf7b29b --- /dev/null +++ b/datastore/dbmanager/singleton.py @@ -0,0 +1,21 @@ +# -*- coding: utf-8 -*- +"""Singleton module. + +The module contains metaclass called Singleton +for thread-safe singleton-pattern implementation. +""" +from threading import Lock + + +class Singleton(type): + """Thread-safe implementation of the singleton design pattern metaclass""" + + _instances = {} + _lock: Lock = Lock() + + def __call__(cls, *args, **kwargs): + with cls._lock: + if cls not in cls._instances: + instance = super().__call__(*args, **kwargs) + cls._instances[cls] = instance + return cls._instances[cls] diff --git a/datastore/requirements.txt b/datastore/requirements.txt new file mode 100644 index 0000000..d4a7d44 --- /dev/null +++ b/datastore/requirements.txt @@ -0,0 +1,2 @@ +networkx +pydantic \ No newline at end of file diff --git a/datastore/tests/__init__.py b/datastore/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/datastore/tests/workflow/__init__.py b/datastore/tests/workflow/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/datastore/tests/workflow/fixtures.py b/datastore/tests/workflow/fixtures.py new file mode 100644 index 0000000..8ce94ad --- /dev/null +++ b/datastore/tests/workflow/fixtures.py @@ -0,0 +1,122 @@ +import pytest + + +@pytest.fixture +def subset_query() -> str: + yield """ + { + "dataset_id": "era5-single-levels", + "product_id": "reanalysis", + "query": { + "area": { + "north": -85, + "south": -90, + "east": 260, + "west": 240 + }, + "time": { + "hour": [ + "15" + ], + "year": [ + "1981", + "1985", + "2022" + ], + "month": [ + "3", + "6" + ], + "day": [ + "23", + "27" + ] + }, + "variable": [ + "2_metre_dewpoint_temperature", + "surface_net_downward_shortwave_flux" + ] + } + } + """ + + +@pytest.fixture +def resample_query(): + yield """ + { + "freq": "1D", + "operator": "nanmax", + "resample_args": { + "closed": "right" + } + } + """ + + +@pytest.fixture +def workflow_str(): + yield """ + [ + { + "id": "subset1", + "op": "subset", + "args": { + "dataset_id": "era5-single-levels", + "product_id": "reanalysis", + "query": { + "area": { + "north": -85, + "south": -90, + "east": 260, + "west": 240 + } + } + } + }, + { + "id": "resample1", + "use": ["subset1"], + "op": "resample", + "args": + { + "freq": "1D", + "operator": "nanmax" + } + } + ] + """ + + +@pytest.fixture +def bad_workflow_str(): + yield """ + [ + { + "id": "subset1", + "op": "subset", + "args": { + "dataset_id": "era5-single-levels", + "product_id": "reanalysis", + "query": { + "area": { + "north": -85, + "south": -90, + "east": 260, + "west": 240 + } + } + } + }, + { + "id": "resample1", + "use": ["subset1", "subset2"], + "op": "resample", + "args": + { + "freq": "1D", + "operator": "nanmax" + } + } + ] + """ diff --git a/datastore/tests/workflow/test_operators.py b/datastore/tests/workflow/test_operators.py new file mode 100644 index 0000000..46cf109 --- /dev/null +++ 
b/datastore/tests/workflow/test_operators.py @@ -0,0 +1,20 @@ +from workflow import operators as op + +from .fixtures import subset_query, resample_query + + +def test_create_subset_operator_with_str_args(subset_query): + sub_op = op.Operator("subset", subset_query) + assert isinstance(sub_op, op.Subset) + assert isinstance(sub_op.args, op.SubsetArgs) + assert sub_op.args.dataset_id == "era5-single-levels" + assert sub_op.args.product_id == "reanalysis" + + +def test_create_resample_operator_with_str_args(resample_query): + res_op = op.Operator("resample", resample_query) + assert isinstance(res_op, op.Resample) + assert isinstance(res_op.args, op.ResampleArgs) + assert res_op.args.freq == "1D" + assert res_op.args.operator == "nanmax" + assert res_op.args.resample_args == {"closed": "right"} diff --git a/datastore/tests/workflow/test_workflow.py b/datastore/tests/workflow/test_workflow.py new file mode 100644 index 0000000..7036b73 --- /dev/null +++ b/datastore/tests/workflow/test_workflow.py @@ -0,0 +1,23 @@ +import pytest +from workflow.workflow import Workflow + +from .fixtures import workflow_str, bad_workflow_str + + +def test_create_workflow(workflow_str): + comp_graph = Workflow(workflow_str) + assert len(comp_graph) == 2 + task_iter = comp_graph.traverse() + node1, precedint1 = next(task_iter) + assert precedint1 == tuple() + assert node1.operator.name == "subset" + + node2, precedint2 = next(task_iter) + assert len(precedint2) == 1 + assert node2.operator.name == "resample" + assert precedint2[0].operator.name == "subset" + + +def test_fail_when_task_not_defined(bad_workflow_str): + with pytest.raises(ValueError, match=r"task with id*"): + _ = Workflow(bad_workflow_str) diff --git a/datastore/utils/__init__.py b/datastore/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/datastore/utils/api_logging.py b/datastore/utils/api_logging.py new file mode 100644 index 0000000..58d148d --- /dev/null +++ b/datastore/utils/api_logging.py @@ -0,0 +1,40 @@ +import os +from typing import Literal +import logging as default_logging + + +def get_dds_logger( + name: str, + level: Literal["debug", "info", "warning", "error", "critical"] = "info", +): + """Get DDS logger with the expected format, handlers and formatter. + + Parameters + ---------- + name : str + Name of the logger + level : str, default="info" + Value of the logging level. One out of ["debug", "info", "warn", + "error", "critical"]. + Logging level is taken from the + enviornmental variable `LOGGING_FORMAT`. If this variable is not defined, + the value of the `level` argument is used. 
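+        (Note: the level itself is read from the ``LOGGING_LEVEL`` environment
+        variable; ``LOGGING_FORMAT`` only controls the message format.)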
+ + Returns + ------- + log : logging.Logger + Logger with the handlers set + """ + log = default_logging.getLogger(name) + format_ = os.environ.get( + "LOGGING_FORMAT", + "%(asctime)s %(name)s %(levelname)s %(message)s", + ) + formatter = default_logging.Formatter(format_) + logging_level = os.environ.get("LOGGING_LEVEL", level.upper()) + log.setLevel(logging_level) + stream_handler = default_logging.StreamHandler() + stream_handler.setFormatter(formatter) + stream_handler.setLevel(logging_level) + log.addHandler(stream_handler) + return log diff --git a/datastore/utils/metrics.py b/datastore/utils/metrics.py new file mode 100644 index 0000000..82aeb55 --- /dev/null +++ b/datastore/utils/metrics.py @@ -0,0 +1,33 @@ +import time +import logging as default_logging +from functools import wraps +from typing import Literal + + +def log_execution_time( + logger: default_logging.Logger, + level: Literal["debug", "info", "warning", "error", "critical"] = "info", +): + """Decorator logging execution time of the method or function""" + level = default_logging.getLevelName(level.upper()) + + def inner(func): + @wraps(func) + def wrapper(*args, **kwds): + exec_start_time = time.monotonic() + try: + return func(*args, **kwds) + finally: + # NOTE: maybe logging should be on DEBUG level + logger.log( + level, + "execution of '%s' function from '%s' package took" + " %.4f sec", + func.__name__, + func.__module__, + time.monotonic() - exec_start_time, + ) + + return wrapper + + return inner diff --git a/datastore/wait-for-it.sh b/datastore/wait-for-it.sh new file mode 100755 index 0000000..d990e0d --- /dev/null +++ b/datastore/wait-for-it.sh @@ -0,0 +1,182 @@ +#!/usr/bin/env bash +# Use this script to test if a given TCP host/port are available + +WAITFORIT_cmdname=${0##*/} + +echoerr() { if [[ $WAITFORIT_QUIET -ne 1 ]]; then echo "$@" 1>&2; fi } + +usage() +{ + cat << USAGE >&2 +Usage: + $WAITFORIT_cmdname host:port [-s] [-t timeout] [-- command args] + -h HOST | --host=HOST Host or IP under test + -p PORT | --port=PORT TCP port under test + Alternatively, you specify the host and port as host:port + -s | --strict Only execute subcommand if the test succeeds + -q | --quiet Don't output any status messages + -t TIMEOUT | --timeout=TIMEOUT + Timeout in seconds, zero for no timeout + -- COMMAND ARGS Execute command with args after the test finishes +USAGE + exit 1 +} + +wait_for() +{ + if [[ $WAITFORIT_TIMEOUT -gt 0 ]]; then + echoerr "$WAITFORIT_cmdname: waiting $WAITFORIT_TIMEOUT seconds for $WAITFORIT_HOST:$WAITFORIT_PORT" + else + echoerr "$WAITFORIT_cmdname: waiting for $WAITFORIT_HOST:$WAITFORIT_PORT without a timeout" + fi + WAITFORIT_start_ts=$(date +%s) + while : + do + if [[ $WAITFORIT_ISBUSY -eq 1 ]]; then + nc -z $WAITFORIT_HOST $WAITFORIT_PORT + WAITFORIT_result=$? + else + (echo -n > /dev/tcp/$WAITFORIT_HOST/$WAITFORIT_PORT) >/dev/null 2>&1 + WAITFORIT_result=$? 
+ fi + if [[ $WAITFORIT_result -eq 0 ]]; then + WAITFORIT_end_ts=$(date +%s) + echoerr "$WAITFORIT_cmdname: $WAITFORIT_HOST:$WAITFORIT_PORT is available after $((WAITFORIT_end_ts - WAITFORIT_start_ts)) seconds" + break + fi + sleep 1 + done + return $WAITFORIT_result +} + +wait_for_wrapper() +{ + # In order to support SIGINT during timeout: http://unix.stackexchange.com/a/57692 + if [[ $WAITFORIT_QUIET -eq 1 ]]; then + timeout $WAITFORIT_BUSYTIMEFLAG $WAITFORIT_TIMEOUT $0 --quiet --child --host=$WAITFORIT_HOST --port=$WAITFORIT_PORT --timeout=$WAITFORIT_TIMEOUT & + else + timeout $WAITFORIT_BUSYTIMEFLAG $WAITFORIT_TIMEOUT $0 --child --host=$WAITFORIT_HOST --port=$WAITFORIT_PORT --timeout=$WAITFORIT_TIMEOUT & + fi + WAITFORIT_PID=$! + trap "kill -INT -$WAITFORIT_PID" INT + wait $WAITFORIT_PID + WAITFORIT_RESULT=$? + if [[ $WAITFORIT_RESULT -ne 0 ]]; then + echoerr "$WAITFORIT_cmdname: timeout occurred after waiting $WAITFORIT_TIMEOUT seconds for $WAITFORIT_HOST:$WAITFORIT_PORT" + fi + return $WAITFORIT_RESULT +} + +# process arguments +while [[ $# -gt 0 ]] +do + case "$1" in + *:* ) + WAITFORIT_hostport=(${1//:/ }) + WAITFORIT_HOST=${WAITFORIT_hostport[0]} + WAITFORIT_PORT=${WAITFORIT_hostport[1]} + shift 1 + ;; + --child) + WAITFORIT_CHILD=1 + shift 1 + ;; + -q | --quiet) + WAITFORIT_QUIET=1 + shift 1 + ;; + -s | --strict) + WAITFORIT_STRICT=1 + shift 1 + ;; + -h) + WAITFORIT_HOST="$2" + if [[ $WAITFORIT_HOST == "" ]]; then break; fi + shift 2 + ;; + --host=*) + WAITFORIT_HOST="${1#*=}" + shift 1 + ;; + -p) + WAITFORIT_PORT="$2" + if [[ $WAITFORIT_PORT == "" ]]; then break; fi + shift 2 + ;; + --port=*) + WAITFORIT_PORT="${1#*=}" + shift 1 + ;; + -t) + WAITFORIT_TIMEOUT="$2" + if [[ $WAITFORIT_TIMEOUT == "" ]]; then break; fi + shift 2 + ;; + --timeout=*) + WAITFORIT_TIMEOUT="${1#*=}" + shift 1 + ;; + --) + shift + WAITFORIT_CLI=("$@") + break + ;; + --help) + usage + ;; + *) + echoerr "Unknown argument: $1" + usage + ;; + esac +done + +if [[ "$WAITFORIT_HOST" == "" || "$WAITFORIT_PORT" == "" ]]; then + echoerr "Error: you need to provide a host and port to test." + usage +fi + +WAITFORIT_TIMEOUT=${WAITFORIT_TIMEOUT:-15} +WAITFORIT_STRICT=${WAITFORIT_STRICT:-0} +WAITFORIT_CHILD=${WAITFORIT_CHILD:-0} +WAITFORIT_QUIET=${WAITFORIT_QUIET:-0} + +# Check to see if timeout is from busybox? +WAITFORIT_TIMEOUT_PATH=$(type -p timeout) +WAITFORIT_TIMEOUT_PATH=$(realpath $WAITFORIT_TIMEOUT_PATH 2>/dev/null || readlink -f $WAITFORIT_TIMEOUT_PATH) + +WAITFORIT_BUSYTIMEFLAG="" +if [[ $WAITFORIT_TIMEOUT_PATH =~ "busybox" ]]; then + WAITFORIT_ISBUSY=1 + # Check if busybox timeout uses -t flag + # (recent Alpine versions don't support -t anymore) + if timeout &>/dev/stdout | grep -q -e '-t '; then + WAITFORIT_BUSYTIMEFLAG="-t" + fi +else + WAITFORIT_ISBUSY=0 +fi + +if [[ $WAITFORIT_CHILD -gt 0 ]]; then + wait_for + WAITFORIT_RESULT=$? + exit $WAITFORIT_RESULT +else + if [[ $WAITFORIT_TIMEOUT -gt 0 ]]; then + wait_for_wrapper + WAITFORIT_RESULT=$? + else + wait_for + WAITFORIT_RESULT=$? 
+ fi +fi + +if [[ $WAITFORIT_CLI != "" ]]; then + if [[ $WAITFORIT_RESULT -ne 0 && $WAITFORIT_STRICT -eq 1 ]]; then + echoerr "$WAITFORIT_cmdname: strict mode, refusing to execute subprocess" + exit $WAITFORIT_RESULT + fi + exec "${WAITFORIT_CLI[@]}" +else + exit $WAITFORIT_RESULT +fi diff --git a/datastore/workflow/__init__.py b/datastore/workflow/__init__.py new file mode 100644 index 0000000..9c75326 --- /dev/null +++ b/datastore/workflow/__init__.py @@ -0,0 +1 @@ +from workflow.workflow import Workflow diff --git a/datastore/workflow/workflow.py b/datastore/workflow/workflow.py new file mode 100644 index 0000000..63e6f78 --- /dev/null +++ b/datastore/workflow/workflow.py @@ -0,0 +1,226 @@ +import json +from typing import Generator, Hashable, Callable, Literal, Any +from functools import partial +import logging + +import networkx as nx +from geokube.core.datacube import DataCube +from intake_geokube.queries.geoquery import GeoQuery +from intake_geokube.queries.workflow import Workflow as WorkflowModel +from datastore.datastore import Datastore + +AggregationFunctionName = ( + Literal["max"] + | Literal["nanmax"] + | Literal["min"] + | Literal["nanmin"] + | Literal["mean"] + | Literal["nanmean"] + | Literal["sum"] + | Literal["nansum"] +) + + +_LOG = logging.getLogger("geokube.workflow") + +TASK_ATTRIBUTE = "task" + + +class _WorkflowTask: + __slots__ = ("id", "dependencies", "operator") + + id: Hashable + dependencies: list[Hashable] | None + operator: Callable[..., DataCube] + + def __init__( + self, + id: Hashable, + operator: Callable[..., DataCube], + dependencies: list[Hashable] | None = None, + ) -> None: + self.operator = operator + self.id = id + if dependencies is None: + dependencies = [] + self.dependencies = dependencies + + def compute(self, kube: DataCube | None) -> DataCube: + return self.operator(kube) + + +class Workflow: + __slots__ = ("graph", "present_nodes_ids", "is_verified") + + graph: nx.DiGraph + present_nodes_ids: set[Hashable] + is_verified: bool + + def __init__(self) -> None: + self.graph = nx.DiGraph() + self.present_nodes_ids = set() + self.is_verified = False + + @classmethod + def from_tasklist(cls, task_list: WorkflowModel) -> "Workflow": + workflow = cls() + for task in task_list.tasks: + match task.op: + case "subset": + workflow.subset(task.id, **task.args) + case "resample": + workflow.resample( + task.id, dependencies=task.use, **task.args + ) + case "average": + workflow.average( + task.id, dependencies=task.use, **task.args + ) + case "to_regular": + workflow.to_regular( + task.id, dependencies=task.use, **task.args + ) + case _: + raise ValueError( + f"task operator: {task.op} is not defined" + ) + return workflow + + def _add_computational_node(self, task: _WorkflowTask): + node_id = task.id + assert ( + node_id not in self.present_nodes_ids + ), "worflow task IDs need to be unique!" 
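+        # Record the node and wire it to its dependencies; edges point from
+        # the dependency to the dependent task, so traversal follows data flow.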
+ self.present_nodes_ids.add(node_id) + self.graph.add_node(node_id, **{TASK_ATTRIBUTE: task}) + for dependend_node in task.dependencies: + self.graph.add_edge(dependend_node, node_id) + self.is_verified = False + + def subset( + self, + id: Hashable, + dataset_id: str, + product_id: str, + query: GeoQuery | dict, + ) -> "Workflow": + def _subset(kube: DataCube | None = None) -> DataCube: + return Datastore().query( + dataset_id=dataset_id, + product_id=product_id, + query=( + query if isinstance(query, GeoQuery) else GeoQuery(**query) + ), + compute=False, + ) + + task = _WorkflowTask(id=id, operator=_subset) + self._add_computational_node(task) + return self + + def resample( + self, + id: Hashable, + freq: str, + agg: Callable[..., DataCube] | AggregationFunctionName, + resample_kwargs: dict[str, Any] | None, + *, + dependencies: list[Hashable], + ) -> "Workflow": + def _resample(kube: DataCube | None = None) -> DataCube: + assert kube is not None, "`kube` cannot be `None` for resampling" + return kube.resample( + operator=agg, + frequency=freq, + **resample_kwargs, + ) + + task = _WorkflowTask( + id=id, operator=_resample, dependencies=dependencies + ) + self._add_computational_node(task) + return self + + def average( + self, id: Hashable, dim: str, *, dependencies: list[Hashable] + ) -> "Workflow": + def _average(kube: DataCube | None = None) -> DataCube: + assert kube is not None, "`kube` cannot be `None` for averaging" + return kube.average(dim=dim) + + task = _WorkflowTask( + id=id, operator=_average, dependencies=dependencies + ) + self._add_computational_node(task) + return self + + def to_regular( + self, id: Hashable, *, dependencies: list[Hashable] + ) -> "Workflow": + def _to_regular(kube: DataCube | None = None) -> DataCube: + assert ( + kube is not None + ), "`kube` cannot be `None` for `to_regular``" + return kube.to_regular() + + task = _WorkflowTask( + id=id, operator=_to_regular, dependencies=dependencies + ) + self._add_computational_node(task) + return self + + def add_task( + self, + id: Hashable, + func: Callable[..., DataCube], + dependencies: list[str] | None = None, + **func_kwargs, + ) -> "Workflow": + task = _WorkflowTask( + id=id, + operator=partial(func, **func_kwargs), + dependencies=dependencies, + ) + self._add_computational_node(task) + return self + + def verify(self) -> "Workflow": + if self.is_verified: + return + assert nx.is_directed_acyclic_graph( + self.graph + ), "the workflow contains cycles!" 
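+        # A dependency listed in `use` but never defined as a task shows up
+        # here as a node without the `task` attribute.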
+ for u, v in self.graph.edges: + if TASK_ATTRIBUTE not in self.graph.nodes[u].keys(): + _LOG.error( + "task with id `%s` is not defined for the workflow", u + ) + raise ValueError( + f"task with id `{u}` is not defined for the workflow" + ) + if TASK_ATTRIBUTE not in self.graph.nodes[v].keys(): + _LOG.error( + "task with id `%s` is not defined for the workflow", v + ) + raise ValueError( + f"task with id `{v}` is not defined for the workflow" + ) + self.is_verified = True + + def traverse(self) -> Generator[_WorkflowTask, None, None]: + for node_id in nx.topological_sort(self.graph): + _LOG.debug("computing task for the node: %s", node_id) + yield self.graph.nodes[node_id][TASK_ATTRIBUTE] + + def compute(self) -> DataCube: + self.verify() + result = None + for task in self.traverse(): + result = task.compute(result) + return result + + def __len__(self): + return len(self.graph.nodes) + + def __getitem__(self, idx: Hashable): + return self.graph.nodes[idx] From 49bec4730ac839c1e64e412e29de1e471750e3fe Mon Sep 17 00:00:00 2001 From: Marco Mancini Date: Thu, 11 Jan 2024 09:16:50 +0100 Subject: [PATCH 03/15] Update executor with adjust_for_intake branch --- executor/Dockerfile | 28 +- executor/app/main.py | 567 ++++++++++++++++++++++++++++++-------- executor/app/messaging.py | 45 +++ executor/app/meta.py | 27 ++ executor/requirements.txt | 11 +- 5 files changed, 537 insertions(+), 141 deletions(-) create mode 100644 executor/app/messaging.py create mode 100644 executor/app/meta.py diff --git a/executor/Dockerfile b/executor/Dockerfile index e3cc317..6a946fd 100644 --- a/executor/Dockerfile +++ b/executor/Dockerfile @@ -1,16 +1,12 @@ -FROM continuumio/miniconda3 -WORKDIR /code -RUN conda install -c conda-forge xesmf cartopy psycopg2 -y -COPY ./executor/requirements.txt /code/requirements.txt -RUN pip install --no-cache-dir -r requirements.txt -COPY geokube_packages/geokube-0.1a0-py3-none-any.whl /code -COPY geokube_packages/intake_geokube-0.1a0-py3-none-any.whl /code -RUN pip install /code/geokube-0.1a0-py3-none-any.whl -RUN pip install /code/intake_geokube-0.1a0-py3-none-any.whl -COPY ./db/dbmanager /code/app/db/dbmanager -COPY ./utils/wait-for-it.sh /code/wait-for-it.sh -COPY ./datastore /code/app/datastore -COPY ./geoquery /code/app/geoquery -COPY ./resources /code/app/resources -COPY ./executor/app /code/app -CMD [ "python", "./app/main.py" ] \ No newline at end of file +ARG REGISTRY=rg.nl-ams.scw.cloud/geodds-production +ARG TAG=latest +ARG SENTINEL_USERNAME=... +ARG SENTINEL_PASSWORD=... +FROM $REGISTRY/geodds-datastore:$TAG +WORKDIR /app +ENV SENTINEL_USERNAME=$SENTINEL_USERNAME +ENV SENTINEL_PASSWORD=$SENTINEL_PASSWORD +COPY requirements.txt /code/requirements.txt +RUN pip install --no-cache-dir -r /code/requirements.txt +COPY app /app +CMD [ "python", "main.py" ] diff --git a/executor/app/main.py b/executor/app/main.py index c59ef92..35b90fe 100644 --- a/executor/app/main.py +++ b/executor/app/main.py @@ -1,146 +1,477 @@ -# We have three type of executor: -# - query executor (query) -# - estimate query executor (estimate) -# - catalog info executor (info) -# -# Configuration parameters for the executor: -# type: query, estimate, catalog -# dask cluster base ports (if they are not provided the cluster is not created: (e.g. 
for estimate and catalog info)) -# channel: channel_queue, channel_type, channel_durable -# catalog path -# store_path (where to store the query results) -# -# An executor will register to the DB and get a worker id -# if dask cluster base ports are provided, a dask cluster is created -# an executor mush have a unique port for the dask scheduler/dashboard - import os -import json +import time +import datetime import pika -from dask.distributed import Client, LocalCluster +import logging +import asyncio +import threading, functools +from zipfile import ZipFile + +import numpy as np +from dask.distributed import Client, LocalCluster, Nanny, Status +from dask.delayed import Delayed +from geokube.core.datacube import DataCube +from geokube.core.dataset import Dataset +from geokube.core.field import Field from datastore.datastore import Datastore -from db.dbmanager.dbmanager import DBManager, RequestStatus +from workflow import Workflow +from intake_geokube.queries.geoquery import GeoQuery +from dbmanager.dbmanager import DBManager, RequestStatus + +from meta import LoggableMeta +from messaging import Message, MessageType + +_BASE_DOWNLOAD_PATH = "/downloads" + + +def get_file_name_for_climate_downscaled(kube: DataCube, message: Message): + query: GeoQuery = GeoQuery.parse(message.content) + is_time_range = False + if query.time: + is_time_range = "start" in query.time or "stop" in query.time + var_names = list(kube.fields.keys()) + if len(kube) == 1: + if is_time_range: + FILENAME_TEMPLATE = "{ncvar_name}_VHR-PRO_IT2km_CMCC-CM_{product_id}_CCLM5-0-9_1hr_{start_date}_{end_date}_{request_id}" + ncvar_name = kube.fields[var_names[0]].ncvar + return FILENAME_TEMPLATE.format( + product_id=message.product_id, + request_id=message.request_id, + ncvar_name=ncvar_name, + start_date=np.datetime_as_string( + kube.time.values[0], unit="D" + ), + end_date=np.datetime_as_string(kube.time.values[-1], unit="D"), + ) + else: + FILENAME_TEMPLATE = "{ncvar_name}_VHR-PRO_IT2km_CMCC-CM_{product_id}_CCLM5-0-9_1hr_{request_id}" + ncvar_name = kube.fields[var_names[0]].ncvar + return FILENAME_TEMPLATE.format( + product_id=message.product_id, + request_id=message.request_id, + ncvar_name=ncvar_name, + ) + else: + if is_time_range: + FILENAME_TEMPLATE = "VHR-PRO_IT2km_CMCC-CM_{product_id}_CCLM5-0-9_1hr_{start_date}_{end_date}_{request_id}" + return FILENAME_TEMPLATE.format( + product_id=message.product_id, + request_id=message.request_id, + start_date=np.datetime_as_string( + kube.time.values[0], unit="D" + ), + end_date=np.datetime_as_string(kube.time.values[-1], unit="D"), + ) + else: + FILENAME_TEMPLATE = ( + "VHR-PRO_IT2km_CMCC-CM_{product_id}_CCLM5-0-9_1hr_{request_id}" + ) + return FILENAME_TEMPLATE.format( + product_id=message.product_id, + request_id=message.request_id, + ) + + +def rcp85_filename_condition(kube: DataCube, message: Message) -> bool: + return ( + message.dataset_id == "climate-projections-rcp85-downscaled-over-italy" + ) + + +def get_history_message(): + return ( + f"Generated by CMCC DDS version 0.9.0 {str(datetime.datetime.now())}" + ) + + +def persist_datacube( + kube: DataCube, + message: Message, + base_path: str | os.PathLike, +) -> str | os.PathLike: + if rcp85_filename_condition(kube, message): + path = get_file_name_for_climate_downscaled(kube, message) + else: + var_names = list(kube.fields.keys()) + if len(kube) == 1: + path = "_".join( + [ + var_names[0], + message.dataset_id, + message.product_id, + message.request_id, + ] + ) + else: + path = "_".join( + [message.dataset_id, 
message.product_id, message.request_id] + ) + kube._properties["history"] = get_history_message() + if isinstance(message.content, GeoQuery): + format = message.content.format + format_args = message.content.format_args + else: + format = "netcdf" + match format: + case "netcdf": + full_path = os.path.join(base_path, f"{path}.nc") + kube.to_netcdf(full_path) + case "geojson": + full_path = os.path.join(base_path, f"{path}.json") + kube.to_geojson(full_path) + case "png": + full_path = os.path.join(base_path, f"{path}.png") + kube.to_image(full_path, **format_args) + case "jpeg": + full_path = os.path.join(base_path, f"{path}.jpg") + kube.to_image(full_path, **format_args) + case _: + raise ValueError(f"format `{format}` is not supported") + return full_path + + +def persist_dataset( + dset: Dataset, + message: Message, + base_path: str | os.PathLike, +): + def _get_attr_comb(dataframe_item, attrs): + return "_".join([dataframe_item[attr_name] for attr_name in attrs]) + + def _persist_single_datacube(dataframe_item, base_path, format, format_args=None): + if not format_args: + format_args = {} + dcube = dataframe_item[dset.DATACUBE_COL] + if isinstance(dcube, Delayed): + dcube = dcube.compute() + if len(dcube) == 0: + return None + for field in dcube.fields.values(): + if 0 in field.shape: + return None + attr_str = _get_attr_comb(dataframe_item, dset._Dataset__attrs) + var_names = list(dcube.fields.keys()) + if len(dcube) == 1: + path = "_".join( + [ + var_names[0], + message.dataset_id, + message.product_id, + attr_str, + message.request_id, + ] + ) + else: + path = "_".join( + [ + message.dataset_id, + message.product_id, + attr_str, + message.request_id, + ] + ) + match format: + case "netcdf": + full_path = os.path.join(base_path, f"{path}.nc") + dcube.to_netcdf(full_path) + case "geojson": + full_path = os.path.join(base_path, f"{path}.json") + dcube.to_geojson(full_path) + case "png": + full_path = os.path.join(base_path, f"{path}.png") + dcube.to_image(full_path, **format_args) + case "jpeg": + full_path = os.path.join(base_path, f"{path}.jpg") + dcube.to_image(full_path, **format_args) + case _: + raise ValueError(f"format: {format} is not supported!") + return full_path + + if isinstance(message.content, GeoQuery): + format = message.content.format + format_args = message.content.format_args + else: + format = "netcdf" + datacubes_paths = dset.data.apply( + _persist_single_datacube, base_path=base_path, format=format, format_args=format_args, axis=1 + ) + paths = datacubes_paths[~datacubes_paths.isna()] + if len(paths) == 0: + return None + elif len(paths) == 1: + return paths.iloc[0] + zip_name = "_".join( + [message.dataset_id, message.product_id, message.request_id] + ) + path = os.path.join(base_path, f"{zip_name}.zip") + with ZipFile(path, "w") as archive: + for file in paths: + archive.write(file, arcname=os.path.basename(file)) + for file in paths: + os.remove(file) + return path + -def ds_query(ds_id, prod_id, query, compute, catalog_path): - ds = Datastore(catalog_path) - kube = ds.query(ds_id, prod_id, query, compute) - kube.persist('.') - return kube +def process(message: Message, compute: bool): + res_path = os.path.join(_BASE_DOWNLOAD_PATH, message.request_id) + os.makedirs(res_path, exist_ok=True) + match message.type: + case MessageType.QUERY: + kube = Datastore().query( + message.dataset_id, + message.product_id, + message.content, + compute, + ) + case MessageType.WORKFLOW: + kube = Workflow.from_tasklist(message.content).compute() + case _: + raise 
ValueError("unsupported message type") + if isinstance(kube, Field): + kube = DataCube( + fields=[kube], + properties=kube.properties, + encoding=kube.encoding, + ) + match kube: + case DataCube(): + return persist_datacube(kube, message, base_path=res_path) + case Dataset(): + return persist_dataset(kube, message, base_path=res_path) + case _: + raise TypeError( + "expected geokube.DataCube or geokube.Dataset, but passed" + f" {type(kube).__name__}" + ) -class Executor(): - def __init__(self, broker, catalog_path, store_path): - self._datastore = Datastore(catalog_path) - self._catalog_path = catalog_path +class Executor(metaclass=LoggableMeta): + _LOG = logging.getLogger("geokube.Executor") + + def __init__(self, broker, store_path): self._store = store_path - broker_conn = pika.BlockingConnection(pika.ConnectionParameters(host=broker)) + broker_conn = pika.BlockingConnection( + pika.ConnectionParameters(host=broker, heartbeat=10), + ) + self._conn = broker_conn self._channel = broker_conn.channel() self._db = DBManager() - - def create_dask_cluster(self, dask_cluster_opts): - self._worker_id = self._db.create_worker(status='enabled', - dask_scheduler_port=dask_cluster_opts['scheduler_port'], - dask_dashboard_address=dask_cluster_opts['dashboard_address']) - dask_cluster = LocalCluster(n_workers=dask_cluster_opts['n_workers'], - scheduler_port=dask_cluster_opts['scheduler_port'], - dashboard_address=dask_cluster_opts['dashboard_address'] - ) + + def create_dask_cluster(self, dask_cluster_opts: dict = None): + if dask_cluster_opts is None: + dask_cluster_opts = {} + dask_cluster_opts["scheduler_port"] = int( + os.getenv("DASK_SCHEDULER_PORT", 8188) + ) + dask_cluster_opts["processes"] = True + port = int(os.getenv("DASK_DASHBOARD_PORT", 8787)) + dask_cluster_opts["dashboard_address"] = f":{port}" + dask_cluster_opts["n_workers"] = None + dask_cluster_opts["memory_limit"] = "auto" + self._worker_id = self._db.create_worker( + status="enabled", + dask_scheduler_port=dask_cluster_opts["scheduler_port"], + dask_dashboard_address=dask_cluster_opts["dashboard_address"], + ) + self._LOG.info( + "creating Dask Cluster with options: `%s`", + dask_cluster_opts, + extra={"track_id": self._worker_id}, + ) + dask_cluster = LocalCluster( + n_workers=dask_cluster_opts["n_workers"], + scheduler_port=dask_cluster_opts["scheduler_port"], + dashboard_address=dask_cluster_opts["dashboard_address"], + memory_limit=dask_cluster_opts["memory_limit"], + ) + self._LOG.info( + "creating Dask Client...", extra={"track_id": self._worker_id} + ) self._dask_client = Client(dask_cluster) + self._nanny = Nanny(self._dask_client.cluster.scheduler.address) + + def maybe_restart_cluster(self, status: RequestStatus): + if status is RequestStatus.TIMEOUT: + self._LOG.info("recreating the cluster due to timeout") + self._dask_client.cluster.close() + self.create_dask_cluster() + if self._dask_client.cluster.status is Status.failed: + self._LOG.info("attempt to restart the cluster...") + try: + asyncio.run(self._nanny.restart()) + except Exception as err: + self._LOG.error( + "couldn't restart the cluster due to an error: %s", err + ) + self._LOG.info("closing the cluster") + self._dask_client.cluster.close() + if self._dask_client.cluster.status is Status.closed: + self._LOG.info("recreating the cluster") + self.create_dask_cluster() - def query_and_persist(self, ds_id, prod_id, query, compute, format): - kube = self._datastore.query(ds_id, prod_id, query, compute) - kube.persist(self._store, format=format) - - def 
estimate(self, channel, method, properties, body): - m = body.decode().split('\\') - dataset_id = m[0] - product_id = m[1] - query = m[2] - kube = self._datastore.query(dataset_id, product_id, query) - channel.basic_publish(exchange='', - routing_key=properties.reply_to, - properties=pika.BasicProperties(correlation_id = properties.correlation_id), - body=str(kube.get_nbytes())) - channel.basic_ack(delivery_tag=method.delivery_tag) - - def info(self, channel, method, properties, body): - m = body.decode().split('\\') - oper = m[0] # could be list or info - if (oper == 'list'): - if len(m) == 1: # list datasets - response = json.loads(self._datastore.dataset_list()) - if len(m) == 2: # list dataset products - dataset_id = m[1] - response = json.loads(self._datastore.product_list(dataset_id)) - - if (oper == 'info'): - if (len(m) == 2): # dataset info - dataset_id = m[1] - response = json.loads(self._datastore.dataset_info(dataset_id)) - if (len(m) == 3): # product info - dataset_id = m[1] - product_id = m[2] - response = json.loads(self._datastore.product_info(dataset_id, product_id)) - - channel.basic_publish(exchange='', - routing_key=properties.reply_to, - properties=pika.BasicProperties(correlation_id = \ - properties.correlation_id), - body=response) - channel.basic_ack(delivery_tag=method.delivery_tag) - - def query(self, channel, method, properties, body): - m = body.decode().split('\\') - request_id = m[0] - dataset_id = m[1] - product_id = m[2] - query = m[3] - format = m[4] - - self._db.update_request(request_id=request_id, worker_id=self._worker_id, status=RequestStatus.RUNNING) - # future = self._dask_client.submit(self.query_and_persist, dataset_id, product_id, query, False, format) - future = self._dask_client.submit(ds_query, dataset_id, product_id, query, False, self._catalog_path) + def ack_message(self, channel, delivery_tag): + """Note that `channel` must be the same pika channel instance via which + the message being ACKed was retrieved (AMQP protocol constraint). + """ + if channel.is_open: + channel.basic_ack(delivery_tag) + else: + self._LOG.info( + "cannot acknowledge the message. channel is closed!" + ) + pass + + def retry_until_timeout( + self, + future, + message: Message, + retries: int = 30, + sleep_time: int = 10, + ): + assert retries is not None, "`retries` cannot be `None`" + assert sleep_time is not None, "`sleep_time` cannot be `None`" + status = fail_reason = location_path = None try: - future.result() - self._db.update_request(request_id=request_id, worker_id=self._worker_id, status=RequestStatus.DONE) + self._LOG.debug( + "attempt to get result for the request", + extra={"track_id": message.request_id}, + ) + for _ in range(retries): + if future.done(): + self._LOG.debug( + "result is done", + extra={"track_id": message.request_id}, + ) + location_path = future.result() + status = RequestStatus.DONE + self._LOG.debug( + "result save under: %s", + location_path, + extra={"track_id": message.request_id}, + ) + break + self._LOG.debug( + f"result is not ready yet. 
sleeping {sleep_time} sec", + extra={"track_id": message.request_id}, + ) + time.sleep(sleep_time) + else: + self._LOG.info( + "processing timout", + extra={"track_id": message.request_id}, + ) + future.cancel() + status = RequestStatus.TIMEOUT + fail_reason = "Processing timeout" except Exception as e: - print(e) - self._db.update_request(request_id=request_id, worker_id=self._worker_id, status=RequestStatus.FAILED) + self._LOG.error( + "failed to get result due to an error: %s", + e, + exc_info=True, + stack_info=True, + extra={"track_id": message.request_id}, + ) + status = RequestStatus.FAILED + fail_reason = f"{type(e).__name__}: {str(e)}" + return (location_path, status, fail_reason) + + def handle_message(self, connection, channel, delivery_tag, body): + message: Message = Message(body) + self._LOG.debug( + "executing query: `%s`", + message.content, + extra={"track_id": message.request_id}, + ) + + # TODO: estimation size should be updated, too + self._db.update_request( + request_id=message.request_id, + worker_id=self._worker_id, + status=RequestStatus.RUNNING, + ) + + self._LOG.debug( + "submitting job for workflow request", + extra={"track_id": message.request_id}, + ) + future = self._dask_client.submit( + process, + message=message, + compute=False, + ) + location_path, status, fail_reason = self.retry_until_timeout( + future, + message=message, + retries=int(os.environ.get("RESULT_CHECK_RETRIES")), + ) + self._db.update_request( + request_id=message.request_id, + worker_id=self._worker_id, + status=status, + location_path=location_path, + size_bytes=self.get_size(location_path), + fail_reason=fail_reason, + ) + self._LOG.debug( + "acknowledging request", extra={"track_id": message.request_id} + ) + cb = functools.partial(self.ack_message, channel, delivery_tag) + connection.add_callback_threadsafe(cb) - channel.basic_ack(delivery_tag=method.delivery_tag) + self.maybe_restart_cluster(status) + self._LOG.debug( + "request acknowledged", extra={"track_id": message.request_id} + ) + + def on_message(self, channel, method_frame, header_frame, body, args): + (connection, threads) = args + delivery_tag = method_frame.delivery_tag + t = threading.Thread( + target=self.handle_message, + args=(connection, channel, delivery_tag, body), + ) + t.start() + threads.append(t) def subscribe(self, etype): - print(f'subscribe channel: {etype}_queue') - self._channel.queue_declare(queue=f'{etype}_queue', durable=True) + self._LOG.debug( + "subscribe channel: %s_queue", etype, extra={"track_id": "N/A"} + ) + self._channel.queue_declare(queue=f"{etype}_queue", durable=True) self._channel.basic_qos(prefetch_count=1) - self._channel.basic_consume(queue=f'{etype}_queue', on_message_callback=getattr(self, etype)) + + threads = [] + on_message_callback = functools.partial( + self.on_message, args=(self._conn, threads) + ) + + self._channel.basic_consume( + queue=f"{etype}_queue", on_message_callback=on_message_callback + ) def listen(self): while True: self._channel.start_consuming() -if __name__ == "__main__": + def get_size(self, location_path): + if location_path and os.path.exists(location_path): + return os.path.getsize(location_path) + return None - broker = os.getenv('BROKER', 'broker') - executor_types = os.getenv('EXECUTOR_TYPES', 'query').split(',') - catalog_path = os.getenv('CATALOG_PATH', 'catalog.yaml') - store_path = os.getenv('STORE_PATH', '.') - executor = Executor(broker=broker, - catalog_path=catalog_path, - store_path=store_path) - print('channel subscribe') +if __name__ == 
"__main__": + broker = os.getenv("BROKER_SERVICE_HOST", "broker") + executor_types = os.getenv("EXECUTOR_TYPES", "query").split(",") + store_path = os.getenv("STORE_PATH", ".") + + executor = Executor(broker=broker, store_path=store_path) + print("channel subscribe") for etype in executor_types: - if etype == 'query': - dask_cluster_opts = {} - dask_cluster_opts['scheduler_port'] = int(os.getenv('DASK_SCHEDULER_PORT', 8188)) - port = int(os.getenv('DASK_DASHBOARD_PORT', 8787)) - dask_cluster_opts['dashboard_address'] = f':{port}' - dask_cluster_opts['n_workers'] = int(os.getenv('DASK_N_WORKERS', 1)) - executor.create_dask_cluster(dask_cluster_opts) + if etype == "query": + executor.create_dask_cluster() executor.subscribe(etype) - - print('waiting for requests ...') - executor.listen() \ No newline at end of file + + print("waiting for requests ...") + executor.listen() diff --git a/executor/app/messaging.py b/executor/app/messaging.py new file mode 100644 index 0000000..37ce25a --- /dev/null +++ b/executor/app/messaging.py @@ -0,0 +1,45 @@ +import os +import logging +from enum import Enum + +from intake_geokube.queries.geoquery import GeoQuery +from intake_geokube.queries.workflow import Workflow + +MESSAGE_SEPARATOR = os.environ["MESSAGE_SEPARATOR"] + + +class MessageType(Enum): + QUERY = "query" + WORKFLOW = "workflow" + + +class Message: + _LOG = logging.getLogger("geokube.Message") + + request_id: int + dataset_id: str = "" + product_id: str = "" + type: MessageType + content: GeoQuery | Workflow + + def __init__(self, load: bytes) -> None: + self.request_id, msg_type, *query = load.decode().split( + MESSAGE_SEPARATOR + ) + match MessageType(msg_type): + case MessageType.QUERY: + self._LOG.debug("processing content of `query` type") + assert len(query) == 3, "improper content for query message" + self.dataset_id, self.product_id, self.content = query + self.content: GeoQuery = GeoQuery.parse(self.content) + self.type = MessageType.QUERY + case MessageType.WORKFLOW: + self._LOG.debug("processing content of `workflow` type") + assert len(query) == 1, "improper content for workflow message" + self.content: Workflow = Workflow.parse(query[0]) + self.dataset_id = self.content.dataset_id + self.product_id = self.content.product_id + self.type = MessageType.WORKFLOW + case _: + self._LOG.error("type `%s` is not supported", msg_type) + raise ValueError(f"type `{msg_type}` is not supported!") diff --git a/executor/app/meta.py b/executor/app/meta.py new file mode 100644 index 0000000..739ef62 --- /dev/null +++ b/executor/app/meta.py @@ -0,0 +1,27 @@ +"""Module with `LoggableMeta` metaclass""" +import os +import logging + + +class LoggableMeta(type): + """Metaclass for dealing with logger levels and handlers""" + + def __new__(cls, child_cls, bases, namespace): + # NOTE: method is called while creating a class, not an instance! 
+        res = super().__new__(cls, child_cls, bases, namespace)
+        if hasattr(res, "_LOG"):
+            format_ = os.environ.get(
+                "LOGGING_FORMAT",
+                "%(asctime)s %(name)s %(levelname)s %(lineno)d"
+                " %(track_id)s %(message)s",
+            )
+            formatter = logging.Formatter(format_)
+            logging_level = os.environ.get("LOGGING_LEVEL", "INFO")
+            res._LOG.setLevel(logging_level)
+            stream_handler = logging.StreamHandler()
+            stream_handler.setFormatter(formatter)
+            stream_handler.setLevel(logging_level)
+            res._LOG.addHandler(stream_handler)
+            for handler in logging.getLogger("geokube").handlers:
+                handler.setFormatter(formatter)
+        return res
diff --git a/executor/requirements.txt b/executor/requirements.txt
index c4a403b..f188e90 100644
--- a/executor/requirements.txt
+++ b/executor/requirements.txt
@@ -1,7 +1,4 @@
-pika
-bokeh
-dask
-distributed
-intake
-pydantic
-sqlalchemy
\ No newline at end of file
+pika==1.2.1
+prometheus_client
+sqlalchemy
+pydantic
\ No newline at end of file

From 09353264f99fc3328b6ab2f6e4514193b6faf693 Mon Sep 17 00:00:00 2001
From: Marco Mancini
Date: Thu, 11 Jan 2024 09:19:21 +0100
Subject: [PATCH 04/15] Removed old geokube packages

---
 geokube_packages/geokube-0.1a0-py3-none-any.whl   | Bin 86682 -> 0 bytes
 .../intake_geokube-0.1a0-py3-none-any.whl         | Bin 13930 -> 0 bytes
 2 files changed, 0 insertions(+), 0 deletions(-)
 delete mode 100644 geokube_packages/geokube-0.1a0-py3-none-any.whl
 delete mode 100644 geokube_packages/intake_geokube-0.1a0-py3-none-any.whl

diff --git a/geokube_packages/geokube-0.1a0-py3-none-any.whl b/geokube_packages/geokube-0.1a0-py3-none-any.whl
deleted file mode 100644
index 99341a80639a5b9ae3ce0c2089ced3901880a684..0000000000000000000000000000000000000000
GIT binary patch
[binary literal data for the deleted wheel omitted]
zP`s^h+(_03l3dKso|J4HPf&%(pQ|F!oO9S|D!IO90%xV`w~ss|lQd(oofit2@d0bP zz`cQ{o(>5RuihU-oEu~nc8*n_G{C4SQrqP-dV%&e%b#WM+b_`qqjrsc(Ho%-A;cXW)coN6Cgs61-%PBLXVJ@ryD>C@ww zsOBfvoRZP6F=oahH!2pOnG`zSD(asyXb z*Yzi0fA*K4fJ&p#hkMoB6LZg zuMc>L5-psw2*x=+a*}rf)m4Vu&Z;^q(F<9@2VkZ)7MS}u0?iP~2T(hD$S7rjr=~MY zWx{8EC6w{319>82b3wC>6FHvy-00|n&VA$5_ejTZh+I0Z& za8ce!NDa?~uS%BE(v+PBw_{b#z@2(DijY}nCEFWR+Pp~ z{{&2tdX765Q5dYJD)B8nU&I;}jeE+dLOXF!oh%vI4(rqB3nl#{!SJtjBRm4M;sYw; zCm99Dl{oJe`TEaCmpNNOKBt=h%pG31gR+BomUeX(!;%9tj{KrwxD8DDF0i4S{CgC| znKF}DZwe%jC2QIjv@zyFvrXME8Z8&-#`c`uDLcmOjIJKxkFb%Mav>uL;j9>39l$=f zFpZ_amgs|edxP8QE|6o0I-%M!GY7%Pc}ksHEy5q|^D()uD{LARO{+n?S7#HHb`Bgu zhDp*s9@GNCR^X=8n33jUI7{ldXvN0Bo3~+JD(}=ezz@UMpPTAQyvuGl{&uxGF=+E} z>!e3$#CvyK@jhNC6x1Pv%ubPiALK4EcgK4nQ*PqK41q?ou)Uy&w~`n&<3oZixf($g zSCji0@%AR7uSR#$mvAcArm7(BnZbrDoPoq}7glPZEeu8`@~PWcXE*OKD}yUyF5K|C zU~PE{Dd&YBEj;^_Ahcr!qu7`YmT-frCxxzwc#YBWz6kH258R1_DEF^iL{;RH+#2ZD z)Gb*U*L02y_qDa@l#<*zHMs2L@q?1o-;Y19+eYe^u~XxYbrL)|<2N>2CAHlxW73Ag zrL~HPL9WnE>e1q^df}pzOT=@H|^6*oLvwE{@_(l`NhH8b9A7 zqIj@g680u^T1!H6zhPbu96K6~{5tSR7Tj21cA#2(WSclbqDL@_FdW~|b0L$v?ApS) z*@<-Xkm1m&nv8Odj8fv0Ib~x6^C8=K#}~z(pGV}eiW|CntHw4E=wHFfT^Hkj(CmH+ zdUq6D&)IVfxfftj#w}aOQ8^<(ky?wNRxYLXcHDVR^2;@n%dKG^h`C5hyg%Uui2F360EY@grO_n;h(3$|Kk% zwsi*)TKMV=+c_KV6Y?(6hqa;T0mc74zQ^G=V@7IQC3KdKmi)IJ!b@7W23W04hz^2D zq@wPDQiFYIC5{S(vF9ItRB^#b!Q=KTqet;Fot@?%ubJOsLIyp#E5tlw!^)at7fY8A z7Efb7QA8w{ntsz$kWR`rE%d(CYqE71-;T~HE@|i2&4yxbx)kMyOv>&+-@26%$I(Q9g-lJlIf7gAQ9{;_Rv>Ump!Wax0RO$^PtkySLV_)F4or+S*V9 zskrm?D%U(;l48C?Fkx6$LL7;mhN!8GTQml%n}>vEIgv2Z7n_8qwyv(uEi{5IjiL}7 zR>|t(hpt;nJ<->%>rP(l=VmrW>Sq!8^4IO8sf$h%6ALXCA1lwDNN$b#%h6mCY^?2B zu5MONPP`}E#34F6WYC(_68%TNR+g^OD0yQ_gmU>fVI@~$rTzNS}s+`qt&<&lqL z505jh^?I}O_j~h8RB>dF0Qn5yeK{QvApB2Zr!Ig*U4YkSn~w~(z4WjmZW)2!jX|Qe zqy<23pqDveK1($V6HF6O)V2GV6bPj&Z}NZl>yT5>)_~pHbRtqpuDgu3oxK{AA8#hW zF%an3bNgn7_Kl6{p?O{MEd9O~r{7qg(?WCDIQm9p(Yk|!3oj?#vcA8n23#H)*ld8KvH+08 zE>*a$#^3d<9ZvY&XfV`%9-4=AN*--Uu9jM=hrV6bBXu((;z3ATrHypeV6NV-} zS%Pfq^l{7go%)2sb+`hYf`@#5gX`hecIJ*Y2~z@_LKj!CDT-3M)jpWPTyRI5D>)4} zqNF+8&q!A;j+$7!gi$ENqPZw(HDC6gO?sWij9L>b^DwMpznoqhx|K}p8clMK#gz7K zaxQFX9;&1kor32w^|Gf*f_DUJ?G7HvL$ z@Q%D%fwi(Q?F-X^mE(%lD=W3Sl}R`C(jF~I-bBYmlC=9ps`y?@_m;8%+t-E z4KU)^iSuF}T5vCQ8!Bf97R!=Ot+Guf;u5g1U>|VyyPERlYxF4{=BCF0>Q6n;x&HiN z%6q$=zJ529#Fct*;Ak}$Y$c5YJ_8mgSqb}|+!Yc0mxtaL4i~#?B1-7Fa#?hKwKZj; zqi78dF%VHh8kX#{JE1@jy_;}Qoak7)9Fs@jx zgzP9yqEhd@Rb+k=?bBP)UKW7Qm-$l-snL^hJ8ye zocgS|+h$~n4{g#BS#gpMX0(Kc74*PB`o`PPQk~IH!Zgda{LJVp@(gA^Q!oO}p9Q(t zlbw?oY?CjV53@3EOYm7!rf143?YI*?=NvpCb*HTlSLL+C0)Heoh#4NmS$ri?dM_jR z7S{e87w5)%vqA+!ZuEc4!J2PODW`tkXPOR z?tsyK)a4CiQlkO7WCV&l#br|?gaco38*aPhNN>O2#Ae0h{+XCr)*xR@EAA=Yk87Us z%TdQ0hXInM?gc)-xZw9CEY8(yP$C`2LeyG@o zr|SNhJh$b>;NbaMDQH0$fUT%-}WEV0xV% zRj$_Y5Z{n(1ntV|q(gs(O?4iYQEfyyV;JysPhz`BqE!)?T zd^PJqe;*NLhMqFo7L)tnN|a9s(~N2nwjl=^fH-XejuXfsbe2pG7lkbp+8Q!oBUA=s zilCOJ4o+&q7Qv7KS+WUvk0>2FHbb{N@l;2tn6_3WJ)oqb90;W27C+iMG^fJ-4b#s#LRxLj>Z@*v(0O|BWGR8Bz+fM-Ws~?pUPSm zk71r_ab-S~Jh^9KyAP3a`&4-K5s#7e6G>H)UdXHDO{@=RII>fZ)gYhlO$T?1IGo!b7>Kl(CyKF_K60lFp7BVg$Gz3Tj5y1Rj+qrpouxBB>TS-|k=f94ZWS_D^- z5dsQ9OuSA!g6t-o?_kne)xl!{Gs{^mFdwJn%jf5ctTrzmw@?8jwH-38fO5z>pN>Z% z5tn38Prjyr71$@3^5zgiB@ZT-T6HCO{*FSz=v|nXAh)_coPbKfd@iQ(e5EVRv3}bc zGCiv?f$OAzn+W|fDNCh{`_8^JTcCq^cQ%*_;;?J&x*3nX&nTTghL&$yJ$zdR`GSF? 
z>(-`$KxNP%&uZO$q?X+9B&-v{*(L|sl)(Kb-0Kmt0rf}bMSW_4;l^+*xN46&#UOA_#Z~yMW_5ECeXCM*Xt(G&8Vr3aK?4i6ou`R2^%^4sy4w_ zrD4dQ1nu4`c#S1a0aGfQZ_BB)4stZ|ypd_Ul>0qLffdqFZUfcvvIeZ}>rnqCyCw}i zErS6)t+9oZGawDEsU4lPgpi1=lE|8-meZGkw}42ZCC5S~3-H1WW((~7@anH26+#xm z&9eb6X_kBs5pTF?+sMU;gV{_Z?y=y^qTWoY=Q^Q~1UIgQDsn_1vpz*u$--z+A z+}oC%Z_6Xb$&vjODWv}IYvuX&<3tG zF(xoZ_rxLpN_BojQ(d-m!T4ORNyLg0PAon_rMtU@DlJwTSc06BDNM8x%MF5$QwhNf zmQD;iP>Hh2cZdi1YyWo;k)dZzJ29!wAT6xK?!oarJw)iD($UkdjR3BJkc+&A!eIQ- zjBotMF$9QjGkVEPAj$6v&+)AY!O}we-p+z}Vu}sEuRCNRO(5KTU}!{yvIoEMblk*g z-^@q&unxa#f7(zwqX-+y%lA{vh%dvx2d{%T3M!O-MiKge7Uvx)P}&53S>juu;h?#w zRc69xCT&VY83rJ!Lm>%KZO8*~Q~%I+ZPf*v1~R6Jz5eyV)Tnh=M6{bgM+yx*-(X>Y zR|v;E2zi?iyui5l1Q}UO{Zh*93(9?gsCdZ(z#VFLAiH=bzI0`+OBrH=_Os38{NR-A zhraGQ2t6<;@2MElNrt)4pmoE<@UI0g298NlQ*&cGAVb?zNyS3@hpgwHmQY>w|pPncVB9tLgg|hH!md> z8|a#2R5wz(2jns3L)2?Sph+*wxm-HB=?uqjY^9+XOJ0M88ReOaswtMj>G4P-*j#yT z#^aj~!eCPSm<#OF6Re4K#czO;B@e5X84fBD0vLifuZt4sYVt78H-L*fVix z|5ybZsSE;T4(IO>pu02_vTbhXpdjlqd zXFdu+uC2WTNCI-c2?>=Vp__e>r4Ia-ypvckNq#=oVV3^2(XLSgaLSY}McZcD^ZkF(DC|$G1L%BH6;L45b{8pI@%pLMu?q+zD*ZSKLybj$VaArafpR;ww-vAi=DyO8>)QJw%UOa zVi`6xsb1x~1W2_PJ;|F}36|?ed|Qkb8g*cf1tXj+0-eAgzN=lDRYahc8{&|cH*g1I z1q;(`nd)DCv}Aw0LqpNvT|SrQAhR}rcw}&zst}%fSd4+Ty$h)9l1e$YhN zg36P{@&j+m1+gI>AwpeOKJ3n1X>sUBHe@mQX3{e{V*gSAE{a1JipeB2lxffK6zI3S zs6c9thOMraSiG2=g%i()Pr3@;YQ8m{M-%q z+oH{ZW6ijDi?oS=Az^$~_sv~^9nr0+VA#JD!R1Y${;a?-Bu_!Hkf} z+P{yO1jd;MRb9PRR19m6 z={tX%ek4Y$8$T4Pi#nS<{?KA}0>;Lf^aL9jU_-n&Uf% zI`pjNjW&pdMG{6aR=aG(F`$m7oNAkLX|eDJz71eAqv3Xe$eupms)^bOp&)=Bt5U{= z!CwRGF)WB-dyAQpjfkt*a$9>2x&486Febt;yqUoh95Y|rBy37BvD+w@gJb7g3^aPy z*E(gnC{-OIb3k<30D=yW*4irFhVCT6EgnP-TVciq2!&gNT&4~tX!>vh%4l3Cqe9MA z!lsah!pD?A*$ft{z_>mRJJNw5EJ6_c_tEm+;W|tV`!wDQpHZy|@)TwaxDw?KAL9}# z_&?~+vI)pwI!iamqNNHadZ~t9Q|W1dtqPFqayn@1(njW z0oJJni9S&MttYys9~iSkT?S#mrtE3b%oTx|;U50DA9p^&{g`X-*FY)w9B57jcbo!* z1J}W*mW}8at!;ucY++o1^2sZtyJ&hnh3Xwz{My@;yAP})l4+@_{k z$JVJysO-%ZsmfbJEe@Zgm9cir89#+*j3|)NI}A0>eY;5mI#oe|adtY-38Fk2vN%Fw zrDSR7BxYzO2>%h)%MdvhqOpu|JUp#pc-ORZP7qqMPMXF0%vO<7q>*omMo%4BV)fP* z-{WxvwuyXwUEXII!*+z;rg3iF0F!Wj6>_($MDkn&u~1+Sxf7`t1}`S?t&vn+aAx$I z=S`;foQzk+Be2_0?s)>`E0Uwvl7sD`oiH~j{00OB@CFP!(}{#%agrU)*aXep{c;8_ zNwkfI(DqGZr#I)MmLbv)?>}b8e>CuA2ep}E7nZbCs<^Y6d-@3p(*lyEFF}?hxWz`J zE0@QrZ?cI?f-}`h#EQ#a!jm!?Vh!#4V}4T_N?+>HgZ6V>-U1Yzjnz5Q4cXEzVmv`< z3x_?3R0dmvIu!U&oM>_j8^1dd}ZEe3M^&-=G zuVK43#E!4_`m_6wEnRQz)ioN`>(S%-rsmPgUAy)0{v)&V>)j*hgXlf%O|kUo7H4Fc zCPfpYnFIl5vP9q__^GdPCB=j_MTjs1ZF=jSowQv?Wl5v+t*yI9m5Y2jm8UT0vsY_h z@C8Qu5k{{^@3Sc0uhX2VEn4~d=x>><(w=r1;IGr2_ML>beo4DxQPb+6vHorfe0gS| zB-04*%+#y1#jM@sv~a2Vj;9D@5mDy`vR_Yd)mnFCR&?JVWy=9X+e&QcRJz0{Z7lor zOv3^zO|LGKwHC+AFQs>1Us3x~cQnfO;KDmRQ4Q$v5PT-cq)QDBj!Qu7psrRys%bb7 z2YDSLr#AzHj7r_Ua)Lq;k9)hPBrRFj_sVza5D`lyY1WDUgkQ_ev8-2fK#Pq zRN4;k_HB`oj})Gf$U?=Ud?#&~~jU#h<<J-1ZK9$vv~q zM>?3Sc3T+gx>d|xCHYimS=uiad(dqas_)5wT=H|iN%n^xc)%V_pYb~st-$NiNPsY~ zTA?_jo6g=)pq_tHgX9|WzAXit2)fKX2sx3<#82Ul@tw%cY`x)R7# zI|DOA>I*V5m?LgQ@{C6v>)NN=VFrCh=iR093&eW%3~W32OzV6Q+^ILig|TC2wdP~y zMsvX+H=ZuRE?28o_LukDKhf|so_@f!#1w-07ehSX2|gX+J&Bfn+tEROSo7No?CBI` z$RGvPv>~vfuGv=jj}eSAfCXnK&X62TdhIm)@E+C$yBF6?oSwWf+Zzp zU_fO020PyaBeH%v&G>I$QV+rjV=}7RwuYJh%)tmPn~##LET4k2nCjc=!6X+8vg+H! zT!=UvFY>zun}6Rmd(OMAMZ4NMa4wDID6Vz-eIPZ6P>1x%(Z5r#pDv&6z{7d;j*90r zD1XGW(BcO57A9x=4Sv{}wyy6-B0;`D{;u88kCPx#iy3x8P*_I7A`$Nt7wuS?Y2xE! 
zbs&v7-rGN~QWc#Av{o)G(g~PoUXZ{(Qk-sWa0oFhi=SM=k+m@o;k!o!Jb%38LbPN2 zqFE9SO*j<3z(3YC-Esm643PjdBIx#Dnw9m5xZ_lIlEv@*kL^A$FX~TgjVLw%4GUGs zzhpW1+l#u4h_ZmNfU-aZKu%|e6(Fah1{)*QsJ^^>s!N$I62&eu`#D2DgOxnOkg0G0 z`CYiN#7OwvF;{|dMW>_rbR;ao{`g>;M{Tx0Cb+ckyZyvz3U4M4vQO$t&>Sf>8u1ue zv}9-B70XjwL>CL~7eji?tH`CspjZdGSz6?~Bq?X&^d|xg3ac&KFSf~)lnj@df{8$n zXAhaxa?BS{ERGIitgop|;UL_MknByI?Jd_pcOJec%SV-B>G%#s4O5L|Sd@Qc=ocfA z@S0%T!Ga0gde*|=+}5S**9i`gs#LmhxgEXuX+ZIr@>16Z)!~lAri8J96j%Q(Ju}`6 z>blUI+7UGOO|Oc^5zMvQnpu#FYZ-{oY@9I@7IsPz+@r<#gNR5`P+wT9wEKVrJFA_ zI3|N~k`UB~OLu`2242?nHU#*{77hD)(G{sZ^ACdvgQj$eJmES@>?|vvj_D@_wLiQ; zLr1R~Oh)8Vpg?e!9bh*{8{7MkPo~qQ8>AanX%G;LgqQoH1QF_?dh?3KdO^|RYG2Ll ztL_4Ry7fca!6TC+!*~D02kg<>bvf^k51Ug0Xd{UsU>2DGrPkDnI5UUH{*KYK&!^ZY zAga`N?m)T0HXy+Yg}M*6Seb96s4y~}_hIS29>dMX1sHRFu-P%{b;L9aU-{gW0&96Mi7D3Yu-`H()`wulu=nM=MxNQu z2-2z=P^O>}Svq^eTSBtcRXCbQ7+ufO#i-Jn=bjt;NW)Bu%ZyLMNzqVFj7`=m z($6t%I?9bp(MVDc)72=7OAb@hhSNcmC`{2$F|p1v&3uI(nWUS(qF#U_r;!{Vmab7G zr=XPCg_e+RR3tBEUKk&pl$w_rEBpGF1=W`)2&*Ta5+cAPc)&pdILu7!tN{B1|7Mo6 zoxPs5iK~e|rUq0~ISRrH1=M`0a=7=j>?^SfUX57OpDofS)#$?-D!5>yX~?b1@QZ-eO(0Z*@8JxES)FdlYw1Q!+@#ided$JO)=wEY=oTY_sWkY3onZ z$3oI61dSiiDJb<>!U0@``$Eej+QHoMl+I-wlT5Drt@bbnE3zX+GyE7ov7sNTfLt!; zt&m&W?QNYj_h@;)X-Mt@B3mIoGSNpya9f;oH%kGO1HxF}d%pqk1EN#U7T-$GDAvSl zx?l5}F#DdXF?hCwQX}Bo#rJ37S?-K<{?WKe9 zDhxE)2cLprm=Ja>pc2LRQ_(rRk!2g#ba+6h+#)-|_{x5tKqFK!Xf%;JK7XD#5j<)} zPk4Wky-WYzK2VU)OXI$WVzCT7$=ESc(aD^XX~;}hC@ihqE)7u>FOqj?wN3igK1e?G zDHJB{X*Bo#*>xv$k1SlxMCzv|ZIySdn~+9BYScKF(EIvBPeOMvb&FNL(>JpuixMJg z7#0LLMeAK8*t+G8LrGc;@XkT@wsyTiu^FhG)%l`63pFOos)U&7?+Okvi4;KZ!@$P# zqIEe?S`|#EN4YMJv~3N;-^jUQD6H^Iyjf5%_dBguIEO|?BY^eM(u5qDDeFpR#UB&m zpZb~iQO?~PdWHd2gH;DYWbDujHzEw1iJC7Px}XO{Se`EGCyGrf??uQyec(~Ov0~fo z#HPU&7epmp@u5qQcP|1aS*$Lz*Bu@kQiV6h5F8Ob^1bA^rJwgn{Eu_Aj{|V+3!!yc z+-tn`0>+91j1~g;P3Gy}os{@ko~!5KqG#+X@o)3+Z9Ch;!g%u>Vcnq#EppVjkVzMV zcbwbh0aDewu&h=W*tvlO@h2v z#CYuJz+d-r*{~6vi6%=wY5T4@?4SkLGlsBvVDX@N!OsGH?@K&cCMJXty@@X)&sVy* zonsu-AGu{MW6et1vc7WB&ouuQznJ@?dRcP(tyrXR7BeIX=5`*MS@s9rx+1(BH6{G? zds+x9=3^y=MCf|scf8W2J1*`j$I6i#vmRww2_9OjM%b`)E9RRm>ATjdO#2X8kd~!U zghA?>TV2;YTd33CUq@Cm6u|_c(x1X)e6wx^ZZ&E9Q{obz9N#TGb&G;DLNioH>2fqr zMdt6UmqQW>45|yP6M*MES?n2H4pKjG;e?!=H>Z4=rJIB4>uTbi$ws%cwnu;;6k5Wq zy9>7ubK8!0PfpAYoW-l8p2jA#;zFPF-w=kH2ri^^k z(@|I;wT@&=C_=}Z?FC}CX%u*5=9+FJxO5kCyU)cvP>$1q)g~#+%{|% zI_-vNdh)m~WB5>}=}c3rs>51zvJ7Uvcci~syO&ZUk! zv$~7CHuBb}n2pKy&hownzwe83HJ*-hU z^frHZ@1FSk!TECJ&xobt56QR4@i+=|QNk=ge~{X0)xWD|bM$W5jBj2#f174JW(?mT ze6u>Qe0psajtsJ2@#!R5Q7ja9Gjp**iecL)Wy8@TU)cp%Fg_D=W5u!;x~dBz>7=Q4N$l8_!fk+rgfK)Rmoot zMeW{RD%~wA-inQSXokM(BpKI|Kk(HdtU0_Ww z(UW>`b3BT?GIFpRt})2yv(u+cLcYW7D|O;zm*v~x@HE6;Es=xqIvR*43Exnnl=Qpk z7*jV4h^x;#%xE|wa@RgyXR2aFa;9OC(`u4lV~6%|)XYN^Ou-fLHS7%2K%Ft@*+s^M zCDtdm68?UX^HKF9nRmrX9sH0Iy~jKhES7MNmX@E1ue7G34$Gjsjdtb|eiu`{f)WzN zE$pBGPEAo6)S^xqM5QjR5UpOvKjh$5Hg}U z5r*Ude+wueAZS?wCJ)qC`q(pyb|tZf_R~0Z57SQ0{S+oefzXF@+S|! 
z^%+>nn5~jxqTb`RmHmf&@gqt*Bca)nAHp=z0^$#-c%_4Av(uB64?=oq1yQ>ttUZ3Ln>5rNpXX{3nDj7~tH#s{u z08+1p)&!EbXMXH_+>j%_-T8yygSU{+SFHrNoljxgdv+>Y!LuSaJl5#Ml{-B=(vccc zsGl|CIuaxt%M$nLfyJ7Y7;V#uI-Y%fcfOu}tRhwamRY}~SL4T7>gsB<;AMW2HEX{# ztIG*{sL;C~&8`-G2muz_=#pg3#+-ih?%Pmc_7iUpXDuR<{W}s5jJiVfABO4i4xA`>WIiXaTUl058E~dUp0EFB_x(BrH;C zC=WHo;{V#A4VaMrx6}oU{y}gxadfh39vf$Dw6i1&HNh;QxDn_-{xhpo(8x z^Iw(G*BDvxXI3CU2t_d5Yg>8%;(M<#o`9%|f3O7(%yxgT<1;7BbmtTz6Ql76bNA=!cI|y*9JfM1Clv;n4x&S&rqxlQP zz|2g~#=`wyd+7hf*k`FL<78&_Dgid%p8~%6uZ`{SZ9?A%=D?w*N{@@R|Ys(7vDrxHlqz*7~9z`>WIi5CZ=E3&Y67`p*;xf0ACr z1aL+N-MazxfnWHV)Ij)}^rEY%=j7q!Y-023HoeHoJe4Km%K_R-Ak)8tdVbqf&Gk>D zUkZ(XTJ*vpJYa8B0<2Y$5d1rQ=QoF4;uYr)rJ6q}FW1$&ydIE0Bq;i_MEJ?Vq;1T+b9$mBS1L5YTk~ zYXr38E5xs@xql*FtoU-j)U*!hLYaWp_t*B1kp2qspD5)&31jK%%A^*;q^^Khe_BBF z*I&Cce&#E}9|699QeNzk7}8-c1=uhJ=t6((0i#8~DNfD?&K5>=Movz2PR<_ICbWRx zUXy&P#|xhTSL6zy0qW+zr7obd;%}svOt5qoMs~JNG)}H&fCwjR3tJP~mrq|~8Qt>Z zn*gvEk>|fYZ!Rl;WB-*j>Hiwsfaf`U4gimBb3ly&p6CB9bpbn7uQ3MB&W=ucCbmX) z0Qpn1SNH#=(w08z{mUj#fK4x}F271$0A&4Zn31)Clasw8V8nEEeu1^O^=lHr&z8F}5tn{06w*8vp{AZe_*Pz#ldVYfr zzP<*%O55`q^*W2eZxq_M*Qh^o8ob85PCW1%QwqTRdozNbo&{hphMwL(w4#@P2RyF- zRsQ+_5Ig&I`u~q9;cKDz{}6%d0{s7m`PUrxe@^IL&irN2Und^lzyC7m|7#ZYS`huW zHKgZ%Vf@RK|Hz~NS+V~l;QSlXdii(AU*w!$qh4#-{YKqC{~h%|<_j-Rq1Pf!zd=@D zz<=9$@&fw%l;NMmnSOIx|B3TYvxa|C&iIWnf&IHV|7);*y^rWOB?J5Klz$knUux#{ zE}Y+xe4M{S{*NL1b=3E7kPp!>(0`+}|BsIKI==fi=gZ$Y|Bd$kv*r9#{PJR~d zx!vDf9RG=Cehqu=WBm;~V*1~(Umn-jwAXH+-?T)w|4sXsQ|LA7wd>|Likkg@qyFN^ pd5wJSy7?~>*zf;9{@alwF9imey#fJo0Dc}2fPlgT0RunK{{u}6ZpZ)t diff --git a/geokube_packages/intake_geokube-0.1a0-py3-none-any.whl b/geokube_packages/intake_geokube-0.1a0-py3-none-any.whl deleted file mode 100644 index e24fdb35c01eca8427669f367b38d30bf808f369..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 13930 zcmajGW0WS#)-7DNZQEv-ZQHihW!vhq)#WbRwyVpwZQR=L`+aBc)A!tQ@)?oO$RBIQ zip;fQMa-EgF9i&O0ssI20ibA2p*;2EM4$I{wfNPrzM6%tvw@X~o|%cAm5ZSXy`G+h zt%b9m9-X~AsVp~KAOp-gXE5rbpevoc%jq<9P`{suJ<%$G!>j1|%?9?y6z5}qZ4PrU zcMdfpjmXvjnk!D0lg{rxGvZ)-RLN+91mRHu0s|4*;p-5|#-L1tPCyMgLfxK5eb`^9 zBze##;)OLV4nj1L0 zZD~U?8a;{%s^z|w}O6VVY13f&Tw%2^d9 zWa1D@QnGTg3Cn!Nkygq+Bz&a1%m@l_2Nts2ghI(dmpnxV?w)xG*B%6~kO6L(U)Fq7P}qtnIo2g>r&mKa7|Euw<_kKe zbi*1DY$|&cs3LhKYoKuBgcs(y;``@e+BLmYpu*dHLtgQV>Ml%=7B8BC%19RD)|dcr z@$dq`C|j9MK(zN?t}z|-7SJELiYJMjEJf;eJc8DcD|7(f!xGf-w@~eQRUBcRqN5YO zT2`Zp9&otP*y5_dmcPaBy2e7zEt$h8DPB154t~7I60v7?nfrF$ete3 zOHB_DSGfoS0&zf7Nq(Ao$=q;_c@Nif~bQ+^^eh^;ny1MkaWhpi8tLi#}d&R==%1Odeb zdwS5&+2dsc--m{oQ&h8j1U!elKDr+&UV3DIaJ_-qOeEw@*cnkP`SKcV+O%L7kzTRt zhV+9xP1`5TL_%xnBF>XUIjMG8E74&8!x-*1KV;CvV&{(D7Nt^Bxn?{sBDkl!$!^oD z8v*=x z#9Ere-&ECPE9YN@M)eW>7XYgr4fGI05PG=S_IVeY{sr$p=C@1ry~$cTKQ7 z2?FqvXiljQh^;L6_5%+b^ZEOQilK~fTG#YqKMCDx9jaO-oX#?#G{vq?!1na}h>I7o zth>RSY_%D}xUA!n5+T#*S-=)*dPHiq5D}2q%yBRq62~`ewm7eAyCpvxiu9M>oQIRwu^FFwokU$R!Mvs{LBngB3yTzCHFN5v zL9IvOT1bSHXPvT$%=|d1BB+IR*jg3{5W1?b+7_aO?n(U3G2uh z(^y6(eV`c;!@O_60ZL196UyaKa~67eo=#RE{OzOz5p860Mocnur?(k#?Yb82NX_}M z43mEBfI9tMx?xjVY`Bc?gd|S!!#@NuEy<2bpF*J>J|N2%^U;wdDAG3CKut5Z&EbhF zMGElOh*5L+HqVX)I$UjlqWQ~6i%6vo9m%~9xsl5#EX`oFq#ku~%{3rJSh%5(-t|+$ zPH6l_T9o&@x6OXq+;S({ELVVEg-noCM^uB)?3ti!o&*uyP}r9x95^hJK#;15beLFv z%b=ow*^%Hdw@mzo3mLq#SrnsTa0!)KsW|kK&}SL6BvTrG4sACmy#aW#B1A++#|Ya9 zX+L)7kt>OnjU?3G<7it=U?%n~0%fDngGxte+D8j!Q588cl2uJTZFqez%YhCj%k z^9(}r9&lo16J}L|8xNxj_`$#mo6tErf&LSLn|wQ}&9I*5n?E+-QOq7q{HO>PsMsP2 zOp-3OZ77X%kab2*NgoiwawzizCC5-zUe<$5syzDW1|eXExX&7$0T*&0uEYC1Ih?pB zqP6}YF?NcIkK$?RL_d#54+`3jT){Tk(7kxZ+Z)_EXRH9!g2d|^0*|JO&yjXH1Q%BV zdXuS;m>f%U#wYJ(>2;@gmn>U`ddy$&@$E 
z)*fUJ;(^c!^SlANeL!hte(dYx#aqD_w2H(tVF51Bc_=8`BNUJ_rG+C#xgD8?)^Sjp%5kOJBzV1=eelAN-cG?VufXDSPN&@7iAc^)kNR9N zEUsa?zfR<_XT!N#o=QFNqwy6f|k|w1Gwaoyq#^2UsuBd;Qd^NOZa?$c3x|VydvR`pTFxsVKP2^5_H*HRnve z>o%zyqpH8Q7I#4vR~L+4$RUwoB0A2HTHKsZMsIJ=uzbDr17z{#$@=Ou`seo#!SPb- zjG|xKc!cR71gpt;PsK}pDU?iFBy3(l3oHnUjAj0$r!(nh$CWVB(k5wutW;zPMH9i& zvzjS5wl1h?%l@yLBOI1g$B0s=$Q&Z!0*L&fxv*&t+g}#R%aQZiqDR8+j$b@m10(;r##Css7$38jCjcNK+m@Vg&FNz1;{1-^~n8*^vy&{ z=ZHcOB4&p3osl=d=#)@WS~;0VF5n04)VassfQDBKRMLxlP4Iz%0lr_57G?Myv#02F z9sm+8iR=LcF7`V!um%(?u7mTD=9beWWb1iVci7xCmld|Bwp?rO3Nk< zpKinLt$ZCR%Zkb4O=@N2?{hGwFqeM6a0=4!zT~KxHrUi(s@sTFnZ%Q-Uruf^{jNWo zKKt2Fk?s?1vM>d~@U_%wO+vM~yUv3y1315(BkP)o{XQ}lF6JpmRL`0oYG$XpuFjno0RPg1bt$LvqbOuh6R&S8dVhTIdd`4 z-|-_HAIJ^LmHG46Ztv?iJVGaBuU@ms+6a}_1oFcABuS@uVYAr}5lE%+U6iI_17w6P zRWGcH*a2s*O)zM3mAbN%L^kt{L?KT&x2Ls2G&X5MspFEz)i5h`1yPAJfzT+)3l+`Z zOiiWiZTMvKm2Weg8cB8j2A2^OgfhdoJr?~<&MUA>u0Ko)`?|V1J>*Ipez!BS`xsyX z>d8oes$2$or~7^|nH!wL7XYKuWp5R{`3XynMW5DDk?ro`O3rkrWV_efDs=$ zB@i=UtQ{Fqbhc6pm+Hff|D=;A59jc+j z*(4X0BebbxoFg{UXOTrQ6F{_pd$X&guH~(@0$L_a#?%iqxBNJu-<)9I)czQ{&t$zd z6^{n!BX#us$u20JTT5W%$Qt;(|5WAt(6DfJYCAK91lih%viiFY;-Hj`DsV`lF&2(^ zy$;CXb&E2x{PU^VLzeqMV!FXoL_3tk$FR?i2N9oNxN*@`sg|#`e<2awMu*+K10TE6 zMnjD9OqcS6Tc&UTk~*NL_(7bTMGrPgD5aBL|j5FaocLhM@)?xWL=G^t1R8_ z!}?Yk*Yo#`ep)8sp3hD+E{69%$P>L*uTb;3W^cG^nm*;DfQF`1Ws)bn!eha_ZRHD* zCb=8V8Wgb2O-9@^Ycl#eG8ECDZ#+ERbm`K!-u5t%-3W27Mw{SlgOJ9zo9k(cN?xQA zq=Jh&;VAGO-G{agy#wi8=_Ot{bLG4xZGBGcy?A|0&3Jh`alONAJy5za0Wod8r*AUz zD))UT`5{GIVhC7gR&wM*wAJt6zz;rdw%Qu{QD1&n>MIk1e${5X4@iXLy}Se5LS~!E zDE8e854DwAv4iowf!V#6s54wWm7weqs`>Cx%c#$J#^>Rj5o2p__jCl!ccdZvLEWXy znZ--CRx&BW4D;Ni1gAN;2KXLzz-TJ>qU}iATsBhB&`)3sZvdaCdZSz_V*bL@DYyjp_R8D{u6bPT0R7k;?$$HL6(THI+iAun;X?vmhG zL9M+S$`LtTdb>|Rb-v{fCl#F2>*2iL9`X#fY`hH>kzAu835{gZzWWt+JqrwKe8p&7 zlWZZU*)N_Y?L3Wi5Z|Y)O79|^&))+iCyZoUU<)Rnm6~Wy7Omkac`LC_%_0{!>=ji^ zWg2ManJ7=fZOxGj@tr>*p3PcqW5H~QdS(3rr8~t6t?fSf2{}yoaw$)l0r1)TbIdYf z!U#989Wf5Rsq*;v<3y6R@W!CMBq7>}eat z8fZGr=#mwAP*~%Lkjngs+udUSHVmNjSf>8llWW-E)cPiuf=~Y8)7bpJ|LT?12Fo5k zV5hW|H~+GJ&KJMjvO;MJ;T_B5yYmFT#VVK8d3_dd_nQ(6+?7vlFHe~jSkKKT;afby zM@P?@z}}O=5{2u5t7pr+gHgFS7~ZDhOsNtkN~$I%F~biQ7F{tMkcyZN`} zOo-3Xz02;`$=Jkau^|C`=qN_!D+GvE9A4It8^m9Q2?rQ3trSnj2oO~=IYESkMPSCB#5+RvSnLFKV|df8sq zGR*iF9#)r+Sdh{Y0Ik8_w23#?QF7?0k<=F&{;r71+5%Se}gkh}xPWSy&>V-wj~##|#4r~6wgy&gZ)Wk9WI zoMQbDx`e;*xaaRJN#VAQQ_zQOs~@xX?0YeM?cT2+;O>o@mGLnjD%txYyuSB5 zpB6zIn*u+j-I1g$5BHlodph#al&+_)dQ;erDyMpM`+R-~ujEucR4Z|a$N zDFcHpk1E*;PcBT7TPfNe4Zj&`e@i{PU(6R8zK(7)&x@6XR8Z=;J&>~m5aN8gy0ml3 z@b;WtA2J|2qgpu)x_Ga>ZrVThUak9h1%>Z`ScvD%ez1MxJMkK-GFUfK7HvO4#NQ5UPA*XlP?Z* zT~)?zl>w&XPz~WCk1@`Dy@4N2s1BC90(5aBq(|PGYsOk4grsRA;^R@_=5eRF<%~%O zDI<1o7n$@0h^Z1pxLc&OKA6Kt*rqwbq5TStj&_WZscaTfRLk^Nux+Zz#*?DaY1A06`}Yn2O#y^83c{oy z+KXjk@(h1z3yMg~p`tv&?u!<{=pv%4UQgD6UaWxfBj7fuw^d<<8SQ-6OYE42BI6au z(>();fPPC@_|{%7^$5&_r|80<6)4MBXymql5+1n+Kf}mU|17~2 z7^WCd9;o{9k=bjPNV6>>u^KT&ktRvQoWaVKD#;{=f)gejK)S7MTPHVI{JHx5-Fi!w zi+cQ4c}-h!@iCI)O=fi$PplY;<^?o?@caETUsLMYoazvb9B zvpt#`V<4I4^k@f0Gwa`^o(Bgv&O~Os)LL;AMRQ7-$+;Ji--4t}XM{QN#+fo9Up!`a z9$4MhH_0t}Wm?445@mjsm2)FvZ zKIc;Mz+mM&rsrC+Jf3^CY>C!{TD{6QwB_s73&G>i{Q)kaN9I9ycF7&xIP0iv;k#*R zeB3Yc+VNnJfc2ThpLdss%XUGBTdqd&!_^vWD1W`TI-f>9w|*<`f?@%&1hW;#$j70! 
z+BnAPTb_OwcgTb}N6wMfbDPXHjB57~32#9Wq;MN(HpS2JM<@p9#{AjR9h4k@cu0%E zW2r6GWmz0)K8s4{oE0i8UNSzN`(|ydpr43%X$RI?x&9_RSR3#cJqQ>A+R85l%RNyP zu*w)CcrhHw;EpPCW^6Kn*-Ht`DB@5i_Czya(Dlmf6K3gmB)>i}%9!ykTYsCMm%Qoy zxLERB#s51W(xo(%B`7nAu)p%*0~`PV zfJC;kvBVahr;2FI-@if7+E{vZ!Sd5{C^dG8 z#X!25=aO0wYB`E>>3qs^4>);TNmU0SPU+SS$%wH0pVsc1eZc9}n-@Bh{pG)=h;zkm zm<0iyLdTQC3T8A6V51-@VT?x1Yvf{Wvgn#(GD>hf3V0F)XvL`aedLkUN`5Y7d_ZY^ zjI-QCEX%&H)Pv>n>``g0d-H|nU~+=QFH65`bEyRZj)7O>dGmBtg5X@$>P%lo!0 z-uaa9X|p#mIE@f7a3k(TLg0~Wd|Ns8cgxG^+ukzChRDDiUiv8>dd1Ty=D5@q-3Pbv zd^X}%J&$16@st~f9}q&uM?ew*Bshl+7*oSAsPX>Gj}HoxMgW!FHe)%2AFnckh?eX_M!AwH>qb&%`kv*&*Xs>*a9~ zyITX_n$8rxaAO#47hzoY)>JG_K0Xgr=_;0{Duj|2Yu9&dw%I&QAXn&VT{_-9V(QJeB|u03iFz3g=%2f0=!W zZU0o?zM{}Yal*2n0HO00Wrq^Fa@vY;`cWM%t$$>Z&C;IC=H_8RhQ_(+I-hxSli}zM z1$j)o*QVOSgrq7N8YV}*9QBPN==&;%hduNZWblAMemHI#9>(BzR^%&BCl6GnoP}+3 zCqFuwcSId?<@iL81S-M-&in7wl1-XN2}QD;mCc(7@N;{;3+*hi2A+!>3Fo}cF~}j3 zZ*s0m8lwRn-l`Z7-t0TfGLSuGk#uTx3ZbdSnQ zZqoWb`EADeRUhm~F0{@8Qyd=9x3e+(=BL?nFSF#U)FK0CI?Fe@2)ZUy$f#XYDPU8J zmOvS(*vYW|cyiem*{(}=4P3}T>_VRnhiXzzUc%|uOs3hRjnY{xqZ)y`r^)9IZNY>B8gAu^KA=te)M`#f4V2V7-6>TDvz)T`Wvhd zw<{_QwajYzq|^<3L1?zuzZz3RfKrn+p`qVcs{vxpTHtXN`_N>5KKbw<22pacaMbC_ro zmK!)9mlC`u480h3fD+X&pJ5)Py@4xWk>O8GJ29!QKrO7~o}q~YJp`zdvayTqO@Hpe zkeh9AYwnj7j|R zWK@8W!1?Gk7Q#~lT=R_TN6QWX+^4;X`fLa^8DzPZ%f_~xVfl@%G!)}0YOye*J@QdB#nQOk z-)RL~s;|v>d@_Lpb4Ulu>Vbrq2K_o(Kgvz%bOOoho3o#JxVXfCe zmRxtie)R>4ZQ-Km6mpgxD*3Y!;qc|5_KYcCWLKhbDhMbSpEGW-Lq%x>Qf8CU>#3d( z&}3U2m^gIA{(^~A28J|;^>gsoT^bJAF}HKj5!*95Yrh9I6|i+qh6#4X(w0Q4?V`@6 z6+vvf;o0gcL?+0$wRZqcLMk*NrB)<#vk$b?f!mgM5(_3REW|p_(SJ1BH)=#`QMJ7z z3XGRPF6HF%e=r8&v14Zz+ie%$0r*-{wO61lvmaL`N7{^2bGX=tee~f`T;d z9hn9>EBIA@_6oKtGv_}yG8(3%+fW;ibv7|&8VXFE5VzKf$j&{idsw)Tl+^%w2ZHw2g{aqgNnEXk0D`vUw`&pfCIs;xpO7pMf#?A^qKbIC_?`9z)+*$(};c# zpp>DwKnbkF1a(h!X9~oz)1gZol-kkCD$`Q}tj{RPRQRJmr{`9Y#qm;Dky^R;%eIF76y77y#U5iw*gkN63D> zR2*BcDX|~U03s9ClP@y$O})(l(g#QaYAGsFVBxf7vLXHNV*l*0Q_t=y_Pj~mVJY5Y zi1wb8pFlmt-Y zzm##I@z+6n4U6K~Q8CkU5pWe-pX;t6cA|KP;v#&*TNpjSFblO!!lnh2dyMiqId{+E zpwM&n>Xqf9RdtBWMLu5#;dOYl*4OAab*BiP@xW`@i?cR?Dcu_7vUM;)GDnh-#}c}j z6!PwqwuCekV$%X-vskGE68bsq$OZ$k2!Zi~V&uKTb(k3sX}uP!P^<|G6lM*$ljV-z z6OtfRYNbG7;-=~vVYG3CQZBaV$!6oDg(;xV{2tgh8QR(DIxmpwWZ2>a zk&TMAv5SPUjUfCsx{on(JVav|<8)+3#qgzh_nIKIbb~C1_mjOctwf{H6peuPj|=P1k$7q@4k71 zccj`z!)S-5@iSZVQp@0($FH%uiLnMg93VE+9Kw>8N|i4*^B)fo(5=8Z`V!Z?sQf41&fgMHdfb&PvlGc2#Ew` zt(^A2Qd#Vc>X2Z=k@M|^lOU9p}A$Odf#p%eBi|F`f>jR>L}&_drK@crqvlq zrdiR%Xf{cJg*+Ls1a5jSq4XzVZ3zPOV7uN%R~KFPNqNfHLR;JZN%e0&o$3qd>$&^& zReXW50r;`UvDX~Rpbgqfwcl1g-ul}nYjhXg2KXEF7yaj>T7Z!;;c-lo8~ZitKh0Z3R8f!tFLT%+Pp)8fDFgfAjS2+7e}Hy z>YDSLXSVona8xyVnZ>AlTeNIv<+XJlNjK_GjqjStax)Dnf9~JfS?0dZMa64l#QCLV zRMug8v%F1CFZu(pMIp zSbFH34qIrN`ZdfxCHZt`S-Mq=1E}^&)gW>Jm%_X=$$`)#cbJozOMZuv6*xUw31CJx zD`aPM)43-~lc+%(D1=N3T6D#QRLQ%d+)}{>D127%{h3{m$EwIAc%&jh!`U2ql2L`=hX{i|~ z0J*-w?o~iU&V$pe-_9M)5UemJld5f7nCU|vMrip$v}AR~G_1vR|8_4Xg;=0f{}$$A z#PLLl?=$E^Q1{$5?}irLTHD~YG?t^d*2UFedLW?=*@vTFm)-zA3okFEKc%HQgYx-Ac;BSVD9AEOvpRQzn!uicT}DJ%Kv+OoKu2lJc9j94{gxW-s39TqKoP-QLC95+INVy;!l}5lL0H`@3BKgx zwd$>GKjTMcR+dJLE<=Qy5*8-~@&H;L+BsWgybeO*o2;~YRy~@} z)b{mv_-HlmyI(bUXrYst@AX*BDM=%ZYJm81c!$2=t2^c=&v?f(Vp2edeNQ__Ga)^=_ z=1iLSLrATAq6F88ZMA!4+D&TJ2O^LL1#v^u=DrQCXfi=u5othrsJE2$T z83hj!g`P8gbqqH7p_nfR#pfy*u^L==^{#V*9q@?v4tU2l)36bCUKQ;&9n&7>&BeOs z$1dXrD#ULYE|R+KqFl+KBl2J~@Hl}PKW*%B7jD`SA0m7cZZK+z=|e9!FK(-?Lj*kb zLz0Y(a8`Tf`Owq|mMo&r6O2|qm{ZjFx2n&rk73i;g zQ%V*$`yUsg<^H$l zeJTlBI?97T$JyxMsV((IQ=Wb0C+UChKv6_UPEj~`ZlpGy9|dfyV+v@))Y2dxlqdj- zh$WyluYDSZiG}Is(_KsHs0?Nz6}#EO{kz%E$&*)~*w_+_O6cIWX=|$1a+l><=>>7}G<|Y6=+_aebmaGm=4bU= 
zu71`tRM+v*v#eRrfRO52x*GR0&KDQ9_AkR#nrVi@x=4BxQ%a#Z?L5bsfI8K;TTEOT zHa;mvoEuc0`o#)8xLAFNl`Vjv&- z>6#YxBysv%WV-QW{f2yIP>pC6Imt09$OTC$SA6p^?7+rUmk#E}A)`(q1LF9ylF%$T;^9gx4P> zPvo`QSzTUsZDR#9@1CY*V5ruxfXCz0?cXSHNMi@*c8wT*Pe9vnV>z1C-&x?W-ej~C zS8cXl-}Sgj*_#~;tHp_)xy!CkSFXMAOYsvmW^KS$)`r2v6Y zfdAiLt9*sz{~Y!HfBpSC-TO}p{+~Gi>En?9MF9Y)_h0@GIDdaC@+Z!p;_BZx+g~~P zhd=+H)cQ}1|0GELjdA^@PyK=MFL~;pDE~=?_!~tK`#)$Bf13ZPN&Ic@^e^-OtWf+D z=|6Gseqwoc|Xi|0m|3%Y?r%eZHo>|AP7dFB<*?{jtPy{L|2fzH4c_tJ!T)XM{}cJoN&a8RfOri5)$RYC^5vyK!TvIX P`TG6*sudMXe_j0_@U%^_ From e4365d0d467022f570f8e8ab21553aa4efb09a32 Mon Sep 17 00:00:00 2001 From: Marco Mancini Date: Thu, 11 Jan 2024 09:20:05 +0100 Subject: [PATCH 05/15] Removed old geoquery --- geoquery/__init__.py | 0 geoquery/geoquery.py | 17 ----------------- 2 files changed, 17 deletions(-) delete mode 100644 geoquery/__init__.py delete mode 100644 geoquery/geoquery.py diff --git a/geoquery/__init__.py b/geoquery/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/geoquery/geoquery.py b/geoquery/geoquery.py deleted file mode 100644 index dc42414..0000000 --- a/geoquery/geoquery.py +++ /dev/null @@ -1,17 +0,0 @@ -from typing import Optional, List, Dict, Union - -from pydantic import BaseModel, root_validator - -class GeoQuery(BaseModel): - variable: List[str] - time: Optional[Union[Dict[str, str], Dict[str, List[str]]]] - area: Optional[Dict[str, float]] - locations: Optional[Dict[str, List[float]]] - vertical: Optional[Union[float, List[float]]] - filters: Optional[Dict] - - @root_validator - def area_locations_mutually_exclusive_validator(cls, query): - if query["area"] is not None and query["locations"] is not None: - raise KeyError("area and locations couldn't be processed together, please use one of them") - return query \ No newline at end of file From 948425509f60678c7cb18787bbf7a779ad879f9e Mon Sep 17 00:00:00 2001 From: Marco Mancini Date: Thu, 11 Jan 2024 09:21:19 +0100 Subject: [PATCH 06/15] Merge intake drivers --- drivers/Dockerfile | 8 + drivers/LICENSE | 201 +++++++ drivers/Makefile | 21 + drivers/README.md | 2 + drivers/intake_geokube/__init__.py | 6 + drivers/intake_geokube/base.py | 132 ++++ drivers/intake_geokube/builders/__init__.py | 1 + drivers/intake_geokube/iot/__init__.py | 1 + drivers/intake_geokube/iot/driver.py | 164 +++++ drivers/intake_geokube/netcdf/__init__.py | 1 + drivers/intake_geokube/netcdf/driver.py | 64 ++ drivers/intake_geokube/queries/__init__.py | 1 + drivers/intake_geokube/queries/geoquery.py | 94 +++ drivers/intake_geokube/queries/types.py | 10 + drivers/intake_geokube/queries/utils.py | 106 ++++ drivers/intake_geokube/queries/workflow.py | 72 +++ drivers/intake_geokube/sentinel/__init__.py | 1 + drivers/intake_geokube/sentinel/auth.py | 45 ++ drivers/intake_geokube/sentinel/driver.py | 342 +++++++++++ .../intake_geokube/sentinel/odata_builder.py | 564 ++++++++++++++++++ drivers/intake_geokube/utils.py | 51 ++ drivers/intake_geokube/version.py | 3 + drivers/intake_geokube/wrf/__init__.py | 1 + drivers/intake_geokube/wrf/driver.py | 178 ++++++ drivers/pyproject.toml | 85 +++ drivers/setup.py | 3 + drivers/tests/__init__.py | 0 drivers/tests/queries/__init__.py | 0 drivers/tests/queries/test_utils.py | 50 ++ drivers/tests/queries/test_workflow.py | 61 ++ drivers/tests/sentinel/__init__.py | 0 drivers/tests/sentinel/fixture.py | 11 + drivers/tests/sentinel/test_builder.py | 376 ++++++++++++ drivers/tests/sentinel/test_driver.py | 177 ++++++ drivers/tests/test_geoquery.py | 41 ++ 35 files changed, 2873 insertions(+) 
create mode 100644 drivers/Dockerfile create mode 100644 drivers/LICENSE create mode 100644 drivers/Makefile create mode 100644 drivers/README.md create mode 100644 drivers/intake_geokube/__init__.py create mode 100644 drivers/intake_geokube/base.py create mode 100644 drivers/intake_geokube/builders/__init__.py create mode 100644 drivers/intake_geokube/iot/__init__.py create mode 100644 drivers/intake_geokube/iot/driver.py create mode 100644 drivers/intake_geokube/netcdf/__init__.py create mode 100644 drivers/intake_geokube/netcdf/driver.py create mode 100644 drivers/intake_geokube/queries/__init__.py create mode 100644 drivers/intake_geokube/queries/geoquery.py create mode 100644 drivers/intake_geokube/queries/types.py create mode 100644 drivers/intake_geokube/queries/utils.py create mode 100644 drivers/intake_geokube/queries/workflow.py create mode 100644 drivers/intake_geokube/sentinel/__init__.py create mode 100644 drivers/intake_geokube/sentinel/auth.py create mode 100644 drivers/intake_geokube/sentinel/driver.py create mode 100644 drivers/intake_geokube/sentinel/odata_builder.py create mode 100644 drivers/intake_geokube/utils.py create mode 100644 drivers/intake_geokube/version.py create mode 100644 drivers/intake_geokube/wrf/__init__.py create mode 100644 drivers/intake_geokube/wrf/driver.py create mode 100644 drivers/pyproject.toml create mode 100644 drivers/setup.py create mode 100644 drivers/tests/__init__.py create mode 100644 drivers/tests/queries/__init__.py create mode 100644 drivers/tests/queries/test_utils.py create mode 100644 drivers/tests/queries/test_workflow.py create mode 100644 drivers/tests/sentinel/__init__.py create mode 100644 drivers/tests/sentinel/fixture.py create mode 100644 drivers/tests/sentinel/test_builder.py create mode 100644 drivers/tests/sentinel/test_driver.py create mode 100644 drivers/tests/test_geoquery.py diff --git a/drivers/Dockerfile b/drivers/Dockerfile new file mode 100644 index 0000000..d4f9e76 --- /dev/null +++ b/drivers/Dockerfile @@ -0,0 +1,8 @@ +ARG REGISTRY=rg.nl-ams.scw.cloud/geokube-production +ARG TAG=latest +FROM $REGISTRY/geokube:$TAG +RUN conda install -c conda-forge --yes --freeze-installed intake=0.6.6 +RUN conda clean -afy +COPY dist/intake_geokube-1.0b0-py3-none-any.whl / +RUN pip install /intake_geokube-1.0b0-py3-none-any.whl +RUN rm /intake_geokube-1.0b0-py3-none-any.whl diff --git a/drivers/LICENSE b/drivers/LICENSE new file mode 100644 index 0000000..2b65938 --- /dev/null +++ b/drivers/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/drivers/Makefile b/drivers/Makefile new file mode 100644 index 0000000..12a2661 --- /dev/null +++ b/drivers/Makefile @@ -0,0 +1,21 @@ +.PHONY: typehint +typehint: + mypy --ignore-missing-imports --check-untyped-defs intake_geokube + pylint intake_geokube + +.PHONY: test +test: + pytest tests/ + +.PHONY: format +format: + isort intake_geokube + black intake_geokube + black tests/ + isort tests/ + +.PHONY: docs +docs: + pydocstyle -e --convention=numpy intake_geokube + +prepublish: format typehint docs test diff --git a/drivers/README.md b/drivers/README.md new file mode 100644 index 0000000..f08349c --- /dev/null +++ b/drivers/README.md @@ -0,0 +1,2 @@ +# intake-geokube +GeoKube plugin for Intake \ No newline at end of file diff --git a/drivers/intake_geokube/__init__.py b/drivers/intake_geokube/__init__.py new file mode 100644 index 0000000..95b5503 --- /dev/null +++ b/drivers/intake_geokube/__init__.py @@ -0,0 +1,6 @@ +"""Geokube Plugin for Intake.""" + +# This avoids a circular dependency pitfall by ensuring that the +# driver-discovery code runs first, see: +# https://intake.readthedocs.io/en/latest/making-plugins.html#entrypoints +from .queries.geoquery import GeoQuery diff --git a/drivers/intake_geokube/base.py b/drivers/intake_geokube/base.py new file mode 100644 index 0000000..e070427 --- /dev/null +++ b/drivers/intake_geokube/base.py @@ -0,0 +1,132 @@ +"""Module with AbstractBaseDriver definition.""" + +import logging +import os +from abc import ABC, abstractmethod +from typing import Any + +from dask.delayed import Delayed +from geokube.core.datacube import DataCube +from geokube.core.dataset import Dataset +from intake.source.base import DataSourceBase + +from .queries.geoquery import GeoQuery + +_NOT_SET: str = "" + + +class AbstractBaseDriver(ABC, DataSourceBase): + """Abstract base class for all DDS-related drivers.""" + + name: str = _NOT_SET + version: str = _NOT_SET + container: str = "python" + log: logging.Logger + + def __new__(cls, *arr, **kw): # pylint: disable=unused-argument + """Create a new instance of driver and configure logger.""" + obj = super().__new__(cls) + assert ( + obj.name != _NOT_SET + ), f"'name' class attribute was not set for the driver '{cls}'" + assert ( + obj.version != _NOT_SET + ), f"'name' class attribute was not set for the driver '{cls}'" + obj.log = cls.__configure_logger() + return obj + + def __init__(self, *, metadata: dict) -> None: + super().__init__(metadata=metadata) + + @classmethod + def __configure_logger(cls) -> logging.Logger: + log = logging.getLogger(f"dds.intake.{cls.__name__}") + level = os.environ.get("DDS_LOG_LEVEL", "INFO") + logformat = os.environ.get( + "DDS_LOG_FORMAT", + "%(asctime)s %(name)s %(funcName)s %(levelname)s %(message)s", + ) + log.setLevel(level) # type: ignore[arg-type] + for handler in log.handlers: + if isinstance(handler, logging.StreamHandler): + break + else: + log.addHandler(logging.StreamHandler()) + if logformat: + formatter = logging.Formatter(logformat) + for handler in log.handlers: + handler.setFormatter(formatter) + for handler in log.handlers: + handler.setLevel(level) # type: ignore[arg-type] + return log + + @abstractmethod + def read(self) -> Any: + """Read metadata.""" + raise NotImplementedError + + @abstractmethod + def load(self) -> Any: + """Read metadata and load data into the memory.""" + raise NotImplementedError + + def process(self, query: GeoQuery) -> Any: + """ + Process data with the query. 
+ + Parameters + ---------- + query: GeoQuery + A query to use for data processing + + Results + ------- + res: Any + Result of `query` processing + """ + data_ = self.read() + return self._process_geokube_dataset(data_, query=query, compute=True) + + def _process_geokube_dataset( + self, + dataset: Dataset | DataCube, + query: GeoQuery, + compute: bool = False, + ) -> Dataset | DataCube: + self.log.info( + "processing geokube structure with Geoquery: %s '", query + ) + if not query: + self.log.info("query is empty!") + return dataset.compute() if compute else dataset + if isinstance(dataset, Dataset): + self.log.info("filtering with: %s", query.filters) + dataset = dataset.filter(**query.filters) + if isinstance(dataset, Delayed) and compute: + dataset = dataset.compute() + if query.variable: + self.log.info("selecting variable: %s", query.variable) + dataset = dataset[query.variable] + if query.area: + self.log.info("subsetting by bounding box: %s", query.area) + dataset = dataset.geobbox(**query.area) + if query.location: + self.log.info("subsetting by location: %s", query.location) + dataset = dataset.locations(**query.location) + if query.time: + self.log.info("subsetting by time: %s", query.time) + dataset = dataset.sel(time=query.time) + if query.vertical: + self.log.info("subsetting by vertical: %s", query.vertical) + method = None if isinstance(query.vertical, slice) else "nearest" + dataset = dataset.sel(vertical=query.vertical, method=method) + if isinstance(dataset, Dataset) and compute: + self.log.info( + "computing delayed datacubes in the dataset with %d" + " records...", + len(dataset), + ) + dataset = dataset.apply( + lambda dc: dc.compute() if isinstance(dc, Delayed) else dc + ) + return dataset diff --git a/drivers/intake_geokube/builders/__init__.py b/drivers/intake_geokube/builders/__init__.py new file mode 100644 index 0000000..0b7eded --- /dev/null +++ b/drivers/intake_geokube/builders/__init__.py @@ -0,0 +1 @@ +"""Subpackage with builders.""" diff --git a/drivers/intake_geokube/iot/__init__.py b/drivers/intake_geokube/iot/__init__.py new file mode 100644 index 0000000..5500b37 --- /dev/null +++ b/drivers/intake_geokube/iot/__init__.py @@ -0,0 +1 @@ +"""Domain-specific subpackage for IoT data.""" diff --git a/drivers/intake_geokube/iot/driver.py b/drivers/intake_geokube/iot/driver.py new file mode 100644 index 0000000..93c52cd --- /dev/null +++ b/drivers/intake_geokube/iot/driver.py @@ -0,0 +1,164 @@ +"""Driver for IoT data.""" + +import json +from collections import deque +from datetime import datetime +from typing import NoReturn + +import dateparser +import numpy as np +import pandas as pd +import streamz + +from ..base import AbstractBaseDriver +from ..queries.geoquery import GeoQuery + +d: deque = deque(maxlen=1) + + +def _build(data_model: dict) -> pd.DataFrame: + model_dict = { + data_model.get("time", ""): pd.to_datetime( + "01-01-1970 00:00:00", format="%d-%m-%Y %H:%M:%S" + ), + data_model.get("latitude", ""): [0.0], + data_model.get("longitude", ""): [0.0], + } + for f in data_model.get("filters", []): + model_dict[f] = [0] + for v in data_model.get("variables", []): + model_dict[v] = [0] + df_model = pd.DataFrame(model_dict) + df_model = df_model.set_index(data_model.get("time", "")) + return df_model + + +def _mqtt_preprocess(df, msg) -> pd.DataFrame: + payload = json.loads(msg.payload.decode("utf-8")) + if ("uplink_message" not in payload) or ( + "frm_payload" not in payload["uplink_message"] + ): + return df + data = 
payload["uplink_message"]["decoded_payload"]["data_packet"][ + "measures" + ] + date_time = pd.to_datetime( + datetime.now().strftime("%d-%m-%Y %H:%M:%S"), + format="%d-%m-%Y %H:%M:%S", + ) + data["device_id"] = payload["end_device_ids"]["device_id"] + data["string_type"] = 9 + data["cycle_duration"] = payload["uplink_message"]["decoded_payload"][ + "data_packet" + ]["timestamp"] + data["sensor_time"] = pd.to_datetime( + payload["received_at"], format="%Y-%m-%dT%H:%M:%S.%fZ" + ) + data["latitude"] = data["latitude"] / 10**7 + data["longitude"] = data["longitude"] / 10**7 + data["AirT"] = data["AirT"] / 100 + data["AirH"] = data["AirH"] / 100 + data["surfaceTemp"] = 2840 / 100 + row = pd.Series(data, name=date_time) + df = df._append(row) # pylint: disable=protected-access + return df + + +class IotDriver(AbstractBaseDriver): + """Driver class for IoT data.""" + + name: str = "iot_driver" + version: str = "0.1b0" + + def __init__( + self, + mqtt_kwargs, + time_window, + data_model, + start=False, + metadata=None, + **kwargs, + ): + super().__init__(metadata=metadata) + self.mqtt_kwargs = mqtt_kwargs + self.kwargs = kwargs + self.stream = None + self.time_window = time_window + self.start = start + self.df_model = _build(data_model) + + def _get_schema(self): + if not self.stream: + self.log.debug("creating stream...") + stream = streamz.Stream.from_mqtt(**self.mqtt_kwargs) + self.stream = stream.accumulate( + _mqtt_preprocess, returns_state=False, start=pd.DataFrame() + ).to_dataframe(example=self.df_model) + self.stream.stream.sink(d.append) + if self.start: + self.log.info("streaming started...") + self.stream.start() + return {"stream": str(self.stream)} + + def read(self) -> streamz.dataframe.core.DataFrame: + """Read IoT data.""" + self.log.info("reading stream...") + self._get_schema() + return self.stream + + def load(self) -> NoReturn: + """Load IoT data.""" + self.log.error("loading entire product is not supported for IoT data") + raise NotImplementedError( + "loading entire product is not supported for IoT data" + ) + + def process(self, query: GeoQuery) -> streamz.dataframe.core.DataFrame: + """Process IoT data with the passed query. + + Parameters + ---------- + query : intake_geokube.GeoQuery + A query to use + + Returns + ------- + stream : streamz.dataframe.core.DataFrame + A DataFrame object with streamed content + """ + df = d[0] + if not query: + self.log.info( + "method 'process' called without query. processing skipped." 
+ ) + return df + if query.time: + if not isinstance(query.time, slice): + self.log.error( + "expected 'query.time' type is slice but found %s", + type(query.time), + ) + raise TypeError( + "expected 'query.time' type is slice but found" + f" {type(query.time)}" + ) + self.log.info("querying by time: %s", query.time) + df = df[query.time.start : query.time.stop] + else: + self.log.info( + "getting latest data for the predefined tie window: %s", + self.time_window, + ) + start = dateparser.parse(f"NOW - {self.time_window}") + stop = dateparser.parse("NOW") + df = df[start:stop] # type: ignore[misc] + if query.filters: + self.log.info("filtering with: %s", query.filters) + mask = np.logical_and.reduce( + [df[k] == v for k, v in query.filters.items()] + ) + df = df[mask] + if query.variable: + self.log.info("selecting variables: %s", query.variable) + df = df[query.variable] + return df diff --git a/drivers/intake_geokube/netcdf/__init__.py b/drivers/intake_geokube/netcdf/__init__.py new file mode 100644 index 0000000..315792c --- /dev/null +++ b/drivers/intake_geokube/netcdf/__init__.py @@ -0,0 +1 @@ +"""Domain-specific subpackage for netcdf data.""" diff --git a/drivers/intake_geokube/netcdf/driver.py b/drivers/intake_geokube/netcdf/driver.py new file mode 100644 index 0000000..e29cbfa --- /dev/null +++ b/drivers/intake_geokube/netcdf/driver.py @@ -0,0 +1,64 @@ +"""NetCDF driver for DDS.""" + +from geokube import open_datacube, open_dataset +from geokube.core.datacube import DataCube +from geokube.core.dataset import Dataset + +from ..base import AbstractBaseDriver + + +class NetCdfDriver(AbstractBaseDriver): + """Driver class for netCDF files.""" + + name = "netcdf_driver" + version = "0.1a0" + + def __init__( + self, + path: str, + metadata: dict, + pattern: str | None = None, + field_id: str | None = None, + metadata_caching: bool = False, + metadata_cache_path: str | None = None, + storage_options: dict | None = None, + xarray_kwargs: dict | None = None, + mapping: dict[str, dict[str, str]] | None = None, + load_files_on_persistance: bool = True, + ) -> None: + super().__init__(metadata=metadata) + self.path = path + self.pattern = pattern + self.field_id = field_id + self.metadata_caching = metadata_caching + self.metadata_cache_path = metadata_cache_path + self.storage_options = storage_options + self.mapping = mapping + self.xarray_kwargs = xarray_kwargs or {} + self.load_files_on_persistance = load_files_on_persistance + + @property + def _arguments(self) -> dict: + return { + "path": self.path, + "id_pattern": self.field_id, + "metadata_caching": self.metadata_caching, + "metadata_cache_path": self.metadata_cache_path, + "mapping": self.mapping, + } | self.xarray_kwargs + + def read(self) -> Dataset | DataCube: + """Read netCDF.""" + if self.pattern: + return open_dataset( + pattern=self.pattern, delay_read_cubes=True, **self._arguments + ) + return open_datacube(**self._arguments) + + def load(self) -> Dataset | DataCube: + """Load netCDF.""" + if self.pattern: + return open_dataset( + pattern=self.pattern, delay_read_cubes=False, **self._arguments + ) + return open_datacube(**self._arguments) diff --git a/drivers/intake_geokube/queries/__init__.py b/drivers/intake_geokube/queries/__init__.py new file mode 100644 index 0000000..e6847fb --- /dev/null +++ b/drivers/intake_geokube/queries/__init__.py @@ -0,0 +1 @@ +"""Subpackage with queries.""" diff --git a/drivers/intake_geokube/queries/geoquery.py b/drivers/intake_geokube/queries/geoquery.py new file mode 100644 index 
0000000..9ab408a --- /dev/null +++ b/drivers/intake_geokube/queries/geoquery.py @@ -0,0 +1,94 @@ +"""Module with GeoQuery definition.""" + +from __future__ import annotations + +import json +from typing import Any + +from pydantic import ( + BaseModel, + ConfigDict, + Field, + field_serializer, + model_validator, +) + +from .types import BoundingBoxDict, SliceQuery, TimeComboDict +from .utils import maybe_dict_to_slice, slice_to_dict + + +class GeoQuery(BaseModel, extra="allow"): + """GeoQuery definition class.""" + + model_config = ConfigDict(arbitrary_types_allowed=True) + + variable: list[str] | None = None + time: SliceQuery | TimeComboDict | None = None + area: BoundingBoxDict | None = None + location: dict[str, float | list[float]] | None = None + vertical: SliceQuery | float | list[float] | None = None + filters: dict[str, Any] = Field(default_factory=dict) + format: str | None = None + format_args: dict[str, Any] | None = None + + @field_serializer("time") + def serialize_time(self, time: SliceQuery | TimeComboDict | None, _info): + """Serialize time.""" + if isinstance(time, slice): + return slice_to_dict(time) + return time + + @model_validator(mode="after") + @classmethod + def area_locations_mutually_exclusive_validator(cls, query): + """Assert 'locations' and 'area' are not passed at once.""" + if query.area is not None and query.location is not None: + raise KeyError( + "area and location couldn't be processed together, please use" + " one of them" + ) + return query + + @model_validator(mode="before") + @classmethod + def build_filters(cls, values: dict[str, Any]) -> dict[str, Any]: + """Build filters based on extra arguments.""" + if "filters" in values: + return values + filters = {} + fields = {} + for k in values.keys(): + if k in cls.model_fields: + fields[k] = values[k] + continue + if isinstance(values[k], dict): + values[k] = maybe_dict_to_slice(values[k]) + filters[k] = values[k] + fields["filters"] = filters + return fields + + def model_dump_original(self, skip_empty: bool = True) -> dict: + """Return the JSON representation of the original query.""" + res = super().model_dump() + res = {**res.pop("filters", {}), **res} + if skip_empty: + res = dict(filter(lambda item: item[1] is not None, res.items())) + return res + + @classmethod + def parse( + cls, load: "GeoQuery" | dict | str | bytes | bytearray + ) -> "GeoQuery": + """Parse load to GeoQuery instance.""" + if isinstance(load, cls): + return load + if isinstance(load, (str, bytes, bytearray)): + load = json.loads(load) + if isinstance(load, dict): + load = GeoQuery(**load) + else: + raise TypeError( + f"type of the `load` argument ({type(load).__name__}) is not" + " supported!" + ) + return load diff --git a/drivers/intake_geokube/queries/types.py b/drivers/intake_geokube/queries/types.py new file mode 100644 index 0000000..cfb7327 --- /dev/null +++ b/drivers/intake_geokube/queries/types.py @@ -0,0 +1,10 @@ +"""Module with types definitions.""" + +from pydantic import BeforeValidator +from typing_extensions import Annotated + +from . 
import utils as ut + +SliceQuery = Annotated[slice, BeforeValidator(ut.dict_to_slice)] +TimeComboDict = Annotated[dict, BeforeValidator(ut.assert_time_combo_dict)] +BoundingBoxDict = Annotated[dict, BeforeValidator(ut.assert_bounding_box_dict)] diff --git a/drivers/intake_geokube/queries/utils.py b/drivers/intake_geokube/queries/utils.py new file mode 100644 index 0000000..c2fb2dd --- /dev/null +++ b/drivers/intake_geokube/queries/utils.py @@ -0,0 +1,106 @@ +"""Module with util functions.""" + +from typing import Any, Collection, Hashable, Iterable + +import dateparser +from pydantic.fields import FieldInfo + +_TIME_COMBO_SUPPORTED_KEYS: tuple[str, ...] = ( + "year", + "month", + "day", + "hour", +) + +_BBOX_SUPPORTED_KEYS: tuple[str, ...] = ( + "north", + "south", + "west", + "east", +) + + +def _validate_dict_keys( + provided_keys: Iterable, supported_keys: Collection +) -> None: + for provided_k in provided_keys: + assert ( + provided_k in supported_keys + ), f"key '{provided_k}' is not among supported ones: {supported_keys}" + + +def dict_to_slice(mapping: dict) -> slice: + """Convert dictionary to slice.""" + mapping = mapping or {} + assert "start" in mapping or "stop" in mapping, ( + "missing at least of of the keys ['start', 'stop'] required to" + " construct slice object based on the dictionary" + ) + if "start" in mapping and "NOW" in mapping["start"]: + mapping["start"] = dateparser.parse(mapping["start"]) + if "stop" in mapping and "NOW" in mapping["stop"]: + mapping["stop"] = dateparser.parse(mapping["stop"]) + return slice( + mapping.get("start"), + mapping.get("stop"), + mapping.get("step"), + ) + + +def maybe_dict_to_slice(mapping: Any) -> slice: + """Convert valid dictionary to slice or return the original one.""" + if "start" in mapping or "stop" in mapping: + return dict_to_slice(mapping) + return mapping + + +def slice_to_dict(slice_: slice) -> dict: + """Convert slice to dictionary.""" + return {"start": slice_.start, "stop": slice_.stop, "step": slice_.step} + + +def assert_time_combo_dict(mapping: dict) -> dict: + """Check if dictionary contains time-combo related keys.""" + _validate_dict_keys(mapping.keys(), _TIME_COMBO_SUPPORTED_KEYS) + return mapping + + +def assert_bounding_box_dict(mapping: dict) -> dict: + """Check if dictionary contains bounding-box related keys.""" + _validate_dict_keys(mapping.keys(), _BBOX_SUPPORTED_KEYS) + return mapping + + +def split_extra_arguments( + values: dict, fields: dict[str, FieldInfo] +) -> tuple[dict, dict]: + """Split arguments to field-related and auxiliary.""" + extra_args: dict = {} + field_args: dict = {} + extra_args = {k: v for k, v in values.items() if k not in fields} + field_args = {k: v for k, v in values.items() if k in fields} + return (field_args, extra_args) + + +def find_value( + content: dict | list, key: Hashable, *, recursive: bool = False +) -> Any: + """Return value for a 'key' (recursive search).""" + result = None + if isinstance(content, dict): + if key in content: + return content[key] + if not recursive: + return result + for value in content.values(): + if isinstance(value, (dict, list)): + result = result or find_value(value, key, recursive=True) + elif isinstance(content, list): + for el in content: + result = result or find_value(el, key, recursive=True) + else: + raise TypeError( + "'content' argument need to be a dictionary or a list but found," + f" '{type(content)}" + ) + return result diff --git a/drivers/intake_geokube/queries/workflow.py b/drivers/intake_geokube/queries/workflow.py new 
file mode 100644 index 0000000..a93cd91 --- /dev/null +++ b/drivers/intake_geokube/queries/workflow.py @@ -0,0 +1,72 @@ +"""Module with workflow definition.""" + +from __future__ import annotations + +import json +from collections import Counter +from typing import Any + +from pydantic import BaseModel, Field, field_validator, model_validator + +from .utils import find_value + + +class Task(BaseModel): + """Single task model definition.""" + + id: str | int + op: str + use: list[str | int] = Field(default_factory=list) + args: dict[str, Any] = Field(default_factory=dict) + + +class Workflow(BaseModel): + """Workflow model definition.""" + + tasks: list[Task] + dataset_id: str = "" + product_id: str = "" + + @model_validator(mode="before") + @classmethod + def obtain_dataset_id(cls, values): + """Get dataset_id and product_id from included tasks.""" + dataset_id = find_value(values, key="dataset_id", recursive=True) + if not dataset_id: + raise KeyError( + "'dataset_id' key was missing. did you defined it for 'args'?" + ) + product_id = find_value(values, key="product_id", recursive=True) + if not product_id: + raise KeyError( + "'product_id' key was missing. did you defined it for 'args'?" + ) + return values | {"dataset_id": dataset_id, "product_id": product_id} + + @field_validator("tasks", mode="after") + @classmethod + def match_unique_ids(cls, items): + """Verify the IDs are uniqe.""" + for id_value, id_count in Counter([item.id for item in items]).items(): + if id_count != 1: + raise ValueError(f"duplicated key found: `{id_value}`") + return items + + @classmethod + def parse( + cls, + workflow: Workflow | dict | list[dict] | str | bytes | bytearray, + ) -> Workflow: + """Parse to Workflow model.""" + if isinstance(workflow, cls): + return workflow + if isinstance(workflow, (str | bytes | bytearray)): + workflow = json.loads(workflow) + if isinstance(workflow, list): + return cls(tasks=workflow) # type: ignore[arg-type] + if isinstance(workflow, dict): + return cls(**workflow) + raise TypeError( + f"`workflow` argument of type `{type(workflow).__name__}`" + " cannot be safetly parsed to the `Workflow`" + ) diff --git a/drivers/intake_geokube/sentinel/__init__.py b/drivers/intake_geokube/sentinel/__init__.py new file mode 100644 index 0000000..4957128 --- /dev/null +++ b/drivers/intake_geokube/sentinel/__init__.py @@ -0,0 +1 @@ +"""Domain-specific subpackage for sentinel data.""" diff --git a/drivers/intake_geokube/sentinel/auth.py b/drivers/intake_geokube/sentinel/auth.py new file mode 100644 index 0000000..680bfb2 --- /dev/null +++ b/drivers/intake_geokube/sentinel/auth.py @@ -0,0 +1,45 @@ +"""Module with auth utils for accessing sentinel data.""" + +import os + +import requests +from requests.auth import AuthBase + + +class SentinelAuth(AuthBase): # pylint: disable=too-few-public-methods + """Class ewith authentication for accessing sentinel data.""" + + _SENTINEL_AUTH_URL: str = os.environ.get( + "SENTINEL_AUTH_URL", + "https://identity.dataspace.copernicus.eu/auth/realms/CDSE/protocol/openid-connect/token", + ) + + def __init__(self, username: str, password: str) -> None: + self.username = username + self.password = password + + @classmethod + def _get_access_token(cls, username: str, password: str) -> str: + data = { + "client_id": "cdse-public", + "username": username, + "password": password, + "grant_type": "password", + } + try: + response = requests.post( + cls._SENTINEL_AUTH_URL, data=data, timeout=10 + ) + response.raise_for_status() + except Exception as e: + raise 
RuntimeError( + "Access token creation failed. Reponse from the server was:" + f" {response.json()}" + ) from e + return response.json()["access_token"] + + def __call__(self, request): + """Add authorization header.""" + token: str = self._get_access_token(self.username, self.password) + request.headers["Authorization"] = f"Bearer {token}" + return request diff --git a/drivers/intake_geokube/sentinel/driver.py b/drivers/intake_geokube/sentinel/driver.py new file mode 100644 index 0000000..4895103 --- /dev/null +++ b/drivers/intake_geokube/sentinel/driver.py @@ -0,0 +1,342 @@ +"""Geokube driver for sentinel data.""" + +import glob +import os +import string +import zipfile +from multiprocessing.util import get_temp_dir +from typing import Collection, NoReturn + +import dask +import numpy as np +import pandas as pd +import xarray as xr +from geokube.backend.netcdf import open_datacube +from geokube.core.dataset import Dataset +from intake.source.utils import reverse_format +from pyproj import Transformer +from pyproj.crs import CRS, GeographicCRS + +from ..base import AbstractBaseDriver +from ..queries.geoquery import GeoQuery +from ..queries.types import BoundingBoxDict, TimeComboDict +from .auth import SentinelAuth +from .odata_builder import ODataRequest, ODataRequestBuilder + + +def _get_items_nbr(mapping, key) -> int: + if isinstance(mapping[key], str): + return 1 + return len(mapping[key]) if isinstance(mapping[key], Collection) else 1 + + +def _validate_geoquery_for_sentinel(query: GeoQuery) -> None: + if query.time: + if isinstance(query.time, dict) and any([ + _get_items_nbr(query.time, "year") != 1, + _get_items_nbr(query.time, "month") != 1, + _get_items_nbr(query.time, "day") != 1, + ]): + raise ValueError( + "valid time combo for sentinel data should contain exactly one" + " value for 'year', one for 'month', and one for 'day'" + ) + if query.location and ( + "latitude" not in query.location or "longitude" not in query.location + ): + raise ValueError( + "both 'latitude' and 'longitude' must be defined for location" + ) + + +def _bounding_box_to_polygon( + bbox: BoundingBoxDict, +) -> list[tuple[float, float]]: + return [ + (bbox["north"], bbox["west"]), + (bbox["north"], bbox["east"]), + (bbox["south"], bbox["east"]), + (bbox["south"], bbox["west"]), + (bbox["north"], bbox["west"]), + ] + + +def _timecombo_to_day_range(combo: TimeComboDict) -> tuple[str, str]: + return (f"{combo['year']}-{combo['month']}-{combo['day']}T00:00:00", + f"{combo['year']}-{combo['month']}-{combo['day']}T23:59:59") + + +def _location_to_valid_point( + location: dict[str, float | list[float]] +) -> tuple[float, float]: + if isinstance(location["latitude"], list): + if len(location["latitude"]) > 1: + raise ValueError( + "location can have just a single point (single value for" + " 'latitude' and 'longitude')" + ) + lat = location["latitude"][0] + else: + lat = location["latitude"] + if isinstance(location["longitude"], list): + if len(location["longitude"]) > 1: + raise ValueError( + "location can have just a single point (single value for" + " 'latitude' and 'longitude')" + ) + lon = location["longitude"][0] + else: + lon = location["longitude"] + return (lat, lon) + + +def _validate_path_and_pattern(path: str, pattern: str): + if path.startswith(os.sep) or pattern.startswith(os.sep): + raise ValueError(f"path and pattern cannot start with {os.sep}") + + +def _get_attrs_keys_from_pattern(pattern: str) -> list[str]: + return list( + map( + lambda x: str(x[1]), + filter(lambda x: x[1], 
string.Formatter().parse(pattern)), + ) + ) + + +def unzip_and_clear(target: str) -> None: + """Unzip ZIP archives in 'target' dir and remove archive.""" + assert os.path.exists(target), f"directory '{target}' does not exist" + for file in os.listdir(target): + if not file.endswith(".zip"): + continue + prod_id = os.path.splitext(os.path.basename(file))[0] + target_prod = os.path.join(target, prod_id) + os.makedirs(target_prod, exist_ok=True) + try: + with zipfile.ZipFile(os.path.join(target, file)) as archive: + archive.extractall(path=target_prod) + except zipfile.BadZipFile as err: + raise RuntimeError("downloaded ZIP archive is invalid") from err + os.remove(os.path.join(target, file)) + + +def _get_field_name_from_path(path: str): + res, file = path.split(os.sep)[-2:] + band = file.split("_")[-2] + return f"{res}_{band}" + + +def preprocess_sentinel(dset: xr.Dataset) -> xr.Dataset: + """Preprocessing function for sentinel data. + + Parameters + ---------- + dset : xarray.Dataset + xarray.Dataset to be preprocessed + + Returns + ------- + ds : xarray.Dataset + Preprocessed xarray.Dataset + """ + crs = CRS.from_cf(dset["spatial_ref"].attrs) + transformer = Transformer.from_crs( + crs_from=crs, crs_to=GeographicCRS(), always_xy=True + ) + x_vals, y_vals = dset["x"].to_numpy(), dset["y"].to_numpy() + lon_vals, lat_vals = transformer.transform(*np.meshgrid(x_vals, y_vals)) # type: ignore[call-overload] # pylint: disable=unpacking-non-sequence + source_path = dset.encoding["source"] + sensing_time = os.path.splitext(source_path.split(os.sep)[-6])[0].split( + "_" + )[-1] + time = pd.to_datetime([sensing_time]).to_numpy() + dset = dset.assign_coords({ + "time": time, + "latitude": (("x", "y"), lat_vals), + "longitude": (("x", "y"), lon_vals), + }).rename({"band_data": _get_field_name_from_path(source_path)}) + expanded_timedim_dataarrays = {var_name: dset[var_name].expand_dims('time') for var_name in dset.data_vars} + dset = dset.update(expanded_timedim_dataarrays) + return dset + + +class _SentinelKeys: # pylint: disable=too-few-public-methods + UUID: str = "Id" + SENSING_TIME: str = "ContentDate/Start" + TYPE: str = "Name" + + +class SentinelDriver(AbstractBaseDriver): + """Driver class for sentinel data.""" + + name: str = "sentinel_driver" + version: str = "0.1b0" + + def __init__( + self, + metadata: dict, + url: str, + zippattern: str, + zippath: str, + type: str, + username: str | None = None, + password: str | None = None, + sentinel_timeout: int | None = None, + mapping: dict | None = None, + xarray_kwargs: dict | None = None, + ) -> None: + super().__init__(metadata=metadata) + self.url: str = url + self.zippattern: str = zippattern + self.zippath: str = zippath + self.type_ = type + _validate_path_and_pattern(path=self.zippath, pattern=self.zippattern) + self.auth: SentinelAuth = self._get_credentials(username, password) + self.target_dir: str = get_temp_dir() + self.sentinel_timeout: int | None = sentinel_timeout + self.mapping: dict = mapping or {} + self.xarray_kwargs: dict = xarray_kwargs or {} + + def _get_credentials( + self, username: str | None, password: str | None + ) -> SentinelAuth: + if username and password: + return SentinelAuth( + username=username, + password=password, + ) + self.log.debug("getting credentials from environmental variables...") + if ( + "SENTINEL_USERNAME" not in os.environ + or "SENTINEL_PASSWORD" not in os.environ + ): + self.log.error( + "missing at least of of the mandatory environmental variables:" + " ['SENTINEL_USERNAME', 
'SENTINEL_PASSWORD']" + ) + raise KeyError( + "missing at least of of the mandatory environmental variables:" + " ['SENTINEL_USERNAME', 'SENTINEL_PASSWORD']" + ) + return SentinelAuth( + username=os.environ["SENTINEL_USERNAME"], + password=os.environ["SENTINEL_PASSWORD"], + ) + + def _force_sentinel_type(self, builder): + self.log.info("forcing sentinel type: %s...", self.type_) + return builder.filter(_SentinelKeys.TYPE, containing=self.type_) + + def _filter_by_sentinel_attrs(self, builder, query: GeoQuery): + self.log.info("filtering by sentinel attributes...") + path_filter_names: set[str] = { + parsed[1] + for parsed in string.Formatter().parse(self.zippattern) + if parsed[1] + } + if not query.filters: + return builder + sentinel_filter_names: set[str] = ( + query.filters.keys() - path_filter_names + ) + for sf in sentinel_filter_names: + builder = builder.filter_attr(sf, query.filters[sf]) + return builder + + def _build_odata_from_geoquery(self, query: GeoQuery) -> ODataRequest: + self.log.debug("validating geoquery...") + _validate_geoquery_for_sentinel(query) + self.log.debug("constructing odata request...") + builder = ODataRequestBuilder.new(url=self.url) + if "product_id" in query.filters: + builder = builder.filter( + name=_SentinelKeys.UUID, eq=query.filters.get("product_id") + ) + builder = self._filter_by_sentinel_attrs(builder, query=query) + builder = self._force_sentinel_type(builder) + if query.time: + if isinstance(query.time, dict): + timecombo_start, timecombo_end = _timecombo_to_day_range(query.time) + self.log.debug("filtering by timecombo: [%s, %s] ", timecombo_start, timecombo_end) + builder = builder.filter_date( + _SentinelKeys.SENSING_TIME, ge=timecombo_start, le=timecombo_end + ) + elif isinstance(query.time, slice): + self.log.debug("filtering by slice: %s", query.time) + builder = builder.filter_date( + _SentinelKeys.SENSING_TIME, + ge=query.time.start, + le=query.time.stop, + ) + if query.area: + self.log.debug("filering by polygon") + polygon = _bounding_box_to_polygon(query.area) + builder = builder.intersect_polygon(polygon=polygon) + if query.location: + self.log.debug("filering by location") + point = _location_to_valid_point(query.location) + builder = builder.intersect_point(point=point) + return builder.build() + + def _prepare_dataset(self) -> Dataset: + data: list = [] + attrs_keys: list[str] = _get_attrs_keys_from_pattern(self.zippattern) + for f in glob.glob(os.path.join(self.target_dir, self.zippath)): + self.log.debug("processsing file %s", f) + file_no_tmp_dir = f.removeprefix(self.target_dir).strip(os.sep) + attr = reverse_format(self.zippattern, file_no_tmp_dir) + attr[Dataset.FILES_COL] = [f] + data.append(attr) + # NOTE: eventually, join files if there are several for the same attrs + # combintation + df = ( + pd.DataFrame(data) + .groupby(attrs_keys) + .agg({Dataset.FILES_COL: sum}) + ) + datacubes = [] + for ind, files in df.iterrows(): + load = dict(zip(df.index.names, ind)) + load[Dataset.FILES_COL] = files + load[Dataset.DATACUBE_COL] = dask.delayed(open_datacube)( + path=files.item(), + id_pattern=None, + mapping=self.mapping, + metadata_caching=False, + **self.xarray_kwargs, + preprocess=preprocess_sentinel, + ) + datacubes.append(load) + return Dataset(pd.DataFrame(datacubes)) + + def read(self) -> NoReturn: + """Read sentinel data.""" + raise NotImplementedError( + "reading metadata is not supported for sentinel data" + ) + + def load(self) -> NoReturn: + """Load sentinel data.""" + raise NotImplementedError( + "loading entire 
product is not supported for sentinel data" + ) + + def process(self, query: GeoQuery) -> Dataset: + """Process query for sentinel data.""" + self.log.info("builder odata request based on passed geoquery...") + req = self._build_odata_from_geoquery(query) + self.log.info("downloading data...") + req.download( + target_dir=self.target_dir, + auth=self.auth, + timeout=self.sentinel_timeout, + ) + self.log.info("unzipping and removing archives...") + unzip_and_clear(self.target_dir) + self.log.info("preparing geokube.Dataset...") + dataset = self._prepare_dataset() + dataset = super()._process_geokube_dataset( + dataset, query=query, compute=True + ) + return dataset diff --git a/drivers/intake_geokube/sentinel/odata_builder.py b/drivers/intake_geokube/sentinel/odata_builder.py new file mode 100644 index 0000000..4036810 --- /dev/null +++ b/drivers/intake_geokube/sentinel/odata_builder.py @@ -0,0 +1,564 @@ +"""Module with OData API classes definitions.""" + +from __future__ import annotations + +__all__ = ( + "datetime_to_isoformat", + "HttpMethod", + "ODataRequestBuilder", + "ODataRequest", +) + +import math +import os +import warnings +from collections import defaultdict +from datetime import datetime +from enum import Enum, auto +from typing import Any, Callable + +import pandas as pd +import requests +from tqdm import tqdm + +from ..utils import create_zip_from_response +from .auth import SentinelAuth + + +def datetime_to_isoformat(date: str | datetime) -> str: + """Convert string of datetime object to ISO datetime string.""" + if isinstance(date, str): + try: + value = pd.to_datetime([date]).item().isoformat() + except ValueError as exc: + raise ValueError(f"cannot parse '{date}' to datetime") from exc + elif isinstance(date, datetime): + value = value.isoformat() + else: + raise TypeError(f"type '{type(date)}' is not supported") + return f"{value}Z" + + +class HttpMethod(Enum): + """Enum with HTTP methods.""" + + GET = auto() + POST = auto() + + @property + def method_name(self) -> str: + """Get name of the HTTP method.""" + return self.name.lower() + + +class _ODataEntity: # pylint: disable=too-few-public-methods + def __init__( + self, + url: str, + params: dict | None = None, + method: HttpMethod = HttpMethod.GET, + body: dict | None = None, + ) -> None: + if not params: + self.params: dict[str, list] = defaultdict(list) + self.conj: list = [] + if not body: + self.body: dict = {} + self.url = url + self.method = method + self.callbacks: dict = {} + + +class _ODataBuildableMixin: # pylint: disable=too-few-public-methods + odata: _ODataEntity + + def build(self) -> ODataRequest: + """Build ODataRequest object.""" + return ODataRequest(self.odata) + + +class _ODataOrderMixing: # pylint: disable=too-few-public-methods + odata: _ODataEntity + + def order(self, by: str, desc: bool = False) -> _ODataOperation: + """Add ordering option. 
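        A minimal illustration (the URL is a placeholder, not a real endpoint):

            op = ODataRequestBuilder.new("https://example.com/odata/v1")
            op = op.order(by="ProductionDate", desc=True)
            op.odata.params["orderby"]   # -> ['ProductionDate desc']

        Calling `order` a second time raises `ValueError`, since only a single
        ordering key is kept.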
+ + Parameters + ---------- + by : str + A key by which ordering should be done + desc : bool + If descending order should be used + """ + order = "desc" if desc else "asc" + if "orderby" in self.odata.params: + raise ValueError( + f"ordering was already defined: {self.odata.params['orderby']}" + ) + self.odata.params["orderby"] = [f"{by} {order}"] + match self: + case _ODataOperation(): + return _ODataOperation(self.odata) + case _: + raise TypeError(f"unexpected type: {type(self)}") + + +class ODataRequest: + """OData request object.""" + + _ALL_HTTP_CODES: int = -1 + _DOWNLOAD_PATTERN: str = ( + "https://zipper.dataspace.copernicus.eu" + "/odata/v1/Products({pid})/$value" + ) + + def __init__(self, odata: _ODataEntity) -> None: + self.request_params: dict = {} + self.odata = odata + self._convert_filter_param() + self._convert_order_param() + + def _convert_order_param(self) -> None: + if self.odata.params["orderby"]: + self.request_params["orderby"] = self.odata.params["orderby"] + + def _convert_filter_param(self) -> None: + param: str = "" + for i in range(len(self.odata.params["filter"])): + if not param: + param = self.odata.params["filter"][i] + else: + param = f"{param} {self.odata.params['filter'][i]}" + if i < len(self.odata.params["filter"]) - 1: + param = f"{param} {self.odata.conj[i]}" + self.request_params["filter"] = param + + def _query( + self, + headers: dict | None = None, + auth: Any | None = None, + timeout: int | None = None, + ) -> requests.Response: + if self.odata.params and not self.odata.url.endswith("?"): + self.odata.url = f"{self.odata.url}?" + params = {} + if self.request_params: + params = { + f"${key}": value for key, value in self.request_params.items() + } + match self.odata.method: + case HttpMethod.GET: + return requests.get( + self.odata.url, + params=params, + headers=headers, + timeout=timeout, + ) + case HttpMethod.POST: + return requests.post( + self.odata.url, + data=self.odata.body, + auth=auth, + timeout=timeout, + ) + case _: + raise NotImplementedError( + f"method {self.odata.method} is not supported" + ) + + def with_callback( + self, + callback: Callable[[requests.Response], Any], + http_code: int | None = None, + ) -> "ODataRequest": + """ + Add callbacks for request response. + + Parameters + ---------- + callback : callable + A callback function taking just a single argument, + i.e `requests.Response` object + http_code : int + HTTP code for which callback should be used. + If not passed, callback will be executed for all codes. + """ + if http_code: + if http_code in self.odata.callbacks: + warnings.warn( + f"callback for HTTP code {http_code} will be overwritten" + ) + self.odata.callbacks[http_code] = callback + else: + self.odata.callbacks[self._ALL_HTTP_CODES] = callback + return self + + def query( + self, + headers: dict | None = None, + auth: Any | None = None, + timeout: int | None = None, + ) -> Any: + """Query data based on the built request. 
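        A usage sketch (placeholder URL; callback and timeout values are
        illustrative only):

            req = (
                ODataRequestBuilder.new("https://example.com/odata/v1")
                .filter("Name", containing="S2A")
                .build()
            )
            products = req.with_callback(lambda resp: resp.json(), 200).query(timeout=30)

        If no callback matches the response status code, the raw
        `requests.Response` object is returned.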
+ + Parameters + ---------- + headers : dict, optional + Headers passed to HTTP request + auth : Any, optional + Authorization object or tuple (,) for basic authentication + + Returns + ------- + res : Any + Value returned from the appropriate callback or `requests.Response` object otherwise + """ + response = self._query(headers=headers, auth=auth, timeout=timeout) + if response.status_code in self.odata.callbacks: + return self.odata.callbacks[response.status_code](response) + if self._ALL_HTTP_CODES in self.odata.callbacks: + return self.odata.callbacks[self._ALL_HTTP_CODES](response) + return response + + def download( + self, + target_dir: str, + headers: dict | None = None, + auth: Any | None = None, + timeout: int | None = None, + ) -> Any: + """Download requested data to `target_dir`. + + Parameters + ---------- + target_dir : str + Path to the directory where files should be downloaded + headers : dict, optional + Headers passed to HTTP request + auth : Any, optional + Authorization object or tuple (,) for basic + authentication + """ + os.makedirs(target_dir, exist_ok=True) + response = self._query(headers=headers, auth=auth, timeout=timeout) + response.raise_for_status() + if response.status_code in self.odata.callbacks: + self.odata.callbacks[response.status_code](response) + if self._ALL_HTTP_CODES in self.odata.callbacks: + self.odata.callbacks[self._ALL_HTTP_CODES](response) + df = pd.DataFrame(response.json()["value"]) + if len(df) == 0: + raise ValueError("no product found for the request") + if not isinstance(auth, SentinelAuth): + raise TypeError( + f"expected authentication of the type '{SentinelAuth}' but" + f" passed '{type(auth)}'" + ) + for pid in tqdm(df["Id"]): + response = requests.get( + self._DOWNLOAD_PATTERN.format(pid=pid), + stream=True, + auth=auth, + timeout=timeout, + ) + response.raise_for_status() + create_zip_from_response( + response, os.path.join(target_dir, f"{pid}.zip") + ) + + +class _ODataOperation(_ODataBuildableMixin, _ODataOrderMixing): + def __init__(self, odata: _ODataEntity) -> None: + self.odata = odata + + def _append_query_param(self, param: str | None) -> None: + if not param: + return + self.odata.params["filter"].append(param) + self.odata.conj.append("and") + + def _validate_args(self, lt, le, eq, ge, gt) -> None: + if eq: + if any(map(lambda x: x is not None, [lt, le, ge, gt])): + raise ValueError( + "cannot define extra operations for a single option if" + " `eq` is defined" + ) + if lt and le: + raise ValueError( + "cannot define both operations `lt` and `le` for a single" + " option" + ) + if gt and ge: + raise ValueError( + "cannot define both operations `gt` and `ge` for a single" + " option" + ) + + def and_(self) -> _ODataOperation: + """Put conjunctive conditions.""" + self.odata.conj[-1] = "and" + return self + + def or_(self) -> _ODataOperation: + """Put alternative conditions.""" + self.odata.conj[-1] = "or" + return self + + def filter_attr(self, name: str, value: str) -> _ODataOperation: + """Filter by attribute value. 
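        Illustrative call (the attribute name and value are made up):

            op = op.filter_attr("productType", "S2MSI2A")

        This appends an `Attributes/.../any(...)` clause to the `$filter`
        query parameter, joined to any previous conditions (with `and`
        unless `or_()` was called).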
+ + Parameters + ---------- + name : str + Name of an attribute + value : str + Value of the attribute + """ + param: str = ( + "Attributes/OData.CSC.ValueTypeAttribute/any(att:att/Name eq" + f" ‘[{name}]’" + + f"and att/OData.CSC.ValueTypeAttribute/Value eq ‘{value}]’)" + ) + self._append_query_param(param) + return self + + def filter( + self, + name: str, + *, + lt: str | None = None, + le: str | None = None, + eq: str | None = None, + ge: str | None = None, + gt: str | None = None, + containing: str | None = None, + not_containing: str | None = None, + ) -> _ODataOperation: + """Filter option by values. + + Add filter option to the request. Value of an option indicated by + the `name` argument will be checked agains given values or arguments. + You cannot specify both `lt` and `le` or `ge` and `gt. + + Parameters + ---------- + lt : str, optional + value for `less than` comparison + le : str, optional + value for `less ord equal` comparison + eq : str, optional + value for `equal` comparison + ge : str, optional + value for `greater or equal` comparison + gt : str, optional + value for `greater than` comparison + containing : str, optional + value to be contained + not_containing : str, optional + value not to be containing + """ + if not any([le, lt, eq, ge, gt, containing, not_containing]): + return self + self._validate_args(le=le, lt=lt, eq=eq, ge=ge, gt=gt) + build_: _ODataOperation = self + assert isinstance(build_, _ODataOperation), "unexpected type" + if lt: + build_ = build_.with_option_lt(name, lt).and_() + if le: + build_ = build_.with_option_le(name, le).and_() + if eq: + build_ = build_.with_option_equal(name, eq).and_() + if ge: + build_ = build_.with_option_ge(name, ge).and_() + if gt: + build_ = build_.with_option_gt(name, gt).and_() + if containing: + build_ = build_.with_option_containing(name, containing).and_() + if not_containing: + build_ = build_.with_option_not_containing( + name, not_containing + ).and_() + + return build_ + + def filter_date( + self, + name: str, + *, + lt: str | None = None, + le: str | None = None, + eq: str | None = None, + ge: str | None = None, + gt: str | None = None, + ) -> _ODataOperation: + """ + Filter datetetime option by values. + + Add filter option to the request. Datetime values of an option + indicated by the `name` argument will be checked agains given + values or arguments. + Values of arguments will be automatically converted to ISO datetime + string format. + You cannot specify both `lt` and `le` or `ge` and `gt. 
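        For instance (option name and values are illustrative):

            op = op.filter("ContentLength", lt="100", gt="10")

        appends the two conditions `"ContentLength lt 100"` and
        `"ContentLength gt 10"`, combined with `and`.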
+ + Parameters + ---------- + lt : str, optional + value for `less than` comparison + le : str, optional + value for `less ord equal` comparison + eq : str, optional + value for `equal` comparison + ge : str, optional + value for `greater or equal` comparison + gt : str, optional + value for `greater than` comparison + """ + if lt: + lt = datetime_to_isoformat(lt) + if le: + le = datetime_to_isoformat(le) + if eq: + eq = datetime_to_isoformat(eq) + if ge: + ge = datetime_to_isoformat(ge) + if gt: + gt = datetime_to_isoformat(gt) + return self.filter(name, lt=lt, le=le, eq=eq, ge=ge, gt=gt) + + def with_option_equal(self, name: str, value: str) -> "_ODataOperation": + """Add filtering by option `is equal`.""" + param: str = f"{name} eq '{value}'" + self._append_query_param(param) + return self + + def with_option_containing( + self, name: str, value: str + ) -> "_ODataOperation": + """Add filtering by option `containing`.""" + param: str = f"contains({name},'{value}')" + self._append_query_param(param) + return self + + def with_option_not_containing( + self, name: str, value: str + ) -> "_ODataOperation": + """Add filtering by option `not containing`.""" + param: str = f"not contains({name},'{value}')" + self._append_query_param(param) + return self + + def with_option_equal_list( + self, name: str, value: list[str] + ) -> "_ODataOperation": + """Add filtering by equality.""" + self.odata.body.update({"FilterProducts": [{name: v} for v in value]}) + self.odata.method = HttpMethod.POST + return self + + def with_option_lt(self, name: str, value: str) -> "_ODataOperation": + """Add filtering with `less than` option.""" + param: str = f"{name} lt {value}" + self._append_query_param(param) + return self + + def with_option_le(self, name: str, value: str) -> "_ODataOperation": + """Add filtering with `less or equal` option.""" + param: str = f"{name} le {value}" + self._append_query_param(param) + return self + + def with_option_gt(self, name: str, value: str) -> "_ODataOperation": + """Add filtering with `greater or equal` option.""" + param: str = f"{name} gt {value}" + self._append_query_param(param) + return self + + def with_option_ge(self, name: str, value: str) -> "_ODataOperation": + """Add filtering with `greater than` option.""" + param: str = f"{name} ge {value}" + self._append_query_param(param) + return self + + def intersect_polygon( + self, + polygon: list[tuple[float, float]] | list[list[float]], + srid: str | None = "4326", + ) -> "_ODataOperation": + """ + Add filtering by polygon intersection. + + Parameters + ---------- + polygon: list of 2-element tuple or 2-element lists of floats + Points belonging to the polygon [longitude, latitude]. + The 1st at the last point needs to be the same (polygon needs + to be closed) + srid : str, optional + SRID name, currently supported is only `4326` + """ + if srid != "4326": + raise NotImplementedError( + "currently supported SRID is only ['4326' (EPSG 4326)]" + ) + if not polygon: + return self + if any(map(lambda x: len(x) != 2, polygon)): + raise ValueError( + "polygon should be defined as a 2-element list or tuple" + " (containing latitude and longitude values)" + ) + if not math.isclose(polygon[0][0], polygon[-1][0]) or not math.isclose( + polygon[0][1], polygon[-1][1] + ): + raise ValueError( + "polygon needs to end at the same point it starts!" 
+ ) + polygon_repr = ",".join([f"{p[1]} {p[0]}" for p in polygon]) + param = f"OData.CSC.Intersects(area=geography'SRID={srid};POLYGON(({polygon_repr}))')" + self._append_query_param(param) + return self + + def intersect_point( + self, + point: list[float] | tuple[float, float], + srid: str | None = "4326", + ) -> "_ODataOperation": + """Add filtering by intersection with a point. + + Parameters + ---------- + point: 2-element tuple or list of floats + Point definition [latitude, longitude] + srid : str, optional + SRID name, currently supported is only `4326` + """ + if srid != "4326": + raise NotImplementedError( + "currently supported SRID is only ['4326' (EPSG 4326)]" + ) + if len(point) > 2: + # NOTE: to assure the order is [latitude, longitude] and not vice versa! + raise ValueError( + "point need to have just two elemens [latitude, longitude]" + ) + param = ( + f"OData.CSC.Intersects(area=geography'SRID={srid};POINT({point[0]} {point[1]})')" + ) + self._append_query_param(param) + return self + + +class ODataRequestBuilder( + _ODataOperation +): # pylint: disable=too-few-public-methods + """OData API request builder.""" + + _BASE_PATTERN: str = "{url}/Products" + + @classmethod + def new(cls, url: str) -> _ODataOperation: + """Start building OData request.""" + url = cls._BASE_PATTERN.format(url=url.strip("/")) + return _ODataOperation(_ODataEntity(url=url)) diff --git a/drivers/intake_geokube/utils.py b/drivers/intake_geokube/utils.py new file mode 100644 index 0000000..a3a97e2 --- /dev/null +++ b/drivers/intake_geokube/utils.py @@ -0,0 +1,51 @@ +"""Utils module.""" + +import os + +import requests + + +def create_zip_from_response(response: requests.Response, target: str) -> None: + """Create ZIP archive based on the content in streamable response. + + Parameters + ---------- + response : requests.Response + Response whose contant is streamable (`stream=True`) + target : str + Target path containing name and .zip extension + + Raises + ------ + ValueError + if `Content-Type` header is missing + TypeError + if type supplied by `Content-Type` is other than `zip` + RuntimError + if size provided by `Content-Length` header differs from the size + of the downloaded file + """ + content_type = response.headers.get("Content-Type") + if not content_type: + raise ValueError("`Content-Type` mandatory header is missing") + format_ = content_type.split("/")[-1] + _, ext = os.path.splitext(target) + if format_ != "zip": + raise TypeError( + f"provided content type {format_} is not allowed. 
expected 'zip'" + " format" + ) + assert ext[1:] == "zip", "expected target with '.zip' extension" + + expected_length = int(response.headers["Content-Length"]) + total_bytes = 0 + with open(target, "wb") as f: + for chunk in response.iter_content(chunk_size=1024): + if chunk: + f.write(chunk) + total_bytes += len(chunk) + if expected_length != total_bytes: + raise RuntimeError( + "downloaded file is not complete in spite of download finished" + " successfully" + ) diff --git a/drivers/intake_geokube/version.py b/drivers/intake_geokube/version.py new file mode 100644 index 0000000..656021a --- /dev/null +++ b/drivers/intake_geokube/version.py @@ -0,0 +1,3 @@ +"""Module with the current version number definition.""" + +__version__ = "1.0b0" diff --git a/drivers/intake_geokube/wrf/__init__.py b/drivers/intake_geokube/wrf/__init__.py new file mode 100644 index 0000000..c528597 --- /dev/null +++ b/drivers/intake_geokube/wrf/__init__.py @@ -0,0 +1 @@ +"""Domain subpackage for WRF datasets.""" diff --git a/drivers/intake_geokube/wrf/driver.py b/drivers/intake_geokube/wrf/driver.py new file mode 100644 index 0000000..d819760 --- /dev/null +++ b/drivers/intake_geokube/wrf/driver.py @@ -0,0 +1,178 @@ +"""WRF driver for DDS.""" + +from functools import partial +from typing import Any + +import numpy as np +import xarray as xr +from geokube import open_datacube, open_dataset +from geokube.core.datacube import DataCube +from geokube.core.dataset import Dataset + +from ..base import AbstractBaseDriver + +_DIM_RENAME_MAP: dict = { + "Time": "time", + "south_north": "latitude", + "west_east": "longitude", +} +_COORD_RENAME_MAP: dict = { + "XTIME": "time", + "XLAT": "latitude", + "XLONG": "longitude", +} +_COORD_SQUEEZE_NAMES: tuple = ("latitude", "longitude") +_PROJECTION: dict = {"grid_mapping_name": "latitude_longitude"} + + +def _cast_to_set(item: Any) -> set: + if item is None: + return set() + if isinstance(item, set): + return item + if isinstance(item, str): + return {item} + if isinstance(item, list): + return set(item) + raise TypeError(f"type '{type(item)}' is not supported!") + + +def rename_coords(dset: xr.Dataset) -> xr.Dataset: + """Rename coordinates.""" + dset_ = dset.rename_vars(_COORD_RENAME_MAP) + # Removing `Time` dimension from latitude and longitude. + coords = dset_.coords + for name in _COORD_SQUEEZE_NAMES: + coord = dset_[name] + if "Time" in coord.dims: + coords[name] = coord.squeeze(dim="Time", drop=True) + return dset_ + + +def change_dims(dset: xr.Dataset) -> xr.Dataset: + """Change dimensions to time, latitude, and longitude.""" + # Preparing new horizontal coordinates. + lat = (["south_north"], dset["latitude"].to_numpy().mean(axis=1)) + lon = (["west_east"], dset["longitude"].to_numpy().mean(axis=0)) + # Removing old horizontal coordinates. + dset_ = dset.drop_vars(["latitude", "longitude"]) + # Adding new horizontal coordinates and setting their units. + coords = dset_.coords + coords["latitude"] = lat + coords["longitude"] = lon + dset_["latitude"].attrs["units"] = "degree_north" + dset_["longitude"].attrs["units"] = "degree_east" + # Making `time`, `latitude`, and `longitude` new dimensions, instead of + # `Time`, `south_north`, and `west_east`. 
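    # A sketch of the mapping applied below (see `_DIM_RENAME_MAP` above):
    #   {"Time": "time", "south_north": "latitude", "west_east": "longitude"}
    # `swap_dims` only relabels the dimensions; the data values are unchanged.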
+ dset_ = dset_.swap_dims(_DIM_RENAME_MAP) + return dset_ + + +def add_projection(dset: xr.Dataset) -> xr.Dataset: + """Add projection information to the dataset.""" + coords = dset.coords + coords["crs"] = xr.DataArray(data=np.array(1), attrs=_PROJECTION) + for var in dset.data_vars.values(): + enc = var.encoding + enc["grid_mapping"] = "crs" + if coord_names := enc.get("coordinates"): + for old_name, new_name in _COORD_RENAME_MAP.items(): + coord_names = coord_names.replace(old_name, new_name) + enc["coordinates"] = coord_names + return dset + + +def choose_variables( + dset: xr.Dataset, + variables_to_keep: str | list[str] | None = None, + variables_to_skip: str | list[str] | None = None, +) -> xr.Dataset: + """Choose only some variables by keeping or skipping some of them.""" + variables_to_keep_ = _cast_to_set(variables_to_keep) + variables_to_skip_ = _cast_to_set(variables_to_skip) + selected_variables = set(dset.data_vars.keys()) + if len(variables_to_keep_) > 0: + selected_variables = set(dset.data_vars.keys()) & variables_to_keep_ + selected_variables = selected_variables - variables_to_skip_ + if len(set(dset.data_vars.keys())) != len(selected_variables): + return dset[selected_variables] + return dset + + +def preprocess_wrf( + dset: xr.Dataset, variables_to_keep, variables_to_skip +) -> xr.Dataset: + """Preprocess WRF dataset.""" + dset = rename_coords(dset) + dset = change_dims(dset) + dset = add_projection(dset) + dset = choose_variables(dset, variables_to_keep, variables_to_skip) + return dset + + +class WrfDriver(AbstractBaseDriver): + """Driver class for netCDF files.""" + + name = "wrf_driver" + version = "0.1a0" + + def __init__( + self, + path: str, + metadata: dict, + pattern: str | None = None, + field_id: str | None = None, + metadata_caching: bool = False, + metadata_cache_path: str | None = None, + storage_options: dict | None = None, + xarray_kwargs: dict | None = None, + mapping: dict[str, dict[str, str]] | None = None, + load_files_on_persistance: bool = True, + variables_to_keep: str | list[str] | None = None, + variables_to_skip: str | list[str] | None = None, + ) -> None: + super().__init__(metadata=metadata) + self.path = path + self.pattern = pattern + self.field_id = field_id + self.metadata_caching = metadata_caching + self.metadata_cache_path = metadata_cache_path + self.storage_options = storage_options + self.mapping = mapping + self.xarray_kwargs = xarray_kwargs or {} + self.load_files_on_persistance = load_files_on_persistance + self.preprocess = partial( + preprocess_wrf, + variables_to_keep=variables_to_keep, + variables_to_skip=variables_to_skip, + ) + + @property + def _arguments(self) -> dict: + return { + "path": self.path, + "id_pattern": self.field_id, + "metadata_caching": self.metadata_caching, + "metadata_cache_path": self.metadata_cache_path, + "mapping": self.mapping, + } | self.xarray_kwargs + + def read(self) -> Dataset | DataCube: + """Read netCDF.""" + if self.pattern: + return open_dataset( + pattern=self.pattern, + preprocess=self.preprocess, + **self._arguments, + ) + return open_datacube( + delay_read_cubes=True, + preprocess=self.preprocess, + **self._arguments, + ) + + def load(self) -> Dataset | DataCube: + """Load netCDF.""" + if self.pattern: + return open_dataset(pattern=self.pattern, **self._arguments) + return open_datacube(delay_read_cubes=False, **self._arguments) diff --git a/drivers/pyproject.toml b/drivers/pyproject.toml new file mode 100644 index 0000000..ae138ac --- /dev/null +++ b/drivers/pyproject.toml @@ -0,0 
+1,85 @@ +[build-system] +requires = ["setuptools", "wheel"] +build-backend = "setuptools.build_meta" + +[project] +name = "intake-geokube" +description = "opengeokube DDS driver." +requires-python = ">=3.10" +readme = "README.md" +license = {file = "LICENSE"} +dynamic = ["version"] +authors = [ + {name = "Jakub Walczak"}, + {name = "Marco Mancini"}, + {name = "Mirko Stojiljkovic"}, + {name = "Valentina Scardigno"}, +] +classifiers = [ + "Development Status :: 3 - Alpha", + "Environment :: Web Environment", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: Apache Software License", + "Natural Language :: English", + "Operating System :: OS Independent", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Topic :: Scientific/Engineering :: Atmospheric Science", + "Topic :: Software Development :: Libraries :: Python Modules", + "Topic :: Software Development :: Libraries :: Application Frameworks", + "Topic :: Software Development :: Libraries", +] +dependencies = [ + "dateparser", + "intake", + "pydantic", + "tqdm", + "streamz@git+https://github.com/python-streamz/streamz.git", + "paho-mqtt" +] +[project.entry-points."intake.drivers"] +netcdf_driver = "intake_geokube.netcdf.driver:NetCdfDriver" +sentinel_driver = "intake_geokube.sentinel.driver:SentinelDriver" +iot_driver = "intake_geokube.iot.driver:IotDriver" +wrf_driver = "intake_geokube.wrf.driver:WrfDriver" + +[tool.setuptools.dynamic] +version = {attr = "intake_geokube.version.__version__"} + +[tool.setuptools] +include-package-data = true + +[tool.setuptools.packages.find] +where = ["."] +exclude = ["examples*"] + +[tool.pydocstyle] + +[tool.pylint.'MESSAGES CONTROL'] +disable = "too-many-arguments,too-many-instance-attributes,too-few-public-methods,duplicate-code" + + +[tool.isort] +profile = "black" +include_trailing_comma = true +line_length = 79 +overwrite_in_place = true +use_parentheses = true + +[tool.black] +line_length = 79 +preview = true + +[tool.mypy] +files = [ + "intake_geokube", "." 
+] +exclude = ["tests/"] + +[tool.pytest.ini_options] +filterwarnings = [ + "ignore::DeprecationWarning" +] diff --git a/drivers/setup.py b/drivers/setup.py new file mode 100644 index 0000000..b908cbe --- /dev/null +++ b/drivers/setup.py @@ -0,0 +1,3 @@ +import setuptools + +setuptools.setup() diff --git a/drivers/tests/__init__.py b/drivers/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/drivers/tests/queries/__init__.py b/drivers/tests/queries/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/drivers/tests/queries/test_utils.py b/drivers/tests/queries/test_utils.py new file mode 100644 index 0000000..0fbefbc --- /dev/null +++ b/drivers/tests/queries/test_utils.py @@ -0,0 +1,50 @@ +from intake_geokube.queries import utils as ut + + +class TestUtils: + def test_find_key_root_level_recusrive_switched_off(self): + assert ut.find_value({"a": 0, "b": 10}, "b", recursive=False) == 10 + + def test_find_key_root_level_recusrive_switched_on(self): + assert ut.find_value({"a": 0, "b": 10}, "b", recursive=True) == 10 + + def test_return_none_on_missing_key_root_level(self): + assert ut.find_value({"a": 0, "b": 10}, "c", recursive=True) is None + + def test_return_none_on_missing_key_another_level(self): + assert ( + ut.find_value({"a": 0, "b": {"c": 10}}, "d", recursive=True) + is None + ) + + def test_find_key_another_level_recursive_switched_off(self): + assert ( + ut.find_value({"a": 0, "b": {"c": "ccc"}}, "c", recursive=False) + is None + ) + + def test_find_key_another_level_recursive_switched_on(self): + assert ( + ut.find_value({"a": 0, "b": {"c": "ccc"}}, "c", recursive=True) + == "ccc" + ) + + def test_find_list_first(self): + assert ( + ut.find_value( + {"a": 0, "b": [{"c": "ccc"}, {"d": "ddd"}]}, + "c", + recursive=True, + ) + == "ccc" + ) + + def test_find_list_not_first(self): + assert ( + ut.find_value( + {"a": 0, "b": [{"d": "ddd"}, {"c": "ccc"}]}, + "c", + recursive=True, + ) + == "ccc" + ) diff --git a/drivers/tests/queries/test_workflow.py b/drivers/tests/queries/test_workflow.py new file mode 100644 index 0000000..1b8f8c3 --- /dev/null +++ b/drivers/tests/queries/test_workflow.py @@ -0,0 +1,61 @@ +import pytest + +from intake_geokube.queries.workflow import Workflow + + +class TestWorkflow: + def test_fail_on_missing_dataset_id(self): + with pytest.raises( + KeyError, + match=r"'dataset_id' key was missing. did you defined it for*", + ): + Workflow.parse({ + "tasks": [{ + "id": 0, + "op": "subset", + "args": { + "product_id": "reanalysis", + }, + }] + }) + + def test_fail_on_missing_product_id(self): + with pytest.raises( + KeyError, + match=r"'product_id' key was missing. 
did you defined it for*", + ): + Workflow.parse({ + "tasks": [{ + "id": 0, + "op": "subset", + "args": { + "dataset_id": "era5", + }, + }] + }) + + def test_fail_on_nonunique_id(self): + with pytest.raises( + ValueError, + match=r"duplicated key found*", + ): + Workflow.parse({ + "tasks": [ + { + "id": 0, + "op": "subset", + "args": { + "dataset_id": "era5", + "product_id": "reanalysis", + }, + }, + { + "id": 0, + "op": "subset", + "args": { + "dataset_id": "era5", + "product_id": "reanalysis", + }, + }, + ] + }) diff --git a/drivers/tests/sentinel/__init__.py b/drivers/tests/sentinel/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/drivers/tests/sentinel/fixture.py b/drivers/tests/sentinel/fixture.py new file mode 100644 index 0000000..cfbb8bd --- /dev/null +++ b/drivers/tests/sentinel/fixture.py @@ -0,0 +1,11 @@ +import pytest + + +@pytest.fixture +def sentinel_files(): + return [ + "/tmp/pymp-2b5gr07m/162f8f7e-c954-4f69-bb53-ed820aa6432a/S2A_MSIL2A_20231007T100031_N0509_R122_T32TQM_20231007T142901.SAFE/GRANULE/L2A_T32TQM_A043305_20231007T100026/IMG_DATA/R20m/T32TQM_20231007T100031_B01_20m.jp2", + "/tmp/pymp-2b5gr07m/162f8f7e-c954-4f69-bb53-ed820aa6432a/S2A_MSIL2A_20231007T100031_N0509_R122_T32TQM_20231007T142901.SAFE/GRANULE/L2A_T32TQM_A043305_20231007T100026/IMG_DATA/R20m/T32TQM_20231007T100031_B10_20m.jp2", + "/tmp/pymp-2b5gr07m/162f8f7e-c954-4f69-bb53-ed820aa6432a/S2A_MSIL2A_20231007T100031_N0509_R122_T32TQM_20231007T142901.SAFE/GRANULE/L2A_T32TQM_A043305_20231007T100026/IMG_DATA/R30m/T32TQM_20231007T100031_B04_30m.jp2", + "/tmp/pymp-2b5gr07m/162f8f7e-c954-4f69-bb53-ed820aa6432a/S2A_MSIL2A_20231007T100031_N0509_R122_T32TQM_20231007T142901.SAFE/GRANULE/L2A_T32TQM_A043305_20231007T100026/IMG_DATA/R10m/T32TQM_20231007T100031_B12_40m.jp2", + ] diff --git a/drivers/tests/sentinel/test_builder.py b/drivers/tests/sentinel/test_builder.py new file mode 100644 index 0000000..f2e5cc1 --- /dev/null +++ b/drivers/tests/sentinel/test_builder.py @@ -0,0 +1,376 @@ +from multiprocessing import Value +from unittest import mock + +import pytest +from requests import Response, Session + +from intake_geokube.sentinel.odata_builder import ( + HttpMethod, + ODataRequest, + ODataRequestBuilder, + _ODataEntity, + _ODataOperation, + _ODataOrderMixing, + datetime_to_isoformat, +) + + +@pytest.fixture +def odata() -> _ODataEntity: + return _ODataEntity(url="http://url.com/v1") + + +@pytest.fixture +def odata_op(odata) -> _ODataOperation: + return _ODataOperation(odata=odata) + + +class TestHttpMethod: + @pytest.mark.parametrize( + "method,res", [(HttpMethod.GET, "get"), (HttpMethod.POST, "post")] + ) + def test_get_proper_name(self, method, res): + assert method.method_name == res + + +class TestODataRequestBuildable: + def test_build_from_operation(self, odata): + res = _ODataOperation(odata).build() + assert isinstance(res, ODataRequest) + assert res.odata == odata + + +class TestOrderMixin: + @pytest.mark.parametrize("type_", [_ODataOperation]) + def test_proper_class_when_order(self, type_, odata): + res = type_(odata).order(by="ProductionDate") + assert isinstance(res, type_) + + def test_fail_order_on_wrong_superclass(self, odata): + class A(_ODataOrderMixing): + def __init__(self, odata): + self.odata = odata + + with pytest.raises(TypeError, match=r"unexpected type:*"): + A(odata).order(by="a") + + +class TestODataRequest: + def test_convert_filter_param(self, odata_op): + odata_op.filter("a", eq=10).or_().filter("b", lt=100, ge=10).order( + by="a", desc=True + ) + req = 
ODataRequest(odata_op.odata) + assert req.odata.params["filter"] == [ + "a eq '10'", + "b lt 100", + "b ge 10", + ] + assert ( + req.request_params["filter"] == "a eq '10' or b lt 100 and b ge 10" + ) + assert req.odata.params["orderby"] == ["a desc"] + + +class TestODataRequestBuilder: + def test_create_odata_operation_from_builder(self): + res = ODataRequestBuilder.new(url="http:/url.com") + assert isinstance(res, _ODataOperation) + assert res.odata.url == "http:/url.com/Products" + + +class TestODataOperation: + @pytest.fixture + def odata_request(self) -> ODataRequest: + return ODataRequestBuilder.new("http://aaaa.com").build() + + @pytest.mark.parametrize( + "datestring,result", + [ + ("2002-02-01", "2002-02-01T00:00:00Z"), + ("2001-02-02 12:45", "2001-02-02T12:45:00Z"), + ("1977-12-23 11:00:05", "1977-12-23T11:00:05Z"), + ("1977-12-23T11:00:05", "1977-12-23T11:00:05Z"), + ], + ) + def test_convert_to_isoformat(self, datestring, result): + assert datetime_to_isoformat(datestring) == result + + def testwith_option_equal(self, odata_op): + odata_op.with_option_equal("Name", "some_name") + assert len(odata_op.odata.params) == 1 + assert odata_op.odata.method is HttpMethod.GET + assert odata_op.odata.params["filter"] == ["Name eq 'some_name'"] + + def test_option_containing(self, odata_op): + odata_op.with_option_containing("some_option", "aaa") + assert len(odata_op.odata.params) == 1 + assert odata_op.odata.method is HttpMethod.GET + assert odata_op.odata.params["filter"] == [ + "contains(some_option,'aaa')" + ] + + def test_option_not_containing(self, odata_op): + odata_op.with_option_not_containing("some_option", "aaa") + assert len(odata_op.odata.params) == 1 + assert odata_op.odata.method is HttpMethod.GET + assert odata_op.odata.params["filter"] == [ + "not contains(some_option,'aaa')" + ] + + def testwith_option_equal_list(self, odata_op): + odata_op.with_option_equal_list("Name", ["some_name", "aaa"]) + assert len(odata_op.odata.params) == 0 + assert odata_op.odata.method is HttpMethod.POST + assert odata_op.odata.body == { + "FilterProducts": [{"Name": "some_name"}, {"Name": "aaa"}] + } + + def test_several_options(self, odata_op): + odata_op.with_option_equal("aa", "bb").and_().with_option_lt( + "aaa", "1000" + ) + assert odata_op.odata.method is HttpMethod.GET + assert len(odata_op.odata.params) == 1 + assert odata_op.odata.params["filter"] == ["aa eq 'bb'", "aaa lt 1000"] + + @pytest.mark.parametrize( + "comb", + [ + {"lt": 1, "eq": 10}, + {"le": 1, "eq": 10}, + {"lt": 1, "le": 10}, + {"gt": 1, "ge": 10}, + {"ge": 1, "eq": 10}, + {"gt": 1, "eq": 10}, + {"lt": 1, "eq": 1, "ge": 1}, + ], + ) + def test_filter_fail_on_wrong_arguments_passed(self, comb, odata_op): + with pytest.raises(ValueError, match=r"cannot define *"): + odata_op.filter(name="a", **comb) + + def test_filter_single(self, odata_op): + res = odata_op.filter(name="a", lt=100) + assert res.odata.params["filter"] == ["a lt 100"] + + def test_filter_multiple(self, odata_op): + res = odata_op.filter(name="a", lt=100, gt=10) + assert res.odata.params["filter"] == ["a lt 100", "a gt 10"] + assert res.odata.conj[-1] == "and" + + def test_filter_multiple2(self, odata_op): + res = odata_op.filter(name="a", ge=10, le=100) + assert res.odata.params["filter"] == ["a le 100", "a ge 10"] + assert res.odata.conj[-1] == "and" + + def test_filter_multiple3(self, odata_op): + res = odata_op.filter(name="a", eq=10) + assert res.odata.params["filter"] == ["a eq '10'"] + assert res.odata.conj[-1] == "and" + + 
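    # End-to-end sketch of the builder API exercised by these tests
    # (URL and filter values are illustrative only):
    #   req = (
    #       ODataRequestBuilder.new("https://example.com/odata/v1")
    #       .filter("Name", containing="S2A")
    #       .filter_date("ContentDate/Start", ge="2023-10-01", le="2023-10-07")
    #       .order(by="ContentDate/Start")
    #       .build()
    #   )
    #   req.query(timeout=30)  # returns a requests.Response unless a callback was set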
@pytest.mark.parametrize("arr", ["111", "111", "02-20", "56:45", "aaa"]) + def test_filter_date_fail_arg_nondateparsable(self, arr, odata_op): + with pytest.raises(ValueError, match=r"cannot parse*"): + odata_op.filter_date("ProductionDate", lt=arr) + + @pytest.mark.parametrize("arr", [(1,), 1, 1.2, [1, 2], {1, 2}]) + def test_filter_date_fail_arg_wrong_type(self, arr, odata_op): + with pytest.raises(TypeError, match=r"type .* is not supported"): + odata_op.filter_date("ProductionDate", lt=arr) + + def test_filter_and_order_ascending(self, odata_op): + odata_op.with_option_gt("aaa", "-50").order( + by="ProductionDate", desc=False + ) + assert odata_op.odata.method is HttpMethod.GET + assert len(odata_op.odata.params) == 2 + assert odata_op.odata.body == {} + assert odata_op.odata.params["filter"] == ["aaa gt -50"] + assert odata_op.odata.params["orderby"] == ["ProductionDate asc"] + + def test_filter_and_order_descending(self, odata_op): + odata_op.with_option_gt("aaa", "-50").order( + by="ProductionDate", desc=True + ) + assert odata_op.odata.method is HttpMethod.GET + assert len(odata_op.odata.params) == 2 + assert odata_op.odata.body == {} + assert odata_op.odata.params["filter"] == ["aaa gt -50"] + assert odata_op.odata.params["orderby"] == ["ProductionDate desc"] + + @mock.patch.object(Session, "send") + def test_request_data(self, send_mock, odata_op): + send_mock.json.return_value = "{'response': 'some response'}" + _ = ( + odata_op.with_option_gt("aaa", "-50") + .order(by="ProductionDate", desc=True) + .build() + .query() + ) + send_mock.assert_called_once() + assert ( + send_mock.call_args_list[0].args[0].url + == "http://url.com/v1?%24filter=aaa+gt+-50&%24orderby=ProductionDate+desc" + ) + + @mock.patch.object(Session, "send") + def test_url_passed_with_extra_slashes(self, send_mock): + builder = ODataRequestBuilder.new( + "https://some_url.com/odata/v1" + ).build() + assert builder.odata.url == "https://some_url.com/odata/v1/Products" + + def test_polygon_fail_on_other_srid_passed(self, odata_op): + with pytest.raises( + NotImplementedError, match=r"currently supported SRID is only*" + ): + odata_op.intersect_polygon( + polygon=[[0, 1], [1, 2], [0, 1]], srid="123" + ) + + def test_polygon_fail_on_polygon_with_more_than_two_coords(self, odata_op): + with pytest.raises( + ValueError, + match=r"polygon should be defined as a 2-element list or tuple*", + ): + odata_op.intersect_polygon(polygon=[[0, 1], [1, 2, 3], [0, 1]]) + + def test_polygon_fail_on_polygon_ending_not_on_start_point(self, odata_op): + with pytest.raises( + ValueError, + match=r"polygon needs to end at the same point it starts!", + ): + odata_op.intersect_polygon(polygon=[[0, 1], [1, 3], [1, 1]]) + + def test_location_fail_on_other_srid_passed(self, odata_op): + with pytest.raises( + NotImplementedError, match=r"currently supported SRID is only*" + ): + odata_op.intersect_point(point=(0.1, 2.0), srid="123") + + def test_location_fail_on_more_than_two_coords(self, odata_op): + with pytest.raises( + ValueError, match=r"point need to have just two elemens*" + ): + odata_op.intersect_point(point=[0, 1, 4]) + + @mock.patch.object(Session, "send") + @pytest.mark.parametrize( + "code,callback", [(200, lambda r: "ok"), (400, lambda r: "bad")] + ) + def test_callback_call_on_defined( + self, send_mock, code, callback, odata_request + ): + response = Response() + response.status_code = code + send_mock.return_value = response + res = odata_request.with_callback(callback, code).query() + assert res == callback(None) + + 
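    # Spatial filter sketch for the cases checked above (coordinates are
    # illustrative; only SRID 4326 is supported):
    #   op.intersect_point(point=(45.0, 9.0))          # POINT(45.0 9.0)
    #   op.intersect_polygon(polygon=[[45.0, 8.0], [45.0, 9.0],
    #                                 [44.0, 9.0], [45.0, 8.0]])
    #   -> POLYGON((8.0 45.0,9.0 45.0,9.0 44.0,8.0 45.0))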
@mock.patch.object(Session, "send") + def test_return_response_on_missing_callback( + self, send_mock, odata_request + ): + response = Response() + response.status_code = 200 + send_mock.return_value = response + res = odata_request.query() + assert isinstance(res, Response) + + @mock.patch.object(Session, "send") + @pytest.mark.parametrize("code", [200, 300, 305, 400, 500]) + def test_callback_without_http_code(self, send_mock, code, odata_request): + response = Response() + response.status_code = code + send_mock.return_value = response + callback = mock.MagicMock() + _ = odata_request.with_callback(callback).query() + callback.assert_called_with(response) + + def test_operations_with_auto_conjunction(self, odata_op): + res = odata_op.filter("a", lt=10).filter("b", ge="aaa") + assert res.odata.params["filter"] == ["a lt 10", "b ge aaa"] + assert len(res.odata.conj) == 2 + assert res.odata.conj == ["and", "and"] + + def test_operations_with_auto_conjunction_with_several_operations( + self, odata_op + ): + res = ( + odata_op.filter("a", lt=10) + .filter("b", ge="aaa") + .filter_date("ProductioNDate", lt="2000-01-01") + ) + assert res.odata.params["filter"] == [ + "a lt 10", + "b ge aaa", + "ProductioNDate lt 2000-01-01T00:00:00Z", + ] + assert len(res.odata.conj) == 3 + assert res.odata.conj == ["and", "and", "and"] + + def test_operations_with_auto_and_explicit_conjunction_with_several_operations( + self, odata_op + ): + res = ( + odata_op.filter("a", lt=10) + .filter("b", ge="aaa") + .or_() + .filter_date("ProductioNDate", lt="2000-01-01") + ) + assert res.odata.params["filter"] == [ + "a lt 10", + "b ge aaa", + "ProductioNDate lt 2000-01-01T00:00:00Z", + ] + assert len(res.odata.conj) == 3 + assert res.odata.conj == ["and", "or", "and"] + + def test_con_conj_on_single_operation(self, odata_op): + res = odata_op.filter("a", lt=10) + assert res.odata.params["filter"] == ["a lt 10"] + assert len(res.odata.conj) == 1 + + def test_operations_with_explicit_conjunction_and(self, odata_op): + res = odata_op.filter("a", lt=10).and_().filter("b", ge="aaa") + assert res.odata.params["filter"] == ["a lt 10", "b ge aaa"] + assert len(res.odata.conj) == 2 + assert res.odata.conj == ["and", "and"] + + def test_operations_with_explicit_conjunction_or(self, odata_op): + res = odata_op.filter("a", lt=10).or_().filter("b", ge="aaa") + assert res.odata.params["filter"] == ["a lt 10", "b ge aaa"] + assert len(res.odata.conj) == 2 + assert res.odata.conj == ["or", "and"] + + def test_operation_with_idempotent_same_conjunction(self, odata_op): + res = odata_op.filter("a", lt=10).or_().or_().filter("b", ge="aaa") + assert res.odata.params["filter"] == ["a lt 10", "b ge aaa"] + assert len(res.odata.conj) == 2 + assert res.odata.conj == ["or", "and"] + + def test_operation_with_idempotent_other_conjunction(self, odata_op): + res = ( + odata_op.filter("a", lt=10) + .or_() + .or_() + .and_() + .filter("b", ge="aaa") + ) + assert res.odata.params["filter"] == ["a lt 10", "b ge aaa"] + assert len(res.odata.conj) == 2 + assert res.odata.conj == ["and", "and"] + + def test_filter_skip_if_all_arg_nones(self, odata_op): + odata_op = odata_op.filter("a").filter("b") + assert len(odata_op.odata.params) == 0 + assert len(odata_op.odata.conj) == 0 + + def test_filter_containing(self, odata_op): + odata_op = odata_op.filter("a", containing="ggg", not_containing="bbb") + assert odata_op.odata.params["filter"] == [ + "contains(a,'ggg')", + "not contains(a,'bbb')", + ] + assert odata_op.odata.conj == ["and", "and"] diff --git 
a/drivers/tests/sentinel/test_driver.py b/drivers/tests/sentinel/test_driver.py new file mode 100644 index 0000000..326bab4 --- /dev/null +++ b/drivers/tests/sentinel/test_driver.py @@ -0,0 +1,177 @@ +import os +from unittest import mock + +import pytest +from intake.source.utils import reverse_format + +import intake_geokube.sentinel.driver as drv +from intake_geokube.queries.geoquery import GeoQuery + +from . import fixture as fxt + + +class TestSentinelDriver: + @pytest.mark.parametrize( + "item,res", + [ + ("aaa", 1), + (["aa", "bb"], 2), + (10, 1), + ([10, 100], 2), + (("a", "b"), 2), + ((-1, -5), 2), + ], + ) + def test_get_items_nbr(self, item, res): + assert drv._get_items_nbr({"key": item}, "key") == res + + @pytest.mark.skip(reason="product_id is not mandatory anymore") + def test_validate_query_fail_on_missing_product_id(self): + query = GeoQuery() + with pytest.raises( + ValueError, match=r"\'product_id\' is mandatory filter" + ): + drv._validate_geoquery_for_sentinel(query) + + @pytest.mark.parametrize( + "time", + [ + {"year": [2000, 2014], "month": 10, "day": 14}, + {"year": 2014, "month": [10, 11], "day": 14}, + {"year": 2000, "month": 10, "day": [14, 15, 16]}, + ], + ) + def test_validate_query_fail_on_multiple_year_month_day(self, time): + query = GeoQuery(product_id="aaa", time=time) + with pytest.raises( + ValueError, + match=( + r"valid time combo for sentinel data should contain exactly" + r" one*" + ), + ): + drv._validate_geoquery_for_sentinel(query) + + @pytest.mark.parametrize( + "time", + [ + {"year": 1999, "month": 10, "day": 14}, + {"year": 2014, "month": 10, "day": 14}, + {"year": 2000, "month": 10, "day": 14}, + ], + ) + def test_validate_query_if_time_passed_as_int(self, time): + query = GeoQuery(product_id="aaa", time=time) + drv._validate_geoquery_for_sentinel(query) + + @pytest.mark.parametrize( + "time", + [ + {"year": "1999", "month": "10", "day": "14"}, + {"year": 2014, "month": "10", "day": 14}, + {"year": "2000", "month": 10, "day": 14}, + ], + ) + def test_validate_query_if_time_passed_as_str(self, time): + query = GeoQuery(product_id="aaa", time=time) + drv._validate_geoquery_for_sentinel(query) + + @pytest.mark.parametrize( + "locs", + [{"latitude": 10}, {"longitude": -10}, {"latitude": 5, "aaa": 10}], + ) + def test_validate_query_Fail_on_missing_key(self, locs): + query = GeoQuery(product_id="aa", location=locs) + with pytest.raises( + ValueError, + match=( + r"both \'latitude\' and \'longitude\' must be defined for" + r" locatio" + ), + ): + drv._validate_geoquery_for_sentinel(query) + + @pytest.mark.parametrize( + "locs", + [ + {"latitude": [10, -5], "longitude": [-1, -2]}, + {"latitude": 10, "longitude": [-1, -2]}, + {"latitude": [10, -5], "longitude": -1}, + ], + ) + def test_location_to_valid_point_fail_on_multielement_list_passed( + self, locs + ): + query = GeoQuery(product_id="aa", location=locs) + with pytest.raises( + ValueError, + match=r"location can have just a single point \(single value for*", + ): + drv._location_to_valid_point(query.location) + + @pytest.mark.parametrize( + "path,res", + [ + ( + "/tmp/pymp-2b5gr07m/162f8f7e-c954-4f69-bb53-ed820aa6432a/S2A_MSIL2A_20231007T100031_N0509_R122_T32TQM_20231007T142901.SAFE/GRANULE/L2A_T32TQM_A043305_20231007T100026/IMG_DATA/R20m/T32TQM_20231007T100031_B01_20m.jp2", + { + "product_id": "162f8f7e-c954-4f69-bb53-ed820aa6432a", + "resolution": "R20m", + "band": "B01", + }, + ), + ( + 
"/tmp/pymp-2b5gr07m/162f8f7e-c954-4f69-bb53-ed820aa6432a/S2A_MSIL2A_20231007T100031_N0509_R122_T32TQM_20231007T142901.SAFE/GRANULE/L2A_T32TQM_A043305_20231007T100026/IMG_DATA/R30m/T32TQM_20231007T100031_B04_30m.jp2", + { + "product_id": "162f8f7e-c954-4f69-bb53-ed820aa6432a", + "resolution": "R30m", + "band": "B04", + }, + ), + ], + ) + def test_zippatern(self, path, res): + zippattern = "/{product_id}/{}.SAFE/GRANULE/{}/IMG_DATA/{resolution}/{}_{}_{band}_{}.jp2" + target_dir = "/tmp/pymp-2b5gr07m" + assert reverse_format(zippattern, path.removeprefix(target_dir)) == res + + @pytest.mark.parametrize( + "path,exp", + [ + ( + "/tmp/pymp-2b5gr07m/162f8f7e-c954-4f69-bb53-ed820aa6432a/S2A_MSIL2A_20231007T100031_N0509_R122_T32TQM_20231007T142901.SAFE/GRANULE/L2A_T32TQM_A043305_20231007T100026/IMG_DATA/R20m/T32TQM_20231007T100031_B01_20m.jp2", + "R20m_B01", + ), + ( + "/tmp/pymp-2b5gr07m/162f8f7e-c954-4f69-bb53-ed820aa6432a/S2A_MSIL2A_20231007T100031_N0509_R122_T32TQM_20231007T142901.SAFE/GRANULE/L2A_T32TQM_A043305_20231007T100026/IMG_DATA/R30m/T32TQM_20231007T100031_B04_30m.jp2", + "R30m_B04", + ), + ], + ) + def test_get_field_name_from_path(self, path, exp): + assert drv._get_field_name_from_path(path) == exp + + @mock.patch.dict(os.environ, {}, clear=True) + def test_fail_if_no_username_passed(self): + with pytest.raises( + KeyError, + match=( + r"missing at least of of the mandatory environmental" + r" variables:" + ), + ): + drv.SentinelDriver({}, "", "", "") + + def test_raise_notimplemented_for_read(self): + with pytest.raises( + NotImplementedError, + match=r"reading metadata is not supported for sentinel data*", + ): + drv.SentinelDriver({}, "", "", "").read() + + def test_raise_notimplemented_for_load(self): + with pytest.raises( + NotImplementedError, + match=r"loading entire product is not supported for sentinel data", + ): + drv.SentinelDriver({}, "", "", "").load() diff --git a/drivers/tests/test_geoquery.py b/drivers/tests/test_geoquery.py new file mode 100644 index 0000000..4cb9daa --- /dev/null +++ b/drivers/tests/test_geoquery.py @@ -0,0 +1,41 @@ +from unittest import mock + +import pytest + +from intake_geokube.queries.geoquery import GeoQuery + + +class TestGeoQuery: + def test_pass_time_as_combo(self): + query = GeoQuery( + time={ + "year": ["2002"], + "month": ["6"], + "day": ["21"], + "hour": ["8", "10"], + } + ) + assert isinstance(query.time, dict) + + def test_pass_time_as_slice(self): + query = GeoQuery(time={"start": "2000-01-01", "stop": "2001-12-21"}) + assert isinstance(query.time, slice) + assert query.time.start == "2000-01-01" + assert query.time.stop == "2001-12-21" + + def test_dump_original_from_time_as_combo(self): + query = GeoQuery( + time={ + "year": ["2002"], + "month": ["6"], + "day": ["21"], + "hour": ["8", "10"], + } + ) + res = query.model_dump_original() + assert isinstance(res["time"], dict) + + def test_dump_original_from_time_as_slice(self): + query = GeoQuery(time={"start": "2000-01-01", "stop": "2001-12-21"}) + res = query.model_dump_original() + assert isinstance(res["time"], dict) From 3a25e5480ecbe77f6124acc4555960dbb401c634 Mon Sep 17 00:00:00 2001 From: Marco Mancini Date: Fri, 12 Jan 2024 10:13:39 +0100 Subject: [PATCH 07/15] Add workflows --- .github/workflows/build-push-docker-prod.yml | 26 ++++++++++++++++ .github/workflows/build-push-docker.yml | 32 ++++++++++++++++++++ 2 files changed, 58 insertions(+) create mode 100644 .github/workflows/build-push-docker-prod.yml create mode 100644 .github/workflows/build-push-docker.yml diff --git 
a/.github/workflows/build-push-docker-prod.yml b/.github/workflows/build-push-docker-prod.yml new file mode 100644 index 0000000..3cc1e88 --- /dev/null +++ b/.github/workflows/build-push-docker-prod.yml @@ -0,0 +1,26 @@ +name: Build Docker image of the geodds-api component and push to the production repository + +on: + push: + tags: + - 'v*' +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Get release tag + run: echo "RELEASE_TAG=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV + - name: Login to Docker registry + run: echo ${{ secrets.DOCKER_PASSWORD }} | docker login ${{ secrets.DOCKER_PROD_REPO_URL }} -u nologin --password-stdin + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + - name: Build and push + uses: docker/build-push-action@v4 + with: + context: . + file: ./Dockerfile + push: true + tags: | + ${{ secrets.DOCKER_PROD_REPO_URL }}/geodds-api:${{ env.RELEASE_TAG }} + ${{ secrets.DOCKER_PROD_REPO_URL }}/geodds-api:latest diff --git a/.github/workflows/build-push-docker.yml b/.github/workflows/build-push-docker.yml new file mode 100644 index 0000000..6365e65 --- /dev/null +++ b/.github/workflows/build-push-docker.yml @@ -0,0 +1,32 @@ +name: Build Docker image of the geodds-api component and push to the dev repository + +on: + pull_request: + types: [opened, synchronize] + workflow_dispatch: +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Set Docker image tag name + run: echo "TAG=$(date +'%Y.%m.%d.%H.%M')" >> $GITHUB_ENV + - name: Login to Scaleway Container Registry + uses: docker/login-action@v2 + with: + username: nologin + password: ${{ secrets.DOCKER_PASSWORD }} + registry: ${{ secrets.DOCKER_DEV_REPO_URL }} + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + - name: Build and push + uses: docker/build-push-action@v4 + with: + context: . 
+ file: ./Dockerfile + push: true + build-args: | + REGISTRY=${{ secrets.DOCKER_DEV_REPO_URL }} + tags: | + ${{ secrets.DOCKER_DEV_REPO_URL }}/geodds-api:${{ env.TAG }} + ${{ secrets.DOCKER_DEV_REPO_URL }}/geodds-api:latest \ No newline at end of file From 291a1e603558052448c57c670ebc87bc32f338d4 Mon Sep 17 00:00:00 2001 From: Marco Mancini Date: Fri, 12 Jan 2024 10:23:53 +0100 Subject: [PATCH 08/15] Remove db folder --- db/Dockerfile | 2 - db/dbmanager/__init__.py | 0 db/dbmanager/dbmanager.py | 183 -------------------------------------- db/scripts/1-init.sql | 66 -------------- db/scripts/2-populate.sql | 2 - 5 files changed, 253 deletions(-) delete mode 100644 db/Dockerfile delete mode 100644 db/dbmanager/__init__.py delete mode 100644 db/dbmanager/dbmanager.py delete mode 100644 db/scripts/1-init.sql delete mode 100644 db/scripts/2-populate.sql diff --git a/db/Dockerfile b/db/Dockerfile deleted file mode 100644 index 8bcf754..0000000 --- a/db/Dockerfile +++ /dev/null @@ -1,2 +0,0 @@ -FROM postgres:14.1 -ADD ./scripts/init.sql /docker-entrypoint-initdb.d/ \ No newline at end of file diff --git a/db/dbmanager/__init__.py b/db/dbmanager/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/db/dbmanager/dbmanager.py b/db/dbmanager/dbmanager.py deleted file mode 100644 index 16b956b..0000000 --- a/db/dbmanager/dbmanager.py +++ /dev/null @@ -1,183 +0,0 @@ -from __future__ import annotations - -from datetime import datetime -from enum import auto, Enum as Enum_, unique - -from sqlalchemy import ( - Column, - create_engine, - DateTime, - Enum, - ForeignKey, - Integer, - JSON, - Sequence, - String -) -from sqlalchemy.orm import declarative_base, relationship, sessionmaker - - -@unique -class RequestStatus(Enum_): - PENDING = auto() - RUNNING = auto() - DONE = auto() - FAILED = auto() - - -class _Repr: - def __repr__(self): - cols = self.__table__.columns.keys() # pylint: disable=no-member - kwa = ', '.join(f'{col}={getattr(self, col)}' for col in cols) - return f'{type(self).__name__}({kwa})' - - -Base = declarative_base(cls=_Repr, name='Base') - - -class Role(Base): - __tablename__ = 'roles' - role_id = Column(Integer, Sequence('role_id_seq'), primary_key=True) - role_name = Column(String(255), nullable=False, unique=True) - - -class User(Base): - __tablename__ = 'users' - user_id = Column(Integer, primary_key=True) - keycloak_id = Column(Integer, nullable=False, unique=True) - api_key = Column(String(255), nullable=False, unique=True) - contact_name = Column(String(255)) - role_id = Column(Integer, ForeignKey('roles.role_id')) - - -class Worker(Base): - __tablename__ = 'workers' - worker_id = Column(Integer, primary_key=True) - status = Column(String(255), nullable=False) - host = Column(String(255)) - dask_scheduler_port = Column(Integer) - dask_dashboard_address = Column(String(10)) - created_on = Column(DateTime, nullable=False) - - -class Request(Base): - __tablename__ = 'requests' - request_id = Column(Integer, primary_key=True) - status = Column(Enum(RequestStatus), nullable=False) - priority = Column(Integer) - user_id = Column(Integer, ForeignKey('users.user_id'), nullable=False) - worker_id = Column(Integer, ForeignKey('workers.worker_id')) - dataset = Column(String(255)) - product = Column(String(255)) - query = Column(JSON()) - estimate_bytes_size = Column(Integer) - download_id = Column(Integer, unique=True) - created_on = Column(DateTime, nullable=False) - last_update = Column(DateTime) - - -class Download(Base): - __tablename__ = 'downloads' - download_id = 
Column( - Integer, primary_key=True - ) - download_uri = Column(String(255)) - storage_id = Column(Integer) - location_path = Column(String(255)) - bytes_size = Column(Integer) - created_on = Column(DateTime, nullable=False) - - -class Storage(Base): - __tablename__ = 'storages' - storage_id = Column(Integer, primary_key=True) - name = Column(String(255)) - host = Column(String(20)) - protocol = Column(String(10)) - port = Column(Integer) - - -class DBManager: - def __init__( - self, - database: str = 'dds', - host: str = 'db', - port: int = 5432, - user: str = 'dds', - password: str = 'dds' - ) -> None: - url = f'postgresql://{user}:{password}@{host}:{port}/{database}' - self.__engine = engine = create_engine(url, echo=True) - self.__session_maker = sessionmaker(bind=engine) - Base.metadata.create_all(engine) - - def create_request( - self, - user_id: int = 1, - dataset: str | None = None, - product: str | None = None, - query: str | None = None, - worker_id: int | None = None, - priority: str | None = None, - estimate_bytes_size: int | None = None, - download_id: int | None = None, - status: RequestStatus = RequestStatus.PENDING, - ) -> int: - # TODO: Add more request-related parameters to this method. - with self.__session_maker() as session: - request = Request( - status=status, - priority=priority, - user_id=user_id, - worker_id=worker_id, - dataset=dataset, - product=product, - query=query, - estimate_bytes_size=estimate_bytes_size, - download_id=download_id, - created_on=datetime.utcnow() - ) - session.add(request) - session.commit() - return request.request_id - - def update_request( - self, - request_id: int, - worker_id: int, - status: RequestStatus - ) -> int: - with self.__session_maker() as session: - request = session.query(Request).get(request_id) - request.status = status - request.worker_id = worker_id - request.last_update = datetime.utcnow() - session.commit() - return request.request_id - - def get_request_status( - self, - request_id - ) -> RequestStatus: - with self.__session_maker() as session: - request = session.query(Request).get(request_id) - return request.status - - def create_worker( - self, - status: str, - dask_scheduler_port: int, - dask_dashboard_address: int, - host: str = 'localhost' - ) -> int: - with self.__session_maker() as session: - worker = Worker( - status=status, - host=host, - dask_scheduler_port=dask_scheduler_port, - dask_dashboard_address=dask_dashboard_address, - created_on=datetime.utcnow() - ) - session.add(worker) - session.commit() - return worker.worker_id diff --git a/db/scripts/1-init.sql b/db/scripts/1-init.sql deleted file mode 100644 index fafd908..0000000 --- a/db/scripts/1-init.sql +++ /dev/null @@ -1,66 +0,0 @@ --- CREATE USER dds WITH PASSWORD 'dds'; --- CREATE DATABASE dds; --- GRANT ALL PRIVILEGES ON DATABASE dds TO dds; - -CREATE TABLE IF NOT EXISTS roles ( - role_id SERIAL PRIMARY KEY, - role_name VARCHAR (255) UNIQUE NOT NULL -); - -CREATE TABLE IF NOT EXISTS users ( - user_id SERIAL PRIMARY KEY, - keycloak_id INT UNIQUE NOT NULL, - api_key VARCHAR(255) UNIQUE NOT NULL, - contact_name VARCHAR(255), - role_id INT, - CONSTRAINT fk_role - FOREIGN KEY(role_id) - REFERENCES roles(role_id) -); - -CREATE TABLE IF NOT EXISTS workers ( - worker_id SERIAL PRIMARY KEY, - status VARCHAR(255) NOT NULL, - host VARCHAR(255), - dask_scheduler_port INT, - dask_dashboard_address CHAR(10), - created_on TIMESTAMP NOT NULL -); - -CREATE TABLE IF NOT EXISTS requests ( - request_id SERIAL PRIMARY KEY, - status VARCHAR(255) NOT NULL, - priority 
INT, - user_id INT NOT NULL, - worker_id INT, - dataset VARCHAR(255), - product VARCHAR(255), - query json, - estimate_bytes_size INT, - download_id INT UNIQUE, - created_on TIMESTAMP NOT NULL, - last_update TIMESTAMP, - CONSTRAINT fk_user - FOREIGN KEY(user_id) - REFERENCES users(user_id), - CONSTRAINT fk_worker - FOREIGN KEY(worker_id) - REFERENCES workers(worker_id) -); - -CREATE TABLE IF NOT EXISTS downloads ( - download_id SERIAL PRIMARY KEY, - download_uri VARCHAR(255), - storage_id INT, - location_path VARCHAR(255), - bytes_size INT, - created_on TIMESTAMP NOT NULL -); - -CREATE TABLE IF NOT EXISTS storages ( - storage_id SERIAL PRIMARY KEY, - name VARCHAR(255), - host VARCHAR(20), - protocol VARCHAR(10), - port INT -); \ No newline at end of file diff --git a/db/scripts/2-populate.sql b/db/scripts/2-populate.sql deleted file mode 100644 index 1406ff9..0000000 --- a/db/scripts/2-populate.sql +++ /dev/null @@ -1,2 +0,0 @@ -INSERT INTO roles VALUES (1, 'internal'); -INSERT INTO users VALUES (1, '1234', '1234:1234', 'Mario Rossi', 1); \ No newline at end of file From 1bd8355460c25d950e2bee1e8781d6734b1340d8 Mon Sep 17 00:00:00 2001 From: Jakub Walczak Date: Mon, 15 Jan 2024 09:00:37 +0100 Subject: [PATCH 09/15] Prepare single workflow for docker images of all components --- .github/workflows/build-push-docker-prod.yml | 26 -------- .github/workflows/build-push-docker.yml | 32 ---------- .github/workflows/deploy-staging.yml | 65 ++++++++++++++++++++ api/Dockerfile | 2 +- datastore/Dockerfile | 2 +- drivers/README.md | 2 +- drivers/pyproject.toml | 2 +- executor/Dockerfile | 2 +- 8 files changed, 70 insertions(+), 63 deletions(-) delete mode 100644 .github/workflows/build-push-docker-prod.yml delete mode 100644 .github/workflows/build-push-docker.yml create mode 100644 .github/workflows/deploy-staging.yml diff --git a/.github/workflows/build-push-docker-prod.yml b/.github/workflows/build-push-docker-prod.yml deleted file mode 100644 index 3cc1e88..0000000 --- a/.github/workflows/build-push-docker-prod.yml +++ /dev/null @@ -1,26 +0,0 @@ -name: Build Docker image of the geodds-api component and push to the production repository - -on: - push: - tags: - - 'v*' -jobs: - build: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: Get release tag - run: echo "RELEASE_TAG=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV - - name: Login to Docker registry - run: echo ${{ secrets.DOCKER_PASSWORD }} | docker login ${{ secrets.DOCKER_PROD_REPO_URL }} -u nologin --password-stdin - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 - - name: Build and push - uses: docker/build-push-action@v4 - with: - context: . 
- file: ./Dockerfile - push: true - tags: | - ${{ secrets.DOCKER_PROD_REPO_URL }}/geodds-api:${{ env.RELEASE_TAG }} - ${{ secrets.DOCKER_PROD_REPO_URL }}/geodds-api:latest diff --git a/.github/workflows/build-push-docker.yml b/.github/workflows/build-push-docker.yml deleted file mode 100644 index 6365e65..0000000 --- a/.github/workflows/build-push-docker.yml +++ /dev/null @@ -1,32 +0,0 @@ -name: Build Docker image of the geodds-api component and push to the dev repository - -on: - pull_request: - types: [opened, synchronize] - workflow_dispatch: -jobs: - build: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: Set Docker image tag name - run: echo "TAG=$(date +'%Y.%m.%d.%H.%M')" >> $GITHUB_ENV - - name: Login to Scaleway Container Registry - uses: docker/login-action@v2 - with: - username: nologin - password: ${{ secrets.DOCKER_PASSWORD }} - registry: ${{ secrets.DOCKER_DEV_REPO_URL }} - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 - - name: Build and push - uses: docker/build-push-action@v4 - with: - context: . - file: ./Dockerfile - push: true - build-args: | - REGISTRY=${{ secrets.DOCKER_DEV_REPO_URL }} - tags: | - ${{ secrets.DOCKER_DEV_REPO_URL }}/geodds-api:${{ env.TAG }} - ${{ secrets.DOCKER_DEV_REPO_URL }}/geodds-api:latest \ No newline at end of file diff --git a/.github/workflows/deploy-staging.yml b/.github/workflows/deploy-staging.yml new file mode 100644 index 0000000..b45bf4e --- /dev/null +++ b/.github/workflows/deploy-staging.yml @@ -0,0 +1,65 @@ +name: Build Docker images for geolake components and push to the repository + +on: + pull_request: + types: [opened, synchronize] + workflow_dispatch: +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Set Docker image tag name + run: echo "TAG=$(date +'%Y.%m.%d.%H.%M')" >> $GITHUB_ENV + - name: Login to Scaleway Container Registry + uses: docker/login-action@v2 + with: + username: nologin + password: ${{ secrets.DOCKER_PASSWORD }} + registry: ${{ vars.DOCKER_REGISTRY }} + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + - name: Build and push drivers + uses: docker/build-push-action@v4 + with: + context: . + file: ./drivers/Dockerfile + push: true + build-args: | + REGISTRY=${{ vars.DOCKER_REGISTRY }} + tags: | + ${{ vars.DOCKER_REGISTRY }}/geolake-drivers:${{ env.TAG }} + ${{ vars.DOCKER_REGISTRY }}/geolake-drivers:latest + - name: Build and push datastore component + uses: docker/build-push-action@v4 + with: + context: . + file: ./datastore/Dockerfile + push: true + build-args: | + REGISTRY=${{ vars.DOCKER_REGISTRY }} + tags: | + ${{ vars.DOCKER_REGISTRY }}/geolake-datastore:${{ env.TAG }} + ${{ vars.DOCKER_REGISTRY }}/geolake-datastore:latest + - name: Build and push api component + uses: docker/build-push-action@v4 + with: + context: . + file: ./api/Dockerfile + push: true + build-args: | + REGISTRY=${{ vars.DOCKER_REGISTRY }} + tags: | + ${{ vars.DOCKER_REGISTRY }}/geolake-api:${{ env.TAG }} + ${{ vars.DOCKER_REGISTRY }}/geolake-api:latest + - name: Build and push executor component + uses: docker/build-push-action@v4 + with: + context: . 
+ file: ./executor/Dockerfile + push: true + build-args: | + REGISTRY=${{ vars.DOCKER_REGISTRY }} + tags: | + ${{ vars.DOCKER_REGISTRY }}/geolake-executor:${{ env.TAG }} + ${{ vars.DOCKER_REGISTRY }}/geolake-executor:latest \ No newline at end of file diff --git a/api/Dockerfile b/api/Dockerfile index 9ee0633..a2cfea0 100644 --- a/api/Dockerfile +++ b/api/Dockerfile @@ -1,6 +1,6 @@ ARG REGISTRY=rg.nl-ams.scw.cloud/geodds-production ARG TAG=latest -FROM $REGISTRY/geodds-datastore:$TAG +FROM $REGISTRY/geolake-datastore:$TAG WORKDIR /app COPY requirements.txt /code/requirements.txt RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt diff --git a/datastore/Dockerfile b/datastore/Dockerfile index 9ca2496..018ad5e 100644 --- a/datastore/Dockerfile +++ b/datastore/Dockerfile @@ -1,6 +1,6 @@ ARG REGISTRY=rg.nl-ams.scw.cloud/geokube-production ARG TAG=latest -FROM $REGISTRY/intake-geokube:$TAG +FROM $REGISTRY/geolake-drivers:$TAG RUN conda install -c conda-forge --yes --freeze-installed psycopg2 \ && conda clean -afy COPY requirements.txt /app/requirements.txt diff --git a/drivers/README.md b/drivers/README.md index f08349c..ed98e22 100644 --- a/drivers/README.md +++ b/drivers/README.md @@ -1,2 +1,2 @@ -# intake-geokube +# geolake-drivers GeoKube plugin for Intake \ No newline at end of file diff --git a/drivers/pyproject.toml b/drivers/pyproject.toml index ae138ac..2f0a6d5 100644 --- a/drivers/pyproject.toml +++ b/drivers/pyproject.toml @@ -3,7 +3,7 @@ requires = ["setuptools", "wheel"] build-backend = "setuptools.build_meta" [project] -name = "intake-geokube" +name = "geolake-drivers" description = "opengeokube DDS driver." requires-python = ">=3.10" readme = "README.md" diff --git a/executor/Dockerfile b/executor/Dockerfile index 6a946fd..db3cebb 100644 --- a/executor/Dockerfile +++ b/executor/Dockerfile @@ -2,7 +2,7 @@ ARG REGISTRY=rg.nl-ams.scw.cloud/geodds-production ARG TAG=latest ARG SENTINEL_USERNAME=... ARG SENTINEL_PASSWORD=... 
-FROM $REGISTRY/geodds-datastore:$TAG +FROM $REGISTRY/geolake-datastore:$TAG WORKDIR /app ENV SENTINEL_USERNAME=$SENTINEL_USERNAME ENV SENTINEL_PASSWORD=$SENTINEL_PASSWORD From 29d93e71ee7a93885cf4ae481e661687b83ca2d4 Mon Sep 17 00:00:00 2001 From: Jakub Walczak Date: Mon, 15 Jan 2024 09:04:53 +0100 Subject: [PATCH 10/15] Build wheel for driver --- .github/workflows/deploy-staging.yml | 12 ++++++++++++ drivers/Dockerfile | 6 +++--- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/.github/workflows/deploy-staging.yml b/.github/workflows/deploy-staging.yml index b45bf4e..039f13c 100644 --- a/.github/workflows/deploy-staging.yml +++ b/.github/workflows/deploy-staging.yml @@ -9,6 +9,18 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: "3.x" + - name: Install build + run: >- + python3 -m + pip install + build + --user + - name: Build a binary wheel and a source for drivers + run: python3 -m build ./drivers - name: Set Docker image tag name run: echo "TAG=$(date +'%Y.%m.%d.%H.%M')" >> $GITHUB_ENV - name: Login to Scaleway Container Registry diff --git a/drivers/Dockerfile b/drivers/Dockerfile index d4f9e76..4980d28 100644 --- a/drivers/Dockerfile +++ b/drivers/Dockerfile @@ -3,6 +3,6 @@ ARG TAG=latest FROM $REGISTRY/geokube:$TAG RUN conda install -c conda-forge --yes --freeze-installed intake=0.6.6 RUN conda clean -afy -COPY dist/intake_geokube-1.0b0-py3-none-any.whl / -RUN pip install /intake_geokube-1.0b0-py3-none-any.whl -RUN rm /intake_geokube-1.0b0-py3-none-any.whl +COPY dist/geolake_drivers-1.0b0-py3-none-any.whl / +RUN pip install /geolake_drivers-1.0b0-py3-none-any.whl +RUN rm /geolake_drivers-1.0b0-py3-none-any.whl From 97b1ef331b8cc06588271fa25b61e8d9eeaa6d67 Mon Sep 17 00:00:00 2001 From: Jakub Walczak Date: Mon, 15 Jan 2024 09:15:46 +0100 Subject: [PATCH 11/15] Update path for intake wheel in Docker use --- .github/workflows/deploy-staging.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/deploy-staging.yml b/.github/workflows/deploy-staging.yml index 039f13c..8bf91bf 100644 --- a/.github/workflows/deploy-staging.yml +++ b/.github/workflows/deploy-staging.yml @@ -34,7 +34,7 @@ jobs: - name: Build and push drivers uses: docker/build-push-action@v4 with: - context: . 
+ context: ./drivers file: ./drivers/Dockerfile push: true build-args: | From bdb77195972a3f280d78418174b79cfac247fb2d Mon Sep 17 00:00:00 2001 From: Jakub Walczak Date: Mon, 15 Jan 2024 09:27:00 +0100 Subject: [PATCH 12/15] Add action for production --- .github/workflows/build-production.yml | 79 +++++++++++++++++++ .../{deploy-staging.yml => build-staging.yml} | 0 2 files changed, 79 insertions(+) create mode 100644 .github/workflows/build-production.yml rename .github/workflows/{deploy-staging.yml => build-staging.yml} (100%) diff --git a/.github/workflows/build-production.yml b/.github/workflows/build-production.yml new file mode 100644 index 0000000..2b04b9a --- /dev/null +++ b/.github/workflows/build-production.yml @@ -0,0 +1,79 @@ +name: Build Docker images for geolake components and push to the repository + +on: + push: + tags: + - 'v*' +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: "3.x" + - name: Install build + run: >- + python3 -m + pip install + build + --user + - name: Build a binary wheel and a source for drivers + run: python3 -m build ./drivers + - name: Set Docker image tag name + run: echo "TAG=$(date +'%Y.%m.%d.%H.%M')" >> $GITHUB_ENV + - name: Login to Scaleway Container Registry + uses: docker/login-action@v2 + with: + username: nologin + password: ${{ secrets.DOCKER_PASSWORD }} + registry: ${{ vars.DOCKER_REGISTRY }} + - name: Get release tag + run: echo "RELEASE_TAG=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + - name: Build and push drivers + uses: docker/build-push-action@v4 + with: + context: ./drivers + file: ./drivers/Dockerfile + push: true + build-args: | + REGISTRY=${{ vars.DOCKER_REGISTRY }} + tags: | + ${{ vars.DOCKER_REGISTRY }}/geolake-drivers:${{ env.RELEASE_TAG }} + ${{ vars.DOCKER_REGISTRY }}/geolake-drivers:latest + - name: Build and push datastore component + uses: docker/build-push-action@v4 + with: + context: . + file: ./datastore/Dockerfile + push: true + build-args: | + REGISTRY=${{ vars.DOCKER_REGISTRY }} + tags: | + ${{ vars.DOCKER_REGISTRY }}/geolake-datastore:${{ env.RELEASE_TAG }} + ${{ vars.DOCKER_REGISTRY }}/geolake-datastore:latest + - name: Build and push api component + uses: docker/build-push-action@v4 + with: + context: . + file: ./api/Dockerfile + push: true + build-args: | + REGISTRY=${{ vars.DOCKER_REGISTRY }} + tags: | + ${{ vars.DOCKER_REGISTRY }}/geolake-api:${{ env.RELEASE_TAG }} + ${{ vars.DOCKER_REGISTRY }}/geolake-api:latest + - name: Build and push executor component + uses: docker/build-push-action@v4 + with: + context: . 
+ file: ./executor/Dockerfile + push: true + build-args: | + REGISTRY=${{ vars.DOCKER_REGISTRY }} + tags: | + ${{ vars.DOCKER_REGISTRY }}/geolake-executor:${{ env.RELEASE_TAG }} + ${{ vars.DOCKER_REGISTRY }}/geolake-executor:latest \ No newline at end of file diff --git a/.github/workflows/deploy-staging.yml b/.github/workflows/build-staging.yml similarity index 100% rename from .github/workflows/deploy-staging.yml rename to .github/workflows/build-staging.yml From 5f42f3d3dcb2997bd91d0a5de89db35a490f55ae Mon Sep 17 00:00:00 2001 From: Jakub Walczak Date: Mon, 15 Jan 2024 09:33:43 +0100 Subject: [PATCH 13/15] Update docker context --- .github/workflows/build-production.yml | 6 +++--- .github/workflows/build-staging.yml | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build-production.yml b/.github/workflows/build-production.yml index 2b04b9a..608c92b 100644 --- a/.github/workflows/build-production.yml +++ b/.github/workflows/build-production.yml @@ -47,7 +47,7 @@ jobs: - name: Build and push datastore component uses: docker/build-push-action@v4 with: - context: . + context: ./datastore file: ./datastore/Dockerfile push: true build-args: | @@ -58,7 +58,7 @@ jobs: - name: Build and push api component uses: docker/build-push-action@v4 with: - context: . + context: ./api file: ./api/Dockerfile push: true build-args: | @@ -69,7 +69,7 @@ jobs: - name: Build and push executor component uses: docker/build-push-action@v4 with: - context: . + context: ./executor file: ./executor/Dockerfile push: true build-args: | diff --git a/.github/workflows/build-staging.yml b/.github/workflows/build-staging.yml index 8bf91bf..7c16ff2 100644 --- a/.github/workflows/build-staging.yml +++ b/.github/workflows/build-staging.yml @@ -45,7 +45,7 @@ jobs: - name: Build and push datastore component uses: docker/build-push-action@v4 with: - context: . + context: ./datastore file: ./datastore/Dockerfile push: true build-args: | @@ -56,7 +56,7 @@ jobs: - name: Build and push api component uses: docker/build-push-action@v4 with: - context: . + context: ./api file: ./api/Dockerfile push: true build-args: | @@ -67,7 +67,7 @@ jobs: - name: Build and push executor component uses: docker/build-push-action@v4 with: - context: . 
+ context: ./executor file: ./executor/Dockerfile push: true build-args: | From f1771da5b2c6bd463cbfe3a6f0b012d17e811635 Mon Sep 17 00:00:00 2001 From: Valentina Scardigno Date: Tue, 16 Jan 2024 10:34:05 +0000 Subject: [PATCH 14/15] Fix variable name in staging --- .github/workflows/build-staging.yml | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/.github/workflows/build-staging.yml b/.github/workflows/build-staging.yml index 7c16ff2..288f6e2 100644 --- a/.github/workflows/build-staging.yml +++ b/.github/workflows/build-staging.yml @@ -28,7 +28,7 @@ jobs: with: username: nologin password: ${{ secrets.DOCKER_PASSWORD }} - registry: ${{ vars.DOCKER_REGISTRY }} + registry: ${{ vars.STAGING_DOCKER_REGISTRY }} - name: Set up Docker Buildx uses: docker/setup-buildx-action@v2 - name: Build and push drivers @@ -38,10 +38,10 @@ jobs: file: ./drivers/Dockerfile push: true build-args: | - REGISTRY=${{ vars.DOCKER_REGISTRY }} + REGISTRY=${{ vars.STAGING_DOCKER_REGISTRY }} tags: | - ${{ vars.DOCKER_REGISTRY }}/geolake-drivers:${{ env.TAG }} - ${{ vars.DOCKER_REGISTRY }}/geolake-drivers:latest + ${{ vars.STAGING_DOCKER_REGISTRY }}/geolake-drivers:${{ env.TAG }} + ${{ vars.STAGING_DOCKER_REGISTRY }}/geolake-drivers:latest - name: Build and push datastore component uses: docker/build-push-action@v4 with: @@ -49,10 +49,10 @@ jobs: file: ./datastore/Dockerfile push: true build-args: | - REGISTRY=${{ vars.DOCKER_REGISTRY }} + REGISTRY=${{ vars.STAGING_DOCKER_REGISTRY }} tags: | - ${{ vars.DOCKER_REGISTRY }}/geolake-datastore:${{ env.TAG }} - ${{ vars.DOCKER_REGISTRY }}/geolake-datastore:latest + ${{ vars.STAGING_DOCKER_REGISTRY }}/geolake-datastore:${{ env.TAG }} + ${{ vars.STAGING_DOCKER_REGISTRY }}/geolake-datastore:latest - name: Build and push api component uses: docker/build-push-action@v4 with: @@ -60,10 +60,10 @@ jobs: file: ./api/Dockerfile push: true build-args: | - REGISTRY=${{ vars.DOCKER_REGISTRY }} + REGISTRY=${{ vars.STAGING_DOCKER_REGISTRY }} tags: | - ${{ vars.DOCKER_REGISTRY }}/geolake-api:${{ env.TAG }} - ${{ vars.DOCKER_REGISTRY }}/geolake-api:latest + ${{ vars.STAGING_DOCKER_REGISTRY }}/geolake-api:${{ env.TAG }} + ${{ vars.STAGING_DOCKER_REGISTRY }}/geolake-api:latest - name: Build and push executor component uses: docker/build-push-action@v4 with: @@ -71,7 +71,7 @@ jobs: file: ./executor/Dockerfile push: true build-args: | - REGISTRY=${{ vars.DOCKER_REGISTRY }} + REGISTRY=${{ vars.STAGING_DOCKER_REGISTRY }} tags: | - ${{ vars.DOCKER_REGISTRY }}/geolake-executor:${{ env.TAG }} - ${{ vars.DOCKER_REGISTRY }}/geolake-executor:latest \ No newline at end of file + ${{ vars.STAGING_DOCKER_REGISTRY }}/geolake-executor:${{ env.TAG }} + ${{ vars.STAGING_DOCKER_REGISTRY }}/geolake-executor:latest \ No newline at end of file From a3a3e999d3ea44346d75936b87240eb512c8e544 Mon Sep 17 00:00:00 2001 From: Valentina Scardigno Date: Tue, 23 Jan 2024 14:11:27 +0000 Subject: [PATCH 15/15] Change var name for registry --- .github/workflows/build-staging.yml | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/.github/workflows/build-staging.yml b/.github/workflows/build-staging.yml index 288f6e2..7c16ff2 100644 --- a/.github/workflows/build-staging.yml +++ b/.github/workflows/build-staging.yml @@ -28,7 +28,7 @@ jobs: with: username: nologin password: ${{ secrets.DOCKER_PASSWORD }} - registry: ${{ vars.STAGING_DOCKER_REGISTRY }} + registry: ${{ vars.DOCKER_REGISTRY }} - name: Set up Docker Buildx uses: 
docker/setup-buildx-action@v2 - name: Build and push drivers @@ -38,10 +38,10 @@ jobs: file: ./drivers/Dockerfile push: true build-args: | - REGISTRY=${{ vars.STAGING_DOCKER_REGISTRY }} + REGISTRY=${{ vars.DOCKER_REGISTRY }} tags: | - ${{ vars.STAGING_DOCKER_REGISTRY }}/geolake-drivers:${{ env.TAG }} - ${{ vars.STAGING_DOCKER_REGISTRY }}/geolake-drivers:latest + ${{ vars.DOCKER_REGISTRY }}/geolake-drivers:${{ env.TAG }} + ${{ vars.DOCKER_REGISTRY }}/geolake-drivers:latest - name: Build and push datastore component uses: docker/build-push-action@v4 with: @@ -49,10 +49,10 @@ jobs: file: ./datastore/Dockerfile push: true build-args: | - REGISTRY=${{ vars.STAGING_DOCKER_REGISTRY }} + REGISTRY=${{ vars.DOCKER_REGISTRY }} tags: | - ${{ vars.STAGING_DOCKER_REGISTRY }}/geolake-datastore:${{ env.TAG }} - ${{ vars.STAGING_DOCKER_REGISTRY }}/geolake-datastore:latest + ${{ vars.DOCKER_REGISTRY }}/geolake-datastore:${{ env.TAG }} + ${{ vars.DOCKER_REGISTRY }}/geolake-datastore:latest - name: Build and push api component uses: docker/build-push-action@v4 with: @@ -60,10 +60,10 @@ jobs: file: ./api/Dockerfile push: true build-args: | - REGISTRY=${{ vars.STAGING_DOCKER_REGISTRY }} + REGISTRY=${{ vars.DOCKER_REGISTRY }} tags: | - ${{ vars.STAGING_DOCKER_REGISTRY }}/geolake-api:${{ env.TAG }} - ${{ vars.STAGING_DOCKER_REGISTRY }}/geolake-api:latest + ${{ vars.DOCKER_REGISTRY }}/geolake-api:${{ env.TAG }} + ${{ vars.DOCKER_REGISTRY }}/geolake-api:latest - name: Build and push executor component uses: docker/build-push-action@v4 with: @@ -71,7 +71,7 @@ jobs: file: ./executor/Dockerfile push: true build-args: | - REGISTRY=${{ vars.STAGING_DOCKER_REGISTRY }} + REGISTRY=${{ vars.DOCKER_REGISTRY }} tags: | - ${{ vars.STAGING_DOCKER_REGISTRY }}/geolake-executor:${{ env.TAG }} - ${{ vars.STAGING_DOCKER_REGISTRY }}/geolake-executor:latest \ No newline at end of file + ${{ vars.DOCKER_REGISTRY }}/geolake-executor:${{ env.TAG }} + ${{ vars.DOCKER_REGISTRY }}/geolake-executor:latest \ No newline at end of file
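
A closing note on drivers/tests/test_geoquery.py, added earlier in this series: those tests pin down the two accepted shapes of the GeoQuery time field (a year/month/day/hour combo is kept as a dict, while a start/stop pair is normalised to a slice) and show that model_dump_original() serialises both forms back to plain dicts. A minimal sketch mirroring exactly those assertions; only the remark about the downstream consumer is an assumption.

    # Hedged sketch: mirrors the assertions in drivers/tests/test_geoquery.py.
    from intake_geokube.queries.geoquery import GeoQuery

    combo = GeoQuery(
        time={"year": ["2002"], "month": ["6"], "day": ["21"], "hour": ["8", "10"]}
    )
    window = GeoQuery(time={"start": "2000-01-01", "stop": "2001-12-21"})

    assert isinstance(combo.time, dict)      # combo form stays a dict
    assert isinstance(window.time, slice)    # start/stop form becomes a slice
    assert window.time.start == "2000-01-01"
    # model_dump_original() round-trips both forms back to plain dicts,
    # presumably so downstream components can re-serialise the query.
    assert isinstance(window.model_dump_original()["time"], dict)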