From 0c318c892c554c00c8081a66675421b1a790b8c9 Mon Sep 17 00:00:00 2001 From: Darren Chaddock Date: Tue, 18 Jun 2024 15:31:41 -0600 Subject: [PATCH] updated docs --- Makefile | 12 +- README.md | 4 +- docs/code/installation.md | 1 + docs/code/pyaurorax_api_reference/index.js | 2657 ++++++----- .../pyaurorax/api/api.html | 291 -- .../pyaurorax/api/classes/request.html | 763 ---- .../pyaurorax/api/classes/urls.html | 639 --- .../pyaurorax/api/index.html | 808 ---- .../pyaurorax/availability/availability.html | 430 -- .../pyaurorax/availability/index.html | 427 -- .../pyaurorax/conjunctions/conjunctions.html | 627 --- .../pyaurorax/conjunctions/index.html | 1522 ------- .../conjunctions/swarmaurora/index.html | 288 -- .../pyaurorax/data/index.html | 1027 +++++ .../pyaurorax/data/ucalgary/index.html | 3990 +++++++++++++++++ .../pyaurorax/data/ucalgary/read/index.html | 2199 +++++++++ .../data_products/data_products.html | 973 ---- .../pyaurorax/data_products/index.html | 1591 ------- .../pyaurorax/ephemeris/ephemeris.html | 827 ---- .../pyaurorax/ephemeris/index.html | 1433 ------ .../pyaurorax/exceptions.html | 335 +- .../pyaurorax/index.html | 1151 ++++- .../pyaurorax/metadata/metadata.html | 311 -- .../pyaurorax/models/atm/index.html | 1038 +++++ .../index.html} | 174 +- .../pyaurorax/requests/index.html | 636 --- .../pyaurorax/requests/requests.html | 919 ---- .../api}/classes/index.html | 48 +- .../pyaurorax/search/api/classes/request.html | 567 +++ .../{ => search}/api/classes/response.html | 147 +- .../pyaurorax/search/api/index.html | 499 +++ .../classes/availability_result.html} | 180 +- .../availability}/classes/index.html | 47 +- .../pyaurorax/search/availability/index.html | 604 +++ .../conjunctions/classes/conjunction.html | 230 +- .../conjunctions}/classes/index.html | 37 +- .../conjunctions/classes/search.html | 649 +-- .../pyaurorax/search/conjunctions/index.html | 809 ++++ .../conjunctions/swarmaurora/index.html} | 320 +- .../data_products/classes/data_product.html | 302 +- .../data_products/classes/index.html | 37 +- .../data_products/classes/search.html | 347 +- .../pyaurorax/search/data_products/index.html | 925 ++++ .../ephemeris/classes/ephemeris.html | 254 +- .../search/ephemeris/classes/index.html | 145 + .../ephemeris/classes/search.html | 369 +- .../pyaurorax/search/ephemeris/index.html | 806 ++++ .../pyaurorax/search/index.html | 3455 ++++++++++++++ .../pyaurorax/search/location.html | 345 ++ .../{ => search}/metadata/index.html | 240 +- .../pyaurorax/search/requests/index.html | 790 ++++ .../search/sources/classes/data_source.html | 467 ++ .../sources/classes/data_source_stats.html | 157 +- .../search/sources/classes/index.html | 138 + .../pyaurorax/search/sources/index.html | 1715 +++++++ .../pyaurorax/search/util/index.html | 324 ++ .../sources/classes/data_source.html | 502 --- .../pyaurorax/sources/index.html | 1581 ------- .../pyaurorax/sources/sources.html | 1551 ------- .../bounding_box/extract_metric/index.html | 904 ++++ .../classes => tools/bounding_box}/index.html | 51 +- .../pyaurorax/tools/calibration/index.html | 360 ++ .../pyaurorax/tools/ccd_contour/index.html | 1022 +++++ .../{api => tools}/classes/index.html | 55 +- .../pyaurorax/tools/classes/keogram.html | 1506 +++++++ .../pyaurorax/tools/classes/montage.html | 693 +++ .../pyaurorax/tools/classes/mosaic.html | 2137 +++++++++ .../pyaurorax/tools/index.html | 3463 ++++++++++++++ .../pyaurorax/tools/keogram/index.html | 487 ++ .../pyaurorax/tools/montage/index.html | 182 + 
.../pyaurorax/tools/mosaic/index.html | 771 ++++ .../pyaurorax/util/calculate_btrace.html | 367 -- .../pyaurorax/util/index.html | 285 -- mkdocs.yml | 4 +- pyaurorax | 2 +- requirements.txt | 7 +- 76 files changed, 35970 insertions(+), 19986 deletions(-) delete mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/api/api.html delete mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/api/classes/request.html delete mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/api/classes/urls.html delete mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/api/index.html delete mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/availability/availability.html delete mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/availability/index.html delete mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/conjunctions/conjunctions.html delete mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/conjunctions/index.html delete mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/conjunctions/swarmaurora/index.html create mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/data/index.html create mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/data/ucalgary/index.html create mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/data/ucalgary/read/index.html delete mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/data_products/data_products.html delete mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/data_products/index.html delete mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/ephemeris/ephemeris.html delete mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/ephemeris/index.html delete mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/metadata/metadata.html create mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/models/atm/index.html rename docs/code/pyaurorax_api_reference/pyaurorax/{availability/classes/availability_result.html => models/index.html} (63%) delete mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/requests/index.html delete mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/requests/requests.html rename docs/code/pyaurorax_api_reference/pyaurorax/{conjunctions => search/api}/classes/index.html (81%) create mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/search/api/classes/request.html rename docs/code/pyaurorax_api_reference/pyaurorax/{ => search}/api/classes/response.html (68%) create mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/search/api/index.html rename docs/code/pyaurorax_api_reference/pyaurorax/{location.html => search/availability/classes/availability_result.html} (60%) rename docs/code/pyaurorax_api_reference/pyaurorax/{sources => search/availability}/classes/index.html (82%) create mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/search/availability/index.html rename docs/code/pyaurorax_api_reference/pyaurorax/{ => search}/conjunctions/classes/conjunction.html (61%) rename docs/code/pyaurorax_api_reference/pyaurorax/{availability => search/conjunctions}/classes/index.html (83%) rename docs/code/pyaurorax_api_reference/pyaurorax/{ => search}/conjunctions/classes/search.html (68%) create mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/search/conjunctions/index.html rename docs/code/pyaurorax_api_reference/pyaurorax/{conjunctions/swarmaurora/tools.html => search/conjunctions/swarmaurora/index.html} (51%) rename docs/code/pyaurorax_api_reference/pyaurorax/{ => search}/data_products/classes/data_product.html (57%) rename 
docs/code/pyaurorax_api_reference/pyaurorax/{ => search}/data_products/classes/index.html (81%) rename docs/code/pyaurorax_api_reference/pyaurorax/{ => search}/data_products/classes/search.html (73%) create mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/search/data_products/index.html rename docs/code/pyaurorax_api_reference/pyaurorax/{ => search}/ephemeris/classes/ephemeris.html (64%) create mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/search/ephemeris/classes/index.html rename docs/code/pyaurorax_api_reference/pyaurorax/{ => search}/ephemeris/classes/search.html (71%) create mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/search/ephemeris/index.html create mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/search/index.html create mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/search/location.html rename docs/code/pyaurorax_api_reference/pyaurorax/{ => search}/metadata/index.html (58%) create mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/search/requests/index.html create mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/search/sources/classes/data_source.html rename docs/code/pyaurorax_api_reference/pyaurorax/{ => search}/sources/classes/data_source_stats.html (58%) create mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/search/sources/classes/index.html create mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/search/sources/index.html create mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/search/util/index.html delete mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/sources/classes/data_source.html delete mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/sources/index.html delete mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/sources/sources.html create mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/tools/bounding_box/extract_metric/index.html rename docs/code/pyaurorax_api_reference/pyaurorax/{ephemeris/classes => tools/bounding_box}/index.html (82%) create mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/tools/calibration/index.html create mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/tools/ccd_contour/index.html rename docs/code/pyaurorax_api_reference/pyaurorax/{api => tools}/classes/index.html (79%) create mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/tools/classes/keogram.html create mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/tools/classes/montage.html create mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/tools/classes/mosaic.html create mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/tools/index.html create mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/tools/keogram/index.html create mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/tools/montage/index.html create mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/tools/mosaic/index.html delete mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/util/calculate_btrace.html delete mode 100644 docs/code/pyaurorax_api_reference/pyaurorax/util/index.html diff --git a/Makefile b/Makefile index 5be2d45..8d785a6 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,6 @@ init-submodules: git checkout main && \ git pull && \ pip install poetry && \ - poetry add pdoc3 && \ poetry install cd .. 
@@ -16,22 +15,23 @@ update-submodules: git submodule foreach git pull docs-install: - python3 -m pip install -r requirements.txt + pip install -r requirements.txt docs-update docs-update-deps: - python3 -m pip install --upgrade -r requirements.txt + pip install --upgrade -r requirements.txt docs-generate: + rm -rf docs/code/pyaurorax_api_reference/* cd pyaurorax && poetry run python3 -m pdoc --html --force --output-dir ../docs/code/pyaurorax_api_reference pyaurorax --config "lunr_search={'fuzziness': 1}" docs-build: - python3 -m mkdocs build + python -m mkdocs build docs-serve: - python3 -m mkdocs serve + python -m mkdocs serve docs-deploy: - python3 -m mkdocs gh-deploy --force + python -m mkdocs gh-deploy --force clean: rm -rf site diff --git a/README.md b/README.md index ec01730..84e271d 100644 --- a/README.md +++ b/README.md @@ -31,7 +31,7 @@ To generate documentation for submodules. they must first be initialized and the $ cd pyaurorax $ git checkout main $ git pull - $ python3 -m pip install poetry + $ pip install poetry $ poetry install $ cd .. ``` @@ -45,7 +45,7 @@ To generate documentation for submodules. they must first be initialized and the 4. Install mkdocs dependencies ``` - $ python3 -m pip install -r requirements.txt + $ pip install -r requirements.txt ``` 5. Since the submodules in this repository are Python projects, their API references can be generated automatically using ```pdoc3```. This step generates HTML files and places them in the specified directory. The command to generate the docs is run through Poetry because the dependencies of the package were installed by Poetry in a virtual environment. This additional step ensures that each submodule maintains its own dependencies and that the documentation is generated for exactly the dependencies used by the submodule. diff --git a/docs/code/installation.md b/docs/code/installation.md index 20f6ad7..847b195 100644 --- a/docs/code/installation.md +++ b/docs/code/installation.md @@ -10,6 +10,7 @@ You can install PyAuroraX using pip, and import it like so: $ pip install pyaurorax $ python >>> import pyaurorax +>>> aurorax = pyaurorax.PyAuroraX() ``` More installation details can be found on the readme of the code repository [on Github](https://github.com/aurorax-space/pyaurorax){:target="_blank"}. 
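For context on the new entry point shown in the installation snippet above: a minimal usage sketch, assuming only the PyAuroraX attributes documented in the API reference updated later in this patch (api_timeout, download_output_root_path, api_base_url); the specific values below are illustrative, not defaults.

```python
# Minimal sketch of the object-based entry point added in this docs update.
# Attribute names come from the PyAuroraX class reference in this patch;
# the values assigned below are arbitrary examples, not library defaults.
import pyaurorax

aurorax = pyaurorax.PyAuroraX()

# optional configuration after instantiation
aurorax.api_timeout = 20                                   # seconds (documented default is 10)
aurorax.download_output_root_path = "/tmp/pyaurorax_data"  # destination for downloaded data

# submodules are accessed through the object, e.g. aurorax.search, aurorax.data
print(aurorax.api_base_url)
```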
diff --git a/docs/code/pyaurorax_api_reference/index.js b/docs/code/pyaurorax_api_reference/index.js index 603b413..62cac7f 100644 --- a/docs/code/pyaurorax_api_reference/index.js +++ b/docs/code/pyaurorax_api_reference/index.js @@ -1,2048 +1,2481 @@ URLS=[ "pyaurorax/index.html", -"pyaurorax/api/index.html", -"pyaurorax/api/api.html", -"pyaurorax/api/classes/index.html", -"pyaurorax/api/classes/request.html", -"pyaurorax/api/classes/response.html", -"pyaurorax/api/classes/urls.html", -"pyaurorax/availability/index.html", -"pyaurorax/availability/availability.html", -"pyaurorax/availability/classes/index.html", -"pyaurorax/availability/classes/availability_result.html", -"pyaurorax/conjunctions/index.html", -"pyaurorax/conjunctions/classes/index.html", -"pyaurorax/conjunctions/classes/conjunction.html", -"pyaurorax/conjunctions/classes/search.html", -"pyaurorax/conjunctions/conjunctions.html", -"pyaurorax/conjunctions/swarmaurora/index.html", -"pyaurorax/conjunctions/swarmaurora/tools.html", -"pyaurorax/data_products/index.html", -"pyaurorax/data_products/classes/index.html", -"pyaurorax/data_products/classes/data_product.html", -"pyaurorax/data_products/classes/search.html", -"pyaurorax/data_products/data_products.html", -"pyaurorax/ephemeris/index.html", -"pyaurorax/ephemeris/classes/index.html", -"pyaurorax/ephemeris/classes/ephemeris.html", -"pyaurorax/ephemeris/classes/search.html", -"pyaurorax/ephemeris/ephemeris.html", "pyaurorax/exceptions.html", -"pyaurorax/location.html", -"pyaurorax/metadata/index.html", -"pyaurorax/metadata/metadata.html", -"pyaurorax/requests/index.html", -"pyaurorax/requests/requests.html", -"pyaurorax/sources/index.html", -"pyaurorax/sources/classes/index.html", -"pyaurorax/sources/classes/data_source.html", -"pyaurorax/sources/classes/data_source_stats.html", -"pyaurorax/sources/sources.html", -"pyaurorax/util/index.html", -"pyaurorax/util/calculate_btrace.html" +"pyaurorax/search/index.html", +"pyaurorax/search/requests/index.html", +"pyaurorax/search/data_products/index.html", +"pyaurorax/search/data_products/classes/index.html", +"pyaurorax/search/data_products/classes/search.html", +"pyaurorax/search/data_products/classes/data_product.html", +"pyaurorax/search/location.html", +"pyaurorax/search/availability/index.html", +"pyaurorax/search/availability/classes/index.html", +"pyaurorax/search/availability/classes/availability_result.html", +"pyaurorax/search/sources/index.html", +"pyaurorax/search/sources/classes/index.html", +"pyaurorax/search/sources/classes/data_source.html", +"pyaurorax/search/sources/classes/data_source_stats.html", +"pyaurorax/search/ephemeris/index.html", +"pyaurorax/search/ephemeris/classes/index.html", +"pyaurorax/search/ephemeris/classes/search.html", +"pyaurorax/search/ephemeris/classes/ephemeris.html", +"pyaurorax/search/metadata/index.html", +"pyaurorax/search/api/index.html", +"pyaurorax/search/api/classes/index.html", +"pyaurorax/search/api/classes/request.html", +"pyaurorax/search/api/classes/response.html", +"pyaurorax/search/util/index.html", +"pyaurorax/search/conjunctions/index.html", +"pyaurorax/search/conjunctions/swarmaurora/index.html", +"pyaurorax/search/conjunctions/classes/index.html", +"pyaurorax/search/conjunctions/classes/search.html", +"pyaurorax/search/conjunctions/classes/conjunction.html", +"pyaurorax/models/index.html", +"pyaurorax/models/atm/index.html", +"pyaurorax/tools/index.html", +"pyaurorax/tools/montage/index.html", +"pyaurorax/tools/bounding_box/index.html", 
+"pyaurorax/tools/bounding_box/extract_metric/index.html", +"pyaurorax/tools/mosaic/index.html", +"pyaurorax/tools/keogram/index.html", +"pyaurorax/tools/calibration/index.html", +"pyaurorax/tools/ccd_contour/index.html", +"pyaurorax/tools/classes/index.html", +"pyaurorax/tools/classes/mosaic.html", +"pyaurorax/tools/classes/montage.html", +"pyaurorax/tools/classes/keogram.html", +"pyaurorax/data/index.html", +"pyaurorax/data/ucalgary/index.html", +"pyaurorax/data/ucalgary/read/index.html" ]; INDEX=[ { "ref":"pyaurorax", "url":0, -"doc":"The PyAuroraX package provides a way to interact with the [AuroraX API](https: aurorax.space/data/apiLibraries). It is intended to provide an intuitive process for those in the space physics and related communities to programmatically query AuroraX's vast database for conjunctions, ephemeris or data product records, data availability information, and more. Check out this project on [GitHub](https: github.com/aurorax-space/pyaurorax) and explore the evolving ecosystem of visualizations, tools, and data at [AuroraX](https: aurorax.space/). For an overview of usage and examples, visit the [AuroraX Documentation website](https: docs.aurorax.space/code/overview). Details of functionality and options are available in the [API reference](https: docs.aurorax.space/code/pyaurorax_api_reference/pyaurorax/). Installation: $ python -m pip install pyaurorax Basic usage: > import pyaurorax " +"doc":"The PyAuroraX package provides a way to interact with the [AuroraX Data Platform](https: aurorax.space), facilitating programmatic usage of AuroraX's search engine and data analysis tools. For an overview of usage and examples, visit the [AuroraX Developer Zone website](https: docs.aurorax.space/code/overview), or explore the examples contained in the Github repository [here](https: github.com/aurorax-space/pyaurorax/tree/main/examples). Installation: pip install pyaurorax Basic usage: import pyaurorax aurorax = pyaurorax.PyAuroraX() " }, { -"ref":"pyaurorax.api", -"url":1, -"doc":"This module is the under-the-hood interface for RESTful API requests. It provides helper functions that the PyAuroraX library uses to make robust requests. Note that all functions and classes from submodules are all imported at this level of the api module. They can be referenced from here instead of digging in deeper to the submodules." +"ref":"pyaurorax.PyAuroraX", +"url":0, +"doc":"The PyAuroraX class is the primary entry point for utilizing this library. It is used to initialize a session, capturing details about API connectivity, environment, and more. All submodules are encapsulated within this class, so any usage of the library starts with creating this object. import pyaurorax aurorax = pyaurorax.PyAuroraX() When working with this object, you can set configuration parameters, such as the destination directory for downloaded data, or API special settings (e.g., timeout, HTTP headers, API key). These parameters can be set when instantiating the object, or after instantiating using the self-contained accessible variables. Attributes: download_output_root_path (str): Destination directory for downloaded data. The default for this path is a subfolder in the user's home directory, such as /home/user/pyaurorax_data in Linux. In Windows and Mac, it is similar. read_tar_temp_path (str): Temporary directory used for tar extraction phases during file reading (e.g., reading TREx RGB Burst data). The default for this is /.tar_temp_working . 
For faster performance when reading tar-based data, one option on Linux is to set this to use RAM directly at /dev/shm/pyaurorax_tar_temp_working . api_base_url (str): URL prefix to use when interacting with the AuroraX API. By default this is set to https: api.phys.ucalgary.ca . This parameter is primarily used by the development team to test and build new functions using the private staging API. api_timeout (int): The timeout used when communicating with the Aurorax API. This value is represented in seconds, and by default is 10 seconds . api_headers (Dict): HTTP headers used when communicating with the AuroraX API. The default for this value consists of several standard headers. Any changes to this parameter are in addition to the default standard headers. api_key (str): API key to use when interacting with the AuroraX API. The default value is None. Please note that an API key is only required for write operations to the AuroraX search API, such as creating data sources or uploading ephemeris data. srs_obj (pyucalgarysrs.PyUCalgarySRS): A [PyUCalgarySRS](https: docs-pyucalgarysrs.phys.ucalgary.ca/ pyucalgarysrs.PyUCalgarySRS) object. If not supplied, it will create the object with some settings carried over from the PyAuroraX object. Note that specifying this is for advanced users and only necessary a few special use-cases. Raises: pyaurorax.exceptions.AuroraXInitializationError: an error was encountered during initialization of the paths" }, { -"ref":"pyaurorax.api.DEFAULT_BASE_URL", -"url":1, -"doc":"The default API base URL to use when sending requests" +"ref":"pyaurorax.PyAuroraX.search", +"url":0, +"doc":"Access to the search submodule from within a PyAuroraX object." }, { -"ref":"pyaurorax.api.AuroraXRequest", -"url":1, -"doc":"AuroraX API request class Attributes: url: the URL to make the request against method: the HTTP method to use (get, post, put, delete, etc.) params: any URL parameters to send in the request, defaults to {} body: the body of the request (ie. post data), defaults to {} headers: any headers to send as part of the request (in addition to the default ones), default is {} null_response: signifies if we expect a response from the API that has no body/data in it (ie. requests to upload data that respond with just a 202 status code), defaults to False Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model." +"ref":"pyaurorax.PyAuroraX.data", +"url":0, +"doc":"Access to the data submodule from within a PyAuroraX object." }, { -"ref":"pyaurorax.api.AuroraXRequest.url", -"url":1, -"doc":"" +"ref":"pyaurorax.PyAuroraX.models", +"url":0, +"doc":"Access to the models submodule from within a PyAuroraX object." }, { -"ref":"pyaurorax.api.AuroraXRequest.method", -"url":1, -"doc":"" +"ref":"pyaurorax.PyAuroraX.tools", +"url":0, +"doc":"Access to the tools submodule from within a PyAuroraX object." +}, +{ +"ref":"pyaurorax.PyAuroraX.api_base_url", +"url":0, +"doc":"Property for the API base URL. See above for details." +}, +{ +"ref":"pyaurorax.PyAuroraX.api_headers", +"url":0, +"doc":"Property for the API headers. See above for details." +}, +{ +"ref":"pyaurorax.PyAuroraX.api_timeout", +"url":0, +"doc":"Property for the API timeout. See above for details." +}, +{ +"ref":"pyaurorax.PyAuroraX.api_key", +"url":0, +"doc":"Property for the API key. See above for details." 
+}, +{ +"ref":"pyaurorax.PyAuroraX.download_output_root_path", +"url":0, +"doc":"Property for the download output root path. See above for details." +}, +{ +"ref":"pyaurorax.PyAuroraX.read_tar_temp_path", +"url":0, +"doc":"Property for the read tar temp path. See above for details." +}, +{ +"ref":"pyaurorax.PyAuroraX.srs_obj", +"url":0, +"doc":"Property for the PyUCalgarySRS object. See above for details." +}, +{ +"ref":"pyaurorax.PyAuroraX.purge_download_output_root_path", +"url":0, +"doc":"Delete all files in the download_output_root_path directory. Since the library downloads data to this directory, over time it can grow too large and the user can risk running out of space. This method is here to assist with easily clearing out this directory. Note that it also deletes all files in the PyUCalgarySRS object's download_output_root_path path as well. Normally, these two paths are the same, but it can be different if the user specifically changes it. Raises: pyaurorax.exceptions.AuroraXPurgeError: an error was encountered during the purge operation", +"func":1 +}, +{ +"ref":"pyaurorax.PyAuroraX.purge_read_tar_temp_path", +"url":0, +"doc":"Delete all files in the read_tar_temp_path directory. Since the library extracts temporary data to this directory, sometime issues during reading can cause this directory to contain residual files that aren't deleted during the normal read routine. Though this is very rare, it is still possible. Therefore, this method is here to assist with easily clearing out this directory. Note that it also deletes all files in the PyUCalgarySRS object's read_tar_temp_path path as well. Normally, these two paths are the same, but it can be different if the user specifically changes it. Raises: pyaurorax.exceptions.AuroraXPurgeError: an error was encountered during the purge operation", +"func":1 +}, +{ +"ref":"pyaurorax.PyAuroraX.show_data_usage", +"url":0, +"doc":"Print the volume of data existing in the download_output_root_path, broken down by dataset. Alternatively return the information in a dictionary. This can be a helpful tool for managing your disk space. Args: order (bool): Order results by either size or name . Default is size . return_dict (bool): Instead of printing the data usage information, return the information as a dictionary. Returns: Printed output. If return_dict is True, then it will instead return a dictionary with the disk usage information. Notes: Note that size on disk may differ slightly from the values determined by this routine. For example, the results here will be slightly different than the output of a 'du' command on nix systems.", +"func":1 }, { -"ref":"pyaurorax.api.AuroraXRequest.params", +"ref":"pyaurorax.exceptions", "url":1, -"doc":"" +"doc":"Unique exception classes utilized by PyAuroraX. These exceptions can be used to help trap specific errors raised by this library. Note that all exceptions are imported at the root level of the library. They can be referenced using [ pyaurorax.AuroraXError ](exceptions.html pyaurorax.exceptions.AuroraXError) or pyaurorax.exceptions.AuroraXError ." }, { -"ref":"pyaurorax.api.AuroraXRequest.body", +"ref":"pyaurorax.exceptions.AuroraXError", "url":1, -"doc":"" +"doc":"Common base class for all non-exit exceptions." 
}, { -"ref":"pyaurorax.api.AuroraXRequest.headers", +"ref":"pyaurorax.exceptions.AuroraXInitializationError", "url":1, -"doc":"" +"doc":"Error occurred during library initialization" }, { -"ref":"pyaurorax.api.AuroraXRequest.null_response", +"ref":"pyaurorax.exceptions.AuroraXPurgeError", "url":1, -"doc":"" +"doc":"Error occurred during purging of download or tar extraction working directory" }, { -"ref":"pyaurorax.api.AuroraXRequest.execute", +"ref":"pyaurorax.exceptions.AuroraXAPIError", "url":1, -"doc":"Execute an AuroraX request Args: limited_evaluation: don't evaluate the response after the retry mechanism, defaults to False skip_retry_logic: exclude the retry logic in the request, defaults to False Returns: an AuroraXResponse object Raises: pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error pyaurorax.exceptions.AuroraXNotFoundException: requested resource was not found pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected content error pyaurorax.exceptions.AuroraXUnexpectedEmptyResponse: unexpected empty response pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation", -"func":1 +"doc":"Error occurred during an API call" }, { -"ref":"pyaurorax.api.AuroraXResponse", +"ref":"pyaurorax.exceptions.AuroraXNotFoundError", "url":1, -"doc":"AuroraX API response class Attributes: request: the request object data: the data received as part of the request status_code: the HTTP status code received when making the request Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model." +"doc":"The AuroraX record was not found" }, { -"ref":"pyaurorax.api.AuroraXResponse.request", +"ref":"pyaurorax.exceptions.AuroraXDuplicateError", "url":1, -"doc":"" +"doc":"A duplicate record already exists" }, { -"ref":"pyaurorax.api.AuroraXResponse.data", +"ref":"pyaurorax.exceptions.AuroraXUnauthorizedError", "url":1, -"doc":"" +"doc":"A privileged operation was attempted without authorization" }, { -"ref":"pyaurorax.api.AuroraXResponse.status_code", +"ref":"pyaurorax.exceptions.AuroraXConflictError", "url":1, -"doc":"" +"doc":"A conflict occurred while modifying records" }, { -"ref":"pyaurorax.api.get_api_key", +"ref":"pyaurorax.exceptions.AuroraXDataRetrievalError", "url":1, -"doc":"Returns the currently set API key for the module Returns: current API key", -"func":1 +"doc":"Error occurred while retrieving search data" }, { -"ref":"pyaurorax.api.authenticate", +"ref":"pyaurorax.exceptions.AuroraXSearchError", "url":1, -"doc":"Set authentication values for use with subsequent queries Args: api_key: an AuroraX API key string", -"func":1 +"doc":"An error occurred in the API while performing a search" }, { -"ref":"pyaurorax.api.set_base_url", +"ref":"pyaurorax.exceptions.AuroraXUploadError", "url":1, -"doc":"Change the base URL for the API (ie. change to the staging system or local server) Args: url: the new base url string (ie. 
'https: api.staging.aurorax.space')", -"func":1 +"doc":"Error occurred during upload operation" }, { -"ref":"pyaurorax.api.get_base_url", +"ref":"pyaurorax.exceptions.AuroraXMaintenanceError", "url":1, -"doc":"Returns the current base URL for the API Returns: current base URL", -"func":1 +"doc":"AuroraX API is in maintenance mode, read-only tasks are only possible" }, { -"ref":"pyaurorax.api.reset_base_url", +"ref":"pyaurorax.exceptions.AuroraXUnsupportedReadError", "url":1, -"doc":"Set the base URL for the API back to the default", -"func":1 +"doc":"Unsupported dataset for read function NOTE: this is primarily a PyUCalgarySRS error" }, { -"ref":"pyaurorax.api.api", -"url":2, -"doc":"Helper functions when interacting with the API" +"ref":"pyaurorax.exceptions.AuroraXDownloadError", +"url":1, +"doc":"Error occurred during downloading of data NOTE: this is primarily a PyUCalgarySRS error" }, { -"ref":"pyaurorax.api.api.get_api_key", +"ref":"pyaurorax.search", "url":2, -"doc":"Returns the currently set API key for the module Returns: current API key", -"func":1 +"doc":"Interact with the AuroraX search engine. This includes finding data sources, searching for conjunctions or ephemeris data, and uploading/managing your own data in the AuroraX platform." }, { -"ref":"pyaurorax.api.api.authenticate", +"ref":"pyaurorax.search.SearchManager", "url":2, -"doc":"Set authentication values for use with subsequent queries Args: api_key: an AuroraX API key string", -"func":1 +"doc":"The SearchManager object is initialized within every PyAuroraX object. It acts as a way to access the submodules and carry over configuration information in the super class." }, { -"ref":"pyaurorax.api.api.set_base_url", +"ref":"pyaurorax.search.SearchManager.util", "url":2, -"doc":"Change the base URL for the API (ie. change to the staging system or local server) Args: url: the new base url string (ie. 'https: api.staging.aurorax.space')", -"func":1 +"doc":"Access to the util submodule from within a PyAuroraX object." }, { -"ref":"pyaurorax.api.api.get_base_url", +"ref":"pyaurorax.search.SearchManager.api", "url":2, -"doc":"Returns the current base URL for the API Returns: current base URL", -"func":1 +"doc":"Access to the api submodule from within a PyAuroraX object." }, { -"ref":"pyaurorax.api.api.reset_base_url", +"ref":"pyaurorax.search.SearchManager.sources", "url":2, -"doc":"Set the base URL for the API back to the default", -"func":1 +"doc":"Access to the sources submodule from within a PyAuroraX object." }, { -"ref":"pyaurorax.api.classes", -"url":3, -"doc":"Separted classes and functions used by the api module. Note that these classes and variables are all imported higher up at the top of the api module. They can be referenced from there instead of digging in deeper to these submodules." +"ref":"pyaurorax.search.SearchManager.availability", +"url":2, +"doc":"Access to the availability submodule from within a PyAuroraX object." }, { -"ref":"pyaurorax.api.classes.request", -"url":4, -"doc":"Class definition used for managing an API request" +"ref":"pyaurorax.search.SearchManager.metadata", +"url":2, +"doc":"Access to the metadata submodule from within a PyAuroraX object." }, { -"ref":"pyaurorax.api.classes.request.DEFAULT_RETRIES", -"url":4, -"doc":"Number of retry attempts when requesting data from the API" +"ref":"pyaurorax.search.SearchManager.requests", +"url":2, +"doc":"Access to the requests submodule from within a PyAuroraX object." 
}, { -"ref":"pyaurorax.api.classes.request.REQUEST_HEADERS", -"url":4, -"doc":"The default headers sent as part of a request to the AuroraX API" +"ref":"pyaurorax.search.SearchManager.ephemeris", +"url":2, +"doc":"Access to the ephemeris submodule from within a PyAuroraX object." }, { -"ref":"pyaurorax.api.classes.request.REQUEST_TIMEOUT", -"url":4, -"doc":"Default request timeout, in seconds" +"ref":"pyaurorax.search.SearchManager.data_products", +"url":2, +"doc":"Access to the data_products submodule from within a PyAuroraX object." }, { -"ref":"pyaurorax.api.classes.request.API_KEY_HEADER_NAME", -"url":4, -"doc":"The API key header used when sending requests to the AuroraX API" +"ref":"pyaurorax.search.SearchManager.conjunctions", +"url":2, +"doc":"Access to the conjunctions submodule from within a PyAuroraX object." }, { -"ref":"pyaurorax.api.classes.request.AuroraXRequest", -"url":4, -"doc":"AuroraX API request class Attributes: url: the URL to make the request against method: the HTTP method to use (get, post, put, delete, etc.) params: any URL parameters to send in the request, defaults to {} body: the body of the request (ie. post data), defaults to {} headers: any headers to send as part of the request (in addition to the default ones), default is {} null_response: signifies if we expect a response from the API that has no body/data in it (ie. requests to upload data that respond with just a 202 status code), defaults to False Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model." +"ref":"pyaurorax.search.DataSource", +"url":2, +"doc":"AuroraX data source record Attributes: identifier (int): the unique AuroraX data source identifier program (str): the program for this data source platform (str): the platform for this data source instrument_type (str): the instrument type for this data source source_type (str): the data source type for this data source. Options are in the pyaurorax.search.sources module, or at the top level using the pyaurorax.search.SOURCE_TYPE_ variables. display_name (str): the display name for this data source metadata (Dict): metadata for this data source (arbitrary keys and values) owner (str): the owner's email address of this data source maintainers (List[str]): the email addresses of AuroraX accounts that can alter this data source and its associated records ephemeris_metadata_schema (Dict): a list of dictionaries capturing the metadata keys and values that can appear in ephemeris records associated with this data source data_product_metadata_schema (Dict): a list of dictionaries capturing the metadata keys and values that can appear in data product records associated with this data source format (str): the format used when printing the data source, defaults to \"full_record\". Other options are in the pyaurorax.search.sources module, or at the top level using the pyaurorax.search.FORMAT_ variables." 
}, { -"ref":"pyaurorax.api.classes.request.AuroraXRequest.url", -"url":4, +"ref":"pyaurorax.search.DataSource.identifier", +"url":2, "doc":"" }, { -"ref":"pyaurorax.api.classes.request.AuroraXRequest.method", -"url":4, +"ref":"pyaurorax.search.DataSource.program", +"url":2, "doc":"" }, { -"ref":"pyaurorax.api.classes.request.AuroraXRequest.params", -"url":4, +"ref":"pyaurorax.search.DataSource.platform", +"url":2, "doc":"" }, { -"ref":"pyaurorax.api.classes.request.AuroraXRequest.body", -"url":4, +"ref":"pyaurorax.search.DataSource.instrument_type", +"url":2, "doc":"" }, { -"ref":"pyaurorax.api.classes.request.AuroraXRequest.headers", -"url":4, +"ref":"pyaurorax.search.DataSource.source_type", +"url":2, "doc":"" }, { -"ref":"pyaurorax.api.classes.request.AuroraXRequest.null_response", -"url":4, +"ref":"pyaurorax.search.DataSource.display_name", +"url":2, "doc":"" }, { -"ref":"pyaurorax.api.classes.request.AuroraXRequest.execute", -"url":4, -"doc":"Execute an AuroraX request Args: limited_evaluation: don't evaluate the response after the retry mechanism, defaults to False skip_retry_logic: exclude the retry logic in the request, defaults to False Returns: an AuroraXResponse object Raises: pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error pyaurorax.exceptions.AuroraXNotFoundException: requested resource was not found pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected content error pyaurorax.exceptions.AuroraXUnexpectedEmptyResponse: unexpected empty response pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation", -"func":1 -}, -{ -"ref":"pyaurorax.api.classes.response", -"url":5, -"doc":"Class definition used for managing the response from an API request" -}, -{ -"ref":"pyaurorax.api.classes.response.AuroraXResponse", -"url":5, -"doc":"AuroraX API response class Attributes: request: the request object data: the data received as part of the request status_code: the HTTP status code received when making the request Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model." -}, -{ -"ref":"pyaurorax.api.classes.response.AuroraXResponse.request", -"url":5, +"ref":"pyaurorax.search.DataSource.metadata", +"url":2, "doc":"" }, { -"ref":"pyaurorax.api.classes.response.AuroraXResponse.data", -"url":5, +"ref":"pyaurorax.search.DataSource.owner", +"url":2, "doc":"" }, { -"ref":"pyaurorax.api.classes.response.AuroraXResponse.status_code", -"url":5, +"ref":"pyaurorax.search.DataSource.maintainers", +"url":2, "doc":"" }, { -"ref":"pyaurorax.api.classes.urls", -"url":6, -"doc":"This class provides the URL endpoints for different AuroraX API requests. It is contained in a special class so that we can use different base URLs if desired." 
-}, -{ -"ref":"pyaurorax.api.classes.urls.URLs", -"url":6, +"ref":"pyaurorax.search.DataSource.ephemeris_metadata_schema", +"url":2, "doc":"" }, { -"ref":"pyaurorax.api.classes.urls.URLs.base_url", -"url":6, +"ref":"pyaurorax.search.DataSource.data_product_metadata_schema", +"url":2, "doc":"" }, { -"ref":"pyaurorax.api.classes.urls.URLs.data_sources_url", -"url":6, +"ref":"pyaurorax.search.DataSource.stats", +"url":2, "doc":"" }, { -"ref":"pyaurorax.api.classes.urls.URLs.data_sources_search_url", -"url":6, +"ref":"pyaurorax.search.DataSource.format", +"url":2, "doc":"" }, { -"ref":"pyaurorax.api.classes.urls.URLs.ephemeris_availability_url", -"url":6, -"doc":"" +"ref":"pyaurorax.search.Location", +"url":2, +"doc":"Representation for an AuroraX location, such as geographic coordinates, GSM coordinates, or northern/southern B-trace magnetic footprints. Latitude and longitude values are in decimal degrees format, ranging from -90 to 90 for latitude and -180 to 180 for longitude. Note that latitude and longitude must both be numbers, or both be None. Attributes: lat (float): latitude value lon (float): longitude value Raises: ValueError: if both latitude and longitude are not real numbers, or not both None." }, { -"ref":"pyaurorax.api.classes.urls.URLs.data_products_availability_url", -"url":6, +"ref":"pyaurorax.search.Location.lat", +"url":2, "doc":"" }, { -"ref":"pyaurorax.api.classes.urls.URLs.ephemeris_search_url", -"url":6, +"ref":"pyaurorax.search.Location.lon", +"url":2, "doc":"" }, { -"ref":"pyaurorax.api.classes.urls.URLs.ephemeris_upload_url", -"url":6, -"doc":"" +"ref":"pyaurorax.search.Location.to_json_serializable", +"url":2, +"doc":"Convert object to a JSON-serializable object (ie. translate datetime objects to strings) Returns: a dictionary object that is JSON-serializable", +"func":1 }, { -"ref":"pyaurorax.api.classes.urls.URLs.ephemeris_request_url", -"url":6, -"doc":"" +"ref":"pyaurorax.search.AvailabilityResult", +"url":2, +"doc":"Class definition for data availability information Attributes: data_source (pyaurorax.search.DataSource): the data source that the records are associated with available_ephemeris (Dict): the ephemeris availability information available_data_products (Dict): the data product availability information" }, { -"ref":"pyaurorax.api.classes.urls.URLs.data_products_search_url", -"url":6, +"ref":"pyaurorax.search.AvailabilityResult.data_source", +"url":2, "doc":"" }, { -"ref":"pyaurorax.api.classes.urls.URLs.data_products_upload_url", -"url":6, +"ref":"pyaurorax.search.AvailabilityResult.available_data_products", +"url":2, "doc":"" }, { -"ref":"pyaurorax.api.classes.urls.URLs.data_products_request_url", -"url":6, +"ref":"pyaurorax.search.AvailabilityResult.available_ephemeris", +"url":2, "doc":"" }, { -"ref":"pyaurorax.api.classes.urls.URLs.conjunction_search_url", -"url":6, -"doc":"" +"ref":"pyaurorax.search.EphemerisData", +"url":2, +"doc":"Ephemeris object Attributes: data_source: data source that the ephemeris record is associated with epoch: timestamp for the record (assumed it is in UTC) location_geo: Location object containing geographic latitude and longitude location_gsm: Location object containing GSM latitude and longitude (leave empty for data sources with a type of 'ground') nbtrace: Location object with north B-trace geographic latitude and longitude sbtrace: Location object with south B-trace geographic latitude and longitude metadata: metadata for this record (arbitrary keys and values)" }, { 
-"ref":"pyaurorax.api.classes.urls.URLs.conjunction_request_url", -"url":6, -"doc":"" +"ref":"pyaurorax.search.EphemerisData.to_json_serializable", +"url":2, +"doc":"Convert object to a JSON-serializable object (ie. translate datetime objects to strings) Returns: a dictionary object that is JSON-serializable", +"func":1 }, { -"ref":"pyaurorax.api.classes.urls.URLs.describe_conjunction_query_url", -"url":6, -"doc":"" +"ref":"pyaurorax.search.EphemerisSearch", +"url":2, +"doc":"Class representing an ephemeris search Note: At least one search criteria from programs, platforms, or instrument_types must be specified. Args: start: start timestamp of the search (inclusive) end: end timestamp of the search (inclusive) programs: list of programs to search through, defaults to None platforms: list of platforms to search through, defaults to None instrument_types: list of instrument types to search through, defaults to None metadata_filters: list of dictionaries describing metadata keys and values to filter on, defaults to None e.g. { \"key\": \"string\", \"operator\": \"=\", \"values\": [ \"string\" ] } metadata_filters_logical_operator: the logical operator to use when evaluating metadata filters (either 'AND' or 'OR'), defaults to \"AND\" response_format: JSON representation of desired data response format request: AuroraXResponse object returned when the search is executed request_id: unique ID assigned to the request by the AuroraX API request_url: unique URL assigned to the request by the AuroraX API executed: indicates if the search has been executed/started completed: indicates if the search has finished data_url: the URL where data is accessed query: the query for this request as JSON status: the status of the query data: the ephemeris records found logs: all log messages outputted by the AuroraX API for this request" }, { -"ref":"pyaurorax.api.classes.urls.URLs.describe_data_products_query_url", -"url":6, -"doc":"" +"ref":"pyaurorax.search.EphemerisSearch.query", +"url":2, +"doc":"Property for the query value" }, { -"ref":"pyaurorax.api.classes.urls.URLs.describe_ephemeris_query_url", -"url":6, -"doc":"" +"ref":"pyaurorax.search.EphemerisSearch.execute", +"url":2, +"doc":"Initiate ephemeris search request Raises: pyaurorax.exceptions.AuroraXError: invalid request parameters are set", +"func":1 }, { -"ref":"pyaurorax.api.classes.urls.URLs.list_requests_url", -"url":6, -"doc":"" +"ref":"pyaurorax.search.EphemerisSearch.update_status", +"url":2, +"doc":"Update the status of this ephemeris search request Args: status: the previously-retrieved status of this request (include to avoid requesting it from the API again), defaults to None", +"func":1 }, { -"ref":"pyaurorax.api.classes.urls.URLs.delete_requests_url", -"url":6, -"doc":"" +"ref":"pyaurorax.search.EphemerisSearch.check_for_data", +"url":2, +"doc":"Check to see if data is available for this ephemeris search request Returns: True if data is available, else False", +"func":1 }, { -"ref":"pyaurorax.availability", -"url":7, -"doc":"The availability module provides functions to quickly determine what data exists on the AuroraX platform. Note that all functions and classes from submodules are all imported at this level of the availability module. They can be referenced from here instead of digging in deeper to the submodules." 
+"ref":"pyaurorax.search.EphemerisSearch.get_data", +"url":2, +"doc":"Retrieve the data available for this ephemeris search request", +"func":1 }, { -"ref":"pyaurorax.availability.ephemeris", -"url":7, -"doc":"Retrieve information about the number of existing ephemeris records Args: start: start date to retrieve availability info from (inclusive) end: end date to retrieve availability info until (inclusive) program: program name to filter sources by, defaults to None platform: platform name to filter sources by, defaults to None instrument_type: instrument type to filter sources by, defaults to None source_type: source type to filter sources by, defaults to None. Other options are in the pyaurorax.sources module, or at the top level using the pyaurorax.SOURCE_TYPE_ variables. owner: owner email address to filter sources by, defaults to None format: the format of the data sources returned, defaults to \"basic_info\". Other options are in the pyaurorax.sources module, or at the top level using the pyaurorax.FORMAT_ variables. slow: query the data using a slower, but more accurate method, defaults to False Returns: ephemeris availability information matching the requested parameters", +"ref":"pyaurorax.search.EphemerisSearch.wait", +"url":2, +"doc":"Block and wait for the request to complete and data is available for retrieval Args: poll_interval: time in seconds to wait between polling attempts, defaults to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME verbose: output poll times and other progress messages, defaults to False", "func":1 }, { -"ref":"pyaurorax.availability.data_products", -"url":7, -"doc":"Retrieve information about the number of existing data product records Args: start: start date to retrieve availability info from (inclusive) end: end date to retrieve availability info until (inclusive) program: program name to filter sources by, defaults to None platform: platform name to filter sources by, defaults to None instrument_type: instrument type to filter sources by, defaults to None source_type: source type to filter sources by, defaults to None. Other options are in the pyaurorax.sources module, or at the top level using the pyaurorax.SOURCE_TYPE_ variables. owner: owner email address to filter sources by, defaults to None format: the format of the data sources returned, defaults to \"basic_info\". Other options are in the pyaurorax.sources module, or at the top level using the pyaurorax.FORMAT_ variables. slow: query the data using a slower, but more accurate method, defaults to False Returns: data product availability information matching the requested parameters", +"ref":"pyaurorax.search.EphemerisSearch.cancel", +"url":2, +"doc":"Cancel the ephemeris search request This method returns immediately by default since the API processes this request asynchronously. If you would prefer to wait for it to be completed, set the 'wait' parameter to True. You can adjust the polling time using the 'poll_interval' parameter. Args: wait: wait until the cancellation request has been completed (may wait for several minutes) poll_interval: seconds to wait between polling calls, defaults to STANDARD_POLLING_SLEEP_TIME. 
verbose: output poll times and other progress messages, defaults to False Returns: 1 on success Raises: pyaurorax.exceptions.AuroraXUnauthorizedError: invalid API key for this operation pyaurorax.exceptions.AuroraXAPIError: An API error was encountered", "func":1 }, { -"ref":"pyaurorax.availability.AvailabilityResult", -"url":7, -"doc":"Availability information object Attributes: data_source: the data source that the records are associated with available_data_products: the data product availability information available_ephemeris: the ephemeris availability information Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model." +"ref":"pyaurorax.search.DataProductData", +"url":2, +"doc":"Data product object Attributes: data_source: data source that the ephemeris record is associated with data_product_type: data product type (\"keogram\", \"movie\", \"summary_plot\") start: starting timestamp for the record (assumed it is in UTC), inclusive end: ending timestamp for the record (assumed it is in UTC), inclusive url: the URL of data product metadata: metadata for this record (arbitrary keys and values)" }, { -"ref":"pyaurorax.availability.AvailabilityResult.data_source", -"url":7, -"doc":"" +"ref":"pyaurorax.search.DataProductData.to_json_serializable", +"url":2, +"doc":"Convert object to a JSON-serializable object (ie. translate datetime objects to strings) Returns: a dictionary object that is JSON-serializable", +"func":1 }, { -"ref":"pyaurorax.availability.AvailabilityResult.available_data_products", -"url":7, -"doc":"" +"ref":"pyaurorax.search.DataProductSearch", +"url":2, +"doc":"Class representing a data product search Attributes: start: start timestamp of the search (inclusive) end: end timestamp of the search (inclusive) programs: list of program names to search platforms: list of platform names to search instrument_types: list of instrument types to search data_product_types: list of dictionaries describing data product types to filter on e.g. \"keogram\", defaults to None. Options are in the pyaurorax.data_products module, or at the top level using the pyaurorax.DATA_PRODUCT_TYPE variables. 
metadata_filters: list of dictionaries describing metadata keys and values to filter on, defaults to None Example: [{ \"key\": \"nbtrace_region\", \"operator\": \"in\", \"values\": [\"north polar cap\"] }] metadata_filters_logical_operator: the logical operator to use when evaluating metadata filters (either 'AND' or 'OR'), defaults to \"AND\" response_format: JSON representation of desired data response format request: AuroraXResponse object returned when the search is executed request_id: unique ID assigned to the request by the AuroraX API request_url: unique URL assigned to the request by the AuroraX API executed: indicates if the search has been executed/started completed: indicates if the search has finished data_url: the URL where data is accessed query: the query for this request as JSON status: the status of the query data: the data product records found logs: all log messages outputted by the AuroraX API for this request" }, { -"ref":"pyaurorax.availability.AvailabilityResult.available_ephemeris", -"url":7, -"doc":"" +"ref":"pyaurorax.search.DataProductSearch.query", +"url":2, +"doc":"Property for the query value" }, { -"ref":"pyaurorax.availability.availability", -"url":8, -"doc":"Functions for retrieving availablity information" +"ref":"pyaurorax.search.DataProductSearch.execute", +"url":2, +"doc":"Initiate a data product search request", +"func":1 }, { -"ref":"pyaurorax.availability.availability.ephemeris", -"url":8, -"doc":"Retrieve information about the number of existing ephemeris records Args: start: start date to retrieve availability info from (inclusive) end: end date to retrieve availability info until (inclusive) program: program name to filter sources by, defaults to None platform: platform name to filter sources by, defaults to None instrument_type: instrument type to filter sources by, defaults to None source_type: source type to filter sources by, defaults to None. Other options are in the pyaurorax.sources module, or at the top level using the pyaurorax.SOURCE_TYPE_ variables. owner: owner email address to filter sources by, defaults to None format: the format of the data sources returned, defaults to \"basic_info\". Other options are in the pyaurorax.sources module, or at the top level using the pyaurorax.FORMAT_ variables. slow: query the data using a slower, but more accurate method, defaults to False Returns: ephemeris availability information matching the requested parameters", +"ref":"pyaurorax.search.DataProductSearch.update_status", +"url":2, +"doc":"Update the status of this data product search request Args: status: the previously-retrieved status of this request (include to avoid requesting it from the API again), defaults to None", "func":1 }, { -"ref":"pyaurorax.availability.availability.data_products", -"url":8, -"doc":"Retrieve information about the number of existing data product records Args: start: start date to retrieve availability info from (inclusive) end: end date to retrieve availability info until (inclusive) program: program name to filter sources by, defaults to None platform: platform name to filter sources by, defaults to None instrument_type: instrument type to filter sources by, defaults to None source_type: source type to filter sources by, defaults to None. Other options are in the pyaurorax.sources module, or at the top level using the pyaurorax.SOURCE_TYPE_ variables. owner: owner email address to filter sources by, defaults to None format: the format of the data sources returned, defaults to \"basic_info\". 
Other options are in the pyaurorax.sources module, or at the top level using the pyaurorax.FORMAT_ variables. slow: query the data using a slower, but more accurate method, defaults to False Returns: data product availability information matching the requested parameters", +"ref":"pyaurorax.search.DataProductSearch.check_for_data", +"url":2, +"doc":"Check to see if data is available for this data product search request Returns: True if data is available, else False", "func":1 }, { -"ref":"pyaurorax.availability.classes", -"url":9, -"doc":"Separted classes and functions used by the availability module. Note that these classes and variables are all imported higher up at the top of the availability module. They can be referenced from there instead of digging in deeper to these submodules." +"ref":"pyaurorax.search.DataProductSearch.get_data", +"url":2, +"doc":"Retrieve the data available for this data product search request", +"func":1 }, { -"ref":"pyaurorax.availability.classes.availability_result", -"url":10, -"doc":"Class definition used for containing Availability information" +"ref":"pyaurorax.search.DataProductSearch.wait", +"url":2, +"doc":"Block and wait for the request to complete and data is available for retrieval Args: poll_interval: time in seconds to wait between polling attempts, defaults to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME verbose: output poll times and other progress messages, defaults to False", +"func":1 }, { -"ref":"pyaurorax.availability.classes.availability_result.AvailabilityResult", -"url":10, -"doc":"Availability information object Attributes: data_source: the data source that the records are associated with available_data_products: the data product availability information available_ephemeris: the ephemeris availability information Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model." +"ref":"pyaurorax.search.DataProductSearch.cancel", +"url":2, +"doc":"Cancel the data product search request This method returns immediately by default since the API processes this request asynchronously. If you would prefer to wait for it to be completed, set the 'wait' parameter to True. You can adjust the polling time using the 'poll_interval' parameter. Args: wait: wait until the cancellation request has been completed (may wait for several minutes) poll_interval: seconds to wait between polling calls, defaults to STANDARD_POLLING_SLEEP_TIME. 
verbose: output poll times and other progress messages, defaults to False Returns: 1 on success Raises: pyaurorax.exceptions.AuroraXAPIError: An API error was encountered pyaurorax.exceptions.AuroraXUnauthorizedError: invalid API key for this operation", +"func":1 }, { -"ref":"pyaurorax.availability.classes.availability_result.AvailabilityResult.data_source", -"url":10, -"doc":"" +"ref":"pyaurorax.search.Conjunction", +"url":2, +"doc":"Conjunction object Attributes: conjunction_type: the type of location data used when the conjunction was found (either 'nbtrace', 'sbtrace', or 'geographic') start: start timestamp of the conjunction end: end timestamp of the conjunction data_sources: data sources in the conjunction min_distance: minimum kilometer distance of the conjunction max_distance: maximum kilometer distance of the conjunction events: the sub-conjunctions that make up this over-arching conjunction (the conjunctions between each set of two data sources) closest_epoch: timestamp for when data sources were closest farthest_epoch: timestamp for when data sources were farthest" }, { -"ref":"pyaurorax.availability.classes.availability_result.AvailabilityResult.available_data_products", -"url":10, -"doc":"" +"ref":"pyaurorax.search.ConjunctionSearch", +"url":2, +"doc":"Class representing a conjunction search Attributes: start: start timestamp of the search (inclusive) end: end timestamp of the search (inclusive) distance: the maximum distance allowed between data sources when searching for conjunctions. This can either be a number (int or float), or a dictionary modified from the output of the \"get_advanced_distances_combos()\" function. ground: list of ground instrument search parameters, defaults to [] Example: [{ \"programs\": [\"themis-asi\"], \"platforms\": [\"gillam\", \"rabbit lake\"], \"instrument_types\": [\"RGB\"], \"ephemeris_metadata_filters\": { \"logical_operator\": \"AND\", \"expressions\": [ { \"key\": \"calgary_apa_ml_v1\", \"operator\": \"in\", \"values\": [ \"classified as APA\" ] } ] } }] space: list of one or more space instrument search parameters, defaults to [] Example: [{ \"programs\": [\"themis-asi\", \"swarm\"], \"platforms\": [\"themisa\", \"swarma\"], \"instrument_types\": [\"footprint\"], \"ephemeris_metadata_filters\": { \"logical_operator\": \"AND\", \"expressions\": [ { \"key\": \"nbtrace_region\", \"operator\": \"in\", \"values\": [ \"north auroral oval\" ] } ] }, \"hemisphere\": [ \"northern\" ] }] events: list of one or more events search parameters, defaults to [] Example: [{ \"programs\": [ \"events\" ], \"instrument_types\": [ \"substorm onsets\" ] }] conjunction_types: list of conjunction types, defaults to [\"nbtrace\"]. Options are in the pyaurorax.conjunctions module, or at the top level using the pyaurorax.CONJUNCTION_TYPE_ variables. epoch_search_precision: the time precision to which conjunctions are calculated. Can be 30 or 60 seconds. Defaults to 60 seconds. Note - this parameter is under active development and still considered \"alpha\". 
response_format: JSON representation of desired data response format request: AuroraXResponse object returned when the search is executed request_id: unique ID assigned to the request by the AuroraX API request_url: unique URL assigned to the request by the AuroraX API executed: indicates if the search has been executed/started completed: indicates if the search has finished data_url: the URL where data is accessed query: the query for this request as JSON status: the status of the query data: the conjunctions found logs: all log messages outputted by the AuroraX API for this request" }, { -"ref":"pyaurorax.availability.classes.availability_result.AvailabilityResult.available_ephemeris", -"url":10, -"doc":"" +"ref":"pyaurorax.search.ConjunctionSearch.check_criteria_block_count_validity", +"url":2, +"doc":"Check the number of criteria blocks to see if there are too many. A max of 10 is allowed by the AuroraX conjunction search engine. An exception is raised if it was determined to have too many. Raises: pyaurorax.exceptions.AuroraXError: too many criteria blocks are found", +"func":1 }, { -"ref":"pyaurorax.conjunctions", -"url":11, -"doc":"The conjunction module is used for finding conjunctions between groupings of data sources. Note that all functions and classes from submodules are all imported at this level of the conjunctions module. They can be referenced from here instead of digging in deeper to the submodules." +"ref":"pyaurorax.search.ConjunctionSearch.get_advanced_distances_combos", +"url":2, +"doc":"Get the advanced distances combinations for this search Args: default_distance: the default distance to use, defaults to None Returns: the advanced distances combinations", +"func":1 }, { -"ref":"pyaurorax.conjunctions.CONJUNCTION_TYPE_NBTRACE", -"url":11, -"doc":"Conjunction search 'conjunction_type' category for finding conjunctions using the north B-trace data" +"ref":"pyaurorax.search.ConjunctionSearch.distance", +"url":2, +"doc":"Property for the distance parameter Returns: the distance dictionary with all combinations" }, { -"ref":"pyaurorax.conjunctions.CONJUNCTION_TYPE_SBTRACE", -"url":11, -"doc":"Conjunction search 'conjunction_type' category for finding conjunctions using the south B-trace data" +"ref":"pyaurorax.search.ConjunctionSearch.query", +"url":2, +"doc":"Property for the query value Returns: the query parameter" }, { -"ref":"pyaurorax.conjunctions.search", -"url":11, -"doc":"Search for conjunctions between data sources By default, this function will block and wait until the request completes and all data is downloaded. If you don't want to wait, set the 'return_immediately value to True. The Search object will be returned right after the search has been started, and you can use the helper functions as part of that object to get the data when it's done. Args: start: start timestamp of the search (inclusive) end: end timestamp of the search (inclusive) distance: the maximum distance allowed between data sources when searching for conjunctions. This can either be a number (int or float), or a dictionary modified from the output of the \"get_advanced_distances_combos()\" function. 
ground: list of ground instrument search parameters, defaults to [] Example: [{ \"programs\": [\"themis-asi\"], \"platforms\": [\"gillam\", \"rabbit lake\"], \"instrument_types\": [\"RGB\"], \"ephemeris_metadata_filters\": { \"logical_operator\": \"AND\", \"expressions\": [ { \"key\": \"calgary_apa_ml_v1\", \"operator\": \"in\", \"values\": [ \"classified as APA\" ] } ] } }] space: list of one or more space instrument search parameters, defaults to [] Example: [{ \"programs\": [\"themis-asi\", \"swarm\"], \"platforms\": [\"themisa\", \"swarma\"], \"instrument_types\": [\"footprint\"], \"ephemeris_metadata_filters\": { \"logical_operator\": \"AND\", \"expressions\": [ { \"key\": \"nbtrace_region\", \"operator\": \"in\", \"values\": [ \"north auroral oval\" ] } ] }, \"hemisphere\": [ \"northern\" ] }] events: list of one or more events search parameters, defaults to [] Example: [{ \"programs\": [ \"events\" ], \"instrument_types\": [ \"substorm onsets\" ] }] conjunction_types: list of conjunction types, defaults to [] (meaning all conjunction types). Options are in the pyaurorax.conjunctions module, or at the top level using the pyaurorax.CONJUNCTION_TYPE_ variables. epoch_search_precision: the time precision to which conjunctions are calculated. Can be 30 or 60 seconds. Defaults to 60 seconds. Note - this parameter is under active development and still considered \"alpha\". response_format: JSON representation of desired data response format poll_interval: seconds to wait between polling calls, defaults to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME return_immediately: initiate the search and return without waiting for data to be received, defaults to False verbose: show the progress of the request using the request log, defaults Returns: a pyaurorax.conjunctions.Search object", +"ref":"pyaurorax.search.ConjunctionSearch.execute", +"url":2, +"doc":"Initiate a conjunction search request Raises: pyaurorax.exceptions.AuroraXAPIError: An API error was encountered", "func":1 }, { -"ref":"pyaurorax.conjunctions.describe", -"url":11, -"doc":"Describe a conjunction search as an \"SQL-like\" string Args: search_obj: the conjunction search to describe Returns: the \"SQL-like\" string describing the conjunction search object", +"ref":"pyaurorax.search.ConjunctionSearch.update_status", +"url":2, +"doc":"Update the status of this conjunction search request Args: status: the previously-retrieved status of this request (include to avoid requesting it from the API again), defaults to None Raises: pyaurorax.exceptions.AuroraXAPIError: An API error was encountered", "func":1 }, { -"ref":"pyaurorax.conjunctions.get_request_url", -"url":11, -"doc":"Get the conjunction search request URL for a given request ID. This URL can be used for subsequent pyaurorax.requests function calls. Primarily this method facilitates delving into details about a set of already-submitted conjunction searches. 
Args: request_id: the request identifier Returns: the request URL", +"ref":"pyaurorax.search.ConjunctionSearch.check_for_data", +"url":2, +"doc":"Check to see if data is available for this conjunction search request Returns: True if data is available, else False Raises: pyaurorax.exceptions.AuroraXAPIError: An API error was encountered", "func":1 }, { -"ref":"pyaurorax.conjunctions.Conjunction", -"url":11, -"doc":"Conjunction object Attributes: conjunction_type: the type of location data used when the conjunction was found (either be 'nbtrace' or 'sbtrace') start: start timestamp of the conjunction end: end timestamp of the conjunction data_sources: data sources in the conjunction min_distance: minimum kilometer distance of the conjunction max_distance: maximum kilometer distance of the conjunction events: the sub-conjunctions that make up this over-arching conjunction (the conjunctions between each set of two data sources) Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model." +"ref":"pyaurorax.search.ConjunctionSearch.get_data", +"url":2, +"doc":"Retrieve the data available for this conjunction search request Raises: pyaurorax.exceptions.AuroraXAPIError: An API error was encountered", +"func":1 }, { -"ref":"pyaurorax.conjunctions.Conjunction.conjunction_type", -"url":11, -"doc":"" -}, +"ref":"pyaurorax.search.ConjunctionSearch.wait", +"url":2, +"doc":"Block and wait until the request is complete and data is available for retrieval Args: poll_interval: time in seconds to wait between polling attempts, defaults to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME verbose: output poll times and other progress messages, defaults to False Raises: pyaurorax.exceptions.AuroraXAPIError: An API error was encountered", +"func":1 +}, { -"ref":"pyaurorax.conjunctions.Conjunction.start", -"url":11, -"doc":"" +"ref":"pyaurorax.search.ConjunctionSearch.cancel", +"url":2, +"doc":"Cancel the conjunction search request This method returns immediately by default since the API processes this request asynchronously. If you would prefer to wait for it to be completed, set the 'wait' parameter to True. You can adjust the polling time using the 'poll_interval' parameter. Args: wait: wait until the cancellation request has been completed (may wait for several minutes) poll_interval: seconds to wait between polling calls, defaults to STANDARD_POLLING_SLEEP_TIME. verbose: output poll times and other progress messages, defaults to False Returns: 1 on success Raises: pyaurorax.exceptions.AuroraXAPIError: An API error was encountered", +"func":1 }, { -"ref":"pyaurorax.conjunctions.Conjunction.end", -"url":11, +"ref":"pyaurorax.search.requests", +"url":3, +"doc":"Helper methods for retrieving data from an AuroraX search engine API request. Note that all functions and classes from submodules are all imported at this level of the requests module. They can be referenced from here instead of digging in deeper to the submodules." +}, +{ +"ref":"pyaurorax.search.requests.RequestsManager", +"url":3, +"doc":"The RequestsManager object is initialized within every PyAuroraX object. It acts as a way to access the submodules and carry over configuration information in the super class." 
+}, +{ +"ref":"pyaurorax.search.requests.RequestsManager.get_status", +"url":3, +"doc":"Retrieve the status of a request Args: request_url: the URL of the request information Returns: the status information for the request", +"func":1 +}, +{ +"ref":"pyaurorax.search.requests.RequestsManager.get_data", +"url":3, +"doc":"Retrieve the data for a request Args: data_url: the URL for the data of a request, response_format: the response format to send as post data, defaults to None skip_serializing: skip any object serializing, defaults to False Raises: pyaurorax.exceptions.AuroraXDataRetrievalError: error retrieving data Returns: the data for this request", +"func":1 +}, +{ +"ref":"pyaurorax.search.requests.RequestsManager.get_logs", +"url":3, +"doc":"Retrieve the logs for a request Args: request_url: the URL of the request information Returns: the log messages for the request", +"func":1 +}, +{ +"ref":"pyaurorax.search.requests.RequestsManager.wait_for_data", +"url":3, +"doc":"Block and wait for the data to be made available for a request Args: request_url: the URL of the request information poll_interval: seconds to wait between polling calls, defaults to STANDARD_POLLING_SLEEP_TIME verbose: output poll times and other progress messages, defaults to False Returns: the status information for the request", +"func":1 +}, +{ +"ref":"pyaurorax.search.requests.RequestsManager.cancel", +"url":3, +"doc":"Cancel the request at the given URL. This method returns immediately by default since the API processes this request asynchronously. If you would prefer to wait for it to be completed, set the 'wait' parameter to True. You can adjust the polling time using the 'poll_interval' parameter. Args: request_url: the URL string of the request to be canceled wait: set to True to block until the cancellation request has been completed (may wait for several minutes) poll_interval: seconds to wait between polling calls, defaults to STANDARD_POLLING_SLEEP_TIME. verbose: if True then output poll times and other progress, defaults to False Returns: 0 on success Raises: pyaurorax.exceptions.AuroraXUnauthorizedError: invalid API key for this operation pyaurorax.exceptions.AuroraXAPIError: An API error was encountered", +"func":1 +}, +{ +"ref":"pyaurorax.search.requests.RequestsManager.list", +"url":3, +"doc":"Retrieve a list of search requests matching certain criteria. Administrators only. Args: search_type: the type of search request, valid values are 'conjunction', 'ephemeris', or 'data_product'. Exclusion of value will return all search requests of any type, defaults to None active: return searches that are currently active or not, exclude for both, defaults to None start: start timestamp for narrowing down search timeframes, defaults to None end: end timestamp for narrowing down search timeframes, defaults to None file_size: filter by result file size, measured in KB, defaults to None result_count: filter by result count, defaults to None query_duration: filter by query duration, measured in milliseconds, defaults to None error_condition: filter by if an error occurred or not, exclude for both, defaults to None Returns: list of matching search requests Raises: pyaurorax.exceptions.AuroraXUnauthorizedError: invalid API key for this operation", +"func":1 +}, +{ +"ref":"pyaurorax.search.requests.RequestsManager.delete", +"url":3, +"doc":"Entirely remove a search request from the AuroraX database. Administrators only. 
Args: request_id: search request UUID Returns: 0 on success, raises error on failure Raises: pyaurorax.exceptions.AuroraXNotFoundError: data source not found", +"func":1 +}, +{ +"ref":"pyaurorax.search.data_products", +"url":4, +"doc":"Use the AuroraX search engine to search and upload data product records. Note that all functions and classes from submodules are all imported at this level of the data_products module. They can be referenced from here instead of digging in deeper to the submodules." +}, +{ +"ref":"pyaurorax.search.data_products.DataProductsManager", +"url":4, +"doc":"The DataProductsManager object is initialized within every PyAuroraX object. It acts as a way to access the submodules and carry over configuration information in the super class." +}, +{ +"ref":"pyaurorax.search.data_products.DataProductsManager.search", +"url":4, +"doc":"Search for data product records By default, this function will block and wait until the request completes and all data is downloaded. If you don't want to wait, set the 'return_immediately value to True. The Search object will be returned right after the search has been started, and you can use the helper functions as part of that object to get the data when it's done. Note: At least one search criteria from programs, platforms, or instrument_types, must be specified. Args: start: start timestamp of the search (inclusive) end: end timestamp of the search (inclusive) programs: list of programs to search through, defaults to None platforms: list of platforms to search through, defaults to None instrument_types: list of instrument types to search through, defaults to None data_product_types: list of dictionaries describing data product types to filter on e.g. \"keogram\", defaults to None. Options are in the pyaurorax.data_products module, or at the top level using the pyaurorax.DATA_PRODUCT_TYPE variables. metadata_filters: list of dictionaries describing metadata keys and values to filter on, defaults to None Example: [{ \"key\": \"nbtrace_region\", \"operator\": \"in\", \"values\": [\"north polar cap\"] }] metadata_filters_logical_operator: the logical operator to use when evaluating metadata filters (either 'AND' or 'OR'), defaults to \"AND\" response_format: JSON representation of desired data response format poll_interval: time in seconds to wait between polling attempts, defaults to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME return_immediately: initiate the search and return without waiting for data to be received, defaults to False verbose: output poll times and other progress messages, defaults to False Returns: a pyaurorax.search.DataProductSearch object", +"func":1 +}, +{ +"ref":"pyaurorax.search.data_products.DataProductsManager.upload", +"url":4, +"doc":"Upload data product records to AuroraX Args: identifier: the AuroraX data source ID records: data product records to upload validate_source: validate all records before uploading, defaults to False chunk_size: number of records to upload in a single call, defaults to 500 Returns: 0 for success, raises exception on error Raises: pyaurorax.exceptions.AuroraXUploadError: upload error pyaurorax.exceptions.AuroraXError: data source validation error", +"func":1 +}, +{ +"ref":"pyaurorax.search.data_products.DataProductsManager.delete_urls", +"url":4, +"doc":"Delete data products by URL. The API processes this request asynchronously, so this method will return immediately whether or not the data has already been deleted. 
Args: data_source: data source associated with the data product records (note that identifier, program, platform, and instrument_type are required) urls: URLs of data product records to delete Returns: 0 on success Raises: pyaurorax.exceptions.AuroraXAPIError: An API error was encountered pyaurorax.exceptions.AuroraXUnauthorizedError: invalid API key for this operation", +"func":1 +}, +{ +"ref":"pyaurorax.search.data_products.DataProductsManager.delete", +"url":4, +"doc":"Delete data products associated with a data source within a date range. The API processes this request asynchronously, so this method will return immediately whether or not the data has already been deleted. Args: data_source: data source associated with the data product records (note that identifier, program, platform, and instrument_type are required) start: timestamp marking beginning of range to delete records for, inclusive end: timestamp marking end of range to delete records for, inclusive data_product_types: specific types of data product to delete, e.g. [\"keogram\", \"movie\"]. If omitted, all data product types will be deleted. Returns: 1 on success Raises: pyaurorax.exceptions.AuroraXNotFoundError: source not found pyaurorax.exceptions.AuroraXUnauthorizedError: invalid API key for this operation", +"func":1 +}, +{ +"ref":"pyaurorax.search.data_products.DataProductsManager.describe", +"url":4, +"doc":"Describe a data product search as an \"SQL-like\" string. Either a DataProductSearch object can be supplied, or a dictionary of the raw JSON query. Args: search_obj: the data product search to describe, optional query_dict: the data product search query represented as a raw dictionary, optional Returns: the \"SQL-like\" string describing the data product search object", +"func":1 +}, +{ +"ref":"pyaurorax.search.data_products.DataProductsManager.get_request_url", +"url":4, +"doc":"Get the data product search request URL for a given request ID. This URL can be used for subsequent pyaurorax.requests function calls. Primarily this method facilitates delving into details about a set of already-submitted data product searches. Args: request_id: the request identifier Returns: the request URL", +"func":1 +}, +{ +"ref":"pyaurorax.search.data_products.classes", +"url":5, +"doc":"Separated classes and functions used by the data_products module. Note that these classes and variables are all imported higher up at the top of the data_products module. They can be referenced from there instead of digging in deeper to these submodules." +}, +{ +"ref":"pyaurorax.search.data_products.classes.search", +"url":6, +"doc":"Class definition for a data product search" +}, +{ +"ref":"pyaurorax.search.data_products.classes.search.DataProductSearch", +"url":6, +"doc":"Class representing a data product search Attributes: start: start timestamp of the search (inclusive) end: end timestamp of the search (inclusive) programs: list of program names to search platforms: list of platform names to search instrument_types: list of instrument types to search data_product_types: list of dictionaries describing data product types to filter on e.g. \"keogram\", defaults to None. Options are in the pyaurorax.data_products module, or at the top level using the pyaurorax.DATA_PRODUCT_TYPE variables. 
metadata_filters: list of dictionaries describing metadata keys and values to filter on, defaults to None Example: [{ \"key\": \"nbtrace_region\", \"operator\": \"in\", \"values\": [\"north polar cap\"] }] metadata_filters_logical_operator: the logical operator to use when evaluating metadata filters (either 'AND' or 'OR'), defaults to \"AND\" response_format: JSON representation of desired data response format request: AuroraXResponse object returned when the search is executed request_id: unique ID assigned to the request by the AuroraX API request_url: unique URL assigned to the request by the AuroraX API executed: indicates if the search has been executed/started completed: indicates if the search has finished data_url: the URL where data is accessed query: the query for this request as JSON status: the status of the query data: the data product records found logs: all log messages outputted by the AuroraX API for this request" +}, +{ +"ref":"pyaurorax.search.data_products.classes.search.DataProductSearch.query", +"url":6, +"doc":"Property for the query value" +}, +{ +"ref":"pyaurorax.search.data_products.classes.search.DataProductSearch.execute", +"url":6, +"doc":"Initiate a data product search request", +"func":1 +}, +{ +"ref":"pyaurorax.search.data_products.classes.search.DataProductSearch.update_status", +"url":6, +"doc":"Update the status of this data product search request Args: status: the previously-retrieved status of this request (include to avoid requesting it from the API again), defaults to None", +"func":1 +}, +{ +"ref":"pyaurorax.search.data_products.classes.search.DataProductSearch.check_for_data", +"url":6, +"doc":"Check to see if data is available for this data product search request Returns: True if data is available, else False", +"func":1 +}, +{ +"ref":"pyaurorax.search.data_products.classes.search.DataProductSearch.get_data", +"url":6, +"doc":"Retrieve the data available for this data product search request", +"func":1 +}, +{ +"ref":"pyaurorax.search.data_products.classes.search.DataProductSearch.wait", +"url":6, +"doc":"Block and wait for the request to complete and data is available for retrieval Args: poll_interval: time in seconds to wait between polling attempts, defaults to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME verbose: output poll times and other progress messages, defaults to False", +"func":1 +}, +{ +"ref":"pyaurorax.search.data_products.classes.search.DataProductSearch.cancel", +"url":6, +"doc":"Cancel the data product search request This method returns immediately by default since the API processes this request asynchronously. If you would prefer to wait for it to be completed, set the 'wait' parameter to True. You can adjust the polling time using the 'poll_interval' parameter. Args: wait: wait until the cancellation request has been completed (may wait for several minutes) poll_interval: seconds to wait between polling calls, defaults to STANDARD_POLLING_SLEEP_TIME. verbose: output poll times and other progress messages, defaults to False Returns: 1 on success Raises: pyaurorax.exceptions.AuroraXAPIError: An API error was encountered pyaurorax.exceptions.AuroraXUnauthorizedError: invalid API key for this operation", +"func":1 +}, +{ +"ref":"pyaurorax.search.data_products.classes.data_product", +"url":7, +"doc":"Class definition for a data product" +}, +{ +"ref":"pyaurorax.search.data_products.classes.data_product.DATA_PRODUCT_TYPE_KEOGRAM", +"url":7, +"doc":"Data product type for keograms. 
Keograms are a 2-D representation of a series of images, and are one of the most popular data products that auroral science uses. More information can be found at https://docs.aurorax.space/about_the_data/standards/#keograms." }, { +"ref":"pyaurorax.search.data_products.classes.data_product.DATA_PRODUCT_TYPE_MONTAGE", +"url":7, +"doc":"Data product type for montages. Like keograms, montages are another representation of a series of images. However, montages are not a 2D representation but rather a collage of thumbnail images for the period of time. An example can be found at https://data.phys.ucalgary.ca/sort_by_project/THEMIS/asi/stream2/2021/12/28/gill_themis19/20211228__gill_themis19_full-montage.pgm.jpg" }, { +"ref":"pyaurorax.search.data_products.classes.data_product.DATA_PRODUCT_TYPE_MOVIE", +"url":7, +"doc":"Data product type for movies. Movies are timelapse video files of auroral data, usually as MP4 or MPEG. They can consist of frames for a whole night, or an hour, and can be at any cadence that is most appropriate." }, { +"ref":"pyaurorax.search.data_products.classes.data_product.DATA_PRODUCT_TYPE_SUMMARY_PLOT", +"url":7, +"doc":"Data product type for summary plots. A summary plot can be any type of plot that shows auroral data in a summary format, for example a background-subtracted meridian scanning photometer plot showing counts in Rayleighs." }, { +"ref":"pyaurorax.search.data_products.classes.data_product.DATA_PRODUCT_TYPE_DATA_AVAILABILITY", +"url":7, +"doc":"Data product type for data availability. The AuroraX data availability system does not account for times when data was not expected to be collected, such as summer shutdowns due to inadequate night hours. This data product type for 'data availability' is meant to be used as a smarter data availability mechanism for Aurora." }, { +"ref":"pyaurorax.search.data_products.classes.data_product.DataProductData", +"url":7, +"doc":"Data product object Attributes: data_source: data source that the ephemeris record is associated with data_product_type: data product type (\"keogram\", \"movie\", \"summary_plot\") start: starting timestamp for the record (assumed it is in UTC), inclusive end: ending timestamp for the record (assumed it is in UTC), inclusive url: the URL of data product metadata: metadata for this record (arbitrary keys and values)" }, { +"ref":"pyaurorax.search.data_products.classes.data_product.DataProductData.to_json_serializable", +"url":7, +"doc":"Convert object to a JSON-serializable object (ie. translate datetime objects to strings) Returns: a dictionary object that is JSON-serializable", +"func":1 }, { +"ref":"pyaurorax.search.location", +"url":8, +"doc":"AuroraX pyaurorax.search.location.Location class definition" }, { +"ref":"pyaurorax.search.location.Location", +"url":8, +"doc":"Representation for an AuroraX location, such as geographic coordinates, GSM coordinates, or northern/southern B-trace magnetic footprints. Latitude and longitude values are in decimal degrees format, ranging from -90 to 90 for latitude and -180 to 180 for longitude. Note that latitude and longitude must both be numbers, or both be None. Attributes: lat (float): latitude value lon (float): longitude value Raises: ValueError: if both latitude and longitude are not real numbers, or not both None." 
+}, +{ +"ref":"pyaurorax.search.location.Location.lat", +"url":8, "doc":"" }, { -"ref":"pyaurorax.conjunctions.Conjunction.data_sources", -"url":11, +"ref":"pyaurorax.search.location.Location.lon", +"url":8, "doc":"" }, { -"ref":"pyaurorax.conjunctions.Conjunction.min_distance", -"url":11, +"ref":"pyaurorax.search.location.Location.to_json_serializable", +"url":8, +"doc":"Convert object to a JSON-serializable object (ie. translate datetime objects to strings) Returns: a dictionary object that is JSON-serializable", +"func":1 +}, +{ +"ref":"pyaurorax.search.availability", +"url":9, +"doc":"Retrieve availability information about data in the AuroraX search engine." +}, +{ +"ref":"pyaurorax.search.availability.AvailabilityManager", +"url":9, +"doc":"The AvailabilityManager object is initialized within every PyAuroraX object. It acts as a way to access the submodules and carry over configuration information in the super class." +}, +{ +"ref":"pyaurorax.search.availability.AvailabilityManager.ephemeris", +"url":9, +"doc":"Retrieve information about the number of existing ephemeris records Args: start (datetime.date): Start date to retrieve availability info for (inclusive) end (datetime.date): End date to retrieve availability info for (inclusive) program (str): Program name to filter sources by, defaults to None platform (str): Platform name to filter sources by, defaults to None instrument_type (str): Instrument type to filter sources by, defaults to None source_type (str): The data source type to filter for, defaults to None . Options are in the pyaurorax.search.sources module, or at the top level using the pyaurorax.search.SOURCE_TYPE_ variables. owner (str): Owner email address to filter sources by, defaults to None format (str): The format of the data sources returned, defaults to FORMAT_FULL_RECORD . Other options are in the pyaurorax.search.sources module, or at the top level using the pyaurorax.search.FORMAT_ variables. slow (bool): Query the data using a slower, but more accurate method, defaults to False Returns: Ephemeris availability information matching the requested parameters Raises: pyaurorax.exceptions.AuroraXAPIError: An API error was encountered", +"func":1 +}, +{ +"ref":"pyaurorax.search.availability.AvailabilityManager.data_products", +"url":9, +"doc":"Retrieve information about the number of existing data product records Args: start (datetime.date): Start date to retrieve availability info for (inclusive) end (datetime.date): End date to retrieve availability info for (inclusive) program (str): Program name to filter sources by, defaults to None platform (str): Platform name to filter sources by, defaults to None instrument_type (str): Instrument type to filter sources by, defaults to None source_type (str): The data source type to filter for, defaults to None . Options are in the pyaurorax.search.sources module, or at the top level using the pyaurorax.search.SOURCE_TYPE_ variables. owner (str): Owner email address to filter sources by, defaults to None format (str): The format of the data sources returned, defaults to FORMAT_FULL_RECORD . Other options are in the pyaurorax.search.sources module, or at the top level using the pyaurorax.search.FORMAT_ variables. 
slow (bool): Query the data using a slower, but more accurate method, defaults to False Returns: Data product availability information matching the requested parameters Raises: pyaurorax.exceptions.AuroraXAPIError: An API error was encountered", +"func":1 +}, +{ +"ref":"pyaurorax.search.availability.AvailabilityResult", +"url":9, +"doc":"Class definition for data availability information Attributes: data_source (pyaurorax.search.DataSource): the data source that the records are associated with available_ephemeris (Dict): the ephemeris availability information available_data_products (Dict): the data product availability information" +}, +{ +"ref":"pyaurorax.search.availability.AvailabilityResult.data_source", +"url":9, "doc":"" }, { -"ref":"pyaurorax.conjunctions.Conjunction.max_distance", -"url":11, +"ref":"pyaurorax.search.availability.AvailabilityResult.available_data_products", +"url":9, "doc":"" }, { -"ref":"pyaurorax.conjunctions.Conjunction.events", -"url":11, +"ref":"pyaurorax.search.availability.AvailabilityResult.available_ephemeris", +"url":9, "doc":"" }, { -"ref":"pyaurorax.conjunctions.Search", -"url":11, -"doc":"Class representing a conjunction search Attributes: start: start timestamp of the search (inclusive) end: end timestamp of the search (inclusive) distance: the maximum distance allowed between data sources when searching for conjunctions. This can either be a number (int or float), or a dictionary modified from the output of the \"get_advanced_distances_combos()\" function. ground: list of ground instrument search parameters, defaults to [] Example: [{ \"programs\": [\"themis-asi\"], \"platforms\": [\"gillam\", \"rabbit lake\"], \"instrument_types\": [\"RGB\"], \"ephemeris_metadata_filters\": { \"logical_operator\": \"AND\", \"expressions\": [ { \"key\": \"calgary_apa_ml_v1\", \"operator\": \"in\", \"values\": [ \"classified as APA\" ] } ] } }] space: list of one or more space instrument search parameters, defaults to [] Example: [{ \"programs\": [\"themis-asi\", \"swarm\"], \"platforms\": [\"themisa\", \"swarma\"], \"instrument_types\": [\"footprint\"], \"ephemeris_metadata_filters\": { \"logical_operator\": \"AND\", \"expressions\": [ { \"key\": \"nbtrace_region\", \"operator\": \"in\", \"values\": [ \"north auroral oval\" ] } ] }, \"hemisphere\": [ \"northern\" ] }] events: list of one or more events search parameters, defaults to [] Example: [{ \"programs\": [ \"events\" ], \"instrument_types\": [ \"substorm onsets\" ] }] conjunction_types: list of conjunction types, defaults to [\"nbtrace\"]. Options are in the pyaurorax.conjunctions module, or at the top level using the pyaurorax.CONJUNCTION_TYPE_ variables. epoch_search_precision: the time precision to which conjunctions are calculated. Can be 30 or 60 seconds. Defaults to 60 seconds. Note - this parameter is under active development and still considered \"alpha\". 
response_format: JSON representation of desired data response format request: AuroraXResponse object returned when the search is executed request_id: unique ID assigned to the request by the AuroraX API request_url: unique URL assigned to the request by the AuroraX API executed: indicates if the search has been executed/started completed: indicates if the search has finished data_url: the URL where data is accessed query: the query for this request as JSON status: the status of the query data: the conjunctions found logs: all log messages outputed by the AuroraX API for this request Returns: a pyaurorax.conjunctions.Search object" +"ref":"pyaurorax.search.availability.classes", +"url":10, +"doc":"Class definitions used by the availability submodule" }, { -"ref":"pyaurorax.conjunctions.Search.check_criteria_block_count_validity", +"ref":"pyaurorax.search.availability.classes.availability_result", "url":11, -"doc":"Check the number of of criteria blocks to see if there is too many. A max of 10 is allowed by the AuroraX conjunction search engine. An exception is raised if it was determined to have too many. Raises: pyaurorax.exceptions.AuroraXBadParametersException: too many criteria blocks are found", -"func":1 +"doc":"Class definition for data availability information" }, { -"ref":"pyaurorax.conjunctions.Search.get_advanced_distances_combos", +"ref":"pyaurorax.search.availability.classes.availability_result.AvailabilityResult", "url":11, -"doc":"Get the advanced distances combinations for this search Args: default_distance: the default distance to use, defaults to None Returns: the advanced distances combinations", -"func":1 +"doc":"Class definition for data availability information Attributes: data_source (pyaurorax.search.DataSource): the data source that the records are associated with available_ephemeris (Dict): the ephemeris availability information available_data_products (Dict): the data product availability information" }, { -"ref":"pyaurorax.conjunctions.Search.distance", +"ref":"pyaurorax.search.availability.classes.availability_result.AvailabilityResult.data_source", "url":11, -"doc":"Property for the distance parameter Returns: the distance dictionary with all combinations" +"doc":"" }, { -"ref":"pyaurorax.conjunctions.Search.query", +"ref":"pyaurorax.search.availability.classes.availability_result.AvailabilityResult.available_data_products", "url":11, -"doc":"Property for the query value Returns: the query parameter" +"doc":"" }, { -"ref":"pyaurorax.conjunctions.Search.execute", +"ref":"pyaurorax.search.availability.classes.availability_result.AvailabilityResult.available_ephemeris", "url":11, -"doc":"Initiate a conjunction search request Raises: pyaurorax.exceptions.AuroraXBadParametersException: too many criteria blocks", +"doc":"" +}, +{ +"ref":"pyaurorax.search.sources", +"url":12, +"doc":"Manage AuroraX data sources utilized by the search engine." +}, +{ +"ref":"pyaurorax.search.sources.SourcesManager", +"url":12, +"doc":"The SourcesManager object is initialized within every PyAuroraX object. It acts as a way to access the submodules and carry over configuration information in the super class." +}, +{ +"ref":"pyaurorax.search.sources.SourcesManager.list", +"url":12, +"doc":"Retrieve all data source records. Parameters can be used to filter as desired. 
Args: program (str): the program to filter for, defaults to None platform (str): the platform to filter for, defaults to None instrument_type (str): the instrument type to filter for, defaults to None source_type (str): the data source type to filter for, defaults to None . Options are in the pyaurorax.search.sources module, or at the top level using the pyaurorax.search.SOURCE_TYPE_ variables. owner (str): the owner's email address to filter for, defaults to None format (str): the format of the data sources returned, defaults to classes.data_source.FORMAT_FULL_RECORD . Other options are in the pyaurorax.search.sources module, or at the top level using the pyaurorax.search.FORMAT_ variables. order (str): the category to order results by. Valid values are identifier, program, platform, instrument_type, display_name, or owner. Defaults to identifier include_stats (bool): include additional stats information about the data source, defaults to False Returns: a list of DataSource records matching the requested parameters Raises: pyaurorax.exceptions.AuroraXAPIError: error during API call", "func":1 }, { -"ref":"pyaurorax.conjunctions.Search.update_status", -"url":11, -"doc":"Update the status of this conjunction search request Args: status: the previously-retrieved status of this request (include to avoid requesting it from the API again), defaults to None", +"ref":"pyaurorax.search.sources.SourcesManager.search", +"url":12, +"doc":"Search for data source records. Parameters can be used to filter as desired. This function is very similar to the list() function, however multiple programs, platforms, and/or instrument types can be supplied here. The list() function only supports single values for the parameters. Args: programs (List[str]): the programs to search for, defaults to [] platforms (List[str]): the platforms to search for, defaults to [] instrument_type (List[str]): the instrument types to search for, defaults to [] format (str): the format of the data sources returned, defaults to classes.data_source.FORMAT_FULL_RECORD . Other options are in the pyaurorax.search.sources module, or at the top level using the pyaurorax.search.FORMAT_ variables. order (str): the category to order results by. Valid values are identifier, program, platform, instrument_type, display_name, or owner. Defaults to identifier include_stats (bool): include additional stats information about the data source, defaults to False Returns: a list of DataSource records matching the requested parameters Raises: pyaurorax.exceptions.AuroraXAPIError: error during API call", "func":1 }, { -"ref":"pyaurorax.conjunctions.Search.check_for_data", -"url":11, -"doc":"Check to see if data is available for this conjunction search request Returns: True if data is available, else False", +"ref":"pyaurorax.search.sources.SourcesManager.get", +"url":12, +"doc":"Retrieve a specific data source record Args: program (str): the program name platform (str): the platform name instrument_type (str): the instrument type name format (str): the format of the data sources returned, defaults to classes.data_source.FORMAT_FULL_RECORD . Other options are in the pyaurorax.search.sources module, or at the top level using the pyaurorax.search.FORMAT_ variables. 
include_stats (bool): include additional stats information about the data source, defaults to False Returns: the DataSource matching the requested parameters Raises: pyaurorax.exceptions.AuroraXAPIError: error during API call pyaurorax.exceptions.AuroraXNotFoundError: source not found", "func":1 }, { -"ref":"pyaurorax.conjunctions.Search.get_data", -"url":11, -"doc":"Retrieve the data available for this conjunction search request", +"ref":"pyaurorax.search.sources.SourcesManager.get_using_filters", +"url":12, +"doc":"Retrieve all data sources matching a filter Args: program (str): the program to filter for, defaults to None platform (str): the platform to filter for, defaults to None instrument_type (str): the instrument type to filter for, defaults to None source_type (str): the data source type to filter for, defaults to None . Options are in the pyaurorax.search.sources module, or at the top level using the pyaurorax.search.SOURCE_TYPE_ variables. owner (str): the owner's email address to filter for, defaults to None format (str): the format of the data sources returned, defaults to classes.data_source.FORMAT_FULL_RECORD . Other options are in the pyaurorax.search.sources module, or at the top level using the pyaurorax.search.FORMAT_ variables. order (str): the category to order results by. Valid values are identifier, program, platform, instrument_type, display_name, or owner. Defaults to identifier include_stats (bool): include additional stats information about the data source, defaults to False . Returns: a list of DataSource records matching the requested parameters Raises: pyaurorax.exceptions.AuroraXAPIError: error during API call", "func":1 }, { -"ref":"pyaurorax.conjunctions.Search.wait", -"url":11, -"doc":"Block and wait until the request is complete and data is available for retrieval Args: poll_interval: time in seconds to wait between polling attempts, defaults to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME verbose: output poll times and other progress messages, defaults to False", +"ref":"pyaurorax.search.sources.SourcesManager.get_using_identifier", +"url":12, +"doc":"Retrieve data source for a specific identifier Args: identifier (int): the AuroraX unique data source identifier number format (str): the format of the data sources returned, defaults to classes.data_source.FORMAT_FULL_RECORD . Other options are in the pyaurorax.search.sources module, or at the top level using the pyaurorax.search.FORMAT_ variables. include_stats (bool): include additional stats information about the data source, defaults to False Returns: the DataSource for the specified identifier Raises: pyaurorax.exceptions.AuroraXAPIError: error during API call", "func":1 }, { -"ref":"pyaurorax.conjunctions.Search.cancel", -"url":11, -"doc":"Cancel the conjunction search request This method returns immediately by default since the API processes this request asynchronously. If you would prefer to wait for it to be completed, set the 'wait' parameter to True. You can adjust the polling time using the 'poll_interval' parameter. Args: wait: wait until the cancellation request has been completed (may wait for several minutes) poll_interval: seconds to wait between polling calls, defaults to STANDARD_POLLING_SLEEP_TIME. 
verbose: output poll times and other progress messages, defaults to False Returns: 1 on success Raises: pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation", +"ref":"pyaurorax.search.sources.SourcesManager.add", +"url":12, +"doc":"Add a new data source to the AuroraX search engine Args: data_source (DataSource): the data source to add (note: it must be a fully-defined DataSource object) Returns: the newly created DataSource . Raises: pyaurorax.exceptions.AuroraXAPIError: error during API call pyaurorax.exceptions.AuroraXUnauthorizedError: not allowed to perform task, or API key / user permissions are invalid pyaurorax.exceptions.AuroraXDuplicateError: duplicate data source, already exists", "func":1 }, { -"ref":"pyaurorax.conjunctions.classes", +"ref":"pyaurorax.search.sources.SourcesManager.delete", "url":12, -"doc":"Separted classes and functions used by the conjunctions module. Note that these classes and variables are all imported higher up at the top of the conjunctions module. They can be referenced from there instead of digging in deeper to these submodules." +"doc":"Delete a data source from the AuroraX search engine Args: identifier (int): the data source unique identifier to delete Returns: 0 on success, raises error if an issue was encountered Raises: pyaurorax.exceptions.AuroraXAPIError: error during API call pyaurorax.exceptions.AuroraXUnauthorizedError: not allowed to perform task, or API key / user permissions are invalid pyaurorax.exceptions.AuroraXNotFoundError: data source not found pyaurorax.exceptions.AuroraXConflictError: a conflict occurred", +"func":1 }, { -"ref":"pyaurorax.conjunctions.classes.conjunction", -"url":13, -"doc":"Class definition for a conjunction" +"ref":"pyaurorax.search.sources.SourcesManager.update", +"url":12, +"doc":"Update a data source in the AuroraX search engine. Omitted fields are ignored during the update. Note that the identifier cannot be updated. If you need to update the data source's identifier, we recommend deletion of the original data source and recreation using the desired identifier. Args: identifier (int): the AuroraX unique identifier for the data source, required and cannot be updated program (str): the new program for the data source, defaults to None platform (str): the new platform for the data source, defaults to None instrument_type (str): the new instrument type for the data source, defaults to None source_type (str): the new source type for the data source, defaults to None . Options are in the pyaurorax.search.sources module, or at the top level using the pyaurorax.search.SOURCE_TYPE_ variables. 
display_name (str): the new display name for the data source, defaults to None metadata (Dict): the new metadata for the data source, defaults to None maintainers (str): the new maintainer AuroraX account email addresses, defaults to None ephemeris_metadata_schema (List[Dict]): a list of dictionaries capturing the metadata keys and values that can appear in ephemeris records associated with the data source, defaults to None data_product_metadata_schema (List[Dict]): a list of dictionaries capturing the metadata keys and values that can appear in data product records associated with the data source, defaults to None Returns: the updated DataSource record Raises: pyaurorax.exceptions.AuroraXAPIError: error during API call pyaurorax.exceptions.AuroraXUnauthorizedError: not allowed to perform task, or API key / user permissions are invalid pyaurorax.exceptions.AuroraXNotFoundError: data source not found", +"func":1 }, { -"ref":"pyaurorax.conjunctions.classes.conjunction.Conjunction", -"url":13, -"doc":"Conjunction object Attributes: conjunction_type: the type of location data used when the conjunction was found (either be 'nbtrace' or 'sbtrace') start: start timestamp of the conjunction end: end timestamp of the conjunction data_sources: data sources in the conjunction min_distance: minimum kilometer distance of the conjunction max_distance: maximum kilometer distance of the conjunction events: the sub-conjunctions that make up this over-arching conjunction (the conjunctions between each set of two data sources) Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model." +"ref":"pyaurorax.search.sources.DataSource", +"url":12, +"doc":"AuroraX data source record Attributes: identifier (int): the unique AuroraX data source identifier program (str): the program for this data source platform (str): the platform for this data source instrument_type (str): the instrument type for this data source source_type (str): the data source type for this data source. Options are in the pyaurorax.search.sources module, or at the top level using the pyaurorax.search.SOURCE_TYPE_ variables. display_name (str): the display name for this data source metadata (Dict): metadata for this data source (arbitrary keys and values) owner (str): the owner's email address of this data source maintainers (List[str]): the email addresses of AuroraX accounts that can alter this data source and its associated records ephemeris_metadata_schema (Dict): a list of dictionaries capturing the metadata keys and values that can appear in ephemeris records associated with this data source data_product_metadata_schema (Dict): a list of dictionaries capturing the metadata keys and values that can appear in data product records associated with this data source format (str): the format used when printing the data source, defaults to \"full_record\". Other options are in the pyaurorax.search.sources module, or at the top level using the pyaurorax.search.FORMAT_ variables." 
}, { -"ref":"pyaurorax.conjunctions.classes.conjunction.Conjunction.conjunction_type", -"url":13, +"ref":"pyaurorax.search.sources.DataSource.identifier", +"url":12, "doc":"" }, { -"ref":"pyaurorax.conjunctions.classes.conjunction.Conjunction.start", -"url":13, +"ref":"pyaurorax.search.sources.DataSource.program", +"url":12, "doc":"" }, { -"ref":"pyaurorax.conjunctions.classes.conjunction.Conjunction.end", -"url":13, +"ref":"pyaurorax.search.sources.DataSource.platform", +"url":12, "doc":"" }, { -"ref":"pyaurorax.conjunctions.classes.conjunction.Conjunction.data_sources", -"url":13, +"ref":"pyaurorax.search.sources.DataSource.instrument_type", +"url":12, "doc":"" }, { -"ref":"pyaurorax.conjunctions.classes.conjunction.Conjunction.min_distance", -"url":13, +"ref":"pyaurorax.search.sources.DataSource.source_type", +"url":12, "doc":"" }, { -"ref":"pyaurorax.conjunctions.classes.conjunction.Conjunction.max_distance", -"url":13, +"ref":"pyaurorax.search.sources.DataSource.display_name", +"url":12, +"doc":"" +}, +{ +"ref":"pyaurorax.search.sources.DataSource.metadata", +"url":12, +"doc":"" +}, +{ +"ref":"pyaurorax.search.sources.DataSource.owner", +"url":12, +"doc":"" +}, +{ +"ref":"pyaurorax.search.sources.DataSource.maintainers", +"url":12, +"doc":"" +}, +{ +"ref":"pyaurorax.search.sources.DataSource.ephemeris_metadata_schema", +"url":12, +"doc":"" +}, +{ +"ref":"pyaurorax.search.sources.DataSource.data_product_metadata_schema", +"url":12, +"doc":"" +}, +{ +"ref":"pyaurorax.search.sources.DataSource.stats", +"url":12, +"doc":"" +}, +{ +"ref":"pyaurorax.search.sources.DataSource.format", +"url":12, +"doc":"" +}, +{ +"ref":"pyaurorax.search.sources.DataSourceStatistics", +"url":12, +"doc":"Data source statistics information Attributes: ephemeris_count (int): total number of ephemeris records for this data source data_product_count (int): total number of ephemeris records for this data source earliest_ephemeris_loaded (datetime.datetime): timestamp of the earliest ephemeris record latest_ephemeris_loaded (datetime.datetime): timestamp of the latest ephemeris record earliest_data_product_loaded (datetime.datetime): timestamp of the earliest data_product record latest_data_product_loaded (datetime.datetime): timestamp of the latest data product record" +}, +{ +"ref":"pyaurorax.search.sources.DataSourceStatistics.ephemeris_count", +"url":12, +"doc":"" +}, +{ +"ref":"pyaurorax.search.sources.DataSourceStatistics.data_product_count", +"url":12, +"doc":"" +}, +{ +"ref":"pyaurorax.search.sources.DataSourceStatistics.earliest_ephemeris_loaded", +"url":12, +"doc":"" +}, +{ +"ref":"pyaurorax.search.sources.DataSourceStatistics.latest_ephemeris_loaded", +"url":12, "doc":"" }, { -"ref":"pyaurorax.conjunctions.classes.conjunction.Conjunction.events", +"ref":"pyaurorax.search.sources.DataSourceStatistics.earliest_data_product_loaded", +"url":12, +"doc":"" +}, +{ +"ref":"pyaurorax.search.sources.DataSourceStatistics.latest_data_product_loaded", +"url":12, +"doc":"" +}, +{ +"ref":"pyaurorax.search.sources.classes", "url":13, +"doc":"Class definitions used by the sources submodule" +}, +{ +"ref":"pyaurorax.search.sources.classes.data_source", +"url":14, +"doc":"AuroraX data source record" +}, +{ +"ref":"pyaurorax.search.sources.classes.data_source.FORMAT_BASIC_INFO", +"url":14, +"doc":"Data sources are returned with basic information: identifier, program, platform, instrument type, source type, and display name" +}, +{ +"ref":"pyaurorax.search.sources.classes.data_source.FORMAT_BASIC_INFO_WITH_METADATA", 
+"url":14, +"doc":"Data sources are returned with basic information, plus the metadata" +}, +{ +"ref":"pyaurorax.search.sources.classes.data_source.FORMAT_IDENTIFIER_ONLY", +"url":14, +"doc":"Data sources are returned with only the identifier" +}, +{ +"ref":"pyaurorax.search.sources.classes.data_source.FORMAT_FULL_RECORD", +"url":14, +"doc":"Data sources are returned with all information." +}, +{ +"ref":"pyaurorax.search.sources.classes.data_source.FORMAT_DEFAULT", +"url":14, +"doc":"Default data source format (basic info)" +}, +{ +"ref":"pyaurorax.search.sources.classes.data_source.SOURCE_TYPE_GROUND", +"url":14, +"doc":"Data source 'source_type' category for a ground instrument" +}, +{ +"ref":"pyaurorax.search.sources.classes.data_source.SOURCE_TYPE_LEO", +"url":14, +"doc":"Data source 'source_type' category for a low-earth orbiting satellite" +}, +{ +"ref":"pyaurorax.search.sources.classes.data_source.SOURCE_TYPE_HEO", +"url":14, +"doc":"Data source 'source_type' category for a highly-elliptical orbiting satellite" +}, +{ +"ref":"pyaurorax.search.sources.classes.data_source.SOURCE_TYPE_LUNAR", +"url":14, +"doc":"Data source 'source_type' category for a lunar orbiting satellite" +}, +{ +"ref":"pyaurorax.search.sources.classes.data_source.SOURCE_TYPE_EVENT_LIST", +"url":14, +"doc":"Data source 'source_type' category for a specially-curated event list" +}, +{ +"ref":"pyaurorax.search.sources.classes.data_source.SOURCE_TYPE_NOT_APPLICABLE", +"url":14, +"doc":"Data source 'source_type' category for a specially-curated event list" +}, +{ +"ref":"pyaurorax.search.sources.classes.data_source.DataSource", +"url":14, +"doc":"AuroraX data source record Attributes: identifier (int): the unique AuroraX data source identifier program (str): the program for this data source platform (str): the platform for this data source instrument_type (str): the instrument type for this data source source_type (str): the data source type for this data source. Options are in the pyaurorax.search.sources module, or at the top level using the pyaurorax.search.SOURCE_TYPE_ variables. display_name (str): the display name for this data source metadata (Dict): metadata for this data source (arbitrary keys and values) owner (str): the owner's email address of this data source maintainers (List[str]): the email addresses of AuroraX accounts that can alter this data source and its associated records ephemeris_metadata_schema (Dict): a list of dictionaries capturing the metadata keys and values that can appear in ephemeris records associated with this data source data_product_metadata_schema (Dict): a list of dictionaries capturing the metadata keys and values that can appear in data product records associated with this data source format (str): the format used when printing the data source, defaults to \"full_record\". Other options are in the pyaurorax.search.sources module, or at the top level using the pyaurorax.search.FORMAT_ variables." 
+}, +{ +"ref":"pyaurorax.search.sources.classes.data_source.DataSource.identifier", +"url":14, "doc":"" }, { -"ref":"pyaurorax.conjunctions.classes.search", +"ref":"pyaurorax.search.sources.classes.data_source.DataSource.program", "url":14, -"doc":"Class definition for a conjunction search" +"doc":"" }, { -"ref":"pyaurorax.conjunctions.classes.search.Search", +"ref":"pyaurorax.search.sources.classes.data_source.DataSource.platform", "url":14, -"doc":"Class representing a conjunction search Attributes: start: start timestamp of the search (inclusive) end: end timestamp of the search (inclusive) distance: the maximum distance allowed between data sources when searching for conjunctions. This can either be a number (int or float), or a dictionary modified from the output of the \"get_advanced_distances_combos()\" function. ground: list of ground instrument search parameters, defaults to [] Example: [{ \"programs\": [\"themis-asi\"], \"platforms\": [\"gillam\", \"rabbit lake\"], \"instrument_types\": [\"RGB\"], \"ephemeris_metadata_filters\": { \"logical_operator\": \"AND\", \"expressions\": [ { \"key\": \"calgary_apa_ml_v1\", \"operator\": \"in\", \"values\": [ \"classified as APA\" ] } ] } }] space: list of one or more space instrument search parameters, defaults to [] Example: [{ \"programs\": [\"themis-asi\", \"swarm\"], \"platforms\": [\"themisa\", \"swarma\"], \"instrument_types\": [\"footprint\"], \"ephemeris_metadata_filters\": { \"logical_operator\": \"AND\", \"expressions\": [ { \"key\": \"nbtrace_region\", \"operator\": \"in\", \"values\": [ \"north auroral oval\" ] } ] }, \"hemisphere\": [ \"northern\" ] }] events: list of one or more events search parameters, defaults to [] Example: [{ \"programs\": [ \"events\" ], \"instrument_types\": [ \"substorm onsets\" ] }] conjunction_types: list of conjunction types, defaults to [\"nbtrace\"]. Options are in the pyaurorax.conjunctions module, or at the top level using the pyaurorax.CONJUNCTION_TYPE_ variables. epoch_search_precision: the time precision to which conjunctions are calculated. Can be 30 or 60 seconds. Defaults to 60 seconds. Note - this parameter is under active development and still considered \"alpha\". response_format: JSON representation of desired data response format request: AuroraXResponse object returned when the search is executed request_id: unique ID assigned to the request by the AuroraX API request_url: unique URL assigned to the request by the AuroraX API executed: indicates if the search has been executed/started completed: indicates if the search has finished data_url: the URL where data is accessed query: the query for this request as JSON status: the status of the query data: the conjunctions found logs: all log messages outputed by the AuroraX API for this request Returns: a pyaurorax.conjunctions.Search object" +"doc":"" }, { -"ref":"pyaurorax.conjunctions.classes.search.Search.check_criteria_block_count_validity", +"ref":"pyaurorax.search.sources.classes.data_source.DataSource.instrument_type", "url":14, -"doc":"Check the number of of criteria blocks to see if there is too many. A max of 10 is allowed by the AuroraX conjunction search engine. An exception is raised if it was determined to have too many. 
Raises: pyaurorax.exceptions.AuroraXBadParametersException: too many criteria blocks are found", -"func":1 +"doc":"" }, { -"ref":"pyaurorax.conjunctions.classes.search.Search.get_advanced_distances_combos", +"ref":"pyaurorax.search.sources.classes.data_source.DataSource.source_type", "url":14, -"doc":"Get the advanced distances combinations for this search Args: default_distance: the default distance to use, defaults to None Returns: the advanced distances combinations", -"func":1 +"doc":"" }, { -"ref":"pyaurorax.conjunctions.classes.search.Search.distance", +"ref":"pyaurorax.search.sources.classes.data_source.DataSource.display_name", "url":14, -"doc":"Property for the distance parameter Returns: the distance dictionary with all combinations" +"doc":"" }, { -"ref":"pyaurorax.conjunctions.classes.search.Search.query", +"ref":"pyaurorax.search.sources.classes.data_source.DataSource.metadata", "url":14, -"doc":"Property for the query value Returns: the query parameter" +"doc":"" }, { -"ref":"pyaurorax.conjunctions.classes.search.Search.execute", +"ref":"pyaurorax.search.sources.classes.data_source.DataSource.owner", "url":14, -"doc":"Initiate a conjunction search request Raises: pyaurorax.exceptions.AuroraXBadParametersException: too many criteria blocks", -"func":1 +"doc":"" }, { -"ref":"pyaurorax.conjunctions.classes.search.Search.update_status", +"ref":"pyaurorax.search.sources.classes.data_source.DataSource.maintainers", "url":14, -"doc":"Update the status of this conjunction search request Args: status: the previously-retrieved status of this request (include to avoid requesting it from the API again), defaults to None", -"func":1 +"doc":"" }, { -"ref":"pyaurorax.conjunctions.classes.search.Search.check_for_data", +"ref":"pyaurorax.search.sources.classes.data_source.DataSource.ephemeris_metadata_schema", "url":14, -"doc":"Check to see if data is available for this conjunction search request Returns: True if data is available, else False", -"func":1 +"doc":"" }, { -"ref":"pyaurorax.conjunctions.classes.search.Search.get_data", +"ref":"pyaurorax.search.sources.classes.data_source.DataSource.data_product_metadata_schema", "url":14, -"doc":"Retrieve the data available for this conjunction search request", -"func":1 +"doc":"" }, { -"ref":"pyaurorax.conjunctions.classes.search.Search.wait", +"ref":"pyaurorax.search.sources.classes.data_source.DataSource.stats", "url":14, -"doc":"Block and wait until the request is complete and data is available for retrieval Args: poll_interval: time in seconds to wait between polling attempts, defaults to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME verbose: output poll times and other progress messages, defaults to False", -"func":1 +"doc":"" }, { -"ref":"pyaurorax.conjunctions.classes.search.Search.cancel", +"ref":"pyaurorax.search.sources.classes.data_source.DataSource.format", "url":14, -"doc":"Cancel the conjunction search request This method returns immediately by default since the API processes this request asynchronously. If you would prefer to wait for it to be completed, set the 'wait' parameter to True. You can adjust the polling time using the 'poll_interval' parameter. Args: wait: wait until the cancellation request has been completed (may wait for several minutes) poll_interval: seconds to wait between polling calls, defaults to STANDARD_POLLING_SLEEP_TIME. 
verbose: output poll times and other progress messages, defaults to False Returns: 1 on success Raises: pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation", -"func":1 +"doc":"" }, { -"ref":"pyaurorax.conjunctions.conjunctions", +"ref":"pyaurorax.search.sources.classes.data_source_stats", "url":15, -"doc":"Functions for performing conjunction searches" +"doc":"Data source statistics information" }, { -"ref":"pyaurorax.conjunctions.conjunctions.search", +"ref":"pyaurorax.search.sources.classes.data_source_stats.DataSourceStatistics", "url":15, -"doc":"Search for conjunctions between data sources By default, this function will block and wait until the request completes and all data is downloaded. If you don't want to wait, set the 'return_immediately value to True. The Search object will be returned right after the search has been started, and you can use the helper functions as part of that object to get the data when it's done. Args: start: start timestamp of the search (inclusive) end: end timestamp of the search (inclusive) distance: the maximum distance allowed between data sources when searching for conjunctions. This can either be a number (int or float), or a dictionary modified from the output of the \"get_advanced_distances_combos()\" function. ground: list of ground instrument search parameters, defaults to [] Example: [{ \"programs\": [\"themis-asi\"], \"platforms\": [\"gillam\", \"rabbit lake\"], \"instrument_types\": [\"RGB\"], \"ephemeris_metadata_filters\": { \"logical_operator\": \"AND\", \"expressions\": [ { \"key\": \"calgary_apa_ml_v1\", \"operator\": \"in\", \"values\": [ \"classified as APA\" ] } ] } }] space: list of one or more space instrument search parameters, defaults to [] Example: [{ \"programs\": [\"themis-asi\", \"swarm\"], \"platforms\": [\"themisa\", \"swarma\"], \"instrument_types\": [\"footprint\"], \"ephemeris_metadata_filters\": { \"logical_operator\": \"AND\", \"expressions\": [ { \"key\": \"nbtrace_region\", \"operator\": \"in\", \"values\": [ \"north auroral oval\" ] } ] }, \"hemisphere\": [ \"northern\" ] }] events: list of one or more events search parameters, defaults to [] Example: [{ \"programs\": [ \"events\" ], \"instrument_types\": [ \"substorm onsets\" ] }] conjunction_types: list of conjunction types, defaults to [] (meaning all conjunction types). Options are in the pyaurorax.conjunctions module, or at the top level using the pyaurorax.CONJUNCTION_TYPE_ variables. epoch_search_precision: the time precision to which conjunctions are calculated. Can be 30 or 60 seconds. Defaults to 60 seconds. Note - this parameter is under active development and still considered \"alpha\". 
response_format: JSON representation of desired data response format poll_interval: seconds to wait between polling calls, defaults to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME return_immediately: initiate the search and return without waiting for data to be received, defaults to False verbose: show the progress of the request using the request log, defaults Returns: a pyaurorax.conjunctions.Search object", -"func":1 +"doc":"Data source statistics information Attributes: ephemeris_count (int): total number of ephemeris records for this data source data_product_count (int): total number of ephemeris records for this data source earliest_ephemeris_loaded (datetime.datetime): timestamp of the earliest ephemeris record latest_ephemeris_loaded (datetime.datetime): timestamp of the latest ephemeris record earliest_data_product_loaded (datetime.datetime): timestamp of the earliest data_product record latest_data_product_loaded (datetime.datetime): timestamp of the latest data product record" }, { -"ref":"pyaurorax.conjunctions.conjunctions.describe", +"ref":"pyaurorax.search.sources.classes.data_source_stats.DataSourceStatistics.ephemeris_count", "url":15, -"doc":"Describe a conjunction search as an \"SQL-like\" string Args: search_obj: the conjunction search to describe Returns: the \"SQL-like\" string describing the conjunction search object", -"func":1 +"doc":"" }, { -"ref":"pyaurorax.conjunctions.conjunctions.get_request_url", +"ref":"pyaurorax.search.sources.classes.data_source_stats.DataSourceStatistics.data_product_count", "url":15, -"doc":"Get the conjunction search request URL for a given request ID. This URL can be used for subsequent pyaurorax.requests function calls. Primarily this method facilitates delving into details about a set of already-submitted conjunction searches. Args: request_id: the request identifier Returns: the request URL", -"func":1 +"doc":"" }, { -"ref":"pyaurorax.conjunctions.swarmaurora", -"url":16, -"doc":"Interact with Swarm-Aurora using conjunction searches from AuroraX" +"ref":"pyaurorax.search.sources.classes.data_source_stats.DataSourceStatistics.earliest_ephemeris_loaded", +"url":15, +"doc":"" }, { -"ref":"pyaurorax.conjunctions.swarmaurora.get_url", -"url":16, -"doc":"Get a URL that displays a conjunction search in the Swarm-Aurora Conjunction Finder Args: search_obj: a conjunction search object, must be a completed search with the 'request_id' value populated Returns: the Swarm-Aurora Conjunction Finder URL for this conjunction search", -"func":1 +"ref":"pyaurorax.search.sources.classes.data_source_stats.DataSourceStatistics.latest_ephemeris_loaded", +"url":15, +"doc":"" }, { -"ref":"pyaurorax.conjunctions.swarmaurora.open_in_browser", -"url":16, -"doc":"In a browser, open a conjunction search in the Swarm-Aurora Conjunction Finder. Args: search_obj: a conjunction search object, must be a completed search with the 'request_id' value populated browser: the browser type to load using. Default is your default browser. Some common other options are \"google-chrome\", \"firefox\", or \"safari\". 
For all available options, refer to https: docs.python.org/3/library/webbrowser.html webbrowser.get", -"func":1 +"ref":"pyaurorax.search.sources.classes.data_source_stats.DataSourceStatistics.earliest_data_product_loaded", +"url":15, +"doc":"" +}, +{ +"ref":"pyaurorax.search.sources.classes.data_source_stats.DataSourceStatistics.latest_data_product_loaded", +"url":15, +"doc":"" }, { -"ref":"pyaurorax.conjunctions.swarmaurora.create_custom_import_file", +"ref":"pyaurorax.search.ephemeris", "url":16, -"doc":"Generate a Swarm-Aurora custom import file for a given conjunction search Args: search_obj: a conjunction search object, must be a completed search with the 'request_id' value populated filename: the output filename, default is 'swarmaurora_custom_import_file_{requestID}.json' returnDict: return the custom import file contents as a dictionary instead of saving a file, default is False Returns: the filename of the saved custom import file, or a dictionary with the file contents if returnDict is set to True", -"func":1 +"doc":"Use the AuroraX search engine to search and upload ephemeris records. Note that all functions and classes from submodules are all imported at this level of the ephemeris module. They can be referenced from here instead of digging in deeper to the submodules." }, { -"ref":"pyaurorax.conjunctions.swarmaurora.tools", -"url":17, -"doc":"Functions for using conjunction searches with Swarm-Aurora" +"ref":"pyaurorax.search.ephemeris.EphemerisManager", +"url":16, +"doc":"The EphemerisManager object is initialized within every PyAuroraX object. It acts as a way to access the submodules and carry over configuration information in the super class." }, { -"ref":"pyaurorax.conjunctions.swarmaurora.tools.get_url", -"url":17, -"doc":"Get a URL that displays a conjunction search in the Swarm-Aurora Conjunction Finder Args: search_obj: a conjunction search object, must be a completed search with the 'request_id' value populated Returns: the Swarm-Aurora Conjunction Finder URL for this conjunction search", +"ref":"pyaurorax.search.ephemeris.EphemerisManager.search", +"url":16, +"doc":"Search for ephemeris records By default, this function will block and wait until the request completes and all data is downloaded. If you don't want to wait, set the 'return_immediately value to True. The Search object will be returned right after the search has been started, and you can use the helper functions as part of that object to get the data when it's done. Note: At least one search criteria from programs, platforms, or instrument_types, must be specified. 
Args: start: start timestamp of the search (inclusive) end: end timestamp of the search (inclusive) programs: list of programs to search through, defaults to None platforms: list of platforms to search through, defaults to None instrument_types: list of instrument types to search through, defaults to None metadata_filters: list of dictionaries describing metadata keys and values to filter on, defaults to None Example: [{ \"key\": \"nbtrace_region\", \"operator\": \"in\", \"values\": [\"north polar cap\"] }] metadata_filters_logical_operator: the logical operator to use when evaluating metadata filters (either 'AND' or 'OR'), defaults to \"AND\" response_format: JSON representation of desired data response format poll_interval: time in seconds to wait between polling attempts, defaults to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME return_immediately: initiate the search and return without waiting for data to be received, defaults to False verbose: output poll times and other progress messages, defaults to False Returns: A pyaurorax search.EphemerisSearch object Raises: pyaurorax.exceptions.AuroraXAPIError: An API error was encountered", "func":1 }, { -"ref":"pyaurorax.conjunctions.swarmaurora.tools.open_in_browser", -"url":17, -"doc":"In a browser, open a conjunction search in the Swarm-Aurora Conjunction Finder. Args: search_obj: a conjunction search object, must be a completed search with the 'request_id' value populated browser: the browser type to load using. Default is your default browser. Some common other options are \"google-chrome\", \"firefox\", or \"safari\". For all available options, refer to https: docs.python.org/3/library/webbrowser.html webbrowser.get", +"ref":"pyaurorax.search.ephemeris.EphemerisManager.upload", +"url":16, +"doc":"Upload ephemeris records to AuroraX Args: identifier: AuroraX data source ID records: ephemeris records to upload validate_source: validate all records before uploading, defaults to False chunk_size: number of records to upload in a single call, defaults to 500 Returns: 0 for success, raises exception on error Raises: pyaurorax.exceptions.AuroraXUploadError: upload error pyaurorax.exceptions.AuroraXAPIError: An API error was encountered", "func":1 }, { -"ref":"pyaurorax.conjunctions.swarmaurora.tools.create_custom_import_file", -"url":17, -"doc":"Generate a Swarm-Aurora custom import file for a given conjunction search Args: search_obj: a conjunction search object, must be a completed search with the 'request_id' value populated filename: the output filename, default is 'swarmaurora_custom_import_file_{requestID}.json' returnDict: return the custom import file contents as a dictionary instead of saving a file, default is False Returns: the filename of the saved custom import file, or a dictionary with the file contents if returnDict is set to True", +"ref":"pyaurorax.search.ephemeris.EphemerisManager.delete", +"url":16, +"doc":"Delete ephemeris records between a timeframe. The API processes this request asynchronously, so this method will return immediately whether or not the data has already been deleted. 
Args: data_source: data source associated with the data product records (note that identifier, program, platform, and instrument_type are required) start: timestamp marking beginning of range to delete records for, inclusive end: timestamp marking end of range to delete records for, inclusive Returns: 0 on success Raises: pyaurorax.exceptions.AuroraXNotFoundError: source not found pyaurorax.exceptions.AuroraXUnauthorizedError: invalid API key for this operation pyaurorax.exceptions.AuroraXAPIError: An API error was encountered", "func":1 }, { -"ref":"pyaurorax.data_products", -"url":18, -"doc":"The data_products module is used to search and upload data product records within AuroraX. One example of a data product is a keogram. Note that all functions and classes from submodules are all imported at this level of the data_products module. They can be referenced from here instead of digging in deeper to the submodules." +"ref":"pyaurorax.search.ephemeris.EphemerisManager.describe", +"url":16, +"doc":"Describe an ephemeris search as an \"SQL-like\" string. Either a EphemerisSearch object can be supplied, or a dictionary of the raw JSON query. Args: search_obj: the ephemeris search to describe, optional query_dict: the ephemeris search query represented as a raw dictionary, optional Returns: the \"SQL-like\" string describing the ephemeris search object", +"func":1 }, { -"ref":"pyaurorax.data_products.DATA_PRODUCT_TYPE_KEOGRAM", -"url":18, -"doc":"Data product type for keograms. Keograms are a 2-D representation of a series of images, and are one of the most popular data products that auroral science uses. More information can be found at https: docs.aurorax.space/about_the_data/standards/ keograms." +"ref":"pyaurorax.search.ephemeris.EphemerisManager.get_request_url", +"url":16, +"doc":"Get the ephemeris search request URL for a given request ID. This URL can be used for subsequent pyaurorax.requests function calls. Primarily this method facilitates delving into details about a set of already-submitted ephemeris searches. Args: request_id: the request identifier Returns: the request URL", +"func":1 }, { -"ref":"pyaurorax.data_products.DATA_PRODUCT_TYPE_MONTAGE", -"url":18, -"doc":"Data product type for montages. Like keograms, montages are another representation of a series of images. However, montages are not a 2D representation but rather a collage of thumnbail images for the period of time. An example can be found at https: data.phys.ucalgary.ca/sort_by_project/THEMIS/asi/stream2/2021/12/28/gill_themis19/20211228__gill_themis19_full-montage.pgm.jpg" +"ref":"pyaurorax.search.ephemeris.classes", +"url":17, +"doc":"Separated classes and functions used by the ephemeris module. Note that these classes and variables are all imported higher up at the top of the ephemeris module. They can be referenced from there instead of digging in deeper to these submodules." }, { -"ref":"pyaurorax.data_products.DATA_PRODUCT_TYPE_MOVIE", +"ref":"pyaurorax.search.ephemeris.classes.search", "url":18, -"doc":"Data product type for movies. Movies are timelapse video files of auroral data, usually as MP4 or MPEG. They can consist of frames for a whole night, or an hour, and can be at any cadence that is most appropriate." +"doc":"Class definition for an ephemeris search" }, { -"ref":"pyaurorax.data_products.DATA_PRODUCT_TYPE_SUMMARY_PLOT", +"ref":"pyaurorax.search.ephemeris.classes.search.EphemerisSearch", "url":18, -"doc":"Data product type for summary plots. 
A summary plot can be any type of plot that shows auroral data in a summary format, for example a background-subtracted meridian scanning photometer plot showing counts in Rayleighs." +"doc":"Class representing an ephemeris search Note: At least one search criteria from programs, platforms, or instrument_types must be specified. Args: start: start timestamp of the search (inclusive) end: end timestamp of the search (inclusive) programs: list of programs to search through, defaults to None platforms: list of platforms to search through, defaults to None instrument_types: list of instrument types to search through, defaults to None metadata_filters: list of dictionaries describing metadata keys and values to filter on, defaults to None e.g. { \"key\": \"string\", \"operator\": \"=\", \"values\": [ \"string\" ] } metadata_filters_logical_operator: the logical operator to use when evaluating metadata filters (either 'AND' or 'OR'), defaults to \"AND\" response_format: JSON representation of desired data response format request: AuroraXResponse object returned when the search is executed request_id: unique ID assigned to the request by the AuroraX API request_url: unique URL assigned to the request by the AuroraX API executed: indicates if the search has been executed/started completed: indicates if the search has finished data_url: the URL where data is accessed query: the query for this request as JSON status: the status of the query data: the ephemeris records found logs: all log messages outputted by the AuroraX API for this request" }, { -"ref":"pyaurorax.data_products.DATA_PRODUCT_TYPE_DATA_AVAILABILITY", +"ref":"pyaurorax.search.ephemeris.classes.search.EphemerisSearch.query", "url":18, -"doc":"Data product type for data availability. The AuroraX data availability system does not account for times when data was not expected to be collected, such as summer shutdowns due to inadequate night hours. This data product type for 'data availbility' is meant to be used as a smarter data availability mechanism for Aurora." +"doc":"Property for the query value" }, { -"ref":"pyaurorax.data_products.search", +"ref":"pyaurorax.search.ephemeris.classes.search.EphemerisSearch.execute", "url":18, -"doc":"Search for data product records By default, this function will block and wait until the request completes and all data is downloaded. If you don't want to wait, set the 'return_immediately value to True. The Search object will be returned right after the search has been started, and you can use the helper functions as part of that object to get the data when it's done. Note: At least one search criteria from programs, platforms, or instrument_types, must be specified. Args: start: start timestamp of the search (inclusive) end: end timestamp of the search (inclusive) programs: list of programs to search through, defaults to None platforms: list of platforms to search through, defaults to None instrument_types: list of instrument types to search through, defaults to None data_product_types: list of dictionaries describing data product types to filter on e.g. \"keogram\", defaults to None. Options are in the pyaurorax.data_products module, or at the top level using the pyaurorax.DATA_PRODUCT_TYPE variables. 
metadata_filters: list of dictionaries describing metadata keys and values to filter on, defaults to None Example: [{ \"key\": \"nbtrace_region\", \"operator\": \"in\", \"values\": [\"north polar cap\"] }] metadata_filters_logical_operator: the logical operator to use when evaluating metadata filters (either 'AND' or 'OR'), defaults to \"AND\" response_format: JSON representation of desired data response format poll_interval: time in seconds to wait between polling attempts, defaults to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME return_immediately: initiate the search and return without waiting for data to be received, defaults to False verbose: output poll times and other progress messages, defaults to False Returns: a pyaurorax.data_products.Search object", +"doc":"Initiate ephemeris search request Raises: pyaurorax.exceptions.AuroraXError: invalid request parameters are set", "func":1 }, { -"ref":"pyaurorax.data_products.upload", +"ref":"pyaurorax.search.ephemeris.classes.search.EphemerisSearch.update_status", "url":18, -"doc":"Upload data product records to AuroraX Args: identifier: the AuroraX data source ID records: data product records to upload validate_source: validate all records before uploading, defaults to False Returns: 0 for success, raises exception on error Raises: pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected content error pyaurorax.exceptions.AuroraXUploadException: upload error pyaurorax.exceptions.AuroraXValidationException: data source validation error", +"doc":"Update the status of this ephemeris search request Args: status: the previously-retrieved status of this request (include to avoid requesting it from the API again), defaults to None", "func":1 }, { -"ref":"pyaurorax.data_products.delete_urls", +"ref":"pyaurorax.search.ephemeris.classes.search.EphemerisSearch.check_for_data", "url":18, -"doc":"Delete data products by URL. The API processes this request asynchronously, so this method will return immediately whether or not the data has already been deleted. Args: data_source: data source associated with the data product records (note that identifier, program, platform, and instrument_type are required) urls: URLs of data product records to delete Returns: 0 on success Raises: pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error pyaurorax.exceptions.AuroraXBadParametersException: invalid parameters entered pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation", +"doc":"Check to see if data is available for this ephemeris search request Returns: True if data is available, else False", "func":1 }, { -"ref":"pyaurorax.data_products.delete", +"ref":"pyaurorax.search.ephemeris.classes.search.EphemerisSearch.get_data", "url":18, -"doc":"Delete data products associated with a data source within a date range. The API processes this request asynchronously, so this method will return immediately whether or not the data has already been deleted. Args: data_source: data source associated with the data product records (note that identifier, program, platform, and instrument_type are required) start: timestamp marking beginning of range to delete records for, inclusive end: timestamp marking end of range to delete records for, inclusive data_product_types: specific types of data product to delete, e.g. [\"keogram\", \"movie\"]. 
If omitted, all data product types will be deleted. Returns: 1 on success Raises: pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error pyaurorax.exceptions.AuroraXNotFoundException: source not found pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation", +"doc":"Retrieve the data available for this ephemeris search request", "func":1 }, { -"ref":"pyaurorax.data_products.describe", +"ref":"pyaurorax.search.ephemeris.classes.search.EphemerisSearch.wait", "url":18, -"doc":"Describe a data product search as an \"SQL-like\" string Args: search_obj: the data product search object to describe Returns: the \"SQL-like\" string describing the data product search object", +"doc":"Block and wait for the request to complete and data is available for retrieval Args: poll_interval: time in seconds to wait between polling attempts, defaults to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME verbose: output poll times and other progress messages, defaults to False", "func":1 }, { -"ref":"pyaurorax.data_products.get_request_url", +"ref":"pyaurorax.search.ephemeris.classes.search.EphemerisSearch.cancel", "url":18, -"doc":"Get the data product search request URL for a given request ID. This URL can be used for subsequent pyaurorax.requests function calls. Primarily this method facilitates delving into details about a set of already-submitted data product searches. Args: request_id: the request identifier Returns: the request URL", +"doc":"Cancel the ephemeris search request This method returns immediately by default since the API processes this request asynchronously. If you would prefer to wait for it to be completed, set the 'wait' parameter to True. You can adjust the polling time using the 'poll_interval' parameter. Args: wait: wait until the cancellation request has been completed (may wait for several minutes) poll_interval: seconds to wait between polling calls, defaults to STANDARD_POLLING_SLEEP_TIME. verbose: output poll times and other progress messages, defaults to False Returns: 1 on success Raises: pyaurorax.exceptions.AuroraXUnauthorizedError: invalid API key for this operation pyaurorax.exceptions.AuroraXAPIError: An API error was encountered", "func":1 }, { -"ref":"pyaurorax.data_products.DataProduct", -"url":18, -"doc":"Data product object Attributes: data_source: data source that the ephemeris record is associated with data_product_type: data product type (\"keogram\", \"movie\", \"summary_plot\") start: starting timestamp for the record (assumed it is in UTC), inclusive end: ending timestamp for the record (assumed it is in UTC), inclusive url: the URL of data product metdata: metadata for this record (arbitrary keys and values) Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model." 
+"ref":"pyaurorax.search.ephemeris.classes.ephemeris", +"url":19, +"doc":"Class definition for an ephemeris record" }, { -"ref":"pyaurorax.data_products.DataProduct.data_source", -"url":18, -"doc":"" +"ref":"pyaurorax.search.ephemeris.classes.ephemeris.EphemerisData", +"url":19, +"doc":"Ephemeris object Attributes: data_source: data source that the ephemeris record is associated with epoch: timestamp for the record (assumed it is in UTC) location_geo: Location object containing geographic latitude and longitude location_gsm: Location object containing GSM latitude and longitude (leave empty for data sources with a type of 'ground') nbtrace: Location object with north B-trace geographic latitude and longitude sbtrace: Location object with south B-trace geographic latitude and longitude metadata: metadata for this record (arbitrary keys and values)" }, { -"ref":"pyaurorax.data_products.DataProduct.data_product_type", -"url":18, -"doc":"" +"ref":"pyaurorax.search.ephemeris.classes.ephemeris.EphemerisData.to_json_serializable", +"url":19, +"doc":"Convert object to a JSON-serializable object (ie. translate datetime objects to strings) Returns: a dictionary object that is JSON-serializable", +"func":1 }, { -"ref":"pyaurorax.data_products.DataProduct.start", -"url":18, -"doc":"" +"ref":"pyaurorax.search.metadata", +"url":20, +"doc":"Interacting with the data source metadata schemas. Note that all functions and classes from submodules are all imported at this level of the metadata module. They can be referenced from here instead of digging in deeper to the submodules." }, { -"ref":"pyaurorax.data_products.DataProduct.end", -"url":18, -"doc":"" +"ref":"pyaurorax.search.metadata.MetadataManager", +"url":20, +"doc":"The MetadataManager object is initialized within every PyAuroraX object. It acts as a way to access the submodules and carry over configuration information in the super class." }, { -"ref":"pyaurorax.data_products.DataProduct.url", -"url":18, -"doc":"" +"ref":"pyaurorax.search.metadata.MetadataManager.validate", +"url":20, +"doc":"Validate a metadata record against a schema. This checks that the key names match and there aren't fewer or more keys than expected. Args: schema: the metadata schema to validate against record: metadata record to validate Returns: True if the metadata record is valid, False if it is not", +"func":1 }, { -"ref":"pyaurorax.data_products.DataProduct.metadata", -"url":18, -"doc":"" +"ref":"pyaurorax.search.metadata.MetadataManager.get_ephemeris_schema", +"url":20, +"doc":"Retrieve the ephemeris metadata schema for a data source Args: identifier: the AuroraX data source ID Returns: the ephemeris metadata schema for the data source", +"func":1 }, { -"ref":"pyaurorax.data_products.DataProduct.to_json_serializable", -"url":18, -"doc":"Convert object to a JSON-serializable object (ie. 
translate datetime objects to strings) Returns: a dictionary object that is JSON-serializable", +"ref":"pyaurorax.search.metadata.MetadataManager.get_data_products_schema", +"url":20, +"doc":"Retrieve the data products metadata schema for a data source Args: identifier: the AuroraX data source ID Returns: the data products metadata schema for the data source", "func":1 }, { -"ref":"pyaurorax.data_products.Search", -"url":18, -"doc":"Class representing a data product search Attributes: start: start timestamp of the search (inclusive) end: end timestamp of the search (inclusive) programs: list of program names to search platforms: list of platform names to search instrument_types: list of instrument types to search data_product_types: list of dictionaries describing data product types to filter on e.g. \"keogram\", defaults to None. Options are in the pyaurorax.data_products module, or at the top level using the pyaurorax.DATA_PRODUCT_TYPE variables. metadata_filters: list of dictionaries describing metadata keys and values to filter on, defaults to None Example: [{ \"key\": \"nbtrace_region\", \"operator\": \"in\", \"values\": [\"north polar cap\"] }] metadata_filters_logical_operator: the logical operator to use when evaluating metadata filters (either 'AND' or 'OR'), defaults to \"AND\" response_format: JSON representation of desired data response format request: AuroraXResponse object returned when the search is executed request_id: unique ID assigned to the request by the AuroraX API request_url: unique URL assigned to the request by the AuroraX API executed: indicates if the search has been executed/started completed: indicates if the search has finished data_url: the URL where data is accessed query: the query for this request as JSON status: the status of the query data: the data product records found logs: all log messages outputed by the AuroraX API for this request" +"ref":"pyaurorax.search.api", +"url":21, +"doc":"Interface for AuroraX API requests. Primarily an under-the-hood module not needed for most use-cases." }, { -"ref":"pyaurorax.data_products.Search.query", -"url":18, -"doc":"Property for the query value" +"ref":"pyaurorax.search.api.AuroraXAPIRequest", +"url":21, +"doc":"Class definition for an AuroraX API request Attributes: url (str): API endpoint URL for the request method (str): the HTTP method to use. Valid values are: get , post , put , delete , patch params (Dict): URL parameters to send in the request, defaults to {} body (Dict): the body of the request (ie. post data), defaults to {} headers (Dict): any headers to send as part of the request (in addition to the default ones), defaults to {} null_response (bool): signifies if we expect a response from the API that has no body/data in it (ie. 
requests to upload data that respond with just a 202 status code), defaults to False " }, { -"ref":"pyaurorax.data_products.Search.execute", -"url":18, -"doc":"Initiate a data product search request", +"ref":"pyaurorax.search.api.AuroraXAPIRequest.execute", +"url":21, +"doc":"Execute an AuroraX API request Returns: an pyaurorax.search.api.AuroraXAPIResponse object Raises: pyaurorax.exceptions.AuroraXAPIError: error during API call", "func":1 }, { -"ref":"pyaurorax.data_products.Search.update_status", -"url":18, -"doc":"Update the status of this data product search request Args: status: the previously-retrieved status of this request (include to avoid requesting it from the API again), defaults to None", -"func":1 +"ref":"pyaurorax.search.api.AuroraXAPIResponse", +"url":21, +"doc":"Class definition for an AuroraX API response Attributes: request (Any): the request object data (Any): the data received as part of the request status_code (int): the HTTP status code received when making the request" }, { -"ref":"pyaurorax.data_products.Search.check_for_data", -"url":18, -"doc":"Check to see if data is available for this data product search request Returns: True if data is available, else False", -"func":1 +"ref":"pyaurorax.search.api.classes", +"url":22, +"doc":"Class definitions used by the api submodule" }, { -"ref":"pyaurorax.data_products.Search.get_data", -"url":18, -"doc":"Retrieve the data available for this data product search request", -"func":1 +"ref":"pyaurorax.search.api.classes.request", +"url":23, +"doc":"Class definition for an AuroraX API request" }, { -"ref":"pyaurorax.data_products.Search.wait", -"url":18, -"doc":"Block and wait for the request to complete and data is available for retrieval Args: poll_interval: time in seconds to wait between polling attempts, defaults to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME verbose: output poll times and other progress messages, defaults to False", -"func":1 +"ref":"pyaurorax.search.api.classes.request.AuroraXAPIRequest", +"url":23, +"doc":"Class definition for an AuroraX API request Attributes: url (str): API endpoint URL for the request method (str): the HTTP method to use. Valid values are: get , post , put , delete , patch params (Dict): URL parameters to send in the request, defaults to {} body (Dict): the body of the request (ie. post data), defaults to {} headers (Dict): any headers to send as part of the request (in addition to the default ones), defaults to {} null_response (bool): signifies if we expect a response from the API that has no body/data in it (ie. requests to upload data that respond with just a 202 status code), defaults to False " }, { -"ref":"pyaurorax.data_products.Search.cancel", -"url":18, -"doc":"Cancel the data product search request This method returns immediately by default since the API processes this request asynchronously. If you would prefer to wait for it to be completed, set the 'wait' parameter to True. You can adjust the polling time using the 'poll_interval' parameter. Args: wait: wait until the cancellation request has been completed (may wait for several minutes) poll_interval: seconds to wait between polling calls, defaults to STANDARD_POLLING_SLEEP_TIME. 
verbose: output poll times and other progress messages, defaults to False Returns: 1 on success Raises: pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation", +"ref":"pyaurorax.search.api.classes.request.AuroraXAPIRequest.execute", +"url":23, +"doc":"Execute an AuroraX API request Returns: an pyaurorax.search.api.AuroraXAPIResponse object Raises: pyaurorax.exceptions.AuroraXAPIError: error during API call", "func":1 }, { -"ref":"pyaurorax.data_products.classes", -"url":19, -"doc":"Separted classes and functions used by the data_products module. Note that these classes and variables are all imported higher up at the top of the data_products module. They can be referenced from there instead of digging in deeper to these submodules." +"ref":"pyaurorax.search.api.classes.response", +"url":24, +"doc":"Class definition for an AuroraX API response" }, { -"ref":"pyaurorax.data_products.classes.data_product", -"url":20, -"doc":"Class definition for a data product" +"ref":"pyaurorax.search.api.classes.response.AuroraXAPIResponse", +"url":24, +"doc":"Class definition for an AuroraX API response Attributes: request (Any): the request object data (Any): the data received as part of the request status_code (int): the HTTP status code received when making the request" }, { -"ref":"pyaurorax.data_products.classes.data_product.DataProduct", -"url":20, -"doc":"Data product object Attributes: data_source: data source that the ephemeris record is associated with data_product_type: data product type (\"keogram\", \"movie\", \"summary_plot\") start: starting timestamp for the record (assumed it is in UTC), inclusive end: ending timestamp for the record (assumed it is in UTC), inclusive url: the URL of data product metdata: metadata for this record (arbitrary keys and values) Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model." +"ref":"pyaurorax.search.util", +"url":25, +"doc":"Utility methods. For example, converting arbitrary geographic locations to North/South B-trace geographic locations." }, { -"ref":"pyaurorax.data_products.classes.data_product.DataProduct.data_source", -"url":20, -"doc":"" +"ref":"pyaurorax.search.util.UtilManager", +"url":25, +"doc":"The UtilManager object is initialized within every PyAuroraX object. It acts as a way to access the submodules and carry over configuration information in the super class." }, { -"ref":"pyaurorax.data_products.classes.data_product.DataProduct.data_product_type", -"url":20, -"doc":"" +"ref":"pyaurorax.search.util.UtilManager.ground_geo_to_nbtrace", +"url":25, +"doc":"Convert geographic location to North B-Trace geographic location The timestamp is required because when calculating the B-trace values, the location is converted into geomagnetic coordinates using AACGM. This conversion is different based on the timestamp since the magnetic coordinates change over time. 
Args: geo_location (Location): a Location object representing the geographic location dt (datetime.datetime): timestamp for this set of latitudes and longitudes Returns: the north B-trace location as a Location object", +"func":1 }, { -"ref":"pyaurorax.data_products.classes.data_product.DataProduct.start", -"url":20, -"doc":"" +"ref":"pyaurorax.search.util.UtilManager.ground_geo_to_sbtrace", +"url":25, +"doc":"Convert geographic location to South B-Trace geographic location The timestamp is required because when calculating the B-trace values, the location is converted into geomagnetic coordinates using AACGM. This conversion is different based on the timestamp since the magnetic coordinates change over time. Args: geo_location (Location): a Location object representing the geographic location dt (datetime.datetime): timestamp for this set of latitudes and longitudes Returns: the south B-trace location as a Location object", +"func":1 }, { -"ref":"pyaurorax.data_products.classes.data_product.DataProduct.end", -"url":20, -"doc":"" +"ref":"pyaurorax.search.conjunctions", +"url":26, +"doc":"Use the AuroraX search engine to find conjunctions between groupings of data sources. Note that all functions and classes from submodules are all imported at this level of the conjunctions module. They can be referenced from here instead of digging in deeper to the submodules." }, { -"ref":"pyaurorax.data_products.classes.data_product.DataProduct.url", -"url":20, -"doc":"" +"ref":"pyaurorax.search.conjunctions.ConjunctionsManager", +"url":26, +"doc":"The ConjunctionsManager object is initialized within every PyAuroraX object. It acts as a way to access the submodules and carry over configuration information in the super class." }, { -"ref":"pyaurorax.data_products.classes.data_product.DataProduct.metadata", -"url":20, -"doc":"" +"ref":"pyaurorax.search.conjunctions.ConjunctionsManager.swarmaurora", +"url":26, +"doc":"Access to the swarmaurora submodule from within a PyAuroraX object." }, { -"ref":"pyaurorax.data_products.classes.data_product.DataProduct.to_json_serializable", -"url":20, -"doc":"Convert object to a JSON-serializable object (ie. translate datetime objects to strings) Returns: a dictionary object that is JSON-serializable", +"ref":"pyaurorax.search.conjunctions.ConjunctionsManager.search", +"url":26, +"doc":"Search for conjunctions between data sources By default, this function will block and wait until the request completes and all data is downloaded. If you don't want to wait, set the 'return_immediately value to True. The Search object will be returned right after the search has been started, and you can use the helper functions as part of that object to get the data when it's done. Args: start: start timestamp of the search (inclusive) end: end timestamp of the search (inclusive) distance: the maximum distance allowed between data sources when searching for conjunctions. This can either be a number (int or float), or a dictionary modified from the output of the \"get_advanced_distances_combos()\" function. 
ground: list of ground instrument search parameters, defaults to [] Example: [{ \"programs\": [\"themis-asi\"], \"platforms\": [\"gillam\", \"rabbit lake\"], \"instrument_types\": [\"RGB\"], \"ephemeris_metadata_filters\": { \"logical_operator\": \"AND\", \"expressions\": [ { \"key\": \"calgary_apa_ml_v1\", \"operator\": \"in\", \"values\": [ \"classified as APA\" ] } ] } }] space: list of one or more space instrument search parameters, defaults to [] Example: [{ \"programs\": [\"themis-asi\", \"swarm\"], \"platforms\": [\"themisa\", \"swarma\"], \"instrument_types\": [\"footprint\"], \"ephemeris_metadata_filters\": { \"logical_operator\": \"AND\", \"expressions\": [ { \"key\": \"nbtrace_region\", \"operator\": \"in\", \"values\": [ \"north auroral oval\" ] } ] }, \"hemisphere\": [ \"northern\" ] }] events: list of one or more events search parameters, defaults to [] Example: [{ \"programs\": [ \"events\" ], \"instrument_types\": [ \"substorm onsets\" ] }] conjunction_types: list of conjunction types, defaults to [] (meaning all conjunction types). Options are in the pyaurorax.conjunctions module, or at the top level using the pyaurorax.CONJUNCTION_TYPE_ variables. epoch_search_precision: the time precision to which conjunctions are calculated. Can be 30 or 60 seconds. Defaults to 60 seconds. Note - this parameter is under active development and still considered \"alpha\". response_format: JSON representation of desired data response format poll_interval: seconds to wait between polling calls, defaults to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME return_immediately: initiate the search and return without waiting for data to be received, defaults to False verbose: show the progress of the request using the request log, defaults Returns: a pyaurorax.search.ConjunctionSearch object", "func":1 }, { -"ref":"pyaurorax.data_products.classes.search", -"url":21, -"doc":"Class definition for a data product search" +"ref":"pyaurorax.search.conjunctions.ConjunctionsManager.describe", +"url":26, +"doc":"Describe a conjunction search as an \"SQL-like\" string. Either a ConjunctionSearch object can be supplied, or a dictionary of the raw JSON query. Args: search_obj: the conjunction search to describe, optional query_dict: the conjunction search query represented as a raw dictionary, optional Returns: the \"SQL-like\" string describing the conjunction search object", +"func":1 }, { -"ref":"pyaurorax.data_products.classes.search.Search", -"url":21, -"doc":"Class representing a data product search Attributes: start: start timestamp of the search (inclusive) end: end timestamp of the search (inclusive) programs: list of program names to search platforms: list of platform names to search instrument_types: list of instrument types to search data_product_types: list of dictionaries describing data product types to filter on e.g. \"keogram\", defaults to None. Options are in the pyaurorax.data_products module, or at the top level using the pyaurorax.DATA_PRODUCT_TYPE variables. 
metadata_filters: list of dictionaries describing metadata keys and values to filter on, defaults to None Example: [{ \"key\": \"nbtrace_region\", \"operator\": \"in\", \"values\": [\"north polar cap\"] }] metadata_filters_logical_operator: the logical operator to use when evaluating metadata filters (either 'AND' or 'OR'), defaults to \"AND\" response_format: JSON representation of desired data response format request: AuroraXResponse object returned when the search is executed request_id: unique ID assigned to the request by the AuroraX API request_url: unique URL assigned to the request by the AuroraX API executed: indicates if the search has been executed/started completed: indicates if the search has finished data_url: the URL where data is accessed query: the query for this request as JSON status: the status of the query data: the data product records found logs: all log messages outputed by the AuroraX API for this request" +"ref":"pyaurorax.search.conjunctions.ConjunctionsManager.get_request_url", +"url":26, +"doc":"Get the conjunction search request URL for a given request ID. This URL can be used for subsequent pyaurorax.requests function calls. Primarily this method facilitates delving into details about a set of already-submitted conjunction searches. Args: request_id: the request identifier Returns: the request URL", +"func":1 }, { -"ref":"pyaurorax.data_products.classes.search.Search.query", -"url":21, -"doc":"Property for the query value" +"ref":"pyaurorax.search.conjunctions.swarmaurora", +"url":27, +"doc":"" }, { -"ref":"pyaurorax.data_products.classes.search.Search.execute", -"url":21, -"doc":"Initiate a data product search request", -"func":1 +"ref":"pyaurorax.search.conjunctions.swarmaurora.SwarmAuroraManager", +"url":27, +"doc":"The SwarmAuroraManager object is initialized within every PyAuroraX object. It acts as a way to access the submodules and carry over configuration information in the super class." }, { -"ref":"pyaurorax.data_products.classes.search.Search.update_status", -"url":21, -"doc":"Update the status of this data product search request Args: status: the previously-retrieved status of this request (include to avoid requesting it from the API again), defaults to None", +"ref":"pyaurorax.search.conjunctions.swarmaurora.SwarmAuroraManager.get_url", +"url":27, +"doc":"Get a URL that displays a conjunction search in the Swarm-Aurora Conjunction Finder Args: search_obj: a conjunction search object, must be a completed search with the 'request_id' value populated Returns: the Swarm-Aurora Conjunction Finder URL for this conjunction search", "func":1 }, { -"ref":"pyaurorax.data_products.classes.search.Search.check_for_data", -"url":21, -"doc":"Check to see if data is available for this data product search request Returns: True if data is available, else False", +"ref":"pyaurorax.search.conjunctions.swarmaurora.SwarmAuroraManager.open_in_browser", +"url":27, +"doc":"In a browser, open a conjunction search in the Swarm-Aurora Conjunction Finder. Args: search_obj: a conjunction search object, must be a completed search with the 'request_id' value populated browser: the browser type to load using. Default is your default browser. Some common other options are \"google-chrome\", \"firefox\", or \"safari\". 
For all available options, refer to https: docs.python.org/3/library/webbrowser.html webbrowser.get", "func":1 }, { -"ref":"pyaurorax.data_products.classes.search.Search.get_data", -"url":21, -"doc":"Retrieve the data available for this data product search request", +"ref":"pyaurorax.search.conjunctions.swarmaurora.SwarmAuroraManager.create_custom_import_file", +"url":27, +"doc":"Generate a Swarm-Aurora custom import file for a given conjunction search Args: search_obj: a conjunction search object, must be a completed search with the 'request_id' value populated filename: the output filename, default is 'swarmaurora_custom_import_file_{requestID}.json' return_dict: return the custom import file contents as a dictionary instead of saving a file, default is False Returns: the filename of the saved custom import file, or a dictionary with the file contents if return_dict is set to True", "func":1 }, { -"ref":"pyaurorax.data_products.classes.search.Search.wait", -"url":21, -"doc":"Block and wait for the request to complete and data is available for retrieval Args: poll_interval: time in seconds to wait between polling attempts, defaults to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME verbose: output poll times and other progress messages, defaults to False", -"func":1 +"ref":"pyaurorax.search.conjunctions.classes", +"url":28, +"doc":"" }, { -"ref":"pyaurorax.data_products.classes.search.Search.cancel", -"url":21, -"doc":"Cancel the data product search request This method returns immediately by default since the API processes this request asynchronously. If you would prefer to wait for it to be completed, set the 'wait' parameter to True. You can adjust the polling time using the 'poll_interval' parameter. Args: wait: wait until the cancellation request has been completed (may wait for several minutes) poll_interval: seconds to wait between polling calls, defaults to STANDARD_POLLING_SLEEP_TIME. verbose: output poll times and other progress messages, defaults to False Returns: 1 on success Raises: pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation", -"func":1 +"ref":"pyaurorax.search.conjunctions.classes.search", +"url":29, +"doc":"Class definition for a conjunction search" }, { -"ref":"pyaurorax.data_products.data_products", -"url":22, -"doc":"Functions for performing data product searches" +"ref":"pyaurorax.search.conjunctions.classes.search.ConjunctionSearch", +"url":29, +"doc":"Class representing a conjunction search Attributes: start: start timestamp of the search (inclusive) end: end timestamp of the search (inclusive) distance: the maximum distance allowed between data sources when searching for conjunctions. This can either be a number (int or float), or a dictionary modified from the output of the \"get_advanced_distances_combos()\" function. 
ground: list of ground instrument search parameters, defaults to [] Example: [{ \"programs\": [\"themis-asi\"], \"platforms\": [\"gillam\", \"rabbit lake\"], \"instrument_types\": [\"RGB\"], \"ephemeris_metadata_filters\": { \"logical_operator\": \"AND\", \"expressions\": [ { \"key\": \"calgary_apa_ml_v1\", \"operator\": \"in\", \"values\": [ \"classified as APA\" ] } ] } }] space: list of one or more space instrument search parameters, defaults to [] Example: [{ \"programs\": [\"themis-asi\", \"swarm\"], \"platforms\": [\"themisa\", \"swarma\"], \"instrument_types\": [\"footprint\"], \"ephemeris_metadata_filters\": { \"logical_operator\": \"AND\", \"expressions\": [ { \"key\": \"nbtrace_region\", \"operator\": \"in\", \"values\": [ \"north auroral oval\" ] } ] }, \"hemisphere\": [ \"northern\" ] }] events: list of one or more events search parameters, defaults to [] Example: [{ \"programs\": [ \"events\" ], \"instrument_types\": [ \"substorm onsets\" ] }] conjunction_types: list of conjunction types, defaults to [\"nbtrace\"]. Options are in the pyaurorax.conjunctions module, or at the top level using the pyaurorax.CONJUNCTION_TYPE_ variables. epoch_search_precision: the time precision to which conjunctions are calculated. Can be 30 or 60 seconds. Defaults to 60 seconds. Note - this parameter is under active development and still considered \"alpha\". response_format: JSON representation of desired data response format request: AuroraXResponse object returned when the search is executed request_id: unique ID assigned to the request by the AuroraX API request_url: unique URL assigned to the request by the AuroraX API executed: indicates if the search has been executed/started completed: indicates if the search has finished data_url: the URL where data is accessed query: the query for this request as JSON status: the status of the query data: the conjunctions found logs: all log messages outputted by the AuroraX API for this request" }, { -"ref":"pyaurorax.data_products.data_products.search", -"url":22, -"doc":"Search for data product records By default, this function will block and wait until the request completes and all data is downloaded. If you don't want to wait, set the 'return_immediately value to True. The Search object will be returned right after the search has been started, and you can use the helper functions as part of that object to get the data when it's done. Note: At least one search criteria from programs, platforms, or instrument_types, must be specified. Args: start: start timestamp of the search (inclusive) end: end timestamp of the search (inclusive) programs: list of programs to search through, defaults to None platforms: list of platforms to search through, defaults to None instrument_types: list of instrument types to search through, defaults to None data_product_types: list of dictionaries describing data product types to filter on e.g. \"keogram\", defaults to None. Options are in the pyaurorax.data_products module, or at the top level using the pyaurorax.DATA_PRODUCT_TYPE variables. 
metadata_filters: list of dictionaries describing metadata keys and values to filter on, defaults to None Example: [{ \"key\": \"nbtrace_region\", \"operator\": \"in\", \"values\": [\"north polar cap\"] }] metadata_filters_logical_operator: the logical operator to use when evaluating metadata filters (either 'AND' or 'OR'), defaults to \"AND\" response_format: JSON representation of desired data response format poll_interval: time in seconds to wait between polling attempts, defaults to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME return_immediately: initiate the search and return without waiting for data to be received, defaults to False verbose: output poll times and other progress messages, defaults to False Returns: a pyaurorax.data_products.Search object", +"ref":"pyaurorax.search.conjunctions.classes.search.ConjunctionSearch.check_criteria_block_count_validity", +"url":29, +"doc":"Check the number of of criteria blocks to see if there is too many. A max of 10 is allowed by the AuroraX conjunction search engine. An exception is raised if it was determined to have too many. Raises: pyaurorax.exceptions.AuroraXError: too many criteria blocks are found", "func":1 }, { -"ref":"pyaurorax.data_products.data_products.upload", -"url":22, -"doc":"Upload data product records to AuroraX Args: identifier: the AuroraX data source ID records: data product records to upload validate_source: validate all records before uploading, defaults to False Returns: 0 for success, raises exception on error Raises: pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected content error pyaurorax.exceptions.AuroraXUploadException: upload error pyaurorax.exceptions.AuroraXValidationException: data source validation error", +"ref":"pyaurorax.search.conjunctions.classes.search.ConjunctionSearch.get_advanced_distances_combos", +"url":29, +"doc":"Get the advanced distances combinations for this search Args: default_distance: the default distance to use, defaults to None Returns: the advanced distances combinations", "func":1 }, { -"ref":"pyaurorax.data_products.data_products.delete_urls", -"url":22, -"doc":"Delete data products by URL. The API processes this request asynchronously, so this method will return immediately whether or not the data has already been deleted. Args: data_source: data source associated with the data product records (note that identifier, program, platform, and instrument_type are required) urls: URLs of data product records to delete Returns: 0 on success Raises: pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error pyaurorax.exceptions.AuroraXBadParametersException: invalid parameters entered pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation", -"func":1 +"ref":"pyaurorax.search.conjunctions.classes.search.ConjunctionSearch.distance", +"url":29, +"doc":"Property for the distance parameter Returns: the distance dictionary with all combinations" }, { -"ref":"pyaurorax.data_products.data_products.delete", -"url":22, -"doc":"Delete data products associated with a data source within a date range. The API processes this request asynchronously, so this method will return immediately whether or not the data has already been deleted. 
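The check_criteria_block_count_validity(), get_advanced_distances_combos(), and distance entries above work together when tuning a conjunction search. A minimal sketch, assuming an already-constructed ConjunctionSearch object named `s`; the printed dictionary contents are hypothetical:

```python
# Sketch only: assumes an existing ConjunctionSearch object named "s"
# (see the ConjunctionSearch entry above for how the search itself is defined).

# Simplest form: one maximum distance, in kilometres, applied to every pairing.
uniform_distance = 500

# Advanced form: generate the per-pair dictionary, adjust individual pairings,
# then supply the edited dictionary as the search's "distance" value.
combos = s.get_advanced_distances_combos(default_distance=500)
print(combos)  # keys identify each criteria-block pairing (contents are hypothetical)

# A maximum of 10 criteria blocks is allowed; this raises an AuroraXError if exceeded.
s.check_criteria_block_count_validity()
```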
Args: data_source: data source associated with the data product records (note that identifier, program, platform, and instrument_type are required) start: timestamp marking beginning of range to delete records for, inclusive end: timestamp marking end of range to delete records for, inclusive data_product_types: specific types of data product to delete, e.g. [\"keogram\", \"movie\"]. If omitted, all data product types will be deleted. Returns: 1 on success Raises: pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error pyaurorax.exceptions.AuroraXNotFoundException: source not found pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation", -"func":1 +"ref":"pyaurorax.search.conjunctions.classes.search.ConjunctionSearch.query", +"url":29, +"doc":"Property for the query value Returns: the query parameter" }, { -"ref":"pyaurorax.data_products.data_products.describe", -"url":22, -"doc":"Describe a data product search as an \"SQL-like\" string Args: search_obj: the data product search object to describe Returns: the \"SQL-like\" string describing the data product search object", +"ref":"pyaurorax.search.conjunctions.classes.search.ConjunctionSearch.execute", +"url":29, +"doc":"Initiate a conjunction search request Raises: pyaurorax.exceptions.AuroraXAPIError: An API error was encountered", "func":1 }, { -"ref":"pyaurorax.data_products.data_products.get_request_url", -"url":22, -"doc":"Get the data product search request URL for a given request ID. This URL can be used for subsequent pyaurorax.requests function calls. Primarily this method facilitates delving into details about a set of already-submitted data product searches. Args: request_id: the request identifier Returns: the request URL", +"ref":"pyaurorax.search.conjunctions.classes.search.ConjunctionSearch.update_status", +"url":29, +"doc":"Update the status of this conjunction search request Args: status: the previously-retrieved status of this request (include to avoid requesting it from the API again), defaults to None Raises: pyaurorax.exceptions.AuroraXAPIError: An API error was encountered", "func":1 }, { -"ref":"pyaurorax.ephemeris", -"url":23, -"doc":"The ephemeris module is used to search and upload ephemeris records within AuroraX. Note that all functions and classes from submodules are all imported at this level of the ephemeris module. They can be referenced from here instead of digging in deeper to the submodules." +"ref":"pyaurorax.search.conjunctions.classes.search.ConjunctionSearch.check_for_data", +"url":29, +"doc":"Check to see if data is available for this conjunction search request Returns: True if data is available, else False Raises: pyaurorax.exceptions.AuroraXAPIError: An API error was encountered", +"func":1 }, { -"ref":"pyaurorax.ephemeris.search", -"url":23, -"doc":"Search for ephemeris records By default, this function will block and wait until the request completes and all data is downloaded. If you don't want to wait, set the 'return_immediately value to True. The Search object will be returned right after the search has been started, and you can use the helper functions as part of that object to get the data when it's done. Note: At least one search criteria from programs, platforms, or instrument_types, must be specified. 
Args: start: start timestamp of the search (inclusive) end: end timestamp of the search (inclusive) programs: list of programs to search through, defaults to None platforms: list of platforms to search through, defaults to None instrument_types: list of instrument types to search through, defaults to None metadata_filters: list of dictionaries describing metadata keys and values to filter on, defaults to None Example: [{ \"key\": \"nbtrace_region\", \"operator\": \"in\", \"values\": [\"north polar cap\"] }] metadata_filters_logical_operator: the logical operator to use when evaluating metadata filters (either 'AND' or 'OR'), defaults to \"AND\" response_format: JSON representation of desired data response format poll_interval: time in seconds to wait between polling attempts, defaults to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME return_immediately: initiate the search and return without waiting for data to be received, defaults to False verbose: output poll times and other progress messages, defaults to False Returns: A pyaurorax.ephemeris.Search object Raises: pyaurorax.exceptions.AuroraXBadParametersException: missing parameters", +"ref":"pyaurorax.search.conjunctions.classes.search.ConjunctionSearch.get_data", +"url":29, +"doc":"Retrieve the data available for this conjunction search request Raises: pyaurorax.exceptions.AuroraXAPIError: An API error was encountered", "func":1 }, { -"ref":"pyaurorax.ephemeris.upload", -"url":23, -"doc":"Upload ephemeris records to AuroraX Args: identifier: AuroraX data source ID records: ephemeris records to upload validate_source: validate all records before uploading, defaults to False Returns: 0 for success, raises exception on error Raises: pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected content error pyaurorax.exceptions.AuroraXUploadException: upload error pyaurorax.exceptions.AuroraXValidationException: data source validation error", +"ref":"pyaurorax.search.conjunctions.classes.search.ConjunctionSearch.wait", +"url":29, +"doc":"Block and wait until the request is complete and data is available for retrieval Args: poll_interval: time in seconds to wait between polling attempts, defaults to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME verbose: output poll times and other progress messages, defaults to False Raises: pyaurorax.exceptions.AuroraXAPIError: An API error was encountered", "func":1 }, { -"ref":"pyaurorax.ephemeris.delete", -"url":23, -"doc":"Delete ephemeris records between a timeframe. The API processes this request asynchronously, so this method will return immediately whether or not the data has already been deleted. 
Args: data_source: data source associated with the data product records (note that identifier, program, platform, and instrument_type are required) start: timestamp marking beginning of range to delete records for, inclusive end: timestamp marking end of range to delete records for, inclusive Returns: 0 on success Raises: pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error pyaurorax.exceptions.AuroraXNotFoundException: source not found pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation pyaurorax.exceptions.AuroraXBadParametersException: missing parameters", +"ref":"pyaurorax.search.conjunctions.classes.search.ConjunctionSearch.cancel", +"url":29, +"doc":"Cancel the conjunction search request This method returns immediately by default since the API processes this request asynchronously. If you would prefer to wait for it to be completed, set the 'wait' parameter to True. You can adjust the polling time using the 'poll_interval' parameter. Args: wait: wait until the cancellation request has been completed (may wait for several minutes) poll_interval: seconds to wait between polling calls, defaults to STANDARD_POLLING_SLEEP_TIME. verbose: output poll times and other progress messages, defaults to False Returns: 1 on success Raises: pyaurorax.exceptions.AuroraXAPIError: An API error was encountered", "func":1 }, { -"ref":"pyaurorax.ephemeris.describe", -"url":23, -"doc":"Describe an ephemeris search as a \"SQL-like\" string Args: search_obj: the ephemeris search object to describe Returns: the \"SQL-like\" string describing the ephemeris search object", -"func":1 +"ref":"pyaurorax.search.conjunctions.classes.conjunction", +"url":30, +"doc":"Class definition for a conjunction" }, { -"ref":"pyaurorax.ephemeris.get_request_url", -"url":23, -"doc":"Get the ephemeris search request URL for a given request ID. This URL can be used for subsequent pyaurorax.requests function calls. Primarily this method facilitates delving into details about a set of already-submitted ephemeris searches. Args: request_id: the request identifier Returns: the request URL", -"func":1 +"ref":"pyaurorax.search.conjunctions.classes.conjunction.CONJUNCTION_TYPE_NBTRACE", +"url":30, +"doc":"Conjunction search 'conjunction_type' category for finding conjunctions using the north B-trace data" }, { -"ref":"pyaurorax.ephemeris.Ephemeris", -"url":23, -"doc":"Ephemeris object Attributes: data_source: data source that the ephemeris record is associated with epoch: timestamp for the record (assumed it is in UTC) location_geo: Location object containing geographic latitude and longitude location_gsm: Location object containing GSM latitude and longitude (leave empty for data sources with a type of 'ground') nbtrace: Location object with north B-trace geographic latitude and longitude sbtrace: Location object with south B-trace geographic latitude and longitude metadata: metadata for this record (arbitrary keys and values) Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model." 
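The ConjunctionSearch methods above (execute, check_for_data, get_data, wait, cancel) make up the usual request lifecycle. A hedged sketch of that flow using the ground/space criteria-block shapes documented earlier; the exact constructor signature and the top-level import path are assumptions inferred from these entries:

```python
import datetime

import pyaurorax
from pyaurorax.search import ConjunctionSearch  # assumed import path for the class above

aurorax = pyaurorax.PyAuroraX()

# criteria blocks follow the dictionary shapes documented in the class attributes above
ground = [{"programs": ["themis-asi"], "platforms": ["gillam"], "instrument_types": ["RGB"]}]
space = [{"programs": ["swarm"], "hemisphere": ["northern"]}]

s = ConjunctionSearch(
    aurorax,                                    # PyAuroraX object (assumed first argument)
    datetime.datetime(2019, 1, 1, 0, 0, 0),     # start (inclusive)
    datetime.datetime(2019, 1, 3, 23, 59, 59),  # end (inclusive)
    distance=500,                               # max distance between data sources, in km
    ground=ground,
    space=space,
)
s.execute()    # submit the request to the AuroraX search engine
s.wait()       # block until completed (poll_interval and verbose are configurable)
s.get_data()   # populate s.data with the Conjunction objects found
print(len(s.data), s.request_id)
```

Calling cancel(wait=True) instead of wait() abandons a long-running request, as the cancel entry above describes.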
+"ref":"pyaurorax.search.conjunctions.classes.conjunction.CONJUNCTION_TYPE_SBTRACE", +"url":30, +"doc":"Conjunction search 'conjunction_type' category for finding conjunctions using the south B-trace data" }, { -"ref":"pyaurorax.ephemeris.Ephemeris.data_source", -"url":23, -"doc":"" +"ref":"pyaurorax.search.conjunctions.classes.conjunction.CONJUNCTION_TYPE_GEOGRAPHIC", +"url":30, +"doc":"Conjunction search 'conjunction_type' category for finding conjunctions using the geographic position data" }, { -"ref":"pyaurorax.ephemeris.Ephemeris.epoch", -"url":23, -"doc":"" +"ref":"pyaurorax.search.conjunctions.classes.conjunction.Conjunction", +"url":30, +"doc":"Conjunction object Attributes: conjunction_type: the type of location data used when the conjunction was found (either 'nbtrace', 'sbtrace', or 'geographic') start: start timestamp of the conjunction end: end timestamp of the conjunction data_sources: data sources in the conjunction min_distance: minimum kilometer distance of the conjunction max_distance: maximum kilometer distance of the conjunction events: the sub-conjunctions that make up this over-arching conjunction (the conjunctions between each set of two data sources) closest_epoch: timestamp for when data sources were closest farthest_epoch: timestamp for when data sources were farthest" }, { -"ref":"pyaurorax.ephemeris.Ephemeris.location_geo", -"url":23, -"doc":"" +"ref":"pyaurorax.models", +"url":31, +"doc":"Interact with various auroral models, such as the TREx Auroral Transport Model (ATM)." }, { -"ref":"pyaurorax.ephemeris.Ephemeris.location_gsm", -"url":23, -"doc":"" +"ref":"pyaurorax.models.ModelsManager", +"url":31, +"doc":"The ModelsManager object is initialized within every PyAuroraX object. It acts as a way to access the submodules and carry over configuration information in the super class." }, { -"ref":"pyaurorax.ephemeris.Ephemeris.nbtrace", -"url":23, -"doc":"" +"ref":"pyaurorax.models.ModelsManager.atm", +"url":31, +"doc":"Access to the atm submodule from within a PyAuroraX object." }, { -"ref":"pyaurorax.ephemeris.Ephemeris.sbtrace", -"url":23, +"ref":"pyaurorax.models.atm", +"url":32, "doc":"" }, { -"ref":"pyaurorax.ephemeris.Ephemeris.metadata", -"url":23, -"doc":"" +"ref":"pyaurorax.models.atm.ATMManager", +"url":32, +"doc":"The ATMManager object is initialized within every PyAuroraX object. It acts as a way to access the submodules and carry over configuration information in the super class." }, { -"ref":"pyaurorax.ephemeris.Ephemeris.to_json_serializable", -"url":23, -"doc":"Convert object to a JSON-serializable object (ie. translate datetime objects to strings) Returns: a dictionary object that is JSON-serializable", +"ref":"pyaurorax.models.atm.ATMManager.forward", +"url":32, +"doc":"Perform a forward calculation using the TREx Auroral Transport Model and the supplied input parameters. Note that this function utilizes the UCalgary Space Remote Sensing API to perform the calculation. Args: timestamp (datetime.datetime): Timestamp for the calculation. This value is expected to be in UTC, and is valid for any value up to the end of the previous day. Any timezone data will be ignored. This parameter is required. geodetic_latitude (float): Latitude in geodetic coordinates: -90.0 to 90.0. This parameter is required. geodetic_longitude (float): Longitude in geodetic coordinates: -180.0 to 180.0. This parameter is required. output (ATMForwardOutputFlags): Flags to indicate which values are included in the output. 
See [ ATMForwardOutputFlags ](https: docs-pyucalgarysrs.phys.ucalgary.ca/models/atm/classes_forward.html pyucalgarysrs.models.atm.classes_forward.ATMForwardOutputFlags) for more details. This parameter is required. maxwellian_energy_flux (float): Maxwellian energy flux in erg/cm2/s. Default is 10. This parameter is optional. gaussian_energy_flux (float): Gaussian energy flux in erg/cm2/s. Default is 0.0. Note that gaussian_peak_energy and gaussian_spectral_width must be specified if the gaussian_energy_flux is not 0. This parameter is optional. maxwellian_characteristic_energy (float): Maxwellian characteristic energy in eV. Default is 5000. Note that maxwellian_characteristic_energy must be specified if the maxwellian_energy_flux is not 0. This parameter is optional. gaussian_peak_energy (float): Gaussian peak energy in eV. Default is 1000. Note this parameter must be specified if the gaussian_energy_flux is not 0. This parameter is optional. gaussian_spectral_width (float): Gaussian spectral width in eV. Default is 100. Note this parameter must be specified if the gaussian_energy_flux is not 0. This parameter is optional. nrlmsis_model_version (str): NRLMSIS version number. Possible values are 00 or 2.0 . Default is 2.0 . This parameter is optional. More details about this empirical model can be found [here](https: ccmc.gsfc.nasa.gov/models/NRLMSIS~00/), and [here](https: ccmc.gsfc.nasa.gov/models/NRLMSIS~2.0/). oxygen_correction_factor (float): Oxygen correction factor used to multiply by in the empirical model. Default is 1. This parameter is optional. timescale_auroral (int): Auroral timescale in seconds. Default is 600 (10 minutes). This parameter is optional. timescale_transport (int): Transport timescale in seconds. Default is 300 (5 minutes). This parameter is optional. atm_model_version (str): ATM model version number. Possible values are only '1.0' at this time, but will have additional possible values in the future. This parameter is optional. custom_spectrum (ndarray): A 2-dimensional numpy array (dtype is any float type) containing values representing the energy in eV, and flux in 1/cm2/sr/eV. The shape is expected to be [N, 2], with energy in [:, 0] and flux in [:, 1]. Note that this array cannot contain negative values (SRSAPIError will be raised if so). This parameter is optional. no_cache (bool): The UCalgary Space Remote Sensing API utilizes a caching layer for performing ATM calculations. If this variation of input parameters has been run before (and the cache is still valid), then it will not re-run the calculation. Instead it will return the cached results immediately. To disable the caching layer, set this parameter to True . Default is False . This parameter is optional. timeout (int): Represents how many seconds to wait for the API to send data before giving up. The default is 10 seconds, or the api_timeout value in the super class' pyaurorax.PyAuroraX object. This parameter is optional. Returns: An [ ATMForwardResult ](https: docs-pyucalgarysrs.phys.ucalgary.ca/models/atm/classes_forward.html pyucalgarysrs.models.atm.classes_forward.ATMForwardResult) object containing the requested output data, among other values. Raises: pyaurorax.exceptions.AuroraXAPIError: An API error was encountered", "func":1 }, { -"ref":"pyaurorax.ephemeris.Search", -"url":23, -"doc":"Class representing an ephemeris search Note: At least one search criteria from programs, platforms, or instrument_types must be specified. 
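Most of the forward() parameters above are optional; only the timestamp, geodetic location, and output flags are required. A small sketch, assuming the ATM submodule is reached as aurorax.models.atm and that ATMForwardOutputFlags is importable from pyucalgarysrs as the linked docs suggest (the individual flag attribute names below are illustrative):

```python
import datetime

import pyaurorax
from pyucalgarysrs.models.atm import ATMForwardOutputFlags  # assumed import path (see link above)

aurorax = pyaurorax.PyAuroraX()

# choose which quantities the model should return; the attribute names set here
# are illustrative assumptions -- consult ATMForwardOutputFlags for the real flags
output = ATMForwardOutputFlags()
output.altitudes = True
output.height_integrated_rayleighs_5577 = True

result = aurorax.models.atm.forward(
    datetime.datetime(2021, 11, 4, 6, 0, 0),  # UTC timestamp
    58.227,                                   # geodetic latitude
    -103.680,                                 # geodetic longitude
    output,
    maxwellian_energy_flux=10,                # erg/cm2/s (the documented default, shown for clarity)
    nrlmsis_model_version="2.0",
)
print(result)
```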
Args: start: start timestamp of the search (inclusive) end: end timestamp of the search (inclusive) programs: list of programs to search through, defaults to None platforms: list of platforms to search through, defaults to None instrument_types: list of instrument types to search through, defaults to None metadata_filters: list of dictionaries describing metadata keys and values to filter on, defaults to None e.g. { \"key\": \"string\", \"operator\": \"=\", \"values\": [ \"string\" ] } metadata_filters_logical_operator: the logical operator to use when evaluating metadata filters (either 'AND' or 'OR'), defaults to \"AND\" response_format: JSON representation of desired data response format request: AuroraXResponse object returned when the search is executed request_id: unique ID assigned to the request by the AuroraX API request_url: unique URL assigned to the request by the AuroraX API executed: indicates if the search has been executed/started completed: indicates if the search has finished data_url: the URL where data is accessed query: the query for this request as JSON status: the status of the query data: the ephemeris records found logs: all log messages outputted by the AuroraX API for this request" +"ref":"pyaurorax.models.atm.ATMManager.inverse", +"url":32, +"doc":"Perform an inverse calculation using the TREx Auroral Transport Model and the supplied input parameters. Note that this function utilizes the UCalgary Space Remote Sensing API to perform the calculation. Args: timestamp (datetime.datetime): Timestamp for the calculation. This value is expected to be in UTC, and is valid a pre-defined timeframe. An error will be raised if outside of the valid timeframe. Any timezone data will be ignored. This parameter is required. geodetic_latitude (float): Latitude in geodetic coordinates. Currently limited to the Transition Region Explorer (TREx) region of >=50.0 and =-110 and <-70 degrees. An error will be raised if outside of this range. This parameter is required. intensity_4278 (float): Intensity of the 427.8nm (blue) wavelength in Rayleighs. This parameter is required. intensity_5577 (float): Intensity of the 557.7nm (green) wavelength in Rayleighs. This parameter is required. intensity_6300 (float): Intensity of the 630.0nm (red) wavelength in Rayleighs. This parameter is required. intensity_8446 (float): Intensity of the 844.6nm (near infrared) wavelength in Rayleighs. This parameter is required. output (ATMInverseOutputFlags): Flags to indicate which values are included in the output. See [ ATMInverseOutputFlags ](https: docs-pyucalgarysrs.phys.ucalgary.ca/models/atm/classes_inverse.html pyucalgarysrs.models.atm.classes_inverse.ATMInverseOutputFlags) for more details. This parameter is required. precipitation_flux_spectral_type (str): The precipitation flux spectral type to use. Possible values are gaussian or maxwellian . The default is gaussian . This parameter is optional. nrlmsis_model_version (str): NRLMSIS version number. Possible values are 00 or 2.0 . Default is 2.0 . This parameter is optional. More details about this empirical model can be found [here](https: ccmc.gsfc.nasa.gov/models/NRLMSIS~00/), and [here](https: ccmc.gsfc.nasa.gov/models/NRLMSIS~2.0/). atm_model_version (str): ATM model version number. Possible values are only '1.0' at this time, but will have additional possible values in the future. This parameter is optional. no_cache (bool): The UCalgary Space Remote Sensing API utilizes a caching layer for performing ATM calculations. 
If this variation of input parameters has been run before (and the cache is still valid), then it will not re-run the calculation. Instead it will return the cached results immediately. To disable the caching layer, set this parameter to True . Default is False . This parameter is optional. timeout (int): Represents how many seconds to wait for the API to send data before giving up. The default is 10 seconds, or the api_timeout value in the super class' pyaurorax.PyAuroraX object. This parameter is optional. Returns: An [ ATMInverseResult ](https: docs-pyucalgarysrs.phys.ucalgary.ca/models/atm/classes_inverse.html pyucalgarysrs.models.atm.classes_inverse.ATMInverseResult) object containing the requested output data, among other values. Raises: pyaurorax.exceptions.AuroraXAPIError: An API error was encountered", +"func":1 }, { -"ref":"pyaurorax.ephemeris.Search.query", -"url":23, -"doc":"Property for the query value" +"ref":"pyaurorax.tools", +"url":33, +"doc":"Data analysis toolkit for working with all-sky imager data available within the AuroraX platform. This portion of the PyAuroraX library allows you to easily generate basic plots for ASI data, and common manipulations. These include things like displaying single images, making keograms, projecting ASI data onto maps, and extracting metrics for a given lat/lon bounding box. Example: For shorter function calls, you can initialize the tools submodule using like so: import pyaurorax aurorax = pyaurorax.PyAuroraX() at = aurorax.tools " }, { -"ref":"pyaurorax.ephemeris.Search.execute", -"url":23, -"doc":"Initiate ephemeris search request Raises: pyaurorax.exceptions.AuroraXBadParametersException: missing parameters", +"ref":"pyaurorax.tools.display", +"url":33, +"doc":"Render a visualization of a single image. Either display it (default behaviour), save it to disk (using the savefig parameter), or return the matplotlib plot object for further usage (using the returnfig parameter). Args: image (numpy.ndarray): The image to display, represented as a numpy array. cmap (str): The matplotlib colormap to use. Commonly used colormaps are: - REGO: gist_heat - THEMIS ASI: gray - TREx Blue: Blues_r - TREx NIR: gray - TREx RGB: None A list of all available colormaps can be found on the [matplotlib documentation](https: matplotlib.org/stable/gallery/color/colormap_reference.html). figsize (tuple): The matplotlib figure size to use when displaying, tuple of two integers (ie. figsize=(14, 4) ) aspect (str or float): The matplotlib imshow aspect ration to use. A common value for this is auto . All valid values can be found on the [matplotlib documentation](https: matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.imshow.html). colorbar (bool): Display a colorbar. Default is False . title (str): A title to display above the rendered image. Defaults to no title. returnfig (bool): Instead of displaying the image, return the matplotlib figure object. This allows for further plot manipulation, for example, adding labels or a title in a different location than the default. Remember - if this parameter is supplied, be sure that you close your plot after finishing work with it. This can be achieved by doing plt.close(fig) . Note that this method cannot be used in combination with savefig . savefig (bool): Save the displayed image to disk instead of displaying it. The parameter savefig_filename is required if this parameter is set to True. Defaults to False . savefig_filename (str): Filename to save the image to. 
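The inverse() entry above mirrors forward(), starting from observed brightnesses at the four TREx wavelengths. A hedged sketch under the same assumptions about the pyucalgarysrs flag classes:

```python
import datetime

import pyaurorax
from pyucalgarysrs.models.atm import ATMInverseOutputFlags  # assumed import path (see link above)

aurorax = pyaurorax.PyAuroraX()

# flag attribute names here are illustrative assumptions
output = ATMInverseOutputFlags()
output.energy_flux = True
output.characteristic_energy = True

result = aurorax.models.atm.inverse(
    datetime.datetime(2021, 11, 4, 6, 0, 0),  # UTC, within the supported timeframe
    58.227,                                   # geodetic latitude inside the TREx region
    -103.680,                                 # geodetic longitude inside the TREx region
    intensity_4278=2302.6,                    # Rayleighs at 427.8 nm (blue)
    intensity_5577=11339.5,                   # Rayleighs at 557.7 nm (green)
    intensity_6300=528.3,                     # Rayleighs at 630.0 nm (red)
    intensity_8446=427.4,                     # Rayleighs at 844.6 nm (near infrared)
    output=output,
    precipitation_flux_spectral_type="gaussian",
)
print(result)
```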
Must be specified if the savefig parameter is set to True. savefig_quality (int): Quality level of the saved image. This can be specified if the savefig_filename is a JPG image. If it is a PNG, quality is ignored. Default quality level for JPGs is matplotlib/Pillow's default of 75%. Returns: The displayed image, by default. If savefig is set to True, nothing will be returned. If returnfig is set to True, the plotting variables (fig, ax) will be returned. Raises: ValueError: issues with supplied parameters.", "func":1 }, { -"ref":"pyaurorax.ephemeris.Search.update_status", -"url":23, -"doc":"Update the status of this ephemeris search request Args: status: the previously-retrieved status of this request (include to avoid requesting it from the API again), defaults to None", +"ref":"pyaurorax.tools.movie", +"url":33, +"doc":"Generate a movie file from a list of filenames. Note that the codec used is \"mp4v\". Args: input_filenames (List[str]): Filenames of frames to use for movie generation. No sorting is applied, so it is assumed the list is in the desired order. This parameter is required. output_filename (str): Filename for the created movie file. This parameter is required. n_parallel (int): Number of multiprocessing workers to use. Default is 1 , which does not use multiprocessing. fps (int): Frames per second (FPS) for the movie file. Default is 25 . progress_bar_disable (bool): Toggle the progress bars off. Default is False . Raises: IOError: I/O related issue while generating movie", "func":1 }, { -"ref":"pyaurorax.ephemeris.Search.check_for_data", -"url":23, -"doc":"Check to see if data is available for this ephemeris search request Returns: True if data is available, else False", +"ref":"pyaurorax.tools.scale_intensity", +"url":33, +"doc":"Scale all values of an array that lie in the range min<=x<=max in to the range 0<=x<=high. Args: data (numpy.ndarray): data array, can be 2, 3, or 4-dimensional. Assumed to be an image, or array of images. Also assumed that the first 2 dimensions are the image's x and y coordinates, and the following dimensions are some combination of the number of images, and/or the colour channel. min (float): minimum value of array to be considered max (float): maximum value of array to be considered top (float): maximum value of the scaled result. If not supplied, the max value of the data array's dtype is used. Returns: A new numpy.ndarray that is the same dimensions as the inputted data array, with the scaling applied. Raises: ValueError: Issues with the supplied min, max, or top", "func":1 }, { -"ref":"pyaurorax.ephemeris.Search.get_data", -"url":23, -"doc":"Retrieve the data available for this ephemeris search request", +"ref":"pyaurorax.tools.set_theme", +"url":33, +"doc":"A handy wrapper for setting the matplotlib global theme. Common choices are light , dark , or default . Args: theme (str): Theme name. Common choices are light , dark , or default . If default, then matplotlib theme settings will be fully reset to their defaults. 
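Together, the display(), movie(), and scale_intensity() entries above cover the basic single-image workflow: scale raw counts into a display range, then render or save a frame. A minimal sketch; how the images array is obtained (for example, from an earlier data read) is assumed:

```python
import pyaurorax

aurorax = pyaurorax.PyAuroraX()
at = aurorax.tools

# "images" is assumed: an array shaped [rows, cols, num_images] from an earlier data read
frame = images[:, :, 0]

# clip raw counts into a display-friendly range, then render a single frame
scaled = at.scale_intensity(frame, min=1000, max=10000)
at.display(scaled, cmap="gray", colorbar=True, title="example frame")

# or write the frame to disk instead of showing it
at.display(scaled, cmap="gray", savefig=True, savefig_filename="frame.png")

# frames saved this way can then be stitched together with at.movie([...], "movie.mp4")
```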
Additional themes can be found on the [matplotlib documentation](https: matplotlib.org/stable/gallery/style_sheets/style_sheets_reference.html)", "func":1 }, { -"ref":"pyaurorax.ephemeris.Search.wait", -"url":23, -"doc":"Block and wait for the request to complete and data is available for retrieval Args: poll_interval: time in seconds to wait between polling attempts, defaults to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME verbose: output poll times and other progress messages, defaults to False", +"ref":"pyaurorax.tools.Keogram", +"url":33, +"doc":"Class representation for a keogram Attributes: data (numpy.ndarray): The derived keogram data. timestamp (List[datetime.datetime]): Timestamps corresponding to each keogram slice. ccd_y (numpy.ndarray): The y-axis representing CCD Y coordinates for the keogram. mag_y (numpy.ndarray): The y-axis representing magnetic latitude for the keogram. geo_y (numpy.ndarray): The y-axis representing geographic latitude for the keogram." +}, +{ +"ref":"pyaurorax.tools.Keogram.set_geographic_latitudes", +"url":33, +"doc":"Set the geographic latitude values for this keogram, using the specified skymap data. The data will be set to the geo_y attribute of this Keogram object, which can then be used for plotting and/or further analysis. Note: currently only specific altitudes are supported at this time, matching the ones in the passed-in skymap object. A future release will implement an interpolation routine to allow for a wider range of altitudes. Args: skymap (pyaurorax.data.ucalgary.Skymap): The skymap object to use. This parameter is required. altitude (int): The altitude to use, in kilometers. If not specified, it will use the default in the skymap object. If the specified altitude is not valid, a ValueError will be raised. Returns: None. The Keogram object's geo_y attribute will be updated. Raises: ValueError: Issues with specified altitude.", "func":1 }, { -"ref":"pyaurorax.ephemeris.Search.cancel", -"url":23, -"doc":"Cancel the ephemeris search request This method returns immediately by default since the API processes this request asynchronously. If you would prefer to wait for it to be completed, set the 'wait' parameter to True. You can adjust the polling time using the 'poll_interval' parameter. Args: wait: wait until the cancellation request has been completed (may wait for several minutes) poll_interval: seconds to wait between polling calls, defaults to STANDARD_POLLING_SLEEP_TIME. verbose: output poll times and other progress messages, defaults to False Returns: 1 on success Raises: pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation", +"ref":"pyaurorax.tools.Keogram.set_magnetic_latitudes", +"url":33, +"doc":"Set the magnetic latitude values for this keogram, using the specified skymap data. AACGMv2 will be utilized to perform the calculations. The resulting data will be set to the mag_y attribute of this Keogram object, which can then be used for plotting and/or further analysis. Note: currently only specific altitudes are supported at this time, matching the ones in the passed-in skymap object. A future release will implement an interpolation routine to allow for a wider range of altitudes. Args: skymap (pyaurorax.data.ucalgary.Skymap): The skymap object to use. This parameter is required. timestamp (datetime.datetime): The timestamp to use when converting skymap data to magnetic coordinates. Utilizes AACGMv2 to do the conversion. 
altitude (int): The altitude to use. If not specified, it will use the default in the skymap object. If the specified altitude is not valid, a ValueError will be raised. Returns: None. The Keogram object's mag_y attribute will be updated. Raises: ValueError: Issues with specified altitude.", "func":1 }, { -"ref":"pyaurorax.ephemeris.classes", -"url":24, -"doc":"Separted classes and functions used by the ephemeris module. Note that these classes and variables are all imported higher up at the top of the ephemeris module. They can be referenced from there instead of digging in deeper to these submodules." +"ref":"pyaurorax.tools.Keogram.plot", +"url":33, +"doc":"Generate a plot of the keogram data. Either display it (default behaviour), save it to disk (using the savefig parameter), or return the matplotlib plot object for further usage (using the returnfig parameter). Args: y_type (str): Type of y-axis to use when plotting. Options are ccd , mag , or geo . The default is ccd . This parameter is required. title (str): The title to display above the plotted keogram. figsize (tuple): The matplotlib figure size to use when plotting. For example figsize=(14,4) . cmap (str): The matplotlib colormap to use. Commonly used colormaps are: - REGO: gist_heat - THEMIS ASI: gray - TREx Blue: Blues_r - TREx NIR: gray - TREx RGB: None A list of all available colormaps can be found on the [matplotlib documentation](https: matplotlib.org/stable/gallery/color/colormap_reference.html). aspect (str or float): The matplotlib imshow aspect ration to use. A common value for this is auto . All valid values can be found on the [matplotlib documentation](https: matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.imshow.html). axes_visible (bool): Display the axes. Default is True . xlabel (str): The x-axis label to use. Default is Time (UTC) . ylabel (str): The y-axis label to use. Default is based on y_type. xtick_increment (int): The x-axis tick increment to use. Default is 100. ytick_increment (int): The y-axis tick increment to use. Default is 50. returnfig (bool): Instead of displaying the image, return the matplotlib figure object. This allows for further plot manipulation, for example, adding labels or a title in a different location than the default. Remember - if this parameter is supplied, be sure that you close your plot after finishing work with it. This can be achieved by doing plt.close(fig) . Note that this method cannot be used in combination with savefig . savefig (bool): Save the displayed image to disk instead of displaying it. The parameter savefig_filename is required if this parameter is set to True. Defaults to False . savefig_filename (str): Filename to save the image to. Must be specified if the savefig parameter is set to True. savefig_quality (int): Quality level of the saved image. This can be specified if the savefig_filename is a JPG image. If it is a PNG, quality is ignored. Default quality level for JPGs is matplotlib/Pillow's default of 75%. Returns: The displayed keogram, by default. If savefig is set to True, nothing will be returned. If returnfig is set to True, the plotting variables (fig, ax) will be returned. Raises: ValueError: Issues with the y-axis choice.", +"func":1 }, { -"ref":"pyaurorax.ephemeris.classes.ephemeris", -"url":25, -"doc":"Class definition for an ephemeris record" +"ref":"pyaurorax.tools.Montage", +"url":33, +"doc":"Class representation for a montage Attributes: data (numpy.ndarray): The derived montage data. 
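The Keogram methods above remap the keogram's CCD rows onto geographic or magnetic latitude before plotting. A sketch under stated assumptions: keogram is a Keogram object from a prior keogram.create() call (documented later in this index) and skymap is a pyaurorax.data.ucalgary.Skymap for the same imager, with 110 km being one of the altitudes available in that skymap:

```python
import datetime

# assumed inputs: "keogram" is a pyaurorax.tools.Keogram object and "skymap" is a
# pyaurorax.data.ucalgary.Skymap for the same imager
keogram.set_geographic_latitudes(skymap, altitude=110)    # populates keogram.geo_y
keogram.set_magnetic_latitudes(
    skymap,
    datetime.datetime(2021, 11, 4, 6, 0, 0),              # timestamp for the AACGMv2 conversion
    altitude=110,
)                                                         # populates keogram.mag_y

# plot against magnetic latitude instead of raw CCD rows
keogram.plot(y_type="mag", title="Keogram (magnetic latitude)", cmap="gray")
```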
timestamp (List[datetime.datetime]): Timestamps corresponding to each montage image." }, { -"ref":"pyaurorax.ephemeris.classes.ephemeris.Ephemeris", -"url":25, -"doc":"Ephemeris object Attributes: data_source: data source that the ephemeris record is associated with epoch: timestamp for the record (assumed it is in UTC) location_geo: Location object containing geographic latitude and longitude location_gsm: Location object containing GSM latitude and longitude (leave empty for data sources with a type of 'ground') nbtrace: Location object with north B-trace geographic latitude and longitude sbtrace: Location object with south B-trace geographic latitude and longitude metadata: metadata for this record (arbitrary keys and values) Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model." +"ref":"pyaurorax.tools.Montage.plot", +"url":33, +"doc":"Generate a plot of the montage data. Either display it (default behaviour), save it to disk (using the savefig parameter), or return the matplotlib plot object for further usage (using the returnfig parameter). Args: figsize (tuple): The matplotlib figure size to use when plotting. For example figsize=(14,4) . cmap (str): The matplotlib colormap to use. Commonly used colormaps are: - REGO: gist_heat - THEMIS ASI: gray - TREx Blue: Blues_r - TREx NIR: gray - TREx RGB: None A list of all available colormaps can be found on the [matplotlib documentation](https: matplotlib.org/stable/gallery/color/colormap_reference.html). returnfig (bool): Instead of displaying the image, return the matplotlib figure object. This allows for further plot manipulation, for example, adding labels or a title in a different location than the default. Remember - if this parameter is supplied, be sure that you close your plot after finishing work with it. This can be achieved by doing plt.close(fig) . Note that this method cannot be used in combination with savefig . savefig (bool): Save the displayed image to disk instead of displaying it. The parameter savefig_filename is required if this parameter is set to True. Defaults to False . savefig_filename (str): Filename to save the image to. Must be specified if the savefig parameter is set to True. savefig_quality (int): Quality level of the saved image. This can be specified if the savefig_filename is a JPG image. If it is a PNG, quality is ignored. Default quality level for JPGs is matplotlib/Pillow's default of 75%. Returns: The displayed montage, by default. If savefig is set to True, nothing will be returned. If returnfig is set to True, the plotting variables (fig, ax) will be returned. Raises: ValueError: Issues with the y-axis choice.", +"func":1 }, { -"ref":"pyaurorax.ephemeris.classes.ephemeris.Ephemeris.data_source", -"url":25, -"doc":"" +"ref":"pyaurorax.tools.Mosaic", +"url":33, +"doc":"Class representation for a generated mosaic. Attributes: polygon_data (matplotlib.collections.PolyCollection): Generated polygons containing rendered data. cartopy_projection (cartopy.crs.Projection): Cartopy projection to utilize. contour_data (Dict[str, List[Any ): Generated contour data." 
}, { -"ref":"pyaurorax.ephemeris.classes.ephemeris.Ephemeris.epoch", -"url":25, +"ref":"pyaurorax.tools.Mosaic.polygon_data", +"url":33, "doc":"" }, { -"ref":"pyaurorax.ephemeris.classes.ephemeris.Ephemeris.location_geo", -"url":25, +"ref":"pyaurorax.tools.Mosaic.cartopy_projection", +"url":33, "doc":"" }, { -"ref":"pyaurorax.ephemeris.classes.ephemeris.Ephemeris.location_gsm", -"url":25, +"ref":"pyaurorax.tools.Mosaic.contour_data", +"url":33, "doc":"" }, { -"ref":"pyaurorax.ephemeris.classes.ephemeris.Ephemeris.nbtrace", -"url":25, -"doc":"" +"ref":"pyaurorax.tools.Mosaic.plot", +"url":33, +"doc":"Generate a plot of the mosaic data. Either display it (default behaviour), save it to disk (using the savefig parameter), or return the matplotlib plot object for further usage (using the returnfig parameter). Args: map_extent (List[int]): Latitude/longitude range to be visible on the rendered map. This is a list of 4 integers and/or floats, in the order of [min_lon, max_lon, min_lat, max_lat]. figsize (tuple): The matplotlib figure size to use when plotting. For example figsize=(14,4) . rayleighs (bool): Set to True if the data being plotted is in Rayleighs. Defaults to False . max_rayleighs (int): Max intensity scale for Rayleighs. Defaults to 20000 . ocean_color (str): Colour of the ocean. Default is cartopy's default shade of blue. Colours can be supplied as a word, or hexcode prefixed with a ' ' character (ie. 55AADD ). land_color (str): Colour of the land. Default is grey . Colours can be supplied as a word, or hexcode prefixed with a ' ' character (ie. 41BB87 ). land_edgecolor (str): Color of the land edges. Default is 8A8A8A . Colours can be supplied as a word, or hexcode prefixed with a ' ' character. borders_color (str): Color of the country borders. Default is AEAEAE . Colours can be supplied as a word, or hexcode prefixed with a ' ' character. borders_disable (bool): Disbale rendering of the borders. Default is False . cbar_colorcmap (str): The matplotlib colormap to use for the plotted color bar. Default is grey . Commonly used colormaps are: - REGO: gist_heat - THEMIS ASI: gray - TREx Blue: Blues_r - TREx NIR: gray - TREx RGB: None A list of all available colormaps can be found on the [matplotlib documentation](https: matplotlib.org/stable/gallery/color/colormap_reference.html). returnfig (bool): Instead of displaying the image, return the matplotlib figure object. This allows for further plot manipulation, for example, adding labels or a title in a different location than the default. Remember - if this parameter is supplied, be sure that you close your plot after finishing work with it. This can be achieved by doing plt.close(fig) . Note that this method cannot be used in combination with savefig . savefig (bool): Save the displayed image to disk instead of displaying it. The parameter savefig_filename is required if this parameter is set to True. Defaults to False . savefig_filename (str): Filename to save the image to. Must be specified if the savefig parameter is set to True. savefig_quality (int): Quality level of the saved image. This can be specified if the savefig_filename is a JPG image. If it is a PNG, quality is ignored. Default quality level for JPGs is matplotlib/Pillow's default of 75%. Returns: The displayed montage, by default. If savefig is set to True, nothing will be returned. If returnfig is set to True, the plotting variables (fig, ax) will be returned. 
Raises:", +"func":1 }, { -"ref":"pyaurorax.ephemeris.classes.ephemeris.Ephemeris.sbtrace", -"url":25, +"ref":"pyaurorax.tools.Mosaic.add_geo_contours", +"url":33, +"doc":"Add geographic contours to a mosaic. Args: lats (ndarray or list): Sequence of geographic latitudes defining a contour. lons (ndarray or list): Sequence of geographic longitudes defining a contour. constant_lats (float, int, or Sequence): Geographic Latitude(s) at which to add line(s) of constant latitude. constant_lons (float, int, or Sequence): Geographic Longitude(s) at which to add line(s) of constant longitude. color (str): The matplotlib color used for the contour(s). linewidth (float or int): The contour thickness. linestyle (str): The matplotlib linestyle used for the contour(s). Returns: The object's contour_data parameter is populated appropriately. Raises: ValueError: issues encountered with supplied parameters.", +"func":1 +}, +{ +"ref":"pyaurorax.tools.Mosaic.add_mag_contours", +"url":33, +"doc":"Add geomagnetic contours to a mosaic. Args: timestamp (datetime.datetime): The timestamp used in computing AACGM coordinates. lats (ndarray or list): Sequence of geomagnetic latitudes defining a contour. lons (ndarray or list): Sequence of geomagnetic longitudes defining a contour. constant_lats (float, int, Sequence): Geomagnetic latitude(s) at which to add contour(s) of constant latitude. constant_lons (float, int, Sequence): Geomagnetic longitude(s) at which to add contours(s) of constant longitude. color (str): The matplotlib color used for the contour(s). linewidth (float or int): The contour thickness. linestyle (str): The matplotlib linestyle used for the contour(s). Returns: The object's contour_data parameter is populated appropriately. Raises: ValueError: issues encountered with supplied parameters.", +"func":1 +}, +{ +"ref":"pyaurorax.tools.MosaicData", +"url":33, +"doc":"Prepared image data for use by mosaic routines. Attributes: site_uid_list (List[str]): List of site unique identifiers contained within this object. timestamps (List[datetime.datetime]): Timestamps of corresponding images. images (Dict[str, numpy.ndarray]): Image data prepared into the necessary format; a dictionary. Keys are the site UID, ndarray is the prepared data. images_dimensions (Dict[str, Tuple]): The image dimensions." +}, +{ +"ref":"pyaurorax.tools.MosaicData.site_uid_list", +"url":33, "doc":"" }, { -"ref":"pyaurorax.ephemeris.classes.ephemeris.Ephemeris.metadata", -"url":25, +"ref":"pyaurorax.tools.MosaicData.timestamps", +"url":33, "doc":"" }, { -"ref":"pyaurorax.ephemeris.classes.ephemeris.Ephemeris.to_json_serializable", -"url":25, -"doc":"Convert object to a JSON-serializable object (ie. translate datetime objects to strings) Returns: a dictionary object that is JSON-serializable", -"func":1 +"ref":"pyaurorax.tools.MosaicData.images", +"url":33, +"doc":"" }, { -"ref":"pyaurorax.ephemeris.classes.search", -"url":26, -"doc":"Class definition for an ephemeris search" +"ref":"pyaurorax.tools.MosaicData.images_dimensions", +"url":33, +"doc":"" }, { -"ref":"pyaurorax.ephemeris.classes.search.Search", -"url":26, -"doc":"Class representing an ephemeris search Note: At least one search criteria from programs, platforms, or instrument_types must be specified. 
Args: start: start timestamp of the search (inclusive) end: end timestamp of the search (inclusive) programs: list of programs to search through, defaults to None platforms: list of platforms to search through, defaults to None instrument_types: list of instrument types to search through, defaults to None metadata_filters: list of dictionaries describing metadata keys and values to filter on, defaults to None e.g. { \"key\": \"string\", \"operator\": \"=\", \"values\": [ \"string\" ] } metadata_filters_logical_operator: the logical operator to use when evaluating metadata filters (either 'AND' or 'OR'), defaults to \"AND\" response_format: JSON representation of desired data response format request: AuroraXResponse object returned when the search is executed request_id: unique ID assigned to the request by the AuroraX API request_url: unique URL assigned to the request by the AuroraX API executed: indicates if the search has been executed/started completed: indicates if the search has finished data_url: the URL where data is accessed query: the query for this request as JSON status: the status of the query data: the ephemeris records found logs: all log messages outputted by the AuroraX API for this request" +"ref":"pyaurorax.tools.MosaicSkymap", +"url":33, +"doc":"Prepared skymap data for use by mosaic routines. Attributes: site_uid_list (List[str]): List of site unique identifiers contained within this object. elevation (List[numpy.ndarray]): List of elevation data, with each element corresponding to each site. Order matches that of the site_uid_list attribute. polyfill_lat (List[numpy.ndarray]): List of latitude polygon data, with each element corresponding to each site. Order matches that of the site_uid_list attribute. polyfill_lon (List[numpy.ndarray]): List of longitude polygon data, with each element corresponding to each site. Order matches that of the site_uid_list attribute." }, { -"ref":"pyaurorax.ephemeris.classes.search.Search.query", -"url":26, -"doc":"Property for the query value" }, { +"ref":"pyaurorax.tools.MosaicSkymap.site_uid_list", +"url":33, "doc":"" }, { -"ref":"pyaurorax.ephemeris.classes.search.Search.execute", -"url":26, -"doc":"Initiate ephemeris search request Raises: pyaurorax.exceptions.AuroraXBadParametersException: missing parameters", -"func":1 }, { +"ref":"pyaurorax.tools.MosaicSkymap.elevation", +"url":33, "doc":"" }, { -"ref":"pyaurorax.ephemeris.classes.search.Search.update_status", -"url":26, -"doc":"Update the status of this ephemeris search request Args: status: the previously-retrieved status of this request (include to avoid requesting it from the API again), defaults to None", -"func":1 }, { +"ref":"pyaurorax.tools.MosaicSkymap.polyfill_lat", +"url":33, "doc":"" }, { -"ref":"pyaurorax.ephemeris.classes.search.Search.check_for_data", -"url":26, -"doc":"Check to see if data is available for this ephemeris search request Returns: True if data is available, else False", -"func":1 }, { +"ref":"pyaurorax.tools.MosaicSkymap.polyfill_lon", +"url":33, "doc":"" }, { -"ref":"pyaurorax.ephemeris.classes.search.Search.get_data", -"url":26, -"doc":"Retrieve the data available for this ephemeris search request", -"func":1 }, { +"ref":"pyaurorax.tools.montage", +"url":34, "doc":"Create montages."
}, { -"ref":"pyaurorax.ephemeris.classes.search.Search.wait", -"url":26, -"doc":"Block and wait for the request to complete and data is available for retrieval Args: poll_interval: time in seconds to wait between polling attempts, defaults to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME verbose: output poll times and other progress messages, defaults to False", +"ref":"pyaurorax.tools.montage.create", +"url":34, +"doc":"Create a montage from a set of images. Args: images (numpy.ndarray): A set of images. Normally this would come directly from a data read call, but can also be any arbitrary set of images. It is anticipated that the order of axes is [rows, cols, num_images] or [row, cols, channels, num_images]. If it is not, then be sure to specify the axis parameter accordingly. timestamp (List[datetime.datetime]): A list of timestamps corresponding to each image. Returns: A pyaurorax.tools.Montage object.", "func":1 }, { -"ref":"pyaurorax.ephemeris.classes.search.Search.cancel", -"url":26, -"doc":"Cancel the ephemeris search request This method returns immediately by default since the API processes this request asynchronously. If you would prefer to wait for it to be completed, set the 'wait' parameter to True. You can adjust the polling time using the 'poll_interval' parameter. Args: wait: wait until the cancellation request has been completed (may wait for several minutes) poll_interval: seconds to wait between polling calls, defaults to STANDARD_POLLING_SLEEP_TIME. verbose: output poll times and other progress messages, defaults to False Returns: 1 on success Raises: pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation", -"func":1 +"ref":"pyaurorax.tools.bounding_box", +"url":35, +"doc":"Methods for working with data in a specific bounding box." }, { -"ref":"pyaurorax.ephemeris.ephemeris", -"url":27, -"doc":"Functions for performing ephemeris searches" +"ref":"pyaurorax.tools.bounding_box.extract_metric", +"url":36, +"doc":"Extract various metrics from a given bounding box." }, { -"ref":"pyaurorax.ephemeris.ephemeris.search", -"url":27, -"doc":"Search for ephemeris records By default, this function will block and wait until the request completes and all data is downloaded. If you don't want to wait, set the 'return_immediately value to True. The Search object will be returned right after the search has been started, and you can use the helper functions as part of that object to get the data when it's done. Note: At least one search criteria from programs, platforms, or instrument_types, must be specified. 
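The montage.create() entry above takes an image stack and matching timestamps and returns a Montage object whose plot() method was documented earlier. A short sketch; the images array and timestamp list are assumed to come from a prior data read:

```python
import pyaurorax

aurorax = pyaurorax.PyAuroraX()

# assumed inputs: "images" shaped [rows, cols, num_images] (or with a channel axis)
# and a matching list of datetime objects "timestamp_list" from an earlier data read
montage = aurorax.tools.montage.create(images, timestamp_list)
montage.plot(figsize=(14, 4), cmap="gray")

# save to disk instead of displaying
montage.plot(cmap="gray", savefig=True, savefig_filename="montage.png")
```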
Args: start: start timestamp of the search (inclusive) end: end timestamp of the search (inclusive) programs: list of programs to search through, defaults to None platforms: list of platforms to search through, defaults to None instrument_types: list of instrument types to search through, defaults to None metadata_filters: list of dictionaries describing metadata keys and values to filter on, defaults to None Example: [{ \"key\": \"nbtrace_region\", \"operator\": \"in\", \"values\": [\"north polar cap\"] }] metadata_filters_logical_operator: the logical operator to use when evaluating metadata filters (either 'AND' or 'OR'), defaults to \"AND\" response_format: JSON representation of desired data response format poll_interval: time in seconds to wait between polling attempts, defaults to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME return_immediately: initiate the search and return without waiting for data to be received, defaults to False verbose: output poll times and other progress messages, defaults to False Returns: A pyaurorax.ephemeris.Search object Raises: pyaurorax.exceptions.AuroraXBadParametersException: missing parameters", +"ref":"pyaurorax.tools.bounding_box.extract_metric.geo", +"url":36, +"doc":"Compute a metric of image data within a geographic lat/lon boundary. Args: images (numpy.ndarray): A set of images. Normally this would come directly from a data read call, but can also be any arbitrary set of images. It is anticipated that the order of axes is [rows, cols, num_images] or [row, cols, channels, num_images]. skymap (pyaurorax.data.ucalgary.Skymap): The skymap corresponding to the image data. altitude_km (int or float): The altitude of the image data in kilometers. lonlat_bounds (Sequence): A 4-element sequence specifying the lat/lon bounds from which to extract the metric. Anticipated order is [lon_0, lon_1, lat_0, lat_1]. metric (str): The name of the metric that is to be computed for the bounded area. Valid metrics are mean , median , sum . Default is median . n_channels (int): By default, function will assume the type of data passed as input - this argument can be used to manually specify the number of channels contained in image data. Returns: A numpy.ndarray containing the metrics computed within elevation range, for all image frames. Raises: ValueError: issue encountered with value supplied in parameter", "func":1 }, { -"ref":"pyaurorax.ephemeris.ephemeris.upload", -"url":27, -"doc":"Upload ephemeris records to AuroraX Args: identifier: AuroraX data source ID records: ephemeris records to upload validate_source: validate all records before uploading, defaults to False Returns: 0 for success, raises exception on error Raises: pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected content error pyaurorax.exceptions.AuroraXUploadException: upload error pyaurorax.exceptions.AuroraXValidationException: data source validation error", +"ref":"pyaurorax.tools.bounding_box.extract_metric.ccd", +"url":36, +"doc":"Compute a metric of image data within a CCD boundary. Args: images (numpy.ndarray): A set of images. Normally this would come directly from a data read call, but can also be any arbitrary set of images. It is anticipated that the order of axes is [rows, cols, num_images] or [row, cols, channels, num_images]. ccd_bounds (List[int]): A 4-element sequence specifying the (inclusive) CCD bounds from which to extract the metric. Anticipated order is [y_0, y_1, x_0, x_1]. 
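The extract_metric.geo() entry above reduces each image frame to a single value inside a geographic box; the ccd, mag, elevation, and azimuth variants documented alongside it behave the same way with different bound definitions. A sketch with assumed images and skymap inputs:

```python
import pyaurorax

aurorax = pyaurorax.PyAuroraX()

# assumed inputs: "images" shaped [rows, cols, num_images] and a matching
# pyaurorax.data.ucalgary.Skymap object named "skymap"
lonlat_bounds = (-95.0, -93.0, 55.0, 56.5)  # [lon_0, lon_1, lat_0, lat_1]
medians = aurorax.tools.bounding_box.extract_metric.geo(
    images,
    skymap,
    altitude_km=110,
    lonlat_bounds=lonlat_bounds,
    metric="median",  # "mean" and "sum" are also documented
)
print(medians.shape)  # one value per image frame
```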
metric (str): The name of the metric that is to be computed for the bounded area. Valid metrics are mean , median , sum . Defaults to median . n_channels (int): By default, function will assume the type of data passed as input - this argument can be used to manually specify the number of channels contained in image data. Returns: A numpy.ndarray containing the metrics computed within CCD bounds, for all image frames. Raises: ValueError: issue encountered with value supplied in parameter", "func":1 }, { -"ref":"pyaurorax.ephemeris.ephemeris.delete", -"url":27, -"doc":"Delete ephemeris records between a timeframe. The API processes this request asynchronously, so this method will return immediately whether or not the data has already been deleted. Args: data_source: data source associated with the data product records (note that identifier, program, platform, and instrument_type are required) start: timestamp marking beginning of range to delete records for, inclusive end: timestamp marking end of range to delete records for, inclusive Returns: 0 on success Raises: pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error pyaurorax.exceptions.AuroraXNotFoundException: source not found pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation pyaurorax.exceptions.AuroraXBadParametersException: missing parameters", +"ref":"pyaurorax.tools.bounding_box.extract_metric.mag", +"url":36, +"doc":"Compute a metric of image data within a magnetic lat/lon boundary. Args: images (numpy.ndarray): A set of images. Normally this would come directly from a data read call, but can also be any arbitrary set of images. It is anticipated that the order of axes is [rows, cols, num_images] or [row, cols, channels, num_images]. skymap (pyaurorax.data.ucalgary.Skymap): The skymap corresponding to the image data. altitude_km (int or float): The altitude of the image data in kilometers. lonlat_bounds (Sequence): A 4-element sequence specifying the magnetic lat/lon bounds from which to extract the metric. Anticipated order is [lon_0, lon_1, lat_0, lat_1]. metric (str): The name of the metric that is to be computed for the bounded area. Valid metrics are mean , median , sum . Default is median . n_channels (int): By default, function will assume the type of data passed as input - this argument can be used to manually specify the number of channels contained in image data. Returns: A numpy.ndarray containing the metrics computed within elevation range, for all image frames. Raises: ValueError: issue encountered with value supplied in parameter", "func":1 }, { -"ref":"pyaurorax.ephemeris.ephemeris.describe", -"url":27, -"doc":"Describe an ephemeris search as a \"SQL-like\" string Args: search_obj: the ephemeris search object to describe Returns: the \"SQL-like\" string describing the ephemeris search object", +"ref":"pyaurorax.tools.bounding_box.extract_metric.elevation", +"url":36, +"doc":"Compute a metric of image data within an elevation boundary. Args: images (numpy.ndarray): A set of images. Normally this would come directly from a data read call, but can also be any arbitrary set of images. It is anticipated that the order of axes is [rows, cols, num_images] or [row, cols, channels, num_images]. skymap (pyaurorax.data.ucalgary.Skymap): The skymap corresponding to the image data. elevation_bounds (Sequence): A 2-element sequence specifying the elevation bounds from which to extract the metric. 
Anticipated order is [el_min, el_max]. metric (str): The name of the metric that is to be computed for the bounded area. Valid metrics are mean , median , sum . Default is median . n_channels (int): By default, function will assume the type of data passed as input - this argument can be used to manually specify the number of channels contained in image data. Returns: A numpy.ndarray containing the metrics computed within elevation range, for all image frames. Raises: ValueError: issue encountered with value supplied in parameter", "func":1 }, { -"ref":"pyaurorax.ephemeris.ephemeris.get_request_url", -"url":27, -"doc":"Get the ephemeris search request URL for a given request ID. This URL can be used for subsequent pyaurorax.requests function calls. Primarily this method facilitates delving into details about a set of already-submitted ephemeris searches. Args: request_id: the request identifier Returns: the request URL", +"ref":"pyaurorax.tools.bounding_box.extract_metric.azimuth", +"url":36, +"doc":"Compute a metric of image data within an azimuthal boundary. Args: images (numpy.ndarray): A set of images. Normally this would come directly from a data read call, but can also be any arbitrary set of images. It is anticipated that the order of axes is [rows, cols, num_images] or [row, cols, channels, num_images]. skymap (pyaurorax.data.ucalgary.Skymap): The skymap corresponding to the image data. azimuth_bounds (Sequence[int | float]: A 2-element sequence specifying the azimuthal bounds from which to extract the metric. Anticipated order is [az_min, az_max]. metric (str): The name of the metric that is to be computed for the bounded area. Valid metrics are mean , median , sum . Default is median . n_channels (int): By default, function will assume the type of data passed as input - this argument can be used to manually specify the number of channels contained in image data. Returns: A numpy.ndarray containing the metrics computed within azimuth range, for all image frames. Raises: ValueError: issue encountered with value supplied in parameter", "func":1 }, { -"ref":"pyaurorax.exceptions", -"url":28, -"doc":"The exceptions module contains exceptions unique to the PyAuroraX library" +"ref":"pyaurorax.tools.mosaic", +"url":37, +"doc":"Prepare data and create mosaics." }, { -"ref":"pyaurorax.exceptions.AuroraXException", -"url":28, -"doc":"Common base class for all non-exit exceptions." +"ref":"pyaurorax.tools.mosaic.prep_skymaps", +"url":37, +"doc":"Prepare skymap data for use by the mosaic routine. This is not time-dependent, so it would only need to be done once. Allows for plotting multiple images on a map, masking the boundaries between images by elevation angle. Args: skymaps (List[pyaurorax.data.ucalgary.Skymap]): The skymaps to prep. height_km (int): The altitude to utilize, in kilometers. site_uid_order (List[str]): The site list order. The order of this list is not important for plotting, but must be consistent with the order of the skymaps parameter. progress_bar_disable (bool): Disable the progress bar. Defaults to False . n_parallel (int): Number of skymaps to prepare in parallel using multiprocessing. Default is 1 . Returns: The prepared skymap data as a pyaurorax.tools.MosaicSkymap object. 
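To make the pyaurorax.tools.bounding_box.extract_metric functions documented above concrete, here is a minimal sketch, assuming the module path is importable as shown and that an image stack and its matching skymap have already been obtained through the data download/read routines covered later in this reference (images and skymap are placeholder variable names, not part of the API):

    import pyaurorax.tools.bounding_box.extract_metric as extract_metric

    # placeholders: 'images' is a numpy.ndarray with axis order [rows, cols, num_images],
    # 'skymap' is the matching pyaurorax.data.ucalgary.Skymap object
    median_by_frame = extract_metric.elevation(
        images,
        skymap,
        elevation_bounds=(20, 60),   # [el_min, el_max], in degrees
        metric="median",             # one of "mean", "median", "sum"
    )
    # one value per image frame, computed within the 20-60 degree elevation band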
Raises: ValueError: issues encountered with supplied parameters.", +"func":1 }, { -"ref":"pyaurorax.exceptions.AuroraXNotFoundException", -"url":28, -"doc":"The AuroraX record was not found" }, { +"ref":"pyaurorax.tools.mosaic.prep_images", +"url":37, +"doc":"Prepare the image data for use in a mosaic. Args: image_list (List[pyaurorax.data.ucalgary.Data]): List of image data. Each element of the list is the data for each site. data_attribute (str): The data attribute to use when prepping the images. Either data or calibrated_data . Default is data . Returns: The prepared data, as a pyaurorax.tools.MosaicData object. Raises: ValueError: issues encountered with supplied parameters.", +"func":1 }, { -"ref":"pyaurorax.exceptions.AuroraXDuplicateException", -"url":28, -"doc":"A duplicate record already exists" }, { +"ref":"pyaurorax.tools.mosaic.create", +"url":37, +"doc":"Create a mosaic object. Args: prepped_data (pyaurorax.tools.MosaicData): The prepared mosaic data. Generated from a prior prep_images() function call. prepped_skymap (pyaurorax.tools.MosaicSkymap): The prepared skymap data. Generated from a prior prep_skymaps() function call. frame_idx (int): The frame number to generate a mosaic for. cartopy_projection (cartopy.crs.Projection): The cartopy projection to use when creating the mosaic. min_elevation (int): The minimum elevation cutoff when projecting images on the map, in degrees. Default is 5 . cbar_colorcmap (str): The matplotlib colormap to use for the rendered image data. Default is grey . Commonly used colormaps are: - REGO: gist_heat - THEMIS ASI: gray - TREx Blue: Blues_r - TREx NIR: gray - TREx RGB: None A list of all available colormaps can be found on the [matplotlib documentation](https: matplotlib.org/stable/gallery/color/colormap_reference.html). image_intensity_scales (List or Dict): Ranges for scaling images. Either a list with 2 elements which will scale all sites with the same range, or a dictionary which can be used for scaling each site differently. Example of uniform scaling across all sites: image_intensity_scales = [2000, 8000] Example of scaling each site differently: image_intensity_scales = {\"fsmi\": [1000, 10000], \"gill\": [2000, 8000]} Returns: The generated pyaurorax.tools.Mosaic object. Raises: ValueError: issues with supplied parameters.", +"func":1 }, { -"ref":"pyaurorax.exceptions.AuroraXValidationException", -"url":28, -"doc":"Validation of data failed" }, { +"ref":"pyaurorax.tools.keogram", +"url":38, +"doc":"Generate keograms." }, { +"ref":"pyaurorax.tools.keogram.create", +"url":38, +"doc":"Create a keogram from a set of images. Args: images (numpy.ndarray): A set of images. Normally this would come directly from a data read call, but can also be any arbitrary set of images. It is anticipated that the order of axes is [rows, cols, num_images] or [row, cols, channels, num_images]. If it is not, then be sure to specify the axis parameter accordingly. timestamp (List[datetime.datetime]): A list of timestamps corresponding to each image. axis (int): The axis to extract the keogram slice from. Default is 0 , meaning the rows (or Y) axis. Returns: A pyaurorax.tools.Keogram object.
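As a short usage sketch of keogram.create described above (hedged: the image stack and timestamp list are assumed to come from a prior data read call, and the variable names are placeholders):

    import pyaurorax.tools.keogram as keogram_tools

    # placeholders: 'images' has axis order [rows, cols, num_images], and
    # 'timestamp_list' is a list of datetime.datetime objects, one per frame
    keo = keogram_tools.create(images, timestamp_list, axis=0)

    # the returned pyaurorax.tools.Keogram object can then be plotted
    # (see the Keogram.plot() entry later in this reference)
    keo.plot(y_type="ccd", title="example keogram")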
Raises: ValueError: issue with supplied parameters.", +"func":1 }, { -"ref":"pyaurorax.exceptions.AuroraXMaxRetriesException", -"url":28, -"doc":"The maximum number of retries for the request has been reached" }, { +"ref":"pyaurorax.tools.keogram.create_custom", +"url":38, +"doc":"Create a keogram from a custom slice of a set of images. The slice used is defined by a set of points, in CCD, geographic, or geomagnetic coordinates, within the bounds of the image data. The keogram is created from the bottom up, meaning the first point will correspond to the bottom of the keogram data. Args: images (numpy.ndarray): A set of images. Normally this would come directly from a data read call, but can also be any arbitrary set of images. It is anticipated that the order of axes is [rows, cols, num_images] or [row, cols, channels, num_images]. If it is not, then be sure to specify the axis parameter accordingly. timestamp (List[datetime.datetime]): A list of timestamps corresponding to each image. coordinate_system (str): The coordinate system in which input points are defined. Valid options are \"ccd\", \"geo\", or \"mag\". width (int): Width of the desired keogram slice, in CCD pixel units. x_locs (Sequence[float | int]): Sequence of points giving the x-coordinates that define a path through the image data, from which to build the keogram. y_locs (Sequence[float | int]): Sequence of points giving the y-coordinates that define a path through the image data, from which to build the keogram. preview (Optional[bool]): When True, the first frame in images will be displayed, with the keogram slice plotted. skymap (Skymap): The skymap to use in georeferencing when working in geographic or magnetic coordinates. altitude_km (float | int): The altitude of the image data, in km, to use in georeferencing when working in geographic or magnetic coordinates. metric (str): The metric used to compute values for each keogram pixel. Valid options are \"median\", \"mean\", and \"sum\". Defaults to \"median\". Returns: A pyaurorax.tools.Keogram object. Raises:", +"func":1 }, { -"ref":"pyaurorax.exceptions.AuroraXBadParametersException", -"url":28, -"doc":"Bad parameters were given in the request" }, { +"ref":"pyaurorax.tools.calibration", +"url":39, +"doc":"Perform various calibration procedures on image data." }, { -"ref":"pyaurorax.exceptions.AuroraXUnauthorizedException", -"url":28, -"doc":"A privileged operation was attempted without authorization" }, { +"ref":"pyaurorax.tools.calibration.rego", +"url":39, +"doc":"Apply various calibration adjustments to a single raw image or set of raw images. Args: images (numpy.ndarray): Raw images to perform calibration procedures on. cal_flatfield (pyaurorax.data.ucalgary.Calibration): Calibration object containing the flatfield data to utilize. This field is required if step_flatfield_correction is set to True. cal_rayleighs (pyaurorax.data.ucalgary.Calibration): Calibration object containing the Rayleighs data to utilize. This field is required if step_rayleighs_conversion is set to True. step_dark_frame_correction (bool): Perform the dark frame correction step. Defaults to True . step_flatfield_correction (bool): Perform the flatfield correction step. Defaults to True . Note that the cal_flatfield parameter must be supplied if this is True. step_rayleighs_conversion (bool): Perform the Rayleighs conversion step. Defaults to True. Note that the cal_rayleighs parameter must be supplied if this is True. exposure_length_sec (float): Force the exposure length to be a certain value.
Default is REGO's nominal operating mode exposure length of 2.0 seconds . Adjusting this field should be done with caution. Returns: The calibrated images. The shape of the calibrated data will be the same as the input images. The dtype of the calibrated data will depend on whether the Rayleighs conversion was performed. If it was, a float32 array will be returned. If it wasn't, the dtype will be the same as the input images' dtype. Raises: ValueError: issues encountered with supplied parameters.", +"func":1 }, { -"ref":"pyaurorax.exceptions.AuroraXConflictException", -"url":28, -"doc":"A conflict occurred while modifying records" }, { +"ref":"pyaurorax.tools.calibration.trex_nir", +"url":39, +"doc":"Apply various calibration adjustments to a single raw image or set of raw images. Args: images (numpy.ndarray): Raw images to perform calibration procedures on. cal_flatfield (pyaurorax.data.ucalgary.Calibration): Calibration object containing the flatfield data to utilize. This field is required if step_flatfield_correction is set to True. cal_rayleighs (pyaurorax.data.ucalgary.Calibration): Calibration object containing the Rayleighs data to utilize. This field is required if step_rayleighs_conversion is set to True. step_dark_frame_correction (bool): Perform the dark frame correction step. Defaults to True . step_flatfield_correction (bool): Perform the flatfield correction step. Defaults to True . Note that the cal_flatfield parameter must be supplied if this is True. step_rayleighs_conversion (bool): Perform the Rayleighs conversion step. Defaults to True. Note that the cal_rayleighs parameter must be supplied if this is True. exposure_length_sec (float): Force the exposure length to be a certain value. Default is TREx NIR's nominal operating mode exposure length of 5.0 seconds . Adjusting this field should be done with caution. Returns: The calibrated images. The shape of the calibrated data will be the same as the input images. The dtype of the calibrated data will depend on whether the Rayleighs conversion was performed. If it was, a float32 array will be returned. If it wasn't, the dtype will be the same as the input images' dtype. Raises: ValueError: issues encountered with supplied parameters.", +"func":1 }, { -"ref":"pyaurorax.exceptions.AuroraXUploadException", -"url":28, -"doc":"Error occurred during upload operation" }, { +"ref":"pyaurorax.tools.ccd_contour", +"url":40, +"doc":"Obtain contours in pixel coordinates from a skymap for plotting over CCD images." }, { -"ref":"pyaurorax.exceptions.AuroraXUnexpectedEmptyResponse", -"url":28, -"doc":"An empty response was received when it wasn't expected" }, { +"ref":"pyaurorax.tools.ccd_contour.elevation", +"url":40, +"doc":"Obtain CCD coordinates of a line of constant elevation. Args: skymap (pyaurorax.data.ucalgary.Skymap): The skymap corresponding to the CCD image data to generate contours for. constant_elevation (int | float): The elevation angle, in degrees from the horizon, at which to create the contour. n_points (int | float): Optionally specify the number of points used to define a contour. By default a reasonable value is selected automatically. remove_edge_cases (bool): Due to the nature of skymaps, contours near the edge of the CCD bounds will often show undesired behaviour, appearing flattened along the CCD boundaries. This is expected, and these points are removed by default, purely for aesthetic reasons. Set this keyword to False to keep all points in the contour.
Returns: A tuple (x_pix, y_pix) of numpy arrays containing the coordinates, in pixel units, of the elevation contour. Raises: ValueError: invalid elevation supplied.", +"func":1 }, { -"ref":"pyaurorax.exceptions.AuroraXDataRetrievalException", -"url":28, -"doc":"Error occurred while retrieving search data" }, { +"ref":"pyaurorax.tools.ccd_contour.azimuth", +"url":40, +"doc":"Obtain CCD coordinates of a line of constant azimuth. Args: skymap (pyaurorax.data.ucalgary.Skymap): The skymap corresponding to the CCD image data to generate contours for. constant_azimuth (int | float): The azimuth angle, in degrees, at which to create the contour. min_elevation (int | float): Optionally specify the elevation angle at which the contour begins. Defaults to 5. max_elevation (int | float): Optionally specify the elevation angle at which the contour ends. Defaults to 90. n_points (int | float): Optionally specify the number of points used to define a contour. By default a reasonable value is selected automatically. remove_edge_cases (bool): Due to the nature of skymaps, contours near the edge of the CCD bounds will often show undesired behaviour, appearing flattened along the CCD boundaries. This is expected, and these points are removed by default, purely for aesthetic reasons. Set this keyword to False to keep all points in the contour. Returns: A tuple (x_pix, y_pix) of numpy arrays containing the coordinates, in pixel units, of the azimuth contour. Raises: ValueError: invalid azimuth supplied.", +"func":1 }, { -"ref":"pyaurorax.exceptions.AuroraXTimeoutException", -"url":28, -"doc":"A timeout was reached while communicating with the AuroraX API" }, { +"ref":"pyaurorax.tools.ccd_contour.geo", +"url":40, +"doc":"Obtain CCD coordinates of a line of constant geographic latitude, constant geographic longitude, or a custom contour defined in geographic coordinates. Args: skymap (pyaurorax.data.ucalgary.Skymap): The skymap corresponding to the CCD image data to generate contours for. altitude_km (int or float): The altitude of the image data to create contours for, in kilometers. lats (ndarray or list): Sequence of geographic latitudes defining a contour. lons (ndarray or list): Sequence of geographic longitudes defining a contour. constant_lats (float or int): Geographic latitude at which to create a line of constant latitude. constant_lons (float or int): Geographic longitude at which to create a line of constant longitude. n_points (int or float): Optionally specify the number of points used to define a contour. By default a reasonable value is selected automatically. remove_edge_cases (bool): Due to the nature of skymaps, contours near the edge of the CCD bounds will often show undesired behaviour, appearing flattened along the CCD boundaries. This is expected, and these points are removed by default, purely for aesthetic reasons. Set this keyword to False to keep all points in the contour. Returns: A tuple (x_pix, y_pix) of numpy arrays containing the coordinates, in pixel units, of the contour.
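A brief sketch of overlaying contours from the functions above on a single CCD frame (hedged: image and skymap are placeholders obtained elsewhere, the chosen angles and altitude are arbitrary examples, and the matplotlib calls are ordinary plotting rather than part of this API):

    import matplotlib.pyplot as plt
    import pyaurorax.tools.ccd_contour as ccd_contour

    # placeholders: 'image' is one CCD frame (numpy.ndarray), 'skymap' is its
    # matching pyaurorax.data.ucalgary.Skymap object
    x_el, y_el = ccd_contour.elevation(skymap, constant_elevation=30)
    x_lat, y_lat = ccd_contour.geo(skymap, altitude_km=110, constant_lats=65.0)

    plt.imshow(image, cmap="gray", origin="lower")
    plt.plot(x_el, y_el, color="white")                   # 30 degree elevation ring
    plt.plot(x_lat, y_lat, color="cyan", linestyle="--")  # 65N geographic latitude at 110 km
    plt.show()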
Raises: ValueError: invalid coordinates supplied.", +"func":1 }, { -"ref":"pyaurorax.exceptions.AuroraXSearchException", -"url":28, -"doc":"An error occured in the API while performing a search" }, { +"ref":"pyaurorax.tools.ccd_contour.mag", +"url":40, +"doc":"Obtain CCD coordinates of a line of constant magnetic latitude, constant magnetic longitude, or a custom contour defined in magnetic coordinates. Args: skymap (pyaurorax.data.ucalgary.Skymap): The skymap corresponding to the CCD image data to generate contours for. timestamp (datetime.datetime): The timestamp used for AACGM conversions. altitude_km (int or float): The altitude of the image data to create contours for, in kilometers. lats (ndarray or list): Sequence of magnetic latitudes defining a contour. lons (ndarray or list): Sequence of magnetic longitudes defining a contour. constant_lats (float or int): Magnetic latitude at which to create a line of constant latitude. constant_lons (float or int): Magnetic longitude at which to create a line of constant longitude. n_points (int or float): Optionally specify the number of points used to define a contour. By default a reasonable value is selected automatically. remove_edge_cases (bool): Due to the nature of skymaps, contours near the edge of the CCD bounds will often show undesired behaviour, appearing flattened along the CCD boundaries. This is expected, and these points are removed by default, purely for aesthetic reasons. Set this keyword to False to keep all points in the contour. Returns: A tuple (x_pix, y_pix) of numpy arrays containing the coordinates, in pixel units, of the contour. Raises: ValueError: invalid coordinates supplied.", +"func":1 }, { -"ref":"pyaurorax.location", -"url":29, -"doc":"The Location module provides a class used throughout the PyAuroraX library to manage lat/lon positions of different things." }, { +"ref":"pyaurorax.tools.classes", +"url":41, +"doc":"Class definitions for data analysis objects." }, { -"ref":"pyaurorax.location.Location", -"url":29, -"doc":"Class representing an AuroraX location (ie. geographic coordinates, GSM coordinates, northern/southern B-trace magnetic footprints) The numbers are in decimal degrees format and range from -90 to 90 for latitude and -180 to 180 for longitude. Attributes: lat: latitude value lon: longitude value Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model." }, { -"ref":"pyaurorax.location.Location.lat", -"url":29, -"doc":"" }, { +"ref":"pyaurorax.tools.classes.mosaic", +"url":42, +"doc":"Class representation for a mosaic." }, { +"ref":"pyaurorax.tools.classes.mosaic.MosaicSkymap", +"url":42, +"doc":"Prepared skymap data for use by mosaic routines. Attributes: site_uid_list (List[str]): List of site unique identifiers contained within this object. elevation (List[numpy.ndarray]): List of elevation data, with each element corresponding to each site. Order matches that of the site_uid_list attribute. polyfill_lat (List[numpy.ndarray]): List of latitude polygon data, with each element corresponding to each site. Order matches that of the site_uid_list attribute. polyfill_lon (List[numpy.ndarray]): List of longitude polygon data, with each element corresponding to each site. Order matches that of the site_uid_list attribute."
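Putting the mosaic pieces together, a minimal end-to-end sketch of the prep and create steps from pyaurorax.tools.mosaic described earlier (hedged: skymap_list and data_list are placeholders for per-site skymaps and image data read elsewhere, the site codes are examples only, and the cartopy projection is an arbitrary choice):

    import cartopy.crs
    import pyaurorax.tools.mosaic as mosaic_tools

    # placeholders: one skymap and one data object per site, in matching order
    prepped_skymap = mosaic_tools.prep_skymaps(skymap_list, 110, site_uid_order=["fsmi", "gill"])
    prepped_data = mosaic_tools.prep_images(data_list)

    # generate a mosaic for the first frame on a simple projection
    projection = cartopy.crs.NearsidePerspective(central_longitude=-100.0, central_latitude=55.0)
    m = mosaic_tools.create(prepped_data, prepped_skymap, 0, projection, min_elevation=10)
    m.plot([-130, -70, 40, 75])   # map_extent: [min_lon, max_lon, min_lat, max_lat]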
}, { -"ref":"pyaurorax.location.Location.lon", -"url":29, +"ref":"pyaurorax.tools.classes.mosaic.MosaicSkymap.site_uid_list", +"url":42, "doc":"" }, { -"ref":"pyaurorax.metadata", -"url":30, -"doc":"AuroraX metadata schemas describe the intended structure of metadata stored in ephemeris and data product records. This module provides functions for interacting with the schemas. Note that all functions and classes from submodules are all imported at this level of the metadata module. They can be referenced from here instead of digging in deeper to the submodules." -}, -{ -"ref":"pyaurorax.metadata.get_data_products_schema", -"url":30, -"doc":"Retrieve the data products metadata schema for a data source Args: identifier: the AuroraX data source ID Returns: the data products metadata schema for the data source", -"func":1 +"ref":"pyaurorax.tools.classes.mosaic.MosaicSkymap.elevation", +"url":42, +"doc":"" }, { -"ref":"pyaurorax.metadata.get_ephemeris_schema", -"url":30, -"doc":"Retrieve the ephemeris metadata schema for a data source Args: identifier: the AuroraX data source ID Returns: the ephemeris metadata schema for the data source", -"func":1 +"ref":"pyaurorax.tools.classes.mosaic.MosaicSkymap.polyfill_lat", +"url":42, +"doc":"" }, { -"ref":"pyaurorax.metadata.validate", -"url":30, -"doc":"Validate a metadata record against a schema. This checks that the key names match and there aren't fewer or more keys than expected. Args: schema: the metadata schema to validate against record: metadata record to validate Returns: True if the metadata record is valid, False if it is not", -"func":1 +"ref":"pyaurorax.tools.classes.mosaic.MosaicSkymap.polyfill_lon", +"url":42, +"doc":"" }, { -"ref":"pyaurorax.metadata.metadata", -"url":31, -"doc":"Functions for interacting with metadata filters" +"ref":"pyaurorax.tools.classes.mosaic.MosaicData", +"url":42, +"doc":"Prepared image data for use by mosaic routines. Attributes: site_uid_list (List[str]): List of site unique identifiers contained within this object. timestamps (List[datetime.datetime]): Timestamps of corresponding images. images (Dict[str, numpy.ndarray]): Image data prepared into the necessary format; a dictionary. Keys are the site UID, ndarray is the prepared data. images_dimensions (Dict[str, Tuple]): The image dimensions." }, { -"ref":"pyaurorax.metadata.metadata.validate", -"url":31, -"doc":"Validate a metadata record against a schema. This checks that the key names match and there aren't fewer or more keys than expected. 
Args: schema: the metadata schema to validate against record: metadata record to validate Returns: True if the metadata record is valid, False if it is not", -"func":1 +"ref":"pyaurorax.tools.classes.mosaic.MosaicData.site_uid_list", +"url":42, +"doc":"" }, { -"ref":"pyaurorax.metadata.metadata.get_ephemeris_schema", -"url":31, -"doc":"Retrieve the ephemeris metadata schema for a data source Args: identifier: the AuroraX data source ID Returns: the ephemeris metadata schema for the data source", -"func":1 +"ref":"pyaurorax.tools.classes.mosaic.MosaicData.timestamps", +"url":42, +"doc":"" }, { -"ref":"pyaurorax.metadata.metadata.get_data_products_schema", -"url":31, -"doc":"Retrieve the data products metadata schema for a data source Args: identifier: the AuroraX data source ID Returns: the data products metadata schema for the data source", -"func":1 +"ref":"pyaurorax.tools.classes.mosaic.MosaicData.images", +"url":42, +"doc":"" }, { -"ref":"pyaurorax.requests", -"url":32, -"doc":"The requests module contains helper methods for retrieving data from an AuroraX request. Note that all functions and classes from submodules are all imported at this level of the requests module. They can be referenced from here instead of digging in deeper to the submodules." +"ref":"pyaurorax.tools.classes.mosaic.MosaicData.images_dimensions", +"url":42, +"doc":"" }, { -"ref":"pyaurorax.requests.get_data", -"url":32, -"doc":"Retrieve the data for a request Args: data_url: the URL for the data of a request, response_format: the response format to send as post data, defaults to None skip_serializing: skip any object serializing, defaults to False Raises: pyaurorax.exceptions.AuroraXDataRetrievalException: error retrieving data Returns: the data for this request", -"func":1 +"ref":"pyaurorax.tools.classes.mosaic.Mosaic", +"url":42, +"doc":"Class representation for a generated mosaic. Attributes: polygon_data (matplotlib.collections.PolyCollection): Generated polygons containing rendered data. cartopy_projection (cartopy.crs.Projection): Cartopy projection to utilize. contour_data (Dict[str, List[Any ): Generated contour data." }, { -"ref":"pyaurorax.requests.get_logs", -"url":32, -"doc":"Retrieve the logs for a request Args: request_url: the URL of the request information Returns: the log messages for the request", -"func":1 +"ref":"pyaurorax.tools.classes.mosaic.Mosaic.polygon_data", +"url":42, +"doc":"" }, { -"ref":"pyaurorax.requests.get_status", -"url":32, -"doc":"Retrieve the status of a request Args: request_url: the URL of the request information Returns: the status information for the request", -"func":1 +"ref":"pyaurorax.tools.classes.mosaic.Mosaic.cartopy_projection", +"url":42, +"doc":"" }, { -"ref":"pyaurorax.requests.wait_for_data", -"url":32, -"doc":"Block and wait for the data to be made available for a request Args: request_url: the URL of the request information poll_interval: seconds to wait between polling calls, defaults to STANDARD_POLLING_SLEEP_TIME verbose: output poll times and other progress messages, defaults to False Returns: the status information for the request", -"func":1 +"ref":"pyaurorax.tools.classes.mosaic.Mosaic.contour_data", +"url":42, +"doc":"" }, { -"ref":"pyaurorax.requests.cancel", -"url":32, -"doc":"Cancel the request at the given URL. This method returns immediately by default since the API processes this request asynchronously. If you would prefer to wait for it to be completed, set the 'wait' parameter to True. 
You can adjust the polling time using the 'poll_interval' parameter. Args: request_url: the URL string of the request to be canceled wait: set to True to block until the cancellation request has been completed (may wait for several minutes) poll_interval: seconds to wait between polling calls, defaults to STANDARD_POLLING_SLEEP_TIME. verbose: if True then output poll times and other progress, defaults to False Returns: 0 on success Raises: pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation", +"ref":"pyaurorax.tools.classes.mosaic.Mosaic.plot", +"url":42, +"doc":"Generate a plot of the mosaic data. Either display it (default behaviour), save it to disk (using the savefig parameter), or return the matplotlib plot object for further usage (using the returnfig parameter). Args: map_extent (List[int]): Latitude/longitude range to be visible on the rendered map. This is a list of 4 integers and/or floats, in the order of [min_lon, max_lon, min_lat, max_lat]. figsize (tuple): The matplotlib figure size to use when plotting. For example figsize=(14,4) . rayleighs (bool): Set to True if the data being plotted is in Rayleighs. Defaults to False . max_rayleighs (int): Max intensity scale for Rayleighs. Defaults to 20000 . ocean_color (str): Colour of the ocean. Default is cartopy's default shade of blue. Colours can be supplied as a word, or hexcode prefixed with a ' ' character (ie. 55AADD ). land_color (str): Colour of the land. Default is grey . Colours can be supplied as a word, or hexcode prefixed with a ' ' character (ie. 41BB87 ). land_edgecolor (str): Color of the land edges. Default is 8A8A8A . Colours can be supplied as a word, or hexcode prefixed with a ' ' character. borders_color (str): Color of the country borders. Default is AEAEAE . Colours can be supplied as a word, or hexcode prefixed with a ' ' character. borders_disable (bool): Disbale rendering of the borders. Default is False . cbar_colorcmap (str): The matplotlib colormap to use for the plotted color bar. Default is grey . Commonly used colormaps are: - REGO: gist_heat - THEMIS ASI: gray - TREx Blue: Blues_r - TREx NIR: gray - TREx RGB: None A list of all available colormaps can be found on the [matplotlib documentation](https: matplotlib.org/stable/gallery/color/colormap_reference.html). returnfig (bool): Instead of displaying the image, return the matplotlib figure object. This allows for further plot manipulation, for example, adding labels or a title in a different location than the default. Remember - if this parameter is supplied, be sure that you close your plot after finishing work with it. This can be achieved by doing plt.close(fig) . Note that this method cannot be used in combination with savefig . savefig (bool): Save the displayed image to disk instead of displaying it. The parameter savefig_filename is required if this parameter is set to True. Defaults to False . savefig_filename (str): Filename to save the image to. Must be specified if the savefig parameter is set to True. savefig_quality (int): Quality level of the saved image. This can be specified if the savefig_filename is a JPG image. If it is a PNG, quality is ignored. Default quality level for JPGs is matplotlib/Pillow's default of 75%. Returns: The displayed montage, by default. If savefig is set to True, nothing will be returned. If returnfig is set to True, the plotting variables (fig, ax) will be returned. 
Raises:", "func":1 }, { -"ref":"pyaurorax.requests.list", -"url":32, -"doc":"Retrieve a list of search requests matching certain criteria. Administrators only. Args: search_type: the type of search request, valid values are 'conjunction', 'ephemeris', or 'data_product'. Exclusion of value will return all search requests of any type, defaults to None active: return searches that are currently active or not, exclude for both, defaults to None start: start timestamp for narrowing down search timeframes, defaults to None end: end timestamp for narrowing down search timeframes, defaults to None file_size: filter by result file size, measured in KB, defaults to None result_count: filter by result count, defaults to None query_duration: filter by query duration, measured in milliseconds, defaults to None error_condition: filter by if an error occurred or not, exclude for both, defaults to None Returns: list of matching search requests Raises: pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation", +"ref":"pyaurorax.tools.classes.mosaic.Mosaic.add_geo_contours", +"url":42, +"doc":"Add geographic contours to a mosaic. Args: lats (ndarray or list): Sequence of geographic latitudes defining a contour. lons (ndarray or list): Sequence of geographic longitudes defining a contour. constant_lats (float, int, or Sequence): Geographic Latitude(s) at which to add line(s) of constant latitude. constant_lons (float, int, or Sequence): Geographic Longitude(s) at which to add line(s) of constant longitude. color (str): The matplotlib color used for the contour(s). linewidth (float or int): The contour thickness. linestyle (str): The matplotlib linestyle used for the contour(s). Returns: The object's contour_data parameter is populated appropriately. Raises: ValueError: issues encountered with supplied parameters.", "func":1 }, { -"ref":"pyaurorax.requests.delete", -"url":32, -"doc":"Entirely remove a search request from the AuroraX database. Administrators only. Args: request_id: search request UUID Returns: 0 on success, raises error on failure Raises: pyaurorax.exceptions.AuroraXNotFoundException: data source not found", +"ref":"pyaurorax.tools.classes.mosaic.Mosaic.add_mag_contours", +"url":42, +"doc":"Add geomagnetic contours to a mosaic. Args: timestamp (datetime.datetime): The timestamp used in computing AACGM coordinates. lats (ndarray or list): Sequence of geomagnetic latitudes defining a contour. lons (ndarray or list): Sequence of geomagnetic longitudes defining a contour. constant_lats (float, int, Sequence): Geomagnetic latitude(s) at which to add contour(s) of constant latitude. constant_lons (float, int, Sequence): Geomagnetic longitude(s) at which to add contours(s) of constant longitude. color (str): The matplotlib color used for the contour(s). linewidth (float or int): The contour thickness. linestyle (str): The matplotlib linestyle used for the contour(s). Returns: The object's contour_data parameter is populated appropriately. 
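A small sketch of decorating an existing mosaic with the two contour methods above (hedged: m is a placeholder for a previously generated pyaurorax.tools.Mosaic object, and the timestamp and latitudes are arbitrary examples):

    import datetime

    # placeholder: 'm' is a pyaurorax.tools.Mosaic object from a prior create() call
    # add geographic lines of constant latitude
    m.add_geo_contours(constant_lats=[60.0, 70.0, 80.0], color="black", linestyle="--")

    # add a single geomagnetic latitude contour, converted with AACGM for the given time
    m.add_mag_contours(datetime.datetime(2023, 2, 24, 6, 15), constant_lats=65.0, color="red", linewidth=1.5)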
Raises: ValueError: issues encountered with supplied parameters.", "func":1 }, { -"ref":"pyaurorax.requests.requests", -"url":33, -"doc":"Functions for interacting with AuroraX requests" -}, -{ -"ref":"pyaurorax.requests.requests.FIRST_FOLLOWUP_SLEEP_TIME", -"url":33, -"doc":"Initial sleep time when waiting for data" -}, -{ -"ref":"pyaurorax.requests.requests.STANDARD_POLLING_SLEEP_TIME", -"url":33, -"doc":"Polling sleep time when waiting for data (after the initial sleep time)" +"ref":"pyaurorax.tools.classes.montage", +"url":43, +"doc":"Class representation for a montage." }, { -"ref":"pyaurorax.requests.requests.ALLOWED_SEARCH_LISTING_TYPES", -"url":33, -"doc":"Allowed types when listing search requests" +"ref":"pyaurorax.tools.classes.montage.Montage", +"url":43, +"doc":"Class representation for a montage Attributes: data (numpy.ndarray): The derived montage data. timestamp (List[datetime.datetime]): Timestamps corresponding to each montage image." }, { -"ref":"pyaurorax.requests.requests.get_status", -"url":33, -"doc":"Retrieve the status of a request Args: request_url: the URL of the request information Returns: the status information for the request", +"ref":"pyaurorax.tools.classes.montage.Montage.plot", +"url":43, +"doc":"Generate a plot of the montage data. Either display it (default behaviour), save it to disk (using the savefig parameter), or return the matplotlib plot object for further usage (using the returnfig parameter). Args: figsize (tuple): The matplotlib figure size to use when plotting. For example figsize=(14,4) . cmap (str): The matplotlib colormap to use. Commonly used colormaps are: - REGO: gist_heat - THEMIS ASI: gray - TREx Blue: Blues_r - TREx NIR: gray - TREx RGB: None A list of all available colormaps can be found on the [matplotlib documentation](https: matplotlib.org/stable/gallery/color/colormap_reference.html). returnfig (bool): Instead of displaying the image, return the matplotlib figure object. This allows for further plot manipulation, for example, adding labels or a title in a different location than the default. Remember - if this parameter is supplied, be sure that you close your plot after finishing work with it. This can be achieved by doing plt.close(fig) . Note that this method cannot be used in combination with savefig . savefig (bool): Save the displayed image to disk instead of displaying it. The parameter savefig_filename is required if this parameter is set to True. Defaults to False . savefig_filename (str): Filename to save the image to. Must be specified if the savefig parameter is set to True. savefig_quality (int): Quality level of the saved image. This can be specified if the savefig_filename is a JPG image. If it is a PNG, quality is ignored. Default quality level for JPGs is matplotlib/Pillow's default of 75%. Returns: The displayed montage, by default. If savefig is set to True, nothing will be returned. If returnfig is set to True, the plotting variables (fig, ax) will be returned. 
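For completeness, a minimal sketch of the Montage.plot call just described (hedged: mont is a placeholder for a pyaurorax.tools.Montage object produced by the montage creation routine elsewhere in pyaurorax.tools, which is not shown in this excerpt):

    # placeholder: 'mont' is a pyaurorax.tools.Montage object created elsewhere
    mont.plot(figsize=(14, 4), cmap="gray")

    # or save it to disk instead of displaying it
    mont.plot(cmap="gray", savefig=True, savefig_filename="montage.png")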
Raises: ValueError: Issues with the y-axis choice.", "func":1 }, { -"ref":"pyaurorax.requests.requests.get_data", -"url":33, -"doc":"Retrieve the data for a request Args: data_url: the URL for the data of a request, response_format: the response format to send as post data, defaults to None skip_serializing: skip any object serializing, defaults to False Raises: pyaurorax.exceptions.AuroraXDataRetrievalException: error retrieving data Returns: the data for this request", -"func":1 +"ref":"pyaurorax.tools.classes.keogram", +"url":44, +"doc":"Class representation for a keogram." }, { -"ref":"pyaurorax.requests.requests.get_logs", -"url":33, -"doc":"Retrieve the logs for a request Args: request_url: the URL of the request information Returns: the log messages for the request", -"func":1 +"ref":"pyaurorax.tools.classes.keogram.Keogram", +"url":44, +"doc":"Class representation for a keogram Attributes: data (numpy.ndarray): The derived keogram data. timestamp (List[datetime.datetime]): Timestamps corresponding to each keogram slice. ccd_y (numpy.ndarray): The y-axis representing CCD Y coordinates for the keogram. mag_y (numpy.ndarray): The y-axis representing magnetic latitude for the keogram. geo_y (numpy.ndarray): The y-axis representing geographic latitude for the keogram." }, { -"ref":"pyaurorax.requests.requests.wait_for_data", -"url":33, -"doc":"Block and wait for the data to be made available for a request Args: request_url: the URL of the request information poll_interval: seconds to wait between polling calls, defaults to STANDARD_POLLING_SLEEP_TIME verbose: output poll times and other progress messages, defaults to False Returns: the status information for the request", +"ref":"pyaurorax.tools.classes.keogram.Keogram.set_geographic_latitudes", +"url":44, +"doc":"Set the geographic latitude values for this keogram, using the specified skymap data. The data will be set to the geo_y attribute of this Keogram object, which can then be used for plotting and/or further analysis. Note: currently only specific altitudes are supported at this time, matching the ones in the passed-in skymap object. A future release will implement an interpolation routine to allow for a wider range of altitudes. Args: skymap (pyaurorax.data.ucalgary.Skymap): The skymap object to use. This parameter is required. altitude (int): The altitude to use, in kilometers. If not specified, it will use the default in the skymap object. If the specified altitude is not valid, a ValueError will be raised. Returns: None. The Keogram object's geo_y attribute will be updated. Raises: ValueError: Issues with specified altitude.", "func":1 }, { -"ref":"pyaurorax.requests.requests.cancel", -"url":33, -"doc":"Cancel the request at the given URL. This method returns immediately by default since the API processes this request asynchronously. If you would prefer to wait for it to be completed, set the 'wait' parameter to True. You can adjust the polling time using the 'poll_interval' parameter. Args: request_url: the URL string of the request to be canceled wait: set to True to block until the cancellation request has been completed (may wait for several minutes) poll_interval: seconds to wait between polling calls, defaults to STANDARD_POLLING_SLEEP_TIME. 
verbose: if True then output poll times and other progress, defaults to False Returns: 0 on success Raises: pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation", +"ref":"pyaurorax.tools.classes.keogram.Keogram.set_magnetic_latitudes", +"url":44, +"doc":"Set the magnetic latitude values for this keogram, using the specified skymap data. AACGMv2 will be utilized to perform the calculations. The resulting data will be set to the mag_y attribute of this Keogram object, which can then be used for plotting and/or further analysis. Note: currently only specific altitudes are supported at this time, matching the ones in the passed-in skymap object. A future release will implement an interpolation routine to allow for a wider range of altitudes. Args: skymap (pyaurorax.data.ucalgary.Skymap): The skymap object to use. This parameter is required. timestamp (datetime.datetime): The timestamp to use when converting skymap data to magnetic coordinates. Utilizes AACGMv2 to do the conversion. altitude (int): The altitude to use. If not specified, it will use the default in the skymap object. If the specified altitude is not valid, a ValueError will be raised. Returns: None. The Keogram object's mag_y attribute will be updated. Raises: ValueError: Issues with specified altitude.", "func":1 }, { -"ref":"pyaurorax.requests.requests.list", -"url":33, -"doc":"Retrieve a list of search requests matching certain criteria. Administrators only. Args: search_type: the type of search request, valid values are 'conjunction', 'ephemeris', or 'data_product'. Exclusion of value will return all search requests of any type, defaults to None active: return searches that are currently active or not, exclude for both, defaults to None start: start timestamp for narrowing down search timeframes, defaults to None end: end timestamp for narrowing down search timeframes, defaults to None file_size: filter by result file size, measured in KB, defaults to None result_count: filter by result count, defaults to None query_duration: filter by query duration, measured in milliseconds, defaults to None error_condition: filter by if an error occurred or not, exclude for both, defaults to None Returns: list of matching search requests Raises: pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation", +"ref":"pyaurorax.tools.classes.keogram.Keogram.plot", +"url":44, +"doc":"Generate a plot of the keogram data. Either display it (default behaviour), save it to disk (using the savefig parameter), or return the matplotlib plot object for further usage (using the returnfig parameter). Args: y_type (str): Type of y-axis to use when plotting. Options are ccd , mag , or geo . The default is ccd . This parameter is required. title (str): The title to display above the plotted keogram. figsize (tuple): The matplotlib figure size to use when plotting. For example figsize=(14,4) . cmap (str): The matplotlib colormap to use. Commonly used colormaps are: - REGO: gist_heat - THEMIS ASI: gray - TREx Blue: Blues_r - TREx NIR: gray - TREx RGB: None A list of all available colormaps can be found on the [matplotlib documentation](https: matplotlib.org/stable/gallery/color/colormap_reference.html). aspect (str or float): The matplotlib imshow aspect ration to use. A common value for this is auto . 
All valid values can be found on the [matplotlib documentation](https: matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.imshow.html). axes_visible (bool): Display the axes. Default is True . xlabel (str): The x-axis label to use. Default is Time (UTC) . ylabel (str): The y-axis label to use. Default is based on y_type. xtick_increment (int): The x-axis tick increment to use. Default is 100. ytick_increment (int): The y-axis tick increment to use. Default is 50. returnfig (bool): Instead of displaying the image, return the matplotlib figure object. This allows for further plot manipulation, for example, adding labels or a title in a different location than the default. Remember - if this parameter is supplied, be sure that you close your plot after finishing work with it. This can be achieved by doing plt.close(fig) . Note that this method cannot be used in combination with savefig . savefig (bool): Save the displayed image to disk instead of displaying it. The parameter savefig_filename is required if this parameter is set to True. Defaults to False . savefig_filename (str): Filename to save the image to. Must be specified if the savefig parameter is set to True. savefig_quality (int): Quality level of the saved image. This can be specified if the savefig_filename is a JPG image. If it is a PNG, quality is ignored. Default quality level for JPGs is matplotlib/Pillow's default of 75%. Returns: The displayed keogram, by default. If savefig is set to True, nothing will be returned. If returnfig is set to True, the plotting variables (fig, ax) will be returned. Raises: ValueError: Issues with the y-axis choice.", "func":1 }, { -"ref":"pyaurorax.requests.requests.delete", -"url":33, -"doc":"Entirely remove a search request from the AuroraX database. Administrators only. Args: request_id: search request UUID Returns: 0 on success, raises error on failure Raises: pyaurorax.exceptions.AuroraXNotFoundException: data source not found", -"func":1 +"ref":"pyaurorax.data", +"url":45, +"doc":"Instrument data downloading and reading module. This module presently has support for data provided by the University of Calgary, such as THEMIS ASI, REGO, and the Transition Region Explorer (TREx) instruments." }, { -"ref":"pyaurorax.sources", -"url":34, -"doc":"AuroraX data sources are unique instruments that produce ephemeris or data product records. Note that all functions and classes from submodules are all imported at this level of the sources module. They can be referenced from here instead of digging in deeper to the submodules." +"ref":"pyaurorax.data.DataManager", +"url":45, +"doc":"The DataManager object is initialized within every PyAuroraX object. It acts as a way to access the submodules and carry over configuration information in the super class." }, { -"ref":"pyaurorax.sources.FORMAT_BASIC_INFO", -"url":34, -"doc":"Data sources are returned with the basic information: identifier, program, platform, instrument type, source type, and display name" +"ref":"pyaurorax.data.DataManager.ucalgary", +"url":45, +"doc":"Access to the ucalgary submodule from within a PyAuroraX object." }, { -"ref":"pyaurorax.sources.FORMAT_BASIC_INFO_WITH_METADATA", -"url":34, -"doc":"Data sources are returned with the basic information, plus the metadata" +"ref":"pyaurorax.data.DataManager.list_datasets", +"url":45, +"doc":"List available datasets from all providers Args: name (str): Supply a name used for filtering. 
If that name is found in the available dataset names received from the API, it will be included in the results. This parameter is optional. timeout (int): Represents how many seconds to wait for the API to send data before giving up. The default is 10 seconds, or the api_timeout value in the super class' pyaurorax.PyAuroraX object. This parameter is optional. Returns: A list of [ Dataset ](https: docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html pyucalgarysrs.data.classes.Dataset) objects. Raises: pyaurorax.exceptions.AuroraXAPIError: An API error was encountered.", +"func":1 }, { -"ref":"pyaurorax.sources.FORMAT_FULL_RECORD", -"url":34, -"doc":"Data sources are returned with all information about them. This includes at least: identifier, program, platform, instrument type, source type, display name, metadata, owner, maintainers, the ephemeris metadata schema, and the data products meatadata schema." +"ref":"pyaurorax.data.DataManager.list_datasets_in_table", +"url":45, +"doc":"Print available datasets from all providers in a table Args: name (str): Supply a name used for filtering. If that name is found in the available dataset names received from the API, it will be included in the results. This parameter is optional. max_width (int): Maximum width of the table. Default is 200 . This parameter is optional. timeout (int): Represents how many seconds to wait for the API to send data before giving up. The default is 10 seconds, or the api_timeout value in the super class' pyaurorax.PyAuroraX object. This parameter is optional. Returns: Printed table. Raises: pyaurorax.exceptions.AuroraXAPIError: An API error was encountered.", +"func":1 }, { -"ref":"pyaurorax.sources.FORMAT_IDENTIFIER_ONLY", -"url":34, -"doc":"Data sources are returned with only the identifier" +"ref":"pyaurorax.data.DataManager.list_observatories", +"url":45, +"doc":"List information about observatories utilized by all providers. Args: instrument_array (str): The instrument array to list observatories for. Valid values are: themis_asi, rego, trex_rgb, trex_nir, and trex_blue. uid (str): Supply a observatory unique identifier used for filtering (usually 4-letter site code). If that UID is found in the available observatories received from the API, it will be included in the results. This parameter is optional. timeout (int): Represents how many seconds to wait for the API to send data before giving up. The default is 10 seconds, or the api_timeout value in the super class' pyaurorax.PyAuroraX object. This parameter is optional. Returns: A list of [ Observatory ](https: docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html pyucalgarysrs.data.classes.Observatory) objects. Raises: pyaurorax.exceptions.AuroraXAPIError: An API error was encountered.", +"func":1 }, { -"ref":"pyaurorax.sources.FORMAT_DEFAULT", -"url":34, -"doc":"Default data source format (basic info)" +"ref":"pyaurorax.data.DataManager.list_observatories_in_table", +"url":45, +"doc":"Print available observatories for a given instrument array in a table Args: instrument_array (str): The instrument array to list observatories for. Valid values are: themis_asi, rego, trex_rgb, trex_nir, and trex_blue. uid (str): Supply a observatory unique identifier used for filtering (usually 4-letter site code). If that UID is found in the available observatories received from the API, it will be included in the results. This parameter is optional. max_width (int): Maximum width of the table. Default is 200 . This parameter is optional. 
timeout (int): Represents how many seconds to wait for the API to send data before giving up. The default is 10 seconds, or the api_timeout value in the super class' pyaurorax.PyAuroraX object. This parameter is optional. Returns: Printed table. Raises: pyaurorax.exceptions.AuroraXAPIError: An API error was encountered.", +"func":1 }, { -"ref":"pyaurorax.sources.SOURCE_TYPE_NOT_APPLICABLE", -"url":34, -"doc":"Data source 'source_type' category for a specially-curated event list" +"ref":"pyaurorax.data.ucalgary", +"url":46, +"doc":"Data downloading and reading routines for data provided by the University of Calgary." }, { -"ref":"pyaurorax.sources.SOURCE_TYPE_EVENT_LIST", -"url":34, -"doc":"Data source 'source_type' category for a specially-curated event list" +"ref":"pyaurorax.data.ucalgary.UCalgaryManager", +"url":46, +"doc":"The UCalgaryManager object is initialized within every PyAuroraX object. It acts as a way to access the submodules and carry over configuration information in the super class." }, { -"ref":"pyaurorax.sources.SOURCE_TYPE_GROUND", -"url":34, -"doc":"Data source 'source_type' category for a ground instrument" +"ref":"pyaurorax.data.ucalgary.UCalgaryManager.readers", +"url":46, +"doc":"Access to the read submodule from within a PyAuroraX object." }, { -"ref":"pyaurorax.sources.SOURCE_TYPE_HEO", -"url":34, -"doc":"Data source 'source_type' category for a highly-elliptical orbiting satellite" +"ref":"pyaurorax.data.ucalgary.UCalgaryManager.list_datasets", +"url":46, +"doc":"List available datasets Args: name (str): Supply a name used for filtering. If that name is found in the available dataset names received from the API, it will be included in the results. This parameter is optional. timeout (int): Represents how many seconds to wait for the API to send data before giving up. The default is 10 seconds, or the api_timeout value in the super class' pyaurorax.PyAuroraX object. This parameter is optional. Returns: A list of [ Dataset ](https: docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html pyucalgarysrs.data.classes.Dataset) objects. Raises: pyaurorax.exceptions.AuroraXAPIError: An API error was encountered.", +"func":1 }, { -"ref":"pyaurorax.sources.SOURCE_TYPE_LEO", -"url":34, -"doc":"Data source 'source_type' category for a low-earth orbiting satellite" +"ref":"pyaurorax.data.ucalgary.UCalgaryManager.list_observatories", +"url":46, +"doc":"List information about observatories Args: instrument_array (str): The instrument array to list observatories for. Valid values are: themis_asi, rego, trex_rgb, trex_nir, and trex_blue. uid (str): Supply a observatory unique identifier used for filtering (usually 4-letter site code). If that UID is found in the available observatories received from the API, it will be included in the results. This parameter is optional. timeout (int): Represents how many seconds to wait for the API to send data before giving up. The default is 10 seconds, or the api_timeout value in the super class' pyaurorax.PyAuroraX object. This parameter is optional. Returns: A list of [ Observatory ](https: docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html pyucalgarysrs.data.classes.Observatory) objects. 
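A quick sketch of the discovery calls described above, using the PyAuroraX object that these docstrings reference (the name filter and site UID are examples only):

    import pyaurorax

    aurorax = pyaurorax.PyAuroraX()

    # list datasets, optionally filtering by name
    datasets = aurorax.data.ucalgary.list_datasets(name="THEMIS_ASI")
    print("%d datasets found" % (len(datasets)))

    # print the same information as a table (DataManager.list_datasets_in_table)
    aurorax.data.list_datasets_in_table(name="THEMIS_ASI")

    # list observatories for a given instrument array, filtered to one site
    observatories = aurorax.data.ucalgary.list_observatories("themis_asi", uid="atha")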
Raises: pyaurorax.exceptions.AuroraXAPIError: An API error was encountered.", +"func":1 }, { -"ref":"pyaurorax.sources.SOURCE_TYPE_LUNAR", -"url":34, -"doc":"Data source 'source_type' category for a lunar orbiting satellite" +"ref":"pyaurorax.data.ucalgary.UCalgaryManager.list_supported_read_datasets", +"url":46, +"doc":"List the datasets which have file reading capabilities supported. Returns: A list of the dataset names with file reading support.", +"func":1 }, { -"ref":"pyaurorax.sources.list", -"url":34, -"doc":"Retrieve all data source records (using params to filter as desired) Args: program: the program to filter for, defaults to None platform: the platform to filter for, defaults to None instrument_type: the instrument type to filter for, defaults to None source_type: the data source type to filter for, defaults to None. Options are in the pyaurorax.sources module, or at the top level using the pyaurorax.SOURCE_TYPE_ variables. owner: the owner's email address to filter for, defaults to None format: the format of the data sources returned, defaults to \"full_record\". Other options are in the pyaurorax.sources module, or at the top level using the pyaurorax.FORMAT_ variables. order: the value to order results by (identifier, program, platform, instrument_type, display_name, owner), defaults to \"identifier\" include_stats: include additional stats information about the data source (note: slower response time since an additional request must be done for each data source), defaults to False Returns: any data sources matching the requested parameters Raises: pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error", +"ref":"pyaurorax.data.ucalgary.UCalgaryManager.is_read_supported", +"url":46, +"doc":"Check if a given dataset has file reading support. Not all datasets available in the UCalgary Space Remote Sensing Open Data Platform have special readfile routines in this library. This is because some datasets are in basic formats such as JPG or PNG, so unique functions aren't necessary. We leave it up to the user to open these basic files in whichever way they prefer. Use the list_supported_read_datasets() function to see all datasets that have special file reading functionality in this library. Args: dataset_name (str): The dataset name to check if file reading is supported. This parameter is required. Returns: Boolean indicating if file reading is supported.", "func":1 }, { -"ref":"pyaurorax.sources.search", -"url":34, -"doc":"Search for data source records (using params to filter as desired) This is very similar to the 'list' function, however multiple programs, platforms, and/or instrument types can be supplied to this function. The 'list' function only supports single values for those parameters. Args: programs: the programs to filter for, defaults to [] platforms: the platforms to filter for, defaults to [] instrument_type: the instrument types to filter for, defaults to [] format: the format of the data sources returned, defaults to \"full_record\". Other options are in the pyaurorax.sources module, or at the top level using the pyaurorax.FORMAT_ variables. 
order: the value to order results by (identifier, program, platform, instrument_type, display_name), defaults to \"identifier\" include_stats: include additional stats information about the data source (note: slower response time since an additional request must be done for each data source), defaults to False Returns: any data sources matching the requested parameters Raises: pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error", +"ref":"pyaurorax.data.ucalgary.UCalgaryManager.download", +"url":46, +"doc":"Download data from the UCalgary Space Remote Sensing Open Data Platform. The parameters dataset_name , start , and end are required. All other parameters are optional. Note that usage of the site and device UID filters applies differently to some datasets. For example, both fields can be used for most raw and keogram data, but only site UID can be used for skymap datasets, and only device UID can be used for calibration datasets. If fields are specified during a call in which site or device UID is not used, a UserWarning is displayed to provide the user with feedback about this detail. Args: dataset_name (str): Name of the dataset to download data for. Use the list_datasets() function to get the possible values for this parameter. One example is \"THEMIS_ASI_RAW\". Note that dataset names are case sensitive. This parameter is required. start (datetime.datetime): Start timestamp to use (inclusive), expected to be in UTC. Any timezone data will be ignored. This parameter is required. end (datetime.datetime): End timestamp to use (inclusive), expected to be in UTC. Any timezone data will be ignored. This parameter is required. site_uid (str): The site UID to filter for. If specified, data will be downloaded for only the site matching the given value. If excluded, data for all available sites will be downloaded. An example value could be 'atha', meaning all data from the Athabasca observatory will be downloaded for the given dataset name, start, and end times. This parameter is optional. device_uid (str): The device UID to filter for. If specified, data will be downloaded for only the device matching the given value. If excluded, data for all available devices will be downloaded. An example value could be 'themis02', meaning all data matching that device will be downloaded for the given dataset name, start, and end times. This parameter is optional. n_parallel (int): Number of data files to download in parallel. Default value is 5. Adjust as needed for your internet connection. This parameter is optional. overwrite (bool): By default, data will not be re-downloaded if it already exists locally. Use the overwrite parameter to force re-downloading. Default is False . This parameter is optional. progress_bar_disable (bool): Disable the progress bar. Default is False . This parameter is optional. progress_bar_ncols (int): Number of columns for the progress bar (straight passthrough of the ncols parameter in a tqdm progress bar). This parameter is optional. See Notes section below for further information. progress_bar_ascii (str): ASCII value to use when constructing the visual aspect of the progress bar (straight passthrough of the ascii parameter in a tqdm progress bar). This parameter is optional. See Notes section below for further details. timeout (int): Represents how many seconds to wait for the API to send data before giving up. 
The default is 10 seconds, or the api_timeout value in the super class' pyaurorax.PyAuroraX object. This parameter is optional. Returns: A [FileDownloadResult](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.FileDownloadResult) object containing details about what data files were downloaded. Raises: pyaurorax.exceptions.AuroraXDownloadError: an error was encountered while downloading a specific file pyaurorax.exceptions.AuroraXAPIError: an API error was encountered Notes: The progress_bar_ parameters can be used to enable/disable/adjust the progress bar. Excluding the progress_bar_disable parameter, all others are straight pass-throughs to the tqdm progress bar function. The progress_bar_ncols parameter allows for adjusting the width. The progress_bar_ascii parameter allows for adjusting the appearance of the progress bar. And the progress_bar_desc parameter allows for adjusting the description at the beginning of the progress bar. Further details can be found on the [tqdm documentation](https://tqdm.github.io/docs/tqdm/#tqdm-objects). Data downloading will use the download_data_root_path variable within the super class' object ([PyAuroraX](../../index.html#pyaurorax.PyAuroraX)) to determine where to save data to. If you'd like to change this path to somewhere else you can change that variable before your download() call, like so: import pyaurorax aurorax = pyaurorax.PyAuroraX() aurorax.data_download_root_path = \"some_new_path\" aurorax.data.download(dataset_name, start, end) ", "func":1 }, { -"ref":"pyaurorax.sources.get", -"url":34, -"doc":"Retrieve a specific data source record Args: program: the program name platform: the platform name instrument_type: the instrument type name format: the format of the data sources returned, defaults to \"full_record\". Other options are in the pyaurorax.sources module, or at the top level using the pyaurorax.FORMAT_ variables. include_stats: include additional stats information about the data source (note: slower response time since an additional request must be done for each data source), defaults to False Returns: the data source matching the requested parameters Raises: pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error pyaurorax.exceptions.AuroraXNotFoundException: source not found", +"ref":"pyaurorax.data.ucalgary.UCalgaryManager.download_using_urls", +"url":46, +"doc":"Download data from the UCalgary Space Remote Sensing Open Data Platform using a FileListingResponse object. This would be used in cases where more customization is needed than the generic download() function. One example of using this function would start by using get_urls() to retrieve the list of URLs available for download, then further process this list to fewer files based on some other requirement (i.e. time down-sampling such as one file per hour). Lastly, use this function to download the new custom set of URLs. Args: file_listing_response (FileListingResponse): A [FileListingResponse](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.FileListingResponse) object returned from a get_urls() call, which contains a list of URLs to download for a specific dataset. This parameter is required. n_parallel (int): Number of data files to download in parallel. Default value is 5. Adjust as needed for your internet connection. This parameter is optional. 
overwrite (bool): By default, data will not be re-downloaded if it already exists locally. Use the overwrite parameter to force re-downloading. Default is False . This parameter is optional. progress_bar_disable (bool): Disable the progress bar. Default is False . This parameter is optional. progress_bar_ncols (int): Number of columns for the progress bar (straight passthrough of the ncols parameter in a tqdm progress bar). This parameter is optional. See Notes section below for further information. progress_bar_ascii (str): ASCII value to use when constructing the visual aspect of the progress bar (straight passthrough of the ascii parameter in a tqdm progress bar). This parameter is optional. See Notes section below for further details. timeout (int): Represents how many seconds to wait for the API to send data before giving up. The default is 10 seconds, or the api_timeout value in the super class' pyaurorax.PyAuroraX object. This parameter is optional. Returns: A [ FileDownloadResult ](https: docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html pyucalgarysrs.data.classes.FileDownloadResult) object containing details about what data files were downloaded. Raises: pyaurorax.exceptions.AuroraXDownloadError: an error was encountered while downloading a specific file pyaurorax.exceptions.AuroraXAPIError: an API error was encountered Notes: The progress_bar_ parameters can be used to enable/disable/adjust the progress bar. Excluding the progress_bar_disable parameter, all others are straight pass-throughs to the tqdm progress bar function. The progress_bar_ncols parameter allows for adjusting the width. The progress_bar_ascii parameter allows for adjusting the appearance of the progress bar. And the progress_bar_desc parameter allows for adjusting the description at the beginning of the progress bar. Further details can be found on the [tqdm documentation](https: tqdm.github.io/docs/tqdm/ tqdm-objects). Data downloading will use the download_data_root_path variable within the super class' object ([ PyAuroraX ]( / /index.html pyaurorax.PyAuroraX to determine where to save data to. If you'd like to change this path to somewhere else you can change that variable before your download() call, like so: import pyaurorax aurorax = pyaurorax.PyAuroraX() aurorax.data_download_root_path = \"some_new_path\" aurorax.data.download(dataset_name, start, end) ", "func":1 }, { -"ref":"pyaurorax.sources.get_using_filters", -"url":34, -"doc":"Retrieve all data source records matching a filter Args: program: the program to filter for, defaults to None platform: the platform to filter for, defaults to None instrument_type: the instrument type to filter for, defaults to None source_type: the data source type to filter for, defaults to None. Options are in the pyaurorax.sources module, or at the top level using the pyaurorax.SOURCE_TYPE_ variables. owner: the owner's email address to filter for, defaults to None format: the format of the data sources returned, defaults to \"full_record\". Other options are in the pyaurorax.sources module, or at the top level using the pyaurorax.FORMAT_ variables. 
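A minimal usage sketch for the download() call documented above; the timestamps are illustrative, the dataset name and site UID ("THEMIS_ASI_RAW", 'atha') come from the docstring's own examples, and the aurorax.data.ucalgary access path is assumed from the module layout in this index.

```python
import datetime
import pyaurorax

aurorax = pyaurorax.PyAuroraX()

# download one hour of THEMIS ASI raw data from the Athabasca site
start = datetime.datetime(2022, 1, 1, 6, 0, 0)
end = datetime.datetime(2022, 1, 1, 6, 59, 59)
res = aurorax.data.ucalgary.download("THEMIS_ASI_RAW", start, end, site_uid="atha", n_parallel=5)

# res is a FileDownloadResult; count and total_bytes are documented attributes
print("downloaded %d files (%d bytes)" % (res.count, res.total_bytes))
```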
order: the value to order results by (identifier, program, platform, instrument_type, display_name, owner), defaults to \"identifier\" include_stats: include additional stats information about the data source (note: slower response time since an additional request must be done for each data source), defaults to False Returns: any data sources matching the requested parameters Raises: pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error", +"ref":"pyaurorax.data.ucalgary.UCalgaryManager.get_urls", +"url":46, +"doc":"Get URLs of data files. The parameters dataset_name , start , and end are required. All other parameters are optional. Note that usage of the site and device UID filters applies differently to some datasets. For example, both fields can be used for most raw and keogram data, but only site UID can be used for skymap datasets, and only device UID can be used for calibration datasets. If fields are specified during a call in which site or device UID is not used, a UserWarning is displayed to provide the user with feedback about this detail. Args: dataset_name (str): Name of the dataset to download data for. Use the list_datasets() function to get the possible values for this parameter. One example is \"THEMIS_ASI_RAW\". Note that dataset names are case sensitive. This parameter is required. start (datetime.datetime): Start timestamp to use (inclusive), expected to be in UTC. Any timezone data will be ignored. This parameter is required. end (datetime.datetime): End timestamp to use (inclusive), expected to be in UTC. Any timezone data will be ignored. This parameter is required. site_uid (str): The site UID to filter for. If specified, data will be downloaded for only the site matching the given value. If excluded, data for all available sites will be downloaded. An example value could be 'atha', meaning all data from the Athabasca observatory will be downloaded for the given dataset name, start, and end times. This parameter is optional. device_uid (str): The device UID to filter for. If specified, data will be downloaded for only the device matching the given value. If excluded, data for all available devices will be downloaded. An example value could be 'themis02', meaning all data matching that device will be downloaded for the given dataset name, start, and end times. This parameter is optional. timeout (int): Represents how many seconds to wait for the API to send data before giving up. The default is 10 seconds, or the api_timeout value in the super class' pyaurorax.PyAuroraX object. This parameter is optional. Returns: A [FileListingResponse](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.FileListingResponse) object containing a list of the available URLs, among other values. Raises: pyaurorax.exceptions.AuroraXAPIError: an API error was encountered", "func":1 }, { -"ref":"pyaurorax.sources.get_using_identifier", -"url":34, -"doc":"Retrieve data source record matching an identifier Args: identifier: the AuroraX unique ID for the data source format: the format of the data sources returned, defaults to \"full_record\". Other options are in the pyaurorax.sources module, or at the top level using the pyaurorax.FORMAT_ variables. 
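To make the get_urls() / download_using_urls() workflow described above concrete, the sketch below down-samples a file listing before downloading. The one-file-per-hour slice assumes one-minute files for the example dataset and is purely illustrative; the access path is assumed as in the other sketches.

```python
import datetime
import pyaurorax

aurorax = pyaurorax.PyAuroraX()

start = datetime.datetime(2022, 1, 1, 0, 0, 0)
end = datetime.datetime(2022, 1, 1, 23, 59, 59)
listing = aurorax.data.ucalgary.get_urls("THEMIS_ASI_RAW", start, end, site_uid="atha")

# keep roughly one URL per hour (assumes one-minute files), then download the reduced set;
# reassigning the documented urls attribute is done here purely for illustration
listing.urls = listing.urls[::60]
res = aurorax.data.ucalgary.download_using_urls(listing, n_parallel=5)
print(res.count, "files downloaded")
```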
include_stats: include additional stats information about the data source (note: slower response time since an additional request must be done for each data source), defaults to False Returns: the data source matching the identifier Raises: pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error", +"ref":"pyaurorax.data.ucalgary.UCalgaryManager.read", +"url":46, +"doc":"Read in data files for a given dataset. Note that only one type of dataset's data should be read in using a single call. Args: dataset (Dataset): The dataset object for which the files are associated with. This parameter is required. file_list (List[str], List[Path], str, Path): The files to read in. Absolute paths are recommended, but not technically necessary. This can be a single string for a file, or a list of strings to read in multiple files. This parameter is required. n_parallel (int): Number of data files to read in parallel using multiprocessing. Default value is 1. Adjust according to your computer's available resources. This parameter is optional. first_record (bool): Only read in the first record in each file. This is the same as the first_frame parameter in the themis-imager-readfile and trex-imager-readfile libraries, and is a read optimization if you only need one image per minute, as opposed to the full temporal resolution of data (e.g., 3sec cadence). This parameter is optional. no_metadata (bool): Skip reading of metadata. This is a minor optimization if the metadata is not needed. Default is False . This parameter is optional. quiet (bool): Do not print out errors while reading data files, if any are encountered. Any files that encounter errors will be, as usual, accessible via the problematic_files attribute of the returned Data object. This parameter is optional. Returns: A [ Data ](https: docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html pyucalgarysrs.data.classes.Data) object containing the data read in, among other values. Raises: pyaurorax.exceptions.AuroraXUnsupportedReadError: an unsupported dataset was used when trying to read files. pyaurorax.exceptions.AuroraXError: a generic read error was encountered Notes: - For users who are familiar with the themis-imager-readfile and trex-imager-readfile libraries, the read function provides a near-identical usage. Further improvements have been integrated, and those libraries are anticipated to be deprecated at some point in the future.", "func":1 }, { -"ref":"pyaurorax.sources.get_stats", -"url":34, -"doc":"Retrieve statistics for a data source Args: identifier: the AuroraX unique ID for the data source format: the format of the data sources returned, defaults to \"full_record\". Other options are in the pyaurorax.sources module, or at the top level using the pyaurorax.FORMAT_ variables. slow: retrieve the stats using a slower, but more accurate method, defaults to False Returns: the data source including additional stats information about it Raises: pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error pyaurorax.exceptions.AuroraXNotFoundException: data source not found pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error", +"ref":"pyaurorax.data.ucalgary.UCalgaryManager.download_best_skymap", +"url":46, +"doc":"Download the skymap file that best matches the parameters supplied. Args: dataset_name (str): Name of the dataset to download data for. Use the list_datasets() function to get the possible values for this parameter. 
One example is \"THEMIS_ASI_SKYMAP_IDLSAV\". Note that dataset names are case sensitive. This parameter is required. site_uid (str): The site UID to evaluate. timestamp (datetime.datetime): The timestamp to use for deciding the best skymap, expected to be in UTC. Any timezone data will be ignored. This parameter is required. timeout (int): Represents how many seconds to wait for the API to send data before giving up. The default is 10 seconds, or the api_timeout value in the super class' pyaurorax.PyAuroraX object. This parameter is optional. Returns: A [ FileDownloadResult ](https: docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html pyucalgarysrs.data.classes.FileDownloadResult) object containing details about what data files were downloaded. Raises: ValueError: issue with supplied timestamp pyaurorax.exceptions.AuroraXAPIError: an API error was encountered", "func":1 }, { -"ref":"pyaurorax.sources.add", -"url":34, -"doc":"Add a new data source to AuroraX Args: data_source: the data source to add (note: it must be a fully-defined DataSource object) Returns: the newly created data source Raises: pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error pyaurorax.exceptions.AuroraXDuplicateException: duplicate data source, already exists", +"ref":"pyaurorax.data.ucalgary.UCalgaryManager.download_best_flatfield_calibration", +"url":46, +"doc":"Download the flatfield calibration file that best matches the parameters supplied. Args: dataset_name (str): Name of the dataset to download data for. Use the list_datasets() function to get the possible values for this parameter. One example is \"THEMIS_ASI_SKYMAP_IDLSAV\". Note that dataset names are case sensitive. This parameter is required. device_uid (str): The device UID to evaluate. timestamp (datetime.datetime): The timestamp to use for deciding the best skymap, expected to be in UTC. Any timezone data will be ignored. This parameter is required. timeout (int): Represents how many seconds to wait for the API to send data before giving up. The default is 10 seconds, or the api_timeout value in the super class' pyaurorax.PyAuroraX object. This parameter is optional. Returns: A [ FileDownloadResult ](https: docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html pyucalgarysrs.data.classes.FileDownloadResult) object containing details about what data files were downloaded. Raises: ValueError: issue with supplied timestamp pyaurorax.exceptions.AuroraXAPIError: an API error was encountered", "func":1 }, { -"ref":"pyaurorax.sources.delete", -"url":34, -"doc":"Delete a data source from AuroraX Args: identifier: the AuroraX unique ID for the data source Returns: 0 on success, raises error if an issue was encountered Raises: pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error pyaurorax.exceptions.AuroraXNotFoundException: data source not found pyaurorax.exceptions.AuroraXConflictException: conflict of some type", +"ref":"pyaurorax.data.ucalgary.UCalgaryManager.download_best_rayleighs_calibration", +"url":46, +"doc":"Download the Rayleighs calibration file that best matches the parameters supplied. Args: dataset_name (str): Name of the dataset to download data for. Use the list_datasets() function to get the possible values for this parameter. One example is \"REGO_CALIBRATION_RAYLEIGHS_IDLSAV\". Note that dataset names are case sensitive. This parameter is required. 
device_uid (str): The device UID to evaluate. timestamp (datetime.datetime): The timestamp to use for deciding the best calibration file, expected to be in UTC. Any timezone data will be ignored. This parameter is required. timeout (int): Represents how many seconds to wait for the API to send data before giving up. The default is 10 seconds, or the api_timeout value in the super class' pyaurorax.PyAuroraX object. This parameter is optional. Returns: A [ FileDownloadResult ](https: docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html pyucalgarysrs.data.classes.FileDownloadResult) object containing details about what data files were downloaded. Raises: ValueError: issue with supplied timestamp pyaurorax.exceptions.AuroraXAPIError: an API error was encountered", "func":1 }, { -"ref":"pyaurorax.sources.update", -"url":34, -"doc":"Update a data source in AuroraX This operation will fully replace the data source with the data_source argument passed in. Be sure that the data_source object is complete. If the data source is missing the value for identifier, program, platform, instrument type, source type, or display name, the update will fail and raise a AuroraXBadParametersException exception. Args: data_source: the data source to update (note: it must be a fully-defined DataSource object with the values set to what you want AuroraX to update it to) Returns: the updated data source Raises: pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error pyaurorax.exceptions.AuroraXNotFoundException: data source not found pyaurorax.exceptions.AuroraXBadParametersException: missing parameters", -"func":1 +"ref":"pyaurorax.data.ucalgary.Observatory", +"url":46, +"doc":"Representation for an observatory. Attributes: uid (str): 4-letter unique identifier (traditionally referred to as the site UID) full_name (str): full location string for the observatory geodetic_latitude (float): geodetic latitude for the observatory, in decimal format (-90 to 90) geodetic_longitude (float): geodetic longitude for the observatory, in decimal format (-180 to 180) provider (str): Data provider." }, { -"ref":"pyaurorax.sources.update_partial", -"url":34, -"doc":"Partially update a data source in AuroraX (omitted fields are ignored) Args: identifier: the AuroraX unique ID for the data source, defaults to None program: the new program for the data source, defaults to None platform: the new platform for the data source, defaults to None instrument_type: the new instrument type for the data source, defaults to None source_type: the new source type for the data source, defaults to None. Options are in the pyaurorax.sources module, or at the top level using the pyaurorax.SOURCE_TYPE_ variables. 
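The three download_best_*() helpers documented above all follow the same pattern: give them a dataset name, a site or device UID, and a timestamp, and they fetch the single best-matching file. A hedged sketch for the skymap case (the timestamp is illustrative; the dataset name and site UID follow the docstring examples, and the access path is assumed):

```python
import datetime
import pyaurorax

aurorax = pyaurorax.PyAuroraX()

# skymap that best matches the Athabasca site at a given (illustrative) time
ts = datetime.datetime(2022, 1, 1, 6, 0, 0)
res = aurorax.data.ucalgary.download_best_skymap(
    "THEMIS_ASI_SKYMAP_IDLSAV",
    site_uid="atha",
    timestamp=ts,
)
print(res.filenames)
```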
display_name: the new display name for the data source, defaults to None metadata: the new metadata for the data source, defaults to None maintainers: the new maintainer AuroraX account email addresses, defaults to None ephemeris_metadata_schema: a list of dictionaries capturing the metadata keys and values that can appear in ephemeris records associated with the data source, defaults to None data_product_metadata_schema: a list of dictionaries capturing the metadata keys and values that can appear in data product records associated with the data source, defaults to None Returns: the updated data source Raises: pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error pyaurorax.exceptions.AuroraXNotFoundException: data source not found pyaurorax.exceptions.AuroraXBadParametersException: missing parameters", +"ref":"pyaurorax.data.ucalgary.Observatory.pretty_print", +"url":46, +"doc":"A special print output for this class.", "func":1 }, { -"ref":"pyaurorax.sources.DataSource", -"url":34, -"doc":"Data source object Attributes: identifier: the unique AuroraX ID for this data source program: the program for this data source platform: the platform for this data source instrument_type: the instrument type for this data source source_type: the data source type for this data source. Options are in the pyaurorax.sources module, or at the top level using the pyaurorax.SOURCE_TYPE_ variables. display_name: the display name for this data source metadata: metadata for this data source (arbitrary keys and values) owner: the owner's email address of this data source maintainers: the email addresses of AuroraX accounts that can alter this data source and its associated records ephemeris_metadata_schema: a list of dictionaries capturing the metadata keys and values that can appear in ephemeris records associated with this data source data_product_metadata_schema: a list of dictionaries capturing the metadata keys and values that can appear in data product records associated with this data source format: the format used when printing the data source, defaults to \"full_record\". Other options are in the pyaurorax.sources module, or at the top level using the pyaurorax.FORMAT_ variables. Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model." +"ref":"pyaurorax.data.ucalgary.Dataset", +"url":46, +"doc":"A dataset available from the UCalgary Space Remote Sensing API, with possible support for downloading and/or reading. Attributes: name (str): Dataset name short_description (str): A short description about the dataset long_description (str): A longer description about the dataset data_tree_url (str): The data tree URL prefix. Used for saving data locally with a similar data tree structure compared to the UCalgary Open Data archive. file_listing_supported (bool): Flag indicating if file listing (downloading) is supported for this dataset. file_reading_supported (bool): Flag indicating if file reading is supported for this dataset. level (str): Dataset level as per L0/L1/L2/etc. standards. doi (str): Dataset DOI unique identifier. doi_details (str): Further details about the DOI. citation (str): String to use when citing usage of the dataset. provider (str): Data provider." 
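The Dataset attributes documented above can be inspected directly. The sketch below assumes the list_datasets() routine referenced throughout these docstrings is available on the same manager object; treat that access path as an assumption.

```python
import pyaurorax

aurorax = pyaurorax.PyAuroraX()

# list_datasets() is the discovery routine referenced above (access path assumed)
datasets = aurorax.data.ucalgary.list_datasets()

# show a few documented attributes for datasets that support file reading
for d in datasets:
    if (d.file_reading_supported is True):
        print(d.name, d.level, d.doi)

# or use the documented pretty_print() helper on a single Dataset
datasets[0].pretty_print()
```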
}, { -"ref":"pyaurorax.sources.DataSource.identifier", -"url":34, -"doc":"" +"ref":"pyaurorax.data.ucalgary.Dataset.pretty_print", +"url":46, +"doc":"A special print output for this class.", +"func":1 }, { -"ref":"pyaurorax.sources.DataSource.program", -"url":34, -"doc":"" +"ref":"pyaurorax.data.ucalgary.FileDownloadResult", +"url":46, +"doc":"Representation of the results from a data download call. Attributes: filenames (List[str]): List of downloaded files, as absolute paths of their location on the local machine. count (int): Number of files downloaded total_bytes (int): Cumulative amount of bytes saved on the local machine. output_root_path (str): The root path of where the data was saved to on the local machine. dataset (Dataset): The Dataset object for this data." }, { -"ref":"pyaurorax.sources.DataSource.platform", -"url":34, +"ref":"pyaurorax.data.ucalgary.FileDownloadResult.filenames", +"url":46, "doc":"" }, { -"ref":"pyaurorax.sources.DataSource.instrument_type", -"url":34, +"ref":"pyaurorax.data.ucalgary.FileDownloadResult.count", +"url":46, "doc":"" }, { -"ref":"pyaurorax.sources.DataSource.source_type", -"url":34, +"ref":"pyaurorax.data.ucalgary.FileDownloadResult.total_bytes", +"url":46, "doc":"" }, { -"ref":"pyaurorax.sources.DataSource.display_name", -"url":34, +"ref":"pyaurorax.data.ucalgary.FileDownloadResult.output_root_path", +"url":46, "doc":"" }, { -"ref":"pyaurorax.sources.DataSource.metadata", -"url":34, +"ref":"pyaurorax.data.ucalgary.FileDownloadResult.dataset", +"url":46, "doc":"" }, { -"ref":"pyaurorax.sources.DataSource.owner", -"url":34, -"doc":"" +"ref":"pyaurorax.data.ucalgary.FileListingResponse", +"url":46, +"doc":"Representation of the file listing response from the UCalgary Space Remote Sensing API. Attributes: urls (List[str]): A list of URLs for available data files. path_prefix (str): The URL prefix, which is used for saving data locally with a similar data tree structure compared to the UCalgary Open Data archive. count (int): The number of URLs available. dataset (Dataset): The Dataset object for this data. total_bytes (int): The cumulative amount of bytes for the available URLs." }, { -"ref":"pyaurorax.sources.DataSource.maintainers", -"url":34, +"ref":"pyaurorax.data.ucalgary.FileListingResponse.urls", +"url":46, "doc":"" }, { -"ref":"pyaurorax.sources.DataSource.ephemeris_metadata_schema", -"url":34, +"ref":"pyaurorax.data.ucalgary.FileListingResponse.path_prefix", +"url":46, "doc":"" }, { -"ref":"pyaurorax.sources.DataSource.data_product_metadata_schema", -"url":34, +"ref":"pyaurorax.data.ucalgary.FileListingResponse.count", +"url":46, "doc":"" }, { -"ref":"pyaurorax.sources.DataSource.format", -"url":34, +"ref":"pyaurorax.data.ucalgary.FileListingResponse.dataset", +"url":46, "doc":"" }, { -"ref":"pyaurorax.sources.DataSource.stats", -"url":34, +"ref":"pyaurorax.data.ucalgary.FileListingResponse.total_bytes", +"url":46, "doc":"" }, { -"ref":"pyaurorax.sources.DataSourceStatistics", -"url":34, -"doc":"Data source statistics object Attributes: earliest_ephemeris_loaded: timestamp of the earliest ephemeris record latest_ephemeris_loaded: timestamp of the latest ephemeris record ephemeris_count: total number of ephemeris records for this data source earliest_data_product_loaded: timestamp of the earliest data_product record latest_data_product_loaded: timestamp of the latest data product record data_product_count: total number of ephemeris records for this data source Create a new model by parsing and validating input data from keyword arguments. 
Raises ValidationError if the input data cannot be parsed to form a valid model." +"ref":"pyaurorax.data.ucalgary.Data", +"url":46, +"doc":"Representation of the data read in from a read call. Attributes: data (Any): The loaded data. This can be one of the following types: ndarray, List[Skymap], List[Calibration]. timestamp (List[datetime.datetime]): List of timestamps for the read in data. metadata (List[Dict]): List of dictionaries containing metadata specific to each timestamp/image/record. problematic_files (List[ProblematicFiles]): A list detailing any files that encountered issues during reading. calibrated_data (Any): A calibrated version of the data. Populated and utilized by data analysis libraries. Has a None value until calibrated data is inserted manually. dataset (Dataset): The Dataset object for this data." }, { -"ref":"pyaurorax.sources.DataSourceStatistics.earliest_ephemeris_loaded", -"url":34, +"ref":"pyaurorax.data.ucalgary.Data.data", +"url":46, "doc":"" }, { -"ref":"pyaurorax.sources.DataSourceStatistics.latest_ephemeris_loaded", -"url":34, +"ref":"pyaurorax.data.ucalgary.Data.timestamp", +"url":46, "doc":"" }, { -"ref":"pyaurorax.sources.DataSourceStatistics.ephemeris_count", -"url":34, +"ref":"pyaurorax.data.ucalgary.Data.metadata", +"url":46, "doc":"" }, { -"ref":"pyaurorax.sources.DataSourceStatistics.earliest_data_product_loaded", -"url":34, +"ref":"pyaurorax.data.ucalgary.Data.problematic_files", +"url":46, "doc":"" }, { -"ref":"pyaurorax.sources.DataSourceStatistics.latest_data_product_loaded", -"url":34, +"ref":"pyaurorax.data.ucalgary.Data.calibrated_data", +"url":46, "doc":"" }, { -"ref":"pyaurorax.sources.DataSourceStatistics.data_product_count", -"url":34, +"ref":"pyaurorax.data.ucalgary.Data.dataset", +"url":46, "doc":"" }, { -"ref":"pyaurorax.sources.classes", -"url":35, -"doc":"Separted classes and functions used by the sources module. Note that these classes and variables are all imported higher up at the top of the sources module. They can be referenced from there instead of digging in deeper to these submodules." +"ref":"pyaurorax.data.ucalgary.Data.pretty_print", +"url":46, +"doc":"A special print output for this class.", +"func":1 }, { -"ref":"pyaurorax.sources.classes.data_source", -"url":36, -"doc":"Class definition for a data source" +"ref":"pyaurorax.data.ucalgary.Skymap", +"url":46, +"doc":"Representation for a skymap file. Attributes: filename (str): Filename for the skymap file, as an absolute path of its location on the local machine. 
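Tying the read() routine documented earlier to the Data object described above: after a download, the returned filenames and Dataset can be passed straight to read(), and the documented timestamp, metadata, and problematic_files attributes can then be inspected. A sketch, with illustrative dates and an assumed access path:

```python
import datetime
import pyaurorax

aurorax = pyaurorax.PyAuroraX()

start = datetime.datetime(2022, 1, 1, 6, 0, 0)
end = datetime.datetime(2022, 1, 1, 6, 9, 59)
res = aurorax.data.ucalgary.download("THEMIS_ASI_RAW", start, end, site_uid="atha")

# read the downloaded files; res.dataset and res.filenames are documented FileDownloadResult attributes
data = aurorax.data.ucalgary.read(res.dataset, res.filenames, n_parallel=2)

print(data.timestamp[0], data.metadata[0])
if (len(data.problematic_files) > 0):
    print("some files could not be read:", data.problematic_files)
```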
project_uid (str): Project unique identifier site_uid (str): Site unique identifier imager_uid (str): Imager/device unique identifier site_map_latitude (float): Geodetic latitude of instrument site_map_longitude (float): Geodetic longitude of instrument site_map_altitude (float): Altitude of the instrument (in meters) full_elevation (ndarray): Elevation angle from horizon, for each image pixel (in degrees) full_azimuth (ndarray): Local azimuth angle from 0 degrees north, positive moving east (in degrees) full_map_altitude (ndarray): Altitudes that image coordinates are mapped to (in kilometers) full_map_latitude (ndarray): Geodetic latitudes of pixel corners, mapped to various altitudes (specified by full_map_altitude ) full_map_longitude (ndarray): Geodetic longitudes of pixel corners, mapped to various altitudes (specified by full_map_altitude ) generation_info (SkymapGenerationInfo): Metadata describing details about this skymap's generation process version (str): Version of the skymap dataset (Dataset): The Dataset object for this data." }, { -"ref":"pyaurorax.sources.classes.data_source.DataSource", -"url":36, -"doc":"Data source object Attributes: identifier: the unique AuroraX ID for this data source program: the program for this data source platform: the platform for this data source instrument_type: the instrument type for this data source source_type: the data source type for this data source. Options are in the pyaurorax.sources module, or at the top level using the pyaurorax.SOURCE_TYPE_ variables. display_name: the display name for this data source metadata: metadata for this data source (arbitrary keys and values) owner: the owner's email address of this data source maintainers: the email addresses of AuroraX accounts that can alter this data source and its associated records ephemeris_metadata_schema: a list of dictionaries capturing the metadata keys and values that can appear in ephemeris records associated with this data source data_product_metadata_schema: a list of dictionaries capturing the metadata keys and values that can appear in data product records associated with this data source format: the format used when printing the data source, defaults to \"full_record\". Other options are in the pyaurorax.sources module, or at the top level using the pyaurorax.FORMAT_ variables. Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model." 
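A sketch showing how the Skymap attributes documented above are typically reached: download the best-matching skymap file, read it, and pull a Skymap object out of the returned Data container (whose data attribute is a List[Skymap] for skymap datasets, per the Data docs above). The get_precalculated_altitudes() helper used at the end is documented just below; the timestamp and access path are assumptions, as in the other sketches.

```python
import datetime
import pyaurorax

aurorax = pyaurorax.PyAuroraX()

ts = datetime.datetime(2022, 1, 1, 6, 0, 0)
r = aurorax.data.ucalgary.download_best_skymap("THEMIS_ASI_SKYMAP_IDLSAV", site_uid="atha", timestamp=ts)

# Data.data is a List[Skymap] when reading a skymap dataset
skymap_data = aurorax.data.ucalgary.read(r.dataset, r.filenames)
skymap = skymap_data.data[0]

print(skymap.site_uid, skymap.version, skymap.site_map_latitude, skymap.site_map_longitude)
print(skymap.get_precalculated_altitudes())  # kilometres, per the method documented below
```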
+"ref":"pyaurorax.data.ucalgary.Skymap.filename", +"url":46, +"doc":"" }, { -"ref":"pyaurorax.sources.classes.data_source.DataSource.identifier", -"url":36, +"ref":"pyaurorax.data.ucalgary.Skymap.project_uid", +"url":46, "doc":"" }, { -"ref":"pyaurorax.sources.classes.data_source.DataSource.program", -"url":36, +"ref":"pyaurorax.data.ucalgary.Skymap.site_uid", +"url":46, "doc":"" }, { -"ref":"pyaurorax.sources.classes.data_source.DataSource.platform", -"url":36, +"ref":"pyaurorax.data.ucalgary.Skymap.imager_uid", +"url":46, "doc":"" }, { -"ref":"pyaurorax.sources.classes.data_source.DataSource.instrument_type", -"url":36, +"ref":"pyaurorax.data.ucalgary.Skymap.site_map_latitude", +"url":46, "doc":"" }, { -"ref":"pyaurorax.sources.classes.data_source.DataSource.source_type", -"url":36, +"ref":"pyaurorax.data.ucalgary.Skymap.site_map_longitude", +"url":46, "doc":"" }, { -"ref":"pyaurorax.sources.classes.data_source.DataSource.display_name", -"url":36, +"ref":"pyaurorax.data.ucalgary.Skymap.site_map_altitude", +"url":46, "doc":"" }, { -"ref":"pyaurorax.sources.classes.data_source.DataSource.metadata", -"url":36, +"ref":"pyaurorax.data.ucalgary.Skymap.full_elevation", +"url":46, "doc":"" }, { -"ref":"pyaurorax.sources.classes.data_source.DataSource.owner", -"url":36, +"ref":"pyaurorax.data.ucalgary.Skymap.full_azimuth", +"url":46, "doc":"" }, { -"ref":"pyaurorax.sources.classes.data_source.DataSource.maintainers", -"url":36, +"ref":"pyaurorax.data.ucalgary.Skymap.full_map_altitude", +"url":46, "doc":"" }, { -"ref":"pyaurorax.sources.classes.data_source.DataSource.ephemeris_metadata_schema", -"url":36, +"ref":"pyaurorax.data.ucalgary.Skymap.full_map_latitude", +"url":46, "doc":"" }, { -"ref":"pyaurorax.sources.classes.data_source.DataSource.data_product_metadata_schema", -"url":36, +"ref":"pyaurorax.data.ucalgary.Skymap.full_map_longitude", +"url":46, "doc":"" }, { -"ref":"pyaurorax.sources.classes.data_source.DataSource.format", -"url":36, +"ref":"pyaurorax.data.ucalgary.Skymap.generation_info", +"url":46, "doc":"" }, { -"ref":"pyaurorax.sources.classes.data_source.DataSource.stats", -"url":36, +"ref":"pyaurorax.data.ucalgary.Skymap.version", +"url":46, "doc":"" }, { -"ref":"pyaurorax.sources.classes.data_source_stats", -"url":37, -"doc":"Class definition for a statistics about a data source" +"ref":"pyaurorax.data.ucalgary.Skymap.pretty_print", +"url":46, +"doc":"A special print output for this class.", +"func":1 }, { -"ref":"pyaurorax.sources.classes.data_source_stats.DataSourceStatistics", -"url":37, -"doc":"Data source statistics object Attributes: earliest_ephemeris_loaded: timestamp of the earliest ephemeris record latest_ephemeris_loaded: timestamp of the latest ephemeris record ephemeris_count: total number of ephemeris records for this data source earliest_data_product_loaded: timestamp of the earliest data_product record latest_data_product_loaded: timestamp of the latest data product record data_product_count: total number of ephemeris records for this data source Create a new model by parsing and validating input data from keyword arguments. Raises ValidationError if the input data cannot be parsed to form a valid model." +"ref":"pyaurorax.data.ucalgary.Skymap.get_precalculated_altitudes", +"url":46, +"doc":"Get the altitudes that have been precalculated in this skymap. 
Units are kilometers.", +"func":1 }, { -"ref":"pyaurorax.sources.classes.data_source_stats.DataSourceStatistics.earliest_ephemeris_loaded", -"url":37, -"doc":"" +"ref":"pyaurorax.data.ucalgary.Calibration", +"url":46, +"doc":"Representation for a calibration file. Attributes: filename (str): Filename for the calibration file, as an absolute path of its location on the local machine. detector_uid (str): Detector/imager/camera unique identifier version (str): Version number of the calibration file generation_info (CalibrationGenerationInfo): Metadata describing details about this calibration's generation process rayleighs_perdn_persecond (float): Calibrated value for Rayleighs per data number per second (R/dn/s). This value will be None if a flatfield calibration file was read instead of a rayleighs calibration file. flat_field_multiplier (ndarray): Calibrated flat field array. This value will be None if a rayleighs calibration file was read instead of a flatfield calibration file. dataset (Dataset): The Dataset object for this data." }, { -"ref":"pyaurorax.sources.classes.data_source_stats.DataSourceStatistics.latest_ephemeris_loaded", -"url":37, +"ref":"pyaurorax.data.ucalgary.Calibration.filename", +"url":46, "doc":"" }, { -"ref":"pyaurorax.sources.classes.data_source_stats.DataSourceStatistics.ephemeris_count", -"url":37, +"ref":"pyaurorax.data.ucalgary.Calibration.detector_uid", +"url":46, "doc":"" }, { -"ref":"pyaurorax.sources.classes.data_source_stats.DataSourceStatistics.earliest_data_product_loaded", -"url":37, +"ref":"pyaurorax.data.ucalgary.Calibration.version", +"url":46, "doc":"" }, { -"ref":"pyaurorax.sources.classes.data_source_stats.DataSourceStatistics.latest_data_product_loaded", -"url":37, +"ref":"pyaurorax.data.ucalgary.Calibration.generation_info", +"url":46, "doc":"" }, { -"ref":"pyaurorax.sources.classes.data_source_stats.DataSourceStatistics.data_product_count", -"url":37, +"ref":"pyaurorax.data.ucalgary.Calibration.rayleighs_perdn_persecond", +"url":46, "doc":"" }, { -"ref":"pyaurorax.sources.sources", -"url":38, -"doc":"Functions for interacting with data sources" +"ref":"pyaurorax.data.ucalgary.Calibration.flat_field_multiplier", +"url":46, +"doc":"" }, { -"ref":"pyaurorax.sources.sources.list", -"url":38, -"doc":"Retrieve all data source records (using params to filter as desired) Args: program: the program to filter for, defaults to None platform: the platform to filter for, defaults to None instrument_type: the instrument type to filter for, defaults to None source_type: the data source type to filter for, defaults to None. Options are in the pyaurorax.sources module, or at the top level using the pyaurorax.SOURCE_TYPE_ variables. owner: the owner's email address to filter for, defaults to None format: the format of the data sources returned, defaults to \"full_record\". Other options are in the pyaurorax.sources module, or at the top level using the pyaurorax.FORMAT_ variables. 
order: the value to order results by (identifier, program, platform, instrument_type, display_name, owner), defaults to \"identifier\" include_stats: include additional stats information about the data source (note: slower response time since an additional request must be done for each data source), defaults to False Returns: any data sources matching the requested parameters Raises: pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error", -"func":1 +"ref":"pyaurorax.data.ucalgary.Calibration.dataset", +"url":46, +"doc":"" }, { -"ref":"pyaurorax.sources.sources.search", -"url":38, -"doc":"Search for data source records (using params to filter as desired) This is very similar to the 'list' function, however multiple programs, platforms, and/or instrument types can be supplied to this function. The 'list' function only supports single values for those parameters. Args: programs: the programs to filter for, defaults to [] platforms: the platforms to filter for, defaults to [] instrument_type: the instrument types to filter for, defaults to [] format: the format of the data sources returned, defaults to \"full_record\". Other options are in the pyaurorax.sources module, or at the top level using the pyaurorax.FORMAT_ variables. order: the value to order results by (identifier, program, platform, instrument_type, display_name), defaults to \"identifier\" include_stats: include additional stats information about the data source (note: slower response time since an additional request must be done for each data source), defaults to False Returns: any data sources matching the requested parameters Raises: pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error", +"ref":"pyaurorax.data.ucalgary.Calibration.pretty_print", +"url":46, +"doc":"A special print output for this class.", "func":1 }, { -"ref":"pyaurorax.sources.sources.get", -"url":38, -"doc":"Retrieve a specific data source record Args: program: the program name platform: the platform name instrument_type: the instrument type name format: the format of the data sources returned, defaults to \"full_record\". Other options are in the pyaurorax.sources module, or at the top level using the pyaurorax.FORMAT_ variables. include_stats: include additional stats information about the data source (note: slower response time since an additional request must be done for each data source), defaults to False Returns: the data source matching the requested parameters Raises: pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error pyaurorax.exceptions.AuroraXNotFoundException: source not found", -"func":1 +"ref":"pyaurorax.data.ucalgary.read", +"url":47, +"doc":"" }, { -"ref":"pyaurorax.sources.sources.get_using_filters", -"url":38, -"doc":"Retrieve all data source records matching a filter Args: program: the program to filter for, defaults to None platform: the platform to filter for, defaults to None instrument_type: the instrument type to filter for, defaults to None source_type: the data source type to filter for, defaults to None. Options are in the pyaurorax.sources module, or at the top level using the pyaurorax.SOURCE_TYPE_ variables. owner: the owner's email address to filter for, defaults to None format: the format of the data sources returned, defaults to \"full_record\". 
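Analogous to the skymap case, the Calibration attributes documented above are usually obtained by downloading the best-matching calibration file and reading it. In the sketch below the device UID is a hypothetical placeholder, the dataset name comes from the docstring example, and the access path is assumed.

```python
import datetime
import pyaurorax

aurorax = pyaurorax.PyAuroraX()

ts = datetime.datetime(2022, 1, 1, 6, 0, 0)
cal_res = aurorax.data.ucalgary.download_best_rayleighs_calibration(
    "REGO_CALIBRATION_RAYLEIGHS_IDLSAV",
    device_uid="652",   # hypothetical device UID, for illustration only
    timestamp=ts,
)

# Data.data is a List[Calibration] when reading a calibration dataset
cal_data = aurorax.data.ucalgary.read(cal_res.dataset, cal_res.filenames)
cal = cal_data.data[0]
print(cal.detector_uid, cal.rayleighs_perdn_persecond)
```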
Other options are in the pyaurorax.sources module, or at the top level using the pyaurorax.FORMAT_ variables. order: the value to order results by (identifier, program, platform, instrument_type, display_name, owner), defaults to \"identifier\" include_stats: include additional stats information about the data source (note: slower response time since an additional request must be done for each data source), defaults to False Returns: any data sources matching the requested parameters Raises: pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error", -"func":1 +"ref":"pyaurorax.data.ucalgary.read.ReadManager", +"url":47, +"doc":"The UCalgaryManager object is initialized within every PyAuroraX object. It acts as a way to access the submodules and carry over configuration information in the super class." }, { -"ref":"pyaurorax.sources.sources.get_using_identifier", -"url":38, -"doc":"Retrieve data source record matching an identifier Args: identifier: the AuroraX unique ID for the data source format: the format of the data sources returned, defaults to \"full_record\". Other options are in the pyaurorax.sources module, or at the top level using the pyaurorax.FORMAT_ variables. include_stats: include additional stats information about the data source (note: slower response time since an additional request must be done for each data source), defaults to False Returns: the data source matching the identifier Raises: pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error", +"ref":"pyaurorax.data.ucalgary.read.ReadManager.list_supported_datasets", +"url":47, +"doc":"List the datasets which have file reading capabilities supported. Returns: A list of the dataset names with file reading support.", "func":1 }, { -"ref":"pyaurorax.sources.sources.get_stats", -"url":38, -"doc":"Retrieve statistics for a data source Args: identifier: the AuroraX unique ID for the data source format: the format of the data sources returned, defaults to \"full_record\". Other options are in the pyaurorax.sources module, or at the top level using the pyaurorax.FORMAT_ variables. slow: retrieve the stats using a slower, but more accurate method, defaults to False Returns: the data source including additional stats information about it Raises: pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error pyaurorax.exceptions.AuroraXNotFoundException: data source not found pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error", +"ref":"pyaurorax.data.ucalgary.read.ReadManager.is_supported", +"url":47, +"doc":"Check if a given dataset has file reading support. Not all datasets available in the UCalgary Space Remote Sensing Open Data Platform have special readfile routines in this library. This is because some datasets are in basic formats such as JPG or PNG, so unique functions aren't necessary. We leave it up to the user to open these basic files in whichever way they prefer. Use the list_supported_read_datasets() function to see all datasets that have special file reading functionality in this library. Args: dataset_name (str): The dataset name to check if file reading is supported. This parameter is required. 
Returns: Boolean indicating if file reading is supported.", "func":1 }, { -"ref":"pyaurorax.sources.sources.add", -"url":38, -"doc":"Add a new data source to AuroraX Args: data_source: the data source to add (note: it must be a fully-defined DataSource object) Returns: the newly created data source Raises: pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error pyaurorax.exceptions.AuroraXDuplicateException: duplicate data source, already exists", +"ref":"pyaurorax.data.ucalgary.read.ReadManager.read", +"url":47, +"doc":"Read in data files for a given dataset. Note that only one type of dataset's data should be read in using a single call. Args: dataset (Dataset): The dataset object for which the files are associated with. This parameter is required. file_list (List[str], List[Path], str, Path): The files to read in. Absolute paths are recommended, but not technically necessary. This can be a single string for a file, or a list of strings to read in multiple files. This parameter is required. n_parallel (int): Number of data files to read in parallel using multiprocessing. Default value is 1. Adjust according to your computer's available resources. This parameter is optional. first_record (bool): Only read in the first record in each file. This is the same as the first_frame parameter in the themis-imager-readfile and trex-imager-readfile libraries, and is a read optimization if you only need one image per minute, as opposed to the full temporal resolution of data (e.g., 3sec cadence). This parameter is optional. no_metadata (bool): Skip reading of metadata. This is a minor optimization if the metadata is not needed. Default is False . This parameter is optional. quiet (bool): Do not print out errors while reading data files, if any are encountered. Any files that encounter errors will be, as usual, accessible via the problematic_files attribute of the returned Data object. This parameter is optional. Returns: A [ Data ](https: docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html pyucalgarysrs.data.classes.Data) object containing the data read in, among other values. Raises: pyaurorax.exceptions.AuroraXUnsupportedReadError: an unsupported dataset was used when trying to read files. pyaurorax.exceptions.AuroraXError: a generic read error was encountered Notes: - For users who are familiar with the themis-imager-readfile and trex-imager-readfile libraries, the read function provides a near-identical usage. Further improvements have been integrated, and those libraries are anticipated to be deprecated at some point in the future.", "func":1 }, { -"ref":"pyaurorax.sources.sources.delete", -"url":38, -"doc":"Delete a data source from AuroraX Args: identifier: the AuroraX unique ID for the data source Returns: 0 on success, raises error if an issue was encountered Raises: pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error pyaurorax.exceptions.AuroraXNotFoundException: data source not found pyaurorax.exceptions.AuroraXConflictException: conflict of some type", +"ref":"pyaurorax.data.ucalgary.read.ReadManager.read_themis", +"url":47, +"doc":"Read in THEMIS ASI raw data (stream0 full.pgm files). Args: file_list (List[str], List[Path], str, Path): The files to read in. Absolute paths are recommended, but not technically necessary. This can be a single string for a file, or a list of strings to read in multiple files. 
This parameter is required. n_parallel (int): Number of data files to read in parallel using multiprocessing. Default value is 1. Adjust according to your computer's available resources. This parameter is optional. first_record (bool): Only read in the first record in each file. This is the same as the first_frame parameter in the themis-imager-readfile and trex-imager-readfile libraries, and is a read optimization if you only need one image per minute, as opposed to the full temporal resolution of data (e.g., 3sec cadence). This parameter is optional. no_metadata (bool): Skip reading of metadata. This is a minor optimization if the metadata is not needed. Default is False . This parameter is optional. quiet (bool): Do not print out errors while reading data files, if any are encountered. Any files that encounter errors will be, as usual, accessible via the problematic_files attribute of the returned Data object. This parameter is optional. dataset (Dataset): The dataset object for which the files are associated with. This parameter is optional. Returns: A [ Data ](https: docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html pyucalgarysrs.data.classes.Data) object containing the data read in, among other values. Raises: pyaurorax.exceptions.AuroraXError: a generic read error was encountered", "func":1 }, { -"ref":"pyaurorax.sources.sources.update", -"url":38, -"doc":"Update a data source in AuroraX This operation will fully replace the data source with the data_source argument passed in. Be sure that the data_source object is complete. If the data source is missing the value for identifier, program, platform, instrument type, source type, or display name, the update will fail and raise a AuroraXBadParametersException exception. Args: data_source: the data source to update (note: it must be a fully-defined DataSource object with the values set to what you want AuroraX to update it to) Returns: the updated data source Raises: pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error pyaurorax.exceptions.AuroraXNotFoundException: data source not found pyaurorax.exceptions.AuroraXBadParametersException: missing parameters", +"ref":"pyaurorax.data.ucalgary.read.ReadManager.read_rego", +"url":47, +"doc":"Read in REGO raw data (stream0 pgm files). Args: file_list (List[str], List[Path], str, Path): The files to read in. Absolute paths are recommended, but not technically necessary. This can be a single string for a file, or a list of strings to read in multiple files. This parameter is required. n_parallel (int): Number of data files to read in parallel using multiprocessing. Default value is 1. Adjust according to your computer's available resources. This parameter is optional. first_record (bool): Only read in the first record in each file. This is the same as the first_frame parameter in the themis-imager-readfile and trex-imager-readfile libraries, and is a read optimization if you only need one image per minute, as opposed to the full temporal resolution of data (e.g., 3sec cadence). This parameter is optional. no_metadata (bool): Skip reading of metadata. This is a minor optimization if the metadata is not needed. Default is False . This parameter is optional. quiet (bool): Do not print out errors while reading data files, if any are encountered. Any files that encounter errors will be, as usual, accessible via the problematic_files attribute of the returned Data object. This parameter is optional. 
dataset (Dataset): The dataset object for which the files are associated with. This parameter is optional. Returns: A [ Data ](https: docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html pyucalgarysrs.data.classes.Data) object containing the data read in, among other values. Raises: pyaurorax.exceptions.AuroraXError: a generic read error was encountered", "func":1 }, { -"ref":"pyaurorax.sources.sources.update_partial", -"url":38, -"doc":"Partially update a data source in AuroraX (omitted fields are ignored) Args: identifier: the AuroraX unique ID for the data source, defaults to None program: the new program for the data source, defaults to None platform: the new platform for the data source, defaults to None instrument_type: the new instrument type for the data source, defaults to None source_type: the new source type for the data source, defaults to None. Options are in the pyaurorax.sources module, or at the top level using the pyaurorax.SOURCE_TYPE_ variables. display_name: the new display name for the data source, defaults to None metadata: the new metadata for the data source, defaults to None maintainers: the new maintainer AuroraX account email addresses, defaults to None ephemeris_metadata_schema: a list of dictionaries capturing the metadata keys and values that can appear in ephemeris records associated with the data source, defaults to None data_product_metadata_schema: a list of dictionaries capturing the metadata keys and values that can appear in data product records associated with the data source, defaults to None Returns: the updated data source Raises: pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error pyaurorax.exceptions.AuroraXNotFoundException: data source not found pyaurorax.exceptions.AuroraXBadParametersException: missing parameters", +"ref":"pyaurorax.data.ucalgary.read.ReadManager.read_trex_nir", +"url":47, +"doc":"Read in TREx near-infrared (NIR) raw data (stream0 pgm files). Args: file_list (List[str], List[Path], str, Path): The files to read in. Absolute paths are recommended, but not technically necessary. This can be a single string for a file, or a list of strings to read in multiple files. This parameter is required. n_parallel (int): Number of data files to read in parallel using multiprocessing. Default value is 1. Adjust according to your computer's available resources. This parameter is optional. first_record (bool): Only read in the first record in each file. This is the same as the first_frame parameter in the themis-imager-readfile and trex-imager-readfile libraries, and is a read optimization if you only need one image per minute, as opposed to the full temporal resolution of data (e.g., 3sec cadence). This parameter is optional. no_metadata (bool): Skip reading of metadata. This is a minor optimization if the metadata is not needed. Default is False . This parameter is optional. quiet (bool): Do not print out errors while reading data files, if any are encountered. Any files that encounter errors will be, as usual, accessible via the problematic_files attribute of the returned Data object. This parameter is optional. dataset (Dataset): The dataset object for which the files are associated with. This parameter is optional. Returns: A [ Data ](https: docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html pyucalgarysrs.data.classes.Data) object containing the data read in, among other values. 
Raises: pyaurorax.exceptions.AuroraXError: a generic read error was encountered", "func":1 }, { -"ref":"pyaurorax.util", -"url":39, -"doc":"The util module provides helper methods such as converting arbitrary geographic locations to North/South B-trace geographic locations. Note that all functions and classes from submodules are all imported at this level of the util module. They can be referenced from here instead of digging in deeper to the submodules." -}, -{ -"ref":"pyaurorax.util.ground_geo_to_nbtrace", -"url":39, -"doc":"Convert geographic location to North B-Trace geographic location The timestamp is required because when calculating the B-trace values, the location is converted into geomagnetic coordinates. This conversion is different based on the timestamp since the magnetic coordinates change over time. Note: aacgmv2 must be installed. To install it, you can run \"python -m pip install pyaurorax[aacgmv2]\". Args: geo_location: a Location object representing the geographic location dt: timestamp for this set of lat and lons Returns: the north B-trace location as a Location object", +"ref":"pyaurorax.data.ucalgary.read.ReadManager.read_trex_blue", +"url":47, +"doc":"Read in TREx Blueline raw data (stream0 pgm files). Args: file_list (List[str], List[Path], str, Path): The files to read in. Absolute paths are recommended, but not technically necessary. This can be a single string for a file, or a list of strings to read in multiple files. This parameter is required. n_parallel (int): Number of data files to read in parallel using multiprocessing. Default value is 1. Adjust according to your computer's available resources. This parameter is optional. first_record (bool): Only read in the first record in each file. This is the same as the first_frame parameter in the themis-imager-readfile and trex-imager-readfile libraries, and is a read optimization if you only need one image per minute, as opposed to the full temporal resolution of data (e.g., 3sec cadence). This parameter is optional. no_metadata (bool): Skip reading of metadata. This is a minor optimization if the metadata is not needed. Default is False . This parameter is optional. quiet (bool): Do not print out errors while reading data files, if any are encountered. Any files that encounter errors will be, as usual, accessible via the problematic_files attribute of the returned Data object. This parameter is optional. dataset (Dataset): The dataset object for which the files are associated with. This parameter is optional. Returns: A [ Data ](https: docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html pyucalgarysrs.data.classes.Data) object containing the data read in, among other values. Raises: pyaurorax.exceptions.AuroraXError: a generic read error was encountered", "func":1 }, { -"ref":"pyaurorax.util.ground_geo_to_sbtrace", -"url":39, -"doc":"Convert geographic location to South B-Trace geographic location The timestamp is required because when calculating the B-trace values, the location is converted into geomagnetic coordinates. This conversion is different based on the timestamp since the magnetic coordinates change over time. Note: aacgmv2 must be installed. To install it, you can run \"python -m pip install pyaurorax[aacgmv2]\". 
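The dataset-specific readers documented in this block (read_themis(), read_rego(), read_trex_nir(), read_trex_blue(), and those that follow) all share the same file_list/n_parallel/first_record/no_metadata/quiet signature, so one sketch covers them. The attribute used to reach the ReadManager instance and the glob pattern are assumptions; only the parameter names come from the docstrings above.

```python
import glob
import pyaurorax

aurorax = pyaurorax.PyAuroraX()

# collect some THEMIS ASI stream0 files from a local directory (path and pattern are illustrative)
files = sorted(glob.glob("/path/to/themis/stream0/*_full.pgm*"))

# 'readers' is an assumed attribute name for the ReadManager documented above
data = aurorax.data.ucalgary.readers.read_themis(files, n_parallel=2, first_record=True)
print(len(data.timestamp), "frames read")
```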
Args: geo_location: a Location object representing the geographic location dt: timestamp for this set of lat and lons Returns: the south B-trace location as a Location object", +"ref":"pyaurorax.data.ucalgary.read.ReadManager.read_trex_rgb", +"url":47, +"doc":"Read in TREx RGB raw data (stream0 h5, stream0.burst png.tar, unstable stream0 and stream0.colour pgm and png ). Args: file_list (List[str], List[Path], str, Path): The files to read in. Absolute paths are recommended, but not technically necessary. This can be a single string for a file, or a list of strings to read in multiple files. This parameter is required. n_parallel (int): Number of data files to read in parallel using multiprocessing. Default value is 1. Adjust according to your computer's available resources. This parameter is optional. first_record (bool): Only read in the first record in each file. This is the same as the first_frame parameter in the themis-imager-readfile and trex-imager-readfile libraries, and is a read optimization if you only need one image per minute, as opposed to the full temporal resolution of data (e.g., 3sec cadence). This parameter is optional. no_metadata (bool): Skip reading of metadata. This is a minor optimization if the metadata is not needed. Default is False . This parameter is optional. quiet (bool): Do not print out errors while reading data files, if any are encountered. Any files that encounter errors will be, as usual, accessible via the problematic_files attribute of the returned Data object. This parameter is optional. dataset (Dataset): The dataset object for which the files are associated with. This parameter is optional. Returns: A [ Data ](https: docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html pyucalgarysrs.data.classes.Data) object containing the data read in, among other values. Raises: pyaurorax.exceptions.AuroraXError: a generic read error was encountered", "func":1 }, { -"ref":"pyaurorax.util.calculate_btrace", -"url":40, -"doc":"Helper functions for calculating the north and south B-trace geographic locations for ground-based instruments." +"ref":"pyaurorax.data.ucalgary.read.ReadManager.read_trex_spectrograph", +"url":47, +"doc":"Read in TREx Spectrograph raw data (stream0 pgm files). Args: file_list (List[str], List[Path], str, Path): The files to read in. Absolute paths are recommended, but not technically necessary. This can be a single string for a file, or a list of strings to read in multiple files. This parameter is required. n_parallel (int): Number of data files to read in parallel using multiprocessing. Default value is 1. Adjust according to your computer's available resources. This parameter is optional. first_record (bool): Only read in the first record in each file. This is the same as the first_frame parameter in the themis-imager-readfile and trex-imager-readfile libraries, and is a read optimization if you only need one image per minute, as opposed to the full temporal resolution of data (e.g., 3sec cadence). This parameter is optional. no_metadata (bool): Skip reading of metadata. This is a minor optimization if the metadata is not needed. Default is False . This parameter is optional. quiet (bool): Do not print out errors while reading data files, if any are encountered. Any files that encounter errors will be, as usual, accessible via the problematic_files attribute of the returned Data object. This parameter is optional. dataset (Dataset): The dataset object for which the files are associated with. This parameter is optional. 
Returns: A [ Data ](https: docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html pyucalgarysrs.data.classes.Data) object containing the data read in, among other values. Raises: pyaurorax.exceptions.AuroraXError: a generic read error was encountered", +"func":1 }, { -"ref":"pyaurorax.util.calculate_btrace.ground_geo_to_nbtrace", -"url":40, -"doc":"Convert geographic location to North B-Trace geographic location The timestamp is required because when calculating the B-trace values, the location is converted into geomagnetic coordinates. This conversion is different based on the timestamp since the magnetic coordinates change over time. Note: aacgmv2 must be installed. To install it, you can run \"python -m pip install pyaurorax[aacgmv2]\". Args: geo_location: a Location object representing the geographic location dt: timestamp for this set of lat and lons Returns: the north B-trace location as a Location object", +"ref":"pyaurorax.data.ucalgary.read.ReadManager.read_skymap", +"url":47, +"doc":"Read in UCalgary skymap files. Args: file_list (List[str], List[Path], str, Path): The files to read in. Absolute paths are recommended, but not technically necessary. This can be a single string for a file, or a list of strings to read in multiple files. This parameter is required. n_parallel (int): Number of data files to read in parallel using multiprocessing. Default value is 1. Adjust according to your computer's available resources. This parameter is optional. quiet (bool): Do not print out errors while reading skymap files, if any are encountered. Any files that encounter errors will be, as usual, accessible via the problematic_files attribute of the returned Skymap object. This parameter is optional. dataset (Dataset): The dataset object for which the files are associated with. This parameter is optional. Returns: A [ Data ](https: docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html pyucalgarysrs.data.classes.Data) object containing the data read in, among other values. Raises: pyaurorax.exceptions.AuroraXError: a generic read error was encountered", "func":1 }, { -"ref":"pyaurorax.util.calculate_btrace.ground_geo_to_sbtrace", -"url":40, -"doc":"Convert geographic location to South B-Trace geographic location The timestamp is required because when calculating the B-trace values, the location is converted into geomagnetic coordinates. This conversion is different based on the timestamp since the magnetic coordinates change over time. Note: aacgmv2 must be installed. To install it, you can run \"python -m pip install pyaurorax[aacgmv2]\". Args: geo_location: a Location object representing the geographic location dt: timestamp for this set of lat and lons Returns: the south B-trace location as a Location object", +"ref":"pyaurorax.data.ucalgary.read.ReadManager.read_calibration", +"url":47, +"doc":"Read in UCalgary calibration files. Args: file_list (List[str], List[Path], str, Path): The files to read in. Absolute paths are recommended, but not technically necessary. This can be a single string for a file, or a list of strings to read in multiple files. This parameter is required. n_parallel (int): Number of data files to read in parallel using multiprocessing. Default value is 1. Adjust according to your computer's available resources. This parameter is optional. quiet (bool): Do not print out errors while reading calibration files, if any are encountered. Any files that encounter errors will be, as usual, accessible via the problematic_files attribute of the returned Calibration object. 
This parameter is optional. dataset (Dataset): The dataset object for which the files are associated with. This parameter is optional. Returns: A [ Data ](https: docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html pyucalgarysrs.data.classes.Data) object containing the data read in, among other values. Raises: pyaurorax.exceptions.AuroraXError: a generic read error was encountered", "func":1 } ] \ No newline at end of file diff --git a/docs/code/pyaurorax_api_reference/pyaurorax/api/api.html b/docs/code/pyaurorax_api_reference/pyaurorax/api/api.html deleted file mode 100644 index 2e0aacf..0000000 --- a/docs/code/pyaurorax_api_reference/pyaurorax/api/api.html +++ /dev/null @@ -1,291 +0,0 @@ - - - - - - -pyaurorax.api.api API documentation - - - - - - - - - - - -
-
-
-

Module pyaurorax.api.api

-
-
-

Helper functions when interacting with the API

-
- -Expand source code - -
"""
-Helper functions when interacting with the API
-"""
-
-from typing import Dict
-from .classes.urls import URLs
-from ..api import DEFAULT_BASE_URL
-
-# pdoc init
-__pdoc__: Dict = {}
-
-# private dynamic globals
-__api_key: str = ""
-
-# create instance of URLs that will be used throughout the application
-urls = URLs()
-
-
-def get_api_key() -> str:
-    """
-    Returns the currently set API key for the module
-
-    Returns:
-        current API key
-    """
-    return __api_key
-
-
-def authenticate(api_key: str) -> None:
-    """
-    Set authentication values for use with subsequent queries
-
-    Args:
-        api_key: an AuroraX API key string
-    """
-    global __api_key
-    __api_key = api_key
-
-
-def set_base_url(url: str) -> None:
-    """
-    Change the base URL for the API (ie. change to the staging
-    system or local server)
-
-    Args:
-        url: the new base url string (ie. 'https://api.staging.aurorax.space')
-    """
-    urls.base_url = url
-
-
-def get_base_url() -> str:
-    """
-    Returns the current base URL for the API
-
-    Returns:
-        current base URL
-    """
-    return urls.base_url
-
-
-def reset_base_url() -> None:
-    """
-    Set the base URL for the API back to the default
-    """
-    urls.base_url = DEFAULT_BASE_URL
-
-
-
-
-
-
-
-

Functions

-
-
-def authenticate(api_key: str) ‑> None -
-
-

Set authentication values for use with subsequent queries

-

Args

-
-
api_key
-
an AuroraX API key string
-
-
- -Expand source code - -
def authenticate(api_key: str) -> None:
-    """
-    Set authentication values for use with subsequent queries
-
-    Args:
-        api_key: an AuroraX API key string
-    """
-    global __api_key
-    __api_key = api_key
-
-
-
-def get_api_key() ‑> str -
-
-

Returns the currently set API key for the module

-

Returns

-

current API key

-
- -Expand source code - -
def get_api_key() -> str:
-    """
-    Returns the currently set API key for the module
-
-    Returns:
-        current API key
-    """
-    return __api_key
-
-
-
-def get_base_url() ‑> str -
-
-

Returns the current base URL for the API

-

Returns

-

current base URL

-
- -Expand source code - -
def get_base_url() -> str:
-    """
-    Returns the current base URL for the API
-
-    Returns:
-        current base URL
-    """
-    return urls.base_url
-
-
-
-def reset_base_url() ‑> None -
-
-

Set the base URL for the API back to the default

-
- -Expand source code - -
def reset_base_url() -> None:
-    """
-    Set the base URL for the API back to the default
-    """
-    urls.base_url = DEFAULT_BASE_URL
-
-
-
-def set_base_url(url: str) ‑> None -
-
-

Change the base URL for the API (i.e. change to the staging system or local server)

-

Args

-
-
url
-
the new base URL string (i.e. 'https://api.staging.aurorax.space')
-
-
- -Expand source code - -
def set_base_url(url: str) -> None:
-    """
-    Change the base URL for the API (ie. change to the staging
-    system or local server)
-
-    Args:
-        url: the new base url string (ie. 'https://api.staging.aurorax.space')
-    """
-    urls.base_url = url
-
-
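The helper functions documented in this section are typically used together when pointing the library at a different API deployment. A minimal usage sketch, assuming the flat pyaurorax.api layout that this page documents; the API key value is a placeholder:

import pyaurorax

# set the API key used for subsequent requests (placeholder value)
pyaurorax.api.authenticate("YOUR_API_KEY")
print(pyaurorax.api.get_api_key())

# point the library at the staging deployment, then restore the default
pyaurorax.api.set_base_url("https://api.staging.aurorax.space")
print(pyaurorax.api.get_base_url())    # https://api.staging.aurorax.space
pyaurorax.api.reset_base_url()
print(pyaurorax.api.get_base_url())    # https://api.aurorax.space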
-
-
-
-
-
- -
- - - \ No newline at end of file diff --git a/docs/code/pyaurorax_api_reference/pyaurorax/api/classes/request.html b/docs/code/pyaurorax_api_reference/pyaurorax/api/classes/request.html deleted file mode 100644 index 749307c..0000000 --- a/docs/code/pyaurorax_api_reference/pyaurorax/api/classes/request.html +++ /dev/null @@ -1,763 +0,0 @@ - - - - - - -pyaurorax.api.classes.request API documentation - - - - - - - - - - - -
-
-
-

Module pyaurorax.api.classes.request

-
-
-

Class definition used for managing an API request

-
- -Expand source code - -
"""
-Class definition used for managing an API request
-"""
-
-import json
-import requests
-from pydantic import BaseModel
-from typing import Optional, Dict, List, Union
-from ..._internal.util import json_converter
-from .response import AuroraXResponse
-from ..api import get_api_key
-from ...import __version__
-from ...exceptions import (AuroraXMaxRetriesException,
-                           AuroraXUnauthorizedException,
-                           AuroraXNotFoundException,
-                           AuroraXUnexpectedContentTypeException,
-                           AuroraXUnexpectedEmptyResponse,
-                           AuroraXException,
-                           AuroraXTimeoutException)
-
-# pdoc init
-__pdoc__: Dict = {}
-
-# request globals
-DEFAULT_RETRIES: int = 2
-""" Number of retry attempts when requesting data from the API """
-
-REQUEST_HEADERS: Dict = {
-    "accept": "application/json",
-    "Content-Type": "application/json",
-    "User-Agent": "python-pyaurorax/%s" % (__version__),
-}
-""" The default headers sent as part of a request to the AuroraX API """
-
-REQUEST_TIMEOUT = 60
-""" Default request timeout, in seconds """
-
-API_KEY_HEADER_NAME: str = "x-aurorax-api-key"
-""" The API key header used when sending requests to the AuroraX API """
-
-
-class AuroraXRequest(BaseModel):
-    """
-    AuroraX API request class
-
-    Attributes:
-        url: the URL to make the request against
-        method: the HTTP method to use (get, post, put, delete, etc.)
-        params: any URL parameters to send in the request, defaults to {}
-        body: the body of the request (ie. post data), defaults to {}
-        headers: any headers to send as part of the request (in addition to the default ones), default is {}
-        null_response: signifies if we expect a response from the API that has no
-            body/data in it (ie. requests to upload data that respond with just a
-            202 status code), defaults to False
-    """
-    url: str
-    method: str
-    params: Optional[Dict] = {}
-    body: Union[Optional[Dict], Optional[List]] = {}
-    headers: Optional[Dict] = {}
-    null_response: Optional[bool] = False
-
-    def __merge_headers(self):
-        # set initial headers
-        all_headers = REQUEST_HEADERS
-
-        # add headers passed into the class
-        for key, value in self.headers.items():
-            all_headers[key] = value
-
-        # add api key
-        api_key = get_api_key()
-        if api_key:
-            all_headers[API_KEY_HEADER_NAME] = api_key
-
-        # return
-        return all_headers
-
-    def execute(self,
-                limited_evaluation: Optional[bool] = False,
-                skip_retry_logic: Optional[bool] = False) -> AuroraXResponse:
-        """
-        Execute an AuroraX request
-
-        Args:
-            limited_evaluation: don't evaluate the response after the retry
-                mechanism, defaults to False
-            skip_retry_logic: exclude the retry logic in the request, defaults
-                to False
-
-        Returns:
-            an AuroraXResponse object
-
-        Raises:
-            pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error
-            pyaurorax.exceptions.AuroraXNotFoundException: requested resource was not found
-            pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected content error
-            pyaurorax.exceptions.AuroraXUnexpectedEmptyResponse: unexpected empty response
-            pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation
-        """
-        # sanitize data
-        body_santized = json.dumps(self.body, default=json_converter)
-
-        # make request
-        try:
-            req = requests.request(self.method,
-                                   self.url,
-                                   headers=self.__merge_headers(),
-                                   params=self.params,
-                                   data=body_santized,
-                                   timeout=REQUEST_TIMEOUT)
-        except requests.exceptions.Timeout:
-            raise AuroraXTimeoutException("Error 408: request timeout reached")
-
-        # retry request if needed
-        if (skip_retry_logic is False):
-            for i in range(0, DEFAULT_RETRIES):
-                if (req.status_code == 500 and "text/plain" in req.headers["Content-Type"]):
-                    if (i == (DEFAULT_RETRIES - 1)):
-                        raise AuroraXMaxRetriesException("%s (%s)" % (req.content.decode(),
-                                                                      req.status_code))
-                    try:
-                        req = requests.request(self.method,
-                                               self.url,
-                                               headers=self.__merge_headers(),
-                                               params=self.params,
-                                               json=self.body,
-                                               data=body_santized,
-                                               timeout=REQUEST_TIMEOUT)
-                    except requests.exceptions.Timeout:
-                        raise AuroraXTimeoutException("Error 408: request timeout reached")
-                else:
-                    break
-
-        # check if authorization worked (raised by API or by Nginx)
-        if (req.status_code == 401):
-            if (req.headers["Content-Type"] == "application/json"):
-                if ("error_message" in req.json()):
-                    # this will be an error message that the API meant to send
-                    raise AuroraXUnauthorizedException("%s %s" % (req.status_code,
-                                                                  req.json()["error_message"]))
-                else:
-                    raise AuroraXUnauthorizedException("Error 401: unauthorized")
-            else:
-                raise AuroraXUnauthorizedException("Error 401: unauthorized")
-
-        # check for 404 error (raised by API or by Nginx)
-        if (req.status_code == 404):
-            if (req.headers["Content-Type"] == "application/json"):
-                if ("error_message" in req.json()):
-                    # this will be an error message that the API meant to send
-                    raise AuroraXNotFoundException("%s %s" % (req.status_code,
-                                                              req.json()["error_message"]))
-                else:
-                    # this will likely be a 404 from the java servlet
-                    raise AuroraXNotFoundException("Error 404: not found")
-            else:
-                raise AuroraXNotFoundException("Error 404: not found")
-
-        # check if we only want to do limited evaluation
-        if (limited_evaluation is True):
-            res = AuroraXResponse(request=req,
-                                  data=None,
-                                  status_code=req.status_code)
-            return res
-
-        # check content type
-        if (self.null_response is False):
-            if (req.headers["Content-Type"] == "application/json"):
-                if (len(req.content) == 0):
-                    raise AuroraXUnexpectedEmptyResponse("No response received")
-                else:
-                    response_data = req.json()
-            else:
-                raise AuroraXUnexpectedContentTypeException("%s (%s)" % (req.content.decode(),
-                                                                         req.status_code))
-        else:
-            if (req.status_code in [200, 201, 202, 204]):
-                response_data = None
-            else:
-                response_data = req.json()
-
-        # check for server error
-        if (req.status_code == 500):
-            response_json = req.json()
-            if ("error_message" in response_json):
-                raise AuroraXException("%s (%s)" % (response_json["error_message"],
-                                                    req.status_code))
-            else:
-                raise AuroraXException(response_json)
-
-        # create response object
-        res = AuroraXResponse(request=req,
-                              data=response_data,
-                              status_code=req.status_code)
-
-        # return
-        return res
-
-    def __str__(self) -> str:
-        """
-        String method
-
-        Returns:
-            string format of AuroraXRequest
-        """
-        return self.__repr__()
-
-    def __repr__(self) -> str:
-        """
-        Object representation
-
-        Returns:
-            object representation of AuroraXRequest
-        """
-        return f"AuroraXRequest(method='{self.method}', url='{self.url}')"
-
-
-
-
-
-
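To show how the request class defined above ties together with the module-level urls instance from pyaurorax.api.api, here is a minimal sketch; it assumes the pyaurorax.api layout documented on this page and an endpoint that needs no parameters:

from pyaurorax.api import AuroraXRequest, urls

# build a simple GET request against the data sources endpoint
req = AuroraXRequest(method="get", url=urls.data_sources_url)

# execute it; the returned AuroraXResponse carries the parsed JSON and the HTTP status code
res = req.execute()
print(res.status_code)
print(res.data)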

Global variables

-
-
var API_KEY_HEADER_NAME : str
-
-

The API key header used when sending requests to the AuroraX API

-
-
var DEFAULT_RETRIES : int
-
-

Number of retry attempts when requesting data from the API

-
-
var REQUEST_HEADERS : Dict
-
-

The default headers sent as part of a request to the AuroraX API

-
-
var REQUEST_TIMEOUT
-
-

Default request timeout, in seconds

-
-
-
-
-
-
-
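The request globals above are plain module-level constants, so callers can inspect them directly, for example to see which headers and timeout every request is sent with. A small sketch, assuming the names are imported via the pyaurorax.api package that re-exports them:

from pyaurorax.api import (DEFAULT_RETRIES, REQUEST_HEADERS,
                           REQUEST_TIMEOUT, API_KEY_HEADER_NAME)

print(DEFAULT_RETRIES)        # number of retry attempts for failed requests
print(REQUEST_TIMEOUT)        # request timeout, in seconds
print(REQUEST_HEADERS)        # default accept / content-type / user-agent headers
print(API_KEY_HEADER_NAME)    # header name the API key is sent under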

Classes

-
-
-class AuroraXRequest -(**data: Any) -
-
-

AuroraX API request class

-

Attributes

-
-
url
-
the URL to make the request against
-
method
-
the HTTP method to use (get, post, put, delete, etc.)
-
params
-
any URL parameters to send in the request, defaults to {}
-
body
-
the body of the request (i.e. post data), defaults to {}
-
headers
-
any headers to send as part of the request (in addition to the default ones), default is {}
-
null_response
-
signifies if we expect a response from the API that has no body/data in it (i.e. requests to upload data that respond with just a 202 status code), defaults to False
-
-

Create a new model by parsing and validating input data from keyword arguments.

-

Raises ValidationError if the input data cannot be parsed to form a valid model.

-
- -Expand source code - -
class AuroraXRequest(BaseModel):
-    """
-    AuroraX API request class
-
-    Attributes:
-        url: the URL to make the request against
-        method: the HTTP method to use (get, post, put, delete, etc.)
-        params: any URL parameters to send in the request, defaults to {}
-        body: the body of the request (ie. post data), defaults to {}
-        headers: any headers to send as part of the request (in addition to the default ones), default is {}
-        null_response: signifies if we expect a response from the API that has no
-            body/data in it (ie. requests to upload data that respond with just a
-            202 status code), defaults to False
-    """
-    url: str
-    method: str
-    params: Optional[Dict] = {}
-    body: Union[Optional[Dict], Optional[List]] = {}
-    headers: Optional[Dict] = {}
-    null_response: Optional[bool] = False
-
-    def __merge_headers(self):
-        # set initial headers
-        all_headers = REQUEST_HEADERS
-
-        # add headers passed into the class
-        for key, value in self.headers.items():
-            all_headers[key] = value
-
-        # add api key
-        api_key = get_api_key()
-        if api_key:
-            all_headers[API_KEY_HEADER_NAME] = api_key
-
-        # return
-        return all_headers
-
-    def execute(self,
-                limited_evaluation: Optional[bool] = False,
-                skip_retry_logic: Optional[bool] = False) -> AuroraXResponse:
-        """
-        Execute an AuroraX request
-
-        Args:
-            limited_evaluation: don't evaluate the response after the retry
-                mechanism, defaults to False
-            skip_retry_logic: exclude the retry logic in the request, defaults
-                to False
-
-        Returns:
-            an AuroraXResponse object
-
-        Raises:
-            pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error
-            pyaurorax.exceptions.AuroraXNotFoundException: requested resource was not found
-            pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected content error
-            pyaurorax.exceptions.AuroraXUnexpectedEmptyResponse: unexpected empty response
-            pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation
-        """
-        # sanitize data
-        body_santized = json.dumps(self.body, default=json_converter)
-
-        # make request
-        try:
-            req = requests.request(self.method,
-                                   self.url,
-                                   headers=self.__merge_headers(),
-                                   params=self.params,
-                                   data=body_santized,
-                                   timeout=REQUEST_TIMEOUT)
-        except requests.exceptions.Timeout:
-            raise AuroraXTimeoutException("Error 408: request timeout reached")
-
-        # retry request if needed
-        if (skip_retry_logic is False):
-            for i in range(0, DEFAULT_RETRIES):
-                if (req.status_code == 500 and "text/plain" in req.headers["Content-Type"]):
-                    if (i == (DEFAULT_RETRIES - 1)):
-                        raise AuroraXMaxRetriesException("%s (%s)" % (req.content.decode(),
-                                                                      req.status_code))
-                    try:
-                        req = requests.request(self.method,
-                                               self.url,
-                                               headers=self.__merge_headers(),
-                                               params=self.params,
-                                               json=self.body,
-                                               data=body_santized,
-                                               timeout=REQUEST_TIMEOUT)
-                    except requests.exceptions.Timeout:
-                        raise AuroraXTimeoutException("Error 408: request timeout reached")
-                else:
-                    break
-
-        # check if authorization worked (raised by API or by Nginx)
-        if (req.status_code == 401):
-            if (req.headers["Content-Type"] == "application/json"):
-                if ("error_message" in req.json()):
-                    # this will be an error message that the API meant to send
-                    raise AuroraXUnauthorizedException("%s %s" % (req.status_code,
-                                                                  req.json()["error_message"]))
-                else:
-                    raise AuroraXUnauthorizedException("Error 401: unauthorized")
-            else:
-                raise AuroraXUnauthorizedException("Error 401: unauthorized")
-
-        # check for 404 error (raised by API or by Nginx)
-        if (req.status_code == 404):
-            if (req.headers["Content-Type"] == "application/json"):
-                if ("error_message" in req.json()):
-                    # this will be an error message that the API meant to send
-                    raise AuroraXNotFoundException("%s %s" % (req.status_code,
-                                                              req.json()["error_message"]))
-                else:
-                    # this will likely be a 404 from the java servlet
-                    raise AuroraXNotFoundException("Error 404: not found")
-            else:
-                raise AuroraXNotFoundException("Error 404: not found")
-
-        # check if we only want to do limited evaluation
-        if (limited_evaluation is True):
-            res = AuroraXResponse(request=req,
-                                  data=None,
-                                  status_code=req.status_code)
-            return res
-
-        # check content type
-        if (self.null_response is False):
-            if (req.headers["Content-Type"] == "application/json"):
-                if (len(req.content) == 0):
-                    raise AuroraXUnexpectedEmptyResponse("No response received")
-                else:
-                    response_data = req.json()
-            else:
-                raise AuroraXUnexpectedContentTypeException("%s (%s)" % (req.content.decode(),
-                                                                         req.status_code))
-        else:
-            if (req.status_code in [200, 201, 202, 204]):
-                response_data = None
-            else:
-                response_data = req.json()
-
-        # check for server error
-        if (req.status_code == 500):
-            response_json = req.json()
-            if ("error_message" in response_json):
-                raise AuroraXException("%s (%s)" % (response_json["error_message"],
-                                                    req.status_code))
-            else:
-                raise AuroraXException(response_json)
-
-        # create response object
-        res = AuroraXResponse(request=req,
-                              data=response_data,
-                              status_code=req.status_code)
-
-        # return
-        return res
-
-    def __str__(self) -> str:
-        """
-        String method
-
-        Returns:
-            string format of AuroraXRequest
-        """
-        return self.__repr__()
-
-    def __repr__(self) -> str:
-        """
-        Object representation
-
-        Returns:
-            object representation of AuroraXRequest
-        """
-        return f"AuroraXRequest(method='{self.method}', url='{self.url}')"
-
-

Ancestors

-
    -
  • pydantic.main.BaseModel
  • pydantic.utils.Representation
-

Class variables

-
-
var body : Union[Dict, ForwardRef(None), List]
-
-
-
-
var headers : Optional[Dict]
-
-
-
-
var method : str
-
-
-
-
var null_response : Optional[bool]
-
-
-
-
var params : Optional[Dict]
-
-
-
-
var url : str
-
-
-
-
-
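The class variables above are regular pydantic fields, so any of them can be supplied as keyword arguments when the request is constructed. A hedged sketch of a request exercising the less common fields; the endpoint URL, payload, and extra header are placeholders only:

req = AuroraXRequest(
    method="post",
    url="https://api.aurorax.space/some/endpoint",    # placeholder endpoint
    params={"format": "json"},                        # placeholder URL parameters
    body={"example_key": "example_value"},            # placeholder post body
    headers={"x-example-header": "value"},            # merged with the default request headers
    null_response=True,                               # expect an empty 202-style response
)
res = req.execute()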

Methods

-
-
-def execute(self, limited_evaluation: Optional[bool] = False, skip_retry_logic: Optional[bool] = False) ‑> AuroraXResponse -
-
-

Execute an AuroraX request

-

Args

-
-
limited_evaluation
-
don't evaluate the response after the retry mechanism, defaults to False
-
skip_retry_logic
-
exclude the retry logic in the request, defaults to False
-
-

Returns

-

an AuroraXResponse object

-

Raises

-
-
AuroraXMaxRetriesException
-
max retry error
-
AuroraXNotFoundException
-
requested resource was not found
-
AuroraXUnexpectedContentTypeException
-
unexpected content error
-
AuroraXUnexpectedEmptyResponse
-
unexpected empty response
-
AuroraXUnauthorizedException
-
invalid API key for this operation
-
-
- -Expand source code - -
def execute(self,
-            limited_evaluation: Optional[bool] = False,
-            skip_retry_logic: Optional[bool] = False) -> AuroraXResponse:
-    """
-    Execute an AuroraX request
-
-    Args:
-        limited_evaluation: don't evaluate the response after the retry
-            mechanism, defaults to False
-        skip_retry_logic: exclude the retry logic in the request, defaults
-            to False
-
-    Returns:
-        an AuroraXResponse object
-
-    Raises:
-        pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error
-        pyaurorax.exceptions.AuroraXNotFoundException: requested resource was not found
-        pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected content error
-        pyaurorax.exceptions.AuroraXUnexpectedEmptyResponse: unexpected empty response
-        pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation
-    """
-    # sanitize data
-    body_santized = json.dumps(self.body, default=json_converter)
-
-    # make request
-    try:
-        req = requests.request(self.method,
-                               self.url,
-                               headers=self.__merge_headers(),
-                               params=self.params,
-                               data=body_santized,
-                               timeout=REQUEST_TIMEOUT)
-    except requests.exceptions.Timeout:
-        raise AuroraXTimeoutException("Error 408: request timeout reached")
-
-    # retry request if needed
-    if (skip_retry_logic is False):
-        for i in range(0, DEFAULT_RETRIES):
-            if (req.status_code == 500 and "text/plain" in req.headers["Content-Type"]):
-                if (i == (DEFAULT_RETRIES - 1)):
-                    raise AuroraXMaxRetriesException("%s (%s)" % (req.content.decode(),
-                                                                  req.status_code))
-                try:
-                    req = requests.request(self.method,
-                                           self.url,
-                                           headers=self.__merge_headers(),
-                                           params=self.params,
-                                           json=self.body,
-                                           data=body_santized,
-                                           timeout=REQUEST_TIMEOUT)
-                except requests.exceptions.Timeout:
-                    raise AuroraXTimeoutException("Error 408: request timeout reached")
-            else:
-                break
-
-    # check if authorization worked (raised by API or by Nginx)
-    if (req.status_code == 401):
-        if (req.headers["Content-Type"] == "application/json"):
-            if ("error_message" in req.json()):
-                # this will be an error message that the API meant to send
-                raise AuroraXUnauthorizedException("%s %s" % (req.status_code,
-                                                              req.json()["error_message"]))
-            else:
-                raise AuroraXUnauthorizedException("Error 401: unauthorized")
-        else:
-            raise AuroraXUnauthorizedException("Error 401: unauthorized")
-
-    # check for 404 error (raised by API or by Nginx)
-    if (req.status_code == 404):
-        if (req.headers["Content-Type"] == "application/json"):
-            if ("error_message" in req.json()):
-                # this will be an error message that the API meant to send
-                raise AuroraXNotFoundException("%s %s" % (req.status_code,
-                                                          req.json()["error_message"]))
-            else:
-                # this will likely be a 404 from the java servlet
-                raise AuroraXNotFoundException("Error 404: not found")
-        else:
-            raise AuroraXNotFoundException("Error 404: not found")
-
-    # check if we only want to do limited evaluation
-    if (limited_evaluation is True):
-        res = AuroraXResponse(request=req,
-                              data=None,
-                              status_code=req.status_code)
-        return res
-
-    # check content type
-    if (self.null_response is False):
-        if (req.headers["Content-Type"] == "application/json"):
-            if (len(req.content) == 0):
-                raise AuroraXUnexpectedEmptyResponse("No response received")
-            else:
-                response_data = req.json()
-        else:
-            raise AuroraXUnexpectedContentTypeException("%s (%s)" % (req.content.decode(),
-                                                                     req.status_code))
-    else:
-        if (req.status_code in [200, 201, 202, 204]):
-            response_data = None
-        else:
-            response_data = req.json()
-
-    # check for server error
-    if (req.status_code == 500):
-        response_json = req.json()
-        if ("error_message" in response_json):
-            raise AuroraXException("%s (%s)" % (response_json["error_message"],
-                                                req.status_code))
-        else:
-            raise AuroraXException(response_json)
-
-    # create response object
-    res = AuroraXResponse(request=req,
-                          data=response_data,
-                          status_code=req.status_code)
-
-    # return
-    return res
-
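The two optional parameters give the caller tighter control over how the response is handled. A short sketch of both, assuming a req object built as an AuroraXRequest as shown earlier on this page:

# only evaluate the status; res.data is left as None when limited_evaluation is set
res = req.execute(limited_evaluation=True)
print(res.status_code)

# send the request exactly once, bypassing the built-in retry behaviour
res = req.execute(skip_retry_logic=True)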
-
-
-
-
-
-
- -
- - - \ No newline at end of file diff --git a/docs/code/pyaurorax_api_reference/pyaurorax/api/classes/urls.html b/docs/code/pyaurorax_api_reference/pyaurorax/api/classes/urls.html deleted file mode 100644 index c2b1e13..0000000 --- a/docs/code/pyaurorax_api_reference/pyaurorax/api/classes/urls.html +++ /dev/null @@ -1,639 +0,0 @@ - - - - - - -pyaurorax.api.classes.urls API documentation - - - - - - - - - - - -
-
-
-

Module pyaurorax.api.classes.urls

-
-
-

This class provides the URL endpoints for different AuroraX API requests. It is contained in a special class so that we can use different base URLs if desired.

-
- -Expand source code - -
"""
-This class provides the URL endpoints for different AuroraX
-API requests. It is contained in a special class so that we
-can use different base URLs if desired.
-"""
-
-from typing import Dict
-from ...api import DEFAULT_BASE_URL
-
-# pdoc init
-__pdoc__: Dict = {}
-
-
-class URLs:
-    __DEFAULT_URL_DATA_SOURCES = "/api/v1/data_sources"
-    __DEFAULT_URL_DATA_SOURCES_SEARCH = "/api/v1/data_sources/search"
-    __DEFAULT_URL_EPHEMERIS_AVAILABILITY = "/api/v1/availability/ephemeris"
-    __DEFAULT_URL_EPHEMERIS_UPLOAD = "/api/v1/data_sources/{}/ephemeris"
-    __DEFAULT_URL_EPHEMERIS_SEARCH = "/api/v1/ephemeris/search"
-    __DEFAULT_URL_EPHEMERIS_REQUEST = "/api/v1/ephemeris/requests/{}"
-    __DEFAULT_URL_DATA_PRODUCTS_AVAILABILITY = "/api/v1/availability/data_products"
-    __DEFAULT_URL_DATA_PRODUCTS_UPLOAD = "/api/v1/data_sources/{}/data_products"
-    __DEFAULT_URL_DATA_PRODUCTS_SEARCH = "/api/v1/data_products/search"
-    __DEFAULT_URL_DATA_PRODUCTS_REQUEST = "/api/v1/data_products/requests/{}"
-    __DEFAULT_URL_CONJUNCTION_SEARCH = "/api/v1/conjunctions/search"
-    __DEFAULT_URL_CONJUNCTION_REQUEST = "/api/v1/conjunctions/requests/{}"
-    __DEFAULT_URL_DESCRIBE_CONJUNCTION_QUERY = "/api/v1/utils/describe/query/conjunction"
-    __DEFAULT_URL_DESCRIBE_DATA_PRODUCTS_QUERY = "/api/v1/utils/describe/query/data_products"
-    __DEFAULT_URL_DESCRIBE_EPHEMERIS_QUERY = "/api/v1/utils/describe/query/ephemeris"
-    __DEFAULT_URL_LIST_REQUESTS = "/api/v1/utils/admin/search_requests"
-    __DEFAULT_URL_DELETE_REQUESTS = "/api/v1/utils/admin/search_requests/{}"
-
-    def __init__(self, base_url: str = DEFAULT_BASE_URL) -> None:
-        self.__base = base_url
-        self.__data_sources = self.__DEFAULT_URL_DATA_SOURCES
-        self.__data_sources_search = self.__DEFAULT_URL_DATA_SOURCES_SEARCH
-        self.__ephemeris_availability = self.__DEFAULT_URL_EPHEMERIS_AVAILABILITY
-        self.__ephemeris_search = self.__DEFAULT_URL_EPHEMERIS_SEARCH
-        self.__ephemeris_upload = self.__DEFAULT_URL_EPHEMERIS_UPLOAD
-        self.__ephemeris_request = self.__DEFAULT_URL_EPHEMERIS_REQUEST
-        self.__data_products_availability = self.__DEFAULT_URL_DATA_PRODUCTS_AVAILABILITY
-        self.__data_products_search = self.__DEFAULT_URL_DATA_PRODUCTS_SEARCH
-        self.__data_products_upload = self.__DEFAULT_URL_DATA_PRODUCTS_UPLOAD
-        self.__data_products_request = self.__DEFAULT_URL_DATA_PRODUCTS_REQUEST
-        self.__conjunction_search = self.__DEFAULT_URL_CONJUNCTION_SEARCH
-        self.__conjunction_request = self.__DEFAULT_URL_CONJUNCTION_REQUEST
-        self.__describe_conjunction_query_url = self.__DEFAULT_URL_DESCRIBE_CONJUNCTION_QUERY
-        self.__describe_data_products_query_url = self.__DEFAULT_URL_DESCRIBE_DATA_PRODUCTS_QUERY
-        self.__describe_ephemeris_query_url = self.__DEFAULT_URL_DESCRIBE_EPHEMERIS_QUERY
-        self.__list_requests_url = self.__DEFAULT_URL_LIST_REQUESTS
-        self.__delete_requests_url = self.__DEFAULT_URL_DELETE_REQUESTS
-
-    @property
-    def base_url(self) -> str:
-        return self.__base
-
-    @base_url.setter
-    def base_url(self, value: str) -> None:
-        self.__base = value
-
-    # data sources
-    # -------------------
-    @property
-    def data_sources_url(self) -> str:
-        return f"{self.__base}{self.__data_sources}"
-
-    @property
-    def data_sources_search_url(self) -> str:
-        return f"{self.__base}{self.__data_sources_search}"
-
-    # availability
-    # -------------------
-    @property
-    def ephemeris_availability_url(self) -> str:
-        return f"{self.__base}{self.__ephemeris_availability}"
-
-    @property
-    def data_products_availability_url(self) -> str:
-        return f"{self.__base}{self.__data_products_availability}"
-
-    # ephemeris
-    # -------------------
-    @property
-    def ephemeris_search_url(self) -> str:
-        return f"{self.__base}{self.__ephemeris_search}"
-
-    @property
-    def ephemeris_upload_url(self) -> str:
-        return f"{self.__base}{self.__ephemeris_upload}"
-
-    @property
-    def ephemeris_request_url(self) -> str:
-        return f"{self.__base}{self.__ephemeris_request}"
-
-    # data products
-    # -------------------
-    @property
-    def data_products_search_url(self) -> str:
-        return f"{self.__base}{self.__data_products_search}"
-
-    @property
-    def data_products_upload_url(self) -> str:
-        return f"{self.__base}{self.__data_products_upload}"
-
-    @property
-    def data_products_request_url(self) -> str:
-        return f"{self.__base}{self.__data_products_request}"
-
-    # conjunctions
-    # -------------------
-    @property
-    def conjunction_search_url(self) -> str:
-        return f"{self.__base}{self.__conjunction_search}"
-
-    @property
-    def conjunction_request_url(self) -> str:
-        return f"{self.__base}{self.__conjunction_request}"
-
-    # describe
-    # -------------------
-    @property
-    def describe_conjunction_query_url(self) -> str:
-        return f"{self.__base}{self.__describe_conjunction_query_url}"
-
-    @property
-    def describe_data_products_query_url(self) -> str:
-        return f"{self.__base}{self.__describe_data_products_query_url}"
-
-    @property
-    def describe_ephemeris_query_url(self) -> str:
-        return f"{self.__base}{self.__describe_ephemeris_query_url}"
-
-    # admin
-    # -------------------
-    @property
-    def list_requests_url(self) -> str:
-        return f"{self.__base}{self.__list_requests_url}"
-
-    @property
-    def delete_requests_url(self) -> str:
-        return f"{self.__base}{self.__delete_requests_url}"
-
-
-
-
-
-
-
-
-
-
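As a quick illustration of why the endpoints live on a class rather than as bare constants, a URLs instance can be built against a non-default deployment and every endpoint property follows the new base. A minimal sketch, assuming the class is imported from the module path documented on this page:

from pyaurorax.api.classes.urls import URLs

# default instance points at https://api.aurorax.space
urls = URLs()
print(urls.conjunction_search_url)

# instance built against the staging deployment; all endpoint properties follow along
staging_urls = URLs(base_url="https://api.staging.aurorax.space")
print(staging_urls.conjunction_search_url)
print(staging_urls.ephemeris_search_url)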

Classes

-
-
-class URLs -(base_url: str = 'https://api.aurorax.space') -
-
-
-
- -Expand source code - -
class URLs:
-    __DEFAULT_URL_DATA_SOURCES = "/api/v1/data_sources"
-    __DEFAULT_URL_DATA_SOURCES_SEARCH = "/api/v1/data_sources/search"
-    __DEFAULT_URL_EPHEMERIS_AVAILABILITY = "/api/v1/availability/ephemeris"
-    __DEFAULT_URL_EPHEMERIS_UPLOAD = "/api/v1/data_sources/{}/ephemeris"
-    __DEFAULT_URL_EPHEMERIS_SEARCH = "/api/v1/ephemeris/search"
-    __DEFAULT_URL_EPHEMERIS_REQUEST = "/api/v1/ephemeris/requests/{}"
-    __DEFAULT_URL_DATA_PRODUCTS_AVAILABILITY = "/api/v1/availability/data_products"
-    __DEFAULT_URL_DATA_PRODUCTS_UPLOAD = "/api/v1/data_sources/{}/data_products"
-    __DEFAULT_URL_DATA_PRODUCTS_SEARCH = "/api/v1/data_products/search"
-    __DEFAULT_URL_DATA_PRODUCTS_REQUEST = "/api/v1/data_products/requests/{}"
-    __DEFAULT_URL_CONJUNCTION_SEARCH = "/api/v1/conjunctions/search"
-    __DEFAULT_URL_CONJUNCTION_REQUEST = "/api/v1/conjunctions/requests/{}"
-    __DEFAULT_URL_DESCRIBE_CONJUNCTION_QUERY = "/api/v1/utils/describe/query/conjunction"
-    __DEFAULT_URL_DESCRIBE_DATA_PRODUCTS_QUERY = "/api/v1/utils/describe/query/data_products"
-    __DEFAULT_URL_DESCRIBE_EPHEMERIS_QUERY = "/api/v1/utils/describe/query/ephemeris"
-    __DEFAULT_URL_LIST_REQUESTS = "/api/v1/utils/admin/search_requests"
-    __DEFAULT_URL_DELETE_REQUESTS = "/api/v1/utils/admin/search_requests/{}"
-
-    def __init__(self, base_url: str = DEFAULT_BASE_URL) -> None:
-        self.__base = base_url
-        self.__data_sources = self.__DEFAULT_URL_DATA_SOURCES
-        self.__data_sources_search = self.__DEFAULT_URL_DATA_SOURCES_SEARCH
-        self.__ephemeris_availability = self.__DEFAULT_URL_EPHEMERIS_AVAILABILITY
-        self.__ephemeris_search = self.__DEFAULT_URL_EPHEMERIS_SEARCH
-        self.__ephemeris_upload = self.__DEFAULT_URL_EPHEMERIS_UPLOAD
-        self.__ephemeris_request = self.__DEFAULT_URL_EPHEMERIS_REQUEST
-        self.__data_products_availability = self.__DEFAULT_URL_DATA_PRODUCTS_AVAILABILITY
-        self.__data_products_search = self.__DEFAULT_URL_DATA_PRODUCTS_SEARCH
-        self.__data_products_upload = self.__DEFAULT_URL_DATA_PRODUCTS_UPLOAD
-        self.__data_products_request = self.__DEFAULT_URL_DATA_PRODUCTS_REQUEST
-        self.__conjunction_search = self.__DEFAULT_URL_CONJUNCTION_SEARCH
-        self.__conjunction_request = self.__DEFAULT_URL_CONJUNCTION_REQUEST
-        self.__describe_conjunction_query_url = self.__DEFAULT_URL_DESCRIBE_CONJUNCTION_QUERY
-        self.__describe_data_products_query_url = self.__DEFAULT_URL_DESCRIBE_DATA_PRODUCTS_QUERY
-        self.__describe_ephemeris_query_url = self.__DEFAULT_URL_DESCRIBE_EPHEMERIS_QUERY
-        self.__list_requests_url = self.__DEFAULT_URL_LIST_REQUESTS
-        self.__delete_requests_url = self.__DEFAULT_URL_DELETE_REQUESTS
-
-    @property
-    def base_url(self) -> str:
-        return self.__base
-
-    @base_url.setter
-    def base_url(self, value: str) -> None:
-        self.__base = value
-
-    # data sources
-    # -------------------
-    @property
-    def data_sources_url(self) -> str:
-        return f"{self.__base}{self.__data_sources}"
-
-    @property
-    def data_sources_search_url(self) -> str:
-        return f"{self.__base}{self.__data_sources_search}"
-
-    # availability
-    # -------------------
-    @property
-    def ephemeris_availability_url(self) -> str:
-        return f"{self.__base}{self.__ephemeris_availability}"
-
-    @property
-    def data_products_availability_url(self) -> str:
-        return f"{self.__base}{self.__data_products_availability}"
-
-    # ephemeris
-    # -------------------
-    @property
-    def ephemeris_search_url(self) -> str:
-        return f"{self.__base}{self.__ephemeris_search}"
-
-    @property
-    def ephemeris_upload_url(self) -> str:
-        return f"{self.__base}{self.__ephemeris_upload}"
-
-    @property
-    def ephemeris_request_url(self) -> str:
-        return f"{self.__base}{self.__ephemeris_request}"
-
-    # data products
-    # -------------------
-    @property
-    def data_products_search_url(self) -> str:
-        return f"{self.__base}{self.__data_products_search}"
-
-    @property
-    def data_products_upload_url(self) -> str:
-        return f"{self.__base}{self.__data_products_upload}"
-
-    @property
-    def data_products_request_url(self) -> str:
-        return f"{self.__base}{self.__data_products_request}"
-
-    # conjunctions
-    # -------------------
-    @property
-    def conjunction_search_url(self) -> str:
-        return f"{self.__base}{self.__conjunction_search}"
-
-    @property
-    def conjunction_request_url(self) -> str:
-        return f"{self.__base}{self.__conjunction_request}"
-
-    # describe
-    # -------------------
-    @property
-    def describe_conjunction_query_url(self) -> str:
-        return f"{self.__base}{self.__describe_conjunction_query_url}"
-
-    @property
-    def describe_data_products_query_url(self) -> str:
-        return f"{self.__base}{self.__describe_data_products_query_url}"
-
-    @property
-    def describe_ephemeris_query_url(self) -> str:
-        return f"{self.__base}{self.__describe_ephemeris_query_url}"
-
-    # admin
-    # -------------------
-    @property
-    def list_requests_url(self) -> str:
-        return f"{self.__base}{self.__list_requests_url}"
-
-    @property
-    def delete_requests_url(self) -> str:
-        return f"{self.__base}{self.__delete_requests_url}"
-
-

Instance variables

-
-
var base_url : str
-
-
-
- -Expand source code - -
@property
-def base_url(self) -> str:
-    return self.__base
-
-
-
var conjunction_request_url : str
-
-
-
- -Expand source code - -
@property
-def conjunction_request_url(self) -> str:
-    return f"{self.__base}{self.__conjunction_request}"
-
-
-
var conjunction_search_url : str
-
-
-
- -Expand source code - -
@property
-def conjunction_search_url(self) -> str:
-    return f"{self.__base}{self.__conjunction_search}"
-
-
-
var data_products_availability_url : str
-
-
-
- -Expand source code - -
@property
-def data_products_availability_url(self) -> str:
-    return f"{self.__base}{self.__data_products_availability}"
-
-
-
var data_products_request_url : str
-
-
-
- -Expand source code - -
@property
-def data_products_request_url(self) -> str:
-    return f"{self.__base}{self.__data_products_request}"
-
-
-
var data_products_search_url : str
-
-
-
- -Expand source code - -
@property
-def data_products_search_url(self) -> str:
-    return f"{self.__base}{self.__data_products_search}"
-
-
-
var data_products_upload_url : str
-
-
-
- -Expand source code - -
@property
-def data_products_upload_url(self) -> str:
-    return f"{self.__base}{self.__data_products_upload}"
-
-
-
var data_sources_search_url : str
-
-
-
- -Expand source code - -
@property
-def data_sources_search_url(self) -> str:
-    return f"{self.__base}{self.__data_sources_search}"
-
-
-
var data_sources_url : str
-
-
-
- -Expand source code - -
@property
-def data_sources_url(self) -> str:
-    return f"{self.__base}{self.__data_sources}"
-
-
-
var delete_requests_url : str
-
-
-
- -Expand source code - -
@property
-def delete_requests_url(self) -> str:
-    return f"{self.__base}{self.__delete_requests_url}"
-
-
-
var describe_conjunction_query_url : str
-
-
-
- -Expand source code - -
@property
-def describe_conjunction_query_url(self) -> str:
-    return f"{self.__base}{self.__describe_conjunction_query_url}"
-
-
-
var describe_data_products_query_url : str
-
-
-
- -Expand source code - -
@property
-def describe_data_products_query_url(self) -> str:
-    return f"{self.__base}{self.__describe_data_products_query_url}"
-
-
-
var describe_ephemeris_query_url : str
-
-
-
- -Expand source code - -
@property
-def describe_ephemeris_query_url(self) -> str:
-    return f"{self.__base}{self.__describe_ephemeris_query_url}"
-
-
-
var ephemeris_availability_url : str
-
-
-
- -Expand source code - -
@property
-def ephemeris_availability_url(self) -> str:
-    return f"{self.__base}{self.__ephemeris_availability}"
-
-
-
var ephemeris_request_url : str
-
-
-
- -Expand source code - -
@property
-def ephemeris_request_url(self) -> str:
-    return f"{self.__base}{self.__ephemeris_request}"
-
-
-
var ephemeris_search_url : str
-
-
-
- -Expand source code - -
@property
-def ephemeris_search_url(self) -> str:
-    return f"{self.__base}{self.__ephemeris_search}"
-
-
-
var ephemeris_upload_url : str
-
-
-
- -Expand source code - -
@property
-def ephemeris_upload_url(self) -> str:
-    return f"{self.__base}{self.__ephemeris_upload}"
-
-
-
var list_requests_url : str
-
-
-
- -Expand source code - -
@property
-def list_requests_url(self) -> str:
-    return f"{self.__base}{self.__list_requests_url}"
-
-
-
-
-
-
-
- -
- - - \ No newline at end of file diff --git a/docs/code/pyaurorax_api_reference/pyaurorax/api/index.html b/docs/code/pyaurorax_api_reference/pyaurorax/api/index.html deleted file mode 100644 index c0ba790..0000000 --- a/docs/code/pyaurorax_api_reference/pyaurorax/api/index.html +++ /dev/null @@ -1,808 +0,0 @@ - - - - - - -pyaurorax.api API documentation - - - - - - - - - - - -
-
-
-

Module pyaurorax.api

-
-
-

This module is the under-the-hood interface for RESTful API requests. It provides helper functions that the PyAuroraX library uses to make robust requests.

-

Note that all functions and classes from the submodules are imported at this level of the api module. They can be referenced from here instead of digging deeper into the submodules.

-
- -Expand source code - -
"""
-This module is the under-the-hood interface for RESTful API
-requests. It provides helper functions that the PyAuroraX library
-uses to make robust requests.
-
-Note that all functions and classes from submodules are all imported
-at this level of the api module. They can be referenced from here
-instead of digging in deeper to the submodules.
-"""
-
-# endpoint URLs
-DEFAULT_BASE_URL: str = "https://api.aurorax.space"
-""" The default API base URL to use when sending requests """
-
-# function and class imports
-from .classes.request import (DEFAULT_RETRIES,
-                              REQUEST_HEADERS,
-                              REQUEST_TIMEOUT,
-                              API_KEY_HEADER_NAME,
-                              AuroraXRequest)
-from .classes.response import AuroraXResponse
-from .api import (urls,
-                  get_api_key,
-                  authenticate,
-                  set_base_url,
-                  get_base_url,
-                  reset_base_url)
-
-# pdoc import and exports
-from .api import __pdoc__ as __api_pdoc__
-from .classes.request import __pdoc__ as __classes_request_pdoc__
-from .classes.response import __pdoc__ as __classes_response_pdoc__
-__pdoc__ = __api_pdoc__
-__pdoc__ = dict(__pdoc__, **__classes_request_pdoc__)
-__pdoc__ = dict(__pdoc__, **__classes_response_pdoc__)
-__all__ = [
-    "DEFAULT_BASE_URL",
-    "DEFAULT_RETRIES",
-    "REQUEST_HEADERS",
-    "REQUEST_TIMEOUT",
-    "API_KEY_HEADER_NAME",
-    "AuroraXRequest",
-    "AuroraXResponse",
-    "urls",
-    "get_api_key",
-    "authenticate",
-    "set_base_url",
-    "get_base_url",
-    "reset_base_url",
-]
-
-
-
-
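Because everything in __all__ above is re-exported at this level, callers generally import from pyaurorax.api directly rather than reaching into the submodules. A short sketch of the imports this enables, using only names listed in __all__ (the API key value is a placeholder):

from pyaurorax.api import (AuroraXRequest, AuroraXResponse, urls,
                           authenticate, get_base_url, DEFAULT_BASE_URL)

authenticate("YOUR_API_KEY")                 # placeholder key
print(get_base_url() == DEFAULT_BASE_URL)    # True until set_base_url() is called

req = AuroraXRequest(method="get", url=urls.data_sources_url)
res: AuroraXResponse = req.execute()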

Sub-modules

-
-
pyaurorax.api.api
-
-

Helper functions when interacting with the API

-
-
pyaurorax.api.classes
-
-

Separated classes and functions used by the api module …

-
-
-
-
-

Global variables

-
-
var DEFAULT_BASE_URL : str
-
-

The default API base URL to use when sending requests

-
-
-
-
-

Functions

-
-
-def authenticate(api_key: str) ‑> None -
-
-

Set authentication values for use with subsequent queries

-

Args

-
-
api_key
-
an AuroraX API key string
-
-
- -Expand source code - -
def authenticate(api_key: str) -> None:
-    """
-    Set authentication values for use with subsequent queries
-
-    Args:
-        api_key: an AuroraX API key string
-    """
-    global __api_key
-    __api_key = api_key
-
-
-
-def get_api_key() ‑> str -
-
-

Returns the currently set API key for the module

-

Returns

-

current API key

-
- -Expand source code - -
def get_api_key() -> str:
-    """
-    Returns the currently set API key for the module
-
-    Returns:
-        current API key
-    """
-    return __api_key
-
-
-
-def get_base_url() ‑> str -
-
-

Returns the current base URL for the API

-

Returns

-

current base URL

-
- -Expand source code - -
def get_base_url() -> str:
-    """
-    Returns the current base URL for the API
-
-    Returns:
-        current base URL
-    """
-    return urls.base_url
-
-
-
-def reset_base_url() ‑> None -
-
-

Set the base URL for the API back to the default

-
- -Expand source code - -
def reset_base_url() -> None:
-    """
-    Set the base URL for the API back to the default
-    """
-    urls.base_url = DEFAULT_BASE_URL
-
-
-
-def set_base_url(url: str) ‑> None -
-
-

Change the base URL for the API (i.e. change to the staging system or local server)

-

Args

-
-
url
-
the new base URL string (i.e. 'https://api.staging.aurorax.space')
-
-
- -Expand source code - -
def set_base_url(url: str) -> None:
-    """
-    Change the base URL for the API (ie. change to the staging
-    system or local server)
-
-    Args:
-        url: the new base url string (ie. 'https://api.staging.aurorax.space')
-    """
-    urls.base_url = url
-
-
-
-
-
-

Classes

-
-
-class AuroraXRequest -(**data: Any) -
-
-

AuroraX API request class

-

Attributes

-
-
url
-
the URL to make the request against
-
method
-
the HTTP method to use (get, post, put, delete, etc.)
-
params
-
any URL parameters to send in the request, defaults to {}
-
body
-
the body of the request (i.e. post data), defaults to {}
-
headers
-
any headers to send as part of the request (in addition to the default ones), default is {}
-
null_response
-
signifies if we expect a response from the API that has no body/data in it (i.e. requests to upload data that respond with just a 202 status code), defaults to False
-
-

Create a new model by parsing and validating input data from keyword arguments.

-

Raises ValidationError if the input data cannot be parsed to form a valid model.

-
- -Expand source code - -
class AuroraXRequest(BaseModel):
-    """
-    AuroraX API request class
-
-    Attributes:
-        url: the URL to make the request against
-        method: the HTTP method to use (get, post, put, delete, etc.)
-        params: any URL parameters to send in the request, defaults to {}
-        body: the body of the request (ie. post data), defaults to {}
-        headers: any headers to send as part of the request (in addition to the default ones), default is {}
-        null_response: signifies if we expect a response from the API that has no
-            body/data in it (ie. requests to upload data that respond with just a
-            202 status code), defaults to False
-    """
-    url: str
-    method: str
-    params: Optional[Dict] = {}
-    body: Union[Optional[Dict], Optional[List]] = {}
-    headers: Optional[Dict] = {}
-    null_response: Optional[bool] = False
-
-    def __merge_headers(self):
-        # set initial headers
-        all_headers = REQUEST_HEADERS
-
-        # add headers passed into the class
-        for key, value in self.headers.items():
-            all_headers[key] = value
-
-        # add api key
-        api_key = get_api_key()
-        if api_key:
-            all_headers[API_KEY_HEADER_NAME] = api_key
-
-        # return
-        return all_headers
-
-    def execute(self,
-                limited_evaluation: Optional[bool] = False,
-                skip_retry_logic: Optional[bool] = False) -> AuroraXResponse:
-        """
-        Execute an AuroraX request
-
-        Args:
-            limited_evaluation: don't evaluate the response after the retry
-                mechanism, defaults to False
-            skip_retry_logic: exclude the retry logic in the request, defaults
-                to False
-
-        Returns:
-            an AuroraXResponse object
-
-        Raises:
-            pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error
-            pyaurorax.exceptions.AuroraXNotFoundException: requested resource was not found
-            pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected content error
-            pyaurorax.exceptions.AuroraXUnexpectedEmptyResponse: unexpected empty response
-            pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation
-        """
-        # sanitize data
-        body_santized = json.dumps(self.body, default=json_converter)
-
-        # make request
-        try:
-            req = requests.request(self.method,
-                                   self.url,
-                                   headers=self.__merge_headers(),
-                                   params=self.params,
-                                   data=body_santized,
-                                   timeout=REQUEST_TIMEOUT)
-        except requests.exceptions.Timeout:
-            raise AuroraXTimeoutException("Error 408: request timeout reached")
-
-        # retry request if needed
-        if (skip_retry_logic is False):
-            for i in range(0, DEFAULT_RETRIES):
-                if (req.status_code == 500 and "text/plain" in req.headers["Content-Type"]):
-                    if (i == (DEFAULT_RETRIES - 1)):
-                        raise AuroraXMaxRetriesException("%s (%s)" % (req.content.decode(),
-                                                                      req.status_code))
-                    try:
-                        req = requests.request(self.method,
-                                               self.url,
-                                               headers=self.__merge_headers(),
-                                               params=self.params,
-                                               json=self.body,
-                                               data=body_santized,
-                                               timeout=REQUEST_TIMEOUT)
-                    except requests.exceptions.Timeout:
-                        raise AuroraXTimeoutException("Error 408: request timeout reached")
-                else:
-                    break
-
-        # check if authorization worked (raised by API or by Nginx)
-        if (req.status_code == 401):
-            if (req.headers["Content-Type"] == "application/json"):
-                if ("error_message" in req.json()):
-                    # this will be an error message that the API meant to send
-                    raise AuroraXUnauthorizedException("%s %s" % (req.status_code,
-                                                                  req.json()["error_message"]))
-                else:
-                    raise AuroraXUnauthorizedException("Error 401: unauthorized")
-            else:
-                raise AuroraXUnauthorizedException("Error 401: unauthorized")
-
-        # check for 404 error (raised by API or by Nginx)
-        if (req.status_code == 404):
-            if (req.headers["Content-Type"] == "application/json"):
-                if ("error_message" in req.json()):
-                    # this will be an error message that the API meant to send
-                    raise AuroraXNotFoundException("%s %s" % (req.status_code,
-                                                              req.json()["error_message"]))
-                else:
-                    # this will likely be a 404 from the java servlet
-                    raise AuroraXNotFoundException("Error 404: not found")
-            else:
-                raise AuroraXNotFoundException("Error 404: not found")
-
-        # check if we only want to do limited evaluation
-        if (limited_evaluation is True):
-            res = AuroraXResponse(request=req,
-                                  data=None,
-                                  status_code=req.status_code)
-            return res
-
-        # check content type
-        if (self.null_response is False):
-            if (req.headers["Content-Type"] == "application/json"):
-                if (len(req.content) == 0):
-                    raise AuroraXUnexpectedEmptyResponse("No response received")
-                else:
-                    response_data = req.json()
-            else:
-                raise AuroraXUnexpectedContentTypeException("%s (%s)" % (req.content.decode(),
-                                                                         req.status_code))
-        else:
-            if (req.status_code in [200, 201, 202, 204]):
-                response_data = None
-            else:
-                response_data = req.json()
-
-        # check for server error
-        if (req.status_code == 500):
-            response_json = req.json()
-            if ("error_message" in response_json):
-                raise AuroraXException("%s (%s)" % (response_json["error_message"],
-                                                    req.status_code))
-            else:
-                raise AuroraXException(response_json)
-
-        # create response object
-        res = AuroraXResponse(request=req,
-                              data=response_data,
-                              status_code=req.status_code)
-
-        # return
-        return res
-
-    def __str__(self) -> str:
-        """
-        String method
-
-        Returns:
-            string format of AuroraXRequest
-        """
-        return self.__repr__()
-
-    def __repr__(self) -> str:
-        """
-        Object representation
-
-        Returns:
-            object representation of AuroraXRequest
-        """
-        return f"AuroraXRequest(method='{self.method}', url='{self.url}')"
-
-

Ancestors

-
    -
  • pydantic.main.BaseModel
  • -
  • pydantic.utils.Representation
  • -
-

Class variables

-
-
var body : Union[Dict, ForwardRef(None), List]
-
-
-
-
var headers : Optional[Dict]
-
-
-
-
var method : str
-
-
-
-
var null_response : Optional[bool]
-
-
-
-
var params : Optional[Dict]
-
-
-
-
var url : str
-
-
-
-
-

Methods

-
-
-def execute(self, limited_evaluation: Optional[bool] = False, skip_retry_logic: Optional[bool] = False) ‑> AuroraXResponse -
-
-

Execute an AuroraX request

-

Args

-
-
limited_evaluation
-
don't evaluate the response after the retry mechanism, defaults to False
-
skip_retry_logic
-
exclude the retry logic in the request, defaults to False
-
-

Returns

-

an AuroraXResponse object

-

Raises

-
-
AuroraXMaxRetriesException
-
max retry error
-
AuroraXNotFoundException
-
requested resource was not found
-
AuroraXUnexpectedContentTypeException
-
unexpected content error
-
AuroraXUnexpectedEmptyResponse
-
unexpected empty response
-
AuroraXUnauthorizedException
-
invalid API key for this operation
-
-
- -Expand source code - -
def execute(self,
-            limited_evaluation: Optional[bool] = False,
-            skip_retry_logic: Optional[bool] = False) -> AuroraXResponse:
-    """
-    Execute an AuroraX request
-
-    Args:
-        limited_evaluation: don't evaluate the response after the retry
-            mechanism, defaults to False
-        skip_retry_logic: exclude the retry logic in the request, defaults
-            to False
-
-    Returns:
-        an AuroraXResponse object
-
-    Raises:
-        pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error
-        pyaurorax.exceptions.AuroraXNotFoundException: requested resource was not found
-        pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected content error
-        pyaurorax.exceptions.AuroraXUnexpectedEmptyResponse: unexpected empty response
-        pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation
-    """
-    # sanitize data
-    body_santized = json.dumps(self.body, default=json_converter)
-
-    # make request
-    try:
-        req = requests.request(self.method,
-                               self.url,
-                               headers=self.__merge_headers(),
-                               params=self.params,
-                               data=body_santized,
-                               timeout=REQUEST_TIMEOUT)
-    except requests.exceptions.Timeout:
-        raise AuroraXTimeoutException("Error 408: request timeout reached")
-
-    # retry request if needed
-    if (skip_retry_logic is False):
-        for i in range(0, DEFAULT_RETRIES):
-            if (req.status_code == 500 and "text/plain" in req.headers["Content-Type"]):
-                if (i == (DEFAULT_RETRIES - 1)):
-                    raise AuroraXMaxRetriesException("%s (%s)" % (req.content.decode(),
-                                                                  req.status_code))
-                try:
-                    req = requests.request(self.method,
-                                           self.url,
-                                           headers=self.__merge_headers(),
-                                           params=self.params,
-                                           json=self.body,
-                                           data=body_santized,
-                                           timeout=REQUEST_TIMEOUT)
-                except requests.exceptions.Timeout:
-                    raise AuroraXTimeoutException("Error 408: request timeout reached")
-            else:
-                break
-
-    # check if authorization worked (raised by API or by Nginx)
-    if (req.status_code == 401):
-        if (req.headers["Content-Type"] == "application/json"):
-            if ("error_message" in req.json()):
-                # this will be an error message that the API meant to send
-                raise AuroraXUnauthorizedException("%s %s" % (req.status_code,
-                                                              req.json()["error_message"]))
-            else:
-                raise AuroraXUnauthorizedException("Error 401: unauthorized")
-        else:
-            raise AuroraXUnauthorizedException("Error 401: unauthorized")
-
-    # check for 404 error (raised by API or by Nginx)
-    if (req.status_code == 404):
-        if (req.headers["Content-Type"] == "application/json"):
-            if ("error_message" in req.json()):
-                # this will be an error message that the API meant to send
-                raise AuroraXNotFoundException("%s %s" % (req.status_code,
-                                                          req.json()["error_message"]))
-            else:
-                # this will likely be a 404 from the java servlet
-                raise AuroraXNotFoundException("Error 404: not found")
-        else:
-            raise AuroraXNotFoundException("Error 404: not found")
-
-    # check if we only want to do limited evaluation
-    if (limited_evaluation is True):
-        res = AuroraXResponse(request=req,
-                              data=None,
-                              status_code=req.status_code)
-        return res
-
-    # check content type
-    if (self.null_response is False):
-        if (req.headers["Content-Type"] == "application/json"):
-            if (len(req.content) == 0):
-                raise AuroraXUnexpectedEmptyResponse("No response received")
-            else:
-                response_data = req.json()
-        else:
-            raise AuroraXUnexpectedContentTypeException("%s (%s)" % (req.content.decode(),
-                                                                     req.status_code))
-    else:
-        if (req.status_code in [200, 201, 202, 204]):
-            response_data = None
-        else:
-            response_data = req.json()
-
-    # check for server error
-    if (req.status_code == 500):
-        response_json = req.json()
-        if ("error_message" in response_json):
-            raise AuroraXException("%s (%s)" % (response_json["error_message"],
-                                                req.status_code))
-        else:
-            raise AuroraXException(response_json)
-
-    # create response object
-    res = AuroraXResponse(request=req,
-                          data=response_data,
-                          status_code=req.status_code)
-
-    # return
-    return res
-
-
-
-
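To tie the request and response classes together, a hedged sketch of issuing a low-level call with AuroraXRequest as documented above; the endpoint URL and parameters are illustrative only and would normally come from the urls object in this module rather than being hard-coded:

    from pyaurorax.api import AuroraXRequest

    # build and execute a simple GET request (illustrative URL and parameters)
    req = AuroraXRequest(method="get",
                         url="https://api.aurorax.space/api/v1/data_sources",
                         params={"program": "swarm"})
    res = req.execute()

    # the returned AuroraXResponse carries the parsed data and HTTP status code
    print(res.status_code)
    print(res.data)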
-
-class AuroraXResponse -(**data: Any) -
-
-

AuroraX API response class

-

Attributes

-
-
request
-
the request object
-
data
-
the data received as part of the request
-
status_code
-
the HTTP status code received when making the request
-
-

Create a new model by parsing and validating input data from keyword arguments.

-

Raises ValidationError if the input data cannot be parsed to form a valid model.

-
- -Expand source code - -
class AuroraXResponse(BaseModel):
-    """
-    AuroraX API response class
-
-    Attributes:
-        request: the request object
-        data: the data received as part of the request
-        status_code: the HTTP status code received when making the request
-    """
-    request: Any
-    data: Any
-    status_code: int
-
-    def __str__(self) -> str:
-        """
-        String method
-
-        Returns:
-            string format of AuroraXResponse
-        """
-        return self.__repr__()
-
-    def __repr__(self) -> str:
-        """
-        Object representation
-
-        Returns:
-            object representation of AuroraXResponse
-        """
-        return f"AuroraXResponse [{self.status_code}]"
-
-

Ancestors

-
    -
  • pydantic.main.BaseModel
  • -
  • pydantic.utils.Representation
  • -
-

Class variables

-
-
var data : Any
-
-
-
-
var request : Any
-
-
-
-
var status_code : int
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/docs/code/pyaurorax_api_reference/pyaurorax/availability/availability.html b/docs/code/pyaurorax_api_reference/pyaurorax/availability/availability.html
deleted file mode 100644
index ce1ed4c..0000000
--- a/docs/code/pyaurorax_api_reference/pyaurorax/availability/availability.html
+++ /dev/null
@@ -1,430 +0,0 @@
-pyaurorax.availability.availability API documentation
-
-
-

Module pyaurorax.availability.availability

-
-
-

Functions for retrieving availability information

-
- -Expand source code - -
"""
-Functions for retrieving availablity information
-"""
-
-import datetime
-from typing import Dict, List, Optional
-from .classes.availability_result import AvailabilityResult
-from ..sources import FORMAT_DEFAULT, DataSource
-from ..api import urls, AuroraXRequest
-
-# pdoc init
-__pdoc__: Dict = {}
-
-
-def ephemeris(start: datetime.date,
-              end: datetime.date,
-              program: Optional[str] = None,
-              platform: Optional[str] = None,
-              instrument_type: Optional[str] = None,
-              source_type: Optional[str] = None,
-              owner: Optional[str] = None,
-              format: Optional[str] = FORMAT_DEFAULT,
-              slow: Optional[bool] = False) -> List[AvailabilityResult]:
-    """
-    Retrieve information about the number of existing ephemeris records
-
-    Args:
-        start: start date to retrieve availability info from (inclusive)
-        end: end date to retrieve availability info until (inclusive)
-        program: program name to filter sources by, defaults to None
-        platform: platform name to filter sources by, defaults to None
-        instrument_type: instrument type to filter sources by, defaults to None
-        source_type: source type to filter sources by, defaults to None. Other
-            options are in the pyaurorax.sources module, or at the top level
-            using the pyaurorax.SOURCE_TYPE_* variables.
-        owner: owner email address to filter sources by, defaults to None
-        format: the format of the data sources returned, defaults to "basic_info".
-            Other options are in the pyaurorax.sources module, or at the top level
-            using the pyaurorax.FORMAT_* variables.
-        slow: query the data using a slower, but more accurate method, defaults to False
-
-    Returns:
-        ephemeris availability information matching the requested parameters
-    """
-    # set parameters
-    params = {
-        "start": start.strftime("%Y-%m-%d"),
-        "end": end.strftime("%Y-%m-%d"),
-        "program": program,
-        "platform": platform,
-        "instrument_type": instrument_type,
-        "source_type": source_type,
-        "owner": owner,
-        "format": format,
-        "slow": slow,
-    }
-
-    # do request
-    req = AuroraXRequest(method="get",
-                         url=urls.ephemeris_availability_url,
-                         params=params)
-    res = req.execute()
-
-    # cast data source record
-    for i in range(0, len(res.data)):
-        ds = DataSource(**res.data[i]["data_source"], format=format)
-        res.data[i]["data_source"] = ds
-
-    # return
-    return [AvailabilityResult(**av) for av in res.data]
-
-
-def data_products(start: datetime.date,
-                  end: datetime.date,
-                  program: Optional[str] = None,
-                  platform: Optional[str] = None,
-                  instrument_type: Optional[str] = None,
-                  source_type: Optional[str] = None,
-                  owner: Optional[str] = None,
-                  format: Optional[str] = FORMAT_DEFAULT,
-                  slow: Optional[bool] = False) -> List[AvailabilityResult]:
-    """
-    Retrieve information about the number of existing data product records
-
-    Args:
-        start: start date to retrieve availability info from (inclusive)
-        end: end date to retrieve availability info until (inclusive)
-        program: program name to filter sources by, defaults to None
-        platform: platform name to filter sources by, defaults to None
-        instrument_type: instrument type to filter sources by, defaults to None
-        source_type: source type to filter sources by, defaults to None. Other
-            options are in the pyaurorax.sources module, or at the top level
-            using the pyaurorax.SOURCE_TYPE_* variables.
-        owner: owner email address to filter sources by, defaults to None
-        format: the format of the data sources returned, defaults to "basic_info".
-            Other options are in the pyaurorax.sources module, or at the top level
-            using the pyaurorax.FORMAT_* variables.
-        slow: query the data using a slower, but more accurate method, defaults to False
-
-    Returns:
-        data product availability information matching the requested parameters
-    """
-    # set parameters
-    params = {
-        "start": start.strftime("%Y-%m-%d"),
-        "end": end.strftime("%Y-%m-%d"),
-        "program": program,
-        "platform": platform,
-        "instrument_type": instrument_type,
-        "source_type": source_type,
-        "owner": owner,
-        "format": format,
-        "slow": slow,
-    }
-
-    # do request
-    req = AuroraXRequest(method="get",
-                         url=urls.data_products_availability_url,
-                         params=params)
-    res = req.execute()
-
-    # cast data source record
-    for i in range(0, len(res.data)):
-        ds = DataSource(**res.data[i]["data_source"], format=format)
-        res.data[i]["data_source"] = ds
-
-    # return
-    return [AvailabilityResult(**av) for av in res.data]
-
-
-
-
-
-
-
-

Functions

-
-
-def data_products(start: datetime.date, end: datetime.date, program: Optional[str] = None, platform: Optional[str] = None, instrument_type: Optional[str] = None, source_type: Optional[str] = None, owner: Optional[str] = None, format: Optional[str] = 'basic_info', slow: Optional[bool] = False) ‑> List[AvailabilityResult] -
-
-

Retrieve information about the number of existing data product records

-

Args

-
-
start
-
start date to retrieve availability info from (inclusive)
-
end
-
end date to retrieve availability info until (inclusive)
-
program
-
program name to filter sources by, defaults to None
-
platform
-
platform name to filter sources by, defaults to None
-
instrument_type
-
instrument type to filter sources by, defaults to None
-
source_type
-
source type to filter sources by, defaults to None. Other options are in the pyaurorax.sources module, or at the top level using the pyaurorax.SOURCE_TYPE_* variables.
-
owner
-
owner email address to filter sources by, defaults to None
-
format
-
the format of the data sources returned, defaults to "basic_info". Other options are in the pyaurorax.sources module, or at the top level using the pyaurorax.FORMAT_* variables.
-
slow
-
query the data using a slower, but more accurate method, defaults to False
-
-

Returns

-

data product availability information matching the requested parameters

-
- -Expand source code - -
def data_products(start: datetime.date,
-                  end: datetime.date,
-                  program: Optional[str] = None,
-                  platform: Optional[str] = None,
-                  instrument_type: Optional[str] = None,
-                  source_type: Optional[str] = None,
-                  owner: Optional[str] = None,
-                  format: Optional[str] = FORMAT_DEFAULT,
-                  slow: Optional[bool] = False) -> List[AvailabilityResult]:
-    """
-    Retrieve information about the number of existing data product records
-
-    Args:
-        start: start date to retrieve availability info from (inclusive)
-        end: end date to retrieve availability info until (inclusive)
-        program: program name to filter sources by, defaults to None
-        platform: platform name to filter sources by, defaults to None
-        instrument_type: instrument type to filter sources by, defaults to None
-        source_type: source type to filter sources by, defaults to None. Other
-            options are in the pyaurorax.sources module, or at the top level
-            using the pyaurorax.SOURCE_TYPE_* variables.
-        owner: owner email address to filter sources by, defaults to None
-        format: the format of the data sources returned, defaults to "basic_info".
-            Other options are in the pyaurorax.sources module, or at the top level
-            using the pyaurorax.FORMAT_* variables.
-        slow: query the data using a slower, but more accurate method, defaults to False
-
-    Returns:
-        data product availability information matching the requested parameters
-    """
-    # set parameters
-    params = {
-        "start": start.strftime("%Y-%m-%d"),
-        "end": end.strftime("%Y-%m-%d"),
-        "program": program,
-        "platform": platform,
-        "instrument_type": instrument_type,
-        "source_type": source_type,
-        "owner": owner,
-        "format": format,
-        "slow": slow,
-    }
-
-    # do request
-    req = AuroraXRequest(method="get",
-                         url=urls.data_products_availability_url,
-                         params=params)
-    res = req.execute()
-
-    # cast data source record
-    for i in range(0, len(res.data)):
-        ds = DataSource(**res.data[i]["data_source"], format=format)
-        res.data[i]["data_source"] = ds
-
-    # return
-    return [AvailabilityResult(**av) for av in res.data]
-
-
-
-def ephemeris(start: datetime.date, end: datetime.date, program: Optional[str] = None, platform: Optional[str] = None, instrument_type: Optional[str] = None, source_type: Optional[str] = None, owner: Optional[str] = None, format: Optional[str] = 'basic_info', slow: Optional[bool] = False) ‑> List[AvailabilityResult] -
-
-

Retrieve information about the number of existing ephemeris records

-

Args

-
-
start
-
start date to retrieve availability info from (inclusive)
-
end
-
end date to retrieve availability info until (inclusive)
-
program
-
program name to filter sources by, defaults to None
-
platform
-
platform name to filter sources by, defaults to None
-
instrument_type
-
instrument type to filter sources by, defaults to None
-
source_type
-
source type to filter sources by, defaults to None. Other options are in the pyaurorax.sources module, or at the top level using the pyaurorax.SOURCE_TYPE_* variables.
-
owner
-
owner email address to filter sources by, defaults to None
-
format
-
the format of the data sources returned, defaults to "basic_info". Other options are in the pyaurorax.sources module, or at the top level using the pyaurorax.FORMAT_* variables.
-
slow
-
query the data using a slower, but more accurate method, defaults to False
-
-

Returns

-

ephemeris availability information matching the requested parameters

-
- -Expand source code - -
def ephemeris(start: datetime.date,
-              end: datetime.date,
-              program: Optional[str] = None,
-              platform: Optional[str] = None,
-              instrument_type: Optional[str] = None,
-              source_type: Optional[str] = None,
-              owner: Optional[str] = None,
-              format: Optional[str] = FORMAT_DEFAULT,
-              slow: Optional[bool] = False) -> List[AvailabilityResult]:
-    """
-    Retrieve information about the number of existing ephemeris records
-
-    Args:
-        start: start date to retrieve availability info from (inclusive)
-        end: end date to retrieve availability info until (inclusive)
-        program: program name to filter sources by, defaults to None
-        platform: platform name to filter sources by, defaults to None
-        instrument_type: instrument type to filter sources by, defaults to None
-        source_type: source type to filter sources by, defaults to None. Other
-            options are in the pyaurorax.sources module, or at the top level
-            using the pyaurorax.SOURCE_TYPE_* variables.
-        owner: owner email address to filter sources by, defaults to None
-        format: the format of the data sources returned, defaults to "basic_info".
-            Other options are in the pyaurorax.sources module, or at the top level
-            using the pyaurorax.FORMAT_* variables.
-        slow: query the data using a slower, but more accurate method, defaults to False
-
-    Returns:
-        ephemeris availability information matching the requested parameters
-    """
-    # set parameters
-    params = {
-        "start": start.strftime("%Y-%m-%d"),
-        "end": end.strftime("%Y-%m-%d"),
-        "program": program,
-        "platform": platform,
-        "instrument_type": instrument_type,
-        "source_type": source_type,
-        "owner": owner,
-        "format": format,
-        "slow": slow,
-    }
-
-    # do request
-    req = AuroraXRequest(method="get",
-                         url=urls.ephemeris_availability_url,
-                         params=params)
-    res = req.execute()
-
-    # cast data source record
-    for i in range(0, len(res.data)):
-        ds = DataSource(**res.data[i]["data_source"], format=format)
-        res.data[i]["data_source"] = ds
-
-    # return
-    return [AvailabilityResult(**av) for av in res.data]
-
-
-
-
-
-
-
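A brief usage sketch of the two availability functions above, assuming the module-level interface documented on this page; the date range and program are placeholder values:

    import datetime
    import pyaurorax

    # check how many ephemeris records exist for a program over a short date range
    start = datetime.date(2020, 1, 1)
    end = datetime.date(2020, 1, 2)
    results = pyaurorax.availability.ephemeris(start, end, program="swarm")

    # each item pairs a data source with its availability counts
    for r in results:
        print(r.data_source, r.available_ephemeris)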
-
-
\ No newline at end of file
diff --git a/docs/code/pyaurorax_api_reference/pyaurorax/availability/index.html b/docs/code/pyaurorax_api_reference/pyaurorax/availability/index.html
deleted file mode 100644
index 57ac545..0000000
--- a/docs/code/pyaurorax_api_reference/pyaurorax/availability/index.html
+++ /dev/null
@@ -1,427 +0,0 @@
-pyaurorax.availability API documentation
-
-
-

Module pyaurorax.availability

-
-
-

The availability module provides functions to quickly determine what data exists on the AuroraX platform.

-

Note that all functions and classes from submodules are imported at this level of the availability module. They can be referenced from here instead of digging deeper into the submodules.

-
- -Expand source code - -
"""
-The availability module provides functions to quickly
-determine what data exists on the AuroraX platform.
-
-Note that all functions and classes from submodules are all imported
-at this level of the availability module. They can be referenced from
-here instead of digging in deeper to the submodules.
-"""
-
-# function and class imports
-from .availability import (ephemeris,
-                           data_products)
-from .classes.availability_result import AvailabilityResult
-
-# pdoc imports and exports
-from .availability import __pdoc__ as __availability_pdoc__
-from .classes.availability_result import __pdoc__ as __classes_avail_result_pdoc__
-__pdoc__ = __availability_pdoc__
-__pdoc__ = dict(__pdoc__, **__classes_avail_result_pdoc__)
-__all__ = [
-    "ephemeris",
-    "data_products",
-    "AvailabilityResult",
-]
-
-
-
-

Sub-modules

-
-
pyaurorax.availability.availability
-
-

Functions for retrieving availability information

-
-
pyaurorax.availability.classes
-
-

Separated classes and functions used by the availability module …

-
-
-
-
-
-
-

Functions

-
-
-def data_products(start: datetime.date, end: datetime.date, program: Optional[str] = None, platform: Optional[str] = None, instrument_type: Optional[str] = None, source_type: Optional[str] = None, owner: Optional[str] = None, format: Optional[str] = 'basic_info', slow: Optional[bool] = False) ‑> List[AvailabilityResult] -
-
-

Retrieve information about the number of existing data product records

-

Args

-
-
start
-
start date to retrieve availability info from (inclusive)
-
end
-
end date to retrieve availability info until (inclusive)
-
program
-
program name to filter sources by, defaults to None
-
platform
-
platform name to filter sources by, defaults to None
-
instrument_type
-
instrument type to filter sources by, defaults to None
-
source_type
-
source type to filter sources by, defaults to None. Other options are in the pyaurorax.sources module, or at the top level using the pyaurorax.SOURCE_TYPE_* variables.
-
owner
-
owner email address to filter sources by, defaults to None
-
format
-
the format of the data sources returned, defaults to "basic_info". Other options are in the pyaurorax.sources module, or at the top level using the pyaurorax.FORMAT_* variables.
-
slow
-
query the data using a slower, but more accurate method, defaults to False
-
-

Returns

-

data product availability information matching the requested parameters

-
- -Expand source code - -
def data_products(start: datetime.date,
-                  end: datetime.date,
-                  program: Optional[str] = None,
-                  platform: Optional[str] = None,
-                  instrument_type: Optional[str] = None,
-                  source_type: Optional[str] = None,
-                  owner: Optional[str] = None,
-                  format: Optional[str] = FORMAT_DEFAULT,
-                  slow: Optional[bool] = False) -> List[AvailabilityResult]:
-    """
-    Retrieve information about the number of existing data product records
-
-    Args:
-        start: start date to retrieve availability info from (inclusive)
-        end: end date to retrieve availability info until (inclusive)
-        program: program name to filter sources by, defaults to None
-        platform: platform name to filter sources by, defaults to None
-        instrument_type: instrument type to filter sources by, defaults to None
-        source_type: source type to filter sources by, defaults to None. Other
-            options are in the pyaurorax.sources module, or at the top level
-            using the pyaurorax.SOURCE_TYPE_* variables.
-        owner: owner email address to filter sources by, defaults to None
-        format: the format of the data sources returned, defaults to "basic_info".
-            Other options are in the pyaurorax.sources module, or at the top level
-            using the pyaurorax.FORMAT_* variables.
-        slow: query the data using a slower, but more accurate method, defaults to False
-
-    Returns:
-        data product availability information matching the requested parameters
-    """
-    # set parameters
-    params = {
-        "start": start.strftime("%Y-%m-%d"),
-        "end": end.strftime("%Y-%m-%d"),
-        "program": program,
-        "platform": platform,
-        "instrument_type": instrument_type,
-        "source_type": source_type,
-        "owner": owner,
-        "format": format,
-        "slow": slow,
-    }
-
-    # do request
-    req = AuroraXRequest(method="get",
-                         url=urls.data_products_availability_url,
-                         params=params)
-    res = req.execute()
-
-    # cast data source record
-    for i in range(0, len(res.data)):
-        ds = DataSource(**res.data[i]["data_source"], format=format)
-        res.data[i]["data_source"] = ds
-
-    # return
-    return [AvailabilityResult(**av) for av in res.data]
-
-
-
-def ephemeris(start: datetime.date, end: datetime.date, program: Optional[str] = None, platform: Optional[str] = None, instrument_type: Optional[str] = None, source_type: Optional[str] = None, owner: Optional[str] = None, format: Optional[str] = 'basic_info', slow: Optional[bool] = False) ‑> List[AvailabilityResult] -
-
-

Retrieve information about the number of existing ephemeris records

-

Args

-
-
start
-
start date to retrieve availability info from (inclusive)
-
end
-
end date to retrieve availability info until (inclusive)
-
program
-
program name to filter sources by, defaults to None
-
platform
-
platform name to filter sources by, defaults to None
-
instrument_type
-
instrument type to filter sources by, defaults to None
-
source_type
-
source type to filter sources by, defaults to None. Other options are in the pyaurorax.sources module, or at the top level using the pyaurorax.SOURCE_TYPE_* variables.
-
owner
-
owner email address to filter sources by, defaults to None
-
format
-
the format of the data sources returned, defaults to "basic_info". Other options are in the pyaurorax.sources module, or at the top level using the pyaurorax.FORMAT_* variables.
-
slow
-
query the data using a slower, but more accurate method, defaults to False
-
-

Returns

-

ephemeris availability information matching the requested parameters

-
- -Expand source code - -
def ephemeris(start: datetime.date,
-              end: datetime.date,
-              program: Optional[str] = None,
-              platform: Optional[str] = None,
-              instrument_type: Optional[str] = None,
-              source_type: Optional[str] = None,
-              owner: Optional[str] = None,
-              format: Optional[str] = FORMAT_DEFAULT,
-              slow: Optional[bool] = False) -> List[AvailabilityResult]:
-    """
-    Retrieve information about the number of existing ephemeris records
-
-    Args:
-        start: start date to retrieve availability info from (inclusive)
-        end: end date to retrieve availability info until (inclusive)
-        program: program name to filter sources by, defaults to None
-        platform: platform name to filter sources by, defaults to None
-        instrument_type: instrument type to filter sources by, defaults to None
-        source_type: source type to filter sources by, defaults to None. Other
-            options are in the pyaurorax.sources module, or at the top level
-            using the pyaurorax.SOURCE_TYPE_* variables.
-        owner: owner email address to filter sources by, defaults to None
-        format: the format of the data sources returned, defaults to "basic_info".
-            Other options are in the pyaurorax.sources module, or at the top level
-            using the pyaurorax.FORMAT_* variables.
-        slow: query the data using a slower, but more accurate method, defaults to False
-
-    Returns:
-        ephemeris availability information matching the requested parameters
-    """
-    # set parameters
-    params = {
-        "start": start.strftime("%Y-%m-%d"),
-        "end": end.strftime("%Y-%m-%d"),
-        "program": program,
-        "platform": platform,
-        "instrument_type": instrument_type,
-        "source_type": source_type,
-        "owner": owner,
-        "format": format,
-        "slow": slow,
-    }
-
-    # do request
-    req = AuroraXRequest(method="get",
-                         url=urls.ephemeris_availability_url,
-                         params=params)
-    res = req.execute()
-
-    # cast data source record
-    for i in range(0, len(res.data)):
-        ds = DataSource(**res.data[i]["data_source"], format=format)
-        res.data[i]["data_source"] = ds
-
-    # return
-    return [AvailabilityResult(**av) for av in res.data]
-
-
-
-
-
-

Classes

-
-
-class AvailabilityResult -(**data: Any) -
-
-

Availability information object

-

Attributes

-
-
data_source
-
the data source that the records are associated with
-
available_data_products
-
the data product availability information
-
available_ephemeris
-
the ephemeris availability information
-
-

Create a new model by parsing and validating input data from keyword arguments.

-

Raises ValidationError if the input data cannot be parsed to form a valid model.

-
- -Expand source code - -
class AvailabilityResult(BaseModel):
-    """
-    Availability information object
-
-    Attributes:
-        data_source: the data source that the records are associated with
-        available_data_products: the data product availability information
-        available_ephemeris: the ephemeris availability information
-    """
-    data_source: DataSource
-    available_data_products: Optional[Dict[str, int]] = None
-    available_ephemeris: Optional[Dict[str, int]] = None
-
-    def __str__(self) -> str:
-        """
-        String method
-
-        Returns:
-            string format of AvailabilityResult
-        """
-        return self.__repr__()
-
-

Ancestors

-
    -
  • pydantic.main.BaseModel
  • -
  • pydantic.utils.Representation
  • -
-

Class variables

-
-
var available_data_products : Optional[Dict[str, int]]
-
-
-
-
var available_ephemeris : Optional[Dict[str, int]]
-
-
-
-
var data_source : DataSource
-
-
-
-
-
-
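For completeness, a similar sketch for data product availability that shows how the AvailabilityResult attributes described above might be inspected; the dates and program are again placeholder values:

    import datetime
    import pyaurorax

    results = pyaurorax.availability.data_products(datetime.date(2020, 1, 1),
                                                   datetime.date(2020, 1, 31),
                                                   program="themis-asi")

    for r in results:
        # available_data_products is an optional dictionary of record counts
        if (r.available_data_products is not None):
            print(r.data_source, r.available_data_products)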
-
-
-
-
\ No newline at end of file
diff --git a/docs/code/pyaurorax_api_reference/pyaurorax/conjunctions/conjunctions.html b/docs/code/pyaurorax_api_reference/pyaurorax/conjunctions/conjunctions.html
deleted file mode 100644
index 041c9e3..0000000
--- a/docs/code/pyaurorax_api_reference/pyaurorax/conjunctions/conjunctions.html
+++ /dev/null
@@ -1,627 +0,0 @@
-pyaurorax.conjunctions.conjunctions API documentation
-
-
-

Module pyaurorax.conjunctions.conjunctions

-
-
-

Functions for performing conjunction searches

-
- -Expand source code - -
"""
-Functions for performing conjunction searches
-"""
-
-import datetime
-import humanize
-from typing import Dict, List, Optional, Union
-from ..exceptions import AuroraXSearchException
-from .classes.search import Search
-from ..api import AuroraXRequest, urls
-from ..requests import STANDARD_POLLING_SLEEP_TIME
-
-# pdoc init
-__pdoc__: Dict = {}
-
-
-def search(start: datetime.datetime,
-           end: datetime.datetime,
-           distance: Union[int, float, Dict[str, Union[int, float]]],
-           ground: Optional[List[Dict[str, str]]] = [],
-           space: Optional[List[Dict[str, str]]] = [],
-           events: Optional[List[Dict[str, str]]] = [],
-           conjunction_types: Optional[List[str]] = [],
-           epoch_search_precision: Optional[int] = 60,
-           response_format: Optional[Dict[str, bool]] = None,
-           poll_interval: Optional[float] = STANDARD_POLLING_SLEEP_TIME,
-           return_immediately: Optional[bool] = False,
-           verbose: Optional[bool] = False) -> Search:
-    """
-    Search for conjunctions between data sources
-
-    By default, this function will block and wait until the request completes and
-    all data is downloaded. If you don't want to wait, set the 'return_immediately`
-    value to True. The Search object will be returned right after the search has been
-    started, and you can use the helper functions as part of that object to get the
-    data when it's done.
-
-    Args:
-        start: start timestamp of the search (inclusive)
-        end: end timestamp of the search (inclusive)
-        distance: the maximum distance allowed between data sources when searching for
-            conjunctions. This can either be a number (int or float), or a dictionary
-            modified from the output of the "get_advanced_distances_combos()" function.
-        ground: list of ground instrument search parameters, defaults to []
-
-            Example:
-
-                [{
-                    "programs": ["themis-asi"],
-                    "platforms": ["gillam", "rabbit lake"],
-                    "instrument_types": ["RGB"],
-                    "ephemeris_metadata_filters": {
-                        "logical_operator": "AND",
-                        "expressions": [
-                            {
-                                "key": "calgary_apa_ml_v1",
-                                "operator": "in",
-                                "values": [ "classified as APA" ]
-                            }
-                        ]
-                    }
-                }]
-        space: list of one or more space instrument search parameters, defaults to []
-
-            Example:
-
-                [{
-                    "programs": ["themis-asi", "swarm"],
-                    "platforms": ["themisa", "swarma"],
-                    "instrument_types": ["footprint"],
-                    "ephemeris_metadata_filters": {
-                        "logical_operator": "AND",
-                        "expressions": [
-                            {
-                                "key": "nbtrace_region",
-                                "operator": "in",
-                                "values": [ "north auroral oval" ]
-                            }
-                        ]
-                    },
-                    "hemisphere": [
-                        "northern"
-                    ]
-                }]
-        events: list of one or more events search parameters, defaults to []
-
-            Example:
-
-                [{
-                    "programs": [ "events" ],
-                    "instrument_types": [ "substorm onsets" ]
-                }]
-        conjunction_types: list of conjunction types, defaults to [] (meaning all conjunction
-            types). Options are in the pyaurorax.conjunctions module, or at the top level using
-            the pyaurorax.CONJUNCTION_TYPE_* variables.
-        epoch_search_precision: the time precision to which conjunctions are calculated. Can be
-            30 or 60 seconds. Defaults to 60 seconds. Note - this parameter is under active
-            development and still considered "alpha".
-        response_format: JSON representation of desired data response format
-        poll_interval: seconds to wait between polling calls, defaults to
-            pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME
-        return_immediately: initiate the search and return without waiting for data to
-            be received, defaults to False
-        verbose: show the progress of the request using the request log, defaults
-
-    Returns:
-        a pyaurorax.conjunctions.Search object
-    """
-    # create a Search object
-    s = Search(start,
-               end,
-               distance,
-               ground=ground,
-               space=space,
-               events=events,
-               conjunction_types=conjunction_types,
-               epoch_search_precision=epoch_search_precision,
-               response_format=response_format)
-    if (verbose is True):
-        print(f"[{datetime.datetime.now()}] Search object created")
-
-    # execute the search
-    s.execute()
-    if (verbose is True):
-        print("[%s] Request submitted" % (datetime.datetime.now()))
-        print("[%s] Request ID: %s" % (datetime.datetime.now(), s.request_id))
-        print("[%s] Request details available at: %s" % (datetime.datetime.now(),
-                                                         s.request_url))
-
-    # return immediately if we wanted to
-    if (return_immediately is True):
-        return s
-
-    # wait for data
-    if (verbose is True):
-        print("[%s] Waiting for data ..." % (datetime.datetime.now()))
-    s.wait(poll_interval=poll_interval, verbose=verbose)
-
-    # check if error condition encountered
-    if (s.status["search_result"]["error_condition"] is True):
-        # error encountered
-        raise AuroraXSearchException(s.logs[-1]["summary"])
-
-    # get the data
-    if (verbose is True):
-        print("[%s] Retrieving data ..." % (datetime.datetime.now()))
-    s.get_data()
-
-    # return response with the data
-    if (verbose is True):
-        print("[%s] Retrieved %s of data containing %d records" % (datetime.datetime.now(),
-                                                                   humanize.filesize.naturalsize(
-                                                                       s.status["search_result"]["file_size"]),
-                                                                   s.status["search_result"]["result_count"]))
-
-    # return
-    return s
-
-
-def describe(search_obj: Search) -> str:
-    """
-    Describe a conjunction search as an "SQL-like" string
-
-    Args:
-        search_obj: the conjunction search to describe
-
-    Returns:
-        the "SQL-like" string describing the conjunction search object
-    """
-    # make request
-    req = AuroraXRequest(method="post",
-                         url=urls.describe_conjunction_query_url,
-                         body=search_obj.query)
-    res = req.execute()
-
-    # return
-    return res.data
-
-
-def get_request_url(request_id: str) -> str:
-    """
-    Get the conjunction search request URL for a given
-    request ID. This URL can be used for subsequent
-    pyaurorax.requests function calls. Primarily this method
-    facilitates delving into details about a set of already-submitted
-    conjunction searches.
-
-    Args:
-        request_id: the request identifier
-
-    Returns:
-        the request URL
-    """
-    url = urls.conjunction_request_url.format(request_id)
-    return url
-
-
-
-
-
-
-
-

Functions

-
-
-def describe(search_obj: Search) ‑> str -
-
-

Describe a conjunction search as an "SQL-like" string

-

Args

-
-
search_obj
-
the conjunction search to describe
-
-

Returns

-

the "SQL-like" string describing the conjunction search object

-
- -Expand source code - -
def describe(search_obj: Search) -> str:
-    """
-    Describe a conjunction search as an "SQL-like" string
-
-    Args:
-        search_obj: the conjunction search to describe
-
-    Returns:
-        the "SQL-like" string describing the conjunction search object
-    """
-    # make request
-    req = AuroraXRequest(method="post",
-                         url=urls.describe_conjunction_query_url,
-                         body=search_obj.query)
-    res = req.execute()
-
-    # return
-    return res.data
-
-
-
-def get_request_url(request_id: str) ‑> str -
-
-

Get the conjunction search request URL for a given request ID. This URL can be used for subsequent pyaurorax.requests function calls. Primarily this method facilitates delving into details about a set of already-submitted conjunction searches.

-

Args

-
-
request_id
-
the request identifier
-
-

Returns

-

the request URL

-
- -Expand source code - -
def get_request_url(request_id: str) -> str:
-    """
-    Get the conjunction search request URL for a given
-    request ID. This URL can be used for subsequent
-    pyaurorax.requests function calls. Primarily this method
-    facilitates delving into details about a set of already-submitted
-    conjunction searches.
-
-    Args:
-        request_id: the request identifier
-
-    Returns:
-        the request URL
-    """
-    url = urls.conjunction_request_url.format(request_id)
-    return url
-
-
-
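A small sketch combining the two helpers above: describing a conjunction search object as an "SQL-like" string, and recovering the request URL for a previously-submitted search; the criteria and request identifier are illustrative only:

    import datetime
    import pyaurorax
    from pyaurorax.conjunctions import Search

    # build (but do not execute) a simple search object, then describe it
    s = Search(datetime.datetime(2020, 1, 1, 0, 0, 0),
               datetime.datetime(2020, 1, 1, 23, 59, 59),
               500,
               ground=[{"programs": ["themis-asi"]}],
               space=[{"programs": ["swarm"]}])
    print(pyaurorax.conjunctions.describe(s))

    # build the request URL for an already-submitted search (placeholder ID)
    print(pyaurorax.conjunctions.get_request_url("some-request-id"))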
-def search(start: datetime.datetime, end: datetime.datetime, distance: Union[int, float, Dict[str, Union[int, float]]], ground: Optional[List[Dict[str, str]]] = [], space: Optional[List[Dict[str, str]]] = [], events: Optional[List[Dict[str, str]]] = [], conjunction_types: Optional[List[str]] = [], epoch_search_precision: Optional[int] = 60, response_format: Optional[Dict[str, bool]] = None, poll_interval: Optional[float] = 1.0, return_immediately: Optional[bool] = False, verbose: Optional[bool] = False) ‑> Search -
-
-

Search for conjunctions between data sources

-

By default, this function will block and wait until the request completes and all data is downloaded. If you don't want to wait, set the `return_immediately` value to True. The Search object will be returned right after the search has been started, and you can use the helper functions as part of that object to get the data when it's done.

-

Args

-
-
start
-
start timestamp of the search (inclusive)
-
end
-
end timestamp of the search (inclusive)
-
distance
-
the maximum distance allowed between data sources when searching for conjunctions. This can either be a number (int or float), or a dictionary modified from the output of the "get_advanced_distances_combos()" function.
-
ground
-
-

list of ground instrument search parameters, defaults to []

-

Example:

-
[{
-    "programs": ["themis-asi"],
-    "platforms": ["gillam", "rabbit lake"],
-    "instrument_types": ["RGB"],
-    "ephemeris_metadata_filters": {
-        "logical_operator": "AND",
-        "expressions": [
-            {
-                "key": "calgary_apa_ml_v1",
-                "operator": "in",
-                "values": [ "classified as APA" ]
-            }
-        ]
-    }
-}]
-
-
-
space
-
-

list of one or more space instrument search parameters, defaults to []

-

Example:

-
[{
-    "programs": ["themis-asi", "swarm"],
-    "platforms": ["themisa", "swarma"],
-    "instrument_types": ["footprint"],
-    "ephemeris_metadata_filters": {
-        "logical_operator": "AND",
-        "expressions": [
-            {
-                "key": "nbtrace_region",
-                "operator": "in",
-                "values": [ "north auroral oval" ]
-            }
-        ]
-    },
-    "hemisphere": [
-        "northern"
-    ]
-}]
-
-
-
events
-
-

list of one or more events search parameters, defaults to []

-

Example:

-
[{
-    "programs": [ "events" ],
-    "instrument_types": [ "substorm onsets" ]
-}]
-
-
-
conjunction_types
-
list of conjunction types, defaults to [] (meaning all conjunction types). Options are in the pyaurorax.conjunctions module, or at the top level using the pyaurorax.CONJUNCTION_TYPE_* variables.
-
epoch_search_precision
-
the time precision to which conjunctions are calculated. Can be 30 or 60 seconds. Defaults to 60 seconds. Note - this parameter is under active development and still considered "alpha".
-
response_format
-
JSON representation of desired data response format
-
poll_interval
-
seconds to wait between polling calls, defaults to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME
-
return_immediately
-
initiate the search and return without waiting for data to be received, defaults to False
-
verbose
-
show the progress of the request using the request log, defaults to False
-
-

Returns

-

a pyaurorax.conjunctions.Search object

-
- -Expand source code - -
def search(start: datetime.datetime,
-           end: datetime.datetime,
-           distance: Union[int, float, Dict[str, Union[int, float]]],
-           ground: Optional[List[Dict[str, str]]] = [],
-           space: Optional[List[Dict[str, str]]] = [],
-           events: Optional[List[Dict[str, str]]] = [],
-           conjunction_types: Optional[List[str]] = [],
-           epoch_search_precision: Optional[int] = 60,
-           response_format: Optional[Dict[str, bool]] = None,
-           poll_interval: Optional[float] = STANDARD_POLLING_SLEEP_TIME,
-           return_immediately: Optional[bool] = False,
-           verbose: Optional[bool] = False) -> Search:
-    """
-    Search for conjunctions between data sources
-
-    By default, this function will block and wait until the request completes and
-    all data is downloaded. If you don't want to wait, set the 'return_immediately`
-    value to True. The Search object will be returned right after the search has been
-    started, and you can use the helper functions as part of that object to get the
-    data when it's done.
-
-    Args:
-        start: start timestamp of the search (inclusive)
-        end: end timestamp of the search (inclusive)
-        distance: the maximum distance allowed between data sources when searching for
-            conjunctions. This can either be a number (int or float), or a dictionary
-            modified from the output of the "get_advanced_distances_combos()" function.
-        ground: list of ground instrument search parameters, defaults to []
-
-            Example:
-
-                [{
-                    "programs": ["themis-asi"],
-                    "platforms": ["gillam", "rabbit lake"],
-                    "instrument_types": ["RGB"],
-                    "ephemeris_metadata_filters": {
-                        "logical_operator": "AND",
-                        "expressions": [
-                            {
-                                "key": "calgary_apa_ml_v1",
-                                "operator": "in",
-                                "values": [ "classified as APA" ]
-                            }
-                        ]
-                    }
-                }]
-        space: list of one or more space instrument search parameters, defaults to []
-
-            Example:
-
-                [{
-                    "programs": ["themis-asi", "swarm"],
-                    "platforms": ["themisa", "swarma"],
-                    "instrument_types": ["footprint"],
-                    "ephemeris_metadata_filters": {
-                        "logical_operator": "AND",
-                        "expressions": [
-                            {
-                                "key": "nbtrace_region",
-                                "operator": "in",
-                                "values": [ "north auroral oval" ]
-                            }
-                        ]
-                    },
-                    "hemisphere": [
-                        "northern"
-                    ]
-                }]
-        events: list of one or more events search parameters, defaults to []
-
-            Example:
-
-                [{
-                    "programs": [ "events" ],
-                    "instrument_types": [ "substorm onsets" ]
-                }]
-        conjunction_types: list of conjunction types, defaults to [] (meaning all conjunction
-            types). Options are in the pyaurorax.conjunctions module, or at the top level using
-            the pyaurorax.CONJUNCTION_TYPE_* variables.
-        epoch_search_precision: the time precision to which conjunctions are calculated. Can be
-            30 or 60 seconds. Defaults to 60 seconds. Note - this parameter is under active
-            development and still considered "alpha".
-        response_format: JSON representation of desired data response format
-        poll_interval: seconds to wait between polling calls, defaults to
-            pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME
-        return_immediately: initiate the search and return without waiting for data to
-            be received, defaults to False
-        verbose: show the progress of the request using the request log, defaults to False
-
-    Returns:
-        a pyaurorax.conjunctions.Search object
-    """
-    # create a Search object
-    s = Search(start,
-               end,
-               distance,
-               ground=ground,
-               space=space,
-               events=events,
-               conjunction_types=conjunction_types,
-               epoch_search_precision=epoch_search_precision,
-               response_format=response_format)
-    if (verbose is True):
-        print(f"[{datetime.datetime.now()}] Search object created")
-
-    # execute the search
-    s.execute()
-    if (verbose is True):
-        print("[%s] Request submitted" % (datetime.datetime.now()))
-        print("[%s] Request ID: %s" % (datetime.datetime.now(), s.request_id))
-        print("[%s] Request details available at: %s" % (datetime.datetime.now(),
-                                                         s.request_url))
-
-    # return immediately if we wanted to
-    if (return_immediately is True):
-        return s
-
-    # wait for data
-    if (verbose is True):
-        print("[%s] Waiting for data ..." % (datetime.datetime.now()))
-    s.wait(poll_interval=poll_interval, verbose=verbose)
-
-    # check if error condition encountered
-    if (s.status["search_result"]["error_condition"] is True):
-        # error encountered
-        raise AuroraXSearchException(s.logs[-1]["summary"])
-
-    # get the data
-    if (verbose is True):
-        print("[%s] Retrieving data ..." % (datetime.datetime.now()))
-    s.get_data()
-
-    # return response with the data
-    if (verbose is True):
-        print("[%s] Retrieved %s of data containing %d records" % (datetime.datetime.now(),
-                                                                   humanize.filesize.naturalsize(
-                                                                       s.status["search_result"]["file_size"]),
-                                                                   s.status["search_result"]["result_count"]))
-
-    # return
-    return s
-
-
-
-
-
-
-
- -
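A minimal sketch of the non-blocking workflow described above (assuming the package is imported as `import pyaurorax` and the module-level `search()` shown here; criteria values are illustrative only):

    import datetime
    import pyaurorax

    # start the search, but return right away instead of blocking
    s = pyaurorax.conjunctions.search(
        start=datetime.datetime(2020, 1, 1, 0, 0, 0),
        end=datetime.datetime(2020, 1, 1, 23, 59, 59),
        distance=500,
        ground=[{"programs": ["themis-asi"]}],
        space=[{"programs": ["swarm"]}],
        return_immediately=True,
    )

    # later: poll until the request finishes, then download the results
    s.wait()
    s.get_data()
    print(len(s.data))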
- - - \ No newline at end of file diff --git a/docs/code/pyaurorax_api_reference/pyaurorax/conjunctions/index.html b/docs/code/pyaurorax_api_reference/pyaurorax/conjunctions/index.html deleted file mode 100644 index c6ded88..0000000 --- a/docs/code/pyaurorax_api_reference/pyaurorax/conjunctions/index.html +++ /dev/null @@ -1,1522 +0,0 @@ - - - - - - -pyaurorax.conjunctions API documentation - - - - - - - - - - - -
-
-
-

Module pyaurorax.conjunctions

-
-
-

The conjunction module is used for finding conjunctions between -groupings of data sources.

-

Note that all functions and classes from submodules are imported -at this level of the conjunctions module. They can be referenced from -here instead of digging deeper into the submodules.

-
- -Expand source code - -
"""
-The conjunction module is used for finding conjunctions between
-groupings of data sources.
-
-Note that all functions and classes from submodules are imported
-at this level of the conjunctions module. They can be referenced from
-here instead of digging deeper into the submodules.
-"""
-
-# conjunction type - north b-trace
-CONJUNCTION_TYPE_NBTRACE: str = "nbtrace"
-"""
-Conjunction search 'conjunction_type' category for
-finding conjunctions using the north B-trace data
-"""
-
-# conjunction type - south b-trace
-CONJUNCTION_TYPE_SBTRACE: str = "sbtrace"
-"""
-Conjunction search 'conjunction_type' category for
-finding conjunctions using the south B-trace data
-"""
-
-# function and class imports
-from .conjunctions import (search,
-                           describe,
-                           get_request_url)
-from .swarmaurora import __all__ as swarmaurora_all
-from .classes.conjunction import Conjunction
-from .classes.search import Search
-
-# pdoc imports and exports
-from .conjunctions import __pdoc__ as __conjunctions_pdoc__
-from .classes.conjunction import __pdoc__ as __classes_conjunctions_pdoc__
-from .classes.search import __pdoc__ as __classes_search_pdoc__
-__pdoc__ = __conjunctions_pdoc__
-__pdoc__ = dict(__pdoc__, **__classes_conjunctions_pdoc__)
-__pdoc__ = dict(__pdoc__, **__classes_search_pdoc__)
-__all__ = [
-    "CONJUNCTION_TYPE_NBTRACE",
-    "CONJUNCTION_TYPE_SBTRACE",
-    "search",
-    "describe",
-    "get_request_url",
-    "Conjunction",
-    "Search",
-    "swarmaurora_all",
-]
-
-
-
-

Sub-modules

-
-
pyaurorax.conjunctions.classes
-
-

Separated classes and functions used by the conjunctions module …

-
-
pyaurorax.conjunctions.conjunctions
-
-

Functions for performing conjunction searches

-
-
pyaurorax.conjunctions.swarmaurora
-
-

Interact with Swarm-Aurora using conjunction searches from AuroraX

-
-
-
-
-

Global variables

-
-
var CONJUNCTION_TYPE_NBTRACE : str
-
-

Conjunction search 'conjunction_type' category for -finding conjunctions using the north B-trace data

-
-
var CONJUNCTION_TYPE_SBTRACE : str
-
-

Conjunction search 'conjunction_type' category for -finding conjunctions using the south B-trace data

-
-
-
-
-

Functions

-
-
-def describe(search_obj: Search) ‑> str -
-
-

Describe a conjunction search as an "SQL-like" string

-

Args

-
-
search_obj
-
the conjunction search to describe
-
-

Returns

-

the "SQL-like" string describing the conjunction search object

-
- -Expand source code - -
def describe(search_obj: Search) -> str:
-    """
-    Describe a conjunction search as an "SQL-like" string
-
-    Args:
-        search_obj: the conjunction search to describe
-
-    Returns:
-        the "SQL-like" string describing the conjunction search object
-    """
-    # make request
-    req = AuroraXRequest(method="post",
-                         url=urls.describe_conjunction_query_url,
-                         body=search_obj.query)
-    res = req.execute()
-
-    # return
-    return res.data
-
-
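A short sketch of how `describe()` might be used (assuming `import pyaurorax`; the criteria below are hypothetical):

    import datetime
    import pyaurorax

    s = pyaurorax.conjunctions.Search(
        datetime.datetime(2020, 1, 1, 0, 0, 0),
        datetime.datetime(2020, 1, 1, 23, 59, 59),
        500,
        ground=[{"programs": ["themis-asi"]}],
        space=[{"programs": ["swarm"]}],
    )

    # print the "SQL-like" description of this search
    print(pyaurorax.conjunctions.describe(s))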
-
-def get_request_url(request_id: str) ‑> str -
-
-

Get the conjunction search request URL for a given -request ID. This URL can be used for subsequent -pyaurorax.requests function calls. Primarily this method -facilitates delving into details about a set of already-submitted -conjunction searches.

-

Args

-
-
request_id
-
the request identifier
-
-

Returns

-

the request URL

-
- -Expand source code - -
def get_request_url(request_id: str) -> str:
-    """
-    Get the conjunction search request URL for a given
-    request ID. This URL can be used for subsequent
-    pyaurorax.requests function calls. Primarily this method
-    facilitates delving into details about a set of already-submitted
-    conjunction searches.
-
-    Args:
-        request_id: the request identifier
-
-    Returns:
-        the request URL
-    """
-    url = urls.conjunction_request_url.format(request_id)
-    return url
-
-
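A minimal sketch of following up on a previously submitted search (the request ID below is hypothetical, and the `pyaurorax.requests.get_status()` helper is an assumption based on the pyaurorax.requests module referenced above):

    import pyaurorax

    request_id = "previously-submitted-request-id"  # hypothetical ID
    url = pyaurorax.conjunctions.get_request_url(request_id)

    # the URL can then be passed to pyaurorax.requests helpers, e.g. a status lookup
    status = pyaurorax.requests.get_status(url)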
-
-def search(start: datetime.datetime, end: datetime.datetime, distance: Union[int, float, Dict[str, Union[int, float]]], ground: Optional[List[Dict[str, str]]] = [], space: Optional[List[Dict[str, str]]] = [], events: Optional[List[Dict[str, str]]] = [], conjunction_types: Optional[List[str]] = [], epoch_search_precision: Optional[int] = 60, response_format: Optional[Dict[str, bool]] = None, poll_interval: Optional[float] = 1.0, return_immediately: Optional[bool] = False, verbose: Optional[bool] = False) ‑> Search -
-
-

Search for conjunctions between data sources

-

By default, this function will block and wait until the request completes and -all data is downloaded. If you don't want to wait, set the 'return_immediately' -value to True. The Search object will be returned right after the search has been -started, and you can use the helper functions as part of that object to get the -data when it's done.

-

Args

-
-
start
-
start timestamp of the search (inclusive)
-
end
-
end timestamp of the search (inclusive)
-
distance
-
the maximum distance allowed between data sources when searching for -conjunctions. This can either be a number (int or float), or a dictionary -modified from the output of the "get_advanced_distances_combos()" function.
-
ground
-
-

list of ground instrument search parameters, defaults to []

-

Example:

-
[{
-    "programs": ["themis-asi"],
-    "platforms": ["gillam", "rabbit lake"],
-    "instrument_types": ["RGB"],
-    "ephemeris_metadata_filters": {
-        "logical_operator": "AND",
-        "expressions": [
-            {
-                "key": "calgary_apa_ml_v1",
-                "operator": "in",
-                "values": [ "classified as APA" ]
-            }
-        ]
-    }
-}]
-
-
-
space
-
-

list of one or more space instrument search parameters, defaults to []

-

Example:

-
[{
-    "programs": ["themis-asi", "swarm"],
-    "platforms": ["themisa", "swarma"],
-    "instrument_types": ["footprint"],
-    "ephemeris_metadata_filters": {
-        "logical_operator": "AND",
-        "expressions": [
-            {
-                "key": "nbtrace_region",
-                "operator": "in",
-                "values": [ "north auroral oval" ]
-            }
-        ]
-    },
-    "hemisphere": [
-        "northern"
-    ]
-}]
-
-
-
events
-
-

list of one or more events search parameters, defaults to []

-

Example:

-
[{
-    "programs": [ "events" ],
-    "instrument_types": [ "substorm onsets" ]
-}]
-
-
-
conjunction_types
-
list of conjunction types, defaults to [] (meaning all conjunction -types). Options are in the pyaurorax.conjunctions module, or at the top level using -the pyaurorax.CONJUNCTION_TYPE_* variables.
-
epoch_search_precision
-
the time precision to which conjunctions are calculated. Can be -30 or 60 seconds. Defaults to 60 seconds. Note - this parameter is under active -development and still considered "alpha".
-
response_format
-
JSON representation of desired data response format
-
poll_interval
-
seconds to wait between polling calls, defaults to -pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME
-
return_immediately
-
initiate the search and return without waiting for data to -be received, defaults to False
-
verbose
-
show the progress of the request using the request log, defaults to False
-
-

Returns

-

a pyaurorax.conjunctions.Search object

-
- -Expand source code - -
def search(start: datetime.datetime,
-           end: datetime.datetime,
-           distance: Union[int, float, Dict[str, Union[int, float]]],
-           ground: Optional[List[Dict[str, str]]] = [],
-           space: Optional[List[Dict[str, str]]] = [],
-           events: Optional[List[Dict[str, str]]] = [],
-           conjunction_types: Optional[List[str]] = [],
-           epoch_search_precision: Optional[int] = 60,
-           response_format: Optional[Dict[str, bool]] = None,
-           poll_interval: Optional[float] = STANDARD_POLLING_SLEEP_TIME,
-           return_immediately: Optional[bool] = False,
-           verbose: Optional[bool] = False) -> Search:
-    """
-    Search for conjunctions between data sources
-
-    By default, this function will block and wait until the request completes and
-    all data is downloaded. If you don't want to wait, set the 'return_immediately'
-    value to True. The Search object will be returned right after the search has been
-    started, and you can use the helper functions as part of that object to get the
-    data when it's done.
-
-    Args:
-        start: start timestamp of the search (inclusive)
-        end: end timestamp of the search (inclusive)
-        distance: the maximum distance allowed between data sources when searching for
-            conjunctions. This can either be a number (int or float), or a dictionary
-            modified from the output of the "get_advanced_distances_combos()" function.
-        ground: list of ground instrument search parameters, defaults to []
-
-            Example:
-
-                [{
-                    "programs": ["themis-asi"],
-                    "platforms": ["gillam", "rabbit lake"],
-                    "instrument_types": ["RGB"],
-                    "ephemeris_metadata_filters": {
-                        "logical_operator": "AND",
-                        "expressions": [
-                            {
-                                "key": "calgary_apa_ml_v1",
-                                "operator": "in",
-                                "values": [ "classified as APA" ]
-                            }
-                        ]
-                    }
-                }]
-        space: list of one or more space instrument search parameters, defaults to []
-
-            Example:
-
-                [{
-                    "programs": ["themis-asi", "swarm"],
-                    "platforms": ["themisa", "swarma"],
-                    "instrument_types": ["footprint"],
-                    "ephemeris_metadata_filters": {
-                        "logical_operator": "AND",
-                        "expressions": [
-                            {
-                                "key": "nbtrace_region",
-                                "operator": "in",
-                                "values": [ "north auroral oval" ]
-                            }
-                        ]
-                    },
-                    "hemisphere": [
-                        "northern"
-                    ]
-                }]
-        events: list of one or more events search parameters, defaults to []
-
-            Example:
-
-                [{
-                    "programs": [ "events" ],
-                    "instrument_types": [ "substorm onsets" ]
-                }]
-        conjunction_types: list of conjunction types, defaults to [] (meaning all conjunction
-            types). Options are in the pyaurorax.conjunctions module, or at the top level using
-            the pyaurorax.CONJUNCTION_TYPE_* variables.
-        epoch_search_precision: the time precision to which conjunctions are calculated. Can be
-            30 or 60 seconds. Defaults to 60 seconds. Note - this parameter is under active
-            development and still considered "alpha".
-        response_format: JSON representation of desired data response format
-        poll_interval: seconds to wait between polling calls, defaults to
-            pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME
-        return_immediately: initiate the search and return without waiting for data to
-            be received, defaults to False
-        verbose: show the progress of the request using the request log, defaults to False
-
-    Returns:
-        a pyaurorax.conjunctions.Search object
-    """
-    # create a Search object
-    s = Search(start,
-               end,
-               distance,
-               ground=ground,
-               space=space,
-               events=events,
-               conjunction_types=conjunction_types,
-               epoch_search_precision=epoch_search_precision,
-               response_format=response_format)
-    if (verbose is True):
-        print(f"[{datetime.datetime.now()}] Search object created")
-
-    # execute the search
-    s.execute()
-    if (verbose is True):
-        print("[%s] Request submitted" % (datetime.datetime.now()))
-        print("[%s] Request ID: %s" % (datetime.datetime.now(), s.request_id))
-        print("[%s] Request details available at: %s" % (datetime.datetime.now(),
-                                                         s.request_url))
-
-    # return immediately if we wanted to
-    if (return_immediately is True):
-        return s
-
-    # wait for data
-    if (verbose is True):
-        print("[%s] Waiting for data ..." % (datetime.datetime.now()))
-    s.wait(poll_interval=poll_interval, verbose=verbose)
-
-    # check if error condition encountered
-    if (s.status["search_result"]["error_condition"] is True):
-        # error encountered
-        raise AuroraXSearchException(s.logs[-1]["summary"])
-
-    # get the data
-    if (verbose is True):
-        print("[%s] Retrieving data ..." % (datetime.datetime.now()))
-    s.get_data()
-
-    # return response with the data
-    if (verbose is True):
-        print("[%s] Retrieved %s of data containing %d records" % (datetime.datetime.now(),
-                                                                   humanize.filesize.naturalsize(
-                                                                       s.status["search_result"]["file_size"]),
-                                                                   s.status["search_result"]["result_count"]))
-
-    # return
-    return s
-
-
-
-
-
-
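To complement the reference above, a small sketch of a blocking call (assuming `import pyaurorax`; time range and criteria are illustrative only):

    import datetime
    import pyaurorax

    s = pyaurorax.conjunctions.search(
        start=datetime.datetime(2019, 1, 1, 0, 0, 0),
        end=datetime.datetime(2019, 1, 3, 23, 59, 59),
        distance=300,
        ground=[{"programs": ["themis-asi"]}],
        space=[{"programs": ["swarm"], "hemisphere": ["northern"]}],
        conjunction_types=[pyaurorax.conjunctions.CONJUNCTION_TYPE_NBTRACE],
        verbose=True,
    )
    print("%d conjunctions found" % (len(s.data)))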

Classes

-
-
-class Conjunction -(**data: Any) -
-
-

Conjunction object

-

Attributes

-
-
conjunction_type
-
the type of location data used when the -conjunction was found (either 'nbtrace' or 'sbtrace')
-
start
-
start timestamp of the conjunction
-
end
-
end timestamp of the conjunction
-
data_sources
-
data sources in the conjunction
-
min_distance
-
minimum kilometer distance of the conjunction
-
max_distance
-
maximum kilometer distance of the conjunction
-
events
-
the sub-conjunctions that make up this over-arching -conjunction (the conjunctions between each set of two data -sources)
-
-

Create a new model by parsing and validating input data from keyword arguments.

-

Raises ValidationError if the input data cannot be parsed to form a valid model.

-
- -Expand source code - -
class Conjunction(BaseModel):
-    """
-    Conjunction object
-
-    Attributes:
-        conjunction_type: the type of location data used when the
-            conjunction was found (either 'nbtrace' or 'sbtrace')
-        start: start timestamp of the conjunction
-        end: end timestamp of the conjunction
-        data_sources: data sources in the conjunction
-        min_distance: minimum kilometer distance of the conjunction
-        max_distance: maximum kilometer distance of the conjunction
-        events: the sub-conjunctions that make up this over-arching
-            conjunction (the conjunctions between each set of two data
-            sources)
-    """
-    conjunction_type: str
-    start: datetime.datetime
-    end: datetime.datetime
-    data_sources: List[DataSource]
-    min_distance: float
-    max_distance: float
-    events: List[Dict]
-
-    def __str__(self) -> str:
-        """
-        String method
-
-        Returns:
-            string format of Conjunction object
-        """
-        return self.__repr__()
-
-    def __repr__(self) -> str:
-        """
-        Object representation
-
-        Returns:
-            object representation of Conjunction object
-        """
-        return f"Conjunction(start={repr(self.start)}, end={repr(self.end)}, " \
-            f"min_distance={self.min_distance:.2f}, max_distance={self.max_distance:.2f}, " \
-            "data_sources=[...], events=[...])"
-
-

Ancestors

-
    -
  • pydantic.main.BaseModel
  • -
  • pydantic.utils.Representation
  • -
-

Class variables

-
-
var conjunction_type : str
-
-
-
-
var data_sources : List[DataSource]
-
-
-
-
var end : datetime.datetime
-
-
-
-
var events : List[Dict]
-
-
-
-
var max_distance : float
-
-
-
-
var min_distance : float
-
-
-
-
var start : datetime.datetime
-
-
-
-
-
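For illustration, assuming `s` is a completed conjunction search returned by the `search()` function above, the resulting `Conjunction` objects can be inspected through the attributes listed below:

    for c in s.data:
        # print the time range, type, and minimum distance of each conjunction
        print(c.start, c.end, c.conjunction_type, round(c.min_distance, 1))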
-
-class Search -(start: datetime.datetime, end: datetime.datetime, distance: Union[int, float, Dict[str, Union[int, float]]], ground: Optional[List[Dict[str, str]]] = [], space: Optional[List[Dict[str, str]]] = [], events: Optional[List[Dict[str, str]]] = [], conjunction_types: Optional[List[str]] = ['nbtrace'], epoch_search_precision: Optional[int] = 60, response_format: Optional[Dict[str, bool]] = None) -
-
-

Class representing a conjunction search

-

Attributes

-
-
start
-
start timestamp of the search (inclusive)
-
end
-
end timestamp of the search (inclusive)
-
distance
-
the maximum distance allowed between data sources when searching for -conjunctions. This can either be a number (int or float), or a dictionary -modified from the output of the "get_advanced_distances_combos()" function.
-
ground
-
-

list of ground instrument search parameters, defaults to []

-

Example:

-
[{
-    "programs": ["themis-asi"],
-    "platforms": ["gillam", "rabbit lake"],
-    "instrument_types": ["RGB"],
-    "ephemeris_metadata_filters": {
-        "logical_operator": "AND",
-        "expressions": [
-            {
-                "key": "calgary_apa_ml_v1",
-                "operator": "in",
-                "values": [ "classified as APA" ]
-            }
-        ]
-    }
-}]
-
-
-
space
-
-

list of one or more space instrument search parameters, defaults to []

-

Example:

-
[{
-    "programs": ["themis-asi", "swarm"],
-    "platforms": ["themisa", "swarma"],
-    "instrument_types": ["footprint"],
-    "ephemeris_metadata_filters": {
-        "logical_operator": "AND",
-        "expressions": [
-            {
-                "key": "nbtrace_region",
-                "operator": "in",
-                "values": [ "north auroral oval" ]
-            }
-        ]
-    },
-    "hemisphere": [
-        "northern"
-    ]
-}]
-
-
-
events
-
-

list of one or more events search parameters, defaults to []

-

Example:

-
[{
-    "programs": [ "events" ],
-    "instrument_types": [ "substorm onsets" ]
-}]
-
-
-
conjunction_types
-
list of conjunction types, defaults to ["nbtrace"]. Options are -in the pyaurorax.conjunctions module, or at the top level using the -pyaurorax.CONJUNCTION_TYPE_* variables.
-
epoch_search_precision
-
the time precision to which conjunctions are calculated. Can be -30 or 60 seconds. Defaults to 60 seconds. Note - this parameter is under active -development and still considered "alpha".
-
response_format
-
JSON representation of desired data response format
-
request
-
AuroraXResponse object returned when the search is executed
-
request_id
-
unique ID assigned to the request by the AuroraX API
-
request_url
-
unique URL assigned to the request by the AuroraX API
-
executed
-
indicates if the search has been executed/started
-
completed
-
indicates if the search has finished
-
data_url
-
the URL where data is accessed
-
query
-
the query for this request as JSON
-
status
-
the status of the query
-
data
-
the conjunctions found
-
logs
-
all log messages output by the AuroraX API for this request
-
-

Returns: -a pyaurorax.conjunctions.Search object

-
- -Expand source code - -
class Search():
-    """
-    Class representing a conjunction search
-
-    Attributes:
-        start: start timestamp of the search (inclusive)
-        end: end timestamp of the search (inclusive)
-        distance: the maximum distance allowed between data sources when searching for
-            conjunctions. This can either be a number (int or float), or a dictionary
-            modified from the output of the "get_advanced_distances_combos()" function.
-        ground: list of ground instrument search parameters, defaults to []
-
-            Example:
-
-                [{
-                    "programs": ["themis-asi"],
-                    "platforms": ["gillam", "rabbit lake"],
-                    "instrument_types": ["RGB"],
-                    "ephemeris_metadata_filters": {
-                        "logical_operator": "AND",
-                        "expressions": [
-                            {
-                                "key": "calgary_apa_ml_v1",
-                                "operator": "in",
-                                "values": [ "classified as APA" ]
-                            }
-                        ]
-                    }
-                }]
-        space: list of one or more space instrument search parameters, defaults to []
-
-            Example:
-
-                [{
-                    "programs": ["themis-asi", "swarm"],
-                    "platforms": ["themisa", "swarma"],
-                    "instrument_types": ["footprint"],
-                    "ephemeris_metadata_filters": {
-                        "logical_operator": "AND",
-                        "expressions": [
-                            {
-                                "key": "nbtrace_region",
-                                "operator": "in",
-                                "values": [ "north auroral oval" ]
-                            }
-                        ]
-                    },
-                    "hemisphere": [
-                        "northern"
-                    ]
-                }]
-        events: list of one or more events search parameters, defaults to []
-
-            Example:
-
-                [{
-                    "programs": [ "events" ],
-                    "instrument_types": [ "substorm onsets" ]
-                }]
-        conjunction_types: list of conjunction types, defaults to ["nbtrace"]. Options are
-            in the pyaurorax.conjunctions module, or at the top level using the
-            pyaurorax.CONJUNCTION_TYPE_* variables.
-        epoch_search_precision: the time precision to which conjunctions are calculated. Can be
-            30 or 60 seconds. Defaults to 60 seconds. Note - this parameter is under active
-            development and still considered "alpha".
-        response_format: JSON representation of desired data response format
-        request: AuroraXResponse object returned when the search is executed
-        request_id: unique ID assigned to the request by the AuroraX API
-        request_url: unique URL assigned to the request by the AuroraX API
-        executed: indicates if the search has been executed/started
-        completed: indicates if the search has finished
-        data_url: the URL where data is accessed
-        query: the query for this request as JSON
-        status: the status of the query
-        data: the conjunctions found
-        logs: all log messages output by the AuroraX API for this request
-
-        Returns:
-            a pyaurorax.conjunctions.Search object
-    """
-
-    def __init__(self, start: datetime.datetime,
-                 end: datetime.datetime,
-                 distance: Union[int, float, Dict[str, Union[int, float]]],
-                 ground: Optional[List[Dict[str, str]]] = [],
-                 space: Optional[List[Dict[str, str]]] = [],
-                 events: Optional[List[Dict[str, str]]] = [],
-                 conjunction_types: Optional[List[str]] = [CONJUNCTION_TYPE_NBTRACE],
-                 epoch_search_precision: Optional[int] = 60,
-                 response_format: Optional[Dict[str, bool]] = None):
-
-        # set variables using passed in args
-        self.start = start
-        self.end = end
-        self.ground = ground
-        self.space = space
-        self.events = events
-        self.distance = distance
-        self.conjunction_types = conjunction_types
-        self.epoch_search_precision = epoch_search_precision
-        self.response_format = response_format
-
-        # initialize additional variables
-        self.request: AuroraXResponse = None
-        self.request_id: str = ""
-        self.request_url: str = ""
-        self.executed: bool = False
-        self.completed: bool = False
-        self.data_url: str = ""
-        self.query: Dict = {}
-        self.status: Dict = {}
-        self.data: List[Union[Conjunction, Dict]] = []
-        self.logs: List[Dict] = []
-
-    def __str__(self):
-        """
-        String method
-
-        Returns:
-            string format of Conjunction Search object
-        """
-        return self.__repr__()
-
-    def __repr__(self):
-        """
-        Object representation
-
-        Returns:
-            object representation of Conjunction Search object
-        """
-        return f"ConjunctionSearch(executed={self.executed}, " \
-            f"completed={self.completed}, request_id='{self.request_id}')"
-
-    def check_criteria_block_count_validity(self) -> None:
-        """
-        Check the number of criteria blocks to see if there
-        are too many. A max of 10 is allowed by the AuroraX
-        conjunction search engine. An exception is raised if
-        too many are found.
-
-        Raises:
-            pyaurorax.exceptions.AuroraXBadParametersException: too many criteria blocks are found
-        """
-        if ((len(self.ground) + len(self.space) + len(self.events)) > 10):
-            raise AuroraXBadParametersException("Number of criteria blocks exceeds 10, "
-                                                "please reduce the count")
-
-    def get_advanced_distances_combos(self, default_distance: Union[int, float] = None) -> Dict:
-        """
-        Get the advanced distances combinations for this search
-
-        Args:
-            default_distance: the default distance to use, defaults to None
-
-        Returns:
-            the advanced distances combinations
-        """
-        # set input arrays
-        options = []
-        for i in range(0, len(self.ground)):
-            options.append("ground%d" % (i + 1))
-        for i in range(0, len(self.space)):
-            options.append("space%d" % (i + 1))
-        for i in range(0, len(self.events)):
-            options.append("events%d" % (i + 1))
-
-        # derive all combinations of options of size 2
-        combinations = {}
-        for element in itertools.combinations(options, r=2):
-            combinations["%s-%s" % (element[0], element[1])] = default_distance
-
-        # return
-        return combinations
-
-    def __fill_in_missing_distances(self, curr_distances: Dict) -> Dict:
-        # get all distances possible
-        all_distances = self.get_advanced_distances_combos()
-
-        # go through current distances and fill in the values
-        for curr_key, curr_value in curr_distances.items():
-            curr_key_split = curr_key.split('-')
-            curr_key1 = curr_key_split[0].strip()
-            curr_key2 = curr_key_split[1].strip()
-            for all_key in all_distances.keys():
-                if (curr_key1 in all_key and curr_key2 in all_key):
-                    # found the matching key, replace the value
-                    all_distances[all_key] = curr_value
-
-        # return
-        return all_distances
-
-    @property
-    def distance(self) -> Union[int, float, Dict[str, Union[int, float]]]:
-        """
-        Property for the distance parameter
-
-        Returns:
-            the distance dictionary with all combinations
-        """
-        return self._distance
-
-    @distance.setter
-    def distance(self, distance: Union[int, float, Dict[str, Union[int, float]]]) -> None:
-        # set distances to a dict if it's an int or float
-        if (type(distance) is int or type(distance) is float):
-            self._distance = self.get_advanced_distances_combos(default_distance=distance)  # type: ignore
-        else:
-            # is a dict, fill in any gaps
-            self._distance = self.__fill_in_missing_distances(distance)  # type: ignore
-
-    @property
-    def query(self) -> Dict:
-        """
-        Property for the query value
-
-        Returns:
-            the query parameter
-        """
-        self._query = {
-            "start": self.start.strftime("%Y-%m-%dT%H:%M:%S"),
-            "end": self.end.strftime("%Y-%m-%dT%H:%M:%S"),
-            "ground": self.ground,
-            "space": self.space,
-            "events": self.events,
-            "conjunction_types": self.conjunction_types,
-            "max_distances": self.distance,
-            "epoch_search_precision": self.epoch_search_precision if self.epoch_search_precision in [30, 60] else 60,
-        }
-        return self._query
-
-    @query.setter
-    def query(self, query: Dict) -> None:
-        self._query = query
-
-    def execute(self) -> None:
-        """
-        Initiate a conjunction search request
-
-        Raises:
-            pyaurorax.exceptions.AuroraXBadParametersException: too many criteria blocks
-        """
-        # check number of criteria blocks
-        self.check_criteria_block_count_validity()
-
-        # do request
-        url = urls.conjunction_search_url
-        req = AuroraXRequest(method="post",
-                             url=url,
-                             body=self.query,
-                             null_response=True)
-        res = req.execute()
-
-        # set request ID, request_url, executed
-        self.executed = True
-        if res.status_code == 202:
-            # request successfully dispatched
-            self.executed = True
-            self.request_url = res.request.headers["location"]
-            self.request_id = self.request_url.rsplit("/", 1)[-1]
-
-        # set request variable
-        self.request = res
-
-    def update_status(self, status: Optional[Dict] = None) -> None:
-        """
-        Update the status of this conjunction search request
-
-        Args:
-            status: the previously-retrieved status of this request (include
-                to avoid requesting it from the API again), defaults to None
-        """
-        # get the status if it isn't passed in
-        if (status is None):
-            status = requests_get_status(self.request_url)
-
-        # update request status by checking if data URI is set
-        if (status["search_result"]["data_uri"] is not None):
-            self.completed = True
-            self.data_url = f'{urls.base_url}{status["search_result"]["data_uri"]}'
-
-        # set class variable "status" and "logs"
-        self.status = status
-        self.logs = status["logs"]
-
-    def check_for_data(self) -> bool:
-        """
-        Check to see if data is available for this conjunction
-        search request
-
-        Returns:
-            True if data is available, else False
-        """
-        self.update_status()
-        return self.completed
-
-    def get_data(self) -> None:
-        """
-        Retrieve the data available for this conjunction search request
-        """
-        # check if request is completed
-        if (self.completed is False):
-            print("No data available, update status or check for data first")
-            return
-
-        # get data
-        raw_data = requests_get_data(self.data_url, response_format=self.response_format)
-
-        # set data variable
-        if (self.response_format is not None):
-            self.data = raw_data
-        else:
-            # cast data source objects
-            for i in range(0, len(raw_data)):
-                for j in range(0, len(raw_data[i]["data_sources"])):
-                    ds = DataSource(**raw_data[i]["data_sources"][j], format=FORMAT_BASIC_INFO)
-                    raw_data[i]["data_sources"][j] = ds
-
-            # cast conjunctions
-            self.data = [Conjunction(**c) for c in raw_data]
-
-    def wait(self,
-             poll_interval: Optional[float] = STANDARD_POLLING_SLEEP_TIME,
-             verbose: Optional[bool] = False) -> None:
-        """
-        Block and wait until the request is complete and data is
-        available for retrieval
-
-        Args:
-            poll_interval: time in seconds to wait between polling attempts, defaults
-                to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME
-            verbose: output poll times and other progress messages, defaults to False
-        """
-        url = urls.conjunction_request_url.format(self.request_id)
-        self.update_status(requests_wait_for_data(url,
-                                                  poll_interval=poll_interval,
-                                                  verbose=verbose))
-
-    def cancel(self,
-               wait: Optional[bool] = False,
-               poll_interval: Optional[float] = STANDARD_POLLING_SLEEP_TIME,
-               verbose: Optional[bool] = False) -> int:
-        """
-        Cancel the conjunction search request
-
-        This method returns immediately by default since the API processes
-        this request asynchronously. If you would prefer to wait for it
-        to be completed, set the 'wait' parameter to True. You can adjust
-        the polling time using the 'poll_interval' parameter.
-
-        Args:
-            wait: wait until the cancellation request has been
-                completed (may wait for several minutes)
-            poll_interval: seconds to wait between polling
-                calls, defaults to STANDARD_POLLING_SLEEP_TIME.
-            verbose: output poll times and other progress messages, defaults
-                to False
-
-        Returns:
-            1 on success
-
-        Raises:
-            pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error
-            pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation
-        """
-        url = urls.conjunction_request_url.format(self.request_id)
-        return requests_cancel(url, wait=wait, poll_interval=poll_interval, verbose=verbose)
-
-

Instance variables

-
-
var distance : Union[int, float, Dict[str, Union[int, float]]]
-
-

Property for the distance parameter

-

Returns

-

the distance dictionary with all combinations

-
- -Expand source code - -
@property
-def distance(self) -> Union[int, float, Dict[str, Union[int, float]]]:
-    """
-    Property for the distance parameter
-
-    Returns:
-        the distance dictionary with all combinations
-    """
-    return self._distance
-
-
-
var query : Dict
-
-

Property for the query value

-

Returns

-

the query parameter

-
- -Expand source code - -
@property
-def query(self) -> Dict:
-    """
-    Property for the query value
-
-    Returns:
-        the query parameter
-    """
-    self._query = {
-        "start": self.start.strftime("%Y-%m-%dT%H:%M:%S"),
-        "end": self.end.strftime("%Y-%m-%dT%H:%M:%S"),
-        "ground": self.ground,
-        "space": self.space,
-        "events": self.events,
-        "conjunction_types": self.conjunction_types,
-        "max_distances": self.distance,
-        "epoch_search_precision": self.epoch_search_precision if self.epoch_search_precision in [30, 60] else 60,
-    }
-    return self._query
-
-
-
-
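For example (a sketch, assuming `s` is a `Search` object constructed as shown above), the generated query can be inspected before the search is executed:

    import json
    print(json.dumps(s.query, indent=4))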

Methods

-
-
-def cancel(self, wait: Optional[bool] = False, poll_interval: Optional[float] = 1.0, verbose: Optional[bool] = False) ‑> int -
-
-

Cancel the conjunction search request

-

This method returns immediately by default since the API processes -this request asynchronously. If you would prefer to wait for it -to be completed, set the 'wait' parameter to True. You can adjust -the polling time using the 'poll_interval' parameter.

-

Args

-
-
wait
-
wait until the cancellation request has been -completed (may wait for several minutes)
-
poll_interval
-
seconds to wait between polling -calls, defaults to STANDARD_POLLING_SLEEP_TIME.
-
verbose
-
output poll times and other progress messages, defaults -to False
-
-

Returns

-

1 on success

-

Raises

-
-
AuroraXUnexpectedContentTypeException
-
unexpected error
-
AuroraXUnauthorizedException
-
invalid API key for this operation
-
-
- -Expand source code - -
def cancel(self,
-           wait: Optional[bool] = False,
-           poll_interval: Optional[float] = STANDARD_POLLING_SLEEP_TIME,
-           verbose: Optional[bool] = False) -> int:
-    """
-    Cancel the conjunction search request
-
-    This method returns immediately by default since the API processes
-    this request asynchronously. If you would prefer to wait for it
-    to be completed, set the 'wait' parameter to True. You can adjust
-    the polling time using the 'poll_interval' parameter.
-
-    Args:
-        wait: wait until the cancellation request has been
-            completed (may wait for several minutes)
-        poll_interval: seconds to wait between polling
-            calls, defaults to STANDARD_POLLING_SLEEP_TIME.
-        verbose: output poll times and other progress messages, defaults
-            to False
-
-    Returns:
-        1 on success
-
-    Raises:
-        pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error
-        pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation
-    """
-    url = urls.conjunction_request_url.format(self.request_id)
-    return requests_cancel(url, wait=wait, poll_interval=poll_interval, verbose=verbose)
-
-
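A brief sketch of the cancellation workflow (assuming `s` is a `Search` object that has already been executed and is still running):

    s.cancel(wait=True)  # returns 1 once the API confirms the cancellation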
-
-def check_criteria_block_count_validity(self) ‑> None -
-
-

Check the number of criteria blocks to see if there -are too many. A max of 10 is allowed by the AuroraX -conjunction search engine. An exception is raised if -too many are found.

-

Raises

-
-
AuroraXBadParametersException
-
too many criteria blocks are found
-
-
- -Expand source code - -
def check_criteria_block_count_validity(self) -> None:
-    """
-    Check the number of criteria blocks to see if there
-    are too many. A max of 10 is allowed by the AuroraX
-    conjunction search engine. An exception is raised if
-    too many are found.
-
-    Raises:
-        pyaurorax.exceptions.AuroraXBadParametersException: too many criteria blocks are found
-    """
-    if ((len(self.ground) + len(self.space) + len(self.events)) > 10):
-        raise AuroraXBadParametersException("Number of criteria blocks exceeds 10, "
-                                            "please reduce the count")
-
-
-
-def check_for_data(self) ‑> bool -
-
-

Check to see if data is available for this conjunction -search request

-

Returns

-

True if data is available, else False

-
- -Expand source code - -
def check_for_data(self) -> bool:
-    """
-    Check to see if data is available for this conjunction
-    search request
-
-    Returns:
-        True if data is available, else False
-    """
-    self.update_status()
-    return self.completed
-
-
-
-def execute(self) ‑> None -
-
-

Initiate a conjunction search request

-

Raises

-
-
AuroraXBadParametersException
-
too many criteria blocks
-
-
- -Expand source code - -
def execute(self) -> None:
-    """
-    Initiate a conjunction search request
-
-    Raises:
-        pyaurorax.exceptions.AuroraXBadParametersException: too many criteria blocks
-    """
-    # check number of criteria blocks
-    self.check_criteria_block_count_validity()
-
-    # do request
-    url = urls.conjunction_search_url
-    req = AuroraXRequest(method="post",
-                         url=url,
-                         body=self.query,
-                         null_response=True)
-    res = req.execute()
-
-    # set request ID, request_url, executed
-    self.executed = True
-    if res.status_code == 202:
-        # request successfully dispatched
-        self.executed = True
-        self.request_url = res.request.headers["location"]
-        self.request_id = self.request_url.rsplit("/", 1)[-1]
-
-    # set request variable
-    self.request = res
-
-
-
-def get_advanced_distances_combos(self, default_distance: Union[int, float] = None) ‑> Dict -
-
-

Get the advanced distances combinations for this search

-

Args

-
-
default_distance
-
the default distance to use, defaults to None
-
-

Returns

-

the advanced distances combinations

-
- -Expand source code - -
def get_advanced_distances_combos(self, default_distance: Union[int, float] = None) -> Dict:
-    """
-    Get the advanced distances combinations for this search
-
-    Args:
-        default_distance: the default distance to use, defaults to None
-
-    Returns:
-        the advanced distances combinations
-    """
-    # set input arrays
-    options = []
-    for i in range(0, len(self.ground)):
-        options.append("ground%d" % (i + 1))
-    for i in range(0, len(self.space)):
-        options.append("space%d" % (i + 1))
-    for i in range(0, len(self.events)):
-        options.append("events%d" % (i + 1))
-
-    # derive all combinations of options of size 2
-    combinations = {}
-    for element in itertools.combinations(options, r=2):
-        combinations["%s-%s" % (element[0], element[1])] = default_distance
-
-    # return
-    return combinations
-
-
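To illustrate the distance handling described above, a sketch of building the advanced distances dictionary, tightening one pairing, and assigning it back (assuming `s` is a `Search` object with one ground and two space criteria blocks; values are illustrative):

    distances = s.get_advanced_distances_combos(default_distance=500)
    # e.g. {"ground1-space1": 500, "ground1-space2": 500, "space1-space2": 500}
    distances["ground1-space1"] = 300   # tighten one specific pairing
    s.distance = distances              # any omitted pairings are filled in automatically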
-
-def get_data(self) ‑> None -
-
-

Retrieve the data available for this conjunction search request

-
- -Expand source code - -
def get_data(self) -> None:
-    """
-    Retrieve the data available for this conjunction search request
-    """
-    # check if request is completed
-    if (self.completed is False):
-        print("No data available, update status or check for data first")
-        return
-
-    # get data
-    raw_data = requests_get_data(self.data_url, response_format=self.response_format)
-
-    # set data variable
-    if (self.response_format is not None):
-        self.data = raw_data
-    else:
-        # cast data source objects
-        for i in range(0, len(raw_data)):
-            for j in range(0, len(raw_data[i]["data_sources"])):
-                ds = DataSource(**raw_data[i]["data_sources"][j], format=FORMAT_BASIC_INFO)
-                raw_data[i]["data_sources"][j] = ds
-
-        # cast conjunctions
-        self.data = [Conjunction(**c) for c in raw_data]
-
-
-
-def update_status(self, status: Optional[Dict] = None) ‑> None -
-
-

Update the status of this conjunction search request

-

Args

-
-
status
-
the previously-retrieved status of this request (include -to avoid requesting it from the API again), defaults to None
-
-
- -Expand source code - -
def update_status(self, status: Optional[Dict] = None) -> None:
-    """
-    Update the status of this conjunction search request
-
-    Args:
-        status: the previously-retrieved status of this request (include
-            to avoid requesting it from the API again), defaults to None
-    """
-    # get the status if it isn't passed in
-    if (status is None):
-        status = requests_get_status(self.request_url)
-
-    # update request status by checking if data URI is set
-    if (status["search_result"]["data_uri"] is not None):
-        self.completed = True
-        self.data_url = f'{urls.base_url}{status["search_result"]["data_uri"]}'
-
-    # set class variable "status" and "logs"
-    self.status = status
-    self.logs = status["logs"]
-
-
-
-def wait(self, poll_interval: Optional[float] = 1.0, verbose: Optional[bool] = False) ‑> None -
-
-

Block and wait until the request is complete and data is -available for retrieval

-

Args

-
-
poll_interval
-
time in seconds to wait between polling attempts, defaults -to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME
-
verbose
-
output poll times and other progress messages, defaults to False
-
-
- -Expand source code - -
def wait(self,
-         poll_interval: Optional[float] = STANDARD_POLLING_SLEEP_TIME,
-         verbose: Optional[bool] = False) -> None:
-    """
-    Block and wait until the request is complete and data is
-    available for retrieval
-
-    Args:
-        poll_interval: time in seconds to wait between polling attempts, defaults
-            to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME
-        verbose: output poll times and other progress messages, defaults to False
-    """
-    url = urls.conjunction_request_url.format(self.request_id)
-    self.update_status(requests_wait_for_data(url,
-                                              poll_interval=poll_interval,
-                                              verbose=verbose))
-
-
-
-
-
-
-
- -
- - - \ No newline at end of file diff --git a/docs/code/pyaurorax_api_reference/pyaurorax/conjunctions/swarmaurora/index.html b/docs/code/pyaurorax_api_reference/pyaurorax/conjunctions/swarmaurora/index.html deleted file mode 100644 index f52df11..0000000 --- a/docs/code/pyaurorax_api_reference/pyaurorax/conjunctions/swarmaurora/index.html +++ /dev/null @@ -1,288 +0,0 @@ - - - - - - -pyaurorax.conjunctions.swarmaurora API documentation - - - - - - - - - - - -
-
-
-

Module pyaurorax.conjunctions.swarmaurora

-
-
-

Interact with Swarm-Aurora using conjunction searches from AuroraX

-
- -Expand source code - -
"""
-Interact with Swarm-Aurora using conjunction searches from AuroraX
-"""
-
-# function and class imports
-from .tools import (get_url,
-                    open_in_browser,
-                    create_custom_import_file)
-
-# pdoc imports and exports
-from .tools import __pdoc__ as __tools_pdoc__
-__pdoc__ = __tools_pdoc__
-__all__ = [
-    "get_url",
-    "open_in_browser",
-    "create_custom_import_file",
-]
-
-
-
-

Sub-modules

-
-
pyaurorax.conjunctions.swarmaurora.tools
-
-

Functions for using conjunction searches with Swarm-Aurora

-
-
-
-
-
-
-

Functions

-
-
-def create_custom_import_file(search_obj: Search, filename: str = None, returnDict: bool = False) ‑> Union[str, Dict] -
-
-

Generate a Swarm-Aurora custom import file for a given -conjunction search

-

Args

-
-
search_obj
-
a conjunction search object, must be a completed -search with the 'request_id' value populated
-
filename
-
the output filename, default is 'swarmaurora_custom_import_{requestID}.json'
-
returnDict
-
return the custom import file contents as a dictionary -instead of saving a file, default is False
-
-

Returns

-

the filename of the saved custom import file, or a dictionary with the -file contents if returnDict is set to True

-
- -Expand source code - -
def create_custom_import_file(search_obj: Search,
-                              filename: str = None,
-                              returnDict: bool = False) -> Union[str, Dict]:
-    """
-    Generate a Swarm-Aurora custom import file for a given
-    conjunction search
-
-    Args:
-        search_obj: a conjunction search object, must be a completed
-                    search with the 'request_id' value populated
-    filename: the output filename, default is 'swarmaurora_custom_import_{requestID}.json'
-        returnDict: return the custom import file contents as a dictionary
-                    instead of saving a file, default is False
-
-    Returns:
-        the filename of the saved custom import file, or a dictionary with the
-        file contents if `returnDict` is set to True
-    """
-    # make request
-    url = "https://swarm-aurora.com/conjunctionFinder/generate_custom_import_json?aurorax_request_id=%s" % (
-        search_obj.request_id)
-    req = AuroraXRequest(method="get",
-                         url=url,
-                         body=search_obj.query)
-    res = req.execute()
-
-    # return the contents as a dict if requested
-    if (returnDict is True):
-        return res.data
-
-    # set default filename
-    if (filename is None):
-        filename = "swarmaurora_custom_import_%s.json" % (search_obj.request_id)
-
-    # save data to file
-    with open(filename, 'w', encoding='utf-8') as fp:
-        json.dump(res.data, fp, indent=4)
-
-    # return
-    return filename
-
-
-
-def get_url(search_obj: Search) ‑> str -
-
-

Get a URL that displays a conjunction search in the Swarm-Aurora -Conjunction Finder

-

Args

-
-
search_obj
-
a conjunction search object, must be a completed -search with the 'request_id' value populated
-
-

Returns

-

the Swarm-Aurora Conjunction Finder URL for this conjunction search

-
- -Expand source code - -
def get_url(search_obj: Search) -> str:
-    """
-    Get a URL that displays a conjunction search in the Swarm-Aurora
-    Conjunction Finder
-
-    Args:
-        search_obj: a conjunction search object, must be a completed
-                    search with the 'request_id' value populated
-
-    Returns:
-        the Swarm-Aurora Conjunction Finder URL for this conjunction search
-    """
-    return "https://swarm-aurora.com/conjunctionFinder/?aurorax_request_id=%s" % (search_obj.request_id)
-
-
-
-def open_in_browser(search_obj: Search, browser: str = None) ‑> None -
-
-

In a browser, open a conjunction search in the Swarm-Aurora -Conjunction Finder.

-

Args

-
-
search_obj
-
a conjunction search object, must be a completed -search with the 'request_id' value populated
-
browser
-
the browser type to load using. Default is your -default browser. Some common other options are -"google-chrome", "firefox", or "safari". For all available -options, refer to https://docs.python.org/3/library/webbrowser.html#webbrowser.get
-
-
- -Expand source code - -
def open_in_browser(search_obj: Search, browser: str = None) -> None:
-    """
-    In a browser, open a conjunction search in the Swarm-Aurora
-    Conjunction Finder.
-
-    Args:
-        search_obj: a conjunction search object, must be a completed
-                    search with the 'request_id' value populated
-        browser: the browser type to load using. Default is your
-                 default browser. Some common other options are
-                 "google-chrome", "firefox", or "safari". For all available
-                 options, refer to https://docs.python.org/3/library/webbrowser.html#webbrowser.get
-    """
-    url = get_url(search_obj)
-    try:
-        w = webbrowser.get(using=browser)
-        w.open_new_tab(url)
-    except Exception as e:
-        if ("could not locate runnable browser" in str(e)):
-            raise AuroraXException(("Error: selected browser '%s' not found, please try "
-                                   "another. For the list of options, refer to "
-                                    "https://docs.python.org/3/library/webbrowser.html#webbrowser.get") % (browser))
-
-
-
-
-
-
-
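As a combined sketch of the functions above (assuming `s` is a completed conjunction search object with its `request_id` populated):

    from pyaurorax.conjunctions import swarmaurora

    print(swarmaurora.get_url(s))      # Swarm-Aurora Conjunction Finder URL
    swarmaurora.open_in_browser(s)     # open it in the default browser

    # save a custom import file for Swarm-Aurora
    fname = swarmaurora.create_custom_import_file(s)
    print("custom import file saved to %s" % (fname))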
- -
- - - \ No newline at end of file diff --git a/docs/code/pyaurorax_api_reference/pyaurorax/data/index.html b/docs/code/pyaurorax_api_reference/pyaurorax/data/index.html new file mode 100644 index 0000000..52419de --- /dev/null +++ b/docs/code/pyaurorax_api_reference/pyaurorax/data/index.html @@ -0,0 +1,1027 @@ + + + + + + +pyaurorax.data API documentation + + + + + + + + + + + +
+
+
+

Module pyaurorax.data

+
+
+

Instrument data downloading and reading module. This module presently has support +for data provided by the University of Calgary, such as THEMIS ASI, REGO, and the +Transition Region Explorer (TREx) instruments.
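A minimal sketch of listing datasets through this module (the `aurorax.data` attribute name is an assumption based on the manager classes shown below; `pyaurorax.PyAuroraX` is the top-level class referenced in the docstrings, and the filter name is illustrative):

    import pyaurorax

    aurorax = pyaurorax.PyAuroraX()

    # list available datasets, optionally filtering by name (attribute name assumed)
    datasets = aurorax.data.list_datasets(name="THEMIS_ASI")
    print(datasets[0].name)

    # or print them as a table
    aurorax.data.list_datasets_in_table()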

+
+ +Expand source code + +
# Copyright 2024 University of Calgary
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Instrument data downloading and reading module. This module presently has support 
+for data provided by the University of Calgary, such as THEMIS ASI, REGO, and the 
+Transition Region Explorer (TREx) instruments.
+"""
+
+from typing import List, Literal, Optional
+from pyucalgarysrs.data import Dataset, Observatory
+from texttable import Texttable
+from .ucalgary import UCalgaryManager
+
+__all__ = ["DataManager"]
+
+
+class DataManager:
+    """
+    The DataManager object is initialized within every PyAuroraX object. It acts as a way to access 
+    the submodules and carry over configuration information in the super class.
+    """
+
+    def __init__(self, aurorax_obj):
+        self.__aurorax_obj = aurorax_obj
+
+        # initialize sub-modules
+        self.__ucalgary = UCalgaryManager(self.__aurorax_obj)
+
+    # ------------------------------------------
+    # properties for submodule managers
+    # ------------------------------------------
+    @property
+    def ucalgary(self):
+        """
+        Access to the `ucalgary` submodule from within a PyAuroraX object.
+        """
+        return self.__ucalgary
+
+    def list_datasets(self, name: Optional[str] = None, timeout: Optional[int] = None) -> List[Dataset]:
+        """
+        List available datasets from all providers
+
+        Args:
+            name (str): 
+                Supply a name used for filtering. If that name is found in the available dataset 
+                names received from the API, it will be included in the results. This parameter is
+                optional.
+            
+            timeout (int): 
+                Represents how many seconds to wait for the API to send data before giving up. The 
+                default is 10 seconds, or the `api_timeout` value in the super class' `pyaurorax.PyAuroraX`
+                object. This parameter is optional.
+            
+        Returns:
+            A list of [`Dataset`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Dataset)
+            objects.
+        
+        Raises:
+            pyaurorax.exceptions.AuroraXAPIError: An API error was encountered.
+        """
+        # init
+        datasets = []
+
+        # get ucalgary datasets
+        ucalgary_datasets = self.__ucalgary.list_datasets(name=name, timeout=timeout)
+
+        # merge
+        datasets = datasets + ucalgary_datasets
+
+        # sort by name
+        datasets = sorted(datasets, key=lambda x: x.name)
+
+        # return
+        return datasets
+
+    def list_datasets_in_table(self, name: Optional[str] = None, max_width: int = 200, timeout: Optional[int] = None) -> None:
+        """
+        Print available datasets from all providers in a table
+
+        Args:
+            name (str): 
+                Supply a name used for filtering. If that name is found in the available dataset 
+                names received from the API, it will be included in the results. This parameter is
+                optional.
+            
+            max_width (int): 
+                Maximum width of the table. Default is `200`. This parameter is optional.
+
+            timeout (int): 
+                Represents how many seconds to wait for the API to send data before giving up. The 
+                default is 10 seconds, or the `api_timeout` value in the super class' `pyaurorax.PyAuroraX`
+                object. This parameter is optional.
+            
+        Returns:
+            Printed table.
+
+        Raises:
+            pyaurorax.exceptions.AuroraXAPIError: An API error was encountered.
+        """
+        # get datasets
+        datasets = self.list_datasets(name=name, timeout=timeout)
+
+        # set table lists
+        table_names = []
+        table_providers = []
+        table_levels = []
+        table_doi_details = []
+        table_short_descriptions = []
+        for d in datasets:
+            table_names.append(d.name)
+            table_providers.append(d.provider)
+            table_levels.append(d.level)
+            table_doi_details.append(d.doi_details)
+            table_short_descriptions.append(d.short_description)
+
+        # set header values
+        table_headers = [
+            "Name",
+            "Provider",
+            "Level",
+            "DOI Details",
+            "Short Description",
+        ]
+
+        # print as table
+        table = Texttable(max_width=max_width)
+        table.set_deco(Texttable.HEADER)
+        table.set_cols_dtype(["t"] * len(table_headers))
+        table.set_header_align(["l"] * len(table_headers))
+        table.set_cols_align(["l"] * len(table_headers))
+        table.header(table_headers)
+        for i in range(0, len(table_names)):
+            table.add_row([
+                table_names[i],
+                table_providers[i],
+                table_levels[i],
+                table_doi_details[i],
+                table_short_descriptions[i],
+            ])
+        print(table.draw())
+
+    def list_observatories(self,
+                           instrument_array: Literal["themis_asi", "rego", "trex_rgb", "trex_nir", "trex_blue"],
+                           uid: Optional[str] = None,
+                           timeout: Optional[int] = None) -> List[Observatory]:
+        """
+        List information about observatories utilized by all providers.
+
+        Args:
+            instrument_array (str): 
+                The instrument array to list observatories for. Valid values are: themis_asi, rego, 
+                trex_rgb, trex_nir, and trex_blue.
+
+            uid (str): 
+                Supply an observatory unique identifier used for filtering (usually 4-letter site code). If that UID 
+                is found in the available observatories received from the API, it will be included in the results. This 
+                parameter is optional.
+            
+            timeout (int): 
+                Represents how many seconds to wait for the API to send data before giving up. The 
+                default is 10 seconds, or the `api_timeout` value in the super class' `pyaurorax.PyAuroraX`
+                object. This parameter is optional.
+            
+        Returns:
+            A list of [`Observatory`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Observatory)
+            objects.
+        
+        Raises:
+            pyaurorax.exceptions.AuroraXAPIError: An API error was encountered.
+        """
+        # init
+        observatories = []
+
+        # get ucalgary datasets
+        ucalgary_observatories = self.__ucalgary.list_observatories(instrument_array, uid=uid, timeout=timeout)
+
+        # merge
+        observatories = observatories + ucalgary_observatories
+
+        # sort by name
+        observatories = sorted(observatories, key=lambda x: x.uid)
+
+        # return
+        return observatories
+
+    def list_observatories_in_table(self,
+                                    instrument_array: Literal["themis_asi", "rego", "trex_rgb", "trex_nir", "trex_blue"],
+                                    uid: Optional[str] = None,
+                                    max_width: int = 200,
+                                    timeout: Optional[int] = None) -> None:
+        """
+        Print available observatories for a given instrument array in a table
+
+        Args:
+            instrument_array (str): 
+                The instrument array to list observatories for. Valid values are: themis_asi, rego, 
+                trex_rgb, trex_nir, and trex_blue.
+
+            uid (str): 
+                Supply an observatory unique identifier used for filtering (usually 4-letter site code). If that UID 
+                is found in the available observatories received from the API, it will be included in the results. This 
+                parameter is optional.
+            
+            max_width (int): 
+                Maximum width of the table. Default is `200`. This parameter is optional.
+
+            timeout (int): 
+                Represents how many seconds to wait for the API to send data before giving up. The 
+                default is 10 seconds, or the `api_timeout` value in the super class' `pyaurorax.PyAuroraX`
+                object. This parameter is optional.
+            
+        Returns:
+            Printed table.
+        
+        Raises:
+            pyaurorax.exceptions.AuroraXAPIError: An API error was encountered.
+        """
+        # get observatories
+        observatories = self.list_observatories(instrument_array, uid=uid, timeout=timeout)
+
+        # set table lists
+        table_uids = []
+        table_full_names = []
+        table_geo_lats = []
+        table_geo_lons = []
+        table_providers = []
+        for o in observatories:
+            table_uids.append(o.uid)
+            table_full_names.append(o.full_name)
+            table_geo_lats.append(o.geodetic_latitude)
+            table_geo_lons.append(o.geodetic_longitude)
+            table_providers.append(o.provider)
+
+        # set header values
+        table_headers = [
+            "UID",
+            "Full Name",
+            "Geodetic Latitude",
+            "Geodetic Longitude",
+            "Provider",
+        ]
+
+        # print as table
+        table = Texttable(max_width=max_width)
+        table.set_deco(Texttable.HEADER)
+        table.set_cols_dtype(["t"] * len(table_headers))
+        table.set_header_align(["l"] * len(table_headers))
+        table.set_cols_align(["l"] * len(table_headers))
+        table.header(table_headers)
+        for i in range(0, len(table_uids)):
+            table.add_row([
+                table_uids[i],
+                table_full_names[i],
+                table_geo_lats[i],
+                table_geo_lons[i],
+                table_providers[i],
+            ])
+        print(table.draw())
+
+
+
+

Sub-modules

+
+
pyaurorax.data.ucalgary
+
+

Data downloading and reading routines for data provided by the University of Calgary.

+
+
+
+
+
+
+
+
+

Classes

+
+
+class DataManager +(aurorax_obj) +
+
+

The DataManager object is initialized within every PyAuroraX object. It acts as a way to access +the submodules and carry over configuration information in the super class.

+
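As a brief orientation (an illustrative sketch, not part of the generated documentation above), the manager is reached through a `PyAuroraX` instance rather than being constructed directly:

```python
# Illustrative sketch; assumes a standard pyaurorax installation.
import pyaurorax

aurorax = pyaurorax.PyAuroraX()

# the DataManager lives on the PyAuroraX object ...
data_manager = aurorax.data

# ... and exposes provider-specific submodules, such as the ucalgary manager
ucalgary_manager = aurorax.data.ucalgary
```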
+ +Expand source code + +
class DataManager:
+    """
+    The DataManager object is initialized within every PyAuroraX object. It acts as a way to access 
+    the submodules and carry over configuration information in the super class.
+    """
+
+    def __init__(self, aurorax_obj):
+        self.__aurorax_obj = aurorax_obj
+
+        # initialize sub-modules
+        self.__ucalgary = UCalgaryManager(self.__aurorax_obj)
+
+    # ------------------------------------------
+    # properties for submodule managers
+    # ------------------------------------------
+    @property
+    def ucalgary(self):
+        """
+        Access to the `ucalgary` submodule from within a PyAuroraX object.
+        """
+        return self.__ucalgary
+
+    def list_datasets(self, name: Optional[str] = None, timeout: Optional[int] = None) -> List[Dataset]:
+        """
+        List available datasets from all providers
+
+        Args:
+            name (str): 
+                Supply a name used for filtering. If that name is found in the available dataset 
+                names received from the API, it will be included in the results. This parameter is
+                optional.
+            
+            timeout (int): 
+                Represents how many seconds to wait for the API to send data before giving up. The 
+                default is 10 seconds, or the `api_timeout` value in the super class' `pyaurorax.PyAuroraX`
+                object. This parameter is optional.
+            
+        Returns:
+            A list of [`Dataset`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Dataset)
+            objects.
+        
+        Raises:
+            pyaurorax.exceptions.AuroraXAPIError: An API error was encountered.
+        """
+        # init
+        datasets = []
+
+        # get ucalgary datasets
+        ucalgary_datasets = self.__ucalgary.list_datasets(name=name, timeout=timeout)
+
+        # merge
+        datasets = datasets + ucalgary_datasets
+
+        # sort by name
+        datasets = sorted(datasets, key=lambda x: x.name)
+
+        # return
+        return datasets
+
+    def list_datasets_in_table(self, name: Optional[str] = None, max_width: int = 200, timeout: Optional[int] = None) -> None:
+        """
+        Print available datasets from all providers in a table
+
+        Args:
+            name (str): 
+                Supply a name used for filtering. If that name is found in the available dataset 
+                names received from the API, it will be included in the results. This parameter is
+                optional.
+            
+            max_width (int): 
+                Maximum width of the table. Default is `200`. This parameter is optional.
+
+            timeout (int): 
+                Represents how many seconds to wait for the API to send data before giving up. The 
+                default is 10 seconds, or the `api_timeout` value in the super class' `pyaurorax.PyAuroraX`
+                object. This parameter is optional.
+            
+        Returns:
+            Printed table.
+
+        Raises:
+            pyaurorax.exceptions.AuroraXAPIError: An API error was encountered.
+        """
+        # get datasets
+        datasets = self.list_datasets(name=name, timeout=timeout)
+
+        # set table lists
+        table_names = []
+        table_providers = []
+        table_levels = []
+        table_doi_details = []
+        table_short_descriptions = []
+        for d in datasets:
+            table_names.append(d.name)
+            table_providers.append(d.provider)
+            table_levels.append(d.level)
+            table_doi_details.append(d.doi_details)
+            table_short_descriptions.append(d.short_description)
+
+        # set header values
+        table_headers = [
+            "Name",
+            "Provider",
+            "Level",
+            "DOI Details",
+            "Short Description",
+        ]
+
+        # print as table
+        table = Texttable(max_width=max_width)
+        table.set_deco(Texttable.HEADER)
+        table.set_cols_dtype(["t"] * len(table_headers))
+        table.set_header_align(["l"] * len(table_headers))
+        table.set_cols_align(["l"] * len(table_headers))
+        table.header(table_headers)
+        for i in range(0, len(table_names)):
+            table.add_row([
+                table_names[i],
+                table_providers[i],
+                table_levels[i],
+                table_doi_details[i],
+                table_short_descriptions[i],
+            ])
+        print(table.draw())
+
+    def list_observatories(self,
+                           instrument_array: Literal["themis_asi", "rego", "trex_rgb", "trex_nir", "trex_blue"],
+                           uid: Optional[str] = None,
+                           timeout: Optional[int] = None) -> List[Observatory]:
+        """
+        List information about observatories utilized by all providers.
+
+        Args:
+            instrument_array (str): 
+                The instrument array to list observatories for. Valid values are: themis_asi, rego, 
+                trex_rgb, trex_nir, and trex_blue.
+
+            uid (str): 
+                Supply an observatory unique identifier used for filtering (usually 4-letter site code). If that UID 
+                is found in the available observatories received from the API, it will be included in the results. This 
+                parameter is optional.
+            
+            timeout (int): 
+                Represents how many seconds to wait for the API to send data before giving up. The 
+                default is 10 seconds, or the `api_timeout` value in the super class' `pyaurorax.PyAuroraX`
+                object. This parameter is optional.
+            
+        Returns:
+            A list of [`Observatory`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Observatory)
+            objects.
+        
+        Raises:
+            pyaurorax.exceptions.AuroraXAPIError: An API error was encountered.
+        """
+        # init
+        observatories = []
+
+        # get ucalgary datasets
+        ucalgary_observatories = self.__ucalgary.list_observatories(instrument_array, uid=uid, timeout=timeout)
+
+        # merge
+        observatories = observatories + ucalgary_observatories
+
+        # sort by name
+        observatories = sorted(observatories, key=lambda x: x.uid)
+
+        # return
+        return observatories
+
+    def list_observatories_in_table(self,
+                                    instrument_array: Literal["themis_asi", "rego", "trex_rgb", "trex_nir", "trex_blue"],
+                                    uid: Optional[str] = None,
+                                    max_width: int = 200,
+                                    timeout: Optional[int] = None) -> None:
+        """
+        Print available observatories for a given instrument array in a table
+
+        Args:
+            instrument_array (str): 
+                The instrument array to list observatories for. Valid values are: themis_asi, rego, 
+                trex_rgb, trex_nir, and trex_blue.
+
+            uid (str): 
+                Supply an observatory unique identifier used for filtering (usually 4-letter site code). If that UID 
+                is found in the available observatories received from the API, it will be included in the results. This 
+                parameter is optional.
+            
+            max_width (int): 
+                Maximum width of the table. Default is `200`. This parameter is optional.
+
+            timeout (int): 
+                Represents how many seconds to wait for the API to send data before giving up. The 
+                default is 10 seconds, or the `api_timeout` value in the super class' `pyaurorax.PyAuroraX`
+                object. This parameter is optional.
+            
+        Returns:
+            Printed table.
+        
+        Raises:
+            pyaurorax.exceptions.AuroraXAPIError: An API error was encountered.
+        """
+        # get observatories
+        observatories = self.list_observatories(instrument_array, uid=uid, timeout=timeout)
+
+        # set table lists
+        table_uids = []
+        table_full_names = []
+        table_geo_lats = []
+        table_geo_lons = []
+        table_providers = []
+        for o in observatories:
+            table_uids.append(o.uid)
+            table_full_names.append(o.full_name)
+            table_geo_lats.append(o.geodetic_latitude)
+            table_geo_lons.append(o.geodetic_longitude)
+            table_providers.append(o.provider)
+
+        # set header values
+        table_headers = [
+            "UID",
+            "Full Name",
+            "Geodetic Latitude",
+            "Geodetic Longitude",
+            "Provider",
+        ]
+
+        # print as table
+        table = Texttable(max_width=max_width)
+        table.set_deco(Texttable.HEADER)
+        table.set_cols_dtype(["t"] * len(table_headers))
+        table.set_header_align(["l"] * len(table_headers))
+        table.set_cols_align(["l"] * len(table_headers))
+        table.header(table_headers)
+        for i in range(0, len(table_uids)):
+            table.add_row([
+                table_uids[i],
+                table_full_names[i],
+                table_geo_lats[i],
+                table_geo_lons[i],
+                table_providers[i],
+            ])
+        print(table.draw())
+
+

Instance variables

+
+
var ucalgary
+
+

Access to the pyaurorax.data.ucalgary submodule from within a PyAuroraX object.

+
+ +Expand source code + +
@property
+def ucalgary(self):
+    """
+    Access to the `ucalgary` submodule from within a PyAuroraX object.
+    """
+    return self.__ucalgary
+
+
+
+

Methods

+
+
+def list_datasets(self, name: Optional[str] = None, timeout: Optional[int] = None) ‑> List[pyucalgarysrs.data.classes.Dataset] +
+
+

List available datasets from all providers

+

Args

+
+
name : str
+
Supply a name used for filtering. If that name is found in the available dataset +names received from the API, it will be included in the results. This parameter is +optional.
+
timeout : int
+
Represents how many seconds to wait for the API to send data before giving up. The +default is 10 seconds, or the api_timeout value in the super class' PyAuroraX +object. This parameter is optional.
+
+

Returns

+

A list of Dataset +objects.

+

Raises

+
+
AuroraXAPIError
+
An API error was encountered.
+
+
+ +Expand source code + +
def list_datasets(self, name: Optional[str] = None, timeout: Optional[int] = None) -> List[Dataset]:
+    """
+    List available datasets from all providers
+
+    Args:
+        name (str): 
+            Supply a name used for filtering. If that name is found in the available dataset 
+            names received from the API, it will be included in the results. This parameter is
+            optional.
+        
+        timeout (int): 
+            Represents how many seconds to wait for the API to send data before giving up. The 
+            default is 10 seconds, or the `api_timeout` value in the super class' `pyaurorax.PyAuroraX`
+            object. This parameter is optional.
+        
+    Returns:
+        A list of [`Dataset`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Dataset)
+        objects.
+    
+    Raises:
+        pyaurorax.exceptions.AuroraXAPIError: An API error was encountered.
+    """
+    # init
+    datasets = []
+
+    # get ucalgary datasets
+    ucalgary_datasets = self.__ucalgary.list_datasets(name=name, timeout=timeout)
+
+    # merge
+    datasets = datasets + ucalgary_datasets
+
+    # sort by name
+    datasets = sorted(datasets, key=lambda x: x.name)
+
+    # return
+    return datasets
+
+
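A hedged usage sketch for this method (the `THEMIS` name filter is only an illustrative value):

```python
import pyaurorax

aurorax = pyaurorax.PyAuroraX()

# list all datasets, then a filtered subset using the optional name parameter
all_datasets = aurorax.data.list_datasets()
themis_datasets = aurorax.data.list_datasets(name="THEMIS", timeout=20)

# each element is a Dataset object; these fields also feed the table helper below
for d in themis_datasets:
    print(d.name, d.provider, d.level, d.short_description)
```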
+
+def list_datasets_in_table(self, name: Optional[str] = None, max_width: int = 200, timeout: Optional[int] = None) ‑> None +
+
+

Print available datasets from all providers in a table

+

Args

+
+
name : str
+
Supply a name used for filtering. If that name is found in the available dataset +names received from the API, it will be included in the results. This parameter is +optional.
+
max_width : int
+
Maximum width of the table. Default is 200. This parameter is optional.
+
timeout : int
+
Represents how many seconds to wait for the API to send data before giving up. The +default is 10 seconds, or the api_timeout value in the super class' PyAuroraX +object. This parameter is optional.
+
+

Returns

+

Printed table.

+

Raises

+
+
AuroraXAPIError
+
An API error was encountered.
+
+
+ +Expand source code + +
def list_datasets_in_table(self, name: Optional[str] = None, max_width: int = 200, timeout: Optional[int] = None) -> None:
+    """
+    Print available datasets from all providers in a table
+
+    Args:
+        name (str): 
+            Supply a name used for filtering. If that name is found in the available dataset 
+            names received from the API, it will be included in the results. This parameter is
+            optional.
+        
+        max_width (int): 
+            Maximum width of the table. Default is `200`. This parameter is optional.
+
+        timeout (int): 
+            Represents how many seconds to wait for the API to send data before giving up. The 
+            default is 10 seconds, or the `api_timeout` value in the super class' `pyaurorax.PyAuroraX`
+            object. This parameter is optional.
+        
+    Returns:
+        Printed table.
+
+    Raises:
+        pyaurorax.exceptions.AuroraXAPIError: An API error was encountered.
+    """
+    # get datasets
+    datasets = self.list_datasets(name=name, timeout=timeout)
+
+    # set table lists
+    table_names = []
+    table_providers = []
+    table_levels = []
+    table_doi_details = []
+    table_short_descriptions = []
+    for d in datasets:
+        table_names.append(d.name)
+        table_providers.append(d.provider)
+        table_levels.append(d.level)
+        table_doi_details.append(d.doi_details)
+        table_short_descriptions.append(d.short_description)
+
+    # set header values
+    table_headers = [
+        "Name",
+        "Provider",
+        "Level",
+        "DOI Details",
+        "Short Description",
+    ]
+
+    # print as table
+    table = Texttable(max_width=max_width)
+    table.set_deco(Texttable.HEADER)
+    table.set_cols_dtype(["t"] * len(table_headers))
+    table.set_header_align(["l"] * len(table_headers))
+    table.set_cols_align(["l"] * len(table_headers))
+    table.header(table_headers)
+    for i in range(0, len(table_names)):
+        table.add_row([
+            table_names[i],
+            table_providers[i],
+            table_levels[i],
+            table_doi_details[i],
+            table_short_descriptions[i],
+        ])
+    print(table.draw())
+
+
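A short companion sketch for the table variant (the narrower `max_width` is illustrative):

```python
import pyaurorax

aurorax = pyaurorax.PyAuroraX()

# prints a Texttable-rendered summary instead of returning Dataset objects
aurorax.data.list_datasets_in_table(name="THEMIS", max_width=120)
```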
+
+def list_observatories(self, instrument_array: Literal['themis_asi', 'rego', 'trex_rgb', 'trex_nir', 'trex_blue'], uid: Optional[str] = None, timeout: Optional[int] = None) ‑> List[pyucalgarysrs.data.classes.Observatory] +
+
+

List information about observatories utilized by all providers.

+

Args

+
+
instrument_array : str
+
The instrument array to list observatories for. Valid values are: themis_asi, rego, +trex_rgb, trex_nir, and trex_blue.
+
uid : str
+
Supply an observatory unique identifier used for filtering (usually 4-letter site code). If that UID +is found in the available observatories received from the API, it will be included in the results. This +parameter is optional.
+
timeout : int
+
Represents how many seconds to wait for the API to send data before giving up. The +default is 10 seconds, or the api_timeout value in the super class' PyAuroraX +object. This parameter is optional.
+
+

Returns

+

A list of Observatory +objects.

+

Raises

+
+
AuroraXAPIError
+
An API error was encountered.
+
+
+ +Expand source code + +
def list_observatories(self,
+                       instrument_array: Literal["themis_asi", "rego", "trex_rgb", "trex_nir", "trex_blue"],
+                       uid: Optional[str] = None,
+                       timeout: Optional[int] = None) -> List[Observatory]:
+    """
+    List information about observatories utilized by all providers.
+
+    Args:
+        instrument_array (str): 
+            The instrument array to list observatories for. Valid values are: themis_asi, rego, 
+            trex_rgb, trex_nir, and trex_blue.
+
+        uid (str): 
+            Supply an observatory unique identifier used for filtering (usually 4-letter site code). If that UID 
+            is found in the available observatories received from the API, it will be included in the results. This 
+            parameter is optional.
+        
+        timeout (int): 
+            Represents how many seconds to wait for the API to send data before giving up. The 
+            default is 10 seconds, or the `api_timeout` value in the super class' `pyaurorax.PyAuroraX`
+            object. This parameter is optional.
+        
+    Returns:
+        A list of [`Observatory`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Observatory)
+        objects.
+    
+    Raises:
+        pyaurorax.exceptions.AuroraXAPIError: An API error was encountered.
+    """
+    # init
+    observatories = []
+
+    # get ucalgary datasets
+    ucalgary_observatories = self.__ucalgary.list_observatories(instrument_array, uid=uid, timeout=timeout)
+
+    # merge
+    observatories = observatories + ucalgary_observatories
+
+    # sort by name
+    observatories = sorted(observatories, key=lambda x: x.uid)
+
+    # return
+    return observatories
+
+
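A hedged sketch of listing observatories, using the documented `themis_asi` instrument array value and the `atha` site code mentioned in the download examples elsewhere in this reference:

```python
import pyaurorax

aurorax = pyaurorax.PyAuroraX()

# all observatories for one instrument array
observatories = aurorax.data.list_observatories("themis_asi")

# or filter to a single site by its (usually 4-letter) UID
atha = aurorax.data.list_observatories("themis_asi", uid="atha")
for o in atha:
    print(o.uid, o.full_name, o.geodetic_latitude, o.geodetic_longitude)
```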
+
+def list_observatories_in_table(self, instrument_array: Literal['themis_asi', 'rego', 'trex_rgb', 'trex_nir', 'trex_blue'], uid: Optional[str] = None, max_width: int = 200, timeout: Optional[int] = None) ‑> None +
+
+

Print available observatories for a given instrument array in a table

+

Args

+
+
instrument_array : str
+
The instrument array to list observatories for. Valid values are: themis_asi, rego, +trex_rgb, trex_nir, and trex_blue.
+
uid : str
+
Supply an observatory unique identifier used for filtering (usually 4-letter site code). If that UID +is found in the available observatories received from the API, it will be included in the results. This +parameter is optional.
+
max_width : int
+
Maximum width of the table. Default is 200. This parameter is optional.
+
timeout : int
+
Represents how many seconds to wait for the API to send data before giving up. The +default is 10 seconds, or the api_timeout value in the super class' PyAuroraX +object. This parameter is optional.
+
+

Returns

+

Printed table.

+

Raises

+
+
AuroraXAPIError
+
An API error was encountered.
+
+
+ +Expand source code + +
def list_observatories_in_table(self,
+                                instrument_array: Literal["themis_asi", "rego", "trex_rgb", "trex_nir", "trex_blue"],
+                                uid: Optional[str] = None,
+                                max_width: int = 200,
+                                timeout: Optional[int] = None) -> None:
+    """
+    Print available observatories for a given instrument array in a table
+
+    Args:
+        instrument_array (str): 
+            The instrument array to list observatories for. Valid values are: themis_asi, rego, 
+            trex_rgb, trex_nir, and trex_blue.
+
+        uid (str): 
+            Supply an observatory unique identifier used for filtering (usually 4-letter site code). If that UID 
+            is found in the available observatories received from the API, it will be included in the results. This 
+            parameter is optional.
+        
+        max_width (int): 
+            Maximum width of the table. Default is `200`. This parameter is optional.
+
+        timeout (int): 
+            Represents how many seconds to wait for the API to send data before giving up. The 
+            default is 10 seconds, or the `api_timeout` value in the super class' `pyaurorax.PyAuroraX`
+            object. This parameter is optional.
+        
+    Returns:
+        Printed table.
+    
+    Raises:
+        pyaurorax.exceptions.AuroraXAPIError: An API error was encountered.
+    """
+    # get observatories
+    observatories = self.list_observatories(instrument_array, uid=uid, timeout=timeout)
+
+    # set table lists
+    table_uids = []
+    table_full_names = []
+    table_geo_lats = []
+    table_geo_lons = []
+    table_providers = []
+    for o in observatories:
+        table_uids.append(o.uid)
+        table_full_names.append(o.full_name)
+        table_geo_lats.append(o.geodetic_latitude)
+        table_geo_lons.append(o.geodetic_longitude)
+        table_providers.append(o.provider)
+
+    # set header values
+    table_headers = [
+        "UID",
+        "Full Name",
+        "Geodetic Latitude",
+        "Geodetic Longitude",
+        "Provider",
+    ]
+
+    # print as table
+    table = Texttable(max_width=max_width)
+    table.set_deco(Texttable.HEADER)
+    table.set_cols_dtype(["t"] * len(table_headers))
+    table.set_header_align(["l"] * len(table_headers))
+    table.set_cols_align(["l"] * len(table_headers))
+    table.header(table_headers)
+    for i in range(0, len(table_uids)):
+        table.add_row([
+            table_uids[i],
+            table_full_names[i],
+            table_geo_lats[i],
+            table_geo_lons[i],
+            table_providers[i],
+        ])
+    print(table.draw())
+
+
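The corresponding table helper, again only as an illustrative sketch:

```python
import pyaurorax

aurorax = pyaurorax.PyAuroraX()

# print a summary table of REGO observatories, capped at 120 columns wide
aurorax.data.list_observatories_in_table("rego", max_width=120)
```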
+
+
+
+
+
+ +
+ + + \ No newline at end of file diff --git a/docs/code/pyaurorax_api_reference/pyaurorax/data/ucalgary/index.html b/docs/code/pyaurorax_api_reference/pyaurorax/data/ucalgary/index.html new file mode 100644 index 0000000..0ec6899 --- /dev/null +++ b/docs/code/pyaurorax_api_reference/pyaurorax/data/ucalgary/index.html @@ -0,0 +1,3990 @@ + + + + + + +pyaurorax.data.ucalgary API documentation + + + + + + + + + + + +
+
+
+

Module pyaurorax.data.ucalgary

+
+
+

Data downloading and reading routines for data provided by the University of Calgary.

+
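Before the full source listing below, a hedged end-to-end sketch of the download-then-read workflow this submodule provides. The dataset name, site code, and timestamps are illustrative values drawn from the docstrings; the `filenames` attribute used to hand downloaded files to `read()` is an assumption about the `FileDownloadResult` object and is not documented on this page:

```python
# Illustrative sketch only. Assumes the FileDownloadResult returned by
# download() exposes the downloaded file paths via a `filenames` attribute
# (hypothetical here).
import datetime
import pyaurorax

aurorax = pyaurorax.PyAuroraX()

# download one hour of THEMIS ASI raw data from the Athabasca ('atha') site
dataset_name = "THEMIS_ASI_RAW"
start = datetime.datetime(2022, 1, 1, 6, 0, 0)
end = datetime.datetime(2022, 1, 1, 6, 59, 59)
res = aurorax.data.ucalgary.download(dataset_name, start, end, site_uid="atha")

# read the files back in, using the matching Dataset object for this dataset
dataset = aurorax.data.ucalgary.list_datasets(name=dataset_name)[0]
data = aurorax.data.ucalgary.read(dataset, res.filenames, n_parallel=2)
```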
+ +Expand source code + +
# Copyright 2024 University of Calgary
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Data downloading and reading routines for data provided by the University of Calgary.
+"""
+
+import os
+import datetime
+from pathlib import Path
+from typing import TYPE_CHECKING, Optional, List, Union, Literal
+from pyucalgarysrs.data import (
+    Observatory,
+    Dataset,
+    FileDownloadResult,
+    FileListingResponse,
+    Data,
+    Skymap,
+    Calibration,
+)
+from pyucalgarysrs.exceptions import SRSAPIError, SRSDownloadError
+from ...exceptions import AuroraXAPIError, AuroraXDownloadError
+from .read import ReadManager
+if TYPE_CHECKING:
+    from ...pyaurorax import PyAuroraX
+
+__all__ = [
+    "UCalgaryManager",
+    "Observatory",
+    "Dataset",
+    "FileDownloadResult",
+    "FileListingResponse",
+    "Data",
+    "Skymap",
+    "Calibration",
+]
+
+
+class UCalgaryManager:
+    """
+    The UCalgaryManager object is initialized within every PyAuroraX object. It acts as a way to access 
+    the submodules and carry over configuration information in the super class.
+    """
+
+    __DEFAULT_DOWNLOAD_N_PARALLEL = 5
+
+    def __init__(self, aurorax_obj):
+        self.__aurorax_obj: PyAuroraX = aurorax_obj
+
+        # initialize sub-modules
+        self.__readers = ReadManager(self.__aurorax_obj)
+
+    @property
+    def readers(self):
+        """
+        Access to the `read` submodule from within a PyAuroraX object.
+        """
+        return self.__readers
+
+    def list_datasets(self, name: Optional[str] = None, timeout: Optional[int] = None) -> List[Dataset]:
+        """
+        List available datasets
+
+        Args:
+            name (str): 
+                Supply a name used for filtering. If that name is found in the available dataset 
+                names received from the API, it will be included in the results. This parameter is
+                optional.
+            
+            timeout (int): 
+                Represents how many seconds to wait for the API to send data before giving up. The 
+                default is 10 seconds, or the `api_timeout` value in the super class' `pyaurorax.PyAuroraX`
+                object. This parameter is optional.
+            
+        Returns:
+            A list of [`Dataset`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Dataset)
+            objects.
+        
+        Raises:
+            pyaurorax.exceptions.AuroraXAPIError: An API error was encountered.
+        """
+        try:
+            return self.__aurorax_obj.srs_obj.data.list_datasets(name=name, timeout=timeout)
+        except SRSAPIError as e:
+            raise AuroraXAPIError(e) from e
+
+    def list_observatories(self,
+                           instrument_array: Literal["themis_asi", "rego", "trex_rgb", "trex_nir", "trex_blue"],
+                           uid: Optional[str] = None,
+                           timeout: Optional[int] = None) -> List[Observatory]:
+        """
+        List information about observatories
+
+        Args:
+            instrument_array (str): 
+                The instrument array to list observatories for. Valid values are: themis_asi, rego, 
+                trex_rgb, trex_nir, and trex_blue.
+
+            uid (str): 
+                Supply an observatory unique identifier used for filtering (usually 4-letter site code). If that UID 
+                is found in the available observatories received from the API, it will be included in the results. This 
+                parameter is optional.
+            
+            timeout (int): 
+                Represents how many seconds to wait for the API to send data before giving up. The 
+                default is 10 seconds, or the `api_timeout` value in the super class' `pyaurorax.PyAuroraX`
+                object. This parameter is optional.
+            
+        Returns:
+            A list of [`Observatory`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Observatory)
+            objects.
+        
+        Raises:
+            pyaurorax.exceptions.AuroraXAPIError: An API error was encountered.
+        """
+        try:
+            return self.__aurorax_obj.srs_obj.data.list_observatories(instrument_array, uid=uid, timeout=timeout)
+        except SRSAPIError as e:
+            raise AuroraXAPIError(e) from e
+
+    def list_supported_read_datasets(self) -> List[str]:
+        """
+        List the datasets which have file reading capabilities supported.
+
+        Returns:
+            A list of the dataset names with file reading support.
+        """
+        return self.__aurorax_obj.srs_obj.data.list_supported_read_datasets()
+
+    def is_read_supported(self, dataset_name: str) -> bool:
+        """
+        Check if a given dataset has file reading support. 
+        
+        Not all datasets available in the UCalgary Space Remote Sensing Open Data Platform 
+        have special readfile routines in this library. This is because some datasets are 
+        in basic formats such as JPG or PNG, so unique functions aren't necessary. We leave 
+        it up to the user to open these basic files in whichever way they prefer. Use the 
+        `list_supported_read_datasets()` function to see all datasets that have special
+        file reading functionality in this library.
+
+        Args:
+            dataset_name (str): 
+                The dataset name to check if file reading is supported. This parameter 
+                is required.
+        
+        Returns:
+            Boolean indicating if file reading is supported.
+        """
+        return self.__aurorax_obj.srs_obj.data.is_read_supported(dataset_name)
+
+    def download(self,
+                 dataset_name: str,
+                 start: datetime.datetime,
+                 end: datetime.datetime,
+                 site_uid: Optional[str] = None,
+                 device_uid: Optional[str] = None,
+                 n_parallel: int = __DEFAULT_DOWNLOAD_N_PARALLEL,
+                 overwrite: bool = False,
+                 progress_bar_disable: bool = False,
+                 progress_bar_ncols: Optional[int] = None,
+                 progress_bar_ascii: Optional[str] = None,
+                 progress_bar_desc: Optional[str] = None,
+                 timeout: Optional[int] = None) -> FileDownloadResult:
+        """
+        Download data from the UCalgary Space Remote Sensing Open Data Platform.
+
+        The parameters `dataset_name`, `start`, and `end` are required. All other parameters
+        are optional.
+
+        Note that usage of the site and device UID filters applies differently to some datasets.
+        For example, both fields can be used for most raw and keogram data, but only site UID can
+        be used for skymap datasets, and only device UID can be used for calibration datasets. If 
+        fields are specified during a call in which site or device UID is not used, a UserWarning
+        is displayed to provide the user with feedback about this detail.
+
+        Args:
+            dataset_name (str): 
+                Name of the dataset to download data for. Use the `list_datasets()` function
+                to get the possible values for this parameter. One example is "THEMIS_ASI_RAW". 
+                Note that dataset names are case sensitive. This parameter is required.
+
+            start (datetime.datetime): 
+                Start timestamp to use (inclusive), expected to be in UTC. Any timezone data 
+                will be ignored. This parameter is required.
+
+            end (datetime.datetime): 
+                End timestamp to use (inclusive), expected to be in UTC. Any timezone data 
+                will be ignored. This parameter is required.
+
+            site_uid (str): 
+                The site UID to filter for. If specified, data will be downloaded for only the 
+                site matching the given value. If excluded, data for all available sites will 
+                be downloaded. An example value could be 'atha', meaning all data from the 
+                Athabasca observatory will be downloaded for the given dataset name, start, and 
+                end times. This parameter is optional.
+
+            device_uid (str): 
+                The device UID to filter for. If specified, data will be downloaded for only the
+                device matching the given value. If excluded, data for all available devices will
+                be downloaded. An example value could be 'themis02', meaning all data matching that
+                device will be downloaded for the given dataset name, start, and end times. This
+                parameter is optional.
+
+            n_parallel (int): 
+                Number of data files to download in parallel. Default value is 5. Adjust as needed 
+                for your internet connection. This parameter is optional.
+
+            overwrite (bool): 
+                By default, data will not be re-downloaded if it already exists locally. Use 
+                the `overwrite` parameter to force re-downloading. Default is `False`. This 
+                parameter is optional.
+
+            progress_bar_disable (bool): 
+                Disable the progress bar. Default is `False`. This parameter is optional.
+
+            progress_bar_ncols (int): 
+                Number of columns for the progress bar (straight passthrough of the `ncols` 
+                parameter in a tqdm progress bar). This parameter is optional. See Notes section
+                below for further information.
+            
+            progress_bar_ascii (str): 
+                ASCII value to use when constructing the visual aspect of the progress bar (straight 
+                passthrough of the `ascii` parameter in a tqdm progress bar). This parameter is 
+                optional. See Notes section below for further details.
+
+            timeout (int): 
+                Represents how many seconds to wait for the API to send data before giving up. The 
+                default is 10 seconds, or the `api_timeout` value in the super class' `pyaurorax.PyAuroraX`
+                object. This parameter is optional.
+
+        Returns:
+            A [`FileDownloadResult`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.FileDownloadResult) 
+            object containing details about what data files were downloaded.
+
+        Raises:
+            pyaurorax.exceptions.AuroraXDownloadError: an error was encountered while downloading a 
+                specific file
+            pyaurorax.exceptions.AuroraXAPIError: an API error was encountered
+
+        Notes:
+        --------
+        The `progress_bar_*` parameters can be used to enable/disable/adjust the progress bar. 
+        Excluding the `progress_bar_disable` parameter, all others are straight pass-throughs 
+        to the tqdm progress bar function. The `progress_bar_ncols` parameter allows for 
+        adjusting the width. The `progress_bar_ascii` parameter allows for adjusting the appearance 
+        of the progress bar. And the `progress_bar_desc` parameter allows for adjusting the 
+        description at the beginning of the progress bar. Further details can be found on the
+        [tqdm documentation](https://tqdm.github.io/docs/tqdm/#tqdm-objects).
+
+        Data downloading will use the `download_data_root_path` variable within the super class'
+        object ([`PyAuroraX`](../../index.html#pyaurorax.PyAuroraX)) to determine where to save data to. If 
+        you'd like to change this path to somewhere else you can change that variable before your
+        download() call, like so:
+
+        ```python
+        import pyaurorax
+        aurorax = pyaurorax.PyAuroraX()
+        aurorax.data_download_root_path = "some_new_path"
+        aurorax.data.download(dataset_name, start, end)
+        ```
+        """
+        try:
+            return self.__aurorax_obj.srs_obj.data.download(
+                dataset_name,
+                start,
+                end,
+                site_uid=site_uid,
+                device_uid=device_uid,
+                n_parallel=n_parallel,
+                overwrite=overwrite,
+                progress_bar_disable=progress_bar_disable,
+                progress_bar_ncols=progress_bar_ncols,
+                progress_bar_ascii=progress_bar_ascii,
+                progress_bar_desc=progress_bar_desc,
+                timeout=timeout,
+            )
+        except SRSDownloadError as e:
+            raise AuroraXDownloadError(e) from e
+        except SRSAPIError as e:
+            raise AuroraXAPIError(e) from e
+
+    def download_using_urls(self,
+                            file_listing_response: FileListingResponse,
+                            n_parallel: int = __DEFAULT_DOWNLOAD_N_PARALLEL,
+                            overwrite: bool = False,
+                            progress_bar_disable: bool = False,
+                            progress_bar_ncols: Optional[int] = None,
+                            progress_bar_ascii: Optional[str] = None,
+                            progress_bar_desc: Optional[str] = None,
+                            timeout: Optional[int] = None) -> FileDownloadResult:
+        """
+        Download data from the UCalgary Space Remote Sensing Open Data Platform using 
+        a FileListingResponse object. This would be used in cases where more customization 
+        is needed than the generic `download()` function provides. 
+        
+        One example of using this function would start by using `get_urls()` to retrieve the
+        list of URLs available for download, then further process this list to fewer files
+        based on some other requirement (i.e., time down-sampling such as one file per hour). 
+        Lastly, use this function to download the new custom set of URLs.
+
+        Args:
+            file_listing_response (FileListingResponse): 
+                A [`FileListingResponse`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.FileListingResponse) 
+                object returned from a `get_urls()` call, which contains a list of URLs to download 
+                for a specific dataset. This parameter is required.
+
+            n_parallel (int): 
+                Number of data files to download in parallel. Default value is 5. Adjust as needed 
+                for your internet connection. This parameter is optional.
+
+            overwrite (bool): 
+                By default, data will not be re-downloaded if it already exists locally. Use 
+                the `overwrite` parameter to force re-downloading. Default is `False`. This 
+                parameter is optional.
+
+            progress_bar_disable (bool): 
+                Disable the progress bar. Default is `False`. This parameter is optional.
+
+            progress_bar_ncols (int): 
+                Number of columns for the progress bar (straight passthrough of the `ncols` 
+                parameter in a tqdm progress bar). This parameter is optional. See Notes section
+                below for further information.
+            
+            progress_bar_ascii (str): 
+                ASCII value to use when constructing the visual aspect of the progress bar (straight 
+                passthrough of the `ascii` parameter in a tqdm progress bar). This parameter is 
+                optional. See Notes section below for further details.
+
+            timeout (int): 
+                Represents how many seconds to wait for the API to send data before giving up. The 
+                default is 10 seconds, or the `api_timeout` value in the super class' `pyaurorax.PyAuroraX`
+                object. This parameter is optional.
+
+        Returns:
+            A [`FileDownloadResult`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.FileDownloadResult) 
+            object containing details about what data files were downloaded.
+
+        Raises:
+            pyaurorax.exceptions.AuroraXDownloadError: an error was encountered while downloading a 
+                specific file
+            pyaurorax.exceptions.AuroraXAPIError: an API error was encountered
+
+        Notes:
+        --------
+        The `progress_bar_*` parameters can be used to enable/disable/adjust the progress bar. 
+        Excluding the `progress_bar_disable` parameter, all others are straight pass-throughs 
+        to the tqdm progress bar function. The `progress_bar_ncols` parameter allows for 
+        adjusting the width. The `progress_bar_ascii` parameter allows for adjusting the appearance 
+        of the progress bar. And the `progress_bar_desc` parameter allows for adjusting the 
+        description at the beginning of the progress bar. Further details can be found on the
+        [tqdm documentation](https://tqdm.github.io/docs/tqdm/#tqdm-objects).
+
+        Data downloading will use the `download_data_root_path` variable within the super class'
+        object ([`PyAuroraX`](../../index.html#pyaurorax.PyAuroraX)) to determine where to save data to. If 
+        you'd like to change this path to somewhere else you can change that variable before your
+        download() call, like so:
+
+        ```python
+        import pyaurorax
+        aurorax = pyaurorax.PyAuroraX()
+        aurorax.data_download_root_path = "some_new_path"
+        aurorax.data.download(dataset_name, start, end)
+        ```
+        """
+        try:
+            return self.__aurorax_obj.srs_obj.data.download_using_urls(
+                file_listing_response,
+                n_parallel=n_parallel,
+                overwrite=overwrite,
+                progress_bar_disable=progress_bar_disable,
+                progress_bar_ncols=progress_bar_ncols,
+                progress_bar_ascii=progress_bar_ascii,
+                progress_bar_desc=progress_bar_desc,
+                timeout=timeout,
+            )
+        except SRSDownloadError as e:
+            raise AuroraXDownloadError(e) from e
+        except SRSAPIError as e:
+            raise AuroraXAPIError(e) from e
+
+    def get_urls(self,
+                 dataset_name: str,
+                 start: datetime.datetime,
+                 end: datetime.datetime,
+                 site_uid: Optional[str] = None,
+                 device_uid: Optional[str] = None,
+                 timeout: Optional[int] = None) -> FileListingResponse:
+        """
+        Get URLs of data files
+
+        The parameters `dataset_name`, `start`, and `end` are required. All other parameters
+        are optional.
+
+        Note that usage of the site and device UID filters applies differently to some datasets.
+        For example, both fields can be used for most raw and keogram data, but only site UID can
+        be used for skymap datasets, and only device UID can be used for calibration datasets. If 
+        fields are specified during a call in which site or device UID is not used, a UserWarning
+        is displayed to provide the user with feedback about this detail.
+
+        Args:
+            dataset_name (str): 
+                Name of the dataset to download data for. Use the `list_datasets()` function
+                to get the possible values for this parameter. One example is "THEMIS_ASI_RAW". 
+                Note that dataset names are case sensitive. This parameter is required.
+
+            start (datetime.datetime): 
+                Start timestamp to use (inclusive), expected to be in UTC. Any timezone data 
+                will be ignored. This parameter is required.
+
+            end (datetime.datetime): 
+                End timestamp to use (inclusive), expected to be in UTC. Any timezone data 
+                will be ignored. This parameter is required.
+
+            site_uid (str): 
+                The site UID to filter for. If specified, data will be downloaded for only the 
+                site matching the given value. If excluded, data for all available sites will 
+                be downloaded. An example value could be 'atha', meaning all data from the 
+                Athabasca observatory will be downloaded for the given dataset name, start, and 
+                end times. This parameter is optional.
+
+            device_uid (str): 
+                The device UID to filter for. If specified, data will be downloaded for only the
+                device matching the given value. If excluded, data for all available devices will
+                be downloaded. An example value could be 'themis02', meaning all data matching that
+                device will be downloaded for the given dataset name, start, and end times. This
+                parameter is optional.
+
+            timeout (int): 
+                Represents how many seconds to wait for the API to send data before giving up. The 
+                default is 10 seconds, or the `api_timeout` value in the super class' `pyaurorax.PyAuroraX`
+                object. This parameter is optional.
+    
+        Returns:
+            A [`FileListingResponse`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.FileListingResponse)
+            object containing a list of the available URLs, among other values.
+
+        Raises:
+            pyaurorax.exceptions.AuroraXAPIError: an API error was encountered
+        """
+        try:
+            return self.__aurorax_obj.srs_obj.data.get_urls(
+                dataset_name,
+                start,
+                end,
+                site_uid=site_uid,
+                device_uid=device_uid,
+                timeout=timeout,
+            )
+        except SRSAPIError as e:
+            raise AuroraXAPIError(e) from e
+
+    def read(self,
+             dataset: Dataset,
+             file_list: Union[List[str], List[Path], str, Path],
+             n_parallel: int = 1,
+             first_record: bool = False,
+             no_metadata: bool = False,
+             quiet: bool = False) -> Data:
+        """
+        Read in data files for a given dataset. Note that only one type of dataset's data
+        should be read in using a single call.
+
+        Args:
+            dataset (Dataset): 
+                The dataset object for which the files are associated with. This parameter is
+                required.
+            
+            file_list (List[str], List[Path], str, Path): 
+                The files to read in. Absolute paths are recommended, but not technically
+                necessary. This can be a single string for a file, or a list of strings to read
+                in multiple files. This parameter is required.
+
+            n_parallel (int): 
+                Number of data files to read in parallel using multiprocessing. Default value 
+                is 1. Adjust according to your computer's available resources. This parameter 
+                is optional.
+            
+            first_record (bool): 
+                Only read in the first record in each file. This is the same as the first_frame
+                parameter in the themis-imager-readfile and trex-imager-readfile libraries, and
+                is a read optimization if you only need one image per minute, as opposed to the
+                full temporal resolution of data (e.g., 3sec cadence). This parameter is optional.
+            
+            no_metadata (bool): 
+                Skip reading of metadata. This is a minor optimization if the metadata is not needed.
+                Default is `False`. This parameter is optional.
+            
+            quiet (bool): 
+                Do not print out errors while reading data files, if any are encountered. Any files
+                that encounter errors will be, as usual, accessible via the `problematic_files` 
+                attribute of the returned `Data` object. This parameter is optional.
+        
+        Returns:
+            A [`Data`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Data) 
+            object containing the data read in, among other values.
+        
+        Raises:
+            pyaurorax.exceptions.AuroraXUnsupportedReadError: an unsupported dataset was used when
+                trying to read files.
+            pyaurorax.exceptions.AuroraXError: a generic read error was encountered
+
+        Notes:
+        ---------
+        For users who are familiar with the themis-imager-readfile and trex-imager-readfile
+        libraries, the read function provides a near-identical usage. Further improvements have 
+        been integrated, and those libraries are anticipated to be deprecated at some point in the
+        future.
+        """
+        # NOTE: we do not wrap the exceptions here, instead we pass the call along
+        # to the ReadManager object since the method and exception catching is
+        # implemented there. No need to duplicate the exception handling logic.
+        return self.__readers.read(
+            dataset,
+            file_list,
+            n_parallel=n_parallel,
+            first_record=first_record,
+            no_metadata=no_metadata,
+            quiet=quiet,
+        )
+
+    def download_best_skymap(
+        self,
+        dataset_name: str,
+        site_uid: str,
+        timestamp: datetime.datetime,
+        timeout: Optional[int] = None,
+        overwrite: bool = False,
+    ) -> FileDownloadResult:
+        """
+        Download the skymap file that best matches the parameters supplied.
+
+        Args:
+            dataset_name (str): 
+                Name of the dataset to download data for. Use the `list_datasets()` function
+                to get the possible values for this parameter. One example is "THEMIS_ASI_SKYMAP_IDLSAV". 
+                Note that dataset names are case sensitive. This parameter is required.
+
+            site_uid (str): 
+                The site UID to evaluate.
+
+            timestamp (datetime.datetime): 
+                The timestamp to use for deciding the best skymap, expected to be in UTC. Any timezone 
+                data will be ignored. This parameter is required.
+
+            timeout (int): 
+                Represents how many seconds to wait for the API to send data before giving up. The 
+                default is 10 seconds, or the `api_timeout` value in the super class' `pyaurorax.PyAuroraX`
+                object. This parameter is optional.
+
+            overwrite (bool): 
+                By default, the file will not be re-downloaded if it already exists locally. Use 
+                the `overwrite` parameter to force re-downloading. Default is `False`. This 
+                parameter is optional.
+
+        Returns:
+            A [`FileDownloadResult`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.FileDownloadResult) 
+            object containing details about what data files were downloaded.
+
+        Raises:
+            ValueError: issue with supplied timestamp
+            pyaurorax.exceptions.AuroraXAPIError: an API error was encountered        
+        """
+        # get list of all skymap urls for the dataset and site
+        start_dt = datetime.datetime(2000, 1, 1)
+        end_dt = datetime.datetime.now() + datetime.timedelta(days=5)
+        file_listing_obj = self.get_urls(dataset_name, start_dt, end_dt, site_uid=site_uid, timeout=timeout)
+
+        # filter down and find the best skymap for the timestamp supplied
+        best_skymap_filename = None
+        for url in file_listing_obj.urls:
+            # extract start date for this skymap
+            url_short = url.replace(file_listing_obj.path_prefix + "/", "")
+
+            # parse filename into several values
+            filename_split = os.path.basename(url_short).split('_')
+            filename_times_split = filename_split[3].split('-')
+            valid_interval_start_dt = datetime.datetime.strptime(filename_times_split[0], "%Y%m%d")
+
+            # check start time
+            if (timestamp >= valid_interval_start_dt):
+                # valid
+                #
+                # NOTE: this works because of the order that the list is in already
+                best_skymap_filename = url
+
+        # check if we found a skymap
+        if (best_skymap_filename is None):
+            raise ValueError("Unable to determine a skymap recommendation")
+
+        # set the filename
+        file_listing_obj.urls = [best_skymap_filename]
+        download_obj = self.download_using_urls(
+            file_listing_obj,
+            progress_bar_disable=True,
+            overwrite=overwrite,
+            timeout=timeout,
+        )
+
+        # return
+        return download_obj
+
+    def download_best_flatfield_calibration(
+        self,
+        dataset_name: str,
+        device_uid: str,
+        timestamp: datetime.datetime,
+        timeout: Optional[int] = None,
+        overwrite: bool = False,
+    ) -> FileDownloadResult:
+        """
+        Download the flatfield calibration file that best matches the parameters supplied.
+
+        Args:
+            dataset_name (str): 
+                Name of the dataset to download data for. Use the `list_datasets()` function
+                to get the possible values for this parameter. One example is "REGO_CALIBRATION_FLATFIELD_IDLSAV". 
+                Note that dataset names are case sensitive. This parameter is required.
+
+            device_uid (str): 
+                The device UID to evaluate.
+
+            timestamp (datetime.datetime): 
+                The timestamp to use for deciding the best flatfield calibration file, expected to be in UTC. Any timezone 
+                data will be ignored. This parameter is required.
+
+            timeout (int): 
+                Represents how many seconds to wait for the API to send data before giving up. The 
+                default is 10 seconds, or the `api_timeout` value in the super class' `pyaurorax.PyAuroraX`
+                object. This parameter is optional.
+
+            overwrite (bool): 
+                By default, the file will not be re-downloaded if it already exists locally. Use 
+                the `overwrite` parameter to force re-downloading. Default is `False`. This 
+                parameter is optional.
+
+        Returns:
+            A [`FileDownloadResult`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.FileDownloadResult) 
+            object containing details about what data files were downloaded.
+
+        Raises:
+            ValueError: issue with supplied timestamp
+            pyaurorax.exceptions.AuroraXAPIError: an API error was encountered        
+        """
+        # get list of all flatfield urls for the dataset and device
+        start_dt = datetime.datetime(2000, 1, 1)
+        end_dt = datetime.datetime.now() + datetime.timedelta(days=5)
+        file_listing_obj = self.get_urls(dataset_name, start_dt, end_dt, device_uid=device_uid, timeout=timeout)
+
+        # filter down and find the best flatfield calibration file for the timestamp supplied
+        best_cal_filename = None
+        for url in file_listing_obj.urls:
+            # extract start date for this calibration file
+            url_short = url.replace(file_listing_obj.path_prefix + "/", "")
+
+            # parse filename into several values
+            filename_split = os.path.basename(url_short).split('_')
+            filename_times_split = filename_split[3].split('-')
+            valid_interval_start_dt = datetime.datetime.strptime(filename_times_split[0], "%Y%m%d")
+
+            # check start time
+            if (timestamp >= valid_interval_start_dt):
+                # valid
+                #
+                # NOTE: this works because of the order that the list is in already
+                best_cal_filename = url
+
+        # check if we found a calibration file
+        if (best_cal_filename is None):
+            raise ValueError("Unable to determine a flatfield calibration recommendation")
+
+        # set the filename
+        file_listing_obj.urls = [best_cal_filename]
+        download_obj = self.download_using_urls(
+            file_listing_obj,
+            progress_bar_disable=True,
+            overwrite=overwrite,
+            timeout=timeout,
+        )
+
+        # return
+        return download_obj
+
+    def download_best_rayleighs_calibration(
+        self,
+        dataset_name: str,
+        device_uid: str,
+        timestamp: datetime.datetime,
+        timeout: Optional[int] = None,
+        overwrite: bool = False,
+    ) -> FileDownloadResult:
+        """
+        Download the Rayleighs calibration file that best matches the parameters supplied.
+
+        Args:
+            dataset_name (str): 
+                Name of the dataset to download data for. Use the `list_datasets()` function
+                to get the possible values for this parameter. One example is "REGO_CALIBRATION_RAYLEIGHS_IDLSAV". 
+                Note that dataset names are case sensitive. This parameter is required.
+
+            device_uid (str): 
+                The device UID to evaluate.
+
+            timestamp (datetime.datetime): 
+                The timestamp to use for deciding the best calibration file, expected to be in 
+                UTC. Any timezone data will be ignored. This parameter is required.
+
+            timeout (int): 
+                Represents how many seconds to wait for the API to send data before giving up. The 
+                default is 10 seconds, or the `api_timeout` value in the super class' `pyaurorax.PyAuroraX`
+                object. This parameter is optional.
+
+            overwrite (bool): 
+                By default, the file will not be re-downloaded if it already exists locally. Use 
+                the `overwrite` parameter to force re-downloading. Default is `False`. This 
+                parameter is optional.
+
+        Returns:
+            A [`FileDownloadResult`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.FileDownloadResult) 
+            object containing details about what data files were downloaded.
+
+        Raises:
+            ValueError: issue with supplied timestamp
+            pyaurorax.exceptions.AuroraXAPIError: an API error was encountered        
+        """
+        # get list of all rayleighs urls for the dataset and device
+        start_dt = datetime.datetime(2000, 1, 1)
+        end_dt = datetime.datetime.now() + datetime.timedelta(days=5)
+        file_listing_obj = self.get_urls(dataset_name, start_dt, end_dt, device_uid=device_uid, timeout=timeout)
+
+        # filter down and find the best Rayleighs calibration file for the timestamp supplied
+        best_cal_filename = None
+        for url in file_listing_obj.urls:
+            # extract start date for this calibration file
+            url_short = url.replace(file_listing_obj.path_prefix + "/", "")
+
+            # parse filename into several values
+            filename_split = os.path.basename(url_short).split('_')
+            filename_times_split = filename_split[3].split('-')
+            valid_interval_start_dt = datetime.datetime.strptime(filename_times_split[0], "%Y%m%d")
+
+            # check start time
+            if (timestamp >= valid_interval_start_dt):
+                # valid
+                #
+                # NOTE: this works because of the order that the list is in already
+                best_cal_filename = url
+
+        # check if we found a calibration file
+        if (best_cal_filename is None):
+            raise ValueError("Unable to determine a Rayleighs calibration recommendation")
+
+        # set the filename
+        file_listing_obj.urls = [best_cal_filename]
+        download_obj = self.download_using_urls(
+            file_listing_obj,
+            progress_bar_disable=True,
+            overwrite=overwrite,
+            timeout=timeout,
+        )
+
+        # return
+        return download_obj
+
+
+
+
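+Before the sub-module and class listings, here is a minimal usage sketch tying together the download, read, and best-skymap helpers shown in the source above. It is illustrative only: the `aurorax.data.ucalgary` access path and the dataset/site values are assumptions based on the examples in the docstrings, so adjust them to your own datasets.
+```python
+import datetime
+import pyaurorax
+
+aurorax = pyaurorax.PyAuroraX()
+
+# download an hour of THEMIS ASI raw data for the 'atha' site (example values)
+start = datetime.datetime(2021, 11, 4, 6, 0, 0)
+end = datetime.datetime(2021, 11, 4, 6, 59, 59)
+res = aurorax.data.ucalgary.download("THEMIS_ASI_RAW", start, end, site_uid="atha")
+
+# read the downloaded files into a Data object
+data = aurorax.data.ucalgary.read(res.dataset, res.filenames, n_parallel=2)
+
+# retrieve the recommended skymap for the same site and time
+skymap_download = aurorax.data.ucalgary.download_best_skymap("THEMIS_ASI_SKYMAP_IDLSAV", "atha", start)
+```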

Sub-modules

+
+
pyaurorax.data.ucalgary.read
+
+
+
+
+
+
+
+
+
+
+

Classes

+
+
+class Calibration +(filename: str, detector_uid: str, version: str, generation_info: pyucalgarysrs.data.classes.CalibrationGenerationInfo, rayleighs_perdn_persecond: Optional[float] = None, flat_field_multiplier: Optional[numpy.ndarray] = None, dataset: Optional[pyucalgarysrs.data.classes.Dataset] = None) +
+
+

Representation for a calibration file.

+

Attributes

+
+
filename : str
+
Filename for the calibration file, as an absolute path of its location on the local machine.
+
detector_uid : str
+
Detector/imager/camera unique identifier
+
version : str
+
Version number of the calibration file
+
generation_info : CalibrationGenerationInfo
+
Metadata describing details about this calibration's generation process
+
rayleighs_perdn_persecond : float
+
Calibrated value for Rayleighs per data number per second (R/dn/s). This value will be None +if a flatfield calibration file was read instead of a rayleighs calibration file.
+
flat_field_multiplier : ndarray
+
Calibrated flat field array. This value will be None if a rayleighs calibration file was +read instead of a flatfield calibration file.
+
dataset : Dataset
+
The Dataset object for this data.
+
+
+ +Expand source code + +
@dataclass
+class Calibration:
+    """
+    Representation for a calibration file.
+
+    Attributes:
+        filename (str): 
+            Filename for the calibration file, as an absolute path of its location on the local machine.
+        
+        detector_uid (str): 
+            Detector/imager/camera unique identifier
+        
+        version (str): 
+            Version number of the calibration file
+        
+        generation_info (CalibrationGenerationInfo): 
+            Metadata describing details about this calibration's generation process
+        
+        rayleighs_perdn_persecond (float): 
+            Calibrated value for Rayleighs per data number per second (R/dn/s). This value will be None 
+            if a flatfield calibration file was read instead of a rayleighs calibration file.
+        
+        flat_field_multiplier (ndarray): 
+            Calibrated flat field array. This value will be None if a rayleighs calibration file was 
+            read instead of a flatfield calibration file.
+        
+        dataset (Dataset): 
+            The `Dataset` object for this data.
+    """
+    filename: str
+    detector_uid: str
+    version: str
+    generation_info: CalibrationGenerationInfo
+    rayleighs_perdn_persecond: Optional[float] = None
+    flat_field_multiplier: Optional[ndarray] = None
+    dataset: Optional[Dataset] = None
+
+    def pretty_print(self):
+        """
+        A special print output for this class.
+        """
+        print("Calibration:")
+        for var_name in dir(self):
+            # exclude methods
+            if (var_name.startswith("__") or var_name == "pretty_print"):
+                continue
+
+            # convert var to string format we want
+            var_value = getattr(self, var_name)
+            var_str = "None"
+            if (var_name == "generation_info"):
+                var_str = "CalibrationGenerationInfo(...)"
+            elif (var_name == "dataset" and var_value is not None):
+                var_str = "Dataset(...)"
+            elif (var_value is not None):
+                if (isinstance(var_value, ndarray)):
+                    var_str = "array(dims=%s, dtype=%s)" % (var_value.shape, var_value.dtype)
+                else:
+                    var_str = str(var_value)
+
+            # print string for this var
+            print("  %-30s: %s" % (var_name, var_str))
+
+

Class variables

+
+
var dataset : Optional[pyucalgarysrs.data.classes.Dataset]
+
+
+
+
var detector_uid : str
+
+
+
+
var filename : str
+
+
+
+
var flat_field_multiplier : Optional[numpy.ndarray]
+
+
+
+
var generation_info : pyucalgarysrs.data.classes.CalibrationGenerationInfo
+
+
+
+
var rayleighs_perdn_persecond : Optional[float]
+
+
+
+
var version : str
+
+
+
+
+

Methods

+
+
+def pretty_print(self) +
+
+

A special print output for this class.

+
+ +Expand source code + +
def pretty_print(self):
+    """
+    A special print output for this class.
+    """
+    print("Calibration:")
+    for var_name in dir(self):
+        # exclude methods
+        if (var_name.startswith("__") or var_name == "pretty_print"):
+            continue
+
+        # convert var to string format we want
+        var_value = getattr(self, var_name)
+        var_str = "None"
+        if (var_name == "generation_info"):
+            var_str = "CalibrationGenerationInfo(...)"
+        elif (var_name == "dataset" and var_value is not None):
+            var_str = "Dataset(...)"
+        elif (var_value is not None):
+            if (isinstance(var_value, ndarray)):
+                var_str = "array(dims=%s, dtype=%s)" % (var_value.shape, var_value.dtype)
+            else:
+                var_str = str(var_value)
+
+        # print string for this var
+        print("  %-30s: %s" % (var_name, var_str))
+
+
+
+
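+A short, hypothetical sketch of how a `Calibration` object is usually obtained and inspected. It assumes the manager is reached via `aurorax.data.ucalgary` and that the dataset name and device UID used below exist on the data platform; they are example values only.
+```python
+import datetime
+import pyaurorax
+
+aurorax = pyaurorax.PyAuroraX()
+
+# download the best-matching Rayleighs calibration file for a device (example values)
+res = aurorax.data.ucalgary.download_best_rayleighs_calibration(
+    "REGO_CALIBRATION_RAYLEIGHS_IDLSAV", "654", datetime.datetime(2021, 11, 4, 6, 0, 0))
+
+# reading calibration files yields a Data object whose 'data' attribute is a List[Calibration]
+data = aurorax.data.ucalgary.read(res.dataset, res.filenames)
+cal = data.data[0]
+cal.pretty_print()
+print(cal.rayleighs_perdn_persecond)  # None if a flatfield calibration was read instead
+```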
+
+class Data +(data: Any, timestamp: List[datetime.datetime], metadata: List[Dict], problematic_files: List[pyucalgarysrs.data.classes.ProblematicFile], calibrated_data: Any, dataset: Optional[pyucalgarysrs.data.classes.Dataset] = None) +
+
+

Representation of the data read in from a pyaurorax.data.ucalgary.read call.

+

Attributes

+
+
data : Any
+
The loaded data. This can be one of the following types: ndarray, List[Skymap], List[Calibration].
+
timestamp : List[datetime.datetime]
+
List of timestamps for the read in data.
+
metadata : List[Dict]
+
List of dictionaries containing metadata specific to each timestamp/image/record.
+
problematic_files : List[ProblematicFile]
+
A list detailing any files that encountered issues during reading.
+
calibrated_data : Any
+
A calibrated version of the data. Populated and utilized by data analysis libraries. Has a None value +until calibrated data is inserted manually.
+
dataset : Dataset
+
The Dataset object for this data.
+
+
+ +Expand source code + +
@dataclass
+class Data:
+    """
+    Representation of the data read in from a `read` call.
+
+    Attributes:
+        data (Any): 
+            The loaded data. This can be one of the following types: ndarray, List[Skymap], List[Calibration].
+        
+        timestamp (List[datetime.datetime]): 
+            List of timestamps for the read in data.
+        
+        metadata (List[Dict]): 
+            List of dictionaries containing metadata specific to each timestamp/image/record.
+        
+        problematic_files (List[ProblematicFile]): 
+            A list detailing any files that encountered issues during reading.
+        
+        calibrated_data (Any): 
+            A calibrated version of the data. Populated and utilized by data analysis libraries. Has a `None` value
+            until calibrated data is inserted manually.
+
+        dataset (Dataset): 
+            The `Dataset` object for this data.
+    """
+    data: Any
+    timestamp: List[datetime.datetime]
+    metadata: List[Dict]
+    problematic_files: List[ProblematicFile]
+    calibrated_data: Any
+    dataset: Optional[Dataset] = None
+
+    def __str__(self) -> str:
+        return self.__repr__()
+
+    def __repr__(self) -> str:
+        # set data value
+        if (isinstance(self.data, ndarray) is True):
+            data_str = "array(dims=%s, dtype=%s)" % (self.data.shape, self.data.dtype)
+        elif (isinstance(self.data, list) is True):
+            if (len(self.data) == 0):
+                data_str = "[0 items]"
+            elif (isinstance(self.data[0], Skymap) is True):
+                if (len(self.data) == 1):
+                    data_str = "[1 Skymap object]"
+                else:
+                    data_str = "[%d Skymap objects]" % (len(self.data))
+            elif (isinstance(self.data[0], Calibration) is True):
+                if (len(self.data) == 1):
+                    data_str = "[1 Calibration object]"
+                else:
+                    data_str = "[%d Calibration objects]" % (len(self.data))
+            else:
+                data_str = "[%d items]" % (len(self.data))
+        else:
+            data_str = self.data.__repr__()
+
+        # set timestamp string
+        if (len(self.timestamp) == 0):
+            timestamp_str = "[]"
+        elif (len(self.timestamp) == 1):
+            timestamp_str = "[1 datetime]"
+        else:
+            timestamp_str = "[%d datetimes]" % (len(self.timestamp))
+
+        # set metadata string
+        if (len(self.metadata) == 0):
+            metadata_str = "[]"
+        elif (len(self.metadata) == 1):
+            metadata_str = "[1 dictionary]"
+        else:
+            metadata_str = "[%d dictionaries]" % (len(self.metadata))
+
+        # set rest of values
+        problematic_files_str = "[]" if len(self.problematic_files) == 0 else "[%d problematic files]" % (len(self.problematic_files))
+        calibrated_data_str = "None" if self.calibrated_data is None else "array(dims=%s, dtype=%s)" % (self.calibrated_data.shape,
+                                                                                                        self.calibrated_data.dtype)
+        dataset_str = "None" if self.dataset is None else self.dataset.__repr__()[0:75] + "...)"
+
+        # return
+        return "Data(data=%s, timestamp=%s, metadata=%s, problematic_files=%s, calibrated_data=%s, dataset=%s)" % (
+            data_str,
+            timestamp_str,
+            metadata_str,
+            problematic_files_str,
+            calibrated_data_str,
+            dataset_str,
+        )
+
+    def pretty_print(self):
+        """
+        A special print output for this class.
+        """
+        # set data value
+        if (isinstance(self.data, ndarray) is True):
+            data_str = "array(dims=%s, dtype=%s)" % (self.data.shape, self.data.dtype)
+        elif (isinstance(self.data, list) is True):
+            if (len(self.data) == 0):
+                data_str = "[0 items]"
+            elif (isinstance(self.data[0], Skymap) is True):
+                if (len(self.data) == 1):
+                    data_str = "[1 Skymap object]"
+                else:
+                    data_str = "[%d Skymap objects]" % (len(self.data))
+            elif (isinstance(self.data[0], Calibration) is True):
+                if (len(self.data) == 1):
+                    data_str = "[1 Calibration object]"
+                else:
+                    data_str = "[%d Calibration objects]" % (len(self.data))
+            else:
+                data_str = "[%d items]" % (len(self.data))
+        else:
+            data_str = self.data.__repr__()
+
+        # set timestamp string
+        if (len(self.timestamp) == 0):
+            timestamp_str = "[]"
+        elif (len(self.timestamp) == 1):
+            timestamp_str = "[1 datetime]"
+        else:
+            timestamp_str = "[%d datetimes]" % (len(self.timestamp))
+
+        # set metadata string
+        if (len(self.metadata) == 0):
+            metadata_str = "[]"
+        elif (len(self.metadata) == 1):
+            metadata_str = "[1 dictionary]"
+        else:
+            metadata_str = "[%d dictionaries]" % (len(self.metadata))
+
+        # set rest of values
+        problematic_files_str = "[]" if len(self.problematic_files) == 0 else "[%d problematic files]" % (len(self.problematic_files))
+        calibrated_data_str = "None" if self.calibrated_data is None else "array(dims=%s, dtype=%s)" % (self.calibrated_data.shape,
+                                                                                                        self.calibrated_data.dtype)
+        dataset_str = "None" if self.dataset is None else self.dataset.__repr__()[0:75] + "...)"
+
+        # print
+        print("Data:")
+        print("  %-22s: %s" % ("data", data_str))
+        print("  %-22s: %s" % ("timestamp", timestamp_str))
+        print("  %-22s: %s" % ("metadata", metadata_str))
+        print("  %-22s: %s" % ("problematic_files", problematic_files_str))
+        print("  %-22s: %s" % ("calibrated_data", calibrated_data_str))
+        print("  %-22s: %s" % ("dataset", dataset_str))
+
+

Class variables

+
+
var calibrated_data : Any
+
+
+
+
var data : Any
+
+
+
+
var dataset : Optional[pyucalgarysrs.data.classes.Dataset]
+
+
+
+
var metadata : List[Dict]
+
+
+
+
var problematic_files : List[pyucalgarysrs.data.classes.ProblematicFile]
+
+
+
+
var timestamp : List[datetime.datetime]
+
+
+
+
+

Methods

+
+
+def pretty_print(self) +
+
+

A special print output for this class.

+
+ +Expand source code + +
def pretty_print(self):
+    """
+    A special print output for this class.
+    """
+    # set data value
+    if (isinstance(self.data, ndarray) is True):
+        data_str = "array(dims=%s, dtype=%s)" % (self.data.shape, self.data.dtype)
+    elif (isinstance(self.data, list) is True):
+        if (len(self.data) == 0):
+            data_str = "[0 items]"
+        elif (isinstance(self.data[0], Skymap) is True):
+            if (len(self.data) == 1):
+                data_str = "[1 Skymap object]"
+            else:
+                data_str = "[%d Skymap objects]" % (len(self.data))
+        elif (isinstance(self.data[0], Calibration) is True):
+            if (len(self.data) == 1):
+                data_str = "[1 Calibration object]"
+            else:
+                data_str = "[%d Calibration objects]" % (len(self.data))
+        else:
+            data_str = "[%d items]" % (len(self.data))
+    else:
+        data_str = self.data.__repr__()
+
+    # set timestamp string
+    if (len(self.timestamp) == 0):
+        timestamp_str = "[]"
+    elif (len(self.timestamp) == 1):
+        timestamp_str = "[1 datetime]"
+    else:
+        timestamp_str = "[%d datetimes]" % (len(self.timestamp))
+
+    # set metadata string
+    if (len(self.metadata) == 0):
+        metadata_str = "[]"
+    elif (len(self.metadata) == 1):
+        metadata_str = "[1 dictionary]"
+    else:
+        metadata_str = "[%d dictionaries]" % (len(self.metadata))
+
+    # set rest of values
+    problematic_files_str = "[]" if len(self.problematic_files) == 0 else "[%d problematic files]" % (len(self.problematic_files))
+    calibrated_data_str = "None" if self.calibrated_data is None else "array(dims=%s, dtype=%s)" % (self.calibrated_data.shape,
+                                                                                                    self.calibrated_data.dtype)
+    dataset_str = "None" if self.dataset is None else self.dataset.__repr__()[0:75] + "...)"
+
+    # print
+    print("Data:")
+    print("  %-22s: %s" % ("data", data_str))
+    print("  %-22s: %s" % ("timestamp", timestamp_str))
+    print("  %-22s: %s" % ("metadata", metadata_str))
+    print("  %-22s: %s" % ("problematic_files", problematic_files_str))
+    print("  %-22s: %s" % ("calibrated_data", calibrated_data_str))
+    print("  %-22s: %s" % ("dataset", dataset_str))
+
+
+
+
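+A brief sketch of inspecting a `Data` object after a read call. This is illustrative only; `data` is assumed to come from a prior `aurorax.data.ucalgary.read()` call such as the one shown earlier.
+```python
+# 'data' is assumed to be the Data object returned by a read() call
+data.pretty_print()                  # summary of all attributes
+print(data.data.shape)               # image stack dimensions, when 'data' holds an ndarray
+print(len(data.timestamp))           # one timestamp per record/image
+print(data.metadata[0])              # per-record metadata dictionary
+print(data.problematic_files)        # files that encountered read errors, if any
+```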
+
+class Dataset +(name: str, short_description: str, long_description: str, data_tree_url: str, file_listing_supported: bool, file_reading_supported: bool, level: str, doi: Optional[str] = None, doi_details: Optional[str] = None, citation: Optional[str] = None) +
+
+

A dataset available from the UCalgary Space Remote Sensing API, with possible support for downloading and/or reading.

+

Attributes

+
+
name : str
+
Dataset name
+
short_description : str
+
A short description about the dataset
+
long_description : str
+
A longer description about the dataset
+
data_tree_url : str
+
The data tree URL prefix. Used for saving data locally with a data tree structure similar to that of the UCalgary Open Data archive.
+
file_listing_supported : bool
+
Flag indicating if file listing (downloading) is supported for this dataset.
+
file_reading_supported : bool
+
Flag indicating if file reading is supported for this dataset.
+
level : str
+
Dataset level as per L0/L1/L2/etc standards.
+
doi : str
+
Dataset DOI unique identifier.
+
doi_details : str
+
Further details about the DOI.
+
citation : str
+
String to use when citing usage of the dataset.
+
provider : str
+
Data provider.
+
+
+ +Expand source code + +
class Dataset:
+    """
+    A dataset available from the UCalgary Space Remote Sensing API, with possible
+    support for downloading and/or reading.
+
+    Attributes:
+        name (str): 
+            Dataset name
+        
+        short_description (str): 
+            A short description about the dataset
+        
+        long_description (str): 
+            A longer description about the dataset
+        
+        data_tree_url (str): 
+            The data tree URL prefix. Used for saving data locally with a data tree structure 
+            similar to that of the UCalgary Open Data archive.
+        
+        file_listing_supported (bool): 
+            Flag indicating if file listing (downloading) is supported for this dataset.
+        
+        file_reading_supported (bool): 
+            Flag indicating if file reading is supported for this dataset.
+        
+        level (str): 
+            Dataset level as per L0/L1/L2/etc standards.
+        
+        doi (str): 
+            Dataset DOI unique identifier.
+        
+        doi_details (str): 
+            Further details about the DOI.
+        
+        citation (str): 
+            String to use when citing usage of the dataset.
+        
+        provider (str): 
+            Data provider.
+    """
+
+    def __init__(self,
+                 name: str,
+                 short_description: str,
+                 long_description: str,
+                 data_tree_url: str,
+                 file_listing_supported: bool,
+                 file_reading_supported: bool,
+                 level: str,
+                 doi: Optional[str] = None,
+                 doi_details: Optional[str] = None,
+                 citation: Optional[str] = None):
+        self.name = name
+        self.short_description = short_description
+        self.long_description = long_description
+        self.data_tree_url = data_tree_url
+        self.file_listing_supported = file_listing_supported
+        self.file_reading_supported = file_reading_supported
+        self.level = level
+        self.doi = doi
+        self.doi_details = doi_details
+        self.citation = citation
+        self.provider = "UCalgary"
+
+    def __str__(self) -> str:
+        return self.__repr__()
+
+    def __repr__(self) -> str:
+        return "Dataset(name=%s, short_description='%s', provider='%s', level='%s', doi_details='%s', ...)" % (
+            self.name,
+            self.short_description,
+            self.provider,
+            self.level,
+            self.doi_details,
+        )
+
+    def pretty_print(self):
+        """
+        A special print output for this class.
+        """
+        print("Dataset:")
+        for var_name in dir(self):
+            # exclude methods
+            if (var_name.startswith("__") or var_name == "pretty_print"):
+                continue
+
+            # convert var to string format we want
+            var_value = getattr(self, var_name)
+            print("  %-27s: %s" % (var_name, None if var_value is None else var_value))
+
+

Methods

+
+
+def pretty_print(self) +
+
+

A special print output for this class.

+
+ +Expand source code + +
def pretty_print(self):
+    """
+    A special print output for this class.
+    """
+    print("Dataset:")
+    for var_name in dir(self):
+        # exclude methods
+        if (var_name.startswith("__") or var_name == "pretty_print"):
+            continue
+
+        # convert var to string format we want
+        var_value = getattr(self, var_name)
+        print("  %-27s: %s" % (var_name, None if var_value is None else var_value))
+
+
+
+
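+As an illustrative sketch, `Dataset` objects are typically obtained from the manager's `list_datasets()` method and inspected like so (the `aurorax.data.ucalgary` access path and the name filter value are assumptions):
+```python
+import pyaurorax
+
+aurorax = pyaurorax.PyAuroraX()
+
+# list datasets, optionally filtering by name
+datasets = aurorax.data.ucalgary.list_datasets(name="THEMIS_ASI")
+for d in datasets:
+    print(d.name, d.level, d.doi)
+
+# detailed view of a single dataset
+datasets[0].pretty_print()
+```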
+
+class FileDownloadResult +(filenames: List[str], count: int, total_bytes: int, output_root_path: str, dataset: pyucalgarysrs.data.classes.Dataset) +
+
+

Representation of the results from a data download call.

+

Attributes

+
+
filenames : List[str]
+
List of downloaded files, as absolute paths of their location on the local machine.
+
count : int
+
Number of files downloaded
+
total_bytes : int
+
Cumulative amount of bytes saved on the local machine.
+
output_root_path : str
+
The root path of where the data was saved to on the local machine.
+
dataset : Dataset
+
The Dataset object for this data.
+
+
+ +Expand source code + +
@dataclass
+class FileDownloadResult:
+    """
+    Representation of the results from a data download call.
+
+    Attributes:
+        filenames (List[str]): 
+            List of downloaded files, as absolute paths of their location on the local machine.
+        
+        count (int): 
+            Number of files downloaded
+        
+        total_bytes (int): 
+            Cumulative amount of bytes saved on the local machine.
+        
+        output_root_path (str): 
+            The root path of where the data was saved to on the local machine.
+        
+        dataset (Dataset): 
+            The `Dataset` object for this data.
+    """
+    filenames: List[str]
+    count: int
+    total_bytes: int
+    output_root_path: str
+    dataset: Dataset
+
+

Class variables

+
+
var count : int
+
+
+
+
var dataset : pyucalgarysrs.data.classes.Dataset
+
+
+
+
var filenames : List[str]
+
+
+
+
var output_root_path : str
+
+
+
+
var total_bytes : int
+
+
+
+
+
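+A small sketch of how a `FileDownloadResult` is typically used once a download has completed. Here `res` is assumed to be the result of a prior `aurorax.data.ucalgary.download()` call like the one shown earlier; this is illustrative only.
+```python
+# 'res' is assumed to be a FileDownloadResult from a prior download() call
+print("%d files, %d bytes" % (res.count, res.total_bytes))
+print(res.output_root_path)          # where the files were saved locally
+
+# the filenames list feeds directly into a read() call
+data = aurorax.data.ucalgary.read(res.dataset, res.filenames)
+```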
+
+class FileListingResponse +(urls: List[str], path_prefix: str, count: int, dataset: pyucalgarysrs.data.classes.Dataset, total_bytes: Optional[int] = None) +
+
+

Representation of the file listing response from the UCalgary Space Remote Sensing API.

+

Attributes

+
+
urls : List[str]
+
A list of URLs for available data files.
+
path_prefix : str
+
The URL prefix, which is used for saving data locally with a data tree structure similar to that of the UCalgary Open Data archive.
+
count : int
+
The number of URLs available.
+
dataset : Dataset
+
The Dataset object for this data.
+
total_bytes : int
+
The cumulative amount of bytes for the available URLs.
+
+
+ +Expand source code + +
@dataclass
+class FileListingResponse:
+    """
+    Representation of the file listing response from the UCalgary Space Remote Sensing API.
+
+    Attributes:
+        urls (List[str]): 
+            A list of URLs for available data files.
+        
+        path_prefix (str): 
+            The URL prefix, which is used for saving data locally with a data tree structure 
+            similar to that of the UCalgary Open Data archive.
+        
+        count (int): 
+            The number of URLs available.
+        
+        dataset (Dataset): 
+            The `Dataset` object for this data.
+        
+        total_bytes (int): 
+            The cumulative amount of bytes for the available URLs.
+    """
+    urls: List[str]
+    path_prefix: str
+    count: int
+    dataset: Dataset
+    total_bytes: Optional[int] = None
+
+

Class variables

+
+
var count : int
+
+
+
+
var dataset : pyucalgarysrs.data.classes.Dataset
+
+
+
+
var path_prefix : str
+
+
+
+
var total_bytes : Optional[int]
+
+
+
+
var urls : List[str]
+
+
+
+
+
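+An illustrative sketch of the file-listing workflow this class supports: fetch URLs with `get_urls()`, down-select them, and hand the modified object to `download_using_urls()`. The `aurorax.data.ucalgary` access path, the dataset/site values, and the one-file-per-minute cadence implied by the `[::60]` slice are assumptions.
+```python
+import datetime
+import pyaurorax
+
+aurorax = pyaurorax.PyAuroraX()
+
+# list all available file URLs for a day of data (example dataset/site values)
+start = datetime.datetime(2021, 11, 4, 0, 0, 0)
+end = datetime.datetime(2021, 11, 4, 23, 59, 59)
+file_listing = aurorax.data.ucalgary.get_urls("THEMIS_ASI_RAW", start, end, site_uid="atha")
+print(file_listing.count, "URLs under", file_listing.path_prefix)
+
+# down-sample to roughly one file per hour, then download only those
+file_listing.urls = file_listing.urls[::60]
+res = aurorax.data.ucalgary.download_using_urls(file_listing)
+```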
+
+class Observatory +(uid: str, full_name: str, geodetic_latitude: float, geodetic_longitude: float) +
+
+

Representation for an observatory.

+

Attributes

+
+
uid : str
+
4-letter unique identifier (traditionally referred to as the site UID)
+
full_name : str
+
full location string for the observatory
+
geodetic_latitude : float
+
geodetic latitude for the observatory, in decimal format (-90 to 90)
+
geodetic_longitude : float
+
geodetic longitude for the observatory, in decimal format (-180 to 180)
+
provider : str
+
Data provider.
+
+
+ +Expand source code + +
class Observatory:
+    """
+    Representation for an observatory.
+
+    Attributes:
+        uid (str): 
+            4-letter unique identifier (traditionally referred to as the site UID)
+
+        full_name (str): 
+            full location string for the observatory
+        
+        geodetic_latitude (float): 
+            geodetic latitude for the observatory, in decimal format (-90 to 90)
+        
+        geodetic_longitude (float): 
+            geodetic longitude for the observatory, in decimal format (-180 to 180)
+
+        provider (str): 
+            Data provider.
+    """
+
+    def __init__(self, uid: str, full_name: str, geodetic_latitude: float, geodetic_longitude: float):
+        self.uid = uid
+        self.full_name = full_name
+        self.geodetic_latitude = geodetic_latitude
+        self.geodetic_longitude = geodetic_longitude
+        self.provider = "UCalgary"
+
+    def __str__(self) -> str:
+        return self.__repr__()
+
+    def __repr__(self) -> str:
+        return "Observatory(uid=%s, full_name='%s', geodetic_latitude=%s, geodetic_longitude=%s, provider='%s')" % (
+            self.uid,
+            self.full_name,
+            self.geodetic_latitude,
+            self.geodetic_longitude,
+            self.provider,
+        )
+
+    def pretty_print(self):
+        """
+        A special print output for this class.
+        """
+        print("Observatory:")
+        for var_name in dir(self):
+            # exclude methods
+            if (var_name.startswith("__") or var_name == "pretty_print"):
+                continue
+
+            # convert var to string format we want
+            var_value = getattr(self, var_name)
+            print("  %-22s: %s" % (var_name, None if var_value is None else var_value))
+
+

Methods

+
+
+def pretty_print(self) +
+
+

A special print output for this class.

+
+ +Expand source code + +
def pretty_print(self):
+    """
+    A special print output for this class.
+    """
+    print("Observatory:")
+    for var_name in dir(self):
+        # exclude methods
+        if (var_name.startswith("__") or var_name == "pretty_print"):
+            continue
+
+        # convert var to string format we want
+        var_value = getattr(self, var_name)
+        print("  %-22s: %s" % (var_name, None if var_value is None else var_value))
+
+
+
+
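+A short sketch of retrieving `Observatory` objects via the manager's `list_observatories()` method (illustrative only; the `aurorax.data.ucalgary` access path is an assumption, and the instrument array value follows the documentation above):
+```python
+import pyaurorax
+
+aurorax = pyaurorax.PyAuroraX()
+
+# list all THEMIS ASI observatories
+observatories = aurorax.data.ucalgary.list_observatories("themis_asi")
+for obs in observatories:
+    print(obs.uid, obs.full_name, obs.geodetic_latitude, obs.geodetic_longitude)
+
+# detailed view of a single observatory
+observatories[0].pretty_print()
+```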
+
+class Skymap +(filename: str, project_uid: str, site_uid: str, imager_uid: str, site_map_latitude: float, site_map_longitude: float, site_map_altitude: float, full_elevation: numpy.ndarray, full_azimuth: numpy.ndarray, full_map_altitude: numpy.ndarray, full_map_latitude: numpy.ndarray, full_map_longitude: numpy.ndarray, generation_info: pyucalgarysrs.data.classes.SkymapGenerationInfo, version: str) +
+
+

Representation for a skymap file.

+

Attributes

+
+
filename : str
+
Filename for the skymap file, as an absolute path of its location on the local machine.
+
project_uid : str
+
Project unique identifier
+
site_uid : str
+
Site unique identifier
+
imager_uid : str
+
Imager/device unique identifier
+
site_map_latitude : float
+
Geodetic latitude of instrument
+
site_map_longitude : float
+
Geodetic longitude of instrument
+
site_map_altitude : float
+
Altitude of the instrument (in meters)
+
full_elevation : ndarray
+
Elevation angle from horizon, for each image pixel (in degrees)
+
full_azimuth : ndarray
+
Local azimuth angle from 0 degrees north, positive moving east (in degrees)
+
full_map_altitude : ndarray
+
Altitudes that image coordinates are mapped to (in kilometers)
+
full_map_latitude : ndarray
+
Geodetic latitudes of pixel corners, mapped to various altitudes (specified by full_map_altitude)
+
full_map_longitude : ndarray
+
Geodetic longitudes of pixel corners, mapped to various altitudes (specified by full_map_altitude)
+
generation_info : SkymapGenerationInfo
+
Metadata describing details about this skymap's generation process
+
version : str
+
Version of the skymap
+
dataset : Dataset
+
The Dataset object for this data.
+
+
+ +Expand source code + +
@dataclass
+class Skymap:
+    """
+    Representation for a skymap file.
+
+    Attributes:
+        filename (str): 
+            Filename for the skymap file, as an absolute path of its location on the local machine.
+        
+        project_uid (str): 
+            Project unique identifier
+        
+        site_uid (str): 
+            Site unique identifier
+        
+        imager_uid (str): 
+            Imager/device unique identifier
+        
+        site_map_latitude (float): 
+            Geodetic latitude of instrument
+        
+        site_map_longitude (float): 
+            Geodetic longitude of instrument
+        
+        site_map_altitude (float): 
+            Altitude of the instrument (in meters)
+        
+        full_elevation (ndarray): 
+            Elevation angle from horizon, for each image pixel (in degrees)
+        
+        full_azimuth (ndarray): 
+            Local azimuth angle from 0 degrees north, positive moving east (in degrees)
+        
+        full_map_altitude (ndarray): 
+            Altitudes that image coordinates are mapped to (in kilometers)
+        
+        full_map_latitude (ndarray): 
+            Geodetic latitudes of pixel corners, mapped to various altitudes (specified by `full_map_altitude`)
+        
+        full_map_longitude (ndarray): 
+            Geodetic longitudes of pixel corners, mapped to various altitudes (specified by `full_map_altitude`)
+        
+        generation_info (SkymapGenerationInfo): 
+            Metadata describing details about this skymap's generation process
+        
+        version (str): 
+            Version of the skymap
+        
+        dataset (Dataset): 
+            The `Dataset` object for this data.
+    """
+    filename: str
+    project_uid: str
+    site_uid: str
+    imager_uid: str
+    site_map_latitude: float
+    site_map_longitude: float
+    site_map_altitude: float
+    full_elevation: ndarray
+    full_azimuth: ndarray
+    full_map_altitude: ndarray
+    full_map_latitude: ndarray
+    full_map_longitude: ndarray
+    generation_info: SkymapGenerationInfo
+    version: str
+
+    def __str__(self) -> str:
+        return self.__repr__()
+
+    def __repr__(self) -> str:
+        return "Skymap(project_uid=%s, site_uid=%s, imager_uid=%s, site_map_latitude=%f, site_map_longitude=%f, ...)" % (
+            self.project_uid,
+            self.site_uid,
+            self.imager_uid,
+            self.site_map_latitude,
+            self.site_map_longitude,
+        )
+
+    def pretty_print(self):
+        """
+        A special print output for this class.
+        """
+        print("Skymap:")
+        for var_name in dir(self):
+            # exclude methods
+            if (var_name.startswith("__") or var_name == "pretty_print"):
+                continue
+
+            # convert var to string format we want
+            var_value = getattr(self, var_name)
+            var_str = "None"
+            if (var_name == "generation_info"):
+                var_str = "SkymapGenerationInfo(...)"
+            elif (var_value is not None):
+                if (isinstance(var_value, ndarray)):
+                    var_str = "array(dims=%s, dtype=%s)" % (var_value.shape, var_value.dtype)
+                else:
+                    var_str = str(var_value)
+
+            # print string for this var
+            print("  %-23s: %s" % (var_name, var_str))
+
+    def get_precalculated_altitudes(self):
+        """
+        Get the altitudes that have been precalculated in this skymap. Units are kilometers.
+        """
+        alts_km = [float(x / 1000.) for x in self.full_map_altitude]
+        return alts_km
+
+

Class variables

+
+
var filename : str
+
+
+
+
var full_azimuth : numpy.ndarray
+
+
+
+
var full_elevation : numpy.ndarray
+
+
+
+
var full_map_altitude : numpy.ndarray
+
+
+
+
var full_map_latitude : numpy.ndarray
+
+
+
+
var full_map_longitude : numpy.ndarray
+
+
+
+
var generation_info : pyucalgarysrs.data.classes.SkymapGenerationInfo
+
+
+
+
var imager_uid : str
+
+
+
+
var project_uid : str
+
+
+
+
var site_map_altitude : float
+
+
+
+
var site_map_latitude : float
+
+
+
+
var site_map_longitude : float
+
+
+
+
var site_uid : str
+
+
+
+
var version : str
+
+
+
+
+

Methods

+
+
+def get_precalculated_altitudes(self) +
+
+

Get the altitudes that have been precalculated in this skymap. Units are kilometers.

+
+ +Expand source code + +
def get_precalculated_altitudes(self):
+    """
+    Get the altitudes that have been precalculated in this skymap. Units are kilometers.
+    """
+    alts_km = [float(x / 1000.) for x in self.full_map_altitude]
+    return alts_km
+
+
+
+def pretty_print(self) +
+
+

A special print output for this class.

+
+ +Expand source code + +
def pretty_print(self):
+    """
+    A special print output for this class.
+    """
+    print("Skymap:")
+    for var_name in dir(self):
+        # exclude methods
+        if (var_name.startswith("__") or var_name == "pretty_print"):
+            continue
+
+        # convert var to string format we want
+        var_value = getattr(self, var_name)
+        var_str = "None"
+        if (var_name == "generation_info"):
+            var_str = "SkymapGenerationInfo(...)"
+        elif (var_value is not None):
+            if (isinstance(var_value, ndarray)):
+                var_str = "array(dims=%s, dtype=%s)" % (var_value.shape, var_value.dtype)
+            else:
+                var_str = str(var_value)
+
+        # print string for this var
+        print("  %-23s: %s" % (var_name, var_str))
+
+
+
+
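+A hypothetical sketch of obtaining and using a `Skymap` object, combining the `download_best_skymap()` helper with a read call. The `aurorax.data.ucalgary` access path and the dataset, site, and timestamp values are example assumptions only.
+```python
+import datetime
+import pyaurorax
+
+aurorax = pyaurorax.PyAuroraX()
+
+# download and read the recommended skymap for a site and time (example values)
+res = aurorax.data.ucalgary.download_best_skymap(
+    "THEMIS_ASI_SKYMAP_IDLSAV", "atha", datetime.datetime(2021, 11, 4, 6, 0, 0))
+data = aurorax.data.ucalgary.read(res.dataset, res.filenames)
+skymap = data.data[0]    # reading skymap files yields a List[Skymap]
+
+skymap.pretty_print()
+print(skymap.get_precalculated_altitudes())   # pre-computed mapping altitudes, in km
+```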
+
+class UCalgaryManager +(aurorax_obj) +
+
+

The UCalgaryManager object is initialized within every PyAuroraX object. It acts as a way to access +the submodules and carry over configuration information in the super class.

+
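+As a quick orientation before the source listing, the sketch below shows how this manager is typically reached and a few of its listing helpers. The `aurorax.data.ucalgary` access path is an assumption based on this module's location, and the dataset names are examples only.
+```python
+import pyaurorax
+
+aurorax = pyaurorax.PyAuroraX()
+ucalgary = aurorax.data.ucalgary   # UCalgaryManager instance created by PyAuroraX
+
+print(ucalgary.list_supported_read_datasets())       # dataset names with special read support
+print(ucalgary.is_read_supported("THEMIS_ASI_RAW"))  # True/False for a single dataset
+datasets = ucalgary.list_datasets(name="REGO")       # filter the dataset listing by name
+```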
+ +Expand source code + +
class UCalgaryManager:
+    """
+    The UCalgaryManager object is initialized within every PyAuroraX object. It acts as a way to access 
+    the submodules and carry over configuration information in the super class.
+    """
+
+    __DEFAULT_DOWNLOAD_N_PARALLEL = 5
+
+    def __init__(self, aurorax_obj):
+        self.__aurorax_obj: PyAuroraX = aurorax_obj
+
+        # initialize sub-modules
+        self.__readers = ReadManager(self.__aurorax_obj)
+
+    @property
+    def readers(self):
+        """
+        Access to the `read` submodule from within a PyAuroraX object.
+        """
+        return self.__readers
+
+    def list_datasets(self, name: Optional[str] = None, timeout: Optional[int] = None) -> List[Dataset]:
+        """
+        List available datasets
+
+        Args:
+            name (str): 
+                Supply a name used for filtering. If that name is found in the available dataset 
+                names received from the API, it will be included in the results. This parameter is
+                optional.
+            
+            timeout (int): 
+                Represents how many seconds to wait for the API to send data before giving up. The 
+                default is 10 seconds, or the `api_timeout` value in the super class' `pyaurorax.PyAuroraX`
+                object. This parameter is optional.
+            
+        Returns:
+            A list of [`Dataset`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Dataset)
+            objects.
+        
+        Raises:
+            pyaurorax.exceptions.AuroraXAPIError: An API error was encountered.
+        """
+        try:
+            return self.__aurorax_obj.srs_obj.data.list_datasets(name=name, timeout=timeout)
+        except SRSAPIError as e:
+            raise AuroraXAPIError(e) from e
+
+    def list_observatories(self,
+                           instrument_array: Literal["themis_asi", "rego", "trex_rgb", "trex_nir", "trex_blue"],
+                           uid: Optional[str] = None,
+                           timeout: Optional[int] = None) -> List[Observatory]:
+        """
+        List information about observatories
+
+        Args:
+            instrument_array (str): 
+                The instrument array to list observatories for. Valid values are: themis_asi, rego, 
+                trex_rgb, trex_nir, and trex_blue.
+
+            uid (str): 
+                Supply an observatory unique identifier used for filtering (usually the 4-letter site code). If that UID 
+                is found in the available observatories received from the API, it will be included in the results. This 
+                parameter is optional.
+            
+            timeout (int): 
+                Represents how many seconds to wait for the API to send data before giving up. The 
+                default is 10 seconds, or the `api_timeout` value in the super class' `pyaurorax.PyAuroraX`
+                object. This parameter is optional.
+            
+        Returns:
+            A list of [`Observatory`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Observatory)
+            objects.
+        
+        Raises:
+            pyaurorax.exceptions.AuroraXAPIError: An API error was encountered.
+        """
+        try:
+            return self.__aurorax_obj.srs_obj.data.list_observatories(instrument_array, uid=uid, timeout=timeout)
+        except SRSAPIError as e:
+            raise AuroraXAPIError(e) from e
+
+    def list_supported_read_datasets(self) -> List[str]:
+        """
+        List the datasets which have file reading capabilities supported.
+
+        Returns:
+            A list of the dataset names with file reading support.
+        """
+        return self.__aurorax_obj.srs_obj.data.list_supported_read_datasets()
+
+    def is_read_supported(self, dataset_name: str) -> bool:
+        """
+        Check if a given dataset has file reading support. 
+        
+        Not all datasets available in the UCalgary Space Remote Sensing Open Data Platform 
+        have special readfile routines in this library. This is because some datasets are 
+        in basic formats such as JPG or PNG, so unique functions aren't necessary. We leave 
+        it up to the user to open these basic files in whichever way they prefer. Use the 
+        `list_supported_read_datasets()` function to see all datasets that have special
+        file reading functionality in this library.
+
+        Args:
+            dataset_name (str): 
+                The dataset name to check if file reading is supported. This parameter 
+                is required.
+        
+        Returns:
+            Boolean indicating if file reading is supported.
+        """
+        return self.__aurorax_obj.srs_obj.data.is_read_supported(dataset_name)
+
+    def download(self,
+                 dataset_name: str,
+                 start: datetime.datetime,
+                 end: datetime.datetime,
+                 site_uid: Optional[str] = None,
+                 device_uid: Optional[str] = None,
+                 n_parallel: int = __DEFAULT_DOWNLOAD_N_PARALLEL,
+                 overwrite: bool = False,
+                 progress_bar_disable: bool = False,
+                 progress_bar_ncols: Optional[int] = None,
+                 progress_bar_ascii: Optional[str] = None,
+                 progress_bar_desc: Optional[str] = None,
+                 timeout: Optional[int] = None) -> FileDownloadResult:
+        """
+        Download data from the UCalgary Space Remote Sensing Open Data Platform.
+
+        The parameters `dataset_name`, `start`, and `end` are required. All other parameters
+        are optional.
+
+        Note that usage of the site and device UID filters applies differently to some datasets.
+        For example, both fields can be used for most raw and keogram data, but only site UID can
+        be used for skymap datasets, and only device UID can be used for calibration datasets. If 
+        fields are specified during a call in which site or device UID is not used, a UserWarning
+        is displayed to provide the user with feedback about this detail.
+
+        Args:
+            dataset_name (str): 
+                Name of the dataset to download data for. Use the `list_datasets()` function
+                to get the possible values for this parameter. One example is "THEMIS_ASI_RAW". 
+                Note that dataset names are case sensitive. This parameter is required.
+
+            start (datetime.datetime): 
+                Start timestamp to use (inclusive), expected to be in UTC. Any timezone data 
+                will be ignored. This parameter is required.
+
+            end (datetime.datetime): 
+                End timestamp to use (inclusive), expected to be in UTC. Any timezone data 
+                will be ignored. This parameter is required.
+
+            site_uid (str): 
+                The site UID to filter for. If specified, data will be downloaded for only the 
+                site matching the given value. If excluded, data for all available sites will 
+                be downloaded. An example value could be 'atha', meaning all data from the 
+                Athabasca observatory will be downloaded for the given dataset name, start, and 
+                end times. This parameter is optional.
+
+            device_uid (str): 
+                The device UID to filter for. If specified, data will be downloaded for only the
+                device matching the given value. If excluded, data for all available devices will
+                be downloaded. An example value could be 'themis02', meaning all data matching that
+                device will be downloaded for the given dataset name, start, and end times. This
+                parameter is optional.
+
+            n_parallel (int): 
+                Number of data files to download in parallel. Default value is 5. Adjust as needed 
+                for your internet connection. This parameter is optional.
+
+            overwrite (bool): 
+                By default, data will not be re-downloaded if it already exists locally. Use 
+                the `overwrite` parameter to force re-downloading. Default is `False`. This 
+                parameter is optional.
+
+            progress_bar_disable (bool): 
+                Disable the progress bar. Default is `False`. This parameter is optional.
+
+            progress_bar_ncols (int): 
+                Number of columns for the progress bar (straight passthrough of the `ncols` 
+                parameter in a tqdm progress bar). This parameter is optional. See Notes section
+                below for further information.
+            
+            progress_bar_ascii (str): 
+                ASCII value to use when constructing the visual aspect of the progress bar (straight 
+                passthrough of the `ascii` parameter in a tqdm progress bar). This parameter is 
+                optional. See Notes section below for further details.
+
+            timeout (int): 
+                Represents how many seconds to wait for the API to send data before giving up. The 
+                default is 10 seconds, or the `api_timeout` value in the super class' `pyaurorax.PyAuroraX`
+                object. This parameter is optional.
+
+        Returns:
+            A [`FileDownloadResult`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.FileDownloadResult) 
+            object containing details about what data files were downloaded.
+
+        Raises:
+            pyaurorax.exceptions.AuroraXDownloadError: an error was encountered while downloading a 
+                specific file
+            pyaurorax.exceptions.AuroraXAPIError: an API error was encountered
+
+        Notes:
+        --------
+        The `progress_bar_*` parameters can be used to enable/disable/adjust the progress bar. 
+        Excluding the `progress_bar_disable` parameter, all others are straight pass-throughs 
+        to the tqdm progress bar function. The `progress_bar_ncols` parameter allows for 
+        adjusting the width. The `progress_bar_ascii` parameter allows for adjusting the appearance 
+        of the progress bar. And the `progress_bar_desc` parameter allows for adjusting the 
+        description at the beginning of the progress bar. Further details can be found on the
+        [tqdm documentation](https://tqdm.github.io/docs/tqdm/#tqdm-objects).
+
+        Data downloading will use the `download_data_root_path` variable within the super class'
+        object ([`PyAuroraX`](../../index.html#pyaurorax.PyAuroraX)) to determine where to save data to. If 
+        you'd like to change this path to somewhere else you can change that variable before your
+        download() call, like so:
+
+        ```python
+        import pyaurorax
+        aurorax = pyaurorax.PyAuroraX()
+        aurorax.data_download_root_path = "some_new_path"
+        aurorax.data.download(dataset_name, start, end)
+        ```
+        """
+        try:
+            return self.__aurorax_obj.srs_obj.data.download(
+                dataset_name,
+                start,
+                end,
+                site_uid=site_uid,
+                device_uid=device_uid,
+                n_parallel=n_parallel,
+                overwrite=overwrite,
+                progress_bar_disable=progress_bar_disable,
+                progress_bar_ncols=progress_bar_ncols,
+                progress_bar_ascii=progress_bar_ascii,
+                progress_bar_desc=progress_bar_desc,
+                timeout=timeout,
+            )
+        except SRSDownloadError as e:
+            raise AuroraXDownloadError(e) from e
+        except SRSAPIError as e:
+            raise AuroraXAPIError(e) from e
+
+    def download_using_urls(self,
+                            file_listing_response: FileListingResponse,
+                            n_parallel: int = __DEFAULT_DOWNLOAD_N_PARALLEL,
+                            overwrite: bool = False,
+                            progress_bar_disable: bool = False,
+                            progress_bar_ncols: Optional[int] = None,
+                            progress_bar_ascii: Optional[str] = None,
+                            progress_bar_desc: Optional[str] = None,
+                            timeout: Optional[int] = None) -> FileDownloadResult:
+        """
+        Download data from the UCalgary Space Remote Sensing Open Data Platform using 
+        a FileListingResponse object. This would be used in cases where more customization 
+        is needed than the generic `download()` function. 
+        
+        One example of using this function would start by using `get_urls()` to retrieve the
+        list of URLs available for download, then further processing this list down to fewer files
+        based on some other requirement (e.g., time down-sampling such as one file per hour), and 
+        lastly using this function to download the resulting custom set of URLs.
+
+        Args:
+            file_listing_response (FileListingResponse): 
+                A [`FileListingResponse`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.FileListingResponse) 
+                object returned from a `get_urls()` call, which contains a list of URLs to download 
+                for a specific dataset. This parameter is required.
+
+            n_parallel (int): 
+                Number of data files to download in parallel. Default value is 5. Adjust as needed 
+                for your internet connection. This parameter is optional.
+
+            overwrite (bool): 
+                By default, data will not be re-downloaded if it already exists locally. Use 
+                the `overwrite` parameter to force re-downloading. Default is `False`. This 
+                parameter is optional.
+
+            progress_bar_disable (bool): 
+                Disable the progress bar. Default is `False`. This parameter is optional.
+
+            progress_bar_ncols (int): 
+                Number of columns for the progress bar (straight passthrough of the `ncols` 
+                parameter in a tqdm progress bar). This parameter is optional. See Notes section
+                below for further information.
+            
+            progress_bar_ascii (str): 
+                ASCII value to use when constructing the visual aspect of the progress bar (straight 
+                passthrough of the `ascii` parameter in a tqdm progress bar). This parameter is 
+                optional. See Notes section below for further details.
+
+            timeout (int): 
+                Represents how many seconds to wait for the API to send data before giving up. The 
+                default is 10 seconds, or the `api_timeout` value in the super class' `pyaurorax.PyAuroraX`
+                object. This parameter is optional.
+
+        Returns:
+            A [`FileDownloadResult`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.FileDownloadResult) 
+            object containing details about what data files were downloaded.
+
+        Raises:
+            pyaurorax.exceptions.AuroraXDownloadError: an error was encountered while downloading a 
+                specific file
+            pyaurorax.exceptions.AuroraXAPIError: an API error was encountered
+
+        Notes:
+        --------
+        The `progress_bar_*` parameters can be used to enable/disable/adjust the progress bar. 
+        Excluding the `progress_bar_disable` parameter, all others are straight pass-throughs 
+        to the tqdm progress bar function. The `progress_bar_ncols` parameter allows for 
+        adjusting the width. The `progress_bar_ascii` parameter allows for adjusting the appearance 
+        of the progress bar. And the `progress_bar_desc` parameter allows for adjusting the 
+        description at the beginning of the progress bar. Further details can be found on the
+        [tqdm documentation](https://tqdm.github.io/docs/tqdm/#tqdm-objects).
+
+        Data downloading will use the `download_data_root_path` variable within the super class'
+        object ([`PyAuroraX`](../../index.html#pyaurorax.PyAuroraX)) to determine where to save data to. If 
+        you'd like to change this path to somewhere else you can change that variable before your
+        download() call, like so:
+
+        ```python
+        import pyaurorax
+        aurorax = pyaurorax.PyAuroraX()
+        aurorax.data_download_root_path = "some_new_path"
+        aurorax.data.download(dataset_name, start, end)
+        ```
+        """
+        try:
+            return self.__aurorax_obj.srs_obj.data.download_using_urls(
+                file_listing_response,
+                n_parallel=n_parallel,
+                overwrite=overwrite,
+                progress_bar_disable=progress_bar_disable,
+                progress_bar_ncols=progress_bar_ncols,
+                progress_bar_ascii=progress_bar_ascii,
+                progress_bar_desc=progress_bar_desc,
+                timeout=timeout,
+            )
+        except SRSDownloadError as e:
+            raise AuroraXDownloadError(e) from e
+        except SRSAPIError as e:
+            raise AuroraXAPIError(e) from e
+
+    def get_urls(self,
+                 dataset_name: str,
+                 start: datetime.datetime,
+                 end: datetime.datetime,
+                 site_uid: Optional[str] = None,
+                 device_uid: Optional[str] = None,
+                 timeout: Optional[int] = None) -> FileListingResponse:
+        """
+        Get URLs of data files
+
+        The parameters `dataset_name`, `start`, and `end` are required. All other parameters
+        are optional.
+
+        Note that usage of the site and device UID filters applies differently to some datasets.
+        For example, both fields can be used for most raw and keogram data, but only site UID can
+        be used for skymap datasets, and only device UID can be used for calibration datasets. If 
+        fields are specified during a call in which site or device UID is not used, a UserWarning
+        is displayed to provide the user with feedback about this detail.
+
+        Args:
+            dataset_name (str): 
+                Name of the dataset to download data for. Use the `list_datasets()` function
+                to get the possible values for this parameter. One example is "THEMIS_ASI_RAW". 
+                Note that dataset names are case sensitive. This parameter is required.
+
+            start (datetime.datetime): 
+                Start timestamp to use (inclusive), expected to be in UTC. Any timezone data 
+                will be ignored. This parameter is required.
+
+            end (datetime.datetime): 
+                End timestamp to use (inclusive), expected to be in UTC. Any timezone data 
+                will be ignored. This parameter is required.
+
+            site_uid (str): 
+                The site UID to filter for. If specified, data will be downloaded for only the 
+                site matching the given value. If excluded, data for all available sites will 
+                be downloaded. An example value could be 'atha', meaning all data from the 
+                Athabasca observatory will be downloaded for the given dataset name, start, and 
+                end times. This parameter is optional.
+
+            device_uid (str): 
+                The device UID to filter for. If specified, data will be downloaded for only the
+                device matching the given value. If excluded, data for all available devices will
+                be downloaded. An example value could be 'themis02', meaning all data matching that
+                device will be downloaded for the given dataset name, start, and end times. This
+                parameter is optional.
+
+            timeout (int): 
+                Represents how many seconds to wait for the API to send data before giving up. The 
+                default is 10 seconds, or the `api_timeout` value in the super class' `pyaurorax.PyAuroraX`
+                object. This parameter is optional.
+    
+        Returns:
+            A [`FileListingResponse`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.FileListingResponse)
+            object containing a list of the available URLs, among other values.
+
+        Raises:
+            pyaurorax.exceptions.AuroraXAPIError: an API error was encountered
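+
+        As a brief, illustrative sketch (the accessor path assumes the same `aurorax.data`
+        pattern used in the download() example; the dataset and site values mirror the
+        examples above):
+
+        ```python
+        import datetime
+        import pyaurorax
+
+        aurorax = pyaurorax.PyAuroraX()
+        start = datetime.datetime(2022, 1, 1, 6, 0, 0)
+        end = datetime.datetime(2022, 1, 1, 6, 59, 59)
+
+        # list the file URLs for one site over a one-hour window
+        file_listing = aurorax.data.get_urls("THEMIS_ASI_RAW", start, end, site_uid="atha")
+        print(file_listing.urls)
+        ```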
+        """
+        try:
+            return self.__aurorax_obj.srs_obj.data.get_urls(
+                dataset_name,
+                start,
+                end,
+                site_uid=site_uid,
+                device_uid=device_uid,
+                timeout=timeout,
+            )
+        except SRSAPIError as e:
+            raise AuroraXAPIError(e) from e
+
+    def read(self,
+             dataset: Dataset,
+             file_list: Union[List[str], List[Path], str, Path],
+             n_parallel: int = 1,
+             first_record: bool = False,
+             no_metadata: bool = False,
+             quiet: bool = False) -> Data:
+        """
+        Read in data files for a given dataset. Note that only one type of dataset's data
+        should be read in using a single call.
+
+        Args:
+            dataset (Dataset): 
+                The dataset object for which the files are associated with. This parameter is
+                required.
+            
+            file_list (List[str], List[Path], str, Path): 
+                The files to read in. Absolute paths are recommended, but not technically
+                necessary. This can be a single string for a file, or a list of strings to read
+                in multiple files. This parameter is required.
+
+            n_parallel (int): 
+                Number of data files to read in parallel using multiprocessing. Default value 
+                is 1. Adjust according to your computer's available resources. This parameter 
+                is optional.
+            
+            first_record (bool): 
+                Only read in the first record in each file. This is the same as the first_frame
+                parameter in the themis-imager-readfile and trex-imager-readfile libraries, and
+                is a read optimization if you only need one image per minute, as opposed to the
+                full temporal resolution of data (e.g., 3sec cadence). This parameter is optional.
+            
+            no_metadata (bool): 
+                Skip reading of metadata. This is a minor optimization if the metadata is not needed.
+                Default is `False`. This parameter is optional.
+            
+            quiet (bool): 
+                Do not print out errors while reading data files, if any are encountered. Any files
+                that encounter errors will be, as usual, accessible via the `problematic_files` 
+                attribute of the returned `Data` object. This parameter is optional.
+        
+        Returns:
+            A [`Data`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Data) 
+            object containing the data read in, among other values.
+        
+        Raises:
+            pyaurorax.exceptions.AuroraXUnsupportedReadError: an unsupported dataset was used when
+                trying to read files.
+            pyaurorax.exceptions.AuroraXError: a generic read error was encountered
+
+        Notes:
+        ---------
+        For users who are familiar with the themis-imager-readfile and trex-imager-readfile
+        libraries, the read function provides a near-identical usage. Further improvements have 
+        been integrated, and those libraries are anticipated to be deprecated at some point in the
+        future.
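+
+        A rough sketch, assuming the same `aurorax.data` accessor pattern as the download()
+        example and that the download result exposes `dataset` and `filenames` attributes:
+
+        ```python
+        import datetime
+        import pyaurorax
+
+        aurorax = pyaurorax.PyAuroraX()
+        start = datetime.datetime(2022, 1, 1, 6, 0, 0)
+        end = datetime.datetime(2022, 1, 1, 6, 9, 59)
+
+        # download a short interval, then read the downloaded files back in
+        # ('dataset' and 'filenames' on the result are assumed here for illustration)
+        res = aurorax.data.download("THEMIS_ASI_RAW", start, end, site_uid="atha")
+        data = aurorax.data.read(res.dataset, res.filenames, n_parallel=2)
+        ```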
+        """
+        # NOTE: we do not wrap the exceptions here, instead we pass the call along
+        # to the ReadManager object since the method and exception catching is
+        # implemented there. No need to duplicate the exception handling logic.
+        return self.__readers.read(
+            dataset,
+            file_list,
+            n_parallel=n_parallel,
+            first_record=first_record,
+            no_metadata=no_metadata,
+            quiet=quiet,
+        )
+
+    def download_best_skymap(
+        self,
+        dataset_name: str,
+        site_uid: str,
+        timestamp: datetime.datetime,
+        timeout: Optional[int] = None,
+        overwrite: bool = False,
+    ) -> FileDownloadResult:
+        """
+        Download the skymap file that best matches the parameters supplied.
+
+        Args:
+            dataset_name (str): 
+                Name of the dataset to download data for. Use the `list_datasets()` function
+                to get the possible values for this parameter. One example is "THEMIS_ASI_SKYMAP_IDLSAV". 
+                Note that dataset names are case sensitive. This parameter is required.
+
+            site_uid (str): 
+                The site UID to evaluate.
+
+            timestamp (datetime.datetime): 
+                The timestamp to use for deciding the best skymap, expected to be in UTC. Any timezone 
+                data will be ignored. This parameter is required.
+
+            timeout (int): 
+                Represents how many seconds to wait for the API to send data before giving up. The 
+                default is 10 seconds, or the `api_timeout` value in the super class' `pyaurorax.PyAuroraX`
+                object. This parameter is optional.
+
+        Returns:
+            A [`FileDownloadResult`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.FileDownloadResult) 
+            object containing details about what data files were downloaded.
+
+        Raises:
+            ValueError: issue with supplied timestamp
+            pyaurorax.exceptions.AuroraXAPIError: an API error was encountered        
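+
+        A minimal usage sketch (dataset name and site UID mirror the examples above; the
+        accessor path assumes the same `aurorax.data` pattern as the download() example):
+
+        ```python
+        import datetime
+        import pyaurorax
+
+        aurorax = pyaurorax.PyAuroraX()
+        ts = datetime.datetime(2022, 1, 1, 6, 0, 0)
+
+        # download the skymap file recommended for this site and timestamp
+        res = aurorax.data.download_best_skymap("THEMIS_ASI_SKYMAP_IDLSAV", "atha", ts)
+        ```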
+        """
+        # get list of all skymap urls for the dataset and site
+        start_dt = datetime.datetime(2000, 1, 1)
+        end_dt = datetime.datetime.now() + datetime.timedelta(days=5)
+        file_listing_obj = self.get_urls(dataset_name, start_dt, end_dt, site_uid=site_uid, timeout=timeout)
+
+        # filter down and find the best skymap for the timestamp supplied
+        best_skymap_filename = None
+        for url in file_listing_obj.urls:
+            # extract start date for this skymap
+            url_short = url.replace(file_listing_obj.path_prefix + "/", "")
+
+            # parse filename into several values
+            filename_split = os.path.basename(url_short).split('_')
+            filename_times_split = filename_split[3].split('-')
+            valid_interval_start_dt = datetime.datetime.strptime(filename_times_split[0], "%Y%m%d")
+
+            # check start time
+            if (timestamp >= valid_interval_start_dt):
+                # valid
+                #
+                # NOTE: this works because of the order that the list is in already
+                best_skymap_filename = url
+
+        # check if we found a skymap
+        if (best_skymap_filename is None):
+            raise ValueError("Unable to determine a skymap recommendation")
+
+        # set the filename
+        file_listing_obj.urls = [best_skymap_filename]
+        download_obj = self.download_using_urls(
+            file_listing_obj,
+            progress_bar_disable=True,
+            overwrite=overwrite,
+            timeout=timeout,
+        )
+
+        # return
+        return download_obj
+
+    def download_best_flatfield_calibration(
+        self,
+        dataset_name: str,
+        device_uid: str,
+        timestamp: datetime.datetime,
+        timeout: Optional[int] = None,
+        overwrite: bool = False,
+    ) -> FileDownloadResult:
+        """
+        Download the flatfield calibration file that best matches the parameters supplied.
+
+        Args:
+            dataset_name (str): 
+                Name of the dataset to download data for. Use the `list_datasets()` function
+                to get the possible values for this parameter. One example is "REGO_CALIBRATION_FLATFIELD_IDLSAV". 
+                Note that dataset names are case sensitive. This parameter is required.
+
+            device_uid (str): 
+                The device UID to evaluate.
+
+            timestamp (datetime.datetime): 
+                The timestamp to use for deciding the best flatfield calibration file, expected to be in UTC. Any timezone 
+                data will be ignored. This parameter is required.
+
+            timeout (int): 
+                Represents how many seconds to wait for the API to send data before giving up. The 
+                default is 10 seconds, or the `api_timeout` value in the super class' `pyaurorax.PyAuroraX`
+                object. This parameter is optional.
+
+        Returns:
+            A [`FileDownloadResult`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.FileDownloadResult) 
+            object containing details about what data files were downloaded.
+
+        Raises:
+            ValueError: issue with supplied timestamp
+            pyaurorax.exceptions.AuroraXAPIError: an API error was encountered        
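+
+        A minimal usage sketch (the dataset name and device UID below are illustrative
+        placeholders, and the accessor path assumes the same `aurorax.data` pattern as the
+        download() example):
+
+        ```python
+        import datetime
+        import pyaurorax
+
+        aurorax = pyaurorax.PyAuroraX()
+        ts = datetime.datetime(2022, 1, 1, 6, 0, 0)
+
+        # download the flatfield calibration file recommended for this device and timestamp
+        res = aurorax.data.download_best_flatfield_calibration(
+            "REGO_CALIBRATION_FLATFIELD_IDLSAV",  # assumed example dataset name
+            "some_device_uid",                    # placeholder device UID
+            ts,
+        )
+        ```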
+        """
+        # get list of all flatfield urls for the dataset and device
+        start_dt = datetime.datetime(2000, 1, 1)
+        end_dt = datetime.datetime.now() + datetime.timedelta(days=5)
+        file_listing_obj = self.get_urls(dataset_name, start_dt, end_dt, device_uid=device_uid, timeout=timeout)
+
+        # filter down and find the best calibration file for the timestamp supplied
+        best_cal_filename = None
+        for url in file_listing_obj.urls:
+            # extract start date for this calibration file
+            url_short = url.replace(file_listing_obj.path_prefix + "/", "")
+
+            # parse filename into several values
+            filename_split = os.path.basename(url_short).split('_')
+            filename_times_split = filename_split[3].split('-')
+            valid_interval_start_dt = datetime.datetime.strptime(filename_times_split[0], "%Y%m%d")
+
+            # check start time
+            if (timestamp >= valid_interval_start_dt):
+                # valid
+                #
+                # NOTE: this works because of the order that the list is in already
+                best_cal_filename = url
+
+        # check if we found a calibration file
+        if (best_cal_filename is None):
+            raise ValueError("Unable to determine a flatfield calibration recommendation")
+
+        # set the filename
+        file_listing_obj.urls = [best_cal_filename]
+        download_obj = self.download_using_urls(
+            file_listing_obj,
+            progress_bar_disable=True,
+            overwrite=overwrite,
+            timeout=timeout,
+        )
+
+        # return
+        return download_obj
+
+    def download_best_rayleighs_calibration(
+        self,
+        dataset_name: str,
+        device_uid: str,
+        timestamp: datetime.datetime,
+        timeout: Optional[int] = None,
+        overwrite: bool = False,
+    ) -> FileDownloadResult:
+        """
+        Download the Rayleighs calibration file that best matches the parameters supplied.
+
+        Args:
+            dataset_name (str): 
+                Name of the dataset to download data for. Use the `list_datasets()` function
+                to get the possible values for this parameter. One example is "REGO_CALIBRATION_RAYLEIGHS_IDLSAV". 
+                Note that dataset names are case sensitive. This parameter is required.
+
+            device_uid (str): 
+                The device UID to evaluate.
+
+            timestamp (datetime.datetime): 
+                The timestamp to use for deciding the best calibration file, expected to be in 
+                UTC. Any timezone data will be ignored. This parameter is required.
+
+            timeout (int): 
+                Represents how many seconds to wait for the API to send data before giving up. The 
+                default is 10 seconds, or the `api_timeout` value in the super class' `pyaurorax.PyAuroraX`
+                object. This parameter is optional.
+
+        Returns:
+            A [`FileDownloadResult`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.FileDownloadResult) 
+            object containing details about what data files were downloaded.
+
+        Raises:
+            ValueError: issue with supplied timestamp
+            pyaurorax.exceptions.AuroraXAPIError: an API error was encountered        
+        """
+        # get list of all rayleighs urls for the dataset and device
+        start_dt = datetime.datetime(2000, 1, 1)
+        end_dt = datetime.datetime.now() + datetime.timedelta(days=5)
+        file_listing_obj = self.get_urls(dataset_name, start_dt, end_dt, device_uid=device_uid, timeout=timeout)
+
+        # filter down and find the best calibration file for the timestamp supplied
+        best_cal_filename = None
+        for url in file_listing_obj.urls:
+            # extract start date for this calibration file
+            url_short = url.replace(file_listing_obj.path_prefix + "/", "")
+
+            # parse filename into several values
+            filename_split = os.path.basename(url_short).split('_')
+            filename_times_split = filename_split[3].split('-')
+            valid_interval_start_dt = datetime.datetime.strptime(filename_times_split[0], "%Y%m%d")
+
+            # check start time
+            if (timestamp >= valid_interval_start_dt):
+                # valid
+                #
+                # NOTE: this works because of the order that the list is in already
+                best_cal_filename = url
+
+        # check if we found a calibration file
+        if (best_cal_filename is None):
+            raise ValueError("Unable to determine a Rayleighs calibration recommendation")
+
+        # set the filename
+        file_listing_obj.urls = [best_cal_filename]
+        download_obj = self.download_using_urls(
+            file_listing_obj,
+            progress_bar_disable=True,
+            overwrite=overwrite,
+            timeout=timeout,
+        )
+
+        # return
+        return download_obj
+
+

Instance variables

+
+
var readers
+
+

Access to the pyaurorax.data.ucalgary.read submodule from within a PyAuroraX object.

+
+ +Expand source code + +
@property
+def readers(self):
+    """
+    Access to the `read` submodule from within a PyAuroraX object.
+    """
+    return self.__readers
+
+
+
+

Methods

+
+
+def download(self, dataset_name: str, start: datetime.datetime, end: datetime.datetime, site_uid: Optional[str] = None, device_uid: Optional[str] = None, n_parallel: int = 5, overwrite: bool = False, progress_bar_disable: bool = False, progress_bar_ncols: Optional[int] = None, progress_bar_ascii: Optional[str] = None, progress_bar_desc: Optional[str] = None, timeout: Optional[int] = None) ‑> pyucalgarysrs.data.classes.FileDownloadResult +
+
+

Download data from the UCalgary Space Remote Sensing Open Data Platform.

+

The parameters dataset_name, start, and end are required. All other parameters +are optional.

+

Note that usage of the site and device UID filters applies differently to some datasets. +For example, both fields can be used for most raw and keogram data, but only site UID can +be used for skymap datasets, and only device UID can be used for calibration datasets. If +fields are specified during a call in which site or device UID is not used, a UserWarning +is displayed to provide the user with feedback about this detail.

+

Args

+
+
dataset_name : str
+
Name of the dataset to download data for. Use the list_datasets() function +to get the possible values for this parameter. One example is "THEMIS_ASI_RAW". +Note that dataset names are case sensitive. This parameter is required.
+
start : datetime.datetime
+
Start timestamp to use (inclusive), expected to be in UTC. Any timezone data +will be ignored. This parameter is required.
+
end : datetime.datetime
+
End timestamp to use (inclusive), expected to be in UTC. Any timezone data +will be ignored. This parameter is required.
+
site_uid : str
+
The site UID to filter for. If specified, data will be downloaded for only the +site matching the given value. If excluded, data for all available sites will +be downloaded. An example value could be 'atha', meaning all data from the +Athabasca observatory will be downloaded for the given dataset name, start, and +end times. This parameter is optional.
+
device_uid : str
+
The device UID to filter for. If specified, data will be downloaded for only the +device matching the given value. If excluded, data for all available devices will +be downloaded. An example value could be 'themis02', meaning all data matching that +device will be downloaded for the given dataset name, start, and end times. This +parameter is optional.
+
n_parallel : int
+
Number of data files to download in parallel. Default value is 5. Adjust as needed +for your internet connection. This parameter is optional.
+
overwrite : bool
+
By default, data will not be re-downloaded if it already exists locally. Use +the overwrite parameter to force re-downloading. Default is False. This +parameter is optional.
+
progress_bar_disable : bool
+
Disable the progress bar. Default is False. This parameter is optional.
+
progress_bar_ncols : int
+
Number of columns for the progress bar (straight passthrough of the ncols +parameter in a tqdm progress bar). This parameter is optional. See Notes section +below for further information.
+
progress_bar_ascii : str
+
ASCII value to use when constructing the visual aspect of the progress bar (straight +passthrough of the ascii parameter in a tqdm progress bar). This parameter is +optional. See Notes section below for further details.
+
timeout : int
+
Represents how many seconds to wait for the API to send data before giving up. The +default is 10 seconds, or the api_timeout value in the super class' PyAuroraX +object. This parameter is optional.
+
+

Returns

+

A FileDownloadResult +object containing details about what data files were downloaded.

+

Raises

+
+
AuroraXDownloadError
+
an error was encountered while downloading a +specific file
+
AuroraXAPIError
+
an API error was encountered
+
+

Notes:

+

The progress_bar_* parameters can be used to enable/disable/adjust the progress bar. +Excluding the progress_bar_disable parameter, all others are straight pass-throughs +to the tqdm progress bar function. The progress_bar_ncols parameter allows for +adjusting the width. The progress_bar_ascii parameter allows for adjusting the appearance +of the progress bar. And the progress_bar_desc parameter allows for adjusting the +description at the beginning of the progress bar. Further details can be found on the +tqdm documentation.

+

Data downloading will use the download_data_root_path variable within the super class' +object (PyAuroraX) to determine where to save data to. If +you'd like to change this path to somewhere else you can change that variable before your +download() call, like so:

+
import pyaurorax
+aurorax = pyaurorax.PyAuroraX()
+aurorax.data_download_root_path = "some_new_path"
+aurorax.data.download(dataset_name, start, end)
+
+
+ +Expand source code + +
def download(self,
+             dataset_name: str,
+             start: datetime.datetime,
+             end: datetime.datetime,
+             site_uid: Optional[str] = None,
+             device_uid: Optional[str] = None,
+             n_parallel: int = __DEFAULT_DOWNLOAD_N_PARALLEL,
+             overwrite: bool = False,
+             progress_bar_disable: bool = False,
+             progress_bar_ncols: Optional[int] = None,
+             progress_bar_ascii: Optional[str] = None,
+             progress_bar_desc: Optional[str] = None,
+             timeout: Optional[int] = None) -> FileDownloadResult:
+    """
+    Download data from the UCalgary Space Remote Sensing Open Data Platform.
+
+    The parameters `dataset_name`, `start`, and `end` are required. All other parameters
+    are optional.
+
+    Note that usage of the site and device UID filters applies differently to some datasets.
+    For example, both fields can be used for most raw and keogram data, but only site UID can
+    be used for skymap datasets, and only device UID can be used for calibration datasets. If 
+    fields are specified during a call in which site or device UID is not used, a UserWarning
+    is displayed to provide the user with feedback about this detail.
+
+    Args:
+        dataset_name (str): 
+            Name of the dataset to download data for. Use the `list_datasets()` function
+            to get the possible values for this parameter. One example is "THEMIS_ASI_RAW". 
+            Note that dataset names are case sensitive. This parameter is required.
+
+        start (datetime.datetime): 
+            Start timestamp to use (inclusive), expected to be in UTC. Any timezone data 
+            will be ignored. This parameter is required.
+
+        end (datetime.datetime): 
+            End timestamp to use (inclusive), expected to be in UTC. Any timezone data 
+            will be ignored. This parameter is required.
+
+        site_uid (str): 
+            The site UID to filter for. If specified, data will be downloaded for only the 
+            site matching the given value. If excluded, data for all available sites will 
+            be downloaded. An example value could be 'atha', meaning all data from the 
+            Athabasca observatory will be downloaded for the given dataset name, start, and 
+            end times. This parameter is optional.
+
+        device_uid (str): 
+            The device UID to filter for. If specified, data will be downloaded for only the
+            device matching the given value. If excluded, data for all available devices will
+            be downloaded. An example value could be 'themis02', meaning all data matching that
+            device will be downloaded for the given dataset name, start, and end times. This
+            parameter is optional.
+
+        n_parallel (int): 
+            Number of data files to download in parallel. Default value is 5. Adjust as needed 
+            for your internet connection. This parameter is optional.
+
+        overwrite (bool): 
+            By default, data will not be re-downloaded if it already exists locally. Use 
+            the `overwrite` parameter to force re-downloading. Default is `False`. This 
+            parameter is optional.
+
+        progress_bar_disable (bool): 
+            Disable the progress bar. Default is `False`. This parameter is optional.
+
+        progress_bar_ncols (int): 
+            Number of columns for the progress bar (straight passthrough of the `ncols` 
+            parameter in a tqdm progress bar). This parameter is optional. See Notes section
+            below for further information.
+        
+        progress_bar_ascii (str): 
+            ASCII value to use when constructing the visual aspect of the progress bar (straight 
+            passthrough of the `ascii` parameter in a tqdm progress bar). This parameter is 
+            optional. See Notes section below for further details.
+
+        timeout (int): 
+            Represents how many seconds to wait for the API to send data before giving up. The 
+            default is 10 seconds, or the `api_timeout` value in the super class' `pyaurorax.PyAuroraX`
+            object. This parameter is optional.
+
+    Returns:
+        A [`FileDownloadResult`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.FileDownloadResult) 
+        object containing details about what data files were downloaded.
+
+    Raises:
+        pyaurorax.exceptions.AuroraXDownloadError: an error was encountered while downloading a 
+            specific file
+        pyaurorax.exceptions.AuroraXAPIError: an API error was encountered
+
+    Notes:
+    --------
+    The `progress_bar_*` parameters can be used to enable/disable/adjust the progress bar. 
+    Excluding the `progress_bar_disable` parameter, all others are straight pass-throughs 
+    to the tqdm progress bar function. The `progress_bar_ncols` parameter allows for 
+    adjusting the width. The `progress_bar_ascii` parameter allows for adjusting the appearance 
+    of the progress bar. And the `progress_bar_desc` parameter allows for adjusting the 
+    description at the beginning of the progress bar. Further details can be found on the
+    [tqdm documentation](https://tqdm.github.io/docs/tqdm/#tqdm-objects).
+
+    Data downloading will use the `download_data_root_path` variable within the super class'
+    object ([`PyAuroraX`](../../index.html#pyaurorax.PyAuroraX)) to determine where to save data to. If 
+    you'd like to change this path to somewhere else you can change that variable before your
+    download() call, like so:
+
+    ```python
+    import pyaurorax
+    aurorax = pyaurorax.PyAuroraX()
+    aurorax.data_download_root_path = "some_new_path"
+    aurorax.data.download(dataset_name, start, end)
+    ```
+    """
+    try:
+        return self.__aurorax_obj.srs_obj.data.download(
+            dataset_name,
+            start,
+            end,
+            site_uid=site_uid,
+            device_uid=device_uid,
+            n_parallel=n_parallel,
+            overwrite=overwrite,
+            progress_bar_disable=progress_bar_disable,
+            progress_bar_ncols=progress_bar_ncols,
+            progress_bar_ascii=progress_bar_ascii,
+            progress_bar_desc=progress_bar_desc,
+            timeout=timeout,
+        )
+    except SRSDownloadError as e:
+        raise AuroraXDownloadError(e) from e
+    except SRSAPIError as e:
+        raise AuroraXAPIError(e) from e
+
+
+
+def download_best_flatfield_calibration(self, dataset_name: str, device_uid: str, timestamp: datetime.datetime, timeout: Optional[int] = None, overwrite: bool = False) ‑> pyucalgarysrs.data.classes.FileDownloadResult +
+
+

Download the flatfield calibration file that best matches the parameters supplied.

+

Args

+
+
dataset_name : str
+
Name of the dataset to download data for. Use the list_datasets() function +to get the possible values for this parameter. One example is "REGO_CALIBRATION_FLATFIELD_IDLSAV". +Note that dataset names are case sensitive. This parameter is required.
+
device_uid : str
+
The device UID to evaluate.
+
timestamp : datetime.datetime
+
The timestamp to use for deciding the best flatfield calibration file, expected to be in UTC. Any timezone +data will be ignored. This parameter is required.
+
timeout : int
+
Represents how many seconds to wait for the API to send data before giving up. The +default is 10 seconds, or the api_timeout value in the super class' PyAuroraX +object. This parameter is optional.
+
+

Returns

+

A FileDownloadResult +object containing details about what data files were downloaded.

+

Raises

+
+
ValueError
+
issue with supplied timestamp
+
AuroraXAPIError
+
an API error was encountered
+
+
+ +Expand source code + +
def download_best_flatfield_calibration(
+    self,
+    dataset_name: str,
+    device_uid: str,
+    timestamp: datetime.datetime,
+    timeout: Optional[int] = None,
+    overwrite: bool = False,
+) -> FileDownloadResult:
+    """
+    Download the flatfield calibration file that best matches the parameters supplied.
+
+    Args:
+        dataset_name (str): 
+            Name of the dataset to download data for. Use the `list_datasets()` function
+            to get the possible values for this parameter. One example is "REGO_CALIBRATION_FLATFIELD_IDLSAV". 
+            Note that dataset names are case sensitive. This parameter is required.
+
+        device_uid (str): 
+            The device UID to evaluate.
+
+        timestamp (datetime.datetime): 
+            The timestamp to use for deciding the best flatfield calibration file, expected to be in UTC. Any timezone 
+            data will be ignored. This parameter is required.
+
+        timeout (int): 
+            Represents how many seconds to wait for the API to send data before giving up. The 
+            default is 10 seconds, or the `api_timeout` value in the super class' `pyaurorax.PyAuroraX`
+            object. This parameter is optional.
+
+    Returns:
+        A [`FileDownloadResult`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.FileDownloadResult) 
+        object containing details about what data files were downloaded.
+
+    Raises:
+        ValueError: issue with supplied timestamp
+        pyaurorax.exceptions.AuroraXAPIError: an API error was encountered        
+    """
+    # get list of all flatfield urls for the dataset and device
+    start_dt = datetime.datetime(2000, 1, 1)
+    end_dt = datetime.datetime.now() + datetime.timedelta(days=5)
+    file_listing_obj = self.get_urls(dataset_name, start_dt, end_dt, device_uid=device_uid, timeout=timeout)
+
+    # filter down and find the best calibration file for the timestamp supplied
+    best_cal_filename = None
+    for url in file_listing_obj.urls:
+        # extract start date for this calibration file
+        url_short = url.replace(file_listing_obj.path_prefix + "/", "")
+
+        # parse filename into several values
+        filename_split = os.path.basename(url_short).split('_')
+        filename_times_split = filename_split[3].split('-')
+        valid_interval_start_dt = datetime.datetime.strptime(filename_times_split[0], "%Y%m%d")
+
+        # check start time
+        if (timestamp >= valid_interval_start_dt):
+            # valid
+            #
+            # NOTE: this works because of the order that the list is in already
+            best_cal_filename = url
+
+    # check if we found a calibration file
+    if (best_cal_filename is None):
+        raise ValueError("Unable to determine a flatfield calibration recommendation")
+
+    # set the filename
+    file_listing_obj.urls = [best_cal_filename]
+    download_obj = self.download_using_urls(
+        file_listing_obj,
+        progress_bar_disable=True,
+        overwrite=overwrite,
+        timeout=timeout,
+    )
+
+    # return
+    return download_obj
+
+
+
+def download_best_rayleighs_calibration(self, dataset_name: str, device_uid: str, timestamp: datetime.datetime, timeout: Optional[int] = None, overwrite: bool = False) ‑> pyucalgarysrs.data.classes.FileDownloadResult +
+
+

Download the Rayleighs calibration file that best matches the parameters supplied.

+

Args

+
+
dataset_name : str
+
Name of the dataset to download data for. Use the list_datasets() function +to get the possible values for this parameter. One example is "REGO_CALIBRATION_RAYLEIGHS_IDLSAV". +Note that dataset names are case sensitive. This parameter is required.
+
device_uid : str
+
The device UID to evaluate.
+
timestamp : datetime.datetime
+
The timestamp to use for deciding the best calibration file, expected to be in +UTC. Any timezone data will be ignored. This parameter is required.
+
timeout : int
+
Represents how many seconds to wait for the API to send data before giving up. The +default is 10 seconds, or the api_timeout value in the super class' PyAuroraX +object. This parameter is optional.
+
+

Returns

+

A FileDownloadResult +object containing details about what data files were downloaded.

+

Raises

+
+
ValueError
+
issue with supplied timestamp
+
AuroraXAPIError
+
an API error was encountered
+
+
+ +Expand source code + +
def download_best_rayleighs_calibration(
+    self,
+    dataset_name: str,
+    device_uid: str,
+    timestamp: datetime.datetime,
+    timeout: Optional[int] = None,
+    overwrite: bool = False,
+) -> FileDownloadResult:
+    """
+    Download the Rayleighs calibration file that best matches the parameters supplied.
+
+    Args:
+        dataset_name (str): 
+            Name of the dataset to download data for. Use the `list_datasets()` function
+            to get the possible values for this parameter. One example is "REGO_CALIBRATION_RAYLEIGHS_IDLSAV". 
+            Note that dataset names are case sensitive. This parameter is required.
+
+        device_uid (str): 
+            The device UID to evaluate.
+
+        timestamp (datetime.datetime): 
+            The timestamp to use for deciding the best calibration file, expected to be in 
+            UTC. Any timezone data will be ignored. This parameter is required.
+
+        timeout (int): 
+            Represents how many seconds to wait for the API to send data before giving up. The 
+            default is 10 seconds, or the `api_timeout` value in the super class' `pyaurorax.PyAuroraX`
+            object. This parameter is optional.
+
+    Returns:
+        A [`FileDownloadResult`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.FileDownloadResult) 
+        object containing details about what data files were downloaded.
+
+    Raises:
+        ValueError: issue with supplied timestamp
+        pyaurorax.exceptions.AuroraXAPIError: an API error was encountered        
+    """
+    # get list of all rayleighs urls for the dataset and device
+    start_dt = datetime.datetime(2000, 1, 1)
+    end_dt = datetime.datetime.now() + datetime.timedelta(days=5)
+    file_listing_obj = self.get_urls(dataset_name, start_dt, end_dt, device_uid=device_uid, timeout=timeout)
+
+    # filter down and find the best calibration file for the timestamp supplied
+    best_cal_filename = None
+    for url in file_listing_obj.urls:
+        # extract start date for this calibration file
+        url_short = url.replace(file_listing_obj.path_prefix + "/", "")
+
+        # parse filename into several values
+        filename_split = os.path.basename(url_short).split('_')
+        filename_times_split = filename_split[3].split('-')
+        valid_interval_start_dt = datetime.datetime.strptime(filename_times_split[0], "%Y%m%d")
+
+        # check start time
+        if (timestamp >= valid_interval_start_dt):
+            # valid
+            #
+            # NOTE: this works because of the order that the list is in already
+            best_cal_filename = url
+
+    # check if we found a calibration file
+    if (best_cal_filename is None):
+        raise ValueError("Unable to determine a Rayleighs calibration recommendation")
+
+    # set the filename
+    file_listing_obj.urls = [best_cal_filename]
+    download_obj = self.download_using_urls(
+        file_listing_obj,
+        progress_bar_disable=True,
+        overwrite=overwrite,
+        timeout=timeout,
+    )
+
+    # return
+    return download_obj
+
+
+
+def download_best_skymap(self, dataset_name: str, site_uid: str, timestamp: datetime.datetime, timeout: Optional[int] = None, overwrite: bool = False) ‑> pyucalgarysrs.data.classes.FileDownloadResult +
+
+

Download the skymap file that best matches the parameters supplied.

+

Args

+
+
dataset_name : str
+
Name of the dataset to download data for. Use the list_datasets() function +to get the possible values for this parameter. One example is "THEMIS_ASI_SKYMAP_IDLSAV". +Note that dataset names are case sensitive. This parameter is required.
+
site_uid : str
+
The site UID to evaluate.
+
timestamp : datetime.datetime
+
The timestamp to use for deciding the best skymap, expected to be in UTC. Any timezone +data will be ignored. This parameter is required.
+
timeout : int
+
Represents how many seconds to wait for the API to send data before giving up. The +default is 10 seconds, or the api_timeout value in the super class' PyAuroraX +object. This parameter is optional.
+
+

Returns

+

A FileDownloadResult +object containing details about what data files were downloaded.

+

Raises

+
+
ValueError
+
issue with supplied timestamp
+
AuroraXAPIError
+
an API error was encountered
+
+
+ +Expand source code + +
def download_best_skymap(
+    self,
+    dataset_name: str,
+    site_uid: str,
+    timestamp: datetime.datetime,
+    timeout: Optional[int] = None,
+    overwrite: bool = False,
+) -> FileDownloadResult:
+    """
+    Download the skymap file that best matches the parameters supplied.
+
+    Args:
+        dataset_name (str): 
+            Name of the dataset to download data for. Use the `list_datasets()` function
+            to get the possible values for this parameter. One example is "THEMIS_ASI_SKYMAP_IDLSAV". 
+            Note that dataset names are case sensitive. This parameter is required.
+
+        site_uid (str): 
+            The site UID to evaluate.
+
+        timestamp (datetime.datetime): 
+            The timestamp to use for deciding the best skymap, expected to be in UTC. Any timezone 
+            data will be ignored. This parameter is required.
+
+        timeout (int): 
+            Represents how many seconds to wait for the API to send data before giving up. The 
+            default is 10 seconds, or the `api_timeout` value in the super class' `pyaurorax.PyAuroraX`
+            object. This parameter is optional.
+
+    Returns:
+        A [`FileDownloadResult`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.FileDownloadResult) 
+        object containing details about what data files were downloaded.
+
+    Raises:
+        ValueError: issue with supplied timestamp
+        pyaurorax.exceptions.AuroraXAPIError: an API error was encountered        
+    """
+    # get list of all skymap urls for the dataset and site
+    start_dt = datetime.datetime(2000, 1, 1)
+    end_dt = datetime.datetime.now() + datetime.timedelta(days=5)
+    file_listing_obj = self.get_urls(dataset_name, start_dt, end_dt, site_uid=site_uid, timeout=timeout)
+
+    # filter down and find the best skymap for the timestamp supplied
+    best_skymap_filename = None
+    for url in file_listing_obj.urls:
+        # extract start date for this skymap
+        url_short = url.replace(file_listing_obj.path_prefix + "/", "")
+
+        # parse filename into several values
+        filename_split = os.path.basename(url_short).split('_')
+        filename_times_split = filename_split[3].split('-')
+        valid_interval_start_dt = datetime.datetime.strptime(filename_times_split[0], "%Y%m%d")
+
+        # check start time
+        if (timestamp >= valid_interval_start_dt):
+            # valid
+            #
+            # NOTE: this works because of the order that the list is in already
+            best_skymap_filename = url
+
+    # check if we found a skymap
+    if (best_skymap_filename is None):
+        raise ValueError("Unable to determine a skymap recommendation")
+
+    # set the filename
+    file_listing_obj.urls = [best_skymap_filename]
+    download_obj = self.download_using_urls(
+        file_listing_obj,
+        progress_bar_disable=True,
+        overwrite=overwrite,
+        timeout=timeout,
+    )
+
+    # return
+    return download_obj
+
+
+
+def download_using_urls(self, file_listing_response: pyucalgarysrs.data.classes.FileListingResponse, n_parallel: int = 5, overwrite: bool = False, progress_bar_disable: bool = False, progress_bar_ncols: Optional[int] = None, progress_bar_ascii: Optional[str] = None, progress_bar_desc: Optional[str] = None, timeout: Optional[int] = None) ‑> pyucalgarysrs.data.classes.FileDownloadResult +
+
+

Download data from the UCalgary Space Remote Sensing Open Data Platform using +a FileListingResponse object. This would be used in cases where more customization +is needed than the generic download() function.

+

One example of using this function would start with a get_urls() call to retrieve the +list of URLs available for download, then filter this list down to fewer files +based on some other requirement (e.g., time down-sampling such as one file per hour), +and lastly use this function to download the resulting custom set of URLs.

+

Args

+
+
file_listing_response : FileListingResponse
+
A FileListingResponse +object returned from a get_urls() call, which contains a list of URLs to download +for a specific dataset. This parameter is required.
+
n_parallel : int
+
Number of data files to download in parallel. Default value is 5. Adjust as needed +for your internet connection. This parameter is optional.
+
overwrite : bool
+
By default, data will not be re-downloaded if it already exists locally. Use +the overwrite parameter to force re-downloading. Default is False. This +parameter is optional.
+
progress_bar_disable : bool
+
Disable the progress bar. Default is False. This parameter is optional.
+
progress_bar_ncols : int
+
Number of columns for the progress bar (straight passthrough of the ncols +parameter in a tqdm progress bar). This parameter is optional. See Notes section +below for further information.
+
progress_bar_ascii : str
+
ASCII value to use when constructing the visual aspect of the progress bar (straight +passthrough of the ascii parameter in a tqdm progress bar). This parameter is +optional. See Notes section below for further details.
+
timeout : int
+
Represents how many seconds to wait for the API to send data before giving up. The +default is 10 seconds, or the api_timeout value in the super class' PyAuroraX +object. This parameter is optional.
+
+

Returns

+

A FileDownloadResult +object containing details about what data files were downloaded.

+

Raises

+
+
AuroraXDownloadError
+
an error was encountered while downloading a +specific file
+
AuroraXAPIError
+
an API error was encountered
+
+

Notes:

+

The progress_bar_* parameters can be used to enable/disable/adjust the progress bar. +Excluding the progress_bar_disable parameter, all others are straight pass-throughs +to the tqdm progress bar function. The progress_bar_ncols parameter allows for +adjusting the width. The progress_bar_ascii parameter allows for adjusting the appearance +of the progress bar. And the progress_bar_desc parameter allows for adjusting the +description at the beginning of the progress bar. Further details can be found on the +tqdm documentation.

+

Data downloading will use the download_data_root_path variable within the super class' +object (PyAuroraX) to determine where to save data to. If +you'd like to change this path to somewhere else you can change that variable before your +download() call, like so:

+
import pyaurorax
+aurorax = pyaurorax.PyAuroraX()
+aurorax.data_download_root_path = "some_new_path"
+aurorax.data.download(dataset_name, start, end)
+
+
+ +Expand source code + +
def download_using_urls(self,
+                        file_listing_response: FileListingResponse,
+                        n_parallel: int = __DEFAULT_DOWNLOAD_N_PARALLEL,
+                        overwrite: bool = False,
+                        progress_bar_disable: bool = False,
+                        progress_bar_ncols: Optional[int] = None,
+                        progress_bar_ascii: Optional[str] = None,
+                        progress_bar_desc: Optional[str] = None,
+                        timeout: Optional[int] = None) -> FileDownloadResult:
+    """
+    Download data from the UCalgary Space Remote Sensing Open Data Platform using 
+    a FileListingResponse object. This would be used in cases where more customization 
+    is needed than the generic `download()` function. 
+    
+    One example of using this function would start with a `get_urls()` call to retrieve
+    the list of URLs available for download, then filter this list down to fewer files
+    based on some other requirement (e.g., time down-sampling such as one file per hour),
+    and lastly use this function to download the resulting custom set of URLs.
+
+    Args:
+        file_listing_response (FileListingResponse): 
+            A [`FileListingResponse`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.FileListingResponse) 
+            object returned from a `get_urls()` call, which contains a list of URLs to download 
+            for a specific dataset. This parameter is required.
+
+        n_parallel (int): 
+            Number of data files to download in parallel. Default value is 5. Adjust as needed 
+            for your internet connection. This parameter is optional.
+
+        overwrite (bool): 
+            By default, data will not be re-downloaded if it already exists locally. Use 
+            the `overwrite` parameter to force re-downloading. Default is `False`. This 
+            parameter is optional.
+
+        progress_bar_disable (bool): 
+            Disable the progress bar. Default is `False`. This parameter is optional.
+
+        progress_bar_ncols (int): 
+            Number of columns for the progress bar (straight passthrough of the `ncols` 
+            parameter in a tqdm progress bar). This parameter is optional. See Notes section
+            below for further information.
+        
+        progress_bar_ascii (str): 
+            ASCII value to use when constructing the visual aspect of the progress bar (straight 
+            passthrough of the `ascii` parameter in a tqdm progress bar). This parameter is 
+            optional. See Notes section below for further details.
+
+        timeout (int): 
+            Represents how many seconds to wait for the API to send data before giving up. The 
+            default is 10 seconds, or the `api_timeout` value in the super class' `pyaurorax.PyAuroraX`
+            object. This parameter is optional.
+
+    Returns:
+        A [`FileDownloadResult`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.FileDownloadResult) 
+        object containing details about what data files were downloaded.
+
+    Raises:
+        pyaurorax.exceptions.AuroraXDownloadError: an error was encountered while downloading a 
+            specific file
+        pyaurorax.exceptions.AuroraXAPIError: an API error was encountered
+
+    Notes:
+    --------
+    The `progress_bar_*` parameters can be used to enable/disable/adjust the progress bar. 
+    Excluding the `progress_bar_disable` parameter, all others are straight pass-throughs 
+    to the tqdm progress bar function. The `progress_bar_ncols` parameter allows for 
+    adjusting the width. The `progress_bar_ascii` parameter allows for adjusting the appearance 
+    of the progress bar. And the `progress_bar_desc` parameter allows for adjusting the 
+    description at the beginning of the progress bar. Further details can be found on the
+    [tqdm documentation](https://tqdm.github.io/docs/tqdm/#tqdm-objects).
+
+    Data downloading will use the `download_data_root_path` variable within the super class'
+    object ([`PyAuroraX`](../../index.html#pyaurorax.PyAuroraX)) to determine where to save data to. If 
+    you'd like to change this path to somewhere else you can change that variable before your
+    download() call, like so:
+
+    ```python
+    import pyaurorax
+    aurorax = pyaurorax.PyAuroraX()
+    aurorax.data_download_root_path = "some_new_path"
+    aurorax.data.download(dataset_name, start, end)
+    ```
+    """
+    try:
+        return self.__aurorax_obj.srs_obj.data.download_using_urls(
+            file_listing_response,
+            n_parallel=n_parallel,
+            overwrite=overwrite,
+            progress_bar_disable=progress_bar_disable,
+            progress_bar_ncols=progress_bar_ncols,
+            progress_bar_ascii=progress_bar_ascii,
+            progress_bar_desc=progress_bar_desc,
+            timeout=timeout,
+        )
+    except SRSDownloadError as e:
+        raise AuroraXDownloadError(e) from e
+    except SRSAPIError as e:
+        raise AuroraXAPIError(e) from e
+
+
+
+def get_urls(self, dataset_name: str, start: datetime.datetime, end: datetime.datetime, site_uid: Optional[str] = None, device_uid: Optional[str] = None, timeout: Optional[int] = None) ‑> pyucalgarysrs.data.classes.FileListingResponse +
+
+

Get URLs of data files

+

The parameters dataset_name, start, and end are required. All other parameters +are optional.

+

Note that usage of the site and device UID filters applies differently to some datasets. +For example, both fields can be used for most raw and keogram data, but only site UID can +be used for skymap datasets, and only device UID can be used for calibration datasets. If +fields are specified during a call in which site or device UID is not used, a UserWarning +is displayed to provide the user with feedback about this detail.

+

Args

+
+
dataset_name : str
+
Name of the dataset to download data for. Use the list_datasets() function +to get the possible values for this parameter. One example is "THEMIS_ASI_RAW". +Note that dataset names are case sensitive. This parameter is required.
+
start : datetime.datetime
+
Start timestamp to use (inclusive), expected to be in UTC. Any timezone data +will be ignored. This parameter is required.
+
end : datetime.datetime
+
End timestamp to use (inclusive), expected to be in UTC. Any timezone data +will be ignored. This parameter is required.
+
site_uid : str
+
The site UID to filter for. If specified, data will be downloaded for only the +site matching the given value. If excluded, data for all available sites will +be downloaded. An example value could be 'atha', meaning all data from the +Athabasca observatory will be downloaded for the given dataset name, start, and +end times. This parameter is optional.
+
device_uid : str
+
The device UID to filter for. If specified, data will be downloaded for only the +device matching the given value. If excluded, data for all available devices will +be downloaded. An example value could be 'themis02', meaning all data matching that +device will be downloaded for the given dataset name, start, and end times. This +parameter is optional.
+
timeout : int
+
Represents how many seconds to wait for the API to send data before giving up. The +default is 10 seconds, or the api_timeout value in the super class' PyAuroraX +object. This parameter is optional.
+
+

Returns

+

A FileListingResponse +object containing a list of the available URLs, among other values.

+

Raises

+
+
AuroraXAPIError
+
an API error was encountered
+
+
+ +Expand source code + +
def get_urls(self,
+             dataset_name: str,
+             start: datetime.datetime,
+             end: datetime.datetime,
+             site_uid: Optional[str] = None,
+             device_uid: Optional[str] = None,
+             timeout: Optional[int] = None) -> FileListingResponse:
+    """
+    Get URLs of data files
+
+    The parameters `dataset_name`, `start`, and `end` are required. All other parameters
+    are optional.
+
+    Note that usage of the site and device UID filters applies differently to some datasets.
+    For example, both fields can be used for most raw and keogram data, but only site UID can
+    be used for skymap datasets, and only device UID can be used for calibration datasets. If 
+    fields are specified during a call in which site or device UID is not used, a UserWarning
+    is displayed to provide the user with feedback about this detail.
+
+    Args:
+        dataset_name (str): 
+            Name of the dataset to get data file URLs for. Use the `list_datasets()` function
+            to get the possible values for this parameter. One example is "THEMIS_ASI_RAW". 
+            Note that dataset names are case sensitive. This parameter is required.
+
+        start (datetime.datetime): 
+            Start timestamp to use (inclusive), expected to be in UTC. Any timezone data 
+            will be ignored. This parameter is required.
+
+        end (datetime.datetime): 
+            End timestamp to use (inclusive), expected to be in UTC. Any timezone data 
+            will be ignored. This parameter is required.
+
+        site_uid (str): 
+            The site UID to filter for. If specified, data will be downloaded for only the 
+            site matching the given value. If excluded, data for all available sites will 
+            be downloaded. An example value could be 'atha', meaning all data from the 
+            Athabasca observatory will be downloaded for the given dataset name, start, and 
+            end times. This parameter is optional.
+
+        device_uid (str): 
+            The device UID to filter for. If specified, data will be downloaded for only the
+            device matching the given value. If excluded, data for all available devices will
+            be downloaded. An example value could be 'themis02', meaning all data matching that
+            device will be downloaded for the given dataset name, start, and end times. This
+            parameter is optional.
+
+        timeout (int): 
+            Represents how many seconds to wait for the API to send data before giving up. The 
+            default is 10 seconds, or the `api_timeout` value in the super class' `pyaurorax.PyAuroraX`
+            object. This parameter is optional.
+
+    Returns:
+        A [`FileListingResponse`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.FileListingResponse)
+        object containing a list of the available URLs, among other values.
+
+    Raises:
+        pyaurorax.exceptions.AuroraXAPIError: an API error was encountered
+    """
+    try:
+        return self.__aurorax_obj.srs_obj.data.get_urls(
+            dataset_name,
+            start,
+            end,
+            site_uid=site_uid,
+            device_uid=device_uid,
+            timeout=timeout,
+        )
+    except SRSAPIError as e:
+        raise AuroraXAPIError(e) from e
+
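+
+As a quick illustration of the filtering behaviour described above, the sketch below requests URLs
+for a single site only. Values are placeholders; for calibration datasets the `device_uid` filter
+would be used instead, since `site_uid` does not apply there.
+
+```python
+import datetime
+import pyaurorax
+
+aurorax = pyaurorax.PyAuroraX()
+
+# placeholder dataset name and time range; use list_datasets() to discover valid names
+file_listing = aurorax.data.ucalgary.get_urls(
+    "THEMIS_ASI_RAW",
+    datetime.datetime(2023, 1, 1, 6, 0, 0),
+    datetime.datetime(2023, 1, 1, 6, 59, 59),
+    site_uid="atha",  # only the Athabasca site
+)
+print(file_listing)
+```
+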
+
+
+def is_read_supported(self, dataset_name: str) ‑> bool +
+
+

Check if a given dataset has file reading support.

+

Not all datasets available in the UCalgary Space Remote Sensing Open Data Platform +have special readfile routines in this library. This is because some datasets are +in basic formats such as JPG or PNG, so unique functions aren't necessary. We leave +it up to the user to open these basic files in whichever way they prefer. Use the +list_supported_read_datasets() function to see all datasets that have special +file reading functionality in this library.

+

Args

+
+
dataset_name : str
+
The dataset name to check if file reading is supported. This parameter +is required.
+
+

Returns

+

Boolean indicating if file reading is supported.

+
+ +Expand source code + +
def is_read_supported(self, dataset_name: str) -> bool:
+    """
+    Check if a given dataset has file reading support. 
+    
+    Not all datasets available in the UCalgary Space Remote Sensing Open Data Platform 
+    have special readfile routines in this library. This is because some datasets are 
+    in basic formats such as JPG or PNG, so unique functions aren't necessary. We leave 
+    it up to the user to open these basic files in whichever way they prefer. Use the 
+    `list_supported_read_datasets()` function to see all datasets that have special
+    file reading functionality in this library.
+
+    Args:
+        dataset_name (str): 
+            The dataset name to check if file reading is supported. This parameter 
+            is required.
+    
+    Returns:
+        Boolean indicating if file reading is supported.
+    """
+    return self.__aurorax_obj.srs_obj.data.is_read_supported(dataset_name)
+
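+
+A short sketch of checking read support before attempting to read files; the dataset name is a
+placeholder and the `aurorax.data.ucalgary` access path is assumed from this module.
+
+```python
+import pyaurorax
+
+aurorax = pyaurorax.PyAuroraX()
+
+# returns a boolean; datasets distributed as plain JPG/PNG, for example, are not supported
+if aurorax.data.ucalgary.is_read_supported("THEMIS_ASI_RAW"):
+    print("read() can be used for this dataset")
+```
+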
+
+
+def list_datasets(self, name: Optional[str] = None, timeout: Optional[int] = None) ‑> List[pyucalgarysrs.data.classes.Dataset] +
+
+

List available datasets

+

Args

+
+
name : str
+
Supply a name used for filtering. If that name is found in the available dataset +names received from the API, it will be included in the results. This parameter is +optional.
+
timeout : int
+
Represents how many seconds to wait for the API to send data before giving up. The +default is 10 seconds, or the api_timeout value in the super class' PyAuroraX +object. This parameter is optional.
+
+

Returns

+

A list of Dataset +objects.

+

Raises

+
+
AuroraXAPIError
+
An API error was encountered.
+
+
+ +Expand source code + +
def list_datasets(self, name: Optional[str] = None, timeout: Optional[int] = None) -> List[Dataset]:
+    """
+    List available datasets
+
+    Args:
+        name (str): 
+            Supply a name used for filtering. If that name is found in the available dataset 
+            names received from the API, it will be included in the results. This parameter is
+            optional.
+        
+        timeout (int): 
+            Represents how many seconds to wait for the API to send data before giving up. The 
+            default is 10 seconds, or the `api_timeout` value in the super class' `pyaurorax.PyAuroraX`
+            object. This parameter is optional.
+        
+    Returns:
+        A list of [`Dataset`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Dataset)
+        objects.
+    
+    Raises:
+        pyaurorax.exceptions.AuroraXAPIError: An API error was encountered.
+    """
+    try:
+        return self.__aurorax_obj.srs_obj.data.list_datasets(name=name, timeout=timeout)
+    except SRSAPIError as e:
+        raise AuroraXAPIError(e) from e
+
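+
+For example, the optional `name` filter can be used to narrow the listing. This is a sketch only;
+the filter value is illustrative, and printing `d.name` assumes the returned Dataset objects expose
+a `name` attribute.
+
+```python
+import pyaurorax
+
+aurorax = pyaurorax.PyAuroraX()
+
+# list every dataset whose name matches the supplied filter string
+datasets = aurorax.data.ucalgary.list_datasets(name="THEMIS_ASI")
+for d in datasets:
+    print(d.name)
+```
+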
+
+
+def list_observatories(self, instrument_array: Literal['themis_asi', 'rego', 'trex_rgb', 'trex_nir', 'trex_blue'], uid: Optional[str] = None, timeout: Optional[int] = None) ‑> List[pyucalgarysrs.data.classes.Observatory] +
+
+

List information about observatories

+

Args

+
+
instrument_array : str
+
The instrument array to list observatories for. Valid values are: themis_asi, rego, +trex_rgb, trex_nir, and trex_blue.
+
uid : str
+
Supply an observatory unique identifier used for filtering (usually a 4-letter site code). If that UID +is found in the available observatories received from the API, it will be included in the results. This +parameter is optional.
+
timeout : int
+
Represents how many seconds to wait for the API to send data before giving up. The +default is 10 seconds, or the api_timeout value in the super class' PyAuroraX +object. This parameter is optional.
+
+

Returns

+

A list of Observatory +objects.

+

Raises

+
+
AuroraXAPIError
+
An API error was encountered.
+
+
+ +Expand source code + +
def list_observatories(self,
+                       instrument_array: Literal["themis_asi", "rego", "trex_rgb", "trex_nir", "trex_blue"],
+                       uid: Optional[str] = None,
+                       timeout: Optional[int] = None) -> List[Observatory]:
+    """
+    List information about observatories
+
+    Args:
+        instrument_array (str): 
+            The instrument array to list observatories for. Valid values are: themis_asi, rego, 
+            trex_rgb, trex_nir, and trex_blue.
+
+        uid (str): 
+            Supply an observatory unique identifier used for filtering (usually a 4-letter site code). If that UID 
+            is found in the available observatories received from the API, it will be included in the results. This 
+            parameter is optional.
+        
+        timeout (int): 
+            Represents how many seconds to wait for the API to send data before giving up. The 
+            default is 10 seconds, or the `api_timeout` value in the super class' `pyaurorax.PyAuroraX`
+            object. This parameter is optional.
+        
+    Returns:
+        A list of [`Observatory`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Observatory)
+        objects.
+    
+    Raises:
+        pyaurorax.exceptions.AuroraXAPIError: An API error was encountered.
+    """
+    try:
+        return self.__aurorax_obj.srs_obj.data.list_observatories(instrument_array, uid=uid, timeout=timeout)
+    except SRSAPIError as e:
+        raise AuroraXAPIError(e) from e
+
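+
+A minimal sketch of listing observatories for one instrument array; the `uid` value is illustrative
+and can be omitted to list all observatories for that array.
+
+```python
+import pyaurorax
+
+aurorax = pyaurorax.PyAuroraX()
+
+# list THEMIS ASI observatories, optionally filtered to a single site code
+observatories = aurorax.data.ucalgary.list_observatories("themis_asi", uid="atha")
+for obs in observatories:
+    print(obs)
+```
+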
+
+
+def list_supported_read_datasets(self) ‑> List[str] +
+
+

List the datasets which have file reading capabilities supported.

+

Returns

+

A list of the dataset names with file reading support.

+
+ +Expand source code + +
def list_supported_read_datasets(self) -> List[str]:
+    """
+    List the datasets which have file reading capabilities supported.
+
+    Returns:
+        A list of the dataset names with file reading support.
+    """
+    return self.__aurorax_obj.srs_obj.data.list_supported_read_datasets()
+
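+
+For instance, a quick sketch (same assumed access path as above):
+
+```python
+import pyaurorax
+
+aurorax = pyaurorax.PyAuroraX()
+
+# print the name of every dataset that read() knows how to parse
+for dataset_name in aurorax.data.ucalgary.list_supported_read_datasets():
+    print(dataset_name)
+```
+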
+
+
+def read(self, dataset: pyucalgarysrs.data.classes.Dataset, file_list: Union[List[str], List[pathlib.Path], str, pathlib.Path], n_parallel: int = 1, first_record: bool = False, no_metadata: bool = False, quiet: bool = False) ‑> pyucalgarysrs.data.classes.Data +
+
+

Read in data files for a given dataset. Note that only one type of dataset's data +should be read in using a single call.

+

Args

+
+
dataset : Dataset
+
The dataset object for which the files are associated with. This parameter is +required.
+
file_list : List[str], List[Path], str, Path
+
The files to read in. Absolute paths are recommended, but not technically +necessary. This can be a single string for a file, or a list of strings to read +in multiple files. This parameter is required.
+
n_parallel : int
+
Number of data files to read in parallel using multiprocessing. Default value +is 1. Adjust according to your computer's available resources. This parameter +is optional.
+
first_record : bool
+
Only read in the first record in each file. This is the same as the first_frame +parameter in the themis-imager-readfile and trex-imager-readfile libraries, and +is a read optimization if you only need one image per minute, as opposed to the +full temporal resolution of data (e.g., 3sec cadence). This parameter is optional.
+
no_metadata : bool
+
Skip reading of metadata. This is a minor optimization if the metadata is not needed. +Default is False. This parameter is optional.
+
quiet : bool
+
Do not print out errors while reading data files, if any are encountered. Any files +that encounter errors will be, as usual, accessible via the problematic_files +attribute of the returned Data object. This parameter is optional.
+
+

Returns

+

A Data +object containing the data read in, among other values.

+

Raises

+
+
AuroraXUnsupportedReadError
+
an unsupported dataset was used when +trying to read files.
+
AuroraXError
+
a generic read error was encountered
+
+

Notes:

+

For users who are familiar with the themis-imager-readfile and trex-imager-readfile +libraries, the read function provides a near-identical usage. Further improvements have +been integrated, and those libraries are anticipated to be deprecated at some point in the +future.

+
+ +Expand source code + +
def read(self,
+         dataset: Dataset,
+         file_list: Union[List[str], List[Path], str, Path],
+         n_parallel: int = 1,
+         first_record: bool = False,
+         no_metadata: bool = False,
+         quiet: bool = False) -> Data:
+    """
+    Read in data files for a given dataset. Note that only one type of dataset's data
+    should be read in using a single call.
+
+    Args:
+        dataset (Dataset): 
+            The dataset object for which the files are associated with. This parameter is
+            required.
+        
+        file_list (List[str], List[Path], str, Path): 
+            The files to read in. Absolute paths are recommended, but not technically
+            necessary. This can be a single string for a file, or a list of strings to read
+            in multiple files. This parameter is required.
+
+        n_parallel (int): 
+            Number of data files to read in parallel using multiprocessing. Default value 
+            is 1. Adjust according to your computer's available resources. This parameter 
+            is optional.
+        
+        first_record (bool): 
+            Only read in the first record in each file. This is the same as the first_frame
+            parameter in the themis-imager-readfile and trex-imager-readfile libraries, and
+            is a read optimization if you only need one image per minute, as opposed to the
+            full temporal resolution of data (e.g., 3sec cadence). This parameter is optional.
+        
+        no_metadata (bool): 
+            Skip reading of metadata. This is a minor optimization if the metadata is not needed.
+            Default is `False`. This parameter is optional.
+        
+        quiet (bool): 
+            Do not print out errors while reading data files, if any are encountered. Any files
+            that encounter errors will be, as usual, accessible via the `problematic_files` 
+            attribute of the returned `Data` object. This parameter is optional.
+    
+    Returns:
+        A [`Data`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Data) 
+        object containing the data read in, among other values.
+    
+    Raises:
+        pyaurorax.exceptions.AuroraXUnsupportedReadError: an unsupported dataset was used when
+            trying to read files.
+        pyaurorax.exceptions.AuroraXError: a generic read error was encountered
+
+    Notes:
+    ---------
+    For users who are familiar with the themis-imager-readfile and trex-imager-readfile
+    libraries, the read function provides a near-identical usage. Further improvements have 
+    been integrated, and those libraries are anticipated to be deprecated at some point in the
+    future.
+    """
+    # NOTE: we do not wrap the exceptions here, instead we pass the call along
+    # to the ReadManager object since the method and exception catching is
+    # implemented there. No need to duplicate the exception handling logic.
+    return self.__readers.read(
+        dataset,
+        file_list,
+        n_parallel=n_parallel,
+        first_record=first_record,
+        no_metadata=no_metadata,
+        quiet=quiet,
+    )
+
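+
+Putting the pieces together, a typical flow is to download data and then pass the dataset object and
+file list to `read()`. This is a sketch only: the dataset name, time range, and site are placeholders,
+and the `dataset`/`filenames` attribute names on the download result are assumptions made for
+illustration.
+
+```python
+import datetime
+import pyaurorax
+
+aurorax = pyaurorax.PyAuroraX()
+start = datetime.datetime(2023, 1, 1, 6, 0, 0)
+end = datetime.datetime(2023, 1, 1, 6, 9, 59)
+
+# download ten minutes of THEMIS ASI data for one site (placeholder values)
+res = aurorax.data.ucalgary.download("THEMIS_ASI_RAW", start, end, site_uid="atha")
+
+# read the downloaded files in parallel (attribute names assumed for illustration)
+data = aurorax.data.ucalgary.read(res.dataset, res.filenames, n_parallel=2)
+print(data)
+```
+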
+
+
+
+
+
+
+ +
+ + + \ No newline at end of file diff --git a/docs/code/pyaurorax_api_reference/pyaurorax/data/ucalgary/read/index.html b/docs/code/pyaurorax_api_reference/pyaurorax/data/ucalgary/read/index.html new file mode 100644 index 0000000..85c14af --- /dev/null +++ b/docs/code/pyaurorax_api_reference/pyaurorax/data/ucalgary/read/index.html @@ -0,0 +1,2199 @@ + + + + + + +pyaurorax.data.ucalgary.read API documentation + + + + + + + + + + + +
+
+
+

Module pyaurorax.data.ucalgary.read

+
+
+
+ +Expand source code + +
# Copyright 2024 University of Calgary
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from pathlib import Path
+from typing import TYPE_CHECKING, List, Union, Optional
+from pyucalgarysrs.data import Dataset, Data
+from pyucalgarysrs.exceptions import SRSError, SRSUnsupportedReadError
+from ....exceptions import AuroraXError, AuroraXUnsupportedReadError
+if TYPE_CHECKING:
+    from ....pyaurorax import PyAuroraX
+
+
+class ReadManager:
+    """
+    The ReadManager object is initialized within every PyAuroraX object. It acts as a way to access 
+    the submodules and carry over configuration information in the super class.
+    """
+
+    def __init__(self, aurorax_obj):
+        self.__aurorax_obj: PyAuroraX = aurorax_obj
+
+    def list_supported_datasets(self) -> List[str]:
+        """
+        List the datasets which have file reading capabilities supported.
+
+        Returns:
+            A list of the dataset names with file reading support.
+        """
+        return self.__aurorax_obj.srs_obj.data.readers.list_supported_datasets()
+
+    def is_supported(self, dataset_name: str) -> bool:
+        """
+        Check if a given dataset has file reading support. 
+        
+        Not all datasets available in the UCalgary Space Remote Sensing Open Data Platform 
+        have special readfile routines in this library. This is because some datasets are 
+        in basic formats such as JPG or PNG, so unique functions aren't necessary. We leave 
+        it up to the user to open these basic files in whichever way they prefer. Use the 
+        `list_supported_read_datasets()` function to see all datasets that have special
+        file reading functionality in this library.
+
+        Args:
+            dataset_name (str): 
+                The dataset name to check if file reading is supported. This parameter 
+                is required.
+        
+        Returns:
+            Boolean indicating if file reading is supported.
+        """
+        return self.__aurorax_obj.srs_obj.data.readers.is_supported(dataset_name)
+
+    def read(self,
+             dataset: Dataset,
+             file_list: Union[List[str], List[Path], str, Path],
+             n_parallel: int = 1,
+             first_record: bool = False,
+             no_metadata: bool = False,
+             quiet: bool = False) -> Data:
+        """
+        Read in data files for a given dataset. Note that only one type of dataset's data
+        should be read in using a single call.
+
+        Args:
+            dataset (Dataset): 
+                The dataset object for which the files are associated with. This parameter is
+                required.
+            
+            file_list (List[str], List[Path], str, Path): 
+                The files to read in. Absolute paths are recommended, but not technically
+                necessary. This can be a single string for a file, or a list of strings to read
+                in multiple files. This parameter is required.
+
+            n_parallel (int): 
+                Number of data files to read in parallel using multiprocessing. Default value 
+                is 1. Adjust according to your computer's available resources. This parameter 
+                is optional.
+            
+            first_record (bool): 
+                Only read in the first record in each file. This is the same as the first_frame
+                parameter in the themis-imager-readfile and trex-imager-readfile libraries, and
+                is a read optimization if you only need one image per minute, as opposed to the
+                full temporal resolution of data (e.g., 3sec cadence). This parameter is optional.
+            
+            no_metadata (bool): 
+                Skip reading of metadata. This is a minor optimization if the metadata is not needed.
+                Default is `False`. This parameter is optional.
+            
+            quiet (bool): 
+                Do not print out errors while reading data files, if any are encountered. Any files
+                that encounter errors will be, as usual, accessible via the `problematic_files` 
+                attribute of the returned `Data` object. This parameter is optional.
+        
+        Returns:
+            A [`Data`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Data) 
+            object containing the data read in, among other values.
+        
+        Raises:
+            pyaurorax.exceptions.AuroraXUnsupportedReadError: an unsupported dataset was used when
+                trying to read files.
+            pyaurorax.exceptions.AuroraXError: a generic read error was encountered
+
+        Notes:
+        ---------
+        For users who are familiar with the themis-imager-readfile and trex-imager-readfile
+        libraries, the read function provides a near-identical usage. Further improvements have 
+        been integrated, and those libraries are anticipated to be deprecated at some point in the
+        future.
+        """
+        try:
+            return self.__aurorax_obj.srs_obj.data.readers.read(
+                dataset,
+                file_list,
+                n_parallel=n_parallel,
+                first_record=first_record,
+                no_metadata=no_metadata,
+                quiet=quiet,
+            )
+        except SRSUnsupportedReadError as e:
+            raise AuroraXUnsupportedReadError(e) from e
+        except SRSError as e:
+            raise AuroraXError(e) from e
+
+    def read_themis(self,
+                    file_list: Union[List[str], List[Path], str, Path],
+                    n_parallel: int = 1,
+                    first_record: bool = False,
+                    no_metadata: bool = False,
+                    quiet: bool = False,
+                    dataset: Optional[Dataset] = None) -> Data:
+        """
+        Read in THEMIS ASI raw data (stream0 full.pgm* files).
+
+        Args:
+            file_list (List[str], List[Path], str, Path): 
+                The files to read in. Absolute paths are recommended, but not technically
+                necessary. This can be a single string for a file, or a list of strings to read
+                in multiple files. This parameter is required.
+
+            n_parallel (int): 
+                Number of data files to read in parallel using multiprocessing. Default value 
+                is 1. Adjust according to your computer's available resources. This parameter 
+                is optional.
+            
+            first_record (bool): 
+                Only read in the first record in each file. This is the same as the first_frame
+                parameter in the themis-imager-readfile and trex-imager-readfile libraries, and
+                is a read optimization if you only need one image per minute, as opposed to the
+                full temporal resolution of data (e.g., 3sec cadence). This parameter is optional.
+            
+            no_metadata (bool): 
+                Skip reading of metadata. This is a minor optimization if the metadata is not needed.
+                Default is `False`. This parameter is optional.
+            
+            quiet (bool): 
+                Do not print out errors while reading data files, if any are encountered. Any files
+                that encounter errors will be, as usual, accessible via the `problematic_files` 
+                attribute of the returned `Data` object. This parameter is optional.
+
+            dataset (Dataset): 
+                The dataset object for which the files are associated with. This parameter is
+                optional.
+
+        Returns:
+            A [`Data`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Data) 
+            object containing the data read in, among other values.
+        
+        Raises:
+            pyaurorax.exceptions.AuroraXError: a generic read error was encountered
+        """
+        try:
+            return self.__aurorax_obj.srs_obj.data.readers.read_themis(
+                file_list,
+                n_parallel=n_parallel,
+                first_record=first_record,
+                no_metadata=no_metadata,
+                quiet=quiet,
+                dataset=dataset,
+            )
+        except SRSError as e:
+            raise AuroraXError(e) from e
+
+    def read_rego(self,
+                  file_list: Union[List[str], List[Path], str, Path],
+                  n_parallel: int = 1,
+                  first_record: bool = False,
+                  no_metadata: bool = False,
+                  quiet: bool = False,
+                  dataset: Optional[Dataset] = None) -> Data:
+        """
+        Read in REGO raw data (stream0 pgm* files).
+
+        Args:
+            file_list (List[str], List[Path], str, Path): 
+                The files to read in. Absolute paths are recommended, but not technically
+                necessary. This can be a single string for a file, or a list of strings to read
+                in multiple files. This parameter is required.
+
+            n_parallel (int): 
+                Number of data files to read in parallel using multiprocessing. Default value 
+                is 1. Adjust according to your computer's available resources. This parameter 
+                is optional.
+            
+            first_record (bool): 
+                Only read in the first record in each file. This is the same as the first_frame
+                parameter in the themis-imager-readfile and trex-imager-readfile libraries, and
+                is a read optimization if you only need one image per minute, as opposed to the
+                full temporal resolution of data (e.g., 3sec cadence). This parameter is optional.
+            
+            no_metadata (bool): 
+                Skip reading of metadata. This is a minor optimization if the metadata is not needed.
+                Default is `False`. This parameter is optional.
+            
+            quiet (bool): 
+                Do not print out errors while reading data files, if any are encountered. Any files
+                that encounter errors will be, as usual, accessible via the `problematic_files` 
+                attribute of the returned `Data` object. This parameter is optional.
+
+            dataset (Dataset): 
+                The dataset object for which the files are associated with. This parameter is
+                optional.
+
+        Returns:
+            A [`Data`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Data) 
+            object containing the data read in, among other values.
+        
+        Raises:
+            pyaurorax.exceptions.AuroraXError: a generic read error was encountered
+        """
+        return self.__aurorax_obj.srs_obj.data.readers.read_rego(
+            file_list,
+            n_parallel=n_parallel,
+            first_record=first_record,
+            no_metadata=no_metadata,
+            quiet=quiet,
+            dataset=dataset,
+        )
+
+    def read_trex_nir(self,
+                      file_list: Union[List[str], List[Path], str, Path],
+                      n_parallel: int = 1,
+                      first_record: bool = False,
+                      no_metadata: bool = False,
+                      quiet: bool = False,
+                      dataset: Optional[Dataset] = None) -> Data:
+        """
+        Read in TREx near-infrared (NIR) raw data (stream0 pgm* files).
+
+        Args:
+            file_list (List[str], List[Path], str, Path): 
+                The files to read in. Absolute paths are recommended, but not technically
+                necessary. This can be a single string for a file, or a list of strings to read
+                in multiple files. This parameter is required.
+
+            n_parallel (int): 
+                Number of data files to read in parallel using multiprocessing. Default value 
+                is 1. Adjust according to your computer's available resources. This parameter 
+                is optional.
+            
+            first_record (bool): 
+                Only read in the first record in each file. This is the same as the first_frame
+                parameter in the themis-imager-readfile and trex-imager-readfile libraries, and
+                is a read optimization if you only need one image per minute, as opposed to the
+                full temporal resolution of data (e.g., 3sec cadence). This parameter is optional.
+            
+            no_metadata (bool): 
+                Skip reading of metadata. This is a minor optimization if the metadata is not needed.
+                Default is `False`. This parameter is optional.
+            
+            quiet (bool): 
+                Do not print out errors while reading data files, if any are encountered. Any files
+                that encounter errors will be, as usual, accessible via the `problematic_files` 
+                attribute of the returned `Data` object. This parameter is optional.
+
+            dataset (Dataset): 
+                The dataset object for which the files are associated with. This parameter is
+                optional.
+
+        Returns:
+            A [`Data`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Data) 
+            object containing the data read in, among other values.
+        
+        Raises:
+            pyaurorax.exceptions.AuroraXError: a generic read error was encountered
+        """
+        return self.__aurorax_obj.srs_obj.data.readers.read_trex_nir(
+            file_list,
+            n_parallel=n_parallel,
+            first_record=first_record,
+            no_metadata=no_metadata,
+            quiet=quiet,
+            dataset=dataset,
+        )
+
+    def read_trex_blue(self,
+                       file_list: Union[List[str], List[Path], str, Path],
+                       n_parallel: int = 1,
+                       first_record: bool = False,
+                       no_metadata: bool = False,
+                       quiet: bool = False,
+                       dataset: Optional[Dataset] = None) -> Data:
+        """
+        Read in TREx Blueline raw data (stream0 pgm* files).
+
+        Args:
+            file_list (List[str], List[Path], str, Path): 
+                The files to read in. Absolute paths are recommended, but not technically
+                necessary. This can be a single string for a file, or a list of strings to read
+                in multiple files. This parameter is required.
+
+            n_parallel (int): 
+                Number of data files to read in parallel using multiprocessing. Default value 
+                is 1. Adjust according to your computer's available resources. This parameter 
+                is optional.
+            
+            first_record (bool): 
+                Only read in the first record in each file. This is the same as the first_frame
+                parameter in the themis-imager-readfile and trex-imager-readfile libraries, and
+                is a read optimization if you only need one image per minute, as opposed to the
+                full temporal resolution of data (e.g., 3sec cadence). This parameter is optional.
+            
+            no_metadata (bool): 
+                Skip reading of metadata. This is a minor optimization if the metadata is not needed.
+                Default is `False`. This parameter is optional.
+            
+            quiet (bool): 
+                Do not print out errors while reading data files, if any are encountered. Any files
+                that encounter errors will be, as usual, accessible via the `problematic_files` 
+                attribute of the returned `Data` object. This parameter
+                is optional.
+
+            dataset (Dataset): 
+                The dataset object for which the files are associated with. This parameter is
+                optional.
+
+        Returns:
+            A [`Data`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Data) 
+            object containing the data read in, among other values.
+        
+        Raises:
+            pyaurorax.exceptions.AuroraXError: a generic read error was encountered
+        """
+        return self.__aurorax_obj.srs_obj.data.readers.read_trex_blue(
+            file_list,
+            n_parallel=n_parallel,
+            first_record=first_record,
+            no_metadata=no_metadata,
+            quiet=quiet,
+            dataset=dataset,
+        )
+
+    def read_trex_rgb(self,
+                      file_list: Union[List[str], List[Path], str, Path],
+                      n_parallel: int = 1,
+                      first_record: bool = False,
+                      no_metadata: bool = False,
+                      quiet: bool = False,
+                      dataset: Optional[Dataset] = None) -> Data:
+        """
+        Read in TREx RGB raw data (stream0 h5, stream0.burst png.tar, unstable stream0 and 
+        stream0.colour pgm* and png*).
+
+        Args:
+            file_list (List[str], List[Path], str, Path): 
+                The files to read in. Absolute paths are recommended, but not technically
+                necessary. This can be a single string for a file, or a list of strings to read
+                in multiple files. This parameter is required.
+
+            n_parallel (int): 
+                Number of data files to read in parallel using multiprocessing. Default value 
+                is 1. Adjust according to your computer's available resources. This parameter 
+                is optional.
+            
+            first_record (bool): 
+                Only read in the first record in each file. This is the same as the first_frame
+                parameter in the themis-imager-readfile and trex-imager-readfile libraries, and
+                is a read optimization if you only need one image per minute, as opposed to the
+                full temporal resolution of data (e.g., 3sec cadence). This parameter is optional.
+            
+            no_metadata (bool): 
+                Skip reading of metadata. This is a minor optimization if the metadata is not needed.
+                Default is `False`. This parameter is optional.
+            
+            quiet (bool): 
+                Do not print out errors while reading data files, if any are encountered. Any files
+                that encounter errors will be, as usual, accessible via the `problematic_files` 
+                attribute of the returned `Data` object. This parameter is optional.
+
+            dataset (Dataset): 
+                The dataset object for which the files are associated with. This parameter is
+                optional.
+
+        Returns:
+            A [`Data`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Data) 
+            object containing the data read in, among other values.
+        
+        Raises:
+            pyaurorax.exceptions.AuroraXError: a generic read error was encountered
+        """
+        return self.__aurorax_obj.srs_obj.data.readers.read_trex_rgb(
+            file_list,
+            n_parallel=n_parallel,
+            first_record=first_record,
+            no_metadata=no_metadata,
+            quiet=quiet,
+            dataset=dataset,
+        )
+
+    def read_trex_spectrograph(self,
+                               file_list: Union[List[str], List[Path], str, Path],
+                               n_parallel: int = 1,
+                               first_record: bool = False,
+                               no_metadata: bool = False,
+                               quiet: bool = False,
+                               dataset: Optional[Dataset] = None) -> Data:
+        """
+        Read in TREx Spectrograph raw data (stream0 pgm* files).
+
+        Args:
+            file_list (List[str], List[Path], str, Path): 
+                The files to read in. Absolute paths are recommended, but not technically
+                necessary. This can be a single string for a file, or a list of strings to read
+                in multiple files. This parameter is required.
+
+            n_parallel (int): 
+                Number of data files to read in parallel using multiprocessing. Default value 
+                is 1. Adjust according to your computer's available resources. This parameter 
+                is optional.
+            
+            first_record (bool): 
+                Only read in the first record in each file. This is the same as the first_frame
+                parameter in the themis-imager-readfile and trex-imager-readfile libraries, and
+                is a read optimization if you only need one image per minute, as opposed to the
+                full temporal resolution of data (e.g., 3sec cadence). This parameter is optional.
+            
+            no_metadata (bool): 
+                Skip reading of metadata. This is a minor optimization if the metadata is not needed.
+                Default is `False`. This parameter is optional.
+            
+            quiet (bool): 
+                Do not print out errors while reading data files, if any are encountered. Any files
+                that encounter errors will be, as usual, accessible via the `problematic_files` 
+                attribute of the returned `Data` object. This parameter is optional.
+
+            dataset (Dataset): 
+                The dataset object for which the files are associated with. This parameter is
+                optional.
+
+        Returns:
+            A [`Data`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Data) 
+            object containing the data read in, among other values.
+        
+        Raises:
+            pyaurorax.exceptions.AuroraXError: a generic read error was encountered
+        """
+        return self.__aurorax_obj.srs_obj.data.readers.read_trex_spectrograph(
+            file_list,
+            n_parallel=n_parallel,
+            first_record=first_record,
+            no_metadata=no_metadata,
+            quiet=quiet,
+            dataset=dataset,
+        )
+
+    def read_skymap(
+        self,
+        file_list: Union[List[str], List[Path], str, Path],
+        n_parallel: int = 1,
+        quiet: bool = False,
+        dataset: Optional[Dataset] = None,
+    ) -> Data:
+        """
+        Read in UCalgary skymap files.
+
+        Args:
+            file_list (List[str], List[Path], str, Path): 
+                The files to read in. Absolute paths are recommended, but not technically
+                necessary. This can be a single string for a file, or a list of strings to read
+                in multiple files. This parameter is required.
+
+            n_parallel (int): 
+                Number of data files to read in parallel using multiprocessing. Default value 
+                is 1. Adjust according to your computer's available resources. This parameter 
+                is optional.
+                                    
+            quiet (bool): 
+                Do not print out errors while reading skymap files, if any are encountered. Any 
+                files that encounter errors will be, as usual, accessible via the `problematic_files` 
+                attribute of the returned `Skymap` object. This parameter is optional.
+
+            dataset (Dataset): 
+                The dataset object for which the files are associated with. This parameter is
+                optional.
+
+        Returns:
+            A [`Data`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Data) 
+            object containing the data read in, among other values.
+        
+        Raises:
+            pyaurorax.exceptions.AuroraXError: a generic read error was encountered        
+        """
+        return self.__aurorax_obj.srs_obj.data.readers.read_skymap(
+            file_list,
+            n_parallel=n_parallel,
+            quiet=quiet,
+            dataset=dataset,
+        )
+
+    def read_calibration(
+        self,
+        file_list: Union[List[str], List[Path], str, Path],
+        n_parallel: int = 1,
+        quiet: bool = False,
+        dataset: Optional[Dataset] = None,
+    ) -> Data:
+        """
+        Read in UCalgary calibration files.
+
+        Args:
+            file_list (List[str], List[Path], str, Path): 
+                The files to read in. Absolute paths are recommended, but not technically
+                necessary. This can be a single string for a file, or a list of strings to read
+                in multiple files. This parameter is required.
+
+            n_parallel (int): 
+                Number of data files to read in parallel using multiprocessing. Default value 
+                is 1. Adjust according to your computer's available resources. This parameter 
+                is optional.
+
+            quiet (bool): 
+                Do not print out errors while reading calibration files, if any are encountered. 
+                Any files that encounter errors will be, as usual, accessible via the `problematic_files` 
+                attribute of the returned `Calibration` object. This parameter is optional.
+
+            dataset (Dataset): 
+                The dataset object for which the files are associated with. This parameter is
+                optional.
+
+        Returns:
+            A [`Data`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Data) 
+            object containing the data read in, among other values.
+        
+        Raises:
+            pyaurorax.exceptions.AuroraXError: a generic read error was encountered        
+        """
+        return self.__aurorax_obj.srs_obj.data.readers.read_calibration(
+            file_list,
+            n_parallel=n_parallel,
+            quiet=quiet,
+            dataset=dataset,
+        )
+
+
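+
+The dataset-specific helpers above (`read_themis()`, `read_rego()`, the TREx readers, `read_skymap()`,
+and `read_calibration()`) can also be called directly when files are already on disk. A brief sketch,
+assuming the ReadManager is exposed as `aurorax.data.ucalgary.readers` and using a placeholder path:
+
+```python
+import glob
+import pyaurorax
+
+aurorax = pyaurorax.PyAuroraX()
+
+# placeholder path; point this at locally stored THEMIS stream0 files
+file_list = sorted(glob.glob("/path/to/themis/stream0/*_full.pgm.gz"))
+
+# read only the first frame of each one-minute file to build a quick preview
+data = aurorax.data.ucalgary.readers.read_themis(file_list, n_parallel=2, first_record=True)
+print(data)
+```
+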
+
+
+
+
+
+
+
+

Classes

+
+
+class ReadManager +(aurorax_obj) +
+
+

The ReadManager object is initialized within every PyAuroraX object. It acts as a way to access +the submodules and carry over configuration information in the super class.

+
+ +Expand source code + +
class ReadManager:
+    """
+    The ReadManager object is initialized within every PyAuroraX object. It acts as a way to access 
+    the submodules and carry over configuration information in the super class.
+    """
+
+    def __init__(self, aurorax_obj):
+        self.__aurorax_obj: PyAuroraX = aurorax_obj
+
+    def list_supported_datasets(self) -> List[str]:
+        """
+        List the datasets which have file reading capabilities supported.
+
+        Returns:
+            A list of the dataset names with file reading support.
+        """
+        return self.__aurorax_obj.srs_obj.data.readers.list_supported_datasets()
+
+    def is_supported(self, dataset_name: str) -> bool:
+        """
+        Check if a given dataset has file reading support. 
+        
+        Not all datasets available in the UCalgary Space Remote Sensing Open Data Platform 
+        have special readfile routines in this library. This is because some datasets are 
+        in basic formats such as JPG or PNG, so unique functions aren't necessary. We leave 
+        it up to the user to open these basic files in whichever way they prefer. Use the 
+        `list_supported_read_datasets()` function to see all datasets that have special
+        file reading functionality in this library.
+
+        Args:
+            dataset_name (str): 
+                The dataset name to check if file reading is supported. This parameter 
+                is required.
+        
+        Returns:
+            Boolean indicating if file reading is supported.
+        """
+        return self.__aurorax_obj.srs_obj.data.readers.is_supported(dataset_name)
+
+    def read(self,
+             dataset: Dataset,
+             file_list: Union[List[str], List[Path], str, Path],
+             n_parallel: int = 1,
+             first_record: bool = False,
+             no_metadata: bool = False,
+             quiet: bool = False) -> Data:
+        """
+        Read in data files for a given dataset. Note that only one type of dataset's data
+        should be read in using a single call.
+
+        Args:
+            dataset (Dataset): 
+                The dataset object for which the files are associated with. This parameter is
+                required.
+            
+            file_list (List[str], List[Path], str, Path): 
+                The files to read in. Absolute paths are recommended, but not technically
+                necessary. This can be a single string for a file, or a list of strings to read
+                in multiple files. This parameter is required.
+
+            n_parallel (int): 
+                Number of data files to read in parallel using multiprocessing. Default value 
+                is 1. Adjust according to your computer's available resources. This parameter 
+                is optional.
+            
+            first_record (bool): 
+                Only read in the first record in each file. This is the same as the first_frame
+                parameter in the themis-imager-readfile and trex-imager-readfile libraries, and
+                is a read optimization if you only need one image per minute, as opposed to the
+                full temporal resolution of data (e.g., 3sec cadence). This parameter is optional.
+            
+            no_metadata (bool): 
+                Skip reading of metadata. This is a minor optimization if the metadata is not needed.
+                Default is `False`. This parameter is optional.
+            
+            quiet (bool): 
+                Do not print out errors while reading data files, if any are encountered. Any files
+                that encounter errors will be, as usual, accessible via the `problematic_files` 
+                attribute of the returned `Data` object. This parameter is optional.
+        
+        Returns:
+            A [`Data`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Data) 
+            object containing the data read in, among other values.
+        
+        Raises:
+            pyaurorax.exceptions.AuroraXUnsupportedReadError: an unsupported dataset was used when
+                trying to read files.
+            pyaurorax.exceptions.AuroraXError: a generic read error was encountered
+
+        Notes:
+        ---------
+        For users who are familiar with the themis-imager-readfile and trex-imager-readfile
+        libraries, the read function provides a near-identical usage. Further improvements have 
+        been integrated, and those libraries are anticipated to be deprecated at some point in the
+        future.
+        """
+        try:
+            return self.__aurorax_obj.srs_obj.data.readers.read(
+                dataset,
+                file_list,
+                n_parallel=n_parallel,
+                first_record=first_record,
+                no_metadata=no_metadata,
+                quiet=quiet,
+            )
+        except SRSUnsupportedReadError as e:
+            raise AuroraXUnsupportedReadError(e) from e
+        except SRSError as e:
+            raise AuroraXError(e) from e
+
+    def read_themis(self,
+                    file_list: Union[List[str], List[Path], str, Path],
+                    n_parallel: int = 1,
+                    first_record: bool = False,
+                    no_metadata: bool = False,
+                    quiet: bool = False,
+                    dataset: Optional[Dataset] = None) -> Data:
+        """
+        Read in THEMIS ASI raw data (stream0 full.pgm* files).
+
+        Args:
+            file_list (List[str], List[Path], str, Path): 
+                The files to read in. Absolute paths are recommended, but not technically
+                necessary. This can be a single string for a file, or a list of strings to read
+                in multiple files. This parameter is required.
+
+            n_parallel (int): 
+                Number of data files to read in parallel using multiprocessing. Default value 
+                is 1. Adjust according to your computer's available resources. This parameter 
+                is optional.
+            
+            first_record (bool): 
+                Only read in the first record in each file. This is the same as the first_frame
+                parameter in the themis-imager-readfile and trex-imager-readfile libraries, and
+                is a read optimization if you only need one image per minute, as opposed to the
+                full temporal resolution of data (e.g., 3sec cadence). This parameter is optional.
+            
+            no_metadata (bool): 
+                Skip reading of metadata. This is a minor optimization if the metadata is not needed.
+                Default is `False`. This parameter is optional.
+            
+            quiet (bool): 
+                Do not print out errors while reading data files, if any are encountered. Any files
+                that encounter errors will be, as usual, accessible via the `problematic_files` 
+                attribute of the returned `Data` object. This parameter is optional.
+
+            dataset (Dataset): 
+                The dataset object for which the files are associated with. This parameter is
+                optional.
+
+        Returns:
+            A [`Data`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Data) 
+            object containing the data read in, among other values.
+        
+        Raises:
+            pyaurorax.exceptions.AuroraXError: a generic read error was encountered
+        """
+        try:
+            return self.__aurorax_obj.srs_obj.data.readers.read_themis(
+                file_list,
+                n_parallel=n_parallel,
+                first_record=first_record,
+                no_metadata=no_metadata,
+                quiet=quiet,
+                dataset=dataset,
+            )
+        except SRSError as e:
+            raise AuroraXError(e) from e
+
+    def read_rego(self,
+                  file_list: Union[List[str], List[Path], str, Path],
+                  n_parallel: int = 1,
+                  first_record: bool = False,
+                  no_metadata: bool = False,
+                  quiet: bool = False,
+                  dataset: Optional[Dataset] = None) -> Data:
+        """
+        Read in REGO raw data (stream0 pgm* files).
+
+        Args:
+            file_list (List[str], List[Path], str, Path): 
+                The files to read in. Absolute paths are recommended, but not technically
+                necessary. This can be a single string for a file, or a list of strings to read
+                in multiple files. This parameter is required.
+
+            n_parallel (int): 
+                Number of data files to read in parallel using multiprocessing. Default value 
+                is 1. Adjust according to your computer's available resources. This parameter 
+                is optional.
+            
+            first_record (bool): 
+                Only read in the first record in each file. This is the same as the first_frame
+                parameter in the themis-imager-readfile and trex-imager-readfile libraries, and
+                is a read optimization if you only need one image per minute, as opposed to the
+                full temporal resolution of data (e.g., 3sec cadence). This parameter is optional.
+            
+            no_metadata (bool): 
+                Skip reading of metadata. This is a minor optimization if the metadata is not needed.
+                Default is `False`. This parameter is optional.
+            
+            quiet (bool): 
+                Do not print out errors while reading data files, if any are encountered. Any files
+                that encounter errors will be, as usual, accessible via the `problematic_files` 
+                attribute of the returned `Data` object. This parameter is optional.
+
+            dataset (Dataset): 
+                The dataset object for which the files are associated with. This parameter is
+                optional.
+
+        Returns:
+            A [`Data`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Data) 
+            object containing the data read in, among other values.
+        
+        Raises:
+            pyaurorax.exceptions.AuroraXError: a generic read error was encountered
+        """
+        return self.__aurorax_obj.srs_obj.data.readers.read_rego(
+            file_list,
+            n_parallel=n_parallel,
+            first_record=first_record,
+            no_metadata=no_metadata,
+            quiet=quiet,
+            dataset=dataset,
+        )
+
+    def read_trex_nir(self,
+                      file_list: Union[List[str], List[Path], str, Path],
+                      n_parallel: int = 1,
+                      first_record: bool = False,
+                      no_metadata: bool = False,
+                      quiet: bool = False,
+                      dataset: Optional[Dataset] = None) -> Data:
+        """
+        Read in TREx near-infrared (NIR) raw data (stream0 pgm* files).
+
+        Args:
+            file_list (List[str], List[Path], str, Path): 
+                The files to read in. Absolute paths are recommended, but not technically
+                necessary. This can be a single string for a file, or a list of strings to read
+                in multiple files. This parameter is required.
+
+            n_parallel (int): 
+                Number of data files to read in parallel using multiprocessing. Default value 
+                is 1. Adjust according to your computer's available resources. This parameter 
+                is optional.
+            
+            first_record (bool): 
+                Only read in the first record in each file. This is the same as the first_frame
+                parameter in the themis-imager-readfile and trex-imager-readfile libraries, and
+                is a read optimization if you only need one image per minute, as opposed to the
+                full temporal resolution of data (e.g., 3sec cadence). This parameter is optional.
+            
+            no_metadata (bool): 
+                Skip reading of metadata. This is a minor optimization if the metadata is not needed.
+                Default is `False`. This parameter is optional.
+            
+            quiet (bool): 
+                Do not print out errors while reading data files, if any are encountered. Any files
+                that encounter errors will be, as usual, accessible via the `problematic_files` 
+                attribute of the returned `Data` object. This parameter is optional.
+
+            dataset (Dataset): 
+                The dataset object for which the files are associated with. This parameter is
+                optional.
+
+        Returns:
+            A [`Data`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Data) 
+            object containing the data read in, among other values.
+        
+        Raises:
+            pyaurorax.exceptions.AuroraXError: a generic read error was encountered
+        """
+        return self.__aurorax_obj.srs_obj.data.readers.read_trex_nir(
+            file_list,
+            n_parallel=n_parallel,
+            first_record=first_record,
+            no_metadata=no_metadata,
+            quiet=quiet,
+            dataset=dataset,
+        )
+
+    def read_trex_blue(self,
+                       file_list: Union[List[str], List[Path], str, Path],
+                       n_parallel: int = 1,
+                       first_record: bool = False,
+                       no_metadata: bool = False,
+                       quiet: bool = False,
+                       dataset: Optional[Dataset] = None) -> Data:
+        """
+        Read in TREx Blueline raw data (stream0 pgm* files).
+
+        Args:
+            file_list (List[str], List[Path], str, Path): 
+                The files to read in. Absolute paths are recommended, but not technically
+                necessary. This can be a single string for a file, or a list of strings to read
+                in multiple files. This parameter is required.
+
+            n_parallel (int): 
+                Number of data files to read in parallel using multiprocessing. Default value 
+                is 1. Adjust according to your computer's available resources. This parameter 
+                is optional.
+            
+            first_record (bool): 
+                Only read in the first record in each file. This is the same as the first_frame
+                parameter in the themis-imager-readfile and trex-imager-readfile libraries, and
+                is a read optimization if you only need one image per minute, as opposed to the
+                full temporal resolution of data (e.g., 3sec cadence). This parameter is optional.
+            
+            no_metadata (bool): 
+                Skip reading of metadata. This is a minor optimization if the metadata is not needed.
+                Default is `False`. This parameter is optional.
+            
+            quiet (bool): 
+                Do not print out errors while reading data files, if any are encountered. Any files
+                that encounter errors will be, as usual, accessible via the `problematic_files` 
+                attribute of the returned `Data` object. This parameter is optional.
+
+            dataset (Dataset): 
+                The dataset object for which the files are associated with. This parameter is
+                optional.
+
+        Returns:
+            A [`Data`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Data) 
+            object containing the data read in, among other values.
+        
+        Raises:
+            pyaurorax.exceptions.AuroraXError: a generic read error was encountered
+        """
+        return self.__aurorax_obj.srs_obj.data.readers.read_trex_blue(
+            file_list,
+            n_parallel=n_parallel,
+            first_record=first_record,
+            no_metadata=no_metadata,
+            quiet=quiet,
+            dataset=dataset,
+        )
+
+    def read_trex_rgb(self,
+                      file_list: Union[List[str], List[Path], str, Path],
+                      n_parallel: int = 1,
+                      first_record: bool = False,
+                      no_metadata: bool = False,
+                      quiet: bool = False,
+                      dataset: Optional[Dataset] = None) -> Data:
+        """
+        Read in TREx RGB raw data (stream0 h5, stream0.burst png.tar, unstable stream0 and 
+        stream0.colour pgm* and png*).
+
+        Args:
+            file_list (List[str], List[Path], str, Path): 
+                The files to read in. Absolute paths are recommended, but not technically
+                necessary. This can be a single string for a file, or a list of strings to read
+                in multiple files. This parameter is required.
+
+            n_parallel (int): 
+                Number of data files to read in parallel using multiprocessing. Default value 
+                is 1. Adjust according to your computer's available resources. This parameter 
+                is optional.
+            
+            first_record (bool): 
+                Only read in the first record in each file. This is the same as the first_frame
+                parameter in the themis-imager-readfile and trex-imager-readfile libraries, and
+                is a read optimization if you only need one image per minute, as opposed to the
+                full temporal resolution of data (e.g., 3sec cadence). This parameter is optional.
+            
+            no_metadata (bool): 
+                Skip reading of metadata. This is a minor optimization if the metadata is not needed.
+                Default is `False`. This parameter is optional.
+            
+            quiet (bool): 
+                Do not print out errors while reading data files, if any are encountered. Any files
+                that encounter errors will be, as usual, accessible via the `problematic_files` 
+                attribute of the returned `Data` object. This parameter is optional.
+
+            dataset (Dataset): 
+                The dataset object for which the files are associated with. This parameter is
+                optional.
+
+        Returns:
+            A [`Data`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Data) 
+            object containing the data read in, among other values.
+        
+        Raises:
+            pyaurorax.exceptions.AuroraXError: a generic read error was encountered
+        """
+        return self.__aurorax_obj.srs_obj.data.readers.read_trex_rgb(
+            file_list,
+            n_parallel=n_parallel,
+            first_record=first_record,
+            no_metadata=no_metadata,
+            quiet=quiet,
+            dataset=dataset,
+        )
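To illustrate the `first_record` optimization mentioned in the docstrings, the sketch below reads only the first frame of each one-minute TREx RGB file. The directory layout and glob pattern are invented for the example.

```python
# Sketch of first_record: one frame per minute-file instead of full 3-second cadence.
# The glob pattern and directory are hypothetical.
import glob
import pyaurorax

aurorax = pyaurorax.PyAuroraX()

file_list = sorted(glob.glob("/data/trex_rgb/stream0/2023/01/15/gill*/ut06/*_full.h5"))

data = aurorax.data.ucalgary.readers.read_trex_rgb(
    file_list,
    n_parallel=4,
    first_record=True,   # skip all but the first image in each file
    no_metadata=True,    # minor additional optimization when metadata isn't needed
)
```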
+
+    def read_trex_spectrograph(self,
+                               file_list: Union[List[str], List[Path], str, Path],
+                               n_parallel: int = 1,
+                               first_record: bool = False,
+                               no_metadata: bool = False,
+                               quiet: bool = False,
+                               dataset: Optional[Dataset] = None) -> Data:
+        """
+        Read in TREx Spectrograph raw data (stream0 pgm* files).
+
+        Args:
+            file_list (List[str], List[Path], str, Path): 
+                The files to read in. Absolute paths are recommended, but not technically
+                necessary. This can be a single string for a file, or a list of strings to read
+                in multiple files. This parameter is required.
+
+            n_parallel (int): 
+                Number of data files to read in parallel using multiprocessing. Default value 
+                is 1. Adjust according to your computer's available resources. This parameter 
+                is optional.
+            
+            first_record (bool): 
+                Only read in the first record in each file. This is the same as the first_frame
+                parameter in the themis-imager-readfile and trex-imager-readfile libraries, and
+                is a read optimization if you only need one image per minute, as opposed to the
+                full temporal resolution of data (e.g., 3sec cadence). This parameter is optional.
+            
+            no_metadata (bool): 
+                Skip reading of metadata. This is a minor optimization if the metadata is not needed.
+                Default is `False`. This parameter is optional.
+            
+            quiet (bool): 
+                Do not print out errors while reading data files, if any are encountered. Any files
+                that encounter errors will be, as usual, accessible via the `problematic_files` 
+                attribute of the returned `Data` object. This parameter is optional.
+
+            dataset (Dataset): 
+                The dataset object for which the files are associated with. This parameter is
+                optional.
+
+        Returns:
+            A [`Data`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Data) 
+            object containing the data read in, among other values.
+        
+        Raises:
+            pyaurorax.exceptions.AuroraXError: a generic read error was encountered
+        """
+        return self.__aurorax_obj.srs_obj.data.readers.read_trex_spectrograph(
+            file_list,
+            n_parallel=n_parallel,
+            first_record=first_record,
+            no_metadata=no_metadata,
+            quiet=quiet,
+            dataset=dataset,
+        )
+
+    def read_skymap(
+        self,
+        file_list: Union[List[str], List[Path], str, Path],
+        n_parallel: int = 1,
+        quiet: bool = False,
+        dataset: Optional[Dataset] = None,
+    ) -> Data:
+        """
+        Read in UCalgary skymap files.
+
+        Args:
+            file_list (List[str], List[Path], str, Path): 
+                The files to read in. Absolute paths are recommended, but not technically
+                necessary. This can be a single string for a file, or a list of strings to read
+                in multiple files. This parameter is required.
+
+            n_parallel (int): 
+                Number of data files to read in parallel using multiprocessing. Default value 
+                is 1. Adjust according to your computer's available resources. This parameter 
+                is optional.
+                                    
+            quiet (bool): 
+                Do not print out errors while reading skymap files, if any are encountered. Any 
+                files that encounter errors will be, as usual, accessible via the `problematic_files` 
+                attribute of the returned `Skymap` object. This parameter is optional.
+
+            dataset (Dataset): 
+                The dataset object for which the files are associated with. This parameter is
+                optional.
+
+        Returns:
+            A [`Data`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Data) 
+            object containing the data read in, among other values.
+        
+        Raises:
+            pyaurorax.exceptions.AuroraXError: a generic read error was encountered        
+        """
+        return self.__aurorax_obj.srs_obj.data.readers.read_skymap(
+            file_list,
+            n_parallel=n_parallel,
+            quiet=quiet,
+            dataset=dataset,
+        )
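Since `file_list` also accepts a single string or `Path`, a skymap read can be a one-liner. A brief sketch follows; the filename is illustrative only and the access path is assumed as above.

```python
# Sketch: read a single skymap file (hypothetical path)
import pyaurorax

aurorax = pyaurorax.PyAuroraX()
skymap_data = aurorax.data.ucalgary.readers.read_skymap(
    "/data/skymaps/themis_skymap_gill_20230101_v02.sav",
)
```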
+
+    def read_calibration(
+        self,
+        file_list: Union[List[str], List[Path], str, Path],
+        n_parallel: int = 1,
+        quiet: bool = False,
+        dataset: Optional[Dataset] = None,
+    ) -> Data:
+        """
+        Read in UCalgary calibration files.
+
+        Args:
+            file_list (List[str], List[Path], str, Path): 
+                The files to read in. Absolute paths are recommended, but not technically
+                necessary. This can be a single string for a file, or a list of strings to read
+                in multiple files. This parameter is required.
+
+            n_parallel (int): 
+                Number of data files to read in parallel using multiprocessing. Default value 
+                is 1. Adjust according to your computer's available resources. This parameter 
+                is optional.
+
+            quiet (bool): 
+                Do not print out errors while reading calibration files, if any are encountered. 
+                Any files that encounter errors will be, as usual, accessible via the `problematic_files` 
+                attribute of the returned `Calibration` object. This parameter is optional.
+
+            dataset (Dataset): 
+                The dataset object for which the files are associated with. This parameter is
+                optional.
+
+        Returns:
+            A [`Data`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Data) 
+            object containing the data read in, among other values.
+        
+        Raises:
+            pyaurorax.exceptions.AuroraXError: a generic read error was encountered        
+        """
+        return self.__aurorax_obj.srs_obj.data.readers.read_calibration(
+            file_list,
+            n_parallel=n_parallel,
+            quiet=quiet,
+            dataset=dataset,
+        )
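The calibration reader follows the same pattern; a brief sketch with a hypothetical flatfield calibration file is shown below.

```python
# Sketch: read one calibration file (hypothetical path)
import pyaurorax

aurorax = pyaurorax.PyAuroraX()
cal_data = aurorax.data.ucalgary.readers.read_calibration(
    "/data/calibration/rego_flatfield_652_20210101_v01.sav",
)
```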
+
+

Methods

+
+
+def is_supported(self, dataset_name: str) ‑> bool +
+
+

Check if a given dataset has file reading support.

+

Not all datasets available in the UCalgary Space Remote Sensing Open Data Platform have special readfile routines in this library. This is because some datasets are in basic formats such as JPG or PNG, so unique functions aren't necessary. We leave it up to the user to open these basic files in whichever way they prefer. Use the list_supported_read_datasets() function to see all datasets that have special file reading functionality in this library.

+

Args

+
+
dataset_name : str
+
The dataset name to check if file reading is supported. This parameter is required.
+
+

Returns

+

Boolean indicating if file reading is supported.

+
+ +Expand source code + +
def is_supported(self, dataset_name: str) -> bool:
+    """
+    Check if a given dataset has file reading support. 
+    
+    Not all datasets available in the UCalgary Space Remote Sensing Open Data Platform 
+    have special readfile routines in this library. This is because some datasets are 
+    in basic formats such as JPG or PNG, so unique functions aren't necessary. We leave 
+    it up to the user to open these basic files in whichever way they prefer. Use the 
+    `list_supported_read_datasets()` function to see all datasets that have special
+    file reading functionality in this library.
+
+    Args:
+        dataset_name (str): 
+            The dataset name to check if file reading is supported. This parameter 
+            is required.
+    
+    Returns:
+        Boolean indicating if file reading is supported.
+    """
+    return self.__aurorax_obj.srs_obj.data.readers.is_supported(dataset_name)
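For example, a support check can guard a read attempt, as in the hedged sketch below; the dataset name is only an example and the access path is assumed, not taken from this patch.

```python
# Sketch: check for read support before reading (dataset name is an example)
import pyaurorax

aurorax = pyaurorax.PyAuroraX()

# names of every dataset with a dedicated reader
print(aurorax.data.ucalgary.readers.list_supported_datasets())

dataset_name = "THEMIS_ASI_RAW"
if (aurorax.data.ucalgary.readers.is_supported(dataset_name) is True):
    print("%s has a dedicated read routine" % (dataset_name))
else:
    print("open %s files with a generic tool instead" % (dataset_name))
```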
+
+
+
+def list_supported_datasets(self) ‑> List[str] +
+
+

List the datasets which have file reading capabilities supported.

+

Returns

+

A list of the dataset names with file reading support.

+
+ +Expand source code + +
def list_supported_datasets(self) -> List[str]:
+    """
+    List the datasets which have file reading capabilities supported.
+
+    Returns:
+        A list of the dataset names with file reading support.
+    """
+    return self.__aurorax_obj.srs_obj.data.readers.list_supported_datasets()
+
+
+
+def read(self, dataset: pyucalgarysrs.data.classes.Dataset, file_list: Union[List[str], List[pathlib.Path], str, pathlib.Path], n_parallel: int = 1, first_record: bool = False, no_metadata: bool = False, quiet: bool = False) ‑> pyucalgarysrs.data.classes.Data +
+
+

Read in data files for a given dataset. Note that only one type of dataset's data should be read in using a single call.

+

Args

+
+
dataset : Dataset
+
The dataset object for which the files are associated with. This parameter is required.
+
file_list : List[str], List[Path], str, Path
+
The files to read in. Absolute paths are recommended, but not technically necessary. This can be a single string for a file, or a list of strings to read in multiple files. This parameter is required.
+
n_parallel : int
+
Number of data files to read in parallel using multiprocessing. Default value is 1. Adjust according to your computer's available resources. This parameter is optional.
+
first_record : bool
+
Only read in the first record in each file. This is the same as the first_frame parameter in the themis-imager-readfile and trex-imager-readfile libraries, and is a read optimization if you only need one image per minute, as opposed to the full temporal resolution of data (e.g., 3sec cadence). This parameter is optional.
+
no_metadata : bool
+
Skip reading of metadata. This is a minor optimization if the metadata is not needed. Default is False. This parameter is optional.
+
quiet : bool
+
Do not print out errors while reading data files, if any are encountered. Any files that encounter errors will be, as usual, accessible via the problematic_files attribute of the returned Data object. This parameter is optional.
+
+

Returns

+

A Data object containing the data read in, among other values.

+

Raises

+
+
AuroraXUnsupportedReadError
+
an unsupported dataset was used when trying to read files.
+
AuroraXError
+
a generic read error was encountered
+
+

Notes:

+

For users who are familiar with the themis-imager-readfile and trex-imager-readfile libraries, the read function provides a near-identical usage. Further improvements have been integrated, and those libraries are anticipated to be deprecated at some point in the future.

+
+ +Expand source code + +
def read(self,
+         dataset: Dataset,
+         file_list: Union[List[str], List[Path], str, Path],
+         n_parallel: int = 1,
+         first_record: bool = False,
+         no_metadata: bool = False,
+         quiet: bool = False) -> Data:
+    """
+    Read in data files for a given dataset. Note that only one type of dataset's data
+    should be read in using a single call.
+
+    Args:
+        dataset (Dataset): 
+            The dataset object for which the files are associated with. This parameter is
+            required.
+        
+        file_list (List[str], List[Path], str, Path): 
+            The files to read in. Absolute paths are recommended, but not technically
+            necessary. This can be a single string for a file, or a list of strings to read
+            in multiple files. This parameter is required.
+
+        n_parallel (int): 
+            Number of data files to read in parallel using multiprocessing. Default value 
+            is 1. Adjust according to your computer's available resources. This parameter 
+            is optional.
+        
+        first_record (bool): 
+            Only read in the first record in each file. This is the same as the first_frame
+            parameter in the themis-imager-readfile and trex-imager-readfile libraries, and
+            is a read optimization if you only need one image per minute, as opposed to the
+            full temporal resolution of data (e.g., 3sec cadence). This parameter is optional.
+        
+        no_metadata (bool): 
+            Skip reading of metadata. This is a minor optimization if the metadata is not needed.
+            Default is `False`. This parameter is optional.
+        
+        quiet (bool): 
+            Do not print out errors while reading data files, if any are encountered. Any files
+            that encounter errors will be, as usual, accessible via the `problematic_files` 
+            attribute of the returned `Data` object. This parameter is optional.
+    
+    Returns:
+        A [`Data`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Data) 
+        object containing the data read in, among other values.
+    
+    Raises:
+        pyaurorax.exceptions.AuroraXUnsupportedReadError: an unsupported dataset was used when
+            trying to read files.
+        pyaurorax.exceptions.AuroraXError: a generic read error was encountered
+
+    Notes:
+    ---------
+    For users who are familiar with the themis-imager-readfile and trex-imager-readfile
+    libraries, the read function provides a near-identical usage. Further improvements have 
+    been integrated, and those libraries are anticipated to be deprecated at some point in the
+    future.
+    """
+    try:
+        return self.__aurorax_obj.srs_obj.data.readers.read(
+            dataset,
+            file_list,
+            n_parallel=n_parallel,
+            first_record=first_record,
+            no_metadata=no_metadata,
+            quiet=quiet,
+        )
+    except SRSUnsupportedReadError as e:
+        raise AuroraXUnsupportedReadError(e) from e
+    except SRSError as e:
+        raise AuroraXError(e) from e
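A sketch of the generic `read()` call, including handling for the exceptions the docstring describes. Looking up the `Dataset` object with `list_datasets()` and the THEMIS file paths are assumptions for illustration, not taken from this patch.

```python
# Sketch: generic read() with the documented exceptions handled.
# list_datasets() usage and the file paths are assumptions.
import glob
import pyaurorax
from pyaurorax.exceptions import AuroraXError, AuroraXUnsupportedReadError

aurorax = pyaurorax.PyAuroraX()

dataset = aurorax.data.ucalgary.list_datasets(name="THEMIS_ASI_RAW")[0]
file_list = sorted(glob.glob("/data/themis/stream0/2023/01/15/atha*/ut06/*_full.pgm.gz"))

try:
    data = aurorax.data.ucalgary.readers.read(dataset, file_list, n_parallel=2)
except AuroraXUnsupportedReadError:
    print("no dedicated reader for this dataset; open the files another way")
except AuroraXError as e:
    print("read failed: %s" % (e))
```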
+
+
+
+def read_calibration(self, file_list: Union[List[str], List[pathlib.Path], str, pathlib.Path], n_parallel: int = 1, quiet: bool = False, dataset: Optional[pyucalgarysrs.data.classes.Dataset] = None) ‑> pyucalgarysrs.data.classes.Data +
+
+

Read in UCalgary calibration files.

+

Args

+
+
file_list : List[str], List[Path], str, Path
+
The files to read in. Absolute paths are recommended, but not technically +necessary. This can be a single string for a file, or a list of strings to read +in multiple files. This parameter is required.
+
n_parallel : int
+
Number of data files to read in parallel using multiprocessing. Default value +is 1. Adjust according to your computer's available resources. This parameter +is optional.
+
quiet : bool
+
Do not print out errors while reading calibration files, if any are encountered. +Any files that encounter errors will be, as usual, accessible via the problematic_files +attribute of the returned Calibration object. This parameter is optional.
+
dataset : Dataset
+
The dataset object for which the files are associated with. This parameter is +optional.
+
+

Returns

+

A Data +object containing the data read in, among other values.

+

Raises

+
+
AuroraXError
+
a generic read error was encountered
+
+
+ +Expand source code + +
def read_calibration(
+    self,
+    file_list: Union[List[str], List[Path], str, Path],
+    n_parallel: int = 1,
+    quiet: bool = False,
+    dataset: Optional[Dataset] = None,
+) -> Data:
+    """
+    Read in UCalgary calibration files.
+
+    Args:
+        file_list (List[str], List[Path], str, Path): 
+            The files to read in. Absolute paths are recommended, but not technically
+            necessary. This can be a single string for a file, or a list of strings to read
+            in multiple files. This parameter is required.
+
+        n_parallel (int): 
+            Number of data files to read in parallel using multiprocessing. Default value 
+            is 1. Adjust according to your computer's available resources. This parameter 
+            is optional.
+
+        quiet (bool): 
+            Do not print out errors while reading calibration files, if any are encountered. 
+            Any files that encounter errors will be, as usual, accessible via the `problematic_files` 
+            attribute of the returned `Calibration` object. This parameter is optional.
+
+        dataset (Dataset): 
+            The dataset object for which the files are associated with. This parameter is
+            optional.
+
+    Returns:
+        A [`Data`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Data) 
+        object containing the data read in, among other values.
+    
+    Raises:
+        pyaurorax.exceptions.AuroraXError: a generic read error was encountered        
+    """
+    return self.__aurorax_obj.srs_obj.data.readers.read_calibration(
+        file_list,
+        n_parallel=n_parallel,
+        quiet=quiet,
+        dataset=dataset,
+    )
+
+
+
+def read_rego(self, file_list: Union[List[str], List[pathlib.Path], str, pathlib.Path], n_parallel: int = 1, first_record: bool = False, no_metadata: bool = False, quiet: bool = False, dataset: Optional[pyucalgarysrs.data.classes.Dataset] = None) ‑> pyucalgarysrs.data.classes.Data +
+
+

Read in REGO raw data (stream0 pgm* files).

+

Args

+
+
file_list : List[str], List[Path], str, Path
+
The files to read in. Absolute paths are recommended, but not technically +necessary. This can be a single string for a file, or a list of strings to read +in multiple files. This parameter is required.
+
n_parallel : int
+
Number of data files to read in parallel using multiprocessing. Default value +is 1. Adjust according to your computer's available resources. This parameter +is optional.
+
first_record : bool
+
Only read in the first record in each file. This is the same as the first_frame +parameter in the themis-imager-readfile and trex-imager-readfile libraries, and +is a read optimization if you only need one image per minute, as opposed to the +full temporal resolution of data (e.g., 3sec cadence). This parameter is optional.
+
no_metadata : bool
+
Skip reading of metadata. This is a minor optimization if the metadata is not needed. +Default is False. This parameter is optional.
+
quiet : bool
+
Do not print out errors while reading data files, if any are encountered. Any files +that encounter errors will be, as usual, accessible via the problematic_files +attribute of the returned Data object. This parameter is optional.
+
dataset : Dataset
+
The dataset object for which the files are associated with. This parameter is +optional.
+
+

Returns

+

A Data +object containing the data read in, among other values.

+

Raises

+
+
AuroraXError
+
a generic read error was encountered
+
+
+ +Expand source code + +
def read_rego(self,
+              file_list: Union[List[str], List[Path], str, Path],
+              n_parallel: int = 1,
+              first_record: bool = False,
+              no_metadata: bool = False,
+              quiet: bool = False,
+              dataset: Optional[Dataset] = None) -> Data:
+    """
+    Read in REGO raw data (stream0 pgm* files).
+
+    Args:
+        file_list (List[str], List[Path], str, Path): 
+            The files to read in. Absolute paths are recommended, but not technically
+            necessary. This can be a single string for a file, or a list of strings to read
+            in multiple files. This parameter is required.
+
+        n_parallel (int): 
+            Number of data files to read in parallel using multiprocessing. Default value 
+            is 1. Adjust according to your computer's available resources. This parameter 
+            is optional.
+        
+        first_record (bool): 
+            Only read in the first record in each file. This is the same as the first_frame
+            parameter in the themis-imager-readfile and trex-imager-readfile libraries, and
+            is a read optimization if you only need one image per minute, as opposed to the
+            full temporal resolution of data (e.g., 3sec cadence). This parameter is optional.
+        
+        no_metadata (bool): 
+            Skip reading of metadata. This is a minor optimization if the metadata is not needed.
+            Default is `False`. This parameter is optional.
+        
+        quiet (bool): 
+            Do not print out errors while reading data files, if any are encountered. Any files
+            that encounter errors will be, as usual, accessible via the `problematic_files` 
+            attribute of the returned `Data` object. This parameter is optional.
+
+        dataset (Dataset): 
+            The dataset object for which the files are associated with. This parameter is
+            optional.
+
+    Returns:
+        A [`Data`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Data) 
+        object containing the data read in, among other values.
+    
+    Raises:
+        pyaurorax.exceptions.AuroraXError: a generic read error was encountered
+    """
+    return self.__aurorax_obj.srs_obj.data.readers.read_rego(
+        file_list,
+        n_parallel=n_parallel,
+        first_record=first_record,
+        no_metadata=no_metadata,
+        quiet=quiet,
+        dataset=dataset,
+    )
+
+
+
+def read_skymap(self, file_list: Union[List[str], List[pathlib.Path], str, pathlib.Path], n_parallel: int = 1, quiet: bool = False, dataset: Optional[pyucalgarysrs.data.classes.Dataset] = None) ‑> pyucalgarysrs.data.classes.Data +
+
+

Read in UCalgary skymap files.

+

Args

+
+
file_list : List[str], List[Path], str, Path
+
The files to read in. Absolute paths are recommended, but not technically +necessary. This can be a single string for a file, or a list of strings to read +in multiple files. This parameter is required.
+
n_parallel : int
+
Number of data files to read in parallel using multiprocessing. Default value +is 1. Adjust according to your computer's available resources. This parameter +is optional.
+
quiet : bool
+
Do not print out errors while reading skymap files, if any are encountered. Any +files that encounter errors will be, as usual, accessible via the problematic_files +attribute of the returned Skymap object. This parameter is optional.
+
dataset : Dataset
+
The dataset object for which the files are associated with. This parameter is +optional.
+
+

Returns

+

A Data +object containing the data read in, among other values.

+

Raises

+
+
AuroraXError
+
a generic read error was encountered
+
+
+ +Expand source code + +
def read_skymap(
+    self,
+    file_list: Union[List[str], List[Path], str, Path],
+    n_parallel: int = 1,
+    quiet: bool = False,
+    dataset: Optional[Dataset] = None,
+) -> Data:
+    """
+    Read in UCalgary skymap files.
+
+    Args:
+        file_list (List[str], List[Path], str, Path): 
+            The files to read in. Absolute paths are recommended, but not technically
+            necessary. This can be a single string for a file, or a list of strings to read
+            in multiple files. This parameter is required.
+
+        n_parallel (int): 
+            Number of data files to read in parallel using multiprocessing. Default value 
+            is 1. Adjust according to your computer's available resources. This parameter 
+            is optional.
+                                
+        quiet (bool): 
+            Do not print out errors while reading skymap files, if any are encountered. Any 
+            files that encounter errors will be, as usual, accessible via the `problematic_files` 
+            attribute of the returned `Skymap` object. This parameter is optional.
+
+        dataset (Dataset): 
+            The dataset object for which the files are associated with. This parameter is
+            optional.
+
+    Returns:
+        A [`Data`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Data) 
+        object containing the data read in, among other values.
+    
+    Raises:
+        pyaurorax.exceptions.AuroraXError: a generic read error was encountered        
+    """
+    return self.__aurorax_obj.srs_obj.data.readers.read_skymap(
+        file_list,
+        n_parallel=n_parallel,
+        quiet=quiet,
+        dataset=dataset,
+    )
+
+
+
+def read_themis(self, file_list: Union[List[str], List[pathlib.Path], str, pathlib.Path], n_parallel: int = 1, first_record: bool = False, no_metadata: bool = False, quiet: bool = False, dataset: Optional[pyucalgarysrs.data.classes.Dataset] = None) ‑> pyucalgarysrs.data.classes.Data +
+
+

Read in THEMIS ASI raw data (stream0 full.pgm* files).

+

Args

+
+
file_list : List[str], List[Path], str, Path
+
The files to read in. Absolute paths are recommended, but not technically +necessary. This can be a single string for a file, or a list of strings to read +in multiple files. This parameter is required.
+
n_parallel : int
+
Number of data files to read in parallel using multiprocessing. Default value +is 1. Adjust according to your computer's available resources. This parameter +is optional.
+
first_record : bool
+
Only read in the first record in each file. This is the same as the first_frame +parameter in the themis-imager-readfile and trex-imager-readfile libraries, and +is a read optimization if you only need one image per minute, as opposed to the +full temporal resolution of data (e.g., 3sec cadence). This parameter is optional.
+
no_metadata : bool
+
Skip reading of metadata. This is a minor optimization if the metadata is not needed. +Default is False. This parameter is optional.
+
quiet : bool
+
Do not print out errors while reading data files, if any are encountered. Any files +that encounter errors will be, as usual, accessible via the problematic_files +attribute of the returned Data object. This parameter is optional.
+
dataset : Dataset
+
The dataset object for which the files are associated with. This parameter is +optional.
+
+

Returns

+

A Data +object containing the data read in, among other values.

+

Raises

+
+
AuroraXError
+
a generic read error was encountered
+
+
+ +Expand source code + +
def read_themis(self,
+                file_list: Union[List[str], List[Path], str, Path],
+                n_parallel: int = 1,
+                first_record: bool = False,
+                no_metadata: bool = False,
+                quiet: bool = False,
+                dataset: Optional[Dataset] = None) -> Data:
+    """
+    Read in THEMIS ASI raw data (stream0 full.pgm* files).
+
+    Args:
+        file_list (List[str], List[Path], str, Path): 
+            The files to read in. Absolute paths are recommended, but not technically
+            necessary. This can be a single string for a file, or a list of strings to read
+            in multiple files. This parameter is required.
+
+        n_parallel (int): 
+            Number of data files to read in parallel using multiprocessing. Default value 
+            is 1. Adjust according to your computer's available resources. This parameter 
+            is optional.
+        
+        first_record (bool): 
+            Only read in the first record in each file. This is the same as the first_frame
+            parameter in the themis-imager-readfile and trex-imager-readfile libraries, and
+            is a read optimization if you only need one image per minute, as opposed to the
+            full temporal resolution of data (e.g., 3sec cadence). This parameter is optional.
+        
+        no_metadata (bool): 
+            Skip reading of metadata. This is a minor optimization if the metadata is not needed.
+            Default is `False`. This parameter is optional.
+        
+        quiet (bool): 
+            Do not print out errors while reading data files, if any are encountered. Any files
+            that encounter errors will be, as usual, accessible via the `problematic_files` 
+            attribute of the returned `Data` object. This parameter is optional.
+
+        dataset (Dataset): 
+            The dataset object for which the files are associated with. This parameter is
+            optional.
+
+    Returns:
+        A [`Data`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Data) 
+        object containing the data read in, among other values.
+    
+    Raises:
+        pyaurorax.exceptions.AuroraXError: a generic read error was encountered
+    """
+    try:
+        return self.__aurorax_obj.srs_obj.data.readers.read_themis(
+            file_list,
+            n_parallel=n_parallel,
+            first_record=first_record,
+            no_metadata=no_metadata,
+            quiet=quiet,
+            dataset=dataset,
+        )
+    except SRSError as e:
+        raise AuroraXError(e) from e
+
+
+
+def read_trex_blue(self, file_list: Union[List[str], List[pathlib.Path], str, pathlib.Path], n_parallel: int = 1, first_record: bool = False, no_metadata: bool = False, quiet: bool = False, dataset: Optional[pyucalgarysrs.data.classes.Dataset] = None) ‑> pyucalgarysrs.data.classes.Data +
+
+

Read in TREx Blueline raw data (stream0 pgm* files).

+

Args

+
+
file_list : List[str], List[Path], str, Path
+
The files to read in. Absolute paths are recommended, but not technically +necessary. This can be a single string for a file, or a list of strings to read +in multiple files. This parameter is required.
+
n_parallel : int
+
Number of data files to read in parallel using multiprocessing. Default value +is 1. Adjust according to your computer's available resources. This parameter +is optional.
+
first_record : bool
+
Only read in the first record in each file. This is the same as the first_frame +parameter in the themis-imager-readfile and trex-imager-readfile libraries, and +is a read optimization if you only need one image per minute, as opposed to the +full temporal resolution of data (e.g., 3sec cadence). This parameter is optional.
+
no_metadata : bool
+
Skip reading of metadata. This is a minor optimization if the metadata is not needed. +Default is False. This parameter is optional.
+
quiet : bool
+
Do not print out errors while reading data files, if any are encountered. Any files that encounter errors will be, as usual, accessible via the problematic_files attribute of the returned Data object. This parameter is optional.
+
dataset : Dataset
+
The dataset object for which the files are associated with. This parameter is +optional.
+
+

Returns

+

A Data +object containing the data read in, among other values.

+

Raises

+
+
AuroraXError
+
a generic read error was encountered
+
+
+ +Expand source code + +
def read_trex_blue(self,
+                   file_list: Union[List[str], List[Path], str, Path],
+                   n_parallel: int = 1,
+                   first_record: bool = False,
+                   no_metadata: bool = False,
+                   quiet: bool = False,
+                   dataset: Optional[Dataset] = None) -> Data:
+    """
+    Read in TREx Blueline raw data (stream0 pgm* files).
+
+    Args:
+        file_list (List[str], List[Path], str, Path): 
+            The files to read in. Absolute paths are recommended, but not technically
+            necessary. This can be a single string for a file, or a list of strings to read
+            in multiple files. This parameter is required.
+
+        n_parallel (int): 
+            Number of data files to read in parallel using multiprocessing. Default value 
+            is 1. Adjust according to your computer's available resources. This parameter 
+            is optional.
+        
+        first_record (bool): 
+            Only read in the first record in each file. This is the same as the first_frame
+            parameter in the themis-imager-readfile and trex-imager-readfile libraries, and
+            is a read optimization if you only need one image per minute, as opposed to the
+            full temporal resolution of data (e.g., 3sec cadence). This parameter is optional.
+        
+        no_metadata (bool): 
+            Skip reading of metadata. This is a minor optimization if the metadata is not needed.
+            Default is `False`. This parameter is optional.
+        
+        quiet (bool): 
+            Do not print out errors while reading data files, if any are encountered. Any files
+            that encounter errors will be, as usual, accessible via the `problematic_files` 
+            attribute of the returned `Data` object. This parameter is optional.
+
+        dataset (Dataset): 
+            The dataset object for which the files are associated with. This parameter is
+            optional.
+
+    Returns:
+        A [`Data`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Data) 
+        object containing the data read in, among other values.
+    
+    Raises:
+        pyaurorax.exceptions.AuroraXError: a generic read error was encountered
+    """
+    return self.__aurorax_obj.srs_obj.data.readers.read_trex_blue(
+        file_list,
+        n_parallel=n_parallel,
+        first_record=first_record,
+        no_metadata=no_metadata,
+        quiet=quiet,
+        dataset=dataset,
+    )
+
+
+
+def read_trex_nir(self, file_list: Union[List[str], List[pathlib.Path], str, pathlib.Path], n_parallel: int = 1, first_record: bool = False, no_metadata: bool = False, quiet: bool = False, dataset: Optional[pyucalgarysrs.data.classes.Dataset] = None) ‑> pyucalgarysrs.data.classes.Data +
+
+

Read in TREx near-infrared (NIR) raw data (stream0 pgm* files).

+

Args

+
+
file_list : List[str], List[Path], str, Path
+
The files to read in. Absolute paths are recommended, but not technically +necessary. This can be a single string for a file, or a list of strings to read +in multiple files. This parameter is required.
+
n_parallel : int
+
Number of data files to read in parallel using multiprocessing. Default value +is 1. Adjust according to your computer's available resources. This parameter +is optional.
+
first_record : bool
+
Only read in the first record in each file. This is the same as the first_frame +parameter in the themis-imager-readfile and trex-imager-readfile libraries, and +is a read optimization if you only need one image per minute, as opposed to the +full temporal resolution of data (e.g., 3sec cadence). This parameter is optional.
+
no_metadata : bool
+
Skip reading of metadata. This is a minor optimization if the metadata is not needed. +Default is False. This parameter is optional.
+
quiet : bool
+
Do not print out errors while reading data files, if any are encountered. Any files +that encounter errors will be, as usual, accessible via the problematic_files +attribute of the returned Data object. This parameter is optional.
+
dataset : Dataset
+
The dataset object for which the files are associated with. This parameter is +optional.
+
+

Returns

+

A Data +object containing the data read in, among other values.

+

Raises

+
+
AuroraXError
+
a generic read error was encountered
+
+
+ +Expand source code + +
def read_trex_nir(self,
+                  file_list: Union[List[str], List[Path], str, Path],
+                  n_parallel: int = 1,
+                  first_record: bool = False,
+                  no_metadata: bool = False,
+                  quiet: bool = False,
+                  dataset: Optional[Dataset] = None) -> Data:
+    """
+    Read in TREx near-infrared (NIR) raw data (stream0 pgm* files).
+
+    Args:
+        file_list (List[str], List[Path], str, Path): 
+            The files to read in. Absolute paths are recommended, but not technically
+            necessary. This can be a single string for a file, or a list of strings to read
+            in multiple files. This parameter is required.
+
+        n_parallel (int): 
+            Number of data files to read in parallel using multiprocessing. Default value 
+            is 1. Adjust according to your computer's available resources. This parameter 
+            is optional.
+        
+        first_record (bool): 
+            Only read in the first record in each file. This is the same as the first_frame
+            parameter in the themis-imager-readfile and trex-imager-readfile libraries, and
+            is a read optimization if you only need one image per minute, as opposed to the
+            full temporal resolution of data (e.g., 3sec cadence). This parameter is optional.
+        
+        no_metadata (bool): 
+            Skip reading of metadata. This is a minor optimization if the metadata is not needed.
+            Default is `False`. This parameter is optional.
+        
+        quiet (bool): 
+            Do not print out errors while reading data files, if any are encountered. Any files
+            that encounter errors will be, as usual, accessible via the `problematic_files` 
+            attribute of the returned `Data` object. This parameter is optional.
+
+        dataset (Dataset): 
+            The dataset object for which the files are associated with. This parameter is
+            optional.
+
+    Returns:
+        A [`Data`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Data) 
+        object containing the data read in, among other values.
+    
+    Raises:
+        pyaurorax.exceptions.AuroraXError: a generic read error was encountered
+    """
+    return self.__aurorax_obj.srs_obj.data.readers.read_trex_nir(
+        file_list,
+        n_parallel=n_parallel,
+        first_record=first_record,
+        no_metadata=no_metadata,
+        quiet=quiet,
+        dataset=dataset,
+    )
+
+
+
+def read_trex_rgb(self, file_list: Union[List[str], List[pathlib.Path], str, pathlib.Path], n_parallel: int = 1, first_record: bool = False, no_metadata: bool = False, quiet: bool = False, dataset: Optional[pyucalgarysrs.data.classes.Dataset] = None) ‑> pyucalgarysrs.data.classes.Data +
+
+

Read in TREx RGB raw data (stream0 h5, stream0.burst png.tar, unstable stream0 and stream0.colour pgm* and png*).

+

Args

+
+
file_list : List[str], List[Path], str, Path
+
The files to read in. Absolute paths are recommended, but not technically +necessary. This can be a single string for a file, or a list of strings to read +in multiple files. This parameter is required.
+
n_parallel : int
+
Number of data files to read in parallel using multiprocessing. Default value +is 1. Adjust according to your computer's available resources. This parameter +is optional.
+
first_record : bool
+
Only read in the first record in each file. This is the same as the first_frame +parameter in the themis-imager-readfile and trex-imager-readfile libraries, and +is a read optimization if you only need one image per minute, as opposed to the +full temporal resolution of data (e.g., 3sec cadence). This parameter is optional.
+
no_metadata : bool
+
Skip reading of metadata. This is a minor optimization if the metadata is not needed. +Default is False. This parameter is optional.
+
quiet : bool
+
Do not print out errors while reading data files, if any are encountered. Any files +that encounter errors will be, as usual, accessible via the problematic_files +attribute of the returned Data object. This parameter is optional.
+
dataset : Dataset
+
The dataset object for which the files are associated with. This parameter is +optional.
+
+

Returns

+

A Data +object containing the data read in, among other values.

+

Raises

+
+
AuroraXError
+
a generic read error was encountered
+
+
+ +Expand source code + +
def read_trex_rgb(self,
+                  file_list: Union[List[str], List[Path], str, Path],
+                  n_parallel: int = 1,
+                  first_record: bool = False,
+                  no_metadata: bool = False,
+                  quiet: bool = False,
+                  dataset: Optional[Dataset] = None) -> Data:
+    """
+    Read in TREx RGB raw data (stream0 h5, stream0.burst png.tar, unstable stream0 and 
+    stream0.colour pgm* and png*).
+
+    Args:
+        file_list (List[str], List[Path], str, Path): 
+            The files to read in. Absolute paths are recommended, but not technically
+            necessary. This can be a single string for a file, or a list of strings to read
+            in multiple files. This parameter is required.
+
+        n_parallel (int): 
+            Number of data files to read in parallel using multiprocessing. Default value 
+            is 1. Adjust according to your computer's available resources. This parameter 
+            is optional.
+        
+        first_record (bool): 
+            Only read in the first record in each file. This is the same as the first_frame
+            parameter in the themis-imager-readfile and trex-imager-readfile libraries, and
+            is a read optimization if you only need one image per minute, as opposed to the
+            full temporal resolution of data (e.g., 3sec cadence). This parameter is optional.
+        
+        no_metadata (bool): 
+            Skip reading of metadata. This is a minor optimization if the metadata is not needed.
+            Default is `False`. This parameter is optional.
+        
+        quiet (bool): 
+            Do not print out errors while reading data files, if any are encountered. Any files
+            that encounter errors will be, as usual, accessible via the `problematic_files` 
+            attribute of the returned `Data` object. This parameter is optional.
+
+        dataset (Dataset): 
+            The dataset object for which the files are associated with. This parameter is
+            optional.
+
+    Returns:
+        A [`Data`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Data) 
+        object containing the data read in, among other values.
+    
+    Raises:
+        pyaurorax.exceptions.AuroraXError: a generic read error was encountered
+    """
+    return self.__aurorax_obj.srs_obj.data.readers.read_trex_rgb(
+        file_list,
+        n_parallel=n_parallel,
+        first_record=first_record,
+        no_metadata=no_metadata,
+        quiet=quiet,
+        dataset=dataset,
+    )
+
+
+
+def read_trex_spectrograph(self, file_list: Union[List[str], List[pathlib.Path], str, pathlib.Path], n_parallel: int = 1, first_record: bool = False, no_metadata: bool = False, quiet: bool = False, dataset: Optional[pyucalgarysrs.data.classes.Dataset] = None) ‑> pyucalgarysrs.data.classes.Data +
+
+

Read in TREx Spectrograph raw data (stream0 pgm* files).

+

Args

+
+
file_list : List[str], List[Path], str, Path
+
The files to read in. Absolute paths are recommended, but not technically +necessary. This can be a single string for a file, or a list of strings to read +in multiple files. This parameter is required.
+
n_parallel : int
+
Number of data files to read in parallel using multiprocessing. Default value +is 1. Adjust according to your computer's available resources. This parameter +is optional.
+
first_record : bool
+
Only read in the first record in each file. This is the same as the first_frame +parameter in the themis-imager-readfile and trex-imager-readfile libraries, and +is a read optimization if you only need one image per minute, as opposed to the +full temporal resolution of data (e.g., 3sec cadence). This parameter is optional.
+
no_metadata : bool
+
Skip reading of metadata. This is a minor optimization if the metadata is not needed. +Default is False. This parameter is optional.
+
quiet : bool
+
Do not print out errors while reading data files, if any are encountered. Any files +that encounter errors will be, as usual, accessible via the problematic_files +attribute of the returned Data object. This parameter is optional.
+
dataset : Dataset
+
The dataset object for which the files are associated with. This parameter is +optional.
+
+

Returns

+

A Data +object containing the data read in, among other values.

+

Raises

+
+
AuroraXError
+
a generic read error was encountered
+
+
+ +Expand source code + +
def read_trex_spectrograph(self,
+                           file_list: Union[List[str], List[Path], str, Path],
+                           n_parallel: int = 1,
+                           first_record: bool = False,
+                           no_metadata: bool = False,
+                           quiet: bool = False,
+                           dataset: Optional[Dataset] = None) -> Data:
+    """
+    Read in TREx Spectrograph raw data (stream0 pgm* files).
+
+    Args:
+        file_list (List[str], List[Path], str, Path): 
+            The files to read in. Absolute paths are recommended, but not technically
+            necessary. This can be a single string for a file, or a list of strings to read
+            in multiple files. This parameter is required.
+
+        n_parallel (int): 
+            Number of data files to read in parallel using multiprocessing. Default value 
+            is 1. Adjust according to your computer's available resources. This parameter 
+            is optional.
+        
+        first_record (bool): 
+            Only read in the first record in each file. This is the same as the first_frame
+            parameter in the themis-imager-readfile and trex-imager-readfile libraries, and
+            is a read optimization if you only need one image per minute, as opposed to the
+            full temporal resolution of data (e.g., 3sec cadence). This parameter is optional.
+        
+        no_metadata (bool): 
+            Skip reading of metadata. This is a minor optimization if the metadata is not needed.
+            Default is `False`. This parameter is optional.
+        
+        quiet (bool): 
+            Do not print out errors while reading data files, if any are encountered. Any files
+            that encounter errors will be, as usual, accessible via the `problematic_files` 
+            attribute of the returned `Data` object. This parameter is optional.
+
+        dataset (Dataset): 
+            The dataset object for which the files are associated with. This parameter is
+            optional.
+
+    Returns:
+        A [`Data`](https://docs-pyucalgarysrs.phys.ucalgary.ca/data/classes.html#pyucalgarysrs.data.classes.Data) 
+        object containing the data read in, among other values.
+    
+    Raises:
+        pyaurorax.exceptions.AuroraXError: a generic read error was encountered
+    """
+    return self.__aurorax_obj.srs_obj.data.readers.read_trex_spectrograph(
+        file_list,
+        n_parallel=n_parallel,
+        first_record=first_record,
+        no_metadata=no_metadata,
+        quiet=quiet,
+        dataset=dataset,
+    )
+
+
+
+
+
+
+
+ +
+
\ No newline at end of file
diff --git a/docs/code/pyaurorax_api_reference/pyaurorax/data_products/data_products.html b/docs/code/pyaurorax_api_reference/pyaurorax/data_products/data_products.html
deleted file mode 100644
index 8a0d9ca..0000000
--- a/docs/code/pyaurorax_api_reference/pyaurorax/data_products/data_products.html
+++ /dev/null
@@ -1,973 +0,0 @@
-pyaurorax.data_products.data_products API documentation
-
-
-

Module pyaurorax.data_products.data_products

-
-
-

Functions for performing data product searches

-
- -Expand source code - -
"""
-Functions for performing data product searches
-"""
-
-import datetime
-import humanize
-from typing import Dict, List, Optional
-from .classes.data_product import DataProduct
-from .classes.search import Search
-from ..sources import (DataSource,
-                       list as sources_list)
-from ..exceptions import (AuroraXSearchException,
-                          AuroraXValidationException,
-                          AuroraXUploadException,
-                          AuroraXBadParametersException,
-                          AuroraXException)
-from ..requests import STANDARD_POLLING_SLEEP_TIME
-from ..api import (AuroraXRequest,
-                   urls as api_urls)
-
-# pdoc init
-__pdoc__: Dict = {}
-
-
-def __validate_data_source(identifier: int,
-                           records: List[DataProduct]) -> Optional[DataProduct]:
-    # get all current sources
-    sources = {source.identifier: source for source in sources_list()}
-    if identifier not in sources.keys():
-        raise AuroraXValidationException(f"Data source with unique identifier "
-                                         "{identifier} could not be found")
-
-    # process each record to make sure the program/platform/instrument_type matches
-    # the identifier found for the data source
-    for record in records:
-        # check the identifier, program name, platform name, and instrument type
-        try:
-            reference = sources[record.data_source.identifier]
-        except KeyError:
-            raise AuroraXValidationException(f"Data source with unique identifier "
-                                             "{record.data_source.identifier} could "
-                                             "not be found")
-
-        # check if it's a bad record
-        if not (record.data_source.program == reference.program
-                and record.data_source.platform == reference.platform
-                and record.data_source.instrument_type == reference.instrument_type):
-            return record
-
-    # found no bad records
-    return None
-
-
-def search(start: datetime.datetime,
-           end: datetime.datetime,
-           programs: Optional[List[str]] = None,
-           platforms: Optional[List[str]] = None,
-           instrument_types: Optional[List[str]] = None,
-           data_product_types: Optional[List[str]] = None,
-           metadata_filters: Optional[List[Dict]] = None,
-           metadata_filters_logical_operator: Optional[str] = None,
-           response_format: Optional[Dict] = None,
-           poll_interval: Optional[float] = STANDARD_POLLING_SLEEP_TIME,
-           return_immediately: Optional[bool] = False,
-           verbose: Optional[bool] = False) -> Search:
-    """
-    Search for data product records
-
-    By default, this function will block and wait until the request completes and
-    all data is downloaded. If you don't want to wait, set the 'return_immediately`
-    value to True. The Search object will be returned right after the search has been
-    started, and you can use the helper functions as part of that object to get the
-    data when it's done.
-
-    Note: At least one search criteria from programs, platforms, or
-    instrument_types, must be specified.
-
-    Args:
-        start: start timestamp of the search (inclusive)
-        end: end timestamp of the search (inclusive)
-        programs: list of programs to search through, defaults to None
-        platforms: list of platforms to search through, defaults to None
-        instrument_types: list of instrument types to search through, defaults to None
-        data_product_types: list of dictionaries describing data product
-            types to filter on e.g. "keogram", defaults to None. Options are in the
-            pyaurorax.data_products module, or at the top level using the
-            pyaurorax.DATA_PRODUCT_TYPE* variables.
-        metadata_filters: list of dictionaries describing metadata keys and
-            values to filter on, defaults to None
-
-            Example:
-
-                [{
-                    "key": "nbtrace_region",
-                    "operator": "in",
-                    "values": ["north polar cap"]
-                }]
-        metadata_filters_logical_operator: the logical operator to use when
-            evaluating metadata filters (either 'AND' or 'OR'), defaults
-            to "AND"
-        response_format: JSON representation of desired data response format
-        poll_interval: time in seconds to wait between polling attempts, defaults
-            to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME
-        return_immediately: initiate the search and return without waiting for data to
-            be received, defaults to False
-        verbose: output poll times and other progress messages, defaults to False
-
-    Returns:
-        a pyaurorax.data_products.Search object
-    """
-    # create a Search() object
-    s = Search(start,
-               end,
-               programs=programs,
-               platforms=platforms,
-               instrument_types=instrument_types,
-               data_product_types=data_product_types,
-               metadata_filters=metadata_filters,
-               metadata_filters_logical_operator=metadata_filters_logical_operator,
-               response_format=response_format)
-    if (verbose is True):
-        print("[%s] Search object created" % (datetime.datetime.now()))
-
-    # execute the search
-    s.execute()
-    if (verbose is True):
-        print("[%s] Request submitted" % (datetime.datetime.now()))
-        print("[%s] Request ID: %s" % (datetime.datetime.now(), s.request_id))
-        print("[%s] Request details available at: %s" % (datetime.datetime.now(),
-                                                         s.request_url))
-
-    # return immediately if we wanted to
-    if (return_immediately is True):
-        return s
-
-    # wait for data
-    if (verbose is True):
-        print("[%s] Waiting for data ..." % (datetime.datetime.now()))
-    s.wait(poll_interval=poll_interval, verbose=verbose)
-
-    # check if error condition encountered
-    if (s.status["search_result"]["error_condition"] is True):
-        # error encountered
-        raise AuroraXSearchException(s.logs[-1]["summary"])
-
-    # get the data
-    if (verbose is True):
-        print("[%s] Retrieving data ..." % (datetime.datetime.now()))
-    s.get_data()
-
-    # return response with the data
-    if (verbose is True):
-        print("[%s] Retrieved %s of data containing %d records" % (datetime.datetime.now(),
-                                                                   humanize.filesize.naturalsize(
-                                                                       s.status["search_result"]["file_size"]),
-                                                                   s.status["search_result"]["result_count"]))
-    return s
-
-
-def upload(identifier: int,
-           records: List[DataProduct],
-           validate_source: Optional[bool] = False) -> int:
-    """
-    Upload data product records to AuroraX
-
-    Args:
-        identifier: the AuroraX data source ID
-        records: data product records to upload
-        validate_source: validate all records before uploading, defaults to False
-
-    Returns:
-        0 for success, raises exception on error
-
-    Raises:
-        pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error
-        pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected content error
-        pyaurorax.exceptions.AuroraXUploadException: upload error
-        pyaurorax.exceptions.AuroraXValidationException: data source validation error
-    """
-    # validate record sources if the flag is set
-    if validate_source:
-        validation_error = __validate_data_source(identifier, records)
-        if validation_error:
-            raise AuroraXValidationException(f"Unable to validate data source "
-                                             "found in record: {validation_error}")
-
-    # translate each data product record to a request-friendly
-    # dict (ie. convert datetimes to strings, etc.)
-    for i, _ in enumerate(records):
-        if (type(records[i]) is DataProduct):
-            records[i] = records[i].to_json_serializable()  # type: ignore
-
-    # make request
-    url = api_urls.data_products_upload_url.format(identifier)
-    req = AuroraXRequest(method="post",
-                         url=url,
-                         body=records,
-                         null_response=True)
-    res = req.execute()
-
-    # evaluate response
-    if (res.status_code == 400):
-        if type(res.data) is list:
-            raise AuroraXUploadException("%s - %s" % (res.status_code,
-                                                      res.data[0]["error_message"]))
-
-        raise AuroraXUploadException("%s - %s" % (res.status_code,
-                                                  res.data["error_message"]))
-
-    # return
-    return 0
-
-
-def delete_urls(data_source: DataSource,
-                urls: List[str]) -> int:
-    """
-    Delete data products by URL.
-
-    The API processes this request asynchronously, so this method will return
-    immediately whether or not the data has already been deleted.
-
-    Args:
-        data_source: data source associated with the data product records (note that
-            identifier, program, platform, and instrument_type are required)
-        urls: URLs of data product records to delete
-
-    Returns:
-        0 on success
-
-    Raises:
-        pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error
-        pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error
-        pyaurorax.exceptions.AuroraXBadParametersException: invalid parameters entered
-        pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation
-    """
-    # check to make sure the identifier, program, platform, and instrument type are all set in the data source
-    if not all([data_source.identifier, data_source.program, data_source.platform, data_source.instrument_type]):
-        raise AuroraXBadParametersException("One or more required data source parameters "
-                                            "are missing, delete operation aborted")
-
-    # do request
-    url = api_urls.data_products_upload_url.format(data_source.identifier)
-    params = {
-        "program": data_source.program,
-        "platform": data_source.platform,
-        "instrument_type": data_source.instrument_type,
-        "urls": urls
-    }
-    delete_req = AuroraXRequest(method="delete",
-                                url=url,
-                                body=params,
-                                null_response=True)
-    res = delete_req.execute()
-
-    # evaluate response
-    if (res.status_code == 400):
-        if type(res.data) is list:
-            raise AuroraXBadParametersException("%s - %s" % (res.status_code,
-                                                             res.data[0]["message"]))
-        raise AuroraXBadParametersException("%s - %s" % (res.status_code,
-                                                         res.data["message"]))
-
-    # return
-    return 0
-
-
-def delete(data_source: DataSource,
-           start: datetime.datetime,
-           end: datetime.datetime,
-           data_product_types: Optional[List[str]] = None) -> int:
-    """
-    Delete data products associated with a data source within a date range.
-
-    The API processes this request asynchronously, so this method will return
-    immediately whether or not the data has already been deleted.
-
-    Args:
-        data_source: data source associated with the data product records (note that
-            identifier, program, platform, and instrument_type are required)
-        start: timestamp marking beginning of range to delete records for, inclusive
-        end: timestamp marking end of range to delete records for, inclusive
-        data_product_types: specific types of data product to delete, e.g.
-            ["keogram", "movie"]. If omitted, all data product types will be deleted.
-
-    Returns:
-        0 on success
-
-    Raises:
-        pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error
-        pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error
-        pyaurorax.exceptions.AuroraXNotFoundException: source not found
-        pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation
-    """
-    # check to make sure the identifier, program, platform, and instrument type are all set in the data source
-    if not all([data_source.identifier, data_source.program, data_source.platform, data_source.instrument_type]):
-        raise AuroraXBadParametersException("One or more required data source parameters "
-                                            "are missing, delete operation aborted")
-
-    # do request to get all data products between start and end datetimes
-    try:
-        s = search(start,
-                   end,
-                   programs=[data_source.program],
-                   platforms=[data_source.platform],
-                   instrument_types=[data_source.instrument_type],
-                   data_product_types=[] if not data_product_types else data_product_types)
-    except Exception as e:
-        raise AuroraXException(e)
-
-    # collect URLs from search result
-    urls = []
-    for dp in s.data:
-        urls.append(dp.url)  # type: ignore
-
-    # do delete request
-    return delete_urls(data_source, urls)
-
-
-def describe(search_obj: Search) -> str:
-    """
-    Describe a data product search as an "SQL-like" string
-
-    Args:
-        search_obj: the data product search object to describe
-
-    Returns:
-        the "SQL-like" string describing the data product search object
-    """
-    # make request
-    req = AuroraXRequest(method="post",
-                         url=api_urls.describe_data_products_query_url,
-                         body=search_obj.query)
-    res = req.execute()
-
-    # return
-    return res.data
-
-
-def get_request_url(request_id: str) -> str:
-    """
-    Get the data product search request URL for a given
-    request ID. This URL can be used for subsequent
-    pyaurorax.requests function calls. Primarily this method
-    facilitates delving into details about a set of already-submitted
-    data product searches.
-
-    Args:
-        request_id: the request identifier
-
-    Returns:
-        the request URL
-    """
-    url = api_urls.data_products_request_url.format(request_id)
-    return url
-
-
-
-
-
-
-
-

Functions

-
-
-def delete(data_source: DataSource, start: datetime.datetime, end: datetime.datetime, data_product_types: Optional[List[str]] = None) ‑> int -
-
-

Delete data products associated with a data source within a date range.

-

The API processes this request asynchronously, so this method will return -immediately whether or not the data has already been deleted.

-

Args

-
-
data_source
-
data source associated with the data product records (note that -identifier, program, platform, and instrument_type are required)
-
start
-
timestamp marking beginning of range to delete records for, inclusive
-
end
-
timestamp marking end of range to delete records for, inclusive
-
data_product_types
-
specific types of data product to delete, e.g. -["keogram", "movie"]. If omitted, all data product types will be deleted.
-
-

Returns

-

0 on success

-

Raises

-
-
AuroraXMaxRetriesException
-
max retry error
-
AuroraXUnexpectedContentTypeException
-
unexpected error
-
AuroraXNotFoundException
-
source not found
-
AuroraXUnauthorizedException
-
invalid API key for this operation
-
-
def delete(data_source: DataSource,
-           start: datetime.datetime,
-           end: datetime.datetime,
-           data_product_types: Optional[List[str]] = None) -> int:
-    """
-    Delete data products associated with a data source within a date range.
-
-    The API processes this request asynchronously, so this method will return
-    immediately whether or not the data has already been deleted.
-
-    Args:
-        data_source: data source associated with the data product records (note that
-            identifier, program, platform, and instrument_type are required)
-        start: timestamp marking beginning of range to delete records for, inclusive
-        end: timestamp marking end of range to delete records for, inclusive
-        data_product_types: specific types of data product to delete, e.g.
-            ["keogram", "movie"]. If omitted, all data product types will be deleted.
-
-    Returns:
-        0 on success
-
-    Raises:
-        pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error
-        pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error
-        pyaurorax.exceptions.AuroraXNotFoundException: source not found
-        pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation
-    """
-    # check to make sure the identifier, program, platform, and instrument type are all set in the data source
-    if not all([data_source.identifier, data_source.program, data_source.platform, data_source.instrument_type]):
-        raise AuroraXBadParametersException("One or more required data source parameters "
-                                            "are missing, delete operation aborted")
-
-    # do request to get all data products between start and end datetimes
-    try:
-        s = search(start,
-                   end,
-                   programs=[data_source.program],
-                   platforms=[data_source.platform],
-                   instrument_types=[data_source.instrument_type],
-                   data_product_types=[] if not data_product_types else data_product_types)
-    except Exception as e:
-        raise AuroraXException(e)
-
-    # collect URLs from search result
-    urls = []
-    for dp in s.data:
-        urls.append(dp.url)  # type: ignore
-
-    # do delete request
-    return delete_urls(data_source, urls)
-
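To make the delete() behaviour above concrete, here is a minimal usage sketch. It assumes the pyaurorax.data_products interface documented on this page and that an AuroraX API key with write access has already been configured; the data source identifier 400 and the date range are placeholders.

import datetime
import pyaurorax

# look up the data source; identifier, program, platform, and
# instrument_type must all be populated for delete() to proceed
ds = [s for s in pyaurorax.sources.list() if s.identifier == 400][0]

# remove all "movie" data products for this source on a single day
pyaurorax.data_products.delete(
    ds,
    datetime.datetime(2020, 1, 1, 0, 0, 0),
    datetime.datetime(2020, 1, 1, 23, 59, 59),
    data_product_types=["movie"],
)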
-
-
-def delete_urls(data_source: DataSource, urls: List[str]) ‑> int -
-
-

Delete data products by URL.

-

The API processes this request asynchronously, so this method will return -immediately whether or not the data has already been deleted.

-

Args

-
-
data_source
-
data source associated with the data product records (note that -identifier, program, platform, and instrument_type are required)
-
urls
-
URLs of data product records to delete
-
-

Returns

-

0 on success

-

Raises

-
-
AuroraXMaxRetriesException
-
max retry error
-
AuroraXUnexpectedContentTypeException
-
unexpected error
-
AuroraXBadParametersException
-
invalid parameters entered
-
AuroraXUnauthorizedException
-
invalid API key for this operation
-
-
def delete_urls(data_source: DataSource,
-                urls: List[str]) -> int:
-    """
-    Delete data products by URL.
-
-    The API processes this request asynchronously, so this method will return
-    immediately whether or not the data has already been deleted.
-
-    Args:
-        data_source: data source associated with the data product records (note that
-            identifier, program, platform, and instrument_type are required)
-        urls: URLs of data product records to delete
-
-    Returns:
-        0 on success
-
-    Raises:
-        pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error
-        pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error
-        pyaurorax.exceptions.AuroraXBadParametersException: invalid parameters entered
-        pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation
-    """
-    # check to make sure the identifier, program, platform, and instrument type are all set in the data source
-    if not all([data_source.identifier, data_source.program, data_source.platform, data_source.instrument_type]):
-        raise AuroraXBadParametersException("One or more required data source parameters "
-                                            "are missing, delete operation aborted")
-
-    # do request
-    url = api_urls.data_products_upload_url.format(data_source.identifier)
-    params = {
-        "program": data_source.program,
-        "platform": data_source.platform,
-        "instrument_type": data_source.instrument_type,
-        "urls": urls
-    }
-    delete_req = AuroraXRequest(method="delete",
-                                url=url,
-                                body=params,
-                                null_response=True)
-    res = delete_req.execute()
-
-    # evaluate response
-    if (res.status_code == 400):
-        if type(res.data) is list:
-            raise AuroraXBadParametersException("%s - %s" % (res.status_code,
-                                                             res.data[0]["message"]))
-        raise AuroraXBadParametersException("%s - %s" % (res.status_code,
-                                                         res.data["message"]))
-
-    # return
-    return 0
-
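A corresponding sketch for delete_urls(), again assuming a configured API key; the identifier and URLs are placeholders.

import pyaurorax

# the data source must have identifier, program, platform, and instrument_type set
ds = [s for s in pyaurorax.sources.list() if s.identifier == 400][0]

# delete two specific records by the URLs they were originally uploaded with
pyaurorax.data_products.delete_urls(ds, [
    "https://example.org/keograms/20200101_0600.jpg",
    "https://example.org/keograms/20200101_0700.jpg",
])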
-
-
-def describe(search_obj: Search) ‑> str -
-
-

Describe a data product search as an "SQL-like" string

-

Args

-
-
search_obj
-
the data product search object to describe
-
-

Returns

-

the "SQL-like" string describing the data product search object

-
def describe(search_obj: Search) -> str:
-    """
-    Describe a data product search as an "SQL-like" string
-
-    Args:
-        search_obj: the data product search object to describe
-
-    Returns:
-        the "SQL-like" string describing the data product search object
-    """
-    # make request
-    req = AuroraXRequest(method="post",
-                         url=api_urls.describe_data_products_query_url,
-                         body=search_obj.query)
-    res = req.execute()
-
-    # return
-    return res.data
-
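As a quick illustration of describe(), the sketch below builds a Search object without executing it and asks the API for its SQL-like description; the program name and dates are placeholders.

import datetime
import pyaurorax

s = pyaurorax.data_products.Search(
    datetime.datetime(2020, 1, 1, 0, 0, 0),
    datetime.datetime(2020, 1, 2, 23, 59, 59),
    programs=["themis-asi"],
)

# returns a human-readable, SQL-like description of the query
print(pyaurorax.data_products.describe(s))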
-
-
-def get_request_url(request_id: str) ‑> str -
-
-

Get the data product search request URL for a given -request ID. This URL can be used for subsequent -pyaurorax.requests function calls. Primarily this method -facilitates delving into details about a set of already-submitted -data product searches.

-

Args

-
-
request_id
-
the request identifier
-
-

Returns

-

the request URL

-
def get_request_url(request_id: str) -> str:
-    """
-    Get the data product search request URL for a given
-    request ID. This URL can be used for subsequent
-    pyaurorax.requests function calls. Primarily this method
-    facilitates delving into details about a set of already-submitted
-    data product searches.
-
-    Args:
-        request_id: the request identifier
-
-    Returns:
-        the request URL
-    """
-    url = api_urls.data_products_request_url.format(request_id)
-    return url
-
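A short sketch of get_request_url(); the request ID shown is a made-up placeholder, and the resulting URL can then be passed to the pyaurorax.requests helper functions.

import pyaurorax

request_id = "8634a4e6-253e-4d7b-a8f2-1b0a99e12f34"  # placeholder ID from an earlier search
url = pyaurorax.data_products.get_request_url(request_id)
print(url)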
-
-
-def search(start: datetime.datetime, end: datetime.datetime, programs: Optional[List[str]] = None, platforms: Optional[List[str]] = None, instrument_types: Optional[List[str]] = None, data_product_types: Optional[List[str]] = None, metadata_filters: Optional[List[Dict]] = None, metadata_filters_logical_operator: Optional[str] = None, response_format: Optional[Dict] = None, poll_interval: Optional[float] = 1.0, return_immediately: Optional[bool] = False, verbose: Optional[bool] = False) ‑> Search -
-
-

Search for data product records

-

By default, this function will block and wait until the request completes and -all data is downloaded. If you don't want to wait, set the 'return_immediately' -value to True. The Search object will be returned right after the search has been -started, and you can use the helper functions as part of that object to get the -data when it's done.

-

Note: At least one search criterion from programs, platforms, or -instrument_types must be specified.

-

Args

-
-
start
-
start timestamp of the search (inclusive)
-
end
-
end timestamp of the search (inclusive)
-
programs
-
list of programs to search through, defaults to None
-
platforms
-
list of platforms to search through, defaults to None
-
instrument_types
-
list of instrument types to search through, defaults to None
-
data_product_types
-
list of strings describing data product -types to filter on e.g. "keogram", defaults to None. Options are in the -pyaurorax.data_products module, or at the top level using the -pyaurorax.DATA_PRODUCT_TYPE* variables.
-
metadata_filters
-
-

list of dictionaries describing metadata keys and -values to filter on, defaults to None

-

Example:

-
[{
-    "key": "nbtrace_region",
-    "operator": "in",
-    "values": ["north polar cap"]
-}]
-
-
-
metadata_filters_logical_operator
-
the logical operator to use when -evaluating metadata filters (either 'AND' or 'OR'), defaults -to "AND"
-
response_format
-
JSON representation of desired data response format
-
poll_interval
-
time in seconds to wait between polling attempts, defaults -to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME
-
return_immediately
-
initiate the search and return without waiting for data to -be received, defaults to False
-
verbose
-
output poll times and other progress messages, defaults to False
-
-

Returns

-

a pyaurorax.data_products.Search object

-
def search(start: datetime.datetime,
-           end: datetime.datetime,
-           programs: Optional[List[str]] = None,
-           platforms: Optional[List[str]] = None,
-           instrument_types: Optional[List[str]] = None,
-           data_product_types: Optional[List[str]] = None,
-           metadata_filters: Optional[List[Dict]] = None,
-           metadata_filters_logical_operator: Optional[str] = None,
-           response_format: Optional[Dict] = None,
-           poll_interval: Optional[float] = STANDARD_POLLING_SLEEP_TIME,
-           return_immediately: Optional[bool] = False,
-           verbose: Optional[bool] = False) -> Search:
-    """
-    Search for data product records
-
-    By default, this function will block and wait until the request completes and
-    all data is downloaded. If you don't want to wait, set the 'return_immediately'
-    value to True. The Search object will be returned right after the search has been
-    started, and you can use the helper functions as part of that object to get the
-    data when it's done.
-
-    Note: At least one search criterion from programs, platforms, or
-    instrument_types must be specified.
-
-    Args:
-        start: start timestamp of the search (inclusive)
-        end: end timestamp of the search (inclusive)
-        programs: list of programs to search through, defaults to None
-        platforms: list of platforms to search through, defaults to None
-        instrument_types: list of instrument types to search through, defaults to None
-        data_product_types: list of strings describing data product
-            types to filter on e.g. "keogram", defaults to None. Options are in the
-            pyaurorax.data_products module, or at the top level using the
-            pyaurorax.DATA_PRODUCT_TYPE* variables.
-        metadata_filters: list of dictionaries describing metadata keys and
-            values to filter on, defaults to None
-
-            Example:
-
-                [{
-                    "key": "nbtrace_region",
-                    "operator": "in",
-                    "values": ["north polar cap"]
-                }]
-        metadata_filters_logical_operator: the logical operator to use when
-            evaluating metadata filters (either 'AND' or 'OR'), defaults
-            to "AND"
-        response_format: JSON representation of desired data response format
-        poll_interval: time in seconds to wait between polling attempts, defaults
-            to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME
-        return_immediately: initiate the search and return without waiting for data to
-            be received, defaults to False
-        verbose: output poll times and other progress messages, defaults to False
-
-    Returns:
-        a pyaurorax.data_products.Search object
-    """
-    # create a Search() object
-    s = Search(start,
-               end,
-               programs=programs,
-               platforms=platforms,
-               instrument_types=instrument_types,
-               data_product_types=data_product_types,
-               metadata_filters=metadata_filters,
-               metadata_filters_logical_operator=metadata_filters_logical_operator,
-               response_format=response_format)
-    if (verbose is True):
-        print("[%s] Search object created" % (datetime.datetime.now()))
-
-    # execute the search
-    s.execute()
-    if (verbose is True):
-        print("[%s] Request submitted" % (datetime.datetime.now()))
-        print("[%s] Request ID: %s" % (datetime.datetime.now(), s.request_id))
-        print("[%s] Request details available at: %s" % (datetime.datetime.now(),
-                                                         s.request_url))
-
-    # return immediately if we wanted to
-    if (return_immediately is True):
-        return s
-
-    # wait for data
-    if (verbose is True):
-        print("[%s] Waiting for data ..." % (datetime.datetime.now()))
-    s.wait(poll_interval=poll_interval, verbose=verbose)
-
-    # check if error condition encountered
-    if (s.status["search_result"]["error_condition"] is True):
-        # error encountered
-        raise AuroraXSearchException(s.logs[-1]["summary"])
-
-    # get the data
-    if (verbose is True):
-        print("[%s] Retrieving data ..." % (datetime.datetime.now()))
-    s.get_data()
-
-    # return response with the data
-    if (verbose is True):
-        print("[%s] Retrieved %s of data containing %d records" % (datetime.datetime.now(),
-                                                                   humanize.filesize.naturalsize(
-                                                                       s.status["search_result"]["file_size"]),
-                                                                   s.status["search_result"]["result_count"]))
-    return s
-
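For orientation, a minimal sketch of both the blocking and non-blocking search workflows described above. It assumes the pyaurorax.data_products interface documented on this page; the program name and dates are illustrative only.

import datetime
import pyaurorax

# blocking search: waits for completion and downloads the data
s = pyaurorax.data_products.search(
    datetime.datetime(2020, 1, 1, 0, 0, 0),
    datetime.datetime(2020, 1, 2, 23, 59, 59),
    programs=["themis-asi"],
    data_product_types=[pyaurorax.data_products.DATA_PRODUCT_TYPE_KEOGRAM],
    verbose=True,
)
print("%d records found" % len(s.data))

# non-blocking variant: start the search, poll, then collect the results
s = pyaurorax.data_products.search(
    datetime.datetime(2020, 1, 1, 0, 0, 0),
    datetime.datetime(2020, 1, 2, 23, 59, 59),
    programs=["themis-asi"],
    return_immediately=True,
)
s.wait()      # poll until the request has finished
s.get_data()  # download the results into s.data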
-
-
-def upload(identifier: int, records: List[DataProduct], validate_source: Optional[bool] = False) ‑> int -
-
-

Upload data product records to AuroraX

-

Args

-
-
identifier
-
the AuroraX data source ID
-
records
-
data product records to upload
-
validate_source
-
validate all records before uploading, defaults to False
-
-

Returns

-

0 for success, raises exception on error

-

Raises

-
-
AuroraXMaxRetriesException
-
max retry error
-
AuroraXUnexpectedContentTypeException
-
unexpected content error
-
AuroraXUploadException
-
upload error
-
AuroraXValidationException
-
data source validation error
-
-
def upload(identifier: int,
-           records: List[DataProduct],
-           validate_source: Optional[bool] = False) -> int:
-    """
-    Upload data product records to AuroraX
-
-    Args:
-        identifier: the AuroraX data source ID
-        records: data product records to upload
-        validate_source: validate all records before uploading, defaults to False
-
-    Returns:
-        0 for success, raises exception on error
-
-    Raises:
-        pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error
-        pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected content error
-        pyaurorax.exceptions.AuroraXUploadException: upload error
-        pyaurorax.exceptions.AuroraXValidationException: data source validation error
-    """
-    # validate record sources if the flag is set
-    if validate_source:
-        validation_error = __validate_data_source(identifier, records)
-        if validation_error:
-            raise AuroraXValidationException(f"Unable to validate data source "
-                                             "found in record: {validation_error}")
-
-    # translate each data product record to a request-friendly
-    # dict (ie. convert datetimes to strings, etc.)
-    for i, _ in enumerate(records):
-        if (type(records[i]) is DataProduct):
-            records[i] = records[i].to_json_serializable()  # type: ignore
-
-    # make request
-    url = api_urls.data_products_upload_url.format(identifier)
-    req = AuroraXRequest(method="post",
-                         url=url,
-                         body=records,
-                         null_response=True)
-    res = req.execute()
-
-    # evaluate response
-    if (res.status_code == 400):
-        if type(res.data) is list:
-            raise AuroraXUploadException("%s - %s" % (res.status_code,
-                                                      res.data[0]["error_message"]))
-
-        raise AuroraXUploadException("%s - %s" % (res.status_code,
-                                                  res.data["error_message"]))
-
-    # return
-    return 0
-
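A hedged sketch of the upload() workflow, assuming an API key with write access to the data source has already been configured; the identifier, URL, and timestamps are placeholders.

import datetime
import pyaurorax
from pyaurorax.data_products import DataProduct, DATA_PRODUCT_TYPE_KEOGRAM

# the data source the records belong to (identifier 400 is a placeholder)
ds = [s for s in pyaurorax.sources.list() if s.identifier == 400][0]

# build a record describing a keogram file hosted on our own server
record = DataProduct(
    data_source=ds,
    data_product_type=DATA_PRODUCT_TYPE_KEOGRAM,
    start=datetime.datetime(2020, 1, 1, 6, 0, 0),
    end=datetime.datetime(2020, 1, 1, 6, 59, 59),
    url="https://example.org/keograms/20200101_0600.jpg",
    metadata={},
)

# validate_source=True checks the record against the data source before uploading
pyaurorax.data_products.upload(ds.identifier, [record], validate_source=True)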
-
-
-
-
-
-
- -
\ No newline at end of file
diff --git a/docs/code/pyaurorax_api_reference/pyaurorax/data_products/index.html b/docs/code/pyaurorax_api_reference/pyaurorax/data_products/index.html
deleted file mode 100644
index c529696..0000000
--- a/docs/code/pyaurorax_api_reference/pyaurorax/data_products/index.html
+++ /dev/null
@@ -1,1591 +0,0 @@
-pyaurorax.data_products API documentation
-
-
-

Module pyaurorax.data_products

-
-
-

The data_products module is used to search and upload data -product records within AuroraX. One example of a data product -is a keogram.

-

Note that all functions and classes from submodules are imported -at this level of the data_products module. They can be referenced from -here instead of digging deeper into the submodules.

-
"""
-The data_products module is used to search and upload data
-product records within AuroraX. One example of a data product
-is a keogram.
-
-Note that all functions and classes from submodules are imported
-at this level of the data_products module. They can be referenced from
-here instead of digging deeper into the submodules.
-"""
-
-# keogram data product type
-DATA_PRODUCT_TYPE_KEOGRAM = "keogram"
-"""
-Data product type for keograms. Keograms are a 2-D
-representation of a series of images, and are one of
-the most popular data products that auroral science
-uses. More information can be found at
-https://docs.aurorax.space/about_the_data/standards/#keograms.
-"""
-
-# montage data product type
-DATA_PRODUCT_TYPE_MONTAGE = "montage"
-"""
-Data product type for montages. Like keograms, montages are
-another representation of a series of images. However, montages
-are not a 2D representation but rather a collage of thumbnail
-images for the period of time. An example can be found at
-https://data.phys.ucalgary.ca/sort_by_project/THEMIS/asi/stream2/2021/12/28/gill_themis19/20211228__gill_themis19_full-montage.pgm.jpg
-"""
-
-# movie data product type
-DATA_PRODUCT_TYPE_MOVIE = "movie"
-"""
-Data product type for movies. Movies are timelapse video
-files of auroral data, usually as MP4 or MPEG. They can
-consist of frames for a whole night, or an hour, and can
-be at any cadence that is most appropriate.
-"""
-
-# summary plot data product type
-DATA_PRODUCT_TYPE_SUMMARY_PLOT = "summary_plot"
-"""
-Data product type for summary plots. A summary plot can be any type
-of plot that shows auroral data in a summary format, for example a
-background-subtracted meridian scanning photometer plot showing
-counts in Rayleighs.
-"""
-
-# data availability data product type
-DATA_PRODUCT_TYPE_DATA_AVAILABILITY = "data_availability"
-"""
-Data product type for data availability. The AuroraX data availability
-system does not account for times when data was not expected to be
-collected, such as summer shutdowns due to inadequate night hours. This
-data product type for 'data availability' is meant to be used as a smarter
-data availability mechanism for Aurora.
-"""
-
-# function and class imports
-from .data_products import (search,
-                            upload,
-                            delete_urls,
-                            delete,
-                            describe,
-                            get_request_url)
-from .classes.data_product import DataProduct
-from .classes.search import Search
-
-# pdoc imports and exports
-from .data_products import __pdoc__ as __data_products_pdoc__
-from .classes.data_product import __pdoc__ as __classes_data_product_pdoc__
-from .classes.search import __pdoc__ as __classes_search_pdoc__
-__pdoc__ = __data_products_pdoc__
-__pdoc__ = dict(__pdoc__, **__classes_data_product_pdoc__)
-__pdoc__ = dict(__pdoc__, **__classes_search_pdoc__)
-__all__ = [
-    "DATA_PRODUCT_TYPE_KEOGRAM",
-    "DATA_PRODUCT_TYPE_MONTAGE",
-    "DATA_PRODUCT_TYPE_MOVIE",
-    "DATA_PRODUCT_TYPE_SUMMARY_PLOT",
-    "DATA_PRODUCT_TYPE_DATA_AVAILABILITY",
-    "search",
-    "upload",
-    "delete_urls",
-    "delete",
-    "describe",
-    "get_request_url",
-    "DataProduct",
-    "Search",
-]
-
-
-
-

Sub-modules

-
-
pyaurorax.data_products.classes
-
-

Separated classes and functions used by the data_products module …

-
-
pyaurorax.data_products.data_products
-
-

Functions for performing data product searches

-
-
-
-
-

Global variables

-
-
var DATA_PRODUCT_TYPE_DATA_AVAILABILITY
-
-

Data product type for data availability. The AuroraX data availability -system does not account for times when data was not expected to be -collected, such as summer shutdowns due to inadequate night hours. This -data product type for 'data availability' is meant to be used as a smarter -data availability mechanism for Aurora.

-
-
var DATA_PRODUCT_TYPE_KEOGRAM
-
-

Data product type for keograms. Keograms are a 2-D -representation of a series of images, and are one of -the most popular data products that auroral science -uses. More information can be found at -https://docs.aurorax.space/about_the_data/standards/#keograms.

-
-
var DATA_PRODUCT_TYPE_MONTAGE
-
-

Data product type for montages. Like keograms, montages are -another representation of a series of images. However, montages -are not a 2D representation but rather a collage of thumbnail -images for the period of time. An example can be found at -https://data.phys.ucalgary.ca/sort_by_project/THEMIS/asi/stream2/2021/12/28/gill_themis19/20211228__gill_themis19_full-montage.pgm.jpg

-
-
var DATA_PRODUCT_TYPE_MOVIE
-
-

Data product type for movies. Movies are timelapse video -files of auroral data, usually as MP4 or MPEG. They can -consist of frames for a whole night, or an hour, and can -be at any cadence that is most appropriate.

-
-
var DATA_PRODUCT_TYPE_SUMMARY_PLOT
-
-

Data product type for summary plots. A summary plot can be any type -of plot that shows auroral data in a summary format, for example a -background-subtracted meridian scanning photometer plot showing -counts in Rayleighs.

-
-
-
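The constants above are plain strings, so they can be used interchangeably with string literals; a small sketch (the values shown follow the assignments in the module source above).

import pyaurorax

# each constant is just the string the AuroraX API expects
assert pyaurorax.data_products.DATA_PRODUCT_TYPE_KEOGRAM == "keogram"

# e.g. as a data_product_types filter for a search, or as the
# data_product_type field of an uploaded DataProduct record
wanted_types = [
    pyaurorax.data_products.DATA_PRODUCT_TYPE_KEOGRAM,
    pyaurorax.data_products.DATA_PRODUCT_TYPE_MOVIE,
]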
-
-

Functions

-
-
-def delete(data_source: DataSource, start: datetime.datetime, end: datetime.datetime, data_product_types: Optional[List[str]] = None) ‑> int -
-
-

Delete data products associated with a data source within a date range.

-

The API processes this request asynchronously, so this method will return -immediately whether or not the data has already been deleted.

-

Args

-
-
data_source
-
data source associated with the data product records (note that -identifier, program, platform, and instrument_type are required)
-
start
-
timestamp marking beginning of range to delete records for, inclusive
-
end
-
timestamp marking end of range to delete records for, inclusive
-
data_product_types
-
specific types of data product to delete, e.g. -["keogram", "movie"]. If omitted, all data product types will be deleted.
-
-

Returns

-

0 on success

-

Raises

-
-
AuroraXMaxRetriesException
-
max retry error
-
AuroraXUnexpectedContentTypeException
-
unexpected error
-
AuroraXNotFoundException
-
source not found
-
AuroraXUnauthorizedException
-
invalid API key for this operation
-
-
def delete(data_source: DataSource,
-           start: datetime.datetime,
-           end: datetime.datetime,
-           data_product_types: Optional[List[str]] = None) -> int:
-    """
-    Delete data products associated with a data source within a date range.
-
-    The API processes this request asynchronously, so this method will return
-    immediately whether or not the data has already been deleted.
-
-    Args:
-        data_source: data source associated with the data product records (note that
-            identifier, program, platform, and instrument_type are required)
-        start: timestamp marking beginning of range to delete records for, inclusive
-        end: timestamp marking end of range to delete records for, inclusive
-        data_product_types: specific types of data product to delete, e.g.
-            ["keogram", "movie"]. If omitted, all data product types will be deleted.
-
-    Returns:
-        0 on success
-
-    Raises:
-        pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error
-        pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error
-        pyaurorax.exceptions.AuroraXNotFoundException: source not found
-        pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation
-    """
-    # check to make sure the identifier, program, platform, and instrument type are all set in the data source
-    if not all([data_source.identifier, data_source.program, data_source.platform, data_source.instrument_type]):
-        raise AuroraXBadParametersException("One or more required data source parameters "
-                                            "are missing, delete operation aborted")
-
-    # do request to get all data products between start and end datetimes
-    try:
-        s = search(start,
-                   end,
-                   programs=[data_source.program],
-                   platforms=[data_source.platform],
-                   instrument_types=[data_source.instrument_type],
-                   data_product_types=[] if not data_product_types else data_product_types)
-    except Exception as e:
-        raise AuroraXException(e)
-
-    # collect URLs from search result
-    urls = []
-    for dp in s.data:
-        urls.append(dp.url)  # type: ignore
-
-    # do delete request
-    return delete_urls(data_source, urls)
-
-
-
-def delete_urls(data_source: DataSource, urls: List[str]) ‑> int -
-
-

Delete data products by URL.

-

The API processes this request asynchronously, so this method will return -immediately whether or not the data has already been deleted.

-

Args

-
-
data_source
-
data source associated with the data product records (note that -identifier, program, platform, and instrument_type are required)
-
urls
-
URLs of data product records to delete
-
-

Returns

-

0 on success

-

Raises

-
-
AuroraXMaxRetriesException
-
max retry error
-
AuroraXUnexpectedContentTypeException
-
unexpected error
-
AuroraXBadParametersException
-
invalid parameters entered
-
AuroraXUnauthorizedException
-
invalid API key for this operation
-
-
def delete_urls(data_source: DataSource,
-                urls: List[str]) -> int:
-    """
-    Delete data products by URL.
-
-    The API processes this request asynchronously, so this method will return
-    immediately whether or not the data has already been deleted.
-
-    Args:
-        data_source: data source associated with the data product records (note that
-            identifier, program, platform, and instrument_type are required)
-        urls: URLs of data product records to delete
-
-    Returns:
-        0 on success
-
-    Raises:
-        pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error
-        pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error
-        pyaurorax.exceptions.AuroraXBadParametersException: invalid parameters entered
-        pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation
-    """
-    # check to make sure the identifier, program, platform, and instrument type are all set in the data source
-    if not all([data_source.identifier, data_source.program, data_source.platform, data_source.instrument_type]):
-        raise AuroraXBadParametersException("One or more required data source parameters "
-                                            "are missing, delete operation aborted")
-
-    # do request
-    url = api_urls.data_products_upload_url.format(data_source.identifier)
-    params = {
-        "program": data_source.program,
-        "platform": data_source.platform,
-        "instrument_type": data_source.instrument_type,
-        "urls": urls
-    }
-    delete_req = AuroraXRequest(method="delete",
-                                url=url,
-                                body=params,
-                                null_response=True)
-    res = delete_req.execute()
-
-    # evaluate response
-    if (res.status_code == 400):
-        if type(res.data) is list:
-            raise AuroraXBadParametersException("%s - %s" % (res.status_code,
-                                                             res.data[0]["message"]))
-        raise AuroraXBadParametersException("%s - %s" % (res.status_code,
-                                                         res.data["message"]))
-
-    # return
-    return 0
-
-
-
-def describe(search_obj: Search) ‑> str -
-
-

Describe a data product search as an "SQL-like" string

-

Args

-
-
search_obj
-
the data product search object to describe
-
-

Returns

-

the "SQL-like" string describing the data product search object

-
def describe(search_obj: Search) -> str:
-    """
-    Describe a data product search as an "SQL-like" string
-
-    Args:
-        search_obj: the data product search object to describe
-
-    Returns:
-        the "SQL-like" string describing the data product search object
-    """
-    # make request
-    req = AuroraXRequest(method="post",
-                         url=api_urls.describe_data_products_query_url,
-                         body=search_obj.query)
-    res = req.execute()
-
-    # return
-    return res.data
-
-
-
-def get_request_url(request_id: str) ‑> str -
-
-

Get the data product search request URL for a given -request ID. This URL can be used for subsequent -pyaurorax.requests function calls. Primarily this method -facilitates delving into details about a set of already-submitted -data product searches.

-

Args

-
-
request_id
-
the request identifier
-
-

Returns

-

the request URL

-
def get_request_url(request_id: str) -> str:
-    """
-    Get the data product search request URL for a given
-    request ID. This URL can be used for subsequent
-    pyaurorax.requests function calls. Primarily this method
-    facilitates delving into details about a set of already-submitted
-    data product searches.
-
-    Args:
-        request_id: the request identifier
-
-    Returns:
-        the request URL
-    """
-    url = api_urls.data_products_request_url.format(request_id)
-    return url
-
-
-
-def search(start: datetime.datetime, end: datetime.datetime, programs: Optional[List[str]] = None, platforms: Optional[List[str]] = None, instrument_types: Optional[List[str]] = None, data_product_types: Optional[List[str]] = None, metadata_filters: Optional[List[Dict]] = None, metadata_filters_logical_operator: Optional[str] = None, response_format: Optional[Dict] = None, poll_interval: Optional[float] = 1.0, return_immediately: Optional[bool] = False, verbose: Optional[bool] = False) ‑> Search -
-
-

Search for data product records

-

By default, this function will block and wait until the request completes and -all data is downloaded. If you don't want to wait, set the 'return_immediately' -value to True. The Search object will be returned right after the search has been -started, and you can use the helper functions as part of that object to get the -data when it's done.

-

Note: At least one search criterion from programs, platforms, or -instrument_types must be specified.

-

Args

-
-
start
-
start timestamp of the search (inclusive)
-
end
-
end timestamp of the search (inclusive)
-
programs
-
list of programs to search through, defaults to None
-
platforms
-
list of platforms to search through, defaults to None
-
instrument_types
-
list of instrument types to search through, defaults to None
-
data_product_types
-
list of strings describing data product -types to filter on e.g. "keogram", defaults to None. Options are in the -pyaurorax.data_products module, or at the top level using the -pyaurorax.DATA_PRODUCT_TYPE* variables.
-
metadata_filters
-
-

list of dictionaries describing metadata keys and -values to filter on, defaults to None

-

Example:

-
[{
-    "key": "nbtrace_region",
-    "operator": "in",
-    "values": ["north polar cap"]
-}]
-
-
-
metadata_filters_logical_operator
-
the logical operator to use when -evaluating metadata filters (either 'AND' or 'OR'), defaults -to "AND"
-
response_format
-
JSON representation of desired data response format
-
poll_interval
-
time in seconds to wait between polling attempts, defaults -to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME
-
return_immediately
-
initiate the search and return without waiting for data to -be received, defaults to False
-
verbose
-
output poll times and other progress messages, defaults to False
-
-

Returns

-

a pyaurorax.data_products.Search object

-
def search(start: datetime.datetime,
-           end: datetime.datetime,
-           programs: Optional[List[str]] = None,
-           platforms: Optional[List[str]] = None,
-           instrument_types: Optional[List[str]] = None,
-           data_product_types: Optional[List[str]] = None,
-           metadata_filters: Optional[List[Dict]] = None,
-           metadata_filters_logical_operator: Optional[str] = None,
-           response_format: Optional[Dict] = None,
-           poll_interval: Optional[float] = STANDARD_POLLING_SLEEP_TIME,
-           return_immediately: Optional[bool] = False,
-           verbose: Optional[bool] = False) -> Search:
-    """
-    Search for data product records
-
-    By default, this function will block and wait until the request completes and
-    all data is downloaded. If you don't want to wait, set the 'return_immediately'
-    value to True. The Search object will be returned right after the search has been
-    started, and you can use the helper functions as part of that object to get the
-    data when it's done.
-
-    Note: At least one search criterion from programs, platforms, or
-    instrument_types must be specified.
-
-    Args:
-        start: start timestamp of the search (inclusive)
-        end: end timestamp of the search (inclusive)
-        programs: list of programs to search through, defaults to None
-        platforms: list of platforms to search through, defaults to None
-        instrument_types: list of instrument types to search through, defaults to None
-        data_product_types: list of strings describing data product
-            types to filter on e.g. "keogram", defaults to None. Options are in the
-            pyaurorax.data_products module, or at the top level using the
-            pyaurorax.DATA_PRODUCT_TYPE* variables.
-        metadata_filters: list of dictionaries describing metadata keys and
-            values to filter on, defaults to None
-
-            Example:
-
-                [{
-                    "key": "nbtrace_region",
-                    "operator": "in",
-                    "values": ["north polar cap"]
-                }]
-        metadata_filters_logical_operator: the logical operator to use when
-            evaluating metadata filters (either 'AND' or 'OR'), defaults
-            to "AND"
-        response_format: JSON representation of desired data response format
-        poll_interval: time in seconds to wait between polling attempts, defaults
-            to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME
-        return_immediately: initiate the search and return without waiting for data to
-            be received, defaults to False
-        verbose: output poll times and other progress messages, defaults to False
-
-    Returns:
-        a pyaurorax.data_products.Search object
-    """
-    # create a Search() object
-    s = Search(start,
-               end,
-               programs=programs,
-               platforms=platforms,
-               instrument_types=instrument_types,
-               data_product_types=data_product_types,
-               metadata_filters=metadata_filters,
-               metadata_filters_logical_operator=metadata_filters_logical_operator,
-               response_format=response_format)
-    if (verbose is True):
-        print("[%s] Search object created" % (datetime.datetime.now()))
-
-    # execute the search
-    s.execute()
-    if (verbose is True):
-        print("[%s] Request submitted" % (datetime.datetime.now()))
-        print("[%s] Request ID: %s" % (datetime.datetime.now(), s.request_id))
-        print("[%s] Request details available at: %s" % (datetime.datetime.now(),
-                                                         s.request_url))
-
-    # return immediately if we wanted to
-    if (return_immediately is True):
-        return s
-
-    # wait for data
-    if (verbose is True):
-        print("[%s] Waiting for data ..." % (datetime.datetime.now()))
-    s.wait(poll_interval=poll_interval, verbose=verbose)
-
-    # check if error condition encountered
-    if (s.status["search_result"]["error_condition"] is True):
-        # error encountered
-        raise AuroraXSearchException(s.logs[-1]["summary"])
-
-    # get the data
-    if (verbose is True):
-        print("[%s] Retrieving data ..." % (datetime.datetime.now()))
-    s.get_data()
-
-    # return response with the data
-    if (verbose is True):
-        print("[%s] Retrieved %s of data containing %d records" % (datetime.datetime.now(),
-                                                                   humanize.filesize.naturalsize(
-                                                                       s.status["search_result"]["file_size"]),
-                                                                   s.status["search_result"]["result_count"]))
-    return s
-
-
-
-def upload(identifier: int, records: List[DataProduct], validate_source: Optional[bool] = False) ‑> int -
-
-

Upload data product records to AuroraX

-

Args

-
-
identifier
-
the AuroraX data source ID
-
records
-
data product records to upload
-
validate_source
-
validate all records before uploading, defaults to False
-
-

Returns

-

0 for success, raises exception on error

-

Raises

-
-
AuroraXMaxRetriesException
-
max retry error
-
AuroraXUnexpectedContentTypeException
-
unexpected content error
-
AuroraXUploadException
-
upload error
-
AuroraXValidationException
-
data source validation error
-
-
def upload(identifier: int,
-           records: List[DataProduct],
-           validate_source: Optional[bool] = False) -> int:
-    """
-    Upload data product records to AuroraX
-
-    Args:
-        identifier: the AuroraX data source ID
-        records: data product records to upload
-        validate_source: validate all records before uploading, defaults to False
-
-    Returns:
-        0 for success, raises exception on error
-
-    Raises:
-        pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error
-        pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected content error
-        pyaurorax.exceptions.AuroraXUploadException: upload error
-        pyaurorax.exceptions.AuroraXValidationException: data source validation error
-    """
-    # validate record sources if the flag is set
-    if validate_source:
-        validation_error = __validate_data_source(identifier, records)
-        if validation_error:
-            raise AuroraXValidationException(f"Unable to validate data source "
-                                             "found in record: {validation_error}")
-
-    # translate each data product record to a request-friendly
-    # dict (ie. convert datetimes to strings, etc.)
-    for i, _ in enumerate(records):
-        if (type(records[i]) is DataProduct):
-            records[i] = records[i].to_json_serializable()  # type: ignore
-
-    # make request
-    url = api_urls.data_products_upload_url.format(identifier)
-    req = AuroraXRequest(method="post",
-                         url=url,
-                         body=records,
-                         null_response=True)
-    res = req.execute()
-
-    # evaluate response
-    if (res.status_code == 400):
-        if type(res.data) is list:
-            raise AuroraXUploadException("%s - %s" % (res.status_code,
-                                                      res.data[0]["error_message"]))
-
-        raise AuroraXUploadException("%s - %s" % (res.status_code,
-                                                  res.data["error_message"]))
-
-    # return
-    return 0
-
-
-
-
-
-

Classes

-
-
-class DataProduct -(**data: Any) -
-
-

Data product object

-

Attributes

-
-
data_source
-
data source that the data product record is associated with
-
data_product_type
-
data product type ("keogram", "movie", "summary_plot")
-
start
-
starting timestamp for the record (assumed it is in UTC), inclusive
-
end
-
ending timestamp for the record (assumed it is in UTC), inclusive
-
url
-
the URL of data product
-
metadata
-
metadata for this record (arbitrary keys and values)
-
-

Create a new model by parsing and validating input data from keyword arguments.

-

Raises ValidationError if the input data cannot be parsed to form a valid model.

-
class DataProduct(BaseModel):
-    """
-    Data product object
-
-    Attributes:
-        data_source: data source that the data product record is associated with
-        data_product_type: data product type ("keogram", "movie", "summary_plot")
-        start: starting timestamp for the record (assumed it is in UTC), inclusive
-        end: ending timestamp for the record (assumed it is in UTC), inclusive
-        url: the URL of data product
-        metadata: metadata for this record (arbitrary keys and values)
-    """
-    data_source: DataSource
-    data_product_type: str
-    start: datetime.datetime
-    end: datetime.datetime
-    url: str
-    metadata: Dict
-
-    def to_json_serializable(self) -> Dict:
-        """
-        Convert object to a JSON-serializable object (ie. translate
-        datetime objects to strings)
-
-        Returns:
-            a dictionary object that is JSON-serializable
-        """
-        # init
-        d = self.__dict__
-
-        # format epoch as str
-        if (type(d["start"]) is datetime.datetime):
-            d["start"] = d["start"].strftime("%Y-%m-%dT%H:%M:00.000")
-        if (type(d["end"]) is datetime.datetime):
-            d["end"] = d["end"].strftime("%Y-%m-%dT%H:%M:00.000")
-
-        # format metadata
-        if (type(self.metadata) is dict):
-            for key, value in self.metadata.items():
-                if (type(value) is datetime.datetime or type(value) is datetime.date):
-                    self.metadata[key] = self.metadata[key].strftime(
-                        "%Y-%m-%dT%H:%M:%S.%f")
-        if (type(self.metadata) is list):
-            self.metadata = {}
-
-        # format data source fields for query
-        d["program"] = self.data_source.program
-        d["platform"] = self.data_source.platform
-        d["instrument_type"] = self.data_source.instrument_type
-        del d["data_source"]
-
-        # return
-        return d
-
-    def __str__(self) -> str:
-        """
-        String method
-
-        Returns:
-            string format of DataProduct object
-        """
-        return self.__repr__()
-
-    def __repr__(self) -> str:
-        """
-        Object representation
-
-        Returns:
-            object representation of DataProduct object
-        """
-        # shorten the metadata and url
-        max_len = 20
-        attr_metadata = f"{self.metadata}"
-        if (len(attr_metadata) > max_len):
-            attr_metadata = attr_metadata[0:max_len] + "...}"
-        attr_url = f"{self.url}"
-        if (len(attr_url) > max_len):
-            attr_url = attr_url[0:max_len] + "..."
-
-        # return formatted representation
-        return f"DataProduct(data_source={repr(self.data_source)}, start={repr(self.start)}, " \
-            f"end={repr(self.end)}, data_product_type='{self.data_product_type}', url='{attr_url}', " \
-            f"metadata={attr_metadata})"
-
-

Ancestors

-
    -
  • pydantic.main.BaseModel
  • -
  • pydantic.utils.Representation
  • -
-

Class variables

-
-
var data_product_type : str
-
-
-
-
var data_sourceDataSource
-
-
-
-
var end : datetime.datetime
-
-
-
-
var metadata : Dict
-
-
-
-
var start : datetime.datetime
-
-
-
-
var url : str
-
-
-
-
-

Methods

-
-
-def to_json_serializable(self) ‑> Dict -
-
-

Convert object to a JSON-serializable object (i.e. translate datetime objects to strings)

-

Returns

-

a dictionary object that is JSON-serializable

-
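As a rough illustration, continuing the hypothetical record from the sketch above:

serializable = record.to_json_serializable()
# datetimes are now strings, and the data source has been flattened into
# "program", "platform", and "instrument_type" keys suitable for upload requests
print(serializable["start"])     # e.g. "2020-01-01T00:00:00.000"
print(serializable["program"])   # e.g. "themis-asi"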
def to_json_serializable(self) -> Dict:
-    """
-    Convert object to a JSON-serializable object (ie. translate
-    datetime objects to strings)
-
-    Returns:
-        a dictionary object that is JSON-serializable
-    """
-    # init
-    d = self.__dict__
-
-    # format epoch as str
-    if (type(d["start"]) is datetime.datetime):
-        d["start"] = d["start"].strftime("%Y-%m-%dT%H:%M:00.000")
-    if (type(d["end"]) is datetime.datetime):
-        d["end"] = d["end"].strftime("%Y-%m-%dT%H:%M:00.000")
-
-    # format metadata
-    if (type(self.metadata) is dict):
-        for key, value in self.metadata.items():
-            if (type(value) is datetime.datetime or type(value) is datetime.date):
-                self.metadata[key] = self.metadata[key].strftime(
-                    "%Y-%m-%dT%H:%M:%S.%f")
-    if (type(self.metadata) is list):
-        self.metadata = {}
-
-    # format data source fields for query
-    d["program"] = self.data_source.program
-    d["platform"] = self.data_source.platform
-    d["instrument_type"] = self.data_source.instrument_type
-    del d["data_source"]
-
-    # return
-    return d
-
-
-
-
-
-class Search -(start: datetime.datetime, end: datetime.datetime, programs: Optional[List[str]] = None, platforms: Optional[List[str]] = None, instrument_types: Optional[List[str]] = None, data_product_types: Optional[List[str]] = None, metadata_filters: Optional[List[Dict]] = None, metadata_filters_logical_operator: Optional[str] = 'AND', response_format: Optional[Dict] = None) -
-
-

Class representing a data product search

-

Attributes

-
-
start
-
start timestamp of the search (inclusive)
-
end
-
end timestamp of the search (inclusive)
-
programs
-
list of program names to search
-
platforms
-
list of platform names to search
-
instrument_types
-
list of instrument types to search
-
data_product_types
-
list of strings describing data product types to filter on, e.g. "keogram", defaults to None. Options are in the pyaurorax.data_products module, or at the top level using the pyaurorax.DATA_PRODUCT_TYPE* variables.
-
metadata_filters
-
-

list of dictionaries describing metadata keys and values to filter on, defaults to None

-

Example:

-
[{
-    "key": "nbtrace_region",
-    "operator": "in",
-    "values": ["north polar cap"]
-}]
-
-
-
metadata_filters_logical_operator
-
the logical operator to use when evaluating metadata filters (either 'AND' or 'OR'), defaults to "AND"
-
response_format
-
JSON representation of desired data response format
-
request
-
AuroraXResponse object returned when the search is executed
-
request_id
-
unique ID assigned to the request by the AuroraX API
-
request_url
-
unique URL assigned to the request by the AuroraX API
-
executed
-
indicates if the search has been executed/started
-
completed
-
indicates if the search has finished
-
data_url
-
the URL where data is accessed
-
query
-
the query for this request as JSON
-
status
-
the status of the query
-
data
-
the data product records found
-
logs
-
all log messages output by the AuroraX API for this request
-
-
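A minimal end-to-end sketch of using this class directly, assuming the legacy pyaurorax.data_products module path documented here; the program name is an illustrative filter only.

import datetime
import pyaurorax

s = pyaurorax.data_products.Search(datetime.datetime(2020, 1, 1, 0, 0, 0),
                                   datetime.datetime(2020, 1, 1, 23, 59, 59),
                                   programs=["auroramax"])  # illustrative filter
s.execute()      # submit the request; the API processes it asynchronously
s.wait()         # block until the search completes
s.get_data()     # populate s.data with DataProduct records
print(len(s.data))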
class Search():
-    """
-    Class representing a data product search
-
-    Attributes:
-        start: start timestamp of the search (inclusive)
-        end: end timestamp of the search (inclusive)
-        programs: list of program names to search
-        platforms: list of platform names to search
-        instrument_types: list of instrument types to search
-        data_product_types: list of dictionaries describing data product
-            types to filter on e.g. "keogram", defaults to None. Options are in the
-            pyaurorax.data_products module, or at the top level using the
-            pyaurorax.DATA_PRODUCT_TYPE* variables.
-        metadata_filters: list of dictionaries describing metadata keys and
-            values to filter on, defaults to None
-
-            Example:
-
-                [{
-                    "key": "nbtrace_region",
-                    "operator": "in",
-                    "values": ["north polar cap"]
-                }]
-        metadata_filters_logical_operator: the logical operator to use when
-            evaluating metadata filters (either 'AND' or 'OR'), defaults
-            to "AND"
-        response_format: JSON representation of desired data response format
-        request: AuroraXResponse object returned when the search is executed
-        request_id: unique ID assigned to the request by the AuroraX API
-        request_url: unique URL assigned to the request by the AuroraX API
-        executed: indicates if the search has been executed/started
-        completed: indicates if the search has finished
-        data_url: the URL where data is accessed
-        query: the query for this request as JSON
-        status: the status of the query
-        data: the data product records found
-        logs: all log messages outputed by the AuroraX API for this request
-    """
-
-    def __init__(self,
-                 start: datetime.datetime,
-                 end: datetime.datetime,
-                 programs: Optional[List[str]] = None,
-                 platforms: Optional[List[str]] = None,
-                 instrument_types: Optional[List[str]] = None,
-                 data_product_types: Optional[List[str]] = None,
-                 metadata_filters: Optional[List[Dict]] = None,
-                 metadata_filters_logical_operator: Optional[str] = "AND",
-                 response_format: Optional[Dict] = None) -> None:
-
-        # set variables using passed in args
-        self.start = start
-        self.end = end
-        self.programs = programs
-        self.platforms = platforms
-        self.instrument_types = instrument_types
-        self.data_product_types = data_product_types
-        self.metadata_filters = metadata_filters
-        self.metadata_filters_logical_operator = metadata_filters_logical_operator
-        self.response_format = response_format
-
-        # initialize additional variables
-        self.request: AuroraXResponse = None
-        self.request_id: str = ""
-        self.request_url: str = ""
-        self.executed: bool = False
-        self.completed: bool = False
-        self.data_url: str = ""
-        self.query: Dict = {}
-        self.status: Dict = {}
-        self.data: List[Union[DataProduct, Dict]] = []
-        self.logs: List[Dict] = []
-
-    def __str__(self) -> str:
-        """
-        String method
-
-        Returns:
-            string format of DataProduct Search object
-        """
-        return self.__repr__()
-
-    def __repr__(self) -> str:
-        """
-        Object representation
-
-        Returns:
-            object representation of DataProduct Search object
-        """
-        return f"DataProductsSearch(executed={self.executed}, " \
-            f"completed={self.completed}, request_id='{self.request_id}')"
-
-    @property
-    def query(self):
-        """
-        Property for the query value
-        """
-        self._query = {
-            "data_sources": {
-                "programs": [] if not self.programs else self.programs,
-                "platforms": [] if not self.platforms else self.platforms,
-                "instrument_types": [] if not self.instrument_types else self.instrument_types,
-                "data_product_metadata_filters": {} if not self.metadata_filters
-                else {
-                    "logical_operator": self.metadata_filters_logical_operator,
-                    "expressions": self.metadata_filters
-                },
-            },
-            "start": self.start.strftime("%Y-%m-%dT%H:%M:%S"),
-            "end": self.end.strftime("%Y-%m-%dT%H:%M:%S"),
-            "data_product_type_filters": [] if not self.data_product_types else self.data_product_types,
-        }
-        return self._query
-
-    @query.setter
-    def query(self, query):
-        self._query = query
-
-    def execute(self) -> None:
-        """
-        Initiate a data product search request
-        """
-        # do request
-        url = urls.data_products_search_url
-        req = AuroraXRequest(method="post",
-                             url=url,
-                             body=self.query,
-                             null_response=True)
-        res = req.execute()
-
-        # set request ID, request_url, executed
-        self.executed = True
-        if (res.status_code == 202):
-            # request successfully dispatched
-            self.executed = True
-            self.request_url = res.request.headers["location"]
-            self.request_id = self.request_url.rsplit("/", 1)[-1]
-
-        # set request variable
-        self.request = res
-
-    def update_status(self, status: Optional[Dict] = None) -> None:
-        """
-        Update the status of this data product search request
-
-        Args:
-            status: the previously-retrieved status of this request (include
-                to avoid requesting it from the API again), defaults to None
-        """
-        # get the status if it isn't passed in
-        if (status is None):
-            status = requests_get_status(self.request_url)
-
-        # update request status by checking if data URI is set
-        if (status["search_result"]["data_uri"] is not None):
-            self.completed = True
-            self.data_url = "%s%s" % (urls.base_url,
-                                      status["search_result"]["data_uri"])
-
-        # set class variable "status" and "logs"
-        self.status = status
-        self.logs = status["logs"]
-
-    def check_for_data(self) -> bool:
-        """
-        Check to see if data is available for this data product
-        search request
-
-        Returns:
-            True if data is available, else False
-        """
-        self.update_status()
-        return self.completed
-
-    def get_data(self) -> None:
-        """
-        Retrieve the data available for this data product search request
-        """
-        # check if it's completed yet
-        if (self.completed is False):
-            print("No data available, update status or check for data first")
-            return
-
-        # get data
-        raw_data = requests_get_data(self.data_url, response_format=self.response_format)
-
-        # set data variable
-        if self.response_format is not None:
-            self.data = raw_data
-        else:
-            # cast data source objects
-            for i in range(0, len(raw_data)):
-                ds = DataSource(**raw_data[i]["data_source"], format=FORMAT_BASIC_INFO)
-                raw_data[i]["data_source"] = ds
-
-            # cast data product objects
-            self.data = [DataProduct(**dp) for dp in raw_data]
-
-    def wait(self,
-             poll_interval: Optional[float] = STANDARD_POLLING_SLEEP_TIME,
-             verbose: Optional[bool] = False) -> None:
-        """
-        Block and wait for the request to complete and data is available
-        for retrieval
-
-        Args:
-            poll_interval: time in seconds to wait between polling attempts,
-                defaults to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME
-            verbose: output poll times and other progress messages, defaults
-                to False
-        """
-        url = urls.data_products_request_url.format(self.request_id)
-        self.update_status(requests_wait_for_data(url,
-                                                  poll_interval=poll_interval,
-                                                  verbose=verbose))
-
-    def cancel(self,
-               wait: Optional[bool] = False,
-               poll_interval: Optional[float] = STANDARD_POLLING_SLEEP_TIME,
-               verbose: Optional[bool] = False) -> int:
-        """
-        Cancel the data product search request
-
-        This method returns immediately by default since the API processes
-        this request asynchronously. If you would prefer to wait for it
-        to be completed, set the 'wait' parameter to True. You can adjust
-        the polling time using the 'poll_interval' parameter.
-
-        Args:
-            wait: wait until the cancellation request has been
-                completed (may wait for several minutes)
-            poll_interval: seconds to wait between polling
-                calls, defaults to STANDARD_POLLING_SLEEP_TIME.
-            verbose: output poll times and other progress messages, defaults
-                to False
-
-        Returns:
-            1 on success
-
-        Raises:
-            pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error
-            pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation
-        """
-        url = urls.data_products_request_url.format(self.request_id)
-        return requests_cancel(url, wait=wait, poll_interval=poll_interval, verbose=verbose)
-
-

Instance variables

-
-
var query
-
-

Property for the query value

-
@property
-def query(self):
-    """
-    Property for the query value
-    """
-    self._query = {
-        "data_sources": {
-            "programs": [] if not self.programs else self.programs,
-            "platforms": [] if not self.platforms else self.platforms,
-            "instrument_types": [] if not self.instrument_types else self.instrument_types,
-            "data_product_metadata_filters": {} if not self.metadata_filters
-            else {
-                "logical_operator": self.metadata_filters_logical_operator,
-                "expressions": self.metadata_filters
-            },
-        },
-        "start": self.start.strftime("%Y-%m-%dT%H:%M:%S"),
-        "end": self.end.strftime("%Y-%m-%dT%H:%M:%S"),
-        "data_product_type_filters": [] if not self.data_product_types else self.data_product_types,
-    }
-    return self._query
-
-
-
-
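For reference, the dictionary produced by this property has the shape below (a sketch based on the code above; the values shown are placeholders).

{
    "data_sources": {
        "programs": ["auroramax"],
        "platforms": [],
        "instrument_types": [],
        "data_product_metadata_filters": {},
    },
    "start": "2020-01-01T00:00:00",
    "end": "2020-01-01T23:59:59",
    "data_product_type_filters": [],
}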

Methods

-
-
-def cancel(self, wait: Optional[bool] = False, poll_interval: Optional[float] = 1.0, verbose: Optional[bool] = False) ‑> int -
-
-

Cancel the data product search request

-

This method returns immediately by default since the API processes this request asynchronously. If you would prefer to wait for it to be completed, set the 'wait' parameter to True. You can adjust the polling time using the 'poll_interval' parameter.

-

Args

-
-
wait
-
wait until the cancellation request has been completed (may wait for several minutes)
-
poll_interval
-
seconds to wait between polling calls, defaults to STANDARD_POLLING_SLEEP_TIME
-
verbose
-
output poll times and other progress messages, defaults to False
-
-

Returns

-

1 on success

-

Raises

-
-
AuroraXUnexpectedContentTypeException
-
unexpected error
-
AuroraXUnauthorizedException
-
invalid API key for this operation
-
-
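For example (a sketch; assumes a Search object s that has already been executed):

s.cancel()           # returns immediately, cancellation is handled asynchronously
s.cancel(wait=True)  # or block until the API confirms the cancellation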
def cancel(self,
-           wait: Optional[bool] = False,
-           poll_interval: Optional[float] = STANDARD_POLLING_SLEEP_TIME,
-           verbose: Optional[bool] = False) -> int:
-    """
-    Cancel the data product search request
-
-    This method returns immediately by default since the API processes
-    this request asynchronously. If you would prefer to wait for it
-    to be completed, set the 'wait' parameter to True. You can adjust
-    the polling time using the 'poll_interval' parameter.
-
-    Args:
-        wait: wait until the cancellation request has been
-            completed (may wait for several minutes)
-        poll_interval: seconds to wait between polling
-            calls, defaults to STANDARD_POLLING_SLEEP_TIME.
-        verbose: output poll times and other progress messages, defaults
-            to False
-
-    Returns:
-        1 on success
-
-    Raises:
-        pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error
-        pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation
-    """
-    url = urls.data_products_request_url.format(self.request_id)
-    return requests_cancel(url, wait=wait, poll_interval=poll_interval, verbose=verbose)
-
-
-
-def check_for_data(self) ‑> bool -
-
-

Check to see if data is available for this data product search request

-

Returns

-

True if data is available, else False

-
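A rough polling sketch built on this method (assumes an executed Search object s; the sleep interval is arbitrary):

import time

while not s.check_for_data():
    time.sleep(5)  # arbitrary poll interval, for illustration
s.get_data()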
def check_for_data(self) -> bool:
-    """
-    Check to see if data is available for this data product
-    search request
-
-    Returns:
-        True if data is available, else False
-    """
-    self.update_status()
-    return self.completed
-
-
-
-def execute(self) ‑> None -
-
-

Initiate a data product search request

-
def execute(self) -> None:
-    """
-    Initiate a data product search request
-    """
-    # do request
-    url = urls.data_products_search_url
-    req = AuroraXRequest(method="post",
-                         url=url,
-                         body=self.query,
-                         null_response=True)
-    res = req.execute()
-
-    # set request ID, request_url, executed
-    self.executed = True
-    if (res.status_code == 202):
-        # request successfully dispatched
-        self.executed = True
-        self.request_url = res.request.headers["location"]
-        self.request_id = self.request_url.rsplit("/", 1)[-1]
-
-    # set request variable
-    self.request = res
-
-
-
-def get_data(self) ‑> None -
-
-

Retrieve the data available for this data product search request

-
def get_data(self) -> None:
-    """
-    Retrieve the data available for this data product search request
-    """
-    # check if it's completed yet
-    if (self.completed is False):
-        print("No data available, update status or check for data first")
-        return
-
-    # get data
-    raw_data = requests_get_data(self.data_url, response_format=self.response_format)
-
-    # set data variable
-    if self.response_format is not None:
-        self.data = raw_data
-    else:
-        # cast data source objects
-        for i in range(0, len(raw_data)):
-            ds = DataSource(**raw_data[i]["data_source"], format=FORMAT_BASIC_INFO)
-            raw_data[i]["data_source"] = ds
-
-        # cast data product objects
-        self.data = [DataProduct(**dp) for dp in raw_data]
-
-
-
-def update_status(self, status: Optional[Dict] = None) ‑> None -
-
-

Update the status of this data product search request

-

Args

-
-
status
-
the previously-retrieved status of this request (include to avoid requesting it from the API again), defaults to None
-
-
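For example, reusing a status dictionary that was already fetched (a sketch; pyaurorax.requests.get_status is assumed here to be the public counterpart of the helper used in the source below):

status = pyaurorax.requests.get_status(s.request_url)  # assumed public helper
s.update_status(status=status)                         # avoids a second API call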
def update_status(self, status: Optional[Dict] = None) -> None:
-    """
-    Update the status of this data product search request
-
-    Args:
-        status: the previously-retrieved status of this request (include
-            to avoid requesting it from the API again), defaults to None
-    """
-    # get the status if it isn't passed in
-    if (status is None):
-        status = requests_get_status(self.request_url)
-
-    # update request status by checking if data URI is set
-    if (status["search_result"]["data_uri"] is not None):
-        self.completed = True
-        self.data_url = "%s%s" % (urls.base_url,
-                                  status["search_result"]["data_uri"])
-
-    # set class variable "status" and "logs"
-    self.status = status
-    self.logs = status["logs"]
-
-
-
-def wait(self, poll_interval: Optional[float] = 1.0, verbose: Optional[bool] = False) ‑> None -
-
-

Block and wait until the request completes and data is available for retrieval

-

Args

-
-
poll_interval
-
time in seconds to wait between polling attempts, defaults to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME
-
verbose
-
output poll times and other progress messages, defaults to False
-
-
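For example (a sketch; assumes an executed Search object s):

s.wait(poll_interval=5.0, verbose=True)  # poll every 5 seconds, printing progress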
def wait(self,
-         poll_interval: Optional[float] = STANDARD_POLLING_SLEEP_TIME,
-         verbose: Optional[bool] = False) -> None:
-    """
-    Block and wait for the request to complete and data is available
-    for retrieval
-
-    Args:
-        poll_interval: time in seconds to wait between polling attempts,
-            defaults to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME
-        verbose: output poll times and other progress messages, defaults
-            to False
-    """
-    url = urls.data_products_request_url.format(self.request_id)
-    self.update_status(requests_wait_for_data(url,
-                                              poll_interval=poll_interval,
-                                              verbose=verbose))
-
-
-
-
-
-
-
- -
\ No newline at end of file
diff --git a/docs/code/pyaurorax_api_reference/pyaurorax/ephemeris/ephemeris.html b/docs/code/pyaurorax_api_reference/pyaurorax/ephemeris/ephemeris.html
deleted file mode 100644
index 25e3b0b..0000000
--- a/docs/code/pyaurorax_api_reference/pyaurorax/ephemeris/ephemeris.html
+++ /dev/null
@@ -1,827 +0,0 @@
-pyaurorax.ephemeris.ephemeris API documentation
-
-
-

Module pyaurorax.ephemeris.ephemeris

-
-
-

Functions for performing ephemeris searches

-
"""
-Functions for performing ephemeris searches
-"""
-
-import datetime
-import humanize
-from typing import Dict, List, Optional
-from .classes.ephemeris import Ephemeris
-from .classes.search import Search
-from ..sources import (DataSource,
-                       list as sources_list)
-from ..exceptions import (AuroraXSearchException,
-                          AuroraXValidationException,
-                          AuroraXUploadException,
-                          AuroraXBadParametersException)
-from ..requests import STANDARD_POLLING_SLEEP_TIME
-from ..api import (AuroraXRequest, urls)
-
-# pdoc init
-__pdoc__: Dict = {}
-
-
-def __validate_data_source(identifier: int,
-                           records: List[Ephemeris]) -> Optional[Ephemeris]:
-    # get all current sources
-    sources = {source.identifier: source for source in sources_list()}
-    if identifier not in sources.keys():
-        raise AuroraXValidationException(f"Data source with unique identifier "
-                                         f"{identifier} could not be found")
-
-    # process each record to make sure the program/platform/instrument_type matches
-    # the identifier found for the data source
-    for record in records:
-        # check the identifier, program name, platform name, and instrument type
-        try:
-            reference = sources[record.data_source.identifier]
-        except KeyError:
-            raise AuroraXValidationException(f"Data source with unique identifier "
-                                             f"{record.data_source.identifier} could "
-                                             "not be found")
-
-        # check if it's a bad record
-        if not (record.data_source.program == reference.program
-                and record.data_source.platform == reference.platform
-                and record.data_source.instrument_type == reference.instrument_type):
-            return record
-
-    # found no bad records
-    return None
-
-
-def search(start: datetime.datetime,
-           end: datetime.datetime,
-           programs: Optional[List[str]] = None,
-           platforms: Optional[List[str]] = None,
-           instrument_types: Optional[List[str]] = None,
-           metadata_filters: Optional[List[Dict]] = None,
-           metadata_filters_logical_operator: Optional[str] = None,
-           response_format: Optional[Dict] = None,
-           poll_interval: Optional[float] = STANDARD_POLLING_SLEEP_TIME,
-           return_immediately: Optional[bool] = False,
-           verbose: Optional[bool] = False) -> Search:
-    """
-    Search for ephemeris records
-
-    By default, this function will block and wait until the request completes and
-    all data is downloaded. If you don't want to wait, set the 'return_immediately`
-    value to True. The Search object will be returned right after the search has been
-    started, and you can use the helper functions as part of that object to get the
-    data when it's done.
-
-    Note: At least one search criteria from programs, platforms, or
-    instrument_types, must be specified.
-
-    Args:
-        start: start timestamp of the search (inclusive)
-        end: end timestamp of the search (inclusive)
-        programs: list of programs to search through, defaults to None
-        platforms: list of platforms to search through, defaults to None
-        instrument_types: list of instrument types to search through, defaults to None
-        metadata_filters: list of dictionaries describing metadata keys and
-            values to filter on, defaults to None
-
-            Example:
-
-                [{
-                    "key": "nbtrace_region",
-                    "operator": "in",
-                    "values": ["north polar cap"]
-                }]
-        metadata_filters_logical_operator: the logical operator to use when
-            evaluating metadata filters (either 'AND' or 'OR'), defaults
-            to "AND"
-        response_format: JSON representation of desired data response format
-        poll_interval: time in seconds to wait between polling attempts, defaults
-            to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME
-        return_immediately: initiate the search and return without waiting for data to
-            be received, defaults to False
-        verbose: output poll times and other progress messages, defaults to False
-
-    Returns:
-        A pyaurorax.ephemeris.Search object
-
-    Raises:
-        pyaurorax.exceptions.AuroraXBadParametersException: missing parameters
-    """
-    # create a Search() object
-    s = Search(start,
-               end,
-               programs=programs,
-               platforms=platforms,
-               instrument_types=instrument_types,
-               metadata_filters=metadata_filters,
-               metadata_filters_logical_operator=metadata_filters_logical_operator,
-               response_format=response_format)
-    if (verbose is True):
-        print("[%s] Search object created" % (datetime.datetime.now()))
-
-    # execute the search
-    s.execute()
-    if (verbose is True):
-        print("[%s] Request submitted" % (datetime.datetime.now()))
-        print("[%s] Request ID: %s" % (datetime.datetime.now(), s.request_id))
-        print("[%s] Request details available at: %s" % (datetime.datetime.now(),
-                                                         s.request_url))
-
-    # return immediately if we wanted to
-    if (return_immediately is True):
-        return s
-
-    # wait for data
-    if (verbose is True):
-        print("[%s] Waiting for data ..." % (datetime.datetime.now()))
-    s.wait(poll_interval=poll_interval, verbose=verbose)
-
-    # check if error condition encountered
-    if (s.status["search_result"]["error_condition"] is True):
-        # error encountered
-        raise AuroraXSearchException(s.logs[-1]["summary"])
-
-    # get the data
-    if (verbose is True):
-        print("[%s] Retrieving data ..." % (datetime.datetime.now()))
-    s.get_data()
-
-    # return response with the data
-    if (verbose is True):
-        print("[%s] Retrieved %s of data containing %d records" % (datetime.datetime.now(),
-                                                                   humanize.filesize.naturalsize(
-                                                                       s.status["search_result"]["file_size"]),
-                                                                   s.status["search_result"]["result_count"]))
-    return s
-
-
-def upload(identifier: int,
-           records: List[Ephemeris],
-           validate_source: Optional[bool] = False) -> int:
-    """
-    Upload ephemeris records to AuroraX
-
-    Args:
-        identifier: AuroraX data source ID
-        records: ephemeris records to upload
-        validate_source: validate all records before uploading, defaults to False
-
-    Returns:
-        0 for success, raises exception on error
-
-    Raises:
-        pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error
-        pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected content error
-        pyaurorax.exceptions.AuroraXUploadException: upload error
-        pyaurorax.exceptions.AuroraXValidationException: data source validation error
-    """
-    # validate record sources if the flag is set
-    if validate_source:
-        validation_error = __validate_data_source(identifier, records)
-        if validation_error:
-            raise AuroraXValidationException("Unable to validate data source found "
-                                             "in record: {}".format(validation_error))
-
-    # translate each ephemeris record to a request-friendly
-    # dict (ie. convert datetimes to strings, etc.)
-    for i, _ in enumerate(records):
-        if (type(records[i]) is Ephemeris):
-            records[i] = records[i].to_json_serializable()  # type: ignore
-
-    # make request
-    url = urls.ephemeris_upload_url.format(identifier)
-    req = AuroraXRequest(method="post",
-                         url=url,
-                         body=records,
-                         null_response=True)
-    res = req.execute()
-
-    # evaluate response
-    if (res.status_code == 400):
-        raise AuroraXUploadException("%s - %s" % (res.data["error_code"],
-                                                  res.data["error_message"]))
-
-    # return
-    return 0
-
-
-def delete(data_source: DataSource,
-           start: datetime.datetime,
-           end: datetime.datetime) -> int:
-    """
-    Delete ephemeris records between a timeframe.
-
-    The API processes this request asynchronously, so this method will return
-    immediately whether or not the data has already been deleted.
-
-    Args:
-        data_source: data source associated with the data product records (note that
-            identifier, program, platform, and instrument_type are required)
-        start: timestamp marking beginning of range to delete records for, inclusive
-        end: timestamp marking end of range to delete records for, inclusive
-
-    Returns:
-        0 on success
-
-    Raises:
-        pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error
-        pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error
-        pyaurorax.exceptions.AuroraXNotFoundException: source not found
-        pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation
-        pyaurorax.exceptions.AuroraXBadParametersException: missing parameters
-    """
-    # check to make sure the identifier, program, platform, and instrument type are all set in the data source
-    if not all([data_source.identifier, data_source.program, data_source.platform, data_source.instrument_type]):
-        raise AuroraXBadParametersException("One or more required data source parameters "
-                                            "are missing, delete operation aborted")
-
-    # do request
-    url = urls.ephemeris_upload_url.format(data_source.identifier)
-    params = {
-        "program": data_source.program,
-        "platform": data_source.platform,
-        "instrument_type": data_source.instrument_type,
-        "start": start.strftime("%Y-%m-%dT%H:%M:%S"),
-        "end": end.strftime("%Y-%m-%dT%H:%M:%S")
-    }
-    delete_req = AuroraXRequest(method="delete",
-                                url=url,
-                                body=params,
-                                null_response=True)
-    res = delete_req.execute()
-
-    # evaluate response
-    if (res.status_code == 400):
-        if type(res.data) is list:
-            raise AuroraXBadParametersException("%s - %s" % (res.status_code,
-                                                             res.data[0]["message"]))
-        raise AuroraXBadParametersException("%s - %s" % (res.data["error_code"],
-                                                         res.data["error_message"]))
-
-    # return
-    return 0
-
-
-def describe(search_obj: Search) -> str:
-    """
-    Describe an ephemeris search as a "SQL-like" string
-
-    Args:
-        search_obj: the ephemeris search object to describe
-
-    Returns:
-        the "SQL-like" string describing the ephemeris search object
-    """
-    # make request
-    req = AuroraXRequest(method="post",
-                         url=urls.describe_ephemeris_query_url,
-                         body=search_obj.query)
-    res = req.execute()
-
-    # return
-    return res.data
-
-
-def get_request_url(request_id: str) -> str:
-    """
-    Get the ephemeris search request URL for a given
-    request ID. This URL can be used for subsequent
-    pyaurorax.requests function calls. Primarily this method
-    facilitates delving into details about a set of already-submitted
-    ephemeris searches.
-
-    Args:
-        request_id: the request identifier
-
-    Returns:
-        the request URL
-    """
-    url = urls.ephemeris_request_url.format(request_id)
-    return url
-
-
-
-
-
-
-
-

Functions

-
-
-def delete(data_source: DataSource, start: datetime.datetime, end: datetime.datetime) ‑> int -
-
-

Delete ephemeris records within a given timeframe.

-

The API processes this request asynchronously, so this method will return immediately whether or not the data has already been deleted.

-

Args

-
-
data_source
-
data source associated with the ephemeris records (note that identifier, program, platform, and instrument_type are required)
-
start
-
timestamp marking beginning of range to delete records for, inclusive
-
end
-
timestamp marking end of range to delete records for, inclusive
-
-

Returns

-

0 on success

-

Raises

-
-
AuroraXMaxRetriesException
-
max retry error
-
AuroraXUnexpectedContentTypeException
-
unexpected error
-
AuroraXNotFoundException
-
source not found
-
AuroraXUnauthorizedException
-
invalid API key for this operation
-
AuroraXBadParametersException
-
missing parameters
-
-
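A hedged usage sketch; the identifier and source values below are made up, and an API key authorized for the data source would be required.

import datetime
import pyaurorax

# hypothetical data source; identifier, program, platform, and instrument_type must all be set
ds = pyaurorax.sources.DataSource(identifier=400,
                                  program="some-program",
                                  platform="some-platform",
                                  instrument_type="footprint")
pyaurorax.ephemeris.delete(ds,
                           datetime.datetime(2020, 1, 1, 0, 0, 0),
                           datetime.datetime(2020, 1, 1, 23, 59, 59))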
def delete(data_source: DataSource,
-           start: datetime.datetime,
-           end: datetime.datetime) -> int:
-    """
-    Delete ephemeris records between a timeframe.
-
-    The API processes this request asynchronously, so this method will return
-    immediately whether or not the data has already been deleted.
-
-    Args:
-        data_source: data source associated with the data product records (note that
-            identifier, program, platform, and instrument_type are required)
-        start: timestamp marking beginning of range to delete records for, inclusive
-        end: timestamp marking end of range to delete records for, inclusive
-
-    Returns:
-        0 on success
-
-    Raises:
-        pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error
-        pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error
-        pyaurorax.exceptions.AuroraXNotFoundException: source not found
-        pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation
-        pyaurorax.exceptions.AuroraXBadParametersException: missing parameters
-    """
-    # check to make sure the identifier, program, platform, and instrument type are all set in the data source
-    if not all([data_source.identifier, data_source.program, data_source.platform, data_source.instrument_type]):
-        raise AuroraXBadParametersException("One or more required data source parameters "
-                                            "are missing, delete operation aborted")
-
-    # do request
-    url = urls.ephemeris_upload_url.format(data_source.identifier)
-    params = {
-        "program": data_source.program,
-        "platform": data_source.platform,
-        "instrument_type": data_source.instrument_type,
-        "start": start.strftime("%Y-%m-%dT%H:%M:%S"),
-        "end": end.strftime("%Y-%m-%dT%H:%M:%S")
-    }
-    delete_req = AuroraXRequest(method="delete",
-                                url=url,
-                                body=params,
-                                null_response=True)
-    res = delete_req.execute()
-
-    # evaluate response
-    if (res.status_code == 400):
-        if type(res.data) is list:
-            raise AuroraXBadParametersException("%s - %s" % (res.status_code,
-                                                             res.data[0]["message"]))
-        raise AuroraXBadParametersException("%s - %s" % (res.data["error_code"],
-                                                         res.data["error_message"]))
-
-    # return
-    return 0
-
-
-
-def describe(search_obj: Search) ‑> str -
-
-

Describe an ephemeris search as a "SQL-like" string

-

Args

-
-
search_obj
-
the ephemeris search object to describe
-
-

Returns

-

the "SQL-like" string describing the ephemeris search object

-
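For example (a sketch; the program name is illustrative):

import datetime
import pyaurorax

s = pyaurorax.ephemeris.Search(datetime.datetime(2019, 1, 1, 0, 0, 0),
                               datetime.datetime(2019, 1, 1, 23, 59, 59),
                               programs=["swarm"])  # illustrative program name
print(pyaurorax.ephemeris.describe(s))  # prints the "SQL-like" description of the query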
def describe(search_obj: Search) -> str:
-    """
-    Describe an ephemeris search as a "SQL-like" string
-
-    Args:
-        search_obj: the ephemeris search object to describe
-
-    Returns:
-        the "SQL-like" string describing the ephemeris search object
-    """
-    # make request
-    req = AuroraXRequest(method="post",
-                         url=urls.describe_ephemeris_query_url,
-                         body=search_obj.query)
-    res = req.execute()
-
-    # return
-    return res.data
-
-
-
-def get_request_url(request_id: str) ‑> str -
-
-

Get the ephemeris search request URL for a given request ID. This URL can be used for subsequent pyaurorax.requests function calls. Primarily, this method facilitates delving into details about a set of already-submitted ephemeris searches.

-

Args

-
-
request_id
-
the request identifier
-
-

Returns

-

the request URL

-
def get_request_url(request_id: str) -> str:
-    """
-    Get the ephemeris search request URL for a given
-    request ID. This URL can be used for subsequent
-    pyaurorax.requests function calls. Primarily this method
-    facilitates delving into details about a set of already-submitted
-    ephemeris searches.
-
-    Args:
-        request_id: the request identifier
-
-    Returns:
-        the request URL
-    """
-    url = urls.ephemeris_request_url.format(request_id)
-    return url
-
-
-
-def search(start: datetime.datetime, end: datetime.datetime, programs: Optional[List[str]] = None, platforms: Optional[List[str]] = None, instrument_types: Optional[List[str]] = None, metadata_filters: Optional[List[Dict]] = None, metadata_filters_logical_operator: Optional[str] = None, response_format: Optional[Dict] = None, poll_interval: Optional[float] = 1.0, return_immediately: Optional[bool] = False, verbose: Optional[bool] = False) ‑> Search -
-
-

Search for ephemeris records

-

By default, this function will block and wait until the request completes and all data is downloaded. If you don't want to wait, set the 'return_immediately' value to True. The Search object will be returned right after the search has been started, and you can use the helper functions as part of that object to get the data when it's done.

-

Note: at least one search criterion from programs, platforms, or instrument_types must be specified.

-

Args

-
-
start
-
start timestamp of the search (inclusive)
-
end
-
end timestamp of the search (inclusive)
-
programs
-
list of programs to search through, defaults to None
-
platforms
-
list of platforms to search through, defaults to None
-
instrument_types
-
list of instrument types to search through, defaults to None
-
metadata_filters
-
-

list of dictionaries describing metadata keys and values to filter on, defaults to None

-

Example:

-
[{
-    "key": "nbtrace_region",
-    "operator": "in",
-    "values": ["north polar cap"]
-}]
-
-
-
metadata_filters_logical_operator
-
the logical operator to use when evaluating metadata filters (either 'AND' or 'OR'), defaults to "AND"
-
response_format
-
JSON representation of desired data response format
-
poll_interval
-
time in seconds to wait between polling attempts, defaults to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME
-
return_immediately
-
initiate the search and return without waiting for data to be received, defaults to False
-
verbose
-
output poll times and other progress messages, defaults to False
-
-

Returns

-

A pyaurorax.ephemeris.Search object

-

Raises

-
-
AuroraXBadParametersException
-
missing parameters
-
-
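A minimal sketch of a blocking search against the legacy interface documented here; the program, platform, and instrument type values are illustrative.

import datetime
import pyaurorax

s = pyaurorax.ephemeris.search(datetime.datetime(2019, 1, 1, 0, 0, 0),
                               datetime.datetime(2019, 1, 1, 23, 59, 59),
                               programs=["swarm"],
                               platforms=["swarma"],
                               instrument_types=["footprint"],
                               verbose=True)
print(len(s.data))  # number of ephemeris records found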
def search(start: datetime.datetime,
-           end: datetime.datetime,
-           programs: Optional[List[str]] = None,
-           platforms: Optional[List[str]] = None,
-           instrument_types: Optional[List[str]] = None,
-           metadata_filters: Optional[List[Dict]] = None,
-           metadata_filters_logical_operator: Optional[str] = None,
-           response_format: Optional[Dict] = None,
-           poll_interval: Optional[float] = STANDARD_POLLING_SLEEP_TIME,
-           return_immediately: Optional[bool] = False,
-           verbose: Optional[bool] = False) -> Search:
-    """
-    Search for ephemeris records
-
-    By default, this function will block and wait until the request completes and
-    all data is downloaded. If you don't want to wait, set the 'return_immediately`
-    value to True. The Search object will be returned right after the search has been
-    started, and you can use the helper functions as part of that object to get the
-    data when it's done.
-
-    Note: At least one search criteria from programs, platforms, or
-    instrument_types, must be specified.
-
-    Args:
-        start: start timestamp of the search (inclusive)
-        end: end timestamp of the search (inclusive)
-        programs: list of programs to search through, defaults to None
-        platforms: list of platforms to search through, defaults to None
-        instrument_types: list of instrument types to search through, defaults to None
-        metadata_filters: list of dictionaries describing metadata keys and
-            values to filter on, defaults to None
-
-            Example:
-
-                [{
-                    "key": "nbtrace_region",
-                    "operator": "in",
-                    "values": ["north polar cap"]
-                }]
-        metadata_filters_logical_operator: the logical operator to use when
-            evaluating metadata filters (either 'AND' or 'OR'), defaults
-            to "AND"
-        response_format: JSON representation of desired data response format
-        poll_interval: time in seconds to wait between polling attempts, defaults
-            to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME
-        return_immediately: initiate the search and return without waiting for data to
-            be received, defaults to False
-        verbose: output poll times and other progress messages, defaults to False
-
-    Returns:
-        A pyaurorax.ephemeris.Search object
-
-    Raises:
-        pyaurorax.exceptions.AuroraXBadParametersException: missing parameters
-    """
-    # create a Search() object
-    s = Search(start,
-               end,
-               programs=programs,
-               platforms=platforms,
-               instrument_types=instrument_types,
-               metadata_filters=metadata_filters,
-               metadata_filters_logical_operator=metadata_filters_logical_operator,
-               response_format=response_format)
-    if (verbose is True):
-        print("[%s] Search object created" % (datetime.datetime.now()))
-
-    # execute the search
-    s.execute()
-    if (verbose is True):
-        print("[%s] Request submitted" % (datetime.datetime.now()))
-        print("[%s] Request ID: %s" % (datetime.datetime.now(), s.request_id))
-        print("[%s] Request details available at: %s" % (datetime.datetime.now(),
-                                                         s.request_url))
-
-    # return immediately if we wanted to
-    if (return_immediately is True):
-        return s
-
-    # wait for data
-    if (verbose is True):
-        print("[%s] Waiting for data ..." % (datetime.datetime.now()))
-    s.wait(poll_interval=poll_interval, verbose=verbose)
-
-    # check if error condition encountered
-    if (s.status["search_result"]["error_condition"] is True):
-        # error encountered
-        raise AuroraXSearchException(s.logs[-1]["summary"])
-
-    # get the data
-    if (verbose is True):
-        print("[%s] Retrieving data ..." % (datetime.datetime.now()))
-    s.get_data()
-
-    # return response with the data
-    if (verbose is True):
-        print("[%s] Retrieved %s of data containing %d records" % (datetime.datetime.now(),
-                                                                   humanize.filesize.naturalsize(
-                                                                       s.status["search_result"]["file_size"]),
-                                                                   s.status["search_result"]["result_count"]))
-    return s
-
-
-
-def upload(identifier: int, records: List[Ephemeris], validate_source: Optional[bool] = False) ‑> int -
-
-

Upload ephemeris records to AuroraX

-

Args

-
-
identifier
-
AuroraX data source ID
-
records
-
ephemeris records to upload
-
validate_source
-
validate all records before uploading, defaults to False
-
-

Returns

-

0 for success, raises exception on error

-

Raises

-
-
AuroraXMaxRetriesException
-
max retry error
-
AuroraXUnexpectedContentTypeException
-
unexpected content error
-
AuroraXUploadException
-
upload error
-
AuroraXValidationException
-
data source validation error
-
-
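A hedged sketch of the call pattern; it assumes the records have already been built elsewhere, and the data source identifier below is made up. An API key with write access to that data source would be required.

import pyaurorax

records = build_ephemeris_records()  # hypothetical helper returning a list of Ephemeris objects
pyaurorax.ephemeris.upload(400,      # hypothetical data source identifier
                           records,
                           validate_source=True)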
def upload(identifier: int,
-           records: List[Ephemeris],
-           validate_source: Optional[bool] = False) -> int:
-    """
-    Upload ephemeris records to AuroraX
-
-    Args:
-        identifier: AuroraX data source ID
-        records: ephemeris records to upload
-        validate_source: validate all records before uploading, defaults to False
-
-    Returns:
-        0 for success, raises exception on error
-
-    Raises:
-        pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error
-        pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected content error
-        pyaurorax.exceptions.AuroraXUploadException: upload error
-        pyaurorax.exceptions.AuroraXValidationException: data source validation error
-    """
-    # validate record sources if the flag is set
-    if validate_source:
-        validation_error = __validate_data_source(identifier, records)
-        if validation_error:
-            raise AuroraXValidationException("Unable to validate data source found "
-                                             "in record: {}".format(validation_error))
-
-    # translate each ephemeris record to a request-friendly
-    # dict (ie. convert datetimes to strings, etc.)
-    for i, _ in enumerate(records):
-        if (type(records[i]) is Ephemeris):
-            records[i] = records[i].to_json_serializable()  # type: ignore
-
-    # make request
-    url = urls.ephemeris_upload_url.format(identifier)
-    req = AuroraXRequest(method="post",
-                         url=url,
-                         body=records,
-                         null_response=True)
-    res = req.execute()
-
-    # evaluate response
-    if (res.status_code == 400):
-        raise AuroraXUploadException("%s - %s" % (res.data["error_code"],
-                                                  res.data["error_message"]))
-
-    # return
-    return 0
-
-
-
-
-
-
-
- -
\ No newline at end of file
diff --git a/docs/code/pyaurorax_api_reference/pyaurorax/ephemeris/index.html b/docs/code/pyaurorax_api_reference/pyaurorax/ephemeris/index.html
deleted file mode 100644
index 9492b99..0000000
--- a/docs/code/pyaurorax_api_reference/pyaurorax/ephemeris/index.html
+++ /dev/null
@@ -1,1433 +0,0 @@
-pyaurorax.ephemeris API documentation
-
-
-

Module pyaurorax.ephemeris

-
-
-

The ephemeris module is used to search and upload ephemeris records within AuroraX.

-

Note that all functions and classes from submodules are imported at this level of the ephemeris module. They can be referenced from here instead of digging deeper into the submodules.

-
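In other words, a short sketch of the two equivalent ways to reference the same object (the submodule path is assumed from the imports shown below):

import pyaurorax

# referenced at the module level ...
s1 = pyaurorax.ephemeris.Search
# ... instead of reaching into the submodule where it is defined
s2 = pyaurorax.ephemeris.classes.search.Search
assert s1 is s2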
"""
-The ephemeris module is used to search and upload ephemeris records
-within AuroraX.
-
-Note that all functions and classes from submodules are all imported
-at this level of the ephemeris module. They can be referenced from
-here instead of digging in deeper to the submodules.
-"""
-
-# function and class imports
-from .ephemeris import (search,
-                        upload,
-                        delete,
-                        describe,
-                        get_request_url)
-from .classes.ephemeris import Ephemeris
-from .classes.search import Search
-
-# pdoc imports and exports
-from .ephemeris import __pdoc__ as __ephemeris_pdoc__
-from .classes.ephemeris import __pdoc__ as __classes_ephemeris_pdoc__
-from .classes.search import __pdoc__ as __classes_search_pdoc__
-__pdoc__ = __ephemeris_pdoc__
-__pdoc__ = dict(__pdoc__, **__classes_ephemeris_pdoc__)
-__pdoc__ = dict(__pdoc__, **__classes_search_pdoc__)
-__all__ = [
-    "search",
-    "upload",
-    "delete",
-    "describe",
-    "get_request_url",
-    "Ephemeris",
-    "Search",
-]
-
-
-
-

Sub-modules

-
-
pyaurorax.ephemeris.classes
-
-

Separated classes and functions used by the ephemeris module …

-
-
pyaurorax.ephemeris.ephemeris
-
-

Functions for performing ephemeris searches

-
-
-
-
-
-
-

Functions

-
-
-def delete(data_source: DataSource, start: datetime.datetime, end: datetime.datetime) ‑> int -
-
-

Delete ephemeris records within a given timeframe.

-

The API processes this request asynchronously, so this method will return immediately whether or not the data has already been deleted.

-

Args

-
-
data_source
-
data source associated with the ephemeris records (note that identifier, program, platform, and instrument_type are required)
-
start
-
timestamp marking beginning of range to delete records for, inclusive
-
end
-
timestamp marking end of range to delete records for, inclusive
-
-

Returns

-

0 on success

-

Raises

-
-
AuroraXMaxRetriesException
-
max retry error
-
AuroraXUnexpectedContentTypeException
-
unexpected error
-
AuroraXNotFoundException
-
source not found
-
AuroraXUnauthorizedException
-
invalid API key for this operation
-
AuroraXBadParametersException
-
missing parameters
-
-
def delete(data_source: DataSource,
-           start: datetime.datetime,
-           end: datetime.datetime) -> int:
-    """
-    Delete ephemeris records between a timeframe.
-
-    The API processes this request asynchronously, so this method will return
-    immediately whether or not the data has already been deleted.
-
-    Args:
-        data_source: data source associated with the data product records (note that
-            identifier, program, platform, and instrument_type are required)
-        start: timestamp marking beginning of range to delete records for, inclusive
-        end: timestamp marking end of range to delete records for, inclusive
-
-    Returns:
-        0 on success
-
-    Raises:
-        pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error
-        pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error
-        pyaurorax.exceptions.AuroraXNotFoundException: source not found
-        pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation
-        pyaurorax.exceptions.AuroraXBadParametersException: missing parameters
-    """
-    # check to make sure the identifier, program, platform, and instrument type are all set in the data source
-    if not all([data_source.identifier, data_source.program, data_source.platform, data_source.instrument_type]):
-        raise AuroraXBadParametersException("One or more required data source parameters "
-                                            "are missing, delete operation aborted")
-
-    # do request
-    url = urls.ephemeris_upload_url.format(data_source.identifier)
-    params = {
-        "program": data_source.program,
-        "platform": data_source.platform,
-        "instrument_type": data_source.instrument_type,
-        "start": start.strftime("%Y-%m-%dT%H:%M:%S"),
-        "end": end.strftime("%Y-%m-%dT%H:%M:%S")
-    }
-    delete_req = AuroraXRequest(method="delete",
-                                url=url,
-                                body=params,
-                                null_response=True)
-    res = delete_req.execute()
-
-    # evaluate response
-    if (res.status_code == 400):
-        if type(res.data) is list:
-            raise AuroraXBadParametersException("%s - %s" % (res.status_code,
-                                                             res.data[0]["message"]))
-        raise AuroraXBadParametersException("%s - %s" % (res.data["error_code"],
-                                                         res.data["error_message"]))
-
-    # return
-    return 0
-
-
-
-def describe(search_obj: Search) ‑> str -
-
-

Describe an ephemeris search as a "SQL-like" string

-

Args

-
-
search_obj
-
the ephemeris search object to describe
-
-

Returns

-

the "SQL-like" string describing the ephemeris search object

-
def describe(search_obj: Search) -> str:
-    """
-    Describe an ephemeris search as a "SQL-like" string
-
-    Args:
-        search_obj: the ephemeris search object to describe
-
-    Returns:
-        the "SQL-like" string describing the ephemeris search object
-    """
-    # make request
-    req = AuroraXRequest(method="post",
-                         url=urls.describe_ephemeris_query_url,
-                         body=search_obj.query)
-    res = req.execute()
-
-    # return
-    return res.data
-
-
-
-def get_request_url(request_id: str) ‑> str -
-
-

Get the ephemeris search request URL for a given request ID. This URL can be used for subsequent pyaurorax.requests function calls. Primarily, this method facilitates delving into details about a set of already-submitted ephemeris searches.

-

Args

-
-
request_id
-
the request identifier
-
-

Returns

-

the request URL

-
def get_request_url(request_id: str) -> str:
-    """
-    Get the ephemeris search request URL for a given
-    request ID. This URL can be used for subsequent
-    pyaurorax.requests function calls. Primarily this method
-    facilitates delving into details about a set of already-submitted
-    ephemeris searches.
-
-    Args:
-        request_id: the request identifier
-
-    Returns:
-        the request URL
-    """
-    url = urls.ephemeris_request_url.format(request_id)
-    return url
-
-
-
-def search(start: datetime.datetime, end: datetime.datetime, programs: Optional[List[str]] = None, platforms: Optional[List[str]] = None, instrument_types: Optional[List[str]] = None, metadata_filters: Optional[List[Dict]] = None, metadata_filters_logical_operator: Optional[str] = None, response_format: Optional[Dict] = None, poll_interval: Optional[float] = 1.0, return_immediately: Optional[bool] = False, verbose: Optional[bool] = False) ‑> Search -
-
-

Search for ephemeris records

-

By default, this function will block and wait until the request completes and all data is downloaded. If you don't want to wait, set the 'return_immediately' value to True. The Search object will be returned right after the search has been started, and you can use the helper functions as part of that object to get the data when it's done.

-

Note: At least one search criteria from programs, platforms, or -instrument_types, must be specified.

-

Args

-
-
start
-
start timestamp of the search (inclusive)
-
end
-
end timestamp of the search (inclusive)
-
programs
-
list of programs to search through, defaults to None
-
platforms
-
list of platforms to search through, defaults to None
-
instrument_types
-
list of instrument types to search through, defaults to None
-
metadata_filters
-
-

list of dictionaries describing metadata keys and -values to filter on, defaults to None

-

Example:

-
[{
-    "key": "nbtrace_region",
-    "operator": "in",
-    "values": ["north polar cap"]
-}]
-
-
-
metadata_filters_logical_operator
-
the logical operator to use when -evaluating metadata filters (either 'AND' or 'OR'), defaults -to "AND"
-
response_format
-
JSON representation of desired data response format
-
poll_interval
-
time in seconds to wait between polling attempts, defaults -to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME
-
return_immediately
-
initiate the search and return without waiting for data to -be received, defaults to False
-
verbose
-
output poll times and other progress messages, defaults to False
-
-

Returns

-

A pyaurorax.ephemeris.Search object

-

Raises

-
-
AuroraXBadParametersException
-
missing parameters
-
-
- -Expand source code - -
def search(start: datetime.datetime,
-           end: datetime.datetime,
-           programs: Optional[List[str]] = None,
-           platforms: Optional[List[str]] = None,
-           instrument_types: Optional[List[str]] = None,
-           metadata_filters: Optional[List[Dict]] = None,
-           metadata_filters_logical_operator: Optional[str] = None,
-           response_format: Optional[Dict] = None,
-           poll_interval: Optional[float] = STANDARD_POLLING_SLEEP_TIME,
-           return_immediately: Optional[bool] = False,
-           verbose: Optional[bool] = False) -> Search:
-    """
-    Search for ephemeris records
-
-    By default, this function will block and wait until the request completes and
-    all data is downloaded. If you don't want to wait, set the 'return_immediately`
-    value to True. The Search object will be returned right after the search has been
-    started, and you can use the helper functions as part of that object to get the
-    data when it's done.
-
-    Note: At least one search criteria from programs, platforms, or
-    instrument_types, must be specified.
-
-    Args:
-        start: start timestamp of the search (inclusive)
-        end: end timestamp of the search (inclusive)
-        programs: list of programs to search through, defaults to None
-        platforms: list of platforms to search through, defaults to None
-        instrument_types: list of instrument types to search through, defaults to None
-        metadata_filters: list of dictionaries describing metadata keys and
-            values to filter on, defaults to None
-
-            Example:
-
-                [{
-                    "key": "nbtrace_region",
-                    "operator": "in",
-                    "values": ["north polar cap"]
-                }]
-        metadata_filters_logical_operator: the logical operator to use when
-            evaluating metadata filters (either 'AND' or 'OR'), defaults
-            to "AND"
-        response_format: JSON representation of desired data response format
-        poll_interval: time in seconds to wait between polling attempts, defaults
-            to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME
-        return_immediately: initiate the search and return without waiting for data to
-            be received, defaults to False
-        verbose: output poll times and other progress messages, defaults to False
-
-    Returns:
-        A pyaurorax.ephemeris.Search object
-
-    Raises:
-        pyaurorax.exceptions.AuroraXBadParametersException: missing parameters
-    """
-    # create a Search() object
-    s = Search(start,
-               end,
-               programs=programs,
-               platforms=platforms,
-               instrument_types=instrument_types,
-               metadata_filters=metadata_filters,
-               metadata_filters_logical_operator=metadata_filters_logical_operator,
-               response_format=response_format)
-    if (verbose is True):
-        print("[%s] Search object created" % (datetime.datetime.now()))
-
-    # execute the search
-    s.execute()
-    if (verbose is True):
-        print("[%s] Request submitted" % (datetime.datetime.now()))
-        print("[%s] Request ID: %s" % (datetime.datetime.now(), s.request_id))
-        print("[%s] Request details available at: %s" % (datetime.datetime.now(),
-                                                         s.request_url))
-
-    # return immediately if we wanted to
-    if (return_immediately is True):
-        return s
-
-    # wait for data
-    if (verbose is True):
-        print("[%s] Waiting for data ..." % (datetime.datetime.now()))
-    s.wait(poll_interval=poll_interval, verbose=verbose)
-
-    # check if error condition encountered
-    if (s.status["search_result"]["error_condition"] is True):
-        # error encountered
-        raise AuroraXSearchException(s.logs[-1]["summary"])
-
-    # get the data
-    if (verbose is True):
-        print("[%s] Retrieving data ..." % (datetime.datetime.now()))
-    s.get_data()
-
-    # return response with the data
-    if (verbose is True):
-        print("[%s] Retrieved %s of data containing %d records" % (datetime.datetime.now(),
-                                                                   humanize.filesize.naturalsize(
-                                                                       s.status["search_result"]["file_size"]),
-                                                                   s.status["search_result"]["result_count"]))
-    return s
-
-
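A minimal sketch of calling the search function documented above, assuming the signature shown; the program name "swarm" and the timestamps are placeholders:

import datetime
import pyaurorax

start = datetime.datetime(2020, 1, 1, 0, 0, 0)
end = datetime.datetime(2020, 1, 1, 23, 59, 59)

# blocking form: waits for the search to finish and downloads the data
s = pyaurorax.ephemeris.search(start, end, programs=["swarm"], verbose=True)

# non-blocking form: returns right away, retrieve the data later
s = pyaurorax.ephemeris.search(start, end, programs=["swarm"], return_immediately=True)
s.wait()
s.get_data()
print(s.data)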
-
-def upload(identifier: int, records: List[Ephemeris], validate_source: Optional[bool] = False) ‑> int -
-
-

Upload ephemeris records to AuroraX

-

Args

-
-
identifier
-
AuroraX data source ID
-
records
-
ephemeris records to upload
-
validate_source
-
validate all records before uploading, defaults to False
-
-

Returns

-

0 for success, raises exception on error

-

Raises

-
-
AuroraXMaxRetriesException
-
max retry error
-
AuroraXUnexpectedContentTypeException
-
unexpected content error
-
AuroraXUploadException
-
upload error
-
AuroraXValidationException
-
data source validation error
-
-
- -Expand source code - -
def upload(identifier: int,
-           records: List[Ephemeris],
-           validate_source: Optional[bool] = False) -> int:
-    """
-    Upload ephemeris records to AuroraX
-
-    Args:
-        identifier: AuroraX data source ID
-        records: ephemeris records to upload
-        validate_source: validate all records before uploading, defaults to False
-
-    Returns:
-        0 for success, raises exception on error
-
-    Raises:
-        pyaurorax.exceptions.AuroraXMaxRetriesException: max retry error
-        pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected content error
-        pyaurorax.exceptions.AuroraXUploadException: upload error
-        pyaurorax.exceptions.AuroraXValidationException: data source validation error
-    """
-    # validate record sources if the flag is set
-    if validate_source:
-        validation_error = __validate_data_source(identifier, records)
-        if validation_error:
-            raise AuroraXValidationException("Unable to validate data source found "
-                                             "in record: {}".format(validation_error))
-
-    # translate each ephemeris record to a request-friendly
-    # dict (ie. convert datetimes to strings, etc.)
-    for i, _ in enumerate(records):
-        if (type(records[i]) is Ephemeris):
-            records[i] = records[i].to_json_serializable()  # type: ignore
-
-    # make request
-    url = urls.ephemeris_upload_url.format(identifier)
-    req = AuroraXRequest(method="post",
-                         url=url,
-                         body=records,
-                         null_response=True)
-    res = req.execute()
-
-    # evaluate response
-    if (res.status_code == 400):
-        raise AuroraXUploadException("%s - %s" % (res.data["error_code"],
-                                                  res.data["error_message"]))
-
-    # return
-    return 0
-
-
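A short sketch of the upload workflow described above, assuming the signature shown; the identifier value and the record list are placeholders:

# ephemeris_records: a list of Ephemeris objects built beforehand (see the Ephemeris class below)
# 123: a placeholder AuroraX data source identifier
result = pyaurorax.ephemeris.upload(123, records=ephemeris_records, validate_source=True)
# result is 0 on success; exceptions are raised on error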
-
-
-
-

Classes

-
-
-class Ephemeris -(**data: Any) -
-
-

Ephemeris object

-

Attributes

-
-
data_source
-
data source that the ephemeris record is associated with
-
epoch
-
timestamp for the record (assumed it is in UTC)
-
location_geo
-
Location object containing geographic latitude and longitude
-
location_gsm
-
Location object containing GSM latitude and longitude (leave -empty for data sources with a type of 'ground')
-
nbtrace
-
Location object with north B-trace geographic latitude and longitude
-
sbtrace
-
Location object with south B-trace geographic latitude and longitude
-
metadata
-
metadata for this record (arbitrary keys and values)
-
-

Create a new model by parsing and validating input data from keyword arguments.

-

Raises ValidationError if the input data cannot be parsed to form a valid model.

-
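A minimal construction sketch based on the attribute list above; the coordinate values are placeholders and ds stands in for a previously-retrieved DataSource (retrieval not shown):

import datetime
import pyaurorax

record = pyaurorax.ephemeris.Ephemeris(
    data_source=ds,  # a DataSource object obtained beforehand
    epoch=datetime.datetime(2020, 1, 1, 0, 1, 0),
    location_geo=pyaurorax.Location(lat=51.05, lon=-114.07),
    nbtrace=pyaurorax.Location(lat=58.2, lon=-112.0),
    sbtrace=pyaurorax.Location(lat=-65.5, lon=-140.0),
    metadata={},
)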
- -Expand source code - -
class Ephemeris(BaseModel):
-    """
-    Ephemeris object
-
-    Attributes:
-        data_source: data source that the ephemeris record is associated with
-        epoch: timestamp for the record (assumed it is in UTC)
-        location_geo: Location object containing geographic latitude and longitude
-        location_gsm: Location object containing GSM latitude and longitude (leave
-            empty for data sources with a type of 'ground')
-        nbtrace: Location object with north B-trace geographic latitude and longitude
-        sbtrace: Location object with south B-trace geographic latitude and longitude
-        metadata: metadata for this record (arbitrary keys and values)
-    """
-    data_source: DataSource
-    epoch: datetime.datetime
-    location_geo: Location
-    location_gsm: Optional[Location] = Location(lat=None, lon=None)
-    nbtrace: Location
-    sbtrace: Location
-    metadata: Optional[Dict] = None
-
-    def to_json_serializable(self) -> Dict:
-        """
-        Convert object to a JSON-serializable object (ie. translate
-        datetime objects to strings)
-
-        Returns:
-            a dictionary object that is JSON-serializable
-        """
-        # init
-        d = self.__dict__
-
-        # format epoch as str
-        if (type(d["epoch"]) is datetime.datetime):
-            d["epoch"] = d["epoch"].strftime("%Y-%m-%dT%H:%M:00.000Z")
-
-        # format location
-        if (type(d["location_geo"]) is Location):
-            d["location_geo"] = d["location_geo"].__dict__
-        if (type(d["location_gsm"]) is Location):
-            d["location_gsm"] = d["location_gsm"].__dict__
-        if (type(d["nbtrace"]) is Location):
-            d["nbtrace"] = d["nbtrace"].__dict__
-        if (type(d["sbtrace"]) is Location):
-            d["sbtrace"] = d["sbtrace"].__dict__
-
-        # format metadata
-        if (type(self.metadata) is dict):
-            for key, value in self.metadata.items():
-                if (type(value) is datetime.datetime or type(value) is datetime.date):
-                    self.metadata[key] = self.metadata[key].strftime("%Y-%m-%dT%H:%M:%S.%f")
-        if (type(self.metadata) is list):
-            self.metadata = {}
-
-        # format data source fields for query
-        d["program"] = self.data_source.program
-        d["platform"] = self.data_source.platform
-        d["instrument_type"] = self.data_source.instrument_type
-        del d["data_source"]
-
-        # return
-        return d
-
-    def __str__(self) -> str:
-        """
-        String method
-
-        Returns:
-            string format of Ephemeris
-        """
-        return self.__repr__()
-
-    def __repr__(self) -> str:
-        """
-        Object representation
-
-        Returns:
-            object representation of Ephemeris
-        """
-        # shorten the metadata
-        max_len = 20
-        attr_metadata = f"{self.metadata}"
-        if (len(attr_metadata) > max_len):
-            attr_metadata = attr_metadata[0:max_len] + "...}"
-
-        # return formatted representation
-        return f"Ephemeris(data_source={repr(self.data_source)}, epoch={repr(self.epoch)}, " \
-            f"location_geo={repr(self.location_geo)}, location_gsm={repr(self.location_gsm)}, " \
-            f"nbtrace={repr(self.nbtrace)}, sbtrace={repr(self.sbtrace)}, " \
-            f"metadata={attr_metadata})"
-
-

Ancestors

-
    -
  • pydantic.main.BaseModel
  • -
  • pydantic.utils.Representation
  • -
-

Class variables

-
-
var data_sourceDataSource
-
-
-
-
var epoch : datetime.datetime
-
-
-
-
var location_geoLocation
-
-
-
-
var location_gsm : Optional[Location]
-
-
-
-
var metadata : Optional[Dict]
-
-
-
-
var nbtraceLocation
-
-
-
-
var sbtraceLocation
-
-
-
-
-

Methods

-
-
-def to_json_serializable(self) ‑> Dict -
-
-

Convert object to a JSON-serializable object (ie. translate -datetime objects to strings)

-

Returns

-

a dictionary object that is JSON-serializable

-
- -Expand source code - -
def to_json_serializable(self) -> Dict:
-    """
-    Convert object to a JSON-serializable object (ie. translate
-    datetime objects to strings)
-
-    Returns:
-        a dictionary object that is JSON-serializable
-    """
-    # init
-    d = self.__dict__
-
-    # format epoch as str
-    if (type(d["epoch"]) is datetime.datetime):
-        d["epoch"] = d["epoch"].strftime("%Y-%m-%dT%H:%M:00.000Z")
-
-    # format location
-    if (type(d["location_geo"]) is Location):
-        d["location_geo"] = d["location_geo"].__dict__
-    if (type(d["location_gsm"]) is Location):
-        d["location_gsm"] = d["location_gsm"].__dict__
-    if (type(d["nbtrace"]) is Location):
-        d["nbtrace"] = d["nbtrace"].__dict__
-    if (type(d["sbtrace"]) is Location):
-        d["sbtrace"] = d["sbtrace"].__dict__
-
-    # format metadata
-    if (type(self.metadata) is dict):
-        for key, value in self.metadata.items():
-            if (type(value) is datetime.datetime or type(value) is datetime.date):
-                self.metadata[key] = self.metadata[key].strftime("%Y-%m-%dT%H:%M:%S.%f")
-    if (type(self.metadata) is list):
-        self.metadata = {}
-
-    # format data source fields for query
-    d["program"] = self.data_source.program
-    d["platform"] = self.data_source.platform
-    d["instrument_type"] = self.data_source.instrument_type
-    del d["data_source"]
-
-    # return
-    return d
-
-
-
-
-
-class Search -(start: datetime.datetime, end: datetime.datetime, programs: Optional[List[str]] = None, platforms: Optional[List[str]] = None, instrument_types: Optional[List[str]] = None, metadata_filters: Optional[List[Dict]] = None, metadata_filters_logical_operator: Optional[str] = 'AND', response_format: Optional[Dict] = None) -
-
-

Class representing an ephemeris search

-

Note: At least one search criteria from programs, platforms, or instrument_types -must be specified.

-

Args

-
-
start
-
start timestamp of the search (inclusive)
-
end
-
end timestamp of the search (inclusive)
-
programs
-
list of programs to search through, defaults to None
-
platforms
-
list of platforms to search through, defaults to None
-
instrument_types
-
list of instrument types to search through, defaults to None
-
metadata_filters
-
-

list of dictionaries describing metadata keys and -values to filter on, defaults to None

-

e.g. { -"key": "string", -"operator": "=", -"values": [ -"string" -] -}

-
-
metadata_filters_logical_operator
-
the logical operator to use when -evaluating metadata filters (either 'AND' or 'OR'), defaults -to "AND"
-
response_format
-
JSON representation of desired data response format
-
request
-
AuroraXResponse object returned when the search is executed
-
request_id
-
unique ID assigned to the request by the AuroraX API
-
request_url
-
unique URL assigned to the request by the AuroraX API
-
executed
-
indicates if the search has been executed/started
-
completed
-
indicates if the search has finished
-
data_url
-
the URL where data is accessed
-
query
-
the query for this request as JSON
-
status
-
the status of the query
-
data
-
the ephemeris records found
-
logs
-
all log messages outputted by the AuroraX API for this request
-
-
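A sketch of driving this Search object manually with the methods documented below; the timestamps and program name are placeholders:

import datetime
import pyaurorax

s = pyaurorax.ephemeris.Search(datetime.datetime(2020, 1, 1, 0, 0, 0),
                               datetime.datetime(2020, 1, 1, 23, 59, 59),
                               programs=["swarm"])
s.execute()     # submit the request to the AuroraX API
s.wait()        # poll until the request completes
s.get_data()    # download the resulting ephemeris records
print(s.data)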
- -Expand source code - -
class Search():
-    """
-    Class representing an ephemeris search
-
-    Note: At least one search criteria from programs, platforms, or instrument_types
-    must be specified.
-
-    Args:
-        start: start timestamp of the search (inclusive)
-        end: end timestamp of the search (inclusive)
-        programs: list of programs to search through, defaults to None
-        platforms: list of platforms to search through, defaults to None
-        instrument_types: list of instrument types to search through, defaults to None
-        metadata_filters: list of dictionaries describing metadata keys and
-            values to filter on, defaults to None
-
-            e.g. {
-                "key": "string",
-                "operator": "=",
-                "values": [
-                    "string"
-                ]
-            }
-        metadata_filters_logical_operator: the logical operator to use when
-            evaluating metadata filters (either 'AND' or 'OR'), defaults
-            to "AND"
-        response_format: JSON representation of desired data response format
-        request: AuroraXResponse object returned when the search is executed
-        request_id: unique ID assigned to the request by the AuroraX API
-        request_url: unique URL assigned to the request by the AuroraX API
-        executed: indicates if the search has been executed/started
-        completed: indicates if the search has finished
-        data_url: the URL where data is accessed
-        query: the query for this request as JSON
-        status: the status of the query
-        data: the ephemeris records found
-        logs: all log messages outputted by the AuroraX API for this request
-    """
-
-    def __init__(self,
-                 start: datetime.datetime,
-                 end: datetime.datetime,
-                 programs: Optional[List[str]] = None,
-                 platforms: Optional[List[str]] = None,
-                 instrument_types: Optional[List[str]] = None,
-                 metadata_filters: Optional[List[Dict]] = None,
-                 metadata_filters_logical_operator: Optional[str] = "AND",
-                 response_format: Optional[Dict] = None) -> None:
-
-        # set variables using passed in args
-        self.start = start
-        self.end = end
-        self.programs = programs
-        self.platforms = platforms
-        self.instrument_types = instrument_types
-        self.metadata_filters = metadata_filters
-        self.metadata_filters_logical_operator = metadata_filters_logical_operator
-        self.response_format = response_format
-
-        # initialize additional variables
-        self.request: AuroraXResponse = None
-        self.request_id: str = ""
-        self.request_url: str = ""
-        self.executed: bool = False
-        self.completed: bool = False
-        self.data_url: str = ""
-        self.query: Dict = {}
-        self.status: Dict = {}
-        self.data: List[Union[Ephemeris, Dict]] = []
-        self.logs: List[Dict] = []
-
-    def __str__(self) -> str:
-        """
-        String method
-
-        Returns:
-            string format of Ephemeris Search object
-        """
-        return self.__repr__()
-
-    def __repr__(self) -> str:
-        """
-        Object representation
-
-        Returns:
-            object representation of Ephemeris Search object
-        """
-        return f"EphemerisSearch(executed={self.executed}, " \
-            f"completed={self.completed}, request_id='{self.request_id}')"
-
-    @property
-    def query(self):
-        """
-        Property for the query value
-        """
-        self._query = {
-            "data_sources": {
-                "programs": [] if not self.programs else self.programs,
-                "platforms": [] if not self.platforms else self.platforms,
-                "instrument_types": [] if not self.instrument_types else self.instrument_types,
-                "ephemeris_metadata_filters": {} if not self.metadata_filters
-                else {
-                    "logical_operator": self.metadata_filters_logical_operator,
-                    "expressions": self.metadata_filters
-                },
-            },
-            "start": self.start.strftime("%Y-%m-%dT%H:%M:%S"),
-            "end": self.end.strftime("%Y-%m-%dT%H:%M:%S"),
-        }
-        return self._query
-
-    @query.setter
-    def query(self, query):
-        self._query = query
-
-    def execute(self) -> None:
-        """
-        Initiate ephemeris search request
-
-        Raises:
-            pyaurorax.exceptions.AuroraXBadParametersException: missing parameters
-        """
-        # check for at least one filter criteria
-        if not (self.programs or self.platforms or self.instrument_types or self.metadata_filters):
-            raise AuroraXBadParametersException("At least one filter criteria parameter "
-                                                "besides 'start' and 'end' must be specified")
-
-        # do request
-        url = urls.ephemeris_search_url
-        req = AuroraXRequest(method="post",
-                             url=url,
-                             body=self.query,
-                             null_response=True)
-        res = req.execute()
-
-        # set request ID, request_url, executed
-        self.executed = True
-        if (res.status_code == 202):
-            # request successfully dispatched
-            self.executed = True
-            self.request_url = res.request.headers["location"]
-            self.request_id = self.request_url.rsplit("/", 1)[-1]
-
-        # set the request variable
-        self.request = res
-
-    def update_status(self, status: Optional[Dict] = None) -> None:
-        """
-        Update the status of this ephemeris search request
-
-        Args:
-            status: the previously-retrieved status of this request (include
-                to avoid requesting it from the API again), defaults to None
-        """
-        # get the status if it isn't passed in
-        if (status is None):
-            status = requests_get_status(self.request_url)
-
-        # update request status by checking if data URI is set
-        if (status["search_result"]["data_uri"] is not None):
-            self.completed = True
-            self.data_url = "%s%s" % (urls.base_url,
-                                      status["search_result"]["data_uri"])
-
-        # set class variable "status" and "logs"
-        self.status = status
-        self.logs = status["logs"]
-
-    def check_for_data(self) -> bool:
-        """
-        Check to see if data is available for this ephemeris
-        search request
-
-        Returns:
-            True if data is available, else False
-        """
-        self.update_status()
-        return self.completed
-
-    def get_data(self) -> None:
-        """
-        Retrieve the data available for this ephemeris search
-        request
-        """
-        # check if completed yet
-        if (self.completed is False):
-            print("No data available, update status or check for data first")
-            return
-
-        # get data
-        raw_data = requests_get_data(self.data_url, response_format=self.response_format)
-
-        # set data variable
-        if self.response_format is not None:
-            self.data = raw_data
-        else:
-            # cast data source objects
-            for i in range(0, len(raw_data)):
-                ds = DataSource(**raw_data[i]["data_source"], format=FORMAT_BASIC_INFO)
-                raw_data[i]["data_source"] = ds
-
-            # cast ephemeris objects
-            self.data = [Ephemeris(**e) for e in raw_data]
-
-    def wait(self,
-             poll_interval: Optional[float] = STANDARD_POLLING_SLEEP_TIME,
-             verbose: Optional[bool] = False) -> None:
-        """
-        Block and wait for the request to complete and data is
-        available for retrieval
-
-        Args:
-            poll_interval: time in seconds to wait between polling attempts,
-                defaults to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME
-            verbose: output poll times and other progress messages, defaults
-                to False
-        """
-        url = urls.ephemeris_request_url.format(self.request_id)
-        self.update_status(requests_wait_for_data(url,
-                                                  poll_interval=poll_interval,
-                                                  verbose=verbose))
-
-    def cancel(self,
-               wait: Optional[bool] = False,
-               poll_interval: float = STANDARD_POLLING_SLEEP_TIME,
-               verbose: Optional[bool] = False) -> int:
-        """
-        Cancel the ephemeris search request
-
-        This method returns immediately by default since the API processes
-        this request asynchronously. If you would prefer to wait for it
-        to be completed, set the 'wait' parameter to True. You can adjust
-        the polling time using the 'poll_interval' parameter.
-
-        Args:
-            wait: wait until the cancellation request has been
-                completed (may wait for several minutes)
-            poll_interval: seconds to wait between polling
-                calls, defaults to STANDARD_POLLING_SLEEP_TIME.
-            verbose: output poll times and other progress messages, defaults
-                to False
-
-        Returns:
-            1 on success
-
-        Raises:
-            pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error
-            pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation
-        """
-        url = urls.ephemeris_request_url.format(self.request_id)
-        return requests_cancel(url, wait=wait, poll_interval=poll_interval, verbose=verbose)
-
-

Instance variables

-
-
var query
-
-

Property for the query value

-
- -Expand source code - -
@property
-def query(self):
-    """
-    Property for the query value
-    """
-    self._query = {
-        "data_sources": {
-            "programs": [] if not self.programs else self.programs,
-            "platforms": [] if not self.platforms else self.platforms,
-            "instrument_types": [] if not self.instrument_types else self.instrument_types,
-            "ephemeris_metadata_filters": {} if not self.metadata_filters
-            else {
-                "logical_operator": self.metadata_filters_logical_operator,
-                "expressions": self.metadata_filters
-            },
-        },
-        "start": self.start.strftime("%Y-%m-%dT%H:%M:%S"),
-        "end": self.end.strftime("%Y-%m-%dT%H:%M:%S"),
-    }
-    return self._query
-
-
-
-

Methods

-
-
-def cancel(self, wait: Optional[bool] = False, poll_interval: float = 1.0, verbose: Optional[bool] = False) ‑> int -
-
-

Cancel the ephemeris search request

-

This method returns immediately by default since the API processes -this request asynchronously. If you would prefer to wait for it -to be completed, set the 'wait' parameter to True. You can adjust -the polling time using the 'poll_interval' parameter.

-

Args

-
-
wait
-
wait until the cancellation request has been -completed (may wait for several minutes)
-
poll_interval
-
seconds to wait between polling -calls, defaults to STANDARD_POLLING_SLEEP_TIME.
-
verbose
-
output poll times and other progress messages, defaults -to False
-
-

Returns

-

1 on success

-

Raises

-
-
AuroraXUnexpectedContentTypeException
-
unexpected error
-
AuroraXUnauthorizedException
-
invalid API key for this operation
-
-
- -Expand source code - -
def cancel(self,
-           wait: Optional[bool] = False,
-           poll_interval: float = STANDARD_POLLING_SLEEP_TIME,
-           verbose: Optional[bool] = False) -> int:
-    """
-    Cancel the ephemeris search request
-
-    This method returns immediately by default since the API processes
-    this request asynchronously. If you would prefer to wait for it
-    to be completed, set the 'wait' parameter to True. You can adjust
-    the polling time using the 'poll_interval' parameter.
-
-    Args:
-        wait: wait until the cancellation request has been
-            completed (may wait for several minutes)
-        poll_interval: seconds to wait between polling
-            calls, defaults to STANDARD_POLLING_SLEEP_TIME.
-        verbose: output poll times and other progress messages, defaults
-            to False
-
-    Returns:
-        1 on success
-
-    Raises:
-        pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error
-        pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation
-    """
-    url = urls.ephemeris_request_url.format(self.request_id)
-    return requests_cancel(url, wait=wait, poll_interval=poll_interval, verbose=verbose)
-
-
-
-def check_for_data(self) ‑> bool -
-
-

Check to see if data is available for this ephemeris -search request

-

Returns

-

True if data is available, else False

-
- -Expand source code - -
def check_for_data(self) -> bool:
-    """
-    Check to see if data is available for this ephemeris
-    search request
-
-    Returns:
-        True if data is available, else False
-    """
-    self.update_status()
-    return self.completed
-
-
-
-def execute(self) ‑> None -
-
-

Initiate ephemeris search request

-

Raises

-
-
AuroraXBadParametersException
-
missing parameters
-
-
- -Expand source code - -
def execute(self) -> None:
-    """
-    Initiate ephemeris search request
-
-    Raises:
-        pyaurorax.exceptions.AuroraXBadParametersException: missing parameters
-    """
-    # check for at least one filter criteria
-    if not (self.programs or self.platforms or self.instrument_types or self.metadata_filters):
-        raise AuroraXBadParametersException("At least one filter criteria parameter "
-                                            "besides 'start' and 'end' must be specified")
-
-    # do request
-    url = urls.ephemeris_search_url
-    req = AuroraXRequest(method="post",
-                         url=url,
-                         body=self.query,
-                         null_response=True)
-    res = req.execute()
-
-    # set request ID, request_url, executed
-    self.executed = True
-    if (res.status_code == 202):
-        # request successfully dispatched
-        self.executed = True
-        self.request_url = res.request.headers["location"]
-        self.request_id = self.request_url.rsplit("/", 1)[-1]
-
-    # set the request variable
-    self.request = res
-
-
-
-def get_data(self) ‑> None -
-
-

Retrieve the data available for this ephemeris search -request

-
- -Expand source code - -
def get_data(self) -> None:
-    """
-    Retrieve the data available for this ephemeris search
-    request
-    """
-    # check if completed yet
-    if (self.completed is False):
-        print("No data available, update status or check for data first")
-        return
-
-    # get data
-    raw_data = requests_get_data(self.data_url, response_format=self.response_format)
-
-    # set data variable
-    if self.response_format is not None:
-        self.data = raw_data
-    else:
-        # cast data source objects
-        for i in range(0, len(raw_data)):
-            ds = DataSource(**raw_data[i]["data_source"], format=FORMAT_BASIC_INFO)
-            raw_data[i]["data_source"] = ds
-
-        # cast ephemeris objects
-        self.data = [Ephemeris(**e) for e in raw_data]
-
-
-
-def update_status(self, status: Optional[Dict] = None) ‑> None -
-
-

Update the status of this ephemeris search request

-

Args

-
-
status
-
the previously-retrieved status of this request (include -to avoid requesting it from the API again), defaults to None
-
-
- -Expand source code - -
def update_status(self, status: Optional[Dict] = None) -> None:
-    """
-    Update the status of this ephemeris search request
-
-    Args:
-        status: the previously-retrieved status of this request (include
-            to avoid requesting it from the API again), defaults to None
-    """
-    # get the status if it isn't passed in
-    if (status is None):
-        status = requests_get_status(self.request_url)
-
-    # update request status by checking if data URI is set
-    if (status["search_result"]["data_uri"] is not None):
-        self.completed = True
-        self.data_url = "%s%s" % (urls.base_url,
-                                  status["search_result"]["data_uri"])
-
-    # set class variable "status" and "logs"
-    self.status = status
-    self.logs = status["logs"]
-
-
-
-def wait(self, poll_interval: Optional[float] = 1.0, verbose: Optional[bool] = False) ‑> None -
-
-

Block and wait for the request to complete and data is -available for retrieval

-

Args

-
-
poll_interval
-
time in seconds to wait between polling attempts, -defaults to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME
-
verbose
-
output poll times and other progress messages, defaults -to False
-
-
- -Expand source code - -
def wait(self,
-         poll_interval: Optional[float] = STANDARD_POLLING_SLEEP_TIME,
-         verbose: Optional[bool] = False) -> None:
-    """
-    Block and wait for the request to complete and data is
-    available for retrieval
-
-    Args:
-        poll_interval: time in seconds to wait between polling attempts,
-            defaults to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME
-        verbose: output poll times and other progress messages, defaults
-            to False
-    """
-    url = urls.ephemeris_request_url.format(self.request_id)
-    self.update_status(requests_wait_for_data(url,
-                                              poll_interval=poll_interval,
-                                              verbose=verbose))
-
-
-
-
-
-
-
- -
-
-
-
\ No newline at end of file
diff --git a/docs/code/pyaurorax_api_reference/pyaurorax/exceptions.html b/docs/code/pyaurorax_api_reference/pyaurorax/exceptions.html
index 6a3749f..66614a0 100644
--- a/docs/code/pyaurorax_api_reference/pyaurorax/exceptions.html
+++ b/docs/code/pyaurorax_api_reference/pyaurorax/exceptions.html
@@ -5,7 +5,8 @@
pyaurorax.exceptions API documentation
-
+
@@ -22,114 +23,135 @@

Module pyaurorax.exceptions

-

The exceptions module contains exceptions unique to the PyAuroraX library

+

Unique exception classes utilized by PyAuroraX. These exceptions can be used to help trap specific +errors raised by this library.

+

Note that all exceptions are imported at the root level of the library. They +can be referenced using pyaurorax.AuroraXError +or pyaurorax.exceptions.AuroraXError.
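A brief sketch of trapping these exceptions via the root-level imports described above; the body of the try block is a placeholder for any PyAuroraX operation:

import pyaurorax

aurorax = pyaurorax.PyAuroraX()
try:
    pass  # placeholder: a search, download, or upload call would go here
except pyaurorax.AuroraXAPIError as e:
    print("API error: %s" % (e))
except pyaurorax.AuroraXError as e:
    print("other PyAuroraX error: %s" % (e))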

Expand source code -
"""
-The exceptions module contains exceptions unique to the PyAuroraX library
+
# Copyright 2024 University of Calgary
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Unique exception classes utilized by PyAuroraX. These exceptions can be used to help trap specific 
+errors raised by this library.
+
+Note that all exceptions are imported at the root level of the library. They
+can be referenced using [`pyaurorax.AuroraXError`](exceptions.html#pyaurorax.exceptions.AuroraXError) 
+or `pyaurorax.exceptions.AuroraXError`.
 """
 
 
-class AuroraXException(Exception):
+class AuroraXError(Exception):
 
     def __init__(self, *args, **kwargs):
-        response = kwargs.pop("response", None)
-        self.response = response
-        self.request = kwargs.pop("request", None)
-        if (response is not None and not self.request and hasattr(response, "request")):
-            self.request = self.response.request
-        super(AuroraXException, self).__init__(*args, **kwargs)
+        super(AuroraXError, self).__init__(*args, **kwargs)  # pragma: no cover
 
 
-class AuroraXNotFoundException(AuroraXException):
+class AuroraXInitializationError(AuroraXError):
     """
-    The AuroraX record was not found
+    Error occurred during library initialization
     """
     pass
 
 
-class AuroraXDuplicateException(AuroraXException):
+class AuroraXPurgeError(AuroraXError):
     """
-    A duplicate record already exists
+    Error occurred during purging of download or tar extraction working directory
     """
     pass
 
 
-class AuroraXValidationException(AuroraXException):
+class AuroraXAPIError(AuroraXError):
     """
-    Validation of data failed
+    Error occurred during an API call
     """
     pass
 
 
-class AuroraXUnexpectedContentTypeException(AuroraXException):
+class AuroraXNotFoundError(AuroraXError):
     """
-    The API responded with an unexpected content type
+    The AuroraX record was not found
     """
     pass
 
 
-class AuroraXMaxRetriesException(AuroraXException):
+class AuroraXDuplicateError(AuroraXError):
     """
-    The maximum number of retries for the request has been reached
+    A duplicate record already exists
     """
     pass
 
 
-class AuroraXBadParametersException(AuroraXException):
+class AuroraXUnauthorizedError(AuroraXError):
     """
-    Bad parameters were given in the request
+    A privileged operation was attempted without authorization
     """
     pass
 
 
-class AuroraXUnauthorizedException(AuroraXException):
+class AuroraXConflictError(AuroraXError):
     """
-    A privileged operation was attempted without authorization
+    A conflict occurred while modifying records
     """
     pass
 
 
-class AuroraXConflictException(AuroraXException):
+class AuroraXDataRetrievalError(AuroraXError):
     """
-    A conflict occurred while modifying records
+    Error occurred while retrieving search data
     """
     pass
 
 
-class AuroraXUploadException(AuroraXException):
+class AuroraXSearchError(AuroraXError):
     """
-    Error occurred during upload operation
+    An error occurred in the API while performing a search
     """
     pass
 
 
-class AuroraXUnexpectedEmptyResponse(AuroraXException):
+class AuroraXUploadError(AuroraXError):
     """
-    An empty response was received when it wasn't expected
+    Error occurred during upload operation
     """
     pass
 
 
-class AuroraXDataRetrievalException(AuroraXException):
+class AuroraXMaintenanceError(AuroraXError):
     """
-    Error occurred while retrieving search data
+    AuroraX API is in maintenance mode; only read-only tasks are possible
     """
     pass
 
 
-class AuroraXTimeoutException(AuroraXException):
+class AuroraXUnsupportedReadError(AuroraXError):
     """
-    A timeout was reached while communicating with the AuroraX API
+    Unsupported dataset for read function
+
+    NOTE: this is primarily a PyUCalgarySRS error
     """
     pass
 
 
-class AuroraXSearchException(AuroraXException):
+class AuroraXDownloadError(AuroraXError):
     """
-    An error occured in the API while performing a search
+    Error occurred during downloading of data
+
+    NOTE: this is primarily a PyUCalgarySRS error
     """
     pass
@@ -143,31 +165,31 @@

Module pyaurorax.exceptions

Classes

-
-class AuroraXBadParametersException +
+class AuroraXAPIError (*args, **kwargs)
-

Bad parameters were given in the request

+

Error occurred during an API call

Expand source code -
class AuroraXBadParametersException(AuroraXException):
+
class AuroraXAPIError(AuroraXError):
     """
-    Bad parameters were given in the request
+    Error occurred during an API call
     """
     pass

Ancestors

-
-class AuroraXConflictException +
+class AuroraXConflictError (*args, **kwargs)
@@ -176,7 +198,7 @@

Ancestors

Expand source code -
class AuroraXConflictException(AuroraXException):
+
class AuroraXConflictError(AuroraXError):
     """
     A conflict occurred while modifying records
     """
@@ -184,13 +206,13 @@ 

Ancestors

Ancestors

-
-class AuroraXDataRetrievalException +
+class AuroraXDataRetrievalError (*args, **kwargs)
@@ -199,7 +221,7 @@

Ancestors

Expand source code -
class AuroraXDataRetrievalException(AuroraXException):
+
class AuroraXDataRetrievalError(AuroraXError):
     """
     Error occurred while retrieving search data
     """
@@ -207,279 +229,280 @@ 

Ancestors

Ancestors

-
-class AuroraXDuplicateException +
+class AuroraXDownloadError (*args, **kwargs)
-

A duplicate record already exists

+

Error occurred during downloading of data

+

NOTE: this is primarily a PyUCalgarySRS error

Expand source code -
class AuroraXDuplicateException(AuroraXException):
+
class AuroraXDownloadError(AuroraXError):
     """
-    A duplicate record already exists
+    Error occurred during downloading of data
+
+    NOTE: this is primarily a PyUCalgarySRS error
     """
     pass

Ancestors

-
-class AuroraXException +
+class AuroraXDuplicateError (*args, **kwargs)
-

Common base class for all non-exit exceptions.

+

A duplicate record already exists

Expand source code -
class AuroraXException(Exception):
-
-    def __init__(self, *args, **kwargs):
-        response = kwargs.pop("response", None)
-        self.response = response
-        self.request = kwargs.pop("request", None)
-        if (response is not None and not self.request and hasattr(response, "request")):
-            self.request = self.response.request
-        super(AuroraXException, self).__init__(*args, **kwargs)
+
class AuroraXDuplicateError(AuroraXError):
+    """
+    A duplicate record already exists
+    """
+    pass

Ancestors

-

Subclasses

-
-
-class AuroraXMaxRetriesException +
+class AuroraXError (*args, **kwargs)
-

The maximum number of retries for the request has been reached

+

Common base class for all non-exit exceptions.

Expand source code -
class AuroraXMaxRetriesException(AuroraXException):
-    """
-    The maximum number of retries for the request has been reached
-    """
-    pass
+
class AuroraXError(Exception):
+
+    def __init__(self, *args, **kwargs):
+        super(AuroraXError, self).__init__(*args, **kwargs)  # pragma: no cover

Ancestors

+

Subclasses

+
-
-class AuroraXNotFoundException +
+class AuroraXInitializationError (*args, **kwargs)
-

The AuroraX record was not found

+

Error occurred during library initialization

Expand source code -
class AuroraXNotFoundException(AuroraXException):
+
class AuroraXInitializationError(AuroraXError):
     """
-    The AuroraX record was not found
+    Error occurred during library initialization
     """
     pass

Ancestors

-
-class AuroraXSearchException +
+class AuroraXMaintenanceError (*args, **kwargs)
-

An error occured in the API while performing a search

+

AuroraX API is in maintenance mode; only read-only tasks are possible

Expand source code -
class AuroraXSearchException(AuroraXException):
+
class AuroraXMaintenanceError(AuroraXError):
     """
-    An error occured in the API while performing a search
+    AuroraX API is in maintenance mode; only read-only tasks are possible
     """
     pass

Ancestors

-
-class AuroraXTimeoutException +
+class AuroraXNotFoundError (*args, **kwargs)
-

A timeout was reached while communicating with the AuroraX API

+

The AuroraX record was not found

Expand source code -
class AuroraXTimeoutException(AuroraXException):
+
class AuroraXNotFoundError(AuroraXError):
     """
-    A timeout was reached while communicating with the AuroraX API
+    The AuroraX record was not found
     """
     pass

Ancestors

-
-class AuroraXUnauthorizedException +
+class AuroraXPurgeError (*args, **kwargs)
-

A privileged operation was attempted without authorization

+

Error occurred during purging of download or tar extraction working directory

Expand source code -
class AuroraXUnauthorizedException(AuroraXException):
+
class AuroraXPurgeError(AuroraXError):
     """
-    A privileged operation was attempted without authorization
+    Error occurred during purging of download or tar extraction working directory
     """
     pass

Ancestors

-
-class AuroraXUnexpectedContentTypeException +
+class AuroraXSearchError (*args, **kwargs)
-

The API responded with an unexpected content type

+

An error occurred in the API while performing a search

Expand source code -
class AuroraXUnexpectedContentTypeException(AuroraXException):
+
class AuroraXSearchError(AuroraXError):
     """
-    The API responded with an unexpected content type
+    An error occurred in the API while performing a search
     """
     pass

Ancestors

-
-class AuroraXUnexpectedEmptyResponse +
+class AuroraXUnauthorizedError (*args, **kwargs)
-

An empty response was received when it wasn't expected

+

A privileged operation was attempted without authorization

Expand source code -
class AuroraXUnexpectedEmptyResponse(AuroraXException):
+
class AuroraXUnauthorizedError(AuroraXError):
     """
-    An empty response was received when it wasn't expected
+    A privileged operation was attempted without authorization
     """
     pass

Ancestors

-
-class AuroraXUploadException +
+class AuroraXUnsupportedReadError (*args, **kwargs)
-

Error occurred during upload operation

+

Unsupported dataset for read function

+

NOTE: this is primarily a PyUCalgarySRS error

Expand source code -
class AuroraXUploadException(AuroraXException):
+
class AuroraXUnsupportedReadError(AuroraXError):
     """
-    Error occurred during upload operation
+    Unsupported dataset for read function
+
+    NOTE: this is primarily a PyUCalgarySRS error
     """
     pass

Ancestors

-
-class AuroraXValidationException +
+class AuroraXUploadError (*args, **kwargs)
-

Validation of data failed

+

Error occurred during upload operation

Expand source code -
class AuroraXValidationException(AuroraXException):
+
class AuroraXUploadError(AuroraXError):
     """
-    Validation of data failed
+    Error occurred during upload operation
     """
     pass

Ancestors

@@ -547,46 +570,46 @@

Index

  • Classes

  •
diff --git a/docs/code/pyaurorax_api_reference/pyaurorax/index.html b/docs/code/pyaurorax_api_reference/pyaurorax/index.html
index 5dce78c..805f08d 100644
--- a/docs/code/pyaurorax_api_reference/pyaurorax/index.html
+++ b/docs/code/pyaurorax_api_reference/pyaurorax/index.html
@@ -5,9 +5,8 @@
pyaurorax API documentation
-
+
@@ -24,182 +23,1043 @@

    Package pyaurorax

    -

    The PyAuroraX package provides a way to interact with the -AuroraX API. It is intended -to provide an intuitive process for those in the space physics and related -communities to programmatically query AuroraX's vast database for conjunctions, -ephemeris or data product records, data availability information, and more.

    -

    Check out this project on GitHub -and explore the evolving ecosystem of visualizations, tools, and data -at AuroraX.

    +

    The PyAuroraX package provides a way to interact with the AuroraX Data Platform, +facilitating programmatic usage of AuroraX's search engine and data analysis tools.

    For an overview of usage and examples, visit the -AuroraX Documentation website. -Details of functionality and options are available in the -API reference.

    +AuroraX Developer Zone website, or explore the examples contained +in the Github repository here.

    Installation:

    -
    $ python -m pip install pyaurorax
    +
    pip install pyaurorax
     

    Basic usage:

    -
    > import pyaurorax
    +
    import pyaurorax
    +aurorax = pyaurorax.PyAuroraX()
     
    Expand source code -
    """
    -The PyAuroraX package provides a way to interact with the
    -[AuroraX API](https://aurorax.space/data/apiLibraries). It is intended
    -to provide an intuitive process for those in the space physics and related
    -communities to programmatically query AuroraX's vast database for conjunctions,
    -ephemeris or data product records, data availability information, and more.
    -
    -Check out this project on [GitHub](https://github.com/aurorax-space/pyaurorax)
    -and explore the evolving ecosystem of visualizations, tools, and data
    -at [AuroraX](https://aurorax.space/).
    +
    # Copyright 2024 University of Calgary
    +#
    +# Licensed under the Apache License, Version 2.0 (the "License");
    +# you may not use this file except in compliance with the License.
    +# You may obtain a copy of the License at
    +#
    +#     http://www.apache.org/licenses/LICENSE-2.0
    +#
    +# Unless required by applicable law or agreed to in writing, software
    +# distributed under the License is distributed on an "AS IS" BASIS,
    +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +# See the License for the specific language governing permissions and
    +# limitations under the License.
    +"""
    +The PyAuroraX package provides a way to interact with the [AuroraX Data Platform](https://aurorax.space), 
    +facilitating programmatic usage of AuroraX's search engine and data analysis tools.
     
     For an overview of usage and examples, visit the
    -[AuroraX Documentation website](https://docs.aurorax.space/code/overview).
    -Details of functionality and options are available in the
    -[API reference](https://docs.aurorax.space/code/pyaurorax_api_reference/pyaurorax/).
    +[AuroraX Developer Zone website](https://docs.aurorax.space/code/overview), or explore the examples contained
    +in the Github repository [here](https://github.com/aurorax-space/pyaurorax/tree/main/examples).
     
     Installation:
     ```console
    -$ python -m pip install pyaurorax
    +pip install pyaurorax
     ```
     
     Basic usage:
     ```python
    -> import pyaurorax
    +import pyaurorax
    +aurorax = pyaurorax.PyAuroraX()
     ```
     """
     
     # versioning info
    -__version__ = "0.13.2"
    +__version__ = "1.0.0-rc1"
     
     # documentation excludes
    -__pdoc__ = {"cli": False}
    -
    -# pull in top level functions
    -from .api import (AuroraXRequest,
    -                  authenticate,
    -                  get_api_key)
    -from .sources import (FORMAT_BASIC_INFO,
    -                      FORMAT_BASIC_INFO_WITH_METADATA,
    -                      FORMAT_FULL_RECORD,
    -                      FORMAT_IDENTIFIER_ONLY,
    -                      FORMAT_DEFAULT,
    -                      SOURCE_TYPE_EVENT_LIST,
    -                      SOURCE_TYPE_GROUND,
    -                      SOURCE_TYPE_HEO,
    -                      SOURCE_TYPE_LEO,
    -                      SOURCE_TYPE_LUNAR)
    -from .conjunctions import (CONJUNCTION_TYPE_NBTRACE,
    -                           CONJUNCTION_TYPE_SBTRACE)
    -from .data_products import (DATA_PRODUCT_TYPE_KEOGRAM,
    -                            DATA_PRODUCT_TYPE_MONTAGE,
    -                            DATA_PRODUCT_TYPE_MOVIE,
    -                            DATA_PRODUCT_TYPE_SUMMARY_PLOT,
    -                            DATA_PRODUCT_TYPE_DATA_AVAILABILITY)
    -
    -# pull in exceptions at top level
    -from .exceptions import (AuroraXException,
    -                         AuroraXNotFoundException,
    -                         AuroraXMaxRetriesException,
    -                         AuroraXDuplicateException,
    -                         AuroraXUnexpectedContentTypeException,
    -                         AuroraXValidationException,
    -                         AuroraXBadParametersException,
    -                         AuroraXUnauthorizedException,
    -                         AuroraXConflictException,
    -                         AuroraXUploadException,
    -                         AuroraXUnexpectedEmptyResponse,
    -                         AuroraXDataRetrievalException,
    -                         AuroraXTimeoutException)
    -
    -# pull in models
    -from .location import Location
    -
    -# pull in modules (order matters otherwise we get circular import errors)
    -from pyaurorax import requests
    -from pyaurorax import api
    -from pyaurorax import sources
    -from pyaurorax import exceptions
    -from pyaurorax import metadata
    -from pyaurorax import util
    -from pyaurorax import availability
    -from pyaurorax import conjunctions
    -from pyaurorax import ephemeris
    -from pyaurorax import data_products
    +__pdoc__ = {"cli": False, "pyaurorax": False} +__all__ = ["PyAuroraX"] + +# pull in top level class +from .pyaurorax import PyAuroraX + +# pull in top-level submodules +# +# NOTE: we do this only so that we can access classes within the +# submodules, like `pyaurorax.search.EphemerisSearch`. Without this, +# they are selectively addressable, such as within ipython, but not +# vscode. Currently, this is ONLY included for VSCode's sake. Will +# take more testing to explore other use-cases. +from . import search +from . import data +from . import models + +# pull in exceptions +from .exceptions import ( + AuroraXError, + AuroraXInitializationError, + AuroraXPurgeError, + AuroraXAPIError, + AuroraXNotFoundError, + AuroraXDuplicateError, + AuroraXUnauthorizedError, + AuroraXConflictError, + AuroraXDataRetrievalError, + AuroraXSearchError, + AuroraXUploadError, + AuroraXMaintenanceError, + AuroraXUnsupportedReadError, + AuroraXDownloadError, +)

    Sub-modules

    -
    pyaurorax.api
    +
    pyaurorax.data
    -

    This module is the under-the-hood interface for RESTful API -requests. It provides helper functions that the PyAuroraX library -uses to make robust …

    +

    Instrument data downloading and reading module. This module presently has support +for data provided by the University of Calgary, such as THEMIS ASI, …

    -
    pyaurorax.availability
    +
    pyaurorax.exceptions
    -

    The availability module provides functions to quickly -determine what data exists on the AuroraX platform …

    +

    Unique exception classes utilized by PyAuroraX. These exceptions can be used to help trap specific +errors raised by this library …

    -
    pyaurorax.conjunctions
    +
    pyaurorax.models
    -

    The conjunction module is used for finding conjunctions between -groupings of data sources …

    +

    Interact with various auroral models, such as the TREx Auroral Transport Model (ATM).

    -
    pyaurorax.data_products
    +
    pyaurorax.search
    -

    The data_products module is used to search and upload data -product records within AuroraX. One example of a data product -is a keogram …

    +

    Interact with the AuroraX search engine. This includes finding data sources, searching +for conjunctions or ephemeris data, and uploading/managing …

    -
    pyaurorax.ephemeris
    +
    pyaurorax.tools
    -

    The ephemeris module is used to search and upload ephemeris records -within AuroraX …

    +

    Data analysis toolkit for working with all-sky imager data available within the +AuroraX platform …

    -
    pyaurorax.exceptions
    +
    +
    +
    +
    +
    +
    +
    +

    Classes

    +
    +
    +class PyAuroraX +(download_output_root_path: Optional[str] = None, read_tar_temp_path: Optional[str] = None, api_base_url: Optional[str] = None, api_timeout: Optional[int] = None, api_headers: Optional[Dict] = None, api_key: Optional[str] = None, srs_obj: Optional[pyucalgarysrs.pyucalgarysrs.PyUCalgarySRS] = None) +
    -

    The exceptions module contains exceptions unique to the PyAuroraX library

    +

    The PyAuroraX class is the primary entry point for utilizing +this library. It is used to initialize a session, capturing details +about API connectivity, environment, and more. All submodules are +encapsulated within this class, so any usage of the library starts +with creating this object.

    +
    import pyaurorax
    +aurorax = pyaurorax.PyAuroraX()
    +
    +

    When working with this object, you can set configuration parameters, such +as the destination directory for downloaded data, or API special settings +(e.g., timeout, HTTP headers, API key). These parameters can be set when +instantiating the object, or after instantiating using the self-contained +accessible variables.
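For instance, both styles can be sketched as follows; the path and API key values are placeholders:

import pyaurorax

# set parameters when instantiating
aurorax = pyaurorax.PyAuroraX(
    download_output_root_path="/some/path/pyaurorax_data",  # placeholder path
    api_timeout=30,
)

# or adjust them afterwards using the accessible variables
aurorax.api_key = "your-api-key-here"  # only needed for write operations to the search API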

    +

    Attributes

    +
    +
    download_output_root_path : str
    +
Destination directory for downloaded data. The default for this path is a +subfolder in the user's home directory, such +as /home/user/pyaurorax_data +on Linux. On Windows and macOS, it is similar.
    +
    read_tar_temp_path : str
    +
Temporary directory used for tar extraction phases during file reading (e.g., +reading TREx RGB Burst data). The default for this is <download_output_root_path>/tar_temp_working. +For faster performance when reading tar-based data, one option on Linux is +to set this to use RAM directly at /dev/shm/pyaurorax_tar_temp_working.
    +
    api_base_url : str
    +
URL prefix to use when interacting with the AuroraX API. By default this is set to +https://api.aurorax.space. This parameter is primarily used by the development +team to test and build new functions using the private staging API.
    +
    api_timeout : int
    +
The timeout used when communicating with the AuroraX API. This value is represented in +seconds, and by default is 10 seconds.
    +
    api_headers : Dict
    +
    HTTP headers used when communicating with the AuroraX API. The default for this value +consists of several standard headers. Any changes to this parameter are in addition to +the default standard headers.
    +
    api_key : str
    +
    API key to use when interacting with the AuroraX API. The default value is None. Please note +that an API key is only required for write operations to the AuroraX search API, such as +creating data sources or uploading ephemeris data.
    +
    srs_obj : pyucalgarysrs.PyUCalgarySRS
    +
A PyUCalgarySRS object. +If not supplied, one will be created automatically with some settings carried over from the PyAuroraX +object. Note that specifying this is intended for advanced users and is only necessary in a few special use cases.
    +
    +

    Raises

    +
    +
    AuroraXInitializationError
    +
    an error was encountered during initialization +of the paths
    +
    +
    + +Expand source code + +
    class PyAuroraX:
    +    """
    +    The `PyAuroraX` class is the primary entry point for utilizing
    +    this library. It is used to initialize a session, capturing details
    +    about API connectivity, environment, and more. All submodules are 
    +    encapsulated within this class, so any usage of the library starts 
    +    with creating this object.
    +
    +    ```python
    +    import pyaurorax
    +    aurorax = pyaurorax.PyAuroraX()
    +    ```
    +
    +    When working with this object, you can set configuration parameters, such 
    +    as the destination directory for downloaded data, or API special settings 
    +    (e.g., timeout, HTTP headers, API key). These parameters can be set when 
    +    instantiating the object, or after instantiating using the self-contained 
    +    accessible variables.
    +    """
    +
    +    __DEFAULT_API_BASE_URL = "https://api.aurorax.space"
    +    __DEFAULT_API_TIMEOUT = 10
    +    __DEFAULT_API_HEADERS = {
    +        "content-type": "application/json",
    +        "user-agent": "python-pyaurorax/%s" % (__version__),
    +    }  # NOTE: these MUST be lowercase so that the decorator logic cannot be overridden
    +
    +    def __init__(self,
    +                 download_output_root_path: Optional[str] = None,
    +                 read_tar_temp_path: Optional[str] = None,
    +                 api_base_url: Optional[str] = None,
    +                 api_timeout: Optional[int] = None,
    +                 api_headers: Optional[Dict] = None,
    +                 api_key: Optional[str] = None,
    +                 srs_obj: Optional[pyucalgarysrs.PyUCalgarySRS] = None):
    +        """
    +        Attributes:
    +            download_output_root_path (str): 
    +                Destination directory for downloaded data. The default for this path is a 
+                subfolder in the user's home directory, such as `/home/user/pyaurorax_data` 
+                on Linux. On Windows and macOS, it is similar.
    +
    +            read_tar_temp_path (str): 
    +                Temporary directory used for tar extraction phases during file reading (e.g., 
+                reading TREx RGB Burst data). The default for this is `<download_output_root_path>/tar_temp_working`. 
    +                For faster performance when reading tar-based data, one option on Linux is 
    +                to set this to use RAM directly at `/dev/shm/pyaurorax_tar_temp_working`.
    +
    +            api_base_url (str): 
    +                URL prefix to use when interacting with the AuroraX API. By default this is set to 
+                `https://api.aurorax.space`. This parameter is primarily used by the development 
    +                team to test and build new functions using the private staging API.
    +
    +            api_timeout (int): 
+                The timeout used when communicating with the AuroraX API. This value is represented in 
    +                seconds, and by default is `10 seconds`.
    +            
    +            api_headers (Dict): 
    +                HTTP headers used when communicating with the AuroraX API. The default for this value 
    +                consists of several standard headers. Any changes to this parameter are in addition to 
    +                the default standard headers.
    +
    +            api_key (str): 
    +                API key to use when interacting with the AuroraX API. The default value is None. Please note
    +                that an API key is only required for write operations to the AuroraX search API, such as
    +                creating data sources or uploading ephemeris data.
    +        
    +            srs_obj (pyucalgarysrs.PyUCalgarySRS): 
    +                A [PyUCalgarySRS](https://docs-pyucalgarysrs.phys.ucalgary.ca/#pyucalgarysrs.PyUCalgarySRS) object. 
+                If not supplied, one will be created automatically with some settings carried over from the PyAuroraX 
+                object. Note that specifying this is intended for advanced users and is only necessary in a few special use cases.
    +
    +        Raises:
    +            pyaurorax.exceptions.AuroraXInitializationError: an error was encountered during initialization 
    +                of the paths
    +        """
    +        # initialize path parameters
    +        self.__download_output_root_path = download_output_root_path
    +        self.__read_tar_temp_path = read_tar_temp_path
    +
    +        # initialize api parameters
    +        self.__api_base_url = api_base_url
    +        if (api_base_url is None):
    +            self.__api_base_url = self.__DEFAULT_API_BASE_URL
    +        self.__api_headers = api_headers
    +        if (api_headers is None):
    +            self.__api_headers = self.__DEFAULT_API_HEADERS
    +        self.__api_timeout = api_timeout
    +        if (api_timeout is None):
    +            self.__api_timeout = self.__DEFAULT_API_TIMEOUT
    +        self.__api_key = api_key
    +
    +        # initialize paths
    +        self.__initialize_paths()
    +
    +        # initialize PyUCalgarySRS object
    +        if (srs_obj is None):
    +            self.__srs_obj = pyucalgarysrs.PyUCalgarySRS(
    +                api_headers=self.__api_headers,
    +                api_timeout=self.__api_timeout,
    +                download_output_root_path=self.download_output_root_path,
    +                read_tar_temp_path=self.read_tar_temp_path,
    +            )
    +        else:
    +            self.__srs_obj = srs_obj
    +
    +        # initialize sub-modules
    +        self.__search = SearchManager(self)
    +        self.__data = DataManager(self)
    +        self.__models = ModelsManager(self)
    +        self.__tools = tools_module
    +
    +    # ------------------------------------------
    +    # properties for submodule managers
    +    # ------------------------------------------
    +    @property
    +    def search(self):
    +        """
    +        Access to the `search` submodule from within a PyAuroraX object.
    +        """
    +        return self.__search
    +
    +    @property
    +    def data(self):
    +        """
    +        Access to the `data` submodule from within a PyAuroraX object.
    +        """
    +        return self.__data
    +
    +    @property
    +    def models(self):
    +        """
    +        Access to the `models` submodule from within a PyAuroraX object.
    +        """
    +        return self.__models
    +
    +    @property
    +    def tools(self):
    +        """
    +        Access to the `tools` submodule from within a PyAuroraX object.
    +        """
    +        return self.__tools
    +
    +    # ------------------------------------------
    +    # properties for configuration parameters
    +    # ------------------------------------------
    +    @property
    +    def api_base_url(self):
    +        """
    +        Property for the API base URL. See above for details.
    +        """
    +        return self.__api_base_url
    +
    +    @api_base_url.setter
    +    def api_base_url(self, value: str):
    +        if (value is None):
    +            self.__api_base_url = self.__DEFAULT_API_BASE_URL
    +        else:
    +            self.__api_base_url = value
    +
    +    @property
    +    def api_headers(self):
    +        """
    +        Property for the API headers. See above for details.
    +        """
    +        return self.__api_headers
    +
    +    @api_headers.setter
    +    def api_headers(self, value: Dict):
    +        new_headers = self.__DEFAULT_API_HEADERS
    +        if (value is not None):
    +            for k, v in value.items():
    +                k = k.lower()
    +                if (k in new_headers):
    +                    warnings.warn("Cannot override default '%s' header" % (k), UserWarning, stacklevel=1)
    +                else:
    +                    new_headers[k] = v
    +        self.__api_headers = new_headers
    +        if ("user-agent" in new_headers):
    +            self.__srs_obj.api_headers = {"user-agent": new_headers["user-agent"]}
    +
    +    @property
    +    def api_timeout(self):
    +        """
    +        Property for the API timeout. See above for details.
    +        """
    +        return self.__api_timeout
    +
    +    @api_timeout.setter
    +    def api_timeout(self, value: int):
    +        new_timeout = self.__DEFAULT_API_TIMEOUT
    +        if (value is not None):
    +            new_timeout = value
    +        self.__api_timeout = new_timeout
    +        self.__srs_obj.api_timeout = new_timeout
    +
    +    @property
    +    def api_key(self):
    +        """
    +        Property for the API key. See above for details.
    +        """
    +        return self.__api_key
    +
    +    @api_key.setter
    +    def api_key(self, value: str):
    +        self.__api_key = value
    +
    +    @property
    +    def download_output_root_path(self):
    +        """
    +        Property for the download output root path. See above for details.
    +        """
    +        return str(self.__download_output_root_path)
    +
    +    @download_output_root_path.setter
    +    def download_output_root_path(self, value: str):
    +        self.__download_output_root_path = value
    +        self.__initialize_paths()
    +        self.__srs_obj.download_output_root_path = self.__download_output_root_path
    +
    +    @property
    +    def read_tar_temp_path(self):
    +        """
    +        Property for the read tar temp path. See above for details.
    +        """
    +        return str(self.__read_tar_temp_path)
    +
    +    @read_tar_temp_path.setter
    +    def read_tar_temp_path(self, value: str):
    +        self.__read_tar_temp_path = value
    +        self.__initialize_paths()
    +        self.__srs_obj.read_tar_temp_path = self.__read_tar_temp_path
    +
    +    @property
    +    def srs_obj(self):
    +        """
    +        Property for the PyUCalgarySRS object. See above for details.
    +        """
    +        return self.__srs_obj
    +
    +    @srs_obj.setter
    +    def srs_obj(self, new_obj: pyucalgarysrs.PyUCalgarySRS):
    +        self.__srs_obj = new_obj
    +
    +    # -----------------------------
    +    # special methods
    +    # -----------------------------
    +    def __str__(self) -> str:
    +        return self.__repr__()
    +
    +    def __repr__(self) -> str:
    +        return ("PyAuroraX(download_output_root_path='%s', read_tar_temp_path='%s', api_base_url='%s', " +
    +                "api_headers=%s, api_timeout=%s, api_key='%s', srs_obj=PyUCalgarySRS(...))") % (
    +                    self.__download_output_root_path,
    +                    self.__read_tar_temp_path,
    +                    self.api_base_url,
    +                    self.api_headers,
    +                    self.api_timeout,
    +                    self.api_key,
    +                )
    +
    +    # -----------------------------
    +    # private methods
    +    # -----------------------------
    +    def __initialize_paths(self):
    +        """
    +        Initialize the `download_output_root_path` and `read_tar_temp_path` directories.
    +
    +        Raises:
    +            pyaurorax.exceptions.AuroraXInitializationError: an error was encountered during
    +                initialization of the paths
    +        """
    +        if (self.__download_output_root_path is None):
    +            self.__download_output_root_path = Path("%s/pyaurorax_data" % (str(Path.home())))
    +        if (self.__read_tar_temp_path is None):
    +            self.__read_tar_temp_path = Path("%s/tar_temp_working" % (self.__download_output_root_path))
    +        try:
    +            os.makedirs(self.download_output_root_path, exist_ok=True)
    +            os.makedirs(self.read_tar_temp_path, exist_ok=True)
    +        except IOError as e:  # pragma: nocover
    +            raise AuroraXInitializationError("Error during output path creation: %s" % str(e)) from e
    +
    +    # -----------------------------
    +    # public methods
    +    # -----------------------------
    +    def purge_download_output_root_path(self):
    +        """
    +        Delete all files in the `download_output_root_path` directory. Since the
    +        library downloads data to this directory, over time it can grow too large
    +        and the user can risk running out of space. This method is here to assist
    +        with easily clearing out this directory.
    +
    +        Note that it also deletes all files in the PyUCalgarySRS object's 
    +        download_output_root_path path as well. Normally, these two paths are the 
    +        same, but it can be different if the user specifically changes it. 
    +
    +        Raises:
    +            pyaurorax.exceptions.AuroraXPurgeError: an error was encountered during the purge operation
    +        """
    +        try:
    +            # purge pyaurorax path
    +            for item in os.listdir(self.download_output_root_path):
    +                item = Path(self.download_output_root_path) / item
    +                if (os.path.isdir(item) is True and self.read_tar_temp_path not in str(item)):
    +                    shutil.rmtree(item)
    +                elif (os.path.isfile(item) is True):
    +                    os.remove(item)
    +
    +            # purge pyucalgarysrs path
    +            self.__srs_obj.purge_download_output_root_path()
    +        except Exception as e:  # pragma: nocover
    +            raise AuroraXPurgeError("Error while purging download output root path: %s" % (str(e))) from e
    +
    +    def purge_read_tar_temp_path(self):
    +        """
    +        Delete all files in the `read_tar_temp_path` directory. Since the library 
+        extracts temporary data to this directory, sometimes issues during reading 
    +        can cause this directory to contain residual files that aren't deleted during 
    +        the normal read routine. Though this is very rare, it is still possible. 
    +        Therefore, this method is here to assist with easily clearing out this 
    +        directory.
    +
    +        Note that it also deletes all files in the PyUCalgarySRS object's 
    +        read_tar_temp_path path as well. Normally, these two paths are the 
    +        same, but it can be different if the user specifically changes it. 
    +
    +        Raises:
    +            pyaurorax.exceptions.AuroraXPurgeError: an error was encountered during the purge operation
    +        """
    +        try:
    +            # purge pyaurorax path
    +            for item in os.listdir(self.read_tar_temp_path):
    +                item = Path(self.read_tar_temp_path) / item
    +                if (os.path.isdir(item) is True and self.download_output_root_path not in str(item)):
    +                    shutil.rmtree(item)
    +                elif (os.path.isfile(item) is True):
    +                    os.remove(item)
    +
    +            # purge pyucalgarysrs path
    +            self.__srs_obj.purge_read_tar_temp_path()
    +        except Exception as e:  # pragma: nocover
    +            raise AuroraXPurgeError("Error while purging read tar temp path: %s" % (str(e))) from e
    +
    +    def show_data_usage(self, order: Literal["name", "size"] = "size", return_dict: bool = False) -> Any:
    +        """
    +        Print the volume of data existing in the download_output_root_path, broken down
    +        by dataset. Alternatively return the information in a dictionary.
    +        
    +        This can be a helpful tool for managing your disk space.
    +
    +        Args:
+            order (str): 
    +                Order results by either `size` or `name`. Default is `size`.
    +
    +            return_dict (bool): 
    +                Instead of printing the data usage information, return the information as a dictionary.
    +
    +        Returns:
    +            Printed output. If `return_dict` is True, then it will instead return a dictionary with the
    +            disk usage information.
    +        
    +        Notes:
    +            Note that size on disk may differ slightly from the values determined by this 
    +            routine. For example, the results here will be slightly different than the output
    +            of a 'du' command on *nix systems.
    +        """
    +        # init
    +        total_size = 0
    +        download_pathlib_path = Path(self.download_output_root_path)
    +
    +        # get list of dataset paths
    +        dataset_paths = []
    +        for f in os.listdir(download_pathlib_path):
    +            path_f = download_pathlib_path / f
    +            if (os.path.isdir(path_f) is True and str(path_f) != self.read_tar_temp_path):
    +                dataset_paths.append(path_f)
    +
    +        # get size of each dataset path
    +        dataset_dict = {}
    +        longest_path_len = 0
    +        for dataset_path in dataset_paths:
    +            # get size
    +            dataset_size = 0
    +            for dirpath, _, filenames in os.walk(dataset_path):
    +                for filename in filenames:
    +                    filepath = os.path.join(dirpath, filename)
    +                    if (os.path.isfile(filepath) is True):
    +                        dataset_size += os.path.getsize(filepath)
    +
    +            # check if this is the longest path name
    +            path_basename = os.path.basename(dataset_path)
    +            if (longest_path_len == 0):
    +                longest_path_len = len(path_basename)
    +            elif (len(path_basename) > longest_path_len):
    +                longest_path_len = len(path_basename)
    +
    +            # set dict
    +            dataset_dict[path_basename] = {
    +                "path_obj": dataset_path,
    +                "size_bytes": dataset_size,
    +                "size_str": humanize.naturalsize(dataset_size),
    +            }
    +
    +            # add to total
    +            total_size += dataset_size
    +
    +        # return dictionary
    +        if (return_dict is True):
    +            return dataset_dict
    +
    +        # print table
    +        #
    +        # order into list
    +        order_key = "size_bytes" if order == "size" else order
    +        ordered_list = []
    +        for path, p_dict in dataset_dict.items():
    +            this_dict = p_dict
    +            this_dict["name"] = path
    +            ordered_list.append(this_dict)
    +        if (order == "size"):
    +            ordered_list = reversed(sorted(ordered_list, key=lambda x: x[order_key]))
    +        else:
    +            ordered_list = sorted(ordered_list, key=lambda x: x[order_key])
    +
    +        # set column data
    +        table_names = []
    +        table_sizes = []
    +        for item in ordered_list:
    +            table_names.append(item["name"])
    +            table_sizes.append(item["size_str"])
    +
    +        # set header values
    +        table_headers = ["Dataset name", "Size"]
    +
    +        # print as table
    +        table = Texttable()
    +        table.set_deco(Texttable.HEADER)
    +        table.set_cols_dtype(["t"] * len(table_headers))
    +        table.set_header_align(["l"] * len(table_headers))
    +        table.set_cols_align(["l"] * len(table_headers))
    +        table.header(table_headers)
    +        for i in range(0, len(table_names)):
    +            table.add_row([table_names[i], table_sizes[i]])
    +        print(table.draw())
    +
    +        print("\nTotal size: %s" % (humanize.naturalsize(total_size)))
    +
    +

    Instance variables

    +
    +
    var api_base_url
    +
    +

    Property for the API base URL. See above for details.

    +
    + +Expand source code + +
    @property
    +def api_base_url(self):
    +    """
    +    Property for the API base URL. See above for details.
    +    """
    +    return self.__api_base_url
    +
    -
    pyaurorax.location
    +
    var api_headers
    -

    The Location module provides a class used throughout the PyAuroraX -library to manage lat/lon positions of different things.

    +

    Property for the API headers. See above for details.

    +
    + +Expand source code + +
    @property
    +def api_headers(self):
    +    """
    +    Property for the API headers. See above for details.
    +    """
    +    return self.__api_headers
    +
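As the setter logic in the class source above shows, values assigned here are merged on top of the default headers, and the defaults themselves cannot be overridden (a warning is issued instead). A small sketch with a made-up header name:

```python
import pyaurorax

aurorax = pyaurorax.PyAuroraX()
aurorax.api_headers = {"x-example-header": "some value"}  # hypothetical header, merged with the defaults
print(aurorax.api_headers)  # includes content-type, user-agent, and the added header
```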
    -
    pyaurorax.metadata
    +
    var api_key
    -

    AuroraX metadata schemas describe the intended structure of metadata stored in -ephemeris and data product records. This module provides functions for -…

    +

    Property for the API key. See above for details.

    +
    + +Expand source code + +
    @property
    +def api_key(self):
    +    """
    +    Property for the API key. See above for details.
    +    """
    +    return self.__api_key
    +
    -
    pyaurorax.requests
    +
    var api_timeout
    -

    The requests module contains helper methods for retrieving data from -an AuroraX request …

    +

    Property for the API timeout. See above for details.

    +
    + +Expand source code + +
    @property
    +def api_timeout(self):
    +    """
    +    Property for the API timeout. See above for details.
    +    """
    +    return self.__api_timeout
    +
    -
    pyaurorax.sources
    +
    var data
    -

    AuroraX data sources are unique instruments that produce ephemeris or -data product records …

    +

    Access to the pyaurorax.data submodule from within a PyAuroraX object.

    +
    + +Expand source code + +
    @property
    +def data(self):
    +    """
    +    Access to the `data` submodule from within a PyAuroraX object.
    +    """
    +    return self.__data
    +
    +
    +
    var download_output_root_path
    +
    +

    Property for the download output root path. See above for details.

    +
    + +Expand source code + +
    @property
    +def download_output_root_path(self):
    +    """
    +    Property for the download output root path. See above for details.
    +    """
    +    return str(self.__download_output_root_path)
    +
    +
    +
    var models
    +
    +

    Access to the pyaurorax.models submodule from within a PyAuroraX object.

    +
    + +Expand source code + +
    @property
    +def models(self):
    +    """
    +    Access to the `models` submodule from within a PyAuroraX object.
    +    """
    +    return self.__models
    +
    +
    +
    var read_tar_temp_path
    +
    +

    Property for the read tar temp path. See above for details.

    +
    + +Expand source code + +
    @property
    +def read_tar_temp_path(self):
    +    """
    +    Property for the read tar temp path. See above for details.
    +    """
    +    return str(self.__read_tar_temp_path)
    +
    +
    +
    var search
    +
    +

    Access to the pyaurorax.search submodule from within a PyAuroraX object.

    +
    + +Expand source code + +
    @property
    +def search(self):
    +    """
    +    Access to the `search` submodule from within a PyAuroraX object.
    +    """
    +    return self.__search
    +
    +
    +
    var srs_obj
    +
    +

    Property for the PyUCalgarySRS object. See above for details.

    +
    + +Expand source code + +
    @property
    +def srs_obj(self):
    +    """
    +    Property for the PyUCalgarySRS object. See above for details.
    +    """
    +    return self.__srs_obj
    +
    -
    pyaurorax.util
    +
    var tools
    -

    The util module provides helper methods such as converting -arbitrary geographic locations to North/South B-trace geographic -locations …

    +

    Access to the pyaurorax.tools submodule from within a PyAuroraX object.

    +
    + +Expand source code + +
    @property
    +def tools(self):
    +    """
    +    Access to the `tools` submodule from within a PyAuroraX object.
    +    """
    +    return self.__tools
    +
    +
    +
    +

    Methods

    +
    +
    +def purge_download_output_root_path(self) +
    +
    +

    Delete all files in the download_output_root_path directory. Since the +library downloads data to this directory, over time it can grow too large +and the user can risk running out of space. This method is here to assist +with easily clearing out this directory.

    +

    Note that it also deletes all files in the PyUCalgarySRS object's +download_output_root_path path as well. Normally, these two paths are the +same, but it can be different if the user specifically changes it.
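A minimal usage sketch (this permanently deletes downloaded files under the configured output path):

```python
import pyaurorax

aurorax = pyaurorax.PyAuroraX()
aurorax.purge_download_output_root_path()  # clears downloaded datasets; the tar temp directory is left alone
```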

    +

    Raises

    +
    +
    AuroraXPurgeError
    +
    an error was encountered during the purge operation
    +
    +
    + +Expand source code + +
    def purge_download_output_root_path(self):
    +    """
    +    Delete all files in the `download_output_root_path` directory. Since the
    +    library downloads data to this directory, over time it can grow too large
    +    and the user can risk running out of space. This method is here to assist
    +    with easily clearing out this directory.
    +
    +    Note that it also deletes all files in the PyUCalgarySRS object's 
    +    download_output_root_path path as well. Normally, these two paths are the 
    +    same, but it can be different if the user specifically changes it. 
    +
    +    Raises:
    +        pyaurorax.exceptions.AuroraXPurgeError: an error was encountered during the purge operation
    +    """
    +    try:
    +        # purge pyaurorax path
    +        for item in os.listdir(self.download_output_root_path):
    +            item = Path(self.download_output_root_path) / item
    +            if (os.path.isdir(item) is True and self.read_tar_temp_path not in str(item)):
    +                shutil.rmtree(item)
    +            elif (os.path.isfile(item) is True):
    +                os.remove(item)
    +
    +        # purge pyucalgarysrs path
    +        self.__srs_obj.purge_download_output_root_path()
    +    except Exception as e:  # pragma: nocover
    +        raise AuroraXPurgeError("Error while purging download output root path: %s" % (str(e))) from e
    +
    +
    +
    +def purge_read_tar_temp_path(self) +
    +
    +

Delete all files in the read_tar_temp_path directory. Since the library +extracts temporary data to this directory, sometimes issues during reading +can cause this directory to contain residual files that aren't deleted during +the normal read routine. Though this is very rare, it is still possible. +Therefore, this method is here to assist with easily clearing out this +directory.

    +

    Note that it also deletes all files in the PyUCalgarySRS object's +read_tar_temp_path path as well. Normally, these two paths are the +same, but it can be different if the user specifically changes it.

    +

    Raises

    +
    +
    AuroraXPurgeError
    +
    an error was encountered during the purge operation
    +
    +
    + +Expand source code + +
    def purge_read_tar_temp_path(self):
    +    """
    +    Delete all files in the `read_tar_temp_path` directory. Since the library 
+    extracts temporary data to this directory, sometimes issues during reading 
    +    can cause this directory to contain residual files that aren't deleted during 
    +    the normal read routine. Though this is very rare, it is still possible. 
    +    Therefore, this method is here to assist with easily clearing out this 
    +    directory.
    +
    +    Note that it also deletes all files in the PyUCalgarySRS object's 
    +    read_tar_temp_path path as well. Normally, these two paths are the 
    +    same, but it can be different if the user specifically changes it. 
    +
    +    Raises:
    +        pyaurorax.exceptions.AuroraXPurgeError: an error was encountered during the purge operation
    +    """
    +    try:
    +        # purge pyaurorax path
    +        for item in os.listdir(self.read_tar_temp_path):
    +            item = Path(self.read_tar_temp_path) / item
    +            if (os.path.isdir(item) is True and self.download_output_root_path not in str(item)):
    +                shutil.rmtree(item)
    +            elif (os.path.isfile(item) is True):
    +                os.remove(item)
    +
    +        # purge pyucalgarysrs path
    +        self.__srs_obj.purge_read_tar_temp_path()
    +    except Exception as e:  # pragma: nocover
    +        raise AuroraXPurgeError("Error while purging read tar temp path: %s" % (str(e))) from e
    +
    +
    +
    +def show_data_usage(self, order: Literal['name', 'size'] = 'size', return_dict: bool = False) ‑> Any +
    +
    +

    Print the volume of data existing in the download_output_root_path, broken down +by dataset. Alternatively return the information in a dictionary.

    +

    This can be a helpful tool for managing your disk space.

    +

    Args

    +
    +
order : str
    +
    Order results by either size or name. Default is size.
    +
    return_dict : bool
    +
    Instead of printing the data usage information, return the information as a dictionary.
    +
    +

    Returns

    +

    Printed output. If return_dict is True, then it will instead return a dictionary with the +disk usage information.

    +

    Notes

    +

    Note that size on disk may differ slightly from the values determined by this +routine. For example, the results here will be slightly different than the output +of a 'du' command on *nix systems.
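A short usage sketch (the datasets printed depend entirely on what has been downloaded locally):

```python
import pyaurorax

aurorax = pyaurorax.PyAuroraX()
aurorax.show_data_usage()                           # prints a table, largest datasets first
usage = aurorax.show_data_usage(return_dict=True)   # or collect the same information as a dictionary
```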

    +
    + +Expand source code + +
    def show_data_usage(self, order: Literal["name", "size"] = "size", return_dict: bool = False) -> Any:
    +    """
    +    Print the volume of data existing in the download_output_root_path, broken down
    +    by dataset. Alternatively return the information in a dictionary.
    +    
    +    This can be a helpful tool for managing your disk space.
    +
    +    Args:
+        order (str): 
    +            Order results by either `size` or `name`. Default is `size`.
    +
    +        return_dict (bool): 
    +            Instead of printing the data usage information, return the information as a dictionary.
    +
    +    Returns:
    +        Printed output. If `return_dict` is True, then it will instead return a dictionary with the
    +        disk usage information.
    +    
    +    Notes:
    +        Note that size on disk may differ slightly from the values determined by this 
    +        routine. For example, the results here will be slightly different than the output
    +        of a 'du' command on *nix systems.
    +    """
    +    # init
    +    total_size = 0
    +    download_pathlib_path = Path(self.download_output_root_path)
    +
    +    # get list of dataset paths
    +    dataset_paths = []
    +    for f in os.listdir(download_pathlib_path):
    +        path_f = download_pathlib_path / f
    +        if (os.path.isdir(path_f) is True and str(path_f) != self.read_tar_temp_path):
    +            dataset_paths.append(path_f)
    +
    +    # get size of each dataset path
    +    dataset_dict = {}
    +    longest_path_len = 0
    +    for dataset_path in dataset_paths:
    +        # get size
    +        dataset_size = 0
    +        for dirpath, _, filenames in os.walk(dataset_path):
    +            for filename in filenames:
    +                filepath = os.path.join(dirpath, filename)
    +                if (os.path.isfile(filepath) is True):
    +                    dataset_size += os.path.getsize(filepath)
    +
    +        # check if this is the longest path name
    +        path_basename = os.path.basename(dataset_path)
    +        if (longest_path_len == 0):
    +            longest_path_len = len(path_basename)
    +        elif (len(path_basename) > longest_path_len):
    +            longest_path_len = len(path_basename)
    +
    +        # set dict
    +        dataset_dict[path_basename] = {
    +            "path_obj": dataset_path,
    +            "size_bytes": dataset_size,
    +            "size_str": humanize.naturalsize(dataset_size),
    +        }
    +
    +        # add to total
    +        total_size += dataset_size
    +
    +    # return dictionary
    +    if (return_dict is True):
    +        return dataset_dict
    +
    +    # print table
    +    #
    +    # order into list
    +    order_key = "size_bytes" if order == "size" else order
    +    ordered_list = []
    +    for path, p_dict in dataset_dict.items():
    +        this_dict = p_dict
    +        this_dict["name"] = path
    +        ordered_list.append(this_dict)
    +    if (order == "size"):
    +        ordered_list = reversed(sorted(ordered_list, key=lambda x: x[order_key]))
    +    else:
    +        ordered_list = sorted(ordered_list, key=lambda x: x[order_key])
    +
    +    # set column data
    +    table_names = []
    +    table_sizes = []
    +    for item in ordered_list:
    +        table_names.append(item["name"])
    +        table_sizes.append(item["size_str"])
    +
    +    # set header values
    +    table_headers = ["Dataset name", "Size"]
    +
    +    # print as table
    +    table = Texttable()
    +    table.set_deco(Texttable.HEADER)
    +    table.set_cols_dtype(["t"] * len(table_headers))
    +    table.set_header_align(["l"] * len(table_headers))
    +    table.set_cols_align(["l"] * len(table_headers))
    +    table.header(table_headers)
    +    for i in range(0, len(table_names)):
    +        table.add_row([table_names[i], table_sizes[i]])
    +    print(table.draw())
    +
    +    print("\nTotal size: %s" % (humanize.naturalsize(total_size)))
    +
    +
    +
    -
    -
    -
    -
    -
    -
    @@ -193,7 +157,7 @@

    Class variables

if (query) search(query); function search(query) {
-const url = '../../../doc-search.html#' + encodeURIComponent(query);
+const url = '../../../../doc-search.html#' + encodeURIComponent(query);
new tingle.modal({ cssClass: ['modal-dialog'], onClose: () => {
@@ -212,18 +176,13 @@

    Index

    • Super-module

    • Classes

diff --git a/docs/code/pyaurorax_api_reference/pyaurorax/search/api/index.html b/docs/code/pyaurorax_api_reference/pyaurorax/search/api/index.html
new file mode 100644
index 0000000..0384a0b
--- /dev/null
+++ b/docs/code/pyaurorax_api_reference/pyaurorax/search/api/index.html
@@ -0,0 +1,499 @@
+pyaurorax.search.api API documentation
      +
      +
      +

      Module pyaurorax.search.api

      +
      +
      +

      Interface for AuroraX API requests. Primarily an under-the-hood module +not needed for most use-cases.

      +
      + +Expand source code + +
      # Copyright 2024 University of Calgary
      +#
      +# Licensed under the Apache License, Version 2.0 (the "License");
      +# you may not use this file except in compliance with the License.
      +# You may obtain a copy of the License at
      +#
      +#     http://www.apache.org/licenses/LICENSE-2.0
      +#
      +# Unless required by applicable law or agreed to in writing, software
      +# distributed under the License is distributed on an "AS IS" BASIS,
      +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +# See the License for the specific language governing permissions and
      +# limitations under the License.
      +"""
      +Interface for AuroraX API requests. Primarily an under-the-hood module 
      +not needed for most use-cases.
      +"""
      +
      +from .classes.request import AuroraXAPIRequest
      +from .classes.response import AuroraXAPIResponse
      +
      +URL_SUFFIX_DATA_SOURCES = "api/v1/data_sources"
      +URL_SUFFIX_DATA_SOURCES_SEARCH = "/api/v1/data_sources/search"
      +URL_SUFFIX_EPHEMERIS_AVAILABILITY = "api/v1/availability/ephemeris"
      +URL_SUFFIX_EPHEMERIS_UPLOAD = "api/v1/data_sources/{}/ephemeris"
      +URL_SUFFIX_EPHEMERIS_SEARCH = "api/v1/ephemeris/search"
      +URL_SUFFIX_EPHEMERIS_REQUEST = "api/v1/ephemeris/requests/{}"
      +URL_SUFFIX_DATA_PRODUCTS_AVAILABILITY = "api/v1/availability/data_products"
      +URL_SUFFIX_DATA_PRODUCTS_UPLOAD = "api/v1/data_sources/{}/data_products"
      +URL_SUFFIX_DATA_PRODUCTS_SEARCH = "api/v1/data_products/search"
      +URL_SUFFIX_DATA_PRODUCTS_REQUEST = "api/v1/data_products/requests/{}"
      +URL_SUFFIX_CONJUNCTION_SEARCH = "api/v1/conjunctions/search"
      +URL_SUFFIX_CONJUNCTION_REQUEST = "api/v1/conjunctions/requests/{}"
      +URL_SUFFIX_DESCRIBE_CONJUNCTION_QUERY = "api/v1/utils/describe/query/conjunction"
      +URL_SUFFIX_DESCRIBE_DATA_PRODUCTS_QUERY = "api/v1/utils/describe/query/data_products"
      +URL_SUFFIX_DESCRIBE_EPHEMERIS_QUERY = "api/v1/utils/describe/query/ephemeris"
      +URL_SUFFIX_LIST_REQUESTS = "api/v1/utils/admin/search_requests"
      +URL_SUFFIX_DELETE_REQUESTS = "api/v1/utils/admin/search_requests/{}"
      +
      +__all__ = [
      +    "AuroraXAPIRequest",
      +    "AuroraXAPIResponse",
      +]
      +
      +
      +
      +

      Sub-modules

      +
      +
      pyaurorax.search.api.classes
      +
      +

      Class definitions used by the api submodule

      +
      +
      +
      +
      +
      +
      +
      +
      +

      Classes

      +
      +
      +class AuroraXAPIRequest +(aurorax_obj, url: str, method: Literal['get', 'post', 'put', 'delete', 'patch'], params: Dict = {}, body: Union[List, Dict] = {}, headers: Dict = {}, null_response: bool = False) +
      +
      +

      Class definition for an AuroraX API request

      +

      Attributes

      +
      +
      url : str
      +
      API endpoint URL for the request
      +
      method : str
      +
      the HTTP method to use. Valid values are: get, post, put, delete, patch
      +
      params : Dict
      +
      URL parameters to send in the request, defaults to {}
      +
      body : Dict
      +
      the body of the request (ie. post data), defaults to {}
      +
      headers : Dict
      +
      any headers to send as part of the request (in addition to the default ones), defaults to {}
      +
      null_response : bool
      +
      signifies if we expect a response from the API that has no body/data in it (ie. +requests to upload data that respond with just a 202 status code), defaults to False
      +
      +
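A hedged sketch of issuing a request directly with this class, assuming an existing PyAuroraX object and that the full endpoint URL is formed by joining `api_base_url` with one of the URL suffix constants listed in this module (most users will never need to do this; the higher-level search functions wrap it):

```python
import pyaurorax
from pyaurorax.search.api import AuroraXAPIRequest

aurorax = pyaurorax.PyAuroraX()

# assumption: the endpoint URL is the base URL plus the data sources suffix
url = "%s/api/v1/data_sources" % (aurorax.api_base_url)

req = AuroraXAPIRequest(aurorax, url, "get")
res = req.execute()
print(res.status_code)  # HTTP status code of the response
print(res.data)         # parsed JSON body, per AuroraXAPIResponse
```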
      + +Expand source code + +
      class AuroraXAPIRequest:
      +    """
      +    Class definition for an AuroraX API request
      +
      +    Attributes:
      +        url (str): API endpoint URL for the request
      +        method (str): the HTTP method to use. Valid values are: `get`, `post`, `put`, `delete`, `patch`
      +        params (Dict): URL parameters to send in the request, defaults to `{}`
      +        body (Dict): the body of the request (ie. post data), defaults to `{}`
      +        headers (Dict): any headers to send as part of the request (in addition to the default ones), defaults to `{}`
      +        null_response (bool): signifies if we expect a response from the API that has no body/data in it (ie. 
      +            requests to upload data that respond with just a 202 status code), defaults to `False`
      +    """
      +
      +    __API_KEY_HEADER_NAME = "x-aurorax-api-key"
      +
      +    def __init__(self,
      +                 aurorax_obj,
      +                 url: str,
      +                 method: Literal["get", "post", "put", "delete", "patch"],
      +                 params: Dict = {},
      +                 body: Union[List, Dict] = {},
      +                 headers: Dict = {},
      +                 null_response: bool = False):
      +        self.__aurorax_obj = aurorax_obj
      +        self.url = url
      +        self.method = method
      +        self.params = params
      +        self.body = body
      +        self.headers = headers
      +        self.null_response = null_response
      +
      +    def __json_converter(self, o):
      +        if (isinstance(o, datetime.datetime) is True):
      +            return str(o)
      +
      +    def __merge_headers(self):
      +        # set initial headers
      +        all_headers = self.__aurorax_obj.api_headers
      +
      +        # add headers passed into the class
      +        for key, value in self.headers.items():
      +            all_headers[key.lower()] = value
      +
      +        # add api key
      +        if self.__aurorax_obj.api_key:
      +            all_headers[self.__API_KEY_HEADER_NAME] = self.__aurorax_obj.api_key
      +
      +        # return
      +        return all_headers
      +
      +    def execute(self) -> AuroraXAPIResponse:
      +        """
      +        Execute an AuroraX API request
      +    
      +        Returns:
+            a `pyaurorax.search.api.AuroraXAPIResponse` object
      +
      +        Raises:
      +            pyaurorax.exceptions.AuroraXAPIError: error during API call
      +        """
      +        # sanitize data
      +        body_santized = json.dumps(self.body, default=self.__json_converter)
      +
      +        # make request
      +        try:
      +            req = requests.request(self.method,
      +                                   self.url,
      +                                   headers=self.__merge_headers(),
      +                                   params=self.params,
      +                                   data=body_santized,
      +                                   timeout=self.__aurorax_obj.api_timeout)
      +        except requests.exceptions.Timeout:
      +            raise AuroraXAPIError("API request timeout reached") from None
      +
      +        # check if authorization worked (raised by API or Nginx)
      +        if (req.status_code == 401):
      +            if (req.headers["Content-Type"] == "application/json"):
      +                if ("error_message" in req.json()):
      +                    # this will be an error message that the API meant to send
      +                    raise AuroraXUnauthorizedError("API error code %d: %s" % (req.status_code, req.json()["error_message"]))
      +                else:
      +                    raise AuroraXUnauthorizedError("API error code 401: unauthorized")
      +            else:
      +                raise AuroraXUnauthorizedError("API error code 401: unauthorized")
      +
      +        # check for 404 error (raised by API or by Nginx)
      +        if (req.status_code == 404):
      +            if (req.headers["Content-Type"] == "application/json"):
      +                if ("error_message" in req.json()):
      +                    # this will be an error message that the API meant to send
      +                    raise AuroraXAPIError("API error code %d: %s" % (req.status_code, req.json()["error_message"]))
      +                else:
      +                    # this will likely be a 404 from the API
      +                    raise AuroraXAPIError("API error code 404: not found")
      +            else:
      +                raise AuroraXAPIError("API error code 404: not found")
      +
      +        # check for server error
      +        if (req.status_code == 500):
      +            response_json = req.json()
      +            if ("error_message" in response_json):
      +                raise AuroraXAPIError("API error code %d: %s" % (req.status_code, response_json["error_message"]))
      +            else:
      +                raise AuroraXAPIError("API error code %d: %s" % (req.status_code, response_json))
      +
      +        # check for maintenance mode error
      +        if (req.status_code == 502):
      +            raise AuroraXAPIError("API error code %d: API inaccessible, bad gateway" % (req.status_code))
      +
      +        # check for maintenance mode error
      +        if (req.status_code == 503):
      +            response_json = req.json()
      +            if ("maintenance mode" in response_json["error_message"].lower()):
      +                raise AuroraXMaintenanceError(response_json["error_message"])
      +            else:
      +                raise AuroraXAPIError("API error code %d: %s" % (req.status_code, response_json["error_message"]))
      +
      +        # check content type
      +        if (self.null_response is False):
      +            if (req.headers["Content-Type"] == "application/json"):
      +                if (len(req.content) == 0):
      +                    raise AuroraXAPIError("API error code %d: no response received" % (req.status_code))
      +                else:
      +                    response_data = req.json()
      +            else:
      +                raise AuroraXAPIError("API error code %d: %s" % (req.status_code, req.content.decode()))
      +        else:
      +            if (req.status_code in [200, 201, 202, 204]):
      +                response_data = None
      +            else:
      +                response_data = req.json()
      +
      +        # create response object
      +        res = AuroraXAPIResponse(request=req, data=response_data, status_code=req.status_code)
      +
      +        # return
      +        return res
      +
      +    def __str__(self) -> str:
      +        return self.__repr__()
      +
      +    def __repr__(self) -> str:
      +        return f"AuroraXAPIRequest(method='{self.method}', url='{self.url}')"
      +
      +

      Methods

      +
      +
      +def execute(self) ‑> AuroraXAPIResponse +
      +
      +

      Execute an AuroraX API request

      +

      Returns

      +

      an AuroraXAPIResponse object

      +

      Raises

      +
      +
      AuroraXAPIError
      +
      error during API call
      +
      +
      + +Expand source code + +
      def execute(self) -> AuroraXAPIResponse:
      +    """
      +    Execute an AuroraX API request
      +
      +    Returns:
+        a `pyaurorax.search.api.AuroraXAPIResponse` object
      +
      +    Raises:
      +        pyaurorax.exceptions.AuroraXAPIError: error during API call
      +    """
      +    # sanitize data
      +    body_santized = json.dumps(self.body, default=self.__json_converter)
      +
      +    # make request
      +    try:
      +        req = requests.request(self.method,
      +                               self.url,
      +                               headers=self.__merge_headers(),
      +                               params=self.params,
      +                               data=body_santized,
      +                               timeout=self.__aurorax_obj.api_timeout)
      +    except requests.exceptions.Timeout:
      +        raise AuroraXAPIError("API request timeout reached") from None
      +
      +    # check if authorization worked (raised by API or Nginx)
      +    if (req.status_code == 401):
      +        if (req.headers["Content-Type"] == "application/json"):
      +            if ("error_message" in req.json()):
      +                # this will be an error message that the API meant to send
      +                raise AuroraXUnauthorizedError("API error code %d: %s" % (req.status_code, req.json()["error_message"]))
      +            else:
      +                raise AuroraXUnauthorizedError("API error code 401: unauthorized")
      +        else:
      +            raise AuroraXUnauthorizedError("API error code 401: unauthorized")
      +
      +    # check for 404 error (raised by API or by Nginx)
      +    if (req.status_code == 404):
      +        if (req.headers["Content-Type"] == "application/json"):
      +            if ("error_message" in req.json()):
      +                # this will be an error message that the API meant to send
      +                raise AuroraXAPIError("API error code %d: %s" % (req.status_code, req.json()["error_message"]))
      +            else:
      +                # this will likely be a 404 from the API
      +                raise AuroraXAPIError("API error code 404: not found")
      +        else:
      +            raise AuroraXAPIError("API error code 404: not found")
      +
      +    # check for server error
      +    if (req.status_code == 500):
      +        response_json = req.json()
      +        if ("error_message" in response_json):
      +            raise AuroraXAPIError("API error code %d: %s" % (req.status_code, response_json["error_message"]))
      +        else:
      +            raise AuroraXAPIError("API error code %d: %s" % (req.status_code, response_json))
      +
      +    # check for maintenance mode error
      +    if (req.status_code == 502):
      +        raise AuroraXAPIError("API error code %d: API inaccessible, bad gateway" % (req.status_code))
      +
      +    # check for maintenance mode error
      +    if (req.status_code == 503):
      +        response_json = req.json()
      +        if ("maintenance mode" in response_json["error_message"].lower()):
      +            raise AuroraXMaintenanceError(response_json["error_message"])
      +        else:
      +            raise AuroraXAPIError("API error code %d: %s" % (req.status_code, response_json["error_message"]))
      +
      +    # check content type
      +    if (self.null_response is False):
      +        if (req.headers["Content-Type"] == "application/json"):
      +            if (len(req.content) == 0):
      +                raise AuroraXAPIError("API error code %d: no response received" % (req.status_code))
      +            else:
      +                response_data = req.json()
      +        else:
      +            raise AuroraXAPIError("API error code %d: %s" % (req.status_code, req.content.decode()))
      +    else:
      +        if (req.status_code in [200, 201, 202, 204]):
      +            response_data = None
      +        else:
      +            response_data = req.json()
      +
      +    # create response object
      +    res = AuroraXAPIResponse(request=req, data=response_data, status_code=req.status_code)
      +
      +    # return
      +    return res
      +
      +
      +
      +
      +
      +class AuroraXAPIResponse +(request: Any, data: Any, status_code: int) +
      +
      +

      Class definition for an AuroraX API response

      +

      Attributes

      +
      +
      request : Any
      +
      the request object
      +
      data : Any
      +
      the data received as part of the request
      +
      status_code : int
      +
      the HTTP status code received when making the request
      +
      +
      + +Expand source code + +
      class AuroraXAPIResponse:
      +    """
      +    Class definition for an AuroraX API response
      +
      +    Attributes:
      +        request (Any): the request object
      +        data (Any): the data received as part of the request
      +        status_code (int): the HTTP status code received when making the request
      +    """
      +
      +    def __init__(self, request: Any, data: Any, status_code: int):
      +        self.request = request
      +        self.data = data
      +        self.status_code = status_code
      +
      +    def __str__(self) -> str:
      +        return self.__repr__()
      +
      +    def __repr__(self) -> str:
      +        return f"AuroraXAPIResponse [{self.status_code}]"
      +
      +
      +
      +
      +
      + +
+
\ No newline at end of file
diff --git a/docs/code/pyaurorax_api_reference/pyaurorax/location.html b/docs/code/pyaurorax_api_reference/pyaurorax/search/availability/classes/availability_result.html
similarity index 60%
rename from docs/code/pyaurorax_api_reference/pyaurorax/location.html
rename to docs/code/pyaurorax_api_reference/pyaurorax/search/availability/classes/availability_result.html
index 2e100a4..ef5563e 100644
--- a/docs/code/pyaurorax_api_reference/pyaurorax/location.html
+++ b/docs/code/pyaurorax_api_reference/pyaurorax/search/availability/classes/availability_result.html
@@ -4,9 +4,8 @@
-pyaurorax.location API documentation
+pyaurorax.search.availability.classes.availability_result API documentation
@@ -20,65 +19,52 @@
      -

      Module pyaurorax.location

      +

      Module pyaurorax.search.availability.classes.availability_result

      -

      The Location module provides a class used throughout the PyAuroraX -library to manage lat/lon positions of different things.

      +

      Class definition for data availability information

      Expand source code -
      """
      -The Location module provides a class used throughout the PyAuroraX
      -library to manage lat/lon positions of different things.
      +
      # Copyright 2024 University of Calgary
      +#
      +# Licensed under the Apache License, Version 2.0 (the "License");
      +# you may not use this file except in compliance with the License.
      +# You may obtain a copy of the License at
      +#
      +#     http://www.apache.org/licenses/LICENSE-2.0
      +#
      +# Unless required by applicable law or agreed to in writing, software
      +# distributed under the License is distributed on an "AS IS" BASIS,
      +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +# See the License for the specific language governing permissions and
      +# limitations under the License.
      +"""
      +Class definition for data availability information
       """
       
      -from pydantic import BaseModel, validator
      -from typing import Union, Optional
      +from dataclasses import dataclass
      +from typing import Dict, Optional
      +from ...sources.classes.data_source import DataSource
       
       
      -class Location(BaseModel):
      +@dataclass
      +class AvailabilityResult:
           """
      -    Class representing an AuroraX location (ie. geographic coordinates,
      -    GSM coordinates, northern/southern B-trace magnetic footprints)
      -
      -    The numbers are in decimal degrees format and range from -90 to 90
      -    for latitude and -180 to 180 for longitude.
      +    Class definition for data availability information
       
           Attributes:
      -        lat: latitude value
      -        lon: longitude value
      +        data_source (pyaurorax.search.DataSource): 
      +            the data source that the records are associated with
      +        available_ephemeris (Dict): 
      +            the ephemeris availability information
      +        available_data_products (Dict): 
      +            the data product availability information
           """
      -    lat: Optional[Union[float, None]] = None
      -    lon: Optional[Union[float, None]] = None
      -
      -    @validator("lon")
      -    def __both_must_be_none_or_number(cls, v, values):  # pylint: disable=unused-private-member
      -        # check to make sure the values are both numbers or None types. We don't
      -        # allow a Location object to have the latitude set and not the
      -        # longitude (or vice-versa)
      -        if (v and not values["lat"]) or (values["lat"] and not v):
      -            raise ValueError("Latitude and longitude must both be numbers, or both be None")
      -        return v
      -
      -    def __str__(self) -> str:
      -        """
      -        String method
      -
      -        Returns:
      -            string format of Location object
      -        """
      -        return self.__repr__()
      -
      -    def __repr__(self) -> str:
      -        """
      -        Object representation
      -
      -        Returns:
      -            object representation of Location object
      -        """
      -        return "%s(lat=%s, lon=%s)" % (self.__class__.__name__, str(self.lat), str(self.lon))
+    data_source: DataSource
+    available_data_products: Optional[Dict] = None
+    available_ephemeris: Optional[Dict] = None
      @@ -90,82 +76,53 @@

      Module pyaurorax.location

      Classes

      -
      -class Location -(**data: Any) +
      +class AvailabilityResult +(data_source: DataSource, available_data_products: Optional[Dict] = None, available_ephemeris: Optional[Dict] = None)
      -

      Class representing an AuroraX location (ie. geographic coordinates, -GSM coordinates, northern/southern B-trace magnetic footprints)

      -

      The numbers are in decimal degrees format and range from -90 to 90 -for latitude and -180 to 180 for longitude.

      +

      Class definition for data availability information

      Attributes

      -
      lat
      -
      latitude value
      -
      lon
      -
      longitude value
      -
      -

      Create a new model by parsing and validating input data from keyword arguments.

      -

      Raises ValidationError if the input data cannot be parsed to form a valid model.

      +
      data_source : DataSource
      +
      the data source that the records are associated with
      +
      available_ephemeris : Dict
      +
      the ephemeris availability information
      +
      available_data_products : Dict
      +
      the data product availability information
      +
      Expand source code -
      class Location(BaseModel):
      +
      @dataclass
      +class AvailabilityResult:
           """
      -    Class representing an AuroraX location (ie. geographic coordinates,
      -    GSM coordinates, northern/southern B-trace magnetic footprints)
      -
      -    The numbers are in decimal degrees format and range from -90 to 90
      -    for latitude and -180 to 180 for longitude.
      +    Class definition for data availability information
       
           Attributes:
      -        lat: latitude value
      -        lon: longitude value
      +        data_source (pyaurorax.search.DataSource): 
      +            the data source that the records are associated with
      +        available_ephemeris (Dict): 
      +            the ephemeris availability information
      +        available_data_products (Dict): 
      +            the data product availability information
           """
      -    lat: Optional[Union[float, None]] = None
      -    lon: Optional[Union[float, None]] = None
      -
      -    @validator("lon")
      -    def __both_must_be_none_or_number(cls, v, values):  # pylint: disable=unused-private-member
      -        # check to make sure the values are both numbers or None types. We don't
      -        # allow a Location object to have the latitude set and not the
      -        # longitude (or vice-versa)
      -        if (v and not values["lat"]) or (values["lat"] and not v):
      -            raise ValueError("Latitude and longitude must both be numbers, or both be None")
      -        return v
      -
      -    def __str__(self) -> str:
      -        """
      -        String method
      -
      -        Returns:
      -            string format of Location object
      -        """
      -        return self.__repr__()
      -
      -    def __repr__(self) -> str:
      -        """
      -        Object representation
      -
      -        Returns:
      -            object representation of Location object
      -        """
      -        return "%s(lat=%s, lon=%s)" % (self.__class__.__name__, str(self.lat), str(self.lon))
+    data_source: DataSource
+    available_data_products: Optional[Dict] = None
+    available_ephemeris: Optional[Dict] = None
      -

      Ancestors

      -
        -
      • pydantic.main.BaseModel
      • -
      • pydantic.utils.Representation
      • -

      Class variables

      -
      var lat : Optional[float]
      +
      var available_data_products : Optional[Dict]
      +
      +
      +
      +
      var available_ephemeris : Optional[Dict]
      -
      var lon : Optional[float]
      +
      var data_sourceDataSource
      @@ -209,7 +166,7 @@

      Class variables

      if (query) search(query); function search(query) { -const url = '../doc-search.html#' + encodeURIComponent(query); +const url = '../../../../doc-search.html#' + encodeURIComponent(query); new tingle.modal({ cssClass: ['modal-dialog'], onClose: () => { @@ -228,16 +185,17 @@

      Index

      • Super-module

      • Classes

        diff --git a/docs/code/pyaurorax_api_reference/pyaurorax/sources/classes/index.html b/docs/code/pyaurorax_api_reference/pyaurorax/search/availability/classes/index.html similarity index 82% rename from docs/code/pyaurorax_api_reference/pyaurorax/sources/classes/index.html rename to docs/code/pyaurorax_api_reference/pyaurorax/search/availability/classes/index.html index a46f0f4..d976a71 100644 --- a/docs/code/pyaurorax_api_reference/pyaurorax/sources/classes/index.html +++ b/docs/code/pyaurorax_api_reference/pyaurorax/search/availability/classes/index.html @@ -4,8 +4,8 @@ -pyaurorax.sources.classes API documentation - +pyaurorax.search.availability.classes API documentation + @@ -19,36 +19,38 @@
        -

        Module pyaurorax.sources.classes

        +

        Module pyaurorax.search.availability.classes

        -

Separated classes and functions used by the sources module.

        -

        Note that these classes and variables are all imported higher up at the -top of the sources module. They can be referenced from there instead -of digging in deeper to these submodules.

        +

        Class definitions used by the availability submodule

        Expand source code -
        """
        -Separted classes and functions used by the sources module.
        -
        -Note that these classes and variables are all imported higher up at the
        -top of the sources module. They can be referenced from there instead
        -of digging in deeper to these submodules.
        +
        # Copyright 2024 University of Calgary
        +#
        +# Licensed under the Apache License, Version 2.0 (the "License");
        +# you may not use this file except in compliance with the License.
        +# You may obtain a copy of the License at
        +#
        +#     http://www.apache.org/licenses/LICENSE-2.0
        +#
        +# Unless required by applicable law or agreed to in writing, software
        +# distributed under the License is distributed on an "AS IS" BASIS,
        +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
        +# See the License for the specific language governing permissions and
        +# limitations under the License.
        +"""
        +Class definitions used by the `availability` submodule
         """

        Sub-modules

        -
        pyaurorax.sources.classes.data_source
        +
        pyaurorax.search.availability.classes.availability_result
        -

        Class definition for a data source

        -
        -
        pyaurorax.sources.classes.data_source_stats
        -
        -

Class definition for statistics about a data source

        +

        Class definition for data availability information

        @@ -94,7 +96,7 @@

        Sub-modules

        if (query) search(query); function search(query) { -const url = '../../../doc-search.html#' + encodeURIComponent(query); +const url = '../../../../doc-search.html#' + encodeURIComponent(query); new tingle.modal({ cssClass: ['modal-dialog'], onClose: () => { @@ -113,13 +115,12 @@

        Index

        diff --git a/docs/code/pyaurorax_api_reference/pyaurorax/search/availability/index.html b/docs/code/pyaurorax_api_reference/pyaurorax/search/availability/index.html new file mode 100644 index 0000000..86485b4 --- /dev/null +++ b/docs/code/pyaurorax_api_reference/pyaurorax/search/availability/index.html @@ -0,0 +1,604 @@ + + + + + + +pyaurorax.search.availability API documentation + + + + + + + + + + + +
        +
        +
        +

        Module pyaurorax.search.availability

        +
        +
        +

        Retrieve availability information about data in the AuroraX search engine.

        +
        + +Expand source code + +
        # Copyright 2024 University of Calgary
        +#
        +# Licensed under the Apache License, Version 2.0 (the "License");
        +# you may not use this file except in compliance with the License.
        +# You may obtain a copy of the License at
        +#
        +#     http://www.apache.org/licenses/LICENSE-2.0
        +#
        +# Unless required by applicable law or agreed to in writing, software
        +# distributed under the License is distributed on an "AS IS" BASIS,
        +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
        +# See the License for the specific language governing permissions and
        +# limitations under the License.
        +"""
        +Retrieve availability information about data in the AuroraX search engine.
        +"""
        +
        +import datetime
        +from typing import Optional, List
        +from ._availability import ephemeris as func_ephemeris
        +from ._availability import data_products as func_data_products
        +from .classes.availability_result import AvailabilityResult
        +from ..sources.classes.data_source import FORMAT_DEFAULT
        +
        +__all__ = ["AvailabilityManager", "AvailabilityResult"]
        +
        +
        +class AvailabilityManager:
        +    """
        +    The AvailabilityManager object is initialized within every PyAuroraX object. It acts as a way to access 
        +    the submodules and carry over configuration information in the super class.
        +    """
        +
        +    def __init__(self, aurorax_obj):
        +        self.__aurorax_obj = aurorax_obj
        +
        +    def ephemeris(self,
        +                  start: datetime.date,
        +                  end: datetime.date,
        +                  program: Optional[str] = None,
        +                  platform: Optional[str] = None,
        +                  instrument_type: Optional[str] = None,
        +                  source_type: Optional[str] = None,
        +                  owner: Optional[str] = None,
        +                  format: str = FORMAT_DEFAULT,
        +                  slow: bool = False) -> List[AvailabilityResult]:
        +        """
        +        Retrieve information about the number of existing ephemeris records
        +
        +        Args:
        +            start (datetime.date): 
        +                Start date to retrieve availability info for (inclusive)
        +            end (datetime.date): 
        +                End date to retrieve availability info for (inclusive)
        +            program (str): 
        +                Program name to filter sources by, defaults to `None`
        +            platform (str): 
        +                Platform name to filter sources by, defaults to `None`
        +            instrument_type (str): 
        +                Instrument type to filter sources by, defaults to `None`
        +            source_type (str): 
        +                The data source type to filter for, defaults to `None`. Options are in 
        +                the pyaurorax.search.sources module, or at the top level using the 
        +                pyaurorax.search.SOURCE_TYPE_* variables.
        +            owner (str): 
        +                Owner email address to filter sources by, defaults to `None`
        +            format (str): 
        +                The format of the data sources returned, defaults to `FORMAT_FULL_RECORD`. 
        +                Other options are in the pyaurorax.search.sources module, or at the top level using 
        +                the pyaurorax.search.FORMAT_* variables.
        +            slow (bool): 
        +                Query the data using a slower, but more accurate method, defaults to `False`
        +
        +        Returns:
        +            Ephemeris availability information matching the requested parameters
        +        
        +        Raises:
        +            pyaurorax.exceptions.AuroraXAPIError: An API error was encountered
        +        """
        +        return func_ephemeris(self.__aurorax_obj, start, end, program, platform, instrument_type, source_type, owner, format, slow)
        +
        +    def data_products(self,
        +                      start: datetime.date,
        +                      end: datetime.date,
        +                      program: Optional[str] = None,
        +                      platform: Optional[str] = None,
        +                      instrument_type: Optional[str] = None,
        +                      source_type: Optional[str] = None,
        +                      owner: Optional[str] = None,
        +                      format: Optional[str] = FORMAT_DEFAULT,
        +                      slow: Optional[bool] = False) -> List[AvailabilityResult]:
        +        """
        +        Retrieve information about the number of existing data product records
        +
        +        Args:
        +            start (datetime.date): 
        +                Start date to retrieve availability info for (inclusive)
        +            end (datetime.date): 
        +                End date to retrieve availability info for (inclusive)
        +            program (str): 
        +                Program name to filter sources by, defaults to `None`
        +            platform (str): 
        +                Platform name to filter sources by, defaults to `None`
        +            instrument_type (str): 
        +                Instrument type to filter sources by, defaults to `None`
        +            source_type (str): 
        +                The data source type to filter for, defaults to `None`. Options are in 
        +                the pyaurorax.search.sources module, or at the top level using the 
        +                pyaurorax.search.SOURCE_TYPE_* variables.
        +            owner (str): 
        +                Owner email address to filter sources by, defaults to `None`
        +            format (str): 
        +                The format of the data sources returned, defaults to `FORMAT_FULL_RECORD`. 
        +                Other options are in the pyaurorax.search.sources module, or at the top level using 
        +                the pyaurorax.search.FORMAT_* variables.
        +            slow (bool): 
        +                Query the data using a slower, but more accurate method, defaults to `False`
        +
        +        Returns:
        +            Data product availability information matching the requested parameters
        +
        +        Raises:
        +            pyaurorax.exceptions.AuroraXAPIError: An API error was encountered
        +        """
        +        return func_data_products(self.__aurorax_obj, start, end, program, platform, instrument_type, source_type, owner, format, slow)
        +
        +
        +
        +
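A short usage sketch for this manager, assuming it is reached through an instantiated PyAuroraX object as aurorax.search.availability (the program filter value is illustrative):

# Sketch only; the program name used for filtering is an assumed value.
import datetime
import pyaurorax

aurorax = pyaurorax.PyAuroraX()

results = aurorax.search.availability.ephemeris(
    start=datetime.date(2020, 1, 1),
    end=datetime.date(2020, 1, 7),
    program="swarm",                 # filter data sources by program
)

for r in results:
    # each item is an AvailabilityResult pairing a data source with its
    # per-day ephemeris record counts
    print(r.data_source.display_name, r.available_ephemeris)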

        Sub-modules

        +
        +
        pyaurorax.search.availability.classes
        +
        +

        Class definitions used by the availability submodule

        +
        +
        +
        +
        +
        +
        +
        +
        +

        Classes

        +
        +
        +class AvailabilityManager +(aurorax_obj) +
        +
        +

        The AvailabilityManager object is initialized within every PyAuroraX object. It acts as a way to access +the submodules and carry over configuration information in the super class.

        +
        + +Expand source code + +
        class AvailabilityManager:
        +    """
        +    The AvailabilityManager object is initialized within every PyAuroraX object. It acts as a way to access 
        +    the submodules and carry over configuration information in the super class.
        +    """
        +
        +    def __init__(self, aurorax_obj):
        +        self.__aurorax_obj = aurorax_obj
        +
        +    def ephemeris(self,
        +                  start: datetime.date,
        +                  end: datetime.date,
        +                  program: Optional[str] = None,
        +                  platform: Optional[str] = None,
        +                  instrument_type: Optional[str] = None,
        +                  source_type: Optional[str] = None,
        +                  owner: Optional[str] = None,
        +                  format: str = FORMAT_DEFAULT,
        +                  slow: bool = False) -> List[AvailabilityResult]:
        +        """
        +        Retrieve information about the number of existing ephemeris records
        +
        +        Args:
        +            start (datetime.date): 
        +                Start date to retrieve availability info for (inclusive)
        +            end (datetime.date): 
        +                End date to retrieve availability info for (inclusive)
        +            program (str): 
        +                Program name to filter sources by, defaults to `None`
        +            platform (str): 
        +                Platform name to filter sources by, defaults to `None`
        +            instrument_type (str): 
        +                Instrument type to filter sources by, defaults to `None`
        +            source_type (str): 
        +                The data source type to filter for, defaults to `None`. Options are in 
        +                the pyaurorax.search.sources module, or at the top level using the 
        +                pyaurorax.search.SOURCE_TYPE_* variables.
        +            owner (str): 
        +                Owner email address to filter sources by, defaults to `None`
        +            format (str): 
        +                The format of the data sources returned, defaults to `FORMAT_FULL_RECORD`. 
        +                Other options are in the pyaurorax.search.sources module, or at the top level using 
        +                the pyaurorax.search.FORMAT_* variables.
        +            slow (bool): 
        +                Query the data using a slower, but more accurate method, defaults to `False`
        +
        +        Returns:
        +            Ephemeris availability information matching the requested parameters
        +        
        +        Raises:
        +            pyaurorax.exceptions.AuroraXAPIError: An API error was encountered
        +        """
        +        return func_ephemeris(self.__aurorax_obj, start, end, program, platform, instrument_type, source_type, owner, format, slow)
        +
        +    def data_products(self,
        +                      start: datetime.date,
        +                      end: datetime.date,
        +                      program: Optional[str] = None,
        +                      platform: Optional[str] = None,
        +                      instrument_type: Optional[str] = None,
        +                      source_type: Optional[str] = None,
        +                      owner: Optional[str] = None,
        +                      format: Optional[str] = FORMAT_DEFAULT,
        +                      slow: Optional[bool] = False) -> List[AvailabilityResult]:
        +        """
        +        Retrieve information about the number of existing data product records
        +
        +        Args:
        +            start (datetime.date): 
        +                Start date to retrieve availability info for (inclusive)
        +            end (datetime.date): 
        +                End date to retrieve availability info for (inclusive)
        +            program (str): 
        +                Program name to filter sources by, defaults to `None`
        +            platform (str): 
        +                Platform name to filter sources by, defaults to `None`
        +            instrument_type (str): 
        +                Instrument type to filter sources by, defaults to `None`
        +            source_type (str): 
        +                The data source type to filter for, defaults to `None`. Options are in 
        +                the pyaurorax.search.sources module, or at the top level using the 
        +                pyaurorax.search.SOURCE_TYPE_* variables.
        +            owner (str): 
        +                Owner email address to filter sources by, defaults to `None`
        +            format (str): 
        +                The format of the data sources returned, defaults to `FORMAT_FULL_RECORD`. 
        +                Other options are in the pyaurorax.search.sources module, or at the top level using 
        +                the pyaurorax.search.FORMAT_* variables.
        +            slow (bool): 
        +                Query the data using a slower, but more accurate method, defaults to `False`
        +
        +        Returns:
        +            Data product availability information matching the requested parameters
        +
        +        Raises:
        +            pyaurorax.exceptions.AuroraXAPIError: An API error was encountered
        +        """
        +        return func_data_products(self.__aurorax_obj, start, end, program, platform, instrument_type, source_type, owner, format, slow)
        +
        +

        Methods

        +
        +
        +def data_products(self, start: datetime.date, end: datetime.date, program: Optional[str] = None, platform: Optional[str] = None, instrument_type: Optional[str] = None, source_type: Optional[str] = None, owner: Optional[str] = None, format: Optional[str] = 'basic_info', slow: Optional[bool] = False) ‑> List[AvailabilityResult] +
        +
        +

        Retrieve information about the number of existing data product records

        +

        Args

        +
        +
        start : datetime.date
        +
        Start date to retrieve availability info for (inclusive)
        +
        end : datetime.date
        +
        End date to retrieve availability info for (inclusive)
        +
        program : str
        +
        Program name to filter sources by, defaults to None
        +
        platform : str
        +
        Platform name to filter sources by, defaults to None
        +
        instrument_type : str
        +
        Instrument type to filter sources by, defaults to None
        +
        source_type : str
        +
        The data source type to filter for, defaults to None. Options are in +the pyaurorax.search.sources module, or at the top level using the +pyaurorax.search.SOURCE_TYPE_* variables.
        +
        owner : str
        +
        Owner email address to filter sources by, defaults to None
        +
        format : str
        +
        The format of the data sources returned, defaults to FORMAT_FULL_RECORD. +Other options are in the pyaurorax.search.sources module, or at the top level using +the pyaurorax.search.FORMAT_* variables.
        +
        slow : bool
        +
        Query the data using a slower, but more accurate method, defaults to False
        +
        +

        Returns

        +

        Data product availability information matching the requested parameters

        +

        Raises

        +
        +
        AuroraXAPIError
        +
        An API error was encountered
        +
        +
        + +Expand source code + +
        def data_products(self,
        +                  start: datetime.date,
        +                  end: datetime.date,
        +                  program: Optional[str] = None,
        +                  platform: Optional[str] = None,
        +                  instrument_type: Optional[str] = None,
        +                  source_type: Optional[str] = None,
        +                  owner: Optional[str] = None,
        +                  format: Optional[str] = FORMAT_DEFAULT,
        +                  slow: Optional[bool] = False) -> List[AvailabilityResult]:
        +    """
        +    Retrieve information about the number of existing data product records
        +
        +    Args:
        +        start (datetime.date): 
        +            Start date to retrieve availability info for (inclusive)
        +        end (datetime.date): 
        +            End date to retrieve availability info for (inclusive)
        +        program (str): 
        +            Program name to filter sources by, defaults to `None`
        +        platform (str): 
        +            Platform name to filter sources by, defaults to `None`
        +        instrument_type (str): 
        +            Instrument type to filter sources by, defaults to `None`
        +        source_type (str): 
        +            The data source type to filter for, defaults to `None`. Options are in 
        +            the pyaurorax.search.sources module, or at the top level using the 
        +            pyaurorax.search.SOURCE_TYPE_* variables.
        +        owner (str): 
        +            Owner email address to filter sources by, defaults to `None`
        +        format (str): 
        +            The format of the data sources returned, defaults to `FORMAT_FULL_RECORD`. 
        +            Other options are in the pyaurorax.search.sources module, or at the top level using 
        +            the pyaurorax.search.FORMAT_* variables.
        +        slow (bool): 
        +            Query the data using a slower, but more accurate method, defaults to `False`
        +
        +    Returns:
        +        Data product availability information matching the requested parameters
        +
        +    Raises:
        +        pyaurorax.exceptions.AuroraXAPIError: An API error was encountered
        +    """
        +    return func_data_products(self.__aurorax_obj, start, end, program, platform, instrument_type, source_type, owner, format, slow)
        +
        +
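A comparable sketch for the data product side, again assuming access through aurorax.search.availability; the program name and the choice of FORMAT_FULL_RECORD are illustrative:

# Sketch only; filter values are assumptions.
import datetime
import pyaurorax

aurorax = pyaurorax.PyAuroraX()

results = aurorax.search.availability.data_products(
    start=datetime.date(2020, 1, 1),
    end=datetime.date(2020, 1, 31),
    program="themis-asi",                          # assumed program name
    format=pyaurorax.search.FORMAT_FULL_RECORD,    # richer data source records
    slow=False,                                    # keep the faster counting method
)

for r in results:
    print(r.data_source.identifier, r.available_data_products)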
        +
        +def ephemeris(self, start: datetime.date, end: datetime.date, program: Optional[str] = None, platform: Optional[str] = None, instrument_type: Optional[str] = None, source_type: Optional[str] = None, owner: Optional[str] = None, format: str = 'basic_info', slow: bool = False) ‑> List[AvailabilityResult] +
        +
        +

        Retrieve information about the number of existing ephemeris records

        +

        Args

        +
        +
        start : datetime.date
        +
        Start date to retrieve availability info for (inclusive)
        +
        end : datetime.date
        +
        End date to retrieve availability info for (inclusive)
        +
        program : str
        +
        Program name to filter sources by, defaults to None
        +
        platform : str
        +
        Platform name to filter sources by, defaults to None
        +
        instrument_type : str
        +
        Instrument type to filter sources by, defaults to None
        +
        source_type : str
        +
        The data source type to filter for, defaults to None. Options are in +the pyaurorax.search.sources module, or at the top level using the +pyaurorax.search.SOURCE_TYPE_* variables.
        +
        owner : str
        +
        Owner email address to filter sources by, defaults to None
        +
        format : str
        +
        The format of the data sources returned, defaults to FORMAT_FULL_RECORD. +Other options are in the pyaurorax.search.sources module, or at the top level using +the pyaurorax.search.FORMAT_* variables.
        +
        slow : bool
        +
        Query the data using a slower, but more accurate method, defaults to False
        +
        +

        Returns

        +

        Ephemeris availability information matching the requested parameters

        +

        Raises

        +
        +
        AuroraXAPIError
        +
        An API error was encountered
        +
        +
        + +Expand source code + +
        def ephemeris(self,
        +              start: datetime.date,
        +              end: datetime.date,
        +              program: Optional[str] = None,
        +              platform: Optional[str] = None,
        +              instrument_type: Optional[str] = None,
        +              source_type: Optional[str] = None,
        +              owner: Optional[str] = None,
        +              format: str = FORMAT_DEFAULT,
        +              slow: bool = False) -> List[AvailabilityResult]:
        +    """
        +    Retrieve information about the number of existing ephemeris records
        +
        +    Args:
        +        start (datetime.date): 
        +            Start date to retrieve availability info for (inclusive)
        +        end (datetime.date): 
        +            End date to retrieve availability info for (inclusive)
        +        program (str): 
        +            Program name to filter sources by, defaults to `None`
        +        platform (str): 
        +            Platform name to filter sources by, defaults to `None`
        +        instrument_type (str): 
        +            Instrument type to filter sources by, defaults to `None`
        +        source_type (str): 
        +            The data source type to filter for, defaults to `None`. Options are in 
        +            the pyaurorax.search.sources module, or at the top level using the 
        +            pyaurorax.search.SOURCE_TYPE_* variables.
        +        owner (str): 
        +            Owner email address to filter sources by, defaults to `None`
        +        format (str): 
        +            The format of the data sources returned, defaults to `FORMAT_FULL_RECORD`. 
        +            Other options are in the pyaurorax.search.sources module, or at the top level using 
        +            the pyaurorax.search.FORMAT_* variables.
        +        slow (bool): 
        +            Query the data using a slower, but more accurate method, defaults to `False`
        +
        +    Returns:
        +        Ephemeris availability information matching the requested parameters
        +    
        +    Raises:
        +        pyaurorax.exceptions.AuroraXAPIError: An API error was encountered
        +    """
        +    return func_ephemeris(self.__aurorax_obj, start, end, program, platform, instrument_type, source_type, owner, format, slow)
        +
        +
        +
        +
        +
        +class AvailabilityResult +(data_source: DataSource, available_data_products: Optional[Dict] = None, available_ephemeris: Optional[Dict] = None) +
        +
        +

        Class definition for data availability information

        +

        Attributes

        +
        +
        data_source : DataSource
        +
        the data source that the records are associated with
        +
        available_ephemeris : Dict
        +
        the ephemeris availability information
        +
        available_data_products : Dict
        +
        the data product availability information
        +
        +
        + +Expand source code + +
        @dataclass
        +class AvailabilityResult:
        +    """
        +    Class definition for data availability information
        +
        +    Attributes:
        +        data_source (pyaurorax.search.DataSource): 
        +            the data source that the records are associated with
        +        available_ephemeris (Dict): 
        +            the ephemeris availability information
        +        available_data_products (Dict): 
        +            the data product availability information
        +    """
        +    data_source: DataSource
        +    available_data_products: Optional[Dict] = None
        +    available_ephemeris: Optional[Dict] = None
        +
        +

        Class variables

        +
        +
        var available_data_products : Optional[Dict]
        +
        +
        +
        +
        var available_ephemeris : Optional[Dict]
        +
        +
        +
        +
        var data_sourceDataSource
        +
        +
        +
        +
        +
        +
        +
        +
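Since AvailabilityResult is a plain dataclass, its fields can be read (or populated for testing) directly. A small sketch, where the DataSource keyword arguments and the shape of the availability dictionaries are illustrative assumptions:

# Sketch only; DataSource arguments and dictionary contents are assumed values.
from pyaurorax.search import DataSource
from pyaurorax.search.availability import AvailabilityResult

source = DataSource(program="swarm", platform="swarma", instrument_type="footprint")

result = AvailabilityResult(
    data_source=source,
    available_ephemeris={"2020-01-01": 1440},   # assumed date -> record count mapping
    available_data_products=None,               # not requested in this sketch
)

print(result.data_source.program)
print(result.available_ephemeris)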
        + +
+
+
+
\ No newline at end of file
diff --git a/docs/code/pyaurorax_api_reference/pyaurorax/conjunctions/classes/conjunction.html b/docs/code/pyaurorax_api_reference/pyaurorax/search/conjunctions/classes/conjunction.html
similarity index 61%
rename from docs/code/pyaurorax_api_reference/pyaurorax/conjunctions/classes/conjunction.html
rename to docs/code/pyaurorax_api_reference/pyaurorax/search/conjunctions/classes/conjunction.html
index 06fd736..0e23650 100644
--- a/docs/code/pyaurorax_api_reference/pyaurorax/conjunctions/classes/conjunction.html
+++ b/docs/code/pyaurorax_api_reference/pyaurorax/search/conjunctions/classes/conjunction.html
@@ -4,7 +4,7 @@
-pyaurorax.conjunctions.classes.conjunction API documentation
+pyaurorax.search.conjunctions.classes.conjunction API documentation
@@ -19,7 +19,7 @@
        -

        Module pyaurorax.conjunctions.classes.conjunction

        +

        Module pyaurorax.search.conjunctions.classes.conjunction

        Class definition for a conjunction

        @@ -27,26 +27,53 @@

        Module pyaurorax.conjunctions.classes.conjunction Expand source code -
        """
        +
        # Copyright 2024 University of Calgary
        +#
        +# Licensed under the Apache License, Version 2.0 (the "License");
        +# you may not use this file except in compliance with the License.
        +# You may obtain a copy of the License at
        +#
        +#     http://www.apache.org/licenses/LICENSE-2.0
        +#
        +# Unless required by applicable law or agreed to in writing, software
        +# distributed under the License is distributed on an "AS IS" BASIS,
        +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
        +# See the License for the specific language governing permissions and
        +# limitations under the License.
        +"""
         Class definition for a conjunction
         """
         
         import datetime
        -from pydantic import BaseModel
         from typing import Dict, List
         from ...sources import DataSource
         
        -# pdoc init
        -__pdoc__: Dict = {}
        +# conjunction type - north b-trace
        +CONJUNCTION_TYPE_NBTRACE: str = "nbtrace"
        +"""
        +Conjunction search 'conjunction_type' category for finding conjunctions using the north B-trace data
        +"""
         
        +# conjunction type - south b-trace
        +CONJUNCTION_TYPE_SBTRACE: str = "sbtrace"
        +"""
        +Conjunction search 'conjunction_type' category for finding conjunctions using the south B-trace data
        +"""
         
        -class Conjunction(BaseModel):
        +# conjunction type - geographic
        +CONJUNCTION_TYPE_GEOGRAPHIC: str = "geographic"
        +"""
        +Conjunction search 'conjunction_type' category for finding conjunctions using the geographic position data
        +"""
        +
        +
        +class Conjunction:
             """
             Conjunction object
         
             Attributes:
                 conjunction_type: the type of location data used when the
        -            conjunction was found (either be 'nbtrace' or 'sbtrace')
        +            conjunction was found (either 'nbtrace', 'sbtrace', or 'geographic')
                 start: start timestamp of the conjunction
                 end: end timestamp of the conjunction
                 data_sources: data sources in the conjunction
        @@ -55,48 +82,67 @@ 

        Module pyaurorax.conjunctions.classes.conjunction events: the sub-conjunctions that make up this over-arching conjunction (the conjunctions between each set of two data sources) + closest_epoch: timestamp for when data sources were closest + farthest_epoch: timestamp for when data sources were farthest """ - conjunction_type: str - start: datetime.datetime - end: datetime.datetime - data_sources: List[DataSource] - min_distance: float - max_distance: float - events: List[Dict] - def __str__(self) -> str: - """ - String method + def __init__( + self, + conjunction_type: str, + start: datetime.datetime, + end: datetime.datetime, + data_sources: List[DataSource], + min_distance: float, + max_distance: float, + events: List[Dict], + closest_epoch: datetime.datetime, + farthest_epoch: datetime.datetime, + ): + self.conjunction_type = conjunction_type + self.start = start + self.end = end + self.data_sources = data_sources + self.min_distance = min_distance + self.max_distance = max_distance + self.events = events + self.closest_epoch = closest_epoch + self.farthest_epoch = farthest_epoch - Returns: - string format of Conjunction object - """ + def __str__(self) -> str: return self.__repr__() def __repr__(self) -> str: - """ - Object representation - - Returns: - object representation of Conjunction object - """ - return f"Conjunction(start={repr(self.start)}, end={repr(self.end)}, " \ - f"min_distance={self.min_distance:.2f}, max_distance={self.max_distance:.2f}, " \ - "data_sources=[...], events=[...])"

        + return f"Conjunction(start={repr(self.start)}, end={repr(self.end)}, min_distance={self.min_distance:.2f}, " \ + f"max_distance={self.max_distance:.2f}, data_sources=[...], events=[...])"

        +

        Global variables

        +
        +
        var CONJUNCTION_TYPE_GEOGRAPHIC : str
        +
        +

        Conjunction search 'conjunction_type' category for finding conjunctions using the geographic position data

        +
        +
        var CONJUNCTION_TYPE_NBTRACE : str
        +
        +

        Conjunction search 'conjunction_type' category for finding conjunctions using the north B-trace data

        +
        +
        var CONJUNCTION_TYPE_SBTRACE : str
        +
        +

        Conjunction search 'conjunction_type' category for finding conjunctions using the south B-trace data

        +
        +

        Classes

        -
        +
        class Conjunction -(**data: Any) +(conjunction_type: str, start: datetime.datetime, end: datetime.datetime, data_sources: List[DataSource], min_distance: float, max_distance: float, events: List[Dict], closest_epoch: datetime.datetime, farthest_epoch: datetime.datetime)

        Conjunction object

        @@ -104,7 +150,7 @@

        Attributes

        conjunction_type
        the type of location data used when the -conjunction was found (either be 'nbtrace' or 'sbtrace')
        +conjunction was found (either 'nbtrace', 'sbtrace', or 'geographic')
        start
        start timestamp of the conjunction
        end
        @@ -119,20 +165,22 @@

        Attributes

        the sub-conjunctions that make up this over-arching conjunction (the conjunctions between each set of two data sources)
        -
        -

        Create a new model by parsing and validating input data from keyword arguments.

        -

        Raises ValidationError if the input data cannot be parsed to form a valid model.

        +
        closest_epoch
        +
        timestamp for when data sources were closest
        +
        farthest_epoch
        +
        timestamp for when data sources were farthest
        +
      Expand source code -
      class Conjunction(BaseModel):
      +
      class Conjunction:
           """
           Conjunction object
       
           Attributes:
               conjunction_type: the type of location data used when the
      -            conjunction was found (either be 'nbtrace' or 'sbtrace')
      +            conjunction was found (either 'nbtrace', 'sbtrace', or 'geographic')
               start: start timestamp of the conjunction
               end: end timestamp of the conjunction
               data_sources: data sources in the conjunction
      @@ -141,71 +189,39 @@ 

      Attributes

      events: the sub-conjunctions that make up this over-arching conjunction (the conjunctions between each set of two data sources) + closest_epoch: timestamp for when data sources were closest + farthest_epoch: timestamp for when data sources were farthest """ - conjunction_type: str - start: datetime.datetime - end: datetime.datetime - data_sources: List[DataSource] - min_distance: float - max_distance: float - events: List[Dict] - def __str__(self) -> str: - """ - String method + def __init__( + self, + conjunction_type: str, + start: datetime.datetime, + end: datetime.datetime, + data_sources: List[DataSource], + min_distance: float, + max_distance: float, + events: List[Dict], + closest_epoch: datetime.datetime, + farthest_epoch: datetime.datetime, + ): + self.conjunction_type = conjunction_type + self.start = start + self.end = end + self.data_sources = data_sources + self.min_distance = min_distance + self.max_distance = max_distance + self.events = events + self.closest_epoch = closest_epoch + self.farthest_epoch = farthest_epoch - Returns: - string format of Conjunction object - """ + def __str__(self) -> str: return self.__repr__() def __repr__(self) -> str: - """ - Object representation - - Returns: - object representation of Conjunction object - """ - return f"Conjunction(start={repr(self.start)}, end={repr(self.end)}, " \ - f"min_distance={self.min_distance:.2f}, max_distance={self.max_distance:.2f}, " \ - "data_sources=[...], events=[...])"
      + return f"Conjunction(start={repr(self.start)}, end={repr(self.end)}, min_distance={self.min_distance:.2f}, " \ + f"max_distance={self.max_distance:.2f}, data_sources=[...], events=[...])"
      -

      Ancestors

      -
        -
      • pydantic.main.BaseModel
      • -
      • pydantic.utils.Representation
      • -
      -

      Class variables

      -
      -
      var conjunction_type : str
      -
      -
      -
      -
      var data_sources : List[DataSource]
      -
      -
      -
      -
      var end : datetime.datetime
      -
      -
      -
      -
      var events : List[Dict]
      -
      -
      -
      -
      var max_distance : float
      -
      -
      -
      -
      var min_distance : float
      -
      -
      -
      -
      var start : datetime.datetime
      -
      -
      -
      -
      @@ -245,7 +261,7 @@

      Class variables

      if (query) search(query); function search(query) { -const url = '../../../doc-search.html#' + encodeURIComponent(query); +const url = '../../../../doc-search.html#' + encodeURIComponent(query); new tingle.modal({ cssClass: ['modal-dialog'], onClose: () => { @@ -264,22 +280,20 @@

      Index

      • Super-module

        +
      • +
      • Global variables

        +
      • Classes

      • diff --git a/docs/code/pyaurorax_api_reference/pyaurorax/availability/classes/index.html b/docs/code/pyaurorax_api_reference/pyaurorax/search/conjunctions/classes/index.html similarity index 83% rename from docs/code/pyaurorax_api_reference/pyaurorax/availability/classes/index.html rename to docs/code/pyaurorax_api_reference/pyaurorax/search/conjunctions/classes/index.html index c9ec4ec..7a05468 100644 --- a/docs/code/pyaurorax_api_reference/pyaurorax/availability/classes/index.html +++ b/docs/code/pyaurorax_api_reference/pyaurorax/search/conjunctions/classes/index.html @@ -4,8 +4,8 @@ -pyaurorax.availability.classes API documentation - +pyaurorax.search.conjunctions.classes API documentation + @@ -19,32 +19,20 @@
        -

        Module pyaurorax.availability.classes

        +

        Namespace pyaurorax.search.conjunctions.classes

        -

Separated classes and functions used by the availability module.

        -

        Note that these classes and variables are all imported higher up at the -top of the availability module. They can be referenced from there instead -of digging in deeper to these submodules.

        -
        - -Expand source code - -
        """
        -Separted classes and functions used by the availability module.
        -
        -Note that these classes and variables are all imported higher up at the
        -top of the availability module. They can be referenced from there instead
        -of digging in deeper to these submodules.
        -"""
        -

        Sub-modules

        -
        pyaurorax.availability.classes.availability_result
        +
        pyaurorax.search.conjunctions.classes.conjunction
        -

        Class definition used for containing Availability information

        +

        Class definition for a conjunction

        +
        +
        pyaurorax.search.conjunctions.classes.search
        +
        +

        Class definition for a conjunction search

        @@ -90,7 +78,7 @@

        Sub-modules

        if (query) search(query); function search(query) { -const url = '../../../doc-search.html#' + encodeURIComponent(query); +const url = '../../../../doc-search.html#' + encodeURIComponent(query); new tingle.modal({ cssClass: ['modal-dialog'], onClose: () => { @@ -109,12 +97,13 @@

        Index

        diff --git a/docs/code/pyaurorax_api_reference/pyaurorax/conjunctions/classes/search.html b/docs/code/pyaurorax_api_reference/pyaurorax/search/conjunctions/classes/search.html similarity index 68% rename from docs/code/pyaurorax_api_reference/pyaurorax/conjunctions/classes/search.html rename to docs/code/pyaurorax_api_reference/pyaurorax/search/conjunctions/classes/search.html index 39fa1ed..afc7d0b 100644 --- a/docs/code/pyaurorax_api_reference/pyaurorax/conjunctions/classes/search.html +++ b/docs/code/pyaurorax_api_reference/pyaurorax/search/conjunctions/classes/search.html @@ -4,7 +4,7 @@ -pyaurorax.conjunctions.classes.search API documentation +pyaurorax.search.conjunctions.classes.search API documentation @@ -19,7 +19,7 @@
        -

        Module pyaurorax.conjunctions.classes.search

        +

        Module pyaurorax.search.conjunctions.classes.search

        Class definition for a conjunction search

        @@ -27,29 +27,42 @@

        Module pyaurorax.conjunctions.classes.search

        Expand source code -
        """
        +
        # Copyright 2024 University of Calgary
        +#
        +# Licensed under the Apache License, Version 2.0 (the "License");
        +# you may not use this file except in compliance with the License.
        +# You may obtain a copy of the License at
        +#
        +#     http://www.apache.org/licenses/LICENSE-2.0
        +#
        +# Unless required by applicable law or agreed to in writing, software
        +# distributed under the License is distributed on an "AS IS" BASIS,
        +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
        +# See the License for the specific language governing permissions and
        +# limitations under the License.
        +"""
         Class definition for a conjunction search
         """
         
        +from __future__ import annotations
         import datetime
         import itertools
        -from typing import Dict, List, Union, Optional
        -from .conjunction import Conjunction
        -from ...conjunctions import CONJUNCTION_TYPE_NBTRACE
        -from ...api import AuroraXRequest, AuroraXResponse, urls
        +from typing import TYPE_CHECKING, Dict, List, Union, Optional
        +from .conjunction import Conjunction, CONJUNCTION_TYPE_NBTRACE
        +from ...api import AuroraXAPIRequest
         from ...sources import DataSource, FORMAT_BASIC_INFO
        -from ...exceptions import AuroraXBadParametersException
        -from ...requests import (STANDARD_POLLING_SLEEP_TIME,
        -                         cancel as requests_cancel,
        -                         wait_for_data as requests_wait_for_data,
        -                         get_data as requests_get_data,
        -                         get_status as requests_get_status)
        -
        -# pdoc init
        -__pdoc__: Dict = {}
        -
        -
        -class Search():
        +from ....exceptions import AuroraXError, AuroraXAPIError
        +from ...requests._requests import (
        +    cancel as requests_cancel,
        +    wait_for_data as requests_wait_for_data,
        +    get_data as requests_get_data,
        +    get_status as requests_get_status,
        +)
        +if TYPE_CHECKING:
        +    from ....pyaurorax import PyAuroraX
        +
        +
        +class ConjunctionSearch:
             """
             Class representing a conjunction search
         
        @@ -124,63 +137,69 @@ 

        Module pyaurorax.conjunctions.classes.search

        query: the query for this request as JSON status: the status of the query data: the conjunctions found - logs: all log messages outputed by the AuroraX API for this request - - Returns: - a pyaurorax.conjunctions.Search object + logs: all log messages outputted by the AuroraX API for this request """ - def __init__(self, start: datetime.datetime, + __STANDARD_POLLING_SLEEP_TIME: float = 1.0 + + def __init__(self, + aurorax_obj: PyAuroraX, + start: datetime.datetime, end: datetime.datetime, - distance: Union[int, float, Dict[str, Union[int, float]]], - ground: Optional[List[Dict[str, str]]] = [], - space: Optional[List[Dict[str, str]]] = [], - events: Optional[List[Dict[str, str]]] = [], - conjunction_types: Optional[List[str]] = [CONJUNCTION_TYPE_NBTRACE], - epoch_search_precision: Optional[int] = 60, - response_format: Optional[Dict[str, bool]] = None): + distance: Union[int, float, Dict], + ground: Optional[List[Dict]] = None, + space: Optional[List[Dict]] = None, + events: Optional[List[Dict]] = None, + conjunction_types: Optional[List[str]] = None, + epoch_search_precision: Optional[int] = None, + response_format: Optional[Dict] = None): # set variables using passed in args + self.aurorax_obj = aurorax_obj self.start = start self.end = end - self.ground = ground - self.space = space - self.events = events + self.ground = [] if ground is None else ground + self.space = [] if space is None else space + self.events = [] if events is None else events self.distance = distance - self.conjunction_types = conjunction_types - self.epoch_search_precision = epoch_search_precision + self.conjunction_types = [CONJUNCTION_TYPE_NBTRACE] if conjunction_types is None else conjunction_types + self.epoch_search_precision = 60 if epoch_search_precision is None else epoch_search_precision self.response_format = response_format # initialize additional variables - self.request: AuroraXResponse = None - self.request_id: str = "" - self.request_url: str = "" - self.executed: bool = False - self.completed: bool = False - self.data_url: str = "" - self.query: Dict = {} - self.status: Dict = {} - self.data: List[Union[Conjunction, Dict]] = [] - self.logs: List[Dict] = [] + self.request = None + self.request_id = "" + self.request_url = "" + self.executed = False + self.completed = False + self.data_url = "" + self.query = {} + self.status = {} + self.data = [] + self.logs = [] def __str__(self): - """ - String method - - Returns: - string format of Conjunction Search object - """ return self.__repr__() def __repr__(self): - """ - Object representation + return f"ConjunctionSearch(executed={self.executed}, completed={self.completed}, request_id='{self.request_id}')" - Returns: - object representation of Conjunction Search object - """ - return f"ConjunctionSearch(executed={self.executed}, " \ - f"completed={self.completed}, request_id='{self.request_id}')" + def __fill_in_missing_distances(self, curr_distances: Dict) -> Dict: + # get all distances possible + all_distances = self.get_advanced_distances_combos() + + # go through current distances and fill in the values + for curr_key, curr_value in curr_distances.items(): + curr_key_split = curr_key.split('-') + curr_key1 = curr_key_split[0].strip() + curr_key2 = curr_key_split[1].strip() + for all_key in all_distances.keys(): + if (curr_key1 in all_key and curr_key2 in all_key): + # found the matching key, replace the value + all_distances[all_key] = curr_value + + # return + return all_distances def check_criteria_block_count_validity(self) -> None: """ @@ 
-190,13 +209,21 @@

        Module pyaurorax.conjunctions.classes.search

        it was determined to have too many. Raises: - pyaurorax.exceptions.AuroraXBadParametersException: too many criteria blocks are found + pyaurorax.exceptions.AuroraXError: too many criteria blocks are found """ - if ((len(self.ground) + len(self.space) + len(self.events)) > 10): - raise AuroraXBadParametersException("Number of criteria blocks exceeds 10, " - "please reduce the count") - - def get_advanced_distances_combos(self, default_distance: Union[int, float] = None) -> Dict: + count_ground = 0 + count_space = 0 + count_events = 0 + if (self.ground is not None): + count_ground = len(self.ground) + if (self.space is not None): + count_space = len(self.space) + if (self.events is not None): + count_events = len(self.events) + if ((count_ground + count_space + count_events) > 10): + raise AuroraXError("Number of criteria blocks exceeds 10, please reduce the count") + + def get_advanced_distances_combos(self, default_distance: Optional[Union[int, float]] = None) -> Dict: """ Get the advanced distances combinations for this search @@ -208,12 +235,15 @@

        Module pyaurorax.conjunctions.classes.search

        """ # set input arrays options = [] - for i in range(0, len(self.ground)): - options.append("ground%d" % (i + 1)) - for i in range(0, len(self.space)): - options.append("space%d" % (i + 1)) - for i in range(0, len(self.events)): - options.append("events%d" % (i + 1)) + if (self.ground is not None): + for i in range(0, len(self.ground)): + options.append("ground%d" % (i + 1)) + if (self.space is not None): + for i in range(0, len(self.space)): + options.append("space%d" % (i + 1)) + if (self.events is not None): + for i in range(0, len(self.events)): + options.append("events%d" % (i + 1)) # derive all combinations of options of size 2 combinations = {} @@ -223,23 +253,6 @@

        Module pyaurorax.conjunctions.classes.search

        # return return combinations - def __fill_in_missing_distances(self, curr_distances: Dict) -> Dict: - # get all distances possible - all_distances = self.get_advanced_distances_combos() - - # go through current distances and fill in the values - for curr_key, curr_value in curr_distances.items(): - curr_key_split = curr_key.split('-') - curr_key1 = curr_key_split[0].strip() - curr_key2 = curr_key_split[1].strip() - for all_key in all_distances.keys(): - if (curr_key1 in all_key and curr_key2 in all_key): - # found the matching key, replace the value - all_distances[all_key] = curr_value - - # return - return all_distances - @property def distance(self) -> Union[int, float, Dict[str, Union[int, float]]]: """ @@ -248,16 +261,16 @@

        Module pyaurorax.conjunctions.classes.search

        Returns: the distance dictionary with all combinations """ - return self._distance + return self.__distance @distance.setter def distance(self, distance: Union[int, float, Dict[str, Union[int, float]]]) -> None: # set distances to a dict if it's an int or float - if (type(distance) is int or type(distance) is float): - self._distance = self.get_advanced_distances_combos(default_distance=distance) # type: ignore + if (isinstance(distance, int) or isinstance(distance, float)): + self.__distance = self.get_advanced_distances_combos(default_distance=distance) # type: ignore else: # is a dict, fill in any gaps - self._distance = self.__fill_in_missing_distances(distance) # type: ignore + self.__distance = self.__fill_in_missing_distances(distance) # type: ignore @property def query(self) -> Dict: @@ -288,17 +301,14 @@

        Module pyaurorax.conjunctions.classes.search

        Initiate a conjunction search request Raises: - pyaurorax.exceptions.AuroraXBadParametersException: too many criteria blocks + pyaurorax.exceptions.AuroraXAPIError: An API error was encountered """ # check number of criteria blocks self.check_criteria_block_count_validity() # do request - url = urls.conjunction_search_url - req = AuroraXRequest(method="post", - url=url, - body=self.query, - null_response=True) + url = "%s/%s" % (self.aurorax_obj.api_base_url, self.aurorax_obj.search.api.URL_SUFFIX_CONJUNCTION_SEARCH) + req = AuroraXAPIRequest(self.aurorax_obj, method="post", url=url, body=self.query, null_response=True) res = req.execute() # set request ID, request_url, executed @@ -319,15 +329,22 @@

        Module pyaurorax.conjunctions.classes.search

        Args: status: the previously-retrieved status of this request (include to avoid requesting it from the API again), defaults to None + + Raises: + pyaurorax.exceptions.AuroraXAPIError: An API error was encountered """ # get the status if it isn't passed in if (status is None): - status = requests_get_status(self.request_url) + status = requests_get_status(self.aurorax_obj, self.request_url) + + # check response + if (status is None): + raise AuroraXAPIError("Could not retrieve status for this request") # update request status by checking if data URI is set if (status["search_result"]["data_uri"] is not None): self.completed = True - self.data_url = f'{urls.base_url}{status["search_result"]["data_uri"]}' + self.data_url = "%s/%s" % (self.aurorax_obj.api_base_url, status["search_result"]["data_uri"]) # set class variable "status" and "logs" self.status = status @@ -340,6 +357,9 @@

        Module pyaurorax.conjunctions.classes.search

        Returns: True if data is available, else False + + Raises: + pyaurorax.exceptions.AuroraXAPIError: An API error was encountered """ self.update_status() return self.completed @@ -347,14 +367,17 @@

        Module pyaurorax.conjunctions.classes.search

        def get_data(self) -> None: """ Retrieve the data available for this conjunction search request + + Raises: + pyaurorax.exceptions.AuroraXAPIError: An API error was encountered """ - # check if request is completed + # check if completed yet if (self.completed is False): print("No data available, update status or check for data first") return # get data - raw_data = requests_get_data(self.data_url, response_format=self.response_format) + raw_data = requests_get_data(self.aurorax_obj, self.data_url, self.response_format, False) # set data variable if (self.response_format is not None): @@ -369,9 +392,7 @@

        Module pyaurorax.conjunctions.classes.search

        # cast conjunctions self.data = [Conjunction(**c) for c in raw_data] - def wait(self, - poll_interval: Optional[float] = STANDARD_POLLING_SLEEP_TIME, - verbose: Optional[bool] = False) -> None: + def wait(self, poll_interval: float = __STANDARD_POLLING_SLEEP_TIME, verbose: bool = False) -> None: """ Block and wait until the request is complete and data is available for retrieval @@ -380,16 +401,14 @@

        Module pyaurorax.conjunctions.classes.search

        poll_interval: time in seconds to wait between polling attempts, defaults to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME verbose: output poll times and other progress messages, defaults to False + + Raises: + pyaurorax.exceptions.AuroraXAPIError: An API error was encountered """ - url = urls.conjunction_request_url.format(self.request_id) - self.update_status(requests_wait_for_data(url, - poll_interval=poll_interval, - verbose=verbose)) - - def cancel(self, - wait: Optional[bool] = False, - poll_interval: Optional[float] = STANDARD_POLLING_SLEEP_TIME, - verbose: Optional[bool] = False) -> int: + url = "%s/%s" % (self.aurorax_obj.api_base_url, self.aurorax_obj.search.api.URL_SUFFIX_CONJUNCTION_REQUEST.format(self.request_id)) + self.update_status(requests_wait_for_data(self.aurorax_obj, url, poll_interval, verbose)) + + def cancel(self, wait: bool = False, poll_interval: float = __STANDARD_POLLING_SLEEP_TIME, verbose: bool = False) -> int: """ Cancel the conjunction search request @@ -410,11 +429,10 @@

        Module pyaurorax.conjunctions.classes.search

        1 on success Raises: - pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error - pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation + pyaurorax.exceptions.AuroraXAPIError: An API error was encountered """ - url = urls.conjunction_request_url.format(self.request_id) - return requests_cancel(url, wait=wait, poll_interval=poll_interval, verbose=verbose)
        + url = "%s/%s" % (self.aurorax_obj.api_base_url, self.aurorax_obj.search.api.URL_SUFFIX_CONJUNCTION_REQUEST.format(self.request_id)) + return requests_cancel(self.aurorax_obj, url, wait, poll_interval, verbose)
        @@ -426,9 +444,9 @@

        Module pyaurorax.conjunctions.classes.search

        Classes

        -
        -class Search -(start: datetime.datetime, end: datetime.datetime, distance: Union[int, float, Dict[str, Union[int, float]]], ground: Optional[List[Dict[str, str]]] = [], space: Optional[List[Dict[str, str]]] = [], events: Optional[List[Dict[str, str]]] = [], conjunction_types: Optional[List[str]] = ['nbtrace'], epoch_search_precision: Optional[int] = 60, response_format: Optional[Dict[str, bool]] = None) +
        +class ConjunctionSearch +(aurorax_obj: PyAuroraX, start: datetime.datetime, end: datetime.datetime, distance: Union[int, float, Dict], ground: Optional[List[Dict]] = None, space: Optional[List[Dict]] = None, events: Optional[List[Dict]] = None, conjunction_types: Optional[List[str]] = None, epoch_search_precision: Optional[int] = None, response_format: Optional[Dict] = None)

        Class representing a conjunction search
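For orientation, here is a minimal usage sketch of this class. It assumes the top-level object is created as pyaurorax.PyAuroraX() and that the class is reachable as pyaurorax.search.ConjunctionSearch (as referenced elsewhere in these docs); treat both as assumptions rather than a definitive recipe.

import datetime
import pyaurorax

# assumed top-level entry point
aurorax = pyaurorax.PyAuroraX()

# build a search: two-day window, 500 km max distance, one ground and one space criteria block
s = pyaurorax.search.ConjunctionSearch(
    aurorax,
    datetime.datetime(2019, 1, 1, 0, 0, 0),
    datetime.datetime(2019, 1, 2, 23, 59, 59),
    500,
    ground=[{"programs": ["themis-asi"]}],
    space=[{"programs": ["swarm"]}],
)

# submit, block until complete, then download and inspect the results
s.execute()
s.wait()
s.get_data()
print(s.data)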

        @@ -526,15 +544,13 @@

        Attributes

        data
        the conjunctions found
        logs
        -
        all log messages outputed by the AuroraX API for this request
        -
        -

        Returns: -a pyaurorax.conjunctions.Search object

        +
        all log messages outputted by the AuroraX API for this request
        +
        Expand source code -
        class Search():
        +
        class ConjunctionSearch:
             """
             Class representing a conjunction search
         
        @@ -609,63 +625,69 @@ 

        Attributes

        query: the query for this request as JSON status: the status of the query data: the conjunctions found - logs: all log messages outputed by the AuroraX API for this request - - Returns: - a pyaurorax.conjunctions.Search object + logs: all log messages outputted by the AuroraX API for this request """ - def __init__(self, start: datetime.datetime, + __STANDARD_POLLING_SLEEP_TIME: float = 1.0 + + def __init__(self, + aurorax_obj: PyAuroraX, + start: datetime.datetime, end: datetime.datetime, - distance: Union[int, float, Dict[str, Union[int, float]]], - ground: Optional[List[Dict[str, str]]] = [], - space: Optional[List[Dict[str, str]]] = [], - events: Optional[List[Dict[str, str]]] = [], - conjunction_types: Optional[List[str]] = [CONJUNCTION_TYPE_NBTRACE], - epoch_search_precision: Optional[int] = 60, - response_format: Optional[Dict[str, bool]] = None): + distance: Union[int, float, Dict], + ground: Optional[List[Dict]] = None, + space: Optional[List[Dict]] = None, + events: Optional[List[Dict]] = None, + conjunction_types: Optional[List[str]] = None, + epoch_search_precision: Optional[int] = None, + response_format: Optional[Dict] = None): # set variables using passed in args + self.aurorax_obj = aurorax_obj self.start = start self.end = end - self.ground = ground - self.space = space - self.events = events + self.ground = [] if ground is None else ground + self.space = [] if space is None else space + self.events = [] if events is None else events self.distance = distance - self.conjunction_types = conjunction_types - self.epoch_search_precision = epoch_search_precision + self.conjunction_types = [CONJUNCTION_TYPE_NBTRACE] if conjunction_types is None else conjunction_types + self.epoch_search_precision = 60 if epoch_search_precision is None else epoch_search_precision self.response_format = response_format # initialize additional variables - self.request: AuroraXResponse = None - self.request_id: str = "" - self.request_url: str = "" - self.executed: bool = False - self.completed: bool = False - self.data_url: str = "" - self.query: Dict = {} - self.status: Dict = {} - self.data: List[Union[Conjunction, Dict]] = [] - self.logs: List[Dict] = [] + self.request = None + self.request_id = "" + self.request_url = "" + self.executed = False + self.completed = False + self.data_url = "" + self.query = {} + self.status = {} + self.data = [] + self.logs = [] def __str__(self): - """ - String method - - Returns: - string format of Conjunction Search object - """ return self.__repr__() def __repr__(self): - """ - Object representation + return f"ConjunctionSearch(executed={self.executed}, completed={self.completed}, request_id='{self.request_id}')" - Returns: - object representation of Conjunction Search object - """ - return f"ConjunctionSearch(executed={self.executed}, " \ - f"completed={self.completed}, request_id='{self.request_id}')" + def __fill_in_missing_distances(self, curr_distances: Dict) -> Dict: + # get all distances possible + all_distances = self.get_advanced_distances_combos() + + # go through current distances and fill in the values + for curr_key, curr_value in curr_distances.items(): + curr_key_split = curr_key.split('-') + curr_key1 = curr_key_split[0].strip() + curr_key2 = curr_key_split[1].strip() + for all_key in all_distances.keys(): + if (curr_key1 in all_key and curr_key2 in all_key): + # found the matching key, replace the value + all_distances[all_key] = curr_value + + # return + return all_distances def check_criteria_block_count_validity(self) -> None: """ @@ 
-675,13 +697,21 @@

        Attributes

        it was determined to have too many. Raises: - pyaurorax.exceptions.AuroraXBadParametersException: too many criteria blocks are found + pyaurorax.exceptions.AuroraXError: too many criteria blocks are found """ - if ((len(self.ground) + len(self.space) + len(self.events)) > 10): - raise AuroraXBadParametersException("Number of criteria blocks exceeds 10, " - "please reduce the count") - - def get_advanced_distances_combos(self, default_distance: Union[int, float] = None) -> Dict: + count_ground = 0 + count_space = 0 + count_events = 0 + if (self.ground is not None): + count_ground = len(self.ground) + if (self.space is not None): + count_space = len(self.space) + if (self.events is not None): + count_events = len(self.events) + if ((count_ground + count_space + count_events) > 10): + raise AuroraXError("Number of criteria blocks exceeds 10, please reduce the count") + + def get_advanced_distances_combos(self, default_distance: Optional[Union[int, float]] = None) -> Dict: """ Get the advanced distances combinations for this search @@ -693,12 +723,15 @@

        Attributes

        """ # set input arrays options = [] - for i in range(0, len(self.ground)): - options.append("ground%d" % (i + 1)) - for i in range(0, len(self.space)): - options.append("space%d" % (i + 1)) - for i in range(0, len(self.events)): - options.append("events%d" % (i + 1)) + if (self.ground is not None): + for i in range(0, len(self.ground)): + options.append("ground%d" % (i + 1)) + if (self.space is not None): + for i in range(0, len(self.space)): + options.append("space%d" % (i + 1)) + if (self.events is not None): + for i in range(0, len(self.events)): + options.append("events%d" % (i + 1)) # derive all combinations of options of size 2 combinations = {} @@ -708,23 +741,6 @@

        Attributes

        # return return combinations - def __fill_in_missing_distances(self, curr_distances: Dict) -> Dict: - # get all distances possible - all_distances = self.get_advanced_distances_combos() - - # go through current distances and fill in the values - for curr_key, curr_value in curr_distances.items(): - curr_key_split = curr_key.split('-') - curr_key1 = curr_key_split[0].strip() - curr_key2 = curr_key_split[1].strip() - for all_key in all_distances.keys(): - if (curr_key1 in all_key and curr_key2 in all_key): - # found the matching key, replace the value - all_distances[all_key] = curr_value - - # return - return all_distances - @property def distance(self) -> Union[int, float, Dict[str, Union[int, float]]]: """ @@ -733,16 +749,16 @@

        Attributes

        Returns: the distance dictionary with all combinations """ - return self._distance + return self.__distance @distance.setter def distance(self, distance: Union[int, float, Dict[str, Union[int, float]]]) -> None: # set distances to a dict if it's an int or float - if (type(distance) is int or type(distance) is float): - self._distance = self.get_advanced_distances_combos(default_distance=distance) # type: ignore + if (isinstance(distance, int) or isinstance(distance, float)): + self.__distance = self.get_advanced_distances_combos(default_distance=distance) # type: ignore else: # is a dict, fill in any gaps - self._distance = self.__fill_in_missing_distances(distance) # type: ignore + self.__distance = self.__fill_in_missing_distances(distance) # type: ignore @property def query(self) -> Dict: @@ -773,17 +789,14 @@

        Attributes

        Initiate a conjunction search request Raises: - pyaurorax.exceptions.AuroraXBadParametersException: too many criteria blocks + pyaurorax.exceptions.AuroraXAPIError: An API error was encountered """ # check number of criteria blocks self.check_criteria_block_count_validity() # do request - url = urls.conjunction_search_url - req = AuroraXRequest(method="post", - url=url, - body=self.query, - null_response=True) + url = "%s/%s" % (self.aurorax_obj.api_base_url, self.aurorax_obj.search.api.URL_SUFFIX_CONJUNCTION_SEARCH) + req = AuroraXAPIRequest(self.aurorax_obj, method="post", url=url, body=self.query, null_response=True) res = req.execute() # set request ID, request_url, executed @@ -804,15 +817,22 @@

        Attributes

        Args: status: the previously-retrieved status of this request (include to avoid requesting it from the API again), defaults to None + + Raises: + pyaurorax.exceptions.AuroraXAPIError: An API error was encountered """ # get the status if it isn't passed in if (status is None): - status = requests_get_status(self.request_url) + status = requests_get_status(self.aurorax_obj, self.request_url) + + # check response + if (status is None): + raise AuroraXAPIError("Could not retrieve status for this request") # update request status by checking if data URI is set if (status["search_result"]["data_uri"] is not None): self.completed = True - self.data_url = f'{urls.base_url}{status["search_result"]["data_uri"]}' + self.data_url = "%s/%s" % (self.aurorax_obj.api_base_url, status["search_result"]["data_uri"]) # set class variable "status" and "logs" self.status = status @@ -825,6 +845,9 @@

        Attributes

        Returns: True if data is available, else False + + Raises: + pyaurorax.exceptions.AuroraXAPIError: An API error was encountered """ self.update_status() return self.completed @@ -832,14 +855,17 @@

        Attributes

        def get_data(self) -> None: """ Retrieve the data available for this conjunction search request + + Raises: + pyaurorax.exceptions.AuroraXAPIError: An API error was encountered """ - # check if request is completed + # check if completed yet if (self.completed is False): print("No data available, update status or check for data first") return # get data - raw_data = requests_get_data(self.data_url, response_format=self.response_format) + raw_data = requests_get_data(self.aurorax_obj, self.data_url, self.response_format, False) # set data variable if (self.response_format is not None): @@ -854,9 +880,7 @@

        Attributes

        # cast conjunctions self.data = [Conjunction(**c) for c in raw_data] - def wait(self, - poll_interval: Optional[float] = STANDARD_POLLING_SLEEP_TIME, - verbose: Optional[bool] = False) -> None: + def wait(self, poll_interval: float = __STANDARD_POLLING_SLEEP_TIME, verbose: bool = False) -> None: """ Block and wait until the request is complete and data is available for retrieval @@ -865,16 +889,14 @@

        Attributes

        poll_interval: time in seconds to wait between polling attempts, defaults to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME verbose: output poll times and other progress messages, defaults to False + + Raises: + pyaurorax.exceptions.AuroraXAPIError: An API error was encountered """ - url = urls.conjunction_request_url.format(self.request_id) - self.update_status(requests_wait_for_data(url, - poll_interval=poll_interval, - verbose=verbose)) - - def cancel(self, - wait: Optional[bool] = False, - poll_interval: Optional[float] = STANDARD_POLLING_SLEEP_TIME, - verbose: Optional[bool] = False) -> int: + url = "%s/%s" % (self.aurorax_obj.api_base_url, self.aurorax_obj.search.api.URL_SUFFIX_CONJUNCTION_REQUEST.format(self.request_id)) + self.update_status(requests_wait_for_data(self.aurorax_obj, url, poll_interval, verbose)) + + def cancel(self, wait: bool = False, poll_interval: float = __STANDARD_POLLING_SLEEP_TIME, verbose: bool = False) -> int: """ Cancel the conjunction search request @@ -895,15 +917,14 @@

        Attributes

        1 on success Raises: - pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error - pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation + pyaurorax.exceptions.AuroraXAPIError: An API error was encountered """ - url = urls.conjunction_request_url.format(self.request_id) - return requests_cancel(url, wait=wait, poll_interval=poll_interval, verbose=verbose)
        + url = "%s/%s" % (self.aurorax_obj.api_base_url, self.aurorax_obj.search.api.URL_SUFFIX_CONJUNCTION_REQUEST.format(self.request_id)) + return requests_cancel(self.aurorax_obj, url, wait, poll_interval, verbose)

        Instance variables

        -
        var distance : Union[int, float, Dict[str, Union[int, float]]]
        +
        var distance : Union[int, float, Dict[str, Union[int, float]]]

        Property for the distance parameter

        Returns

        @@ -920,10 +941,10 @@

        Returns

        Returns: the distance dictionary with all combinations """ - return self._distance
        + return self.__distance
        -
        var query : Dict
        +
        var query : Dict

        Property for the query value

        Returns

        @@ -956,8 +977,8 @@

        Returns

        Methods

        -
        -def cancel(self, wait: Optional[bool] = False, poll_interval: Optional[float] = 1.0, verbose: Optional[bool] = False) ‑> int +
        +def cancel(self, wait: bool = False, poll_interval: float = 1.0, verbose: bool = False) ‑> int

        Cancel the conjunction search request

        @@ -981,19 +1002,14 @@

        Returns

        1 on success

        Raises

        -
        AuroraXUnexpectedContentTypeException
        -
        unexpected error
        -
        AuroraXUnauthorizedException
        -
        invalid API key for this operation
        +
        AuroraXAPIError
        +
        An API error was encountered
        Expand source code -
        def cancel(self,
        -           wait: Optional[bool] = False,
        -           poll_interval: Optional[float] = STANDARD_POLLING_SLEEP_TIME,
        -           verbose: Optional[bool] = False) -> int:
        +
        def cancel(self, wait: bool = False, poll_interval: float = __STANDARD_POLLING_SLEEP_TIME, verbose: bool = False) -> int:
             """
             Cancel the conjunction search request
         
        @@ -1014,14 +1030,13 @@ 

        Raises

        1 on success Raises: - pyaurorax.exceptions.AuroraXUnexpectedContentTypeException: unexpected error - pyaurorax.exceptions.AuroraXUnauthorizedException: invalid API key for this operation + pyaurorax.exceptions.AuroraXAPIError: An API error was encountered """ - url = urls.conjunction_request_url.format(self.request_id) - return requests_cancel(url, wait=wait, poll_interval=poll_interval, verbose=verbose)
        + url = "%s/%s" % (self.aurorax_obj.api_base_url, self.aurorax_obj.search.api.URL_SUFFIX_CONJUNCTION_REQUEST.format(self.request_id)) + return requests_cancel(self.aurorax_obj, url, wait, poll_interval, verbose)
        -
        +
        def check_criteria_block_count_validity(self) ‑> None
        @@ -1031,7 +1046,7 @@

        Raises

        it was determined to have too many.

        Raises

        -
        AuroraXBadParametersException
        +
        AuroraXError
        too many criteria blocks are found
        @@ -1046,21 +1061,34 @@

        Raises

        it was determined to have too many. Raises: - pyaurorax.exceptions.AuroraXBadParametersException: too many criteria blocks are found + pyaurorax.exceptions.AuroraXError: too many criteria blocks are found """ - if ((len(self.ground) + len(self.space) + len(self.events)) > 10): - raise AuroraXBadParametersException("Number of criteria blocks exceeds 10, " - "please reduce the count")
        + count_ground = 0 + count_space = 0 + count_events = 0 + if (self.ground is not None): + count_ground = len(self.ground) + if (self.space is not None): + count_space = len(self.space) + if (self.events is not None): + count_events = len(self.events) + if ((count_ground + count_space + count_events) > 10): + raise AuroraXError("Number of criteria blocks exceeds 10, please reduce the count")
        -
        +
        def check_for_data(self) ‑> bool

        Check to see if data is available for this conjunction search request

        Returns

        -

        True if data is available, else False

        +

        True if data is available, else False

        +

        Raises

        +
        +
        AuroraXAPIError
        +
        An API error was encountered
        +
        Expand source code @@ -1072,20 +1100,23 @@

        Returns

        Returns: True if data is available, else False + + Raises: + pyaurorax.exceptions.AuroraXAPIError: An API error was encountered """ self.update_status() return self.completed
        -
        +
        def execute(self) ‑> None

        Initiate a conjunction search request

        Raises

        -
        AuroraXBadParametersException
        -
        too many criteria blocks
        +
        AuroraXAPIError
        +
        An API error was encountered
        @@ -1096,17 +1127,14 @@

        Raises

        Initiate a conjunction search request Raises: - pyaurorax.exceptions.AuroraXBadParametersException: too many criteria blocks + pyaurorax.exceptions.AuroraXAPIError: An API error was encountered """ # check number of criteria blocks self.check_criteria_block_count_validity() # do request - url = urls.conjunction_search_url - req = AuroraXRequest(method="post", - url=url, - body=self.query, - null_response=True) + url = "%s/%s" % (self.aurorax_obj.api_base_url, self.aurorax_obj.search.api.URL_SUFFIX_CONJUNCTION_SEARCH) + req = AuroraXAPIRequest(self.aurorax_obj, method="post", url=url, body=self.query, null_response=True) res = req.execute() # set request ID, request_url, executed @@ -1121,8 +1149,8 @@

        Raises

        self.request = res
        -
        -def get_advanced_distances_combos(self, default_distance: Union[int, float] = None) ‑> Dict +
        +def get_advanced_distances_combos(self, default_distance: Optional[Union[int, float]] = None) ‑> Dict

        Get the advanced distances combinations for this search
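As a rough sketch of how this pairs with the distance setter (reusing a ConjunctionSearch object s as in the earlier example; the exact "ground1-space1" key name is a hypothetical illustration based on the option naming in the source):

# build every pairwise combination, each defaulting to 500 km
distances = s.get_advanced_distances_combos(default_distance=500)

# tighten one pairing, then assign the dict back; the setter fills in
# any combinations that were left out
distances["ground1-space1"] = 300  # hypothetical key, for illustration
s.distance = distances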

        @@ -1137,7 +1165,7 @@

        Returns

        Expand source code -
        def get_advanced_distances_combos(self, default_distance: Union[int, float] = None) -> Dict:
        +
        def get_advanced_distances_combos(self, default_distance: Optional[Union[int, float]] = None) -> Dict:
             """
             Get the advanced distances combinations for this search
         
        @@ -1149,12 +1177,15 @@ 

        Returns

        """ # set input arrays options = [] - for i in range(0, len(self.ground)): - options.append("ground%d" % (i + 1)) - for i in range(0, len(self.space)): - options.append("space%d" % (i + 1)) - for i in range(0, len(self.events)): - options.append("events%d" % (i + 1)) + if (self.ground is not None): + for i in range(0, len(self.ground)): + options.append("ground%d" % (i + 1)) + if (self.space is not None): + for i in range(0, len(self.space)): + options.append("space%d" % (i + 1)) + if (self.events is not None): + for i in range(0, len(self.events)): + options.append("events%d" % (i + 1)) # derive all combinations of options of size 2 combinations = {} @@ -1165,11 +1196,16 @@

        Returns

        return combinations
        -
        +
        def get_data(self) ‑> None
        -

        Retrieve the data available for this conjunction search request

        +

        Retrieve the data available for this conjunction search request

        +

        Raises

        +
        +
        AuroraXAPIError
        +
        An API error was encountered
        +
        Expand source code @@ -1177,14 +1213,17 @@

        Returns

        def get_data(self) -> None:
             """
             Retrieve the data available for this conjunction search request
        +
        +    Raises:
        +        pyaurorax.exceptions.AuroraXAPIError: An API error was encountered
             """
        -    # check if request is completed
        +    # check if completed yet
             if (self.completed is False):
                 print("No data available, update status or check for data first")
                 return
         
             # get data
        -    raw_data = requests_get_data(self.data_url, response_format=self.response_format)
        +    raw_data = requests_get_data(self.aurorax_obj, self.data_url, self.response_format, False)
         
             # set data variable
             if (self.response_format is not None):
        @@ -1200,7 +1239,7 @@ 

        Returns

        self.data = [Conjunction(**c) for c in raw_data]
        -
        +
        def update_status(self, status: Optional[Dict] = None) ‑> None
        @@ -1210,6 +1249,11 @@

        Args

        status
        the previously-retrieved status of this request (include to avoid requesting it from the API again), defaults to None
        +
        +

        Raises

        +
        +
        AuroraXAPIError
        +
        An API error was encountered
        @@ -1222,23 +1266,30 @@

        Args

        Args: status: the previously-retrieved status of this request (include to avoid requesting it from the API again), defaults to None + + Raises: + pyaurorax.exceptions.AuroraXAPIError: An API error was encountered """ # get the status if it isn't passed in if (status is None): - status = requests_get_status(self.request_url) + status = requests_get_status(self.aurorax_obj, self.request_url) + + # check response + if (status is None): + raise AuroraXAPIError("Could not retrieve status for this request") # update request status by checking if data URI is set if (status["search_result"]["data_uri"] is not None): self.completed = True - self.data_url = f'{urls.base_url}{status["search_result"]["data_uri"]}' + self.data_url = "%s/%s" % (self.aurorax_obj.api_base_url, status["search_result"]["data_uri"]) # set class variable "status" and "logs" self.status = status self.logs = status["logs"]
        -
        -def wait(self, poll_interval: Optional[float] = 1.0, verbose: Optional[bool] = False) ‑> None +
        +def wait(self, poll_interval: float = 1.0, verbose: bool = False) ‑> None

        Block and wait until the request is complete and data is @@ -1250,14 +1301,17 @@

        Args

        to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME
        verbose
        output poll times and other progress messages, defaults to False
        + +

        Raises

        +
        +
        AuroraXAPIError
        +
        An API error was encountered
        Expand source code -
        def wait(self,
        -         poll_interval: Optional[float] = STANDARD_POLLING_SLEEP_TIME,
        -         verbose: Optional[bool] = False) -> None:
        +
        def wait(self, poll_interval: float = __STANDARD_POLLING_SLEEP_TIME, verbose: bool = False) -> None:
             """
             Block and wait until the request is complete and data is
             available for retrieval
        @@ -1266,11 +1320,12 @@ 

        Args

        poll_interval: time in seconds to wait between polling attempts, defaults to pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME verbose: output poll times and other progress messages, defaults to False + + Raises: + pyaurorax.exceptions.AuroraXAPIError: An API error was encountered """ - url = urls.conjunction_request_url.format(self.request_id) - self.update_status(requests_wait_for_data(url, - poll_interval=poll_interval, - verbose=verbose))
        + url = "%s/%s" % (self.aurorax_obj.api_base_url, self.aurorax_obj.search.api.URL_SUFFIX_CONJUNCTION_REQUEST.format(self.request_id)) + self.update_status(requests_wait_for_data(self.aurorax_obj, url, poll_interval, verbose))
        @@ -1313,7 +1368,7 @@

        Args

        if (query) search(query); function search(query) { -const url = '../../../doc-search.html#' + encodeURIComponent(query); +const url = '../../../../doc-search.html#' + encodeURIComponent(query); new tingle.modal({ cssClass: ['modal-dialog'], onClose: () => { @@ -1332,24 +1387,24 @@

        Index

        • Super-module

        • Classes

diff --git a/docs/code/pyaurorax_api_reference/pyaurorax/search/conjunctions/index.html b/docs/code/pyaurorax_api_reference/pyaurorax/search/conjunctions/index.html
new file mode 100644
index 0000000..3e5060c
--- /dev/null
+++ b/docs/code/pyaurorax_api_reference/pyaurorax/search/conjunctions/index.html
@@ -0,0 +1,809 @@
+pyaurorax.search.conjunctions API documentation
          +
          +
          +

          Module pyaurorax.search.conjunctions

          +
          +
          +

          Use the AuroraX search engine to find conjunctions between groupings +of data sources.

          +

Note that all functions and classes from submodules are imported +at this level of the conjunctions module. They can be referenced from +here instead of digging deeper into the submodules.
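As an orienting sketch, the classes re-exported at this level can be imported directly; the import path below is inferred from the module layout on this page and the `from .classes.search import ConjunctionSearch` line in the source, so treat it as an assumption.

import pyaurorax
# the class itself is documented under pyaurorax.search.conjunctions.classes.search
from pyaurorax.search.conjunctions import ConjunctionSearch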

          +
          + +Expand source code + +
          # Copyright 2024 University of Calgary
          +#
          +# Licensed under the Apache License, Version 2.0 (the "License");
          +# you may not use this file except in compliance with the License.
          +# You may obtain a copy of the License at
          +#
          +#     http://www.apache.org/licenses/LICENSE-2.0
          +#
          +# Unless required by applicable law or agreed to in writing, software
          +# distributed under the License is distributed on an "AS IS" BASIS,
          +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          +# See the License for the specific language governing permissions and
          +# limitations under the License.
          +"""
          +Use the AuroraX search engine to find conjunctions between groupings 
          +of data sources.
          +
+Note that all functions and classes from submodules are imported
+at this level of the conjunctions module. They can be referenced from
+here instead of digging deeper into the submodules.
          +"""
          +
          +import datetime
          +from typing import Dict, List, Optional, Union
          +from .swarmaurora import SwarmAuroraManager
          +from .classes.search import ConjunctionSearch
          +from ._conjunctions import search as func_search
          +from ._conjunctions import describe as func_describe
          +from ._conjunctions import get_request_url as func_get_request_url
          +
          +__all__ = ["ConjunctionsManager"]
          +
          +
          +class ConjunctionsManager:
          +    """
          +    The ConjunctionsManager object is initialized within every PyAuroraX object. It acts as a way to access 
          +    the submodules and carry over configuration information in the super class.
          +    """
          +
          +    __STANDARD_POLLING_SLEEP_TIME: float = 1.0  # Polling sleep time when waiting for data (after the initial sleep time), in seconds
          +
          +    def __init__(self, aurorax_obj):
          +        self.__aurorax_obj = aurorax_obj
          +
          +        # initialize sub-modules
          +        self.__swarmaurora = SwarmAuroraManager(self.__aurorax_obj)
          +
          +    @property
          +    def swarmaurora(self):
          +        """
          +        Access to the `swarmaurora` submodule from within a PyAuroraX object.
          +        """
          +        return self.__swarmaurora
          +
          +    def search(self,
          +               start: datetime.datetime,
          +               end: datetime.datetime,
          +               distance: Union[int, float, Dict],
          +               ground: Optional[List[Dict]] = [],
          +               space: Optional[List[Dict]] = [],
          +               events: Optional[List[Dict]] = [],
          +               conjunction_types: Optional[List[str]] = [],
          +               epoch_search_precision: Optional[int] = 60,
          +               response_format: Optional[Dict] = None,
          +               poll_interval: Optional[float] = __STANDARD_POLLING_SLEEP_TIME,
          +               return_immediately: Optional[bool] = False,
          +               verbose: Optional[bool] = False) -> ConjunctionSearch:
          +        """
          +        Search for conjunctions between data sources
          +
          +        By default, this function will block and wait until the request completes and
+        all data is downloaded. If you don't want to wait, set the `return_immediately`
          +        value to True. The Search object will be returned right after the search has been
          +        started, and you can use the helper functions as part of that object to get the
          +        data when it's done.
          +
          +        Args:
          +            start: start timestamp of the search (inclusive)
          +            end: end timestamp of the search (inclusive)
          +            distance: the maximum distance allowed between data sources when searching for
          +                conjunctions. This can either be a number (int or float), or a dictionary
          +                modified from the output of the "get_advanced_distances_combos()" function.
          +            ground: list of ground instrument search parameters, defaults to []
          +
          +                Example:
          +
          +                    [{
          +                        "programs": ["themis-asi"],
          +                        "platforms": ["gillam", "rabbit lake"],
          +                        "instrument_types": ["RGB"],
          +                        "ephemeris_metadata_filters": {
          +                            "logical_operator": "AND",
          +                            "expressions": [
          +                                {
          +                                    "key": "calgary_apa_ml_v1",
          +                                    "operator": "in",
          +                                    "values": [ "classified as APA" ]
          +                                }
          +                            ]
          +                        }
          +                    }]
          +            space: list of one or more space instrument search parameters, defaults to []
          +
          +                Example:
          +
          +                    [{
          +                        "programs": ["themis-asi", "swarm"],
          +                        "platforms": ["themisa", "swarma"],
          +                        "instrument_types": ["footprint"],
          +                        "ephemeris_metadata_filters": {
          +                            "logical_operator": "AND",
          +                            "expressions": [
          +                                {
          +                                    "key": "nbtrace_region",
          +                                    "operator": "in",
          +                                    "values": [ "north auroral oval" ]
          +                                }
          +                            ]
          +                        },
          +                        "hemisphere": [
          +                            "northern"
          +                        ]
          +                    }]
          +            events: list of one or more events search parameters, defaults to []
          +
          +                Example:
          +
          +                    [{
          +                        "programs": [ "events" ],
          +                        "instrument_types": [ "substorm onsets" ]
          +                    }]
          +            conjunction_types: list of conjunction types, defaults to [] (meaning all conjunction
          +                types). Options are in the pyaurorax.conjunctions module, or at the top level using
          +                the pyaurorax.CONJUNCTION_TYPE_* variables.
          +            epoch_search_precision: the time precision to which conjunctions are calculated. Can be
          +                30 or 60 seconds. Defaults to 60 seconds. Note - this parameter is under active
          +                development and still considered "alpha".
          +            response_format: JSON representation of desired data response format
          +            poll_interval: seconds to wait between polling calls, defaults to
          +                pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME
          +            return_immediately: initiate the search and return without waiting for data to
          +                be received, defaults to False
+            verbose: show the progress of the request using the request log, defaults to False
          +
          +        Returns:
          +            a `pyaurorax.search.ConjunctionSearch` object
          +        """
          +        return func_search(
          +            self.__aurorax_obj,
          +            start,
          +            end,
          +            distance,
          +            ground,
          +            space,
          +            events,
          +            conjunction_types,
          +            epoch_search_precision,
          +            response_format,
          +            poll_interval,
          +            return_immediately,
          +            verbose,
          +        )
          +
          +    def describe(self, search_obj: Optional[ConjunctionSearch] = None, query_dict: Optional[Dict] = None) -> str:
          +        """
          +        Describe a conjunction search as an "SQL-like" string. Either a ConjunctionSearch
          +        object can be supplied, or a dictionary of the raw JSON query.
          +
          +        Args:
          +            search_obj: the conjunction search to describe, optional
          +            query_dict: the conjunction search query represented as a raw dictionary, optional
          +
          +        Returns:
          +            the "SQL-like" string describing the conjunction search object
          +        """
          +        return func_describe(self.__aurorax_obj, search_obj, query_dict)
          +
          +    def get_request_url(self, request_id: str) -> str:
          +        """
          +        Get the conjunction search request URL for a given
          +        request ID. This URL can be used for subsequent
          +        pyaurorax.requests function calls. Primarily this method
          +        facilitates delving into details about a set of already-submitted
          +        conjunction searches.
          +
          +        Args:
          +            request_id: the request identifier
          +
          +        Returns:
          +            the request URL
          +        """
          +        return func_get_request_url(self.__aurorax_obj, request_id)
          +
          +
          +
          +

          Sub-modules

          +
          +
          pyaurorax.search.conjunctions.classes
          +
          +
          +
          +
          pyaurorax.search.conjunctions.swarmaurora
          +
          +
          +
          +
          +
          +
          +
          +
          +
          +
          +

          Classes

          +
          +
          +class ConjunctionsManager +(aurorax_obj) +
          +
          +

          The ConjunctionsManager object is initialized within every PyAuroraX object. It acts as a way to access +the submodules and carry over configuration information in the super class.
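A short sketch of how this manager is typically reached (assuming the top-level object is created as pyaurorax.PyAuroraX(); the attribute path follows the module layout shown on this page):

import pyaurorax

aurorax = pyaurorax.PyAuroraX()

# the manager is created for us inside the PyAuroraX object
cm = aurorax.search.conjunctions

# sub-modules hang off the manager, e.g. the Swarm-Aurora helpers
print(cm.swarmaurora)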

          +
          + +Expand source code + +
          class ConjunctionsManager:
          +    """
          +    The ConjunctionsManager object is initialized within every PyAuroraX object. It acts as a way to access 
          +    the submodules and carry over configuration information in the super class.
          +    """
          +
          +    __STANDARD_POLLING_SLEEP_TIME: float = 1.0  # Polling sleep time when waiting for data (after the initial sleep time), in seconds
          +
          +    def __init__(self, aurorax_obj):
          +        self.__aurorax_obj = aurorax_obj
          +
          +        # initialize sub-modules
          +        self.__swarmaurora = SwarmAuroraManager(self.__aurorax_obj)
          +
          +    @property
          +    def swarmaurora(self):
          +        """
          +        Access to the `swarmaurora` submodule from within a PyAuroraX object.
          +        """
          +        return self.__swarmaurora
          +
          +    def search(self,
          +               start: datetime.datetime,
          +               end: datetime.datetime,
          +               distance: Union[int, float, Dict],
          +               ground: Optional[List[Dict]] = [],
          +               space: Optional[List[Dict]] = [],
          +               events: Optional[List[Dict]] = [],
          +               conjunction_types: Optional[List[str]] = [],
          +               epoch_search_precision: Optional[int] = 60,
          +               response_format: Optional[Dict] = None,
          +               poll_interval: Optional[float] = __STANDARD_POLLING_SLEEP_TIME,
          +               return_immediately: Optional[bool] = False,
          +               verbose: Optional[bool] = False) -> ConjunctionSearch:
          +        """
          +        Search for conjunctions between data sources
          +
          +        By default, this function will block and wait until the request completes and
+        all data is downloaded. If you don't want to wait, set the `return_immediately`
          +        value to True. The Search object will be returned right after the search has been
          +        started, and you can use the helper functions as part of that object to get the
          +        data when it's done.
          +
          +        Args:
          +            start: start timestamp of the search (inclusive)
          +            end: end timestamp of the search (inclusive)
          +            distance: the maximum distance allowed between data sources when searching for
          +                conjunctions. This can either be a number (int or float), or a dictionary
          +                modified from the output of the "get_advanced_distances_combos()" function.
          +            ground: list of ground instrument search parameters, defaults to []
          +
          +                Example:
          +
          +                    [{
          +                        "programs": ["themis-asi"],
          +                        "platforms": ["gillam", "rabbit lake"],
          +                        "instrument_types": ["RGB"],
          +                        "ephemeris_metadata_filters": {
          +                            "logical_operator": "AND",
          +                            "expressions": [
          +                                {
          +                                    "key": "calgary_apa_ml_v1",
          +                                    "operator": "in",
          +                                    "values": [ "classified as APA" ]
          +                                }
          +                            ]
          +                        }
          +                    }]
          +            space: list of one or more space instrument search parameters, defaults to []
          +
          +                Example:
          +
          +                    [{
          +                        "programs": ["themis-asi", "swarm"],
          +                        "platforms": ["themisa", "swarma"],
          +                        "instrument_types": ["footprint"],
          +                        "ephemeris_metadata_filters": {
          +                            "logical_operator": "AND",
          +                            "expressions": [
          +                                {
          +                                    "key": "nbtrace_region",
          +                                    "operator": "in",
          +                                    "values": [ "north auroral oval" ]
          +                                }
          +                            ]
          +                        },
          +                        "hemisphere": [
          +                            "northern"
          +                        ]
          +                    }]
          +            events: list of one or more events search parameters, defaults to []
          +
          +                Example:
          +
          +                    [{
          +                        "programs": [ "events" ],
          +                        "instrument_types": [ "substorm onsets" ]
          +                    }]
          +            conjunction_types: list of conjunction types, defaults to [] (meaning all conjunction
          +                types). Options are in the pyaurorax.conjunctions module, or at the top level using
          +                the pyaurorax.CONJUNCTION_TYPE_* variables.
          +            epoch_search_precision: the time precision to which conjunctions are calculated. Can be
          +                30 or 60 seconds. Defaults to 60 seconds. Note - this parameter is under active
          +                development and still considered "alpha".
          +            response_format: JSON representation of desired data response format
          +            poll_interval: seconds to wait between polling calls, defaults to
          +                pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME
          +            return_immediately: initiate the search and return without waiting for data to
          +                be received, defaults to False
+            verbose: show the progress of the request using the request log, defaults to False
          +
          +        Returns:
          +            a `pyaurorax.search.ConjunctionSearch` object
          +        """
          +        return func_search(
          +            self.__aurorax_obj,
          +            start,
          +            end,
          +            distance,
          +            ground,
          +            space,
          +            events,
          +            conjunction_types,
          +            epoch_search_precision,
          +            response_format,
          +            poll_interval,
          +            return_immediately,
          +            verbose,
          +        )
          +
          +    def describe(self, search_obj: Optional[ConjunctionSearch] = None, query_dict: Optional[Dict] = None) -> str:
          +        """
          +        Describe a conjunction search as an "SQL-like" string. Either a ConjunctionSearch
          +        object can be supplied, or a dictionary of the raw JSON query.
          +
          +        Args:
          +            search_obj: the conjunction search to describe, optional
          +            query_dict: the conjunction search query represented as a raw dictionary, optional
          +
          +        Returns:
          +            the "SQL-like" string describing the conjunction search object
          +        """
          +        return func_describe(self.__aurorax_obj, search_obj, query_dict)
          +
          +    def get_request_url(self, request_id: str) -> str:
          +        """
          +        Get the conjunction search request URL for a given
          +        request ID. This URL can be used for subsequent
          +        pyaurorax.requests function calls. Primarily this method
          +        facilitates delving into details about a set of already-submitted
          +        conjunction searches.
          +
          +        Args:
          +            request_id: the request identifier
          +
          +        Returns:
          +            the request URL
          +        """
          +        return func_get_request_url(self.__aurorax_obj, request_id)
          +
          +

          Instance variables

          +
          +
          var swarmaurora
          +
          +

          Access to the pyaurorax.search.conjunctions.swarmaurora submodule from within a PyAuroraX object.

          +
          + +Expand source code + +
          @property
          +def swarmaurora(self):
          +    """
          +    Access to the `swarmaurora` submodule from within a PyAuroraX object.
          +    """
          +    return self.__swarmaurora
          +
          +
          +
          +

          Methods

          +
          +
          +def describe(self, search_obj: Optional[ConjunctionSearch] = None, query_dict: Optional[Dict] = None) ‑> str +
          +
          +

          Describe a conjunction search as an "SQL-like" string. Either a ConjunctionSearch +object can be supplied, or a dictionary of the raw JSON query.

          +

          Args

          +
          +
          search_obj
          +
          the conjunction search to describe, optional
          +
          query_dict
          +
          the conjunction search query represented as a raw dictionary, optional
          +
          +

          Returns

          +

          the "SQL-like" string describing the conjunction search object
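A hedged usage sketch (assuming aurorax.search.conjunctions is the manager instance and s is a previously built ConjunctionSearch, as in the earlier examples):

# describe an existing search object as an "SQL-like" string
print(aurorax.search.conjunctions.describe(search_obj=s))

# or describe the raw query dictionary directly
print(aurorax.search.conjunctions.describe(query_dict=s.query))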

          +
          + +Expand source code + +
          def describe(self, search_obj: Optional[ConjunctionSearch] = None, query_dict: Optional[Dict] = None) -> str:
          +    """
          +    Describe a conjunction search as an "SQL-like" string. Either a ConjunctionSearch
          +    object can be supplied, or a dictionary of the raw JSON query.
          +
          +    Args:
          +        search_obj: the conjunction search to describe, optional
          +        query_dict: the conjunction search query represented as a raw dictionary, optional
          +
          +    Returns:
          +        the "SQL-like" string describing the conjunction search object
          +    """
          +    return func_describe(self.__aurorax_obj, search_obj, query_dict)
          +
          +
          +
          +def get_request_url(self, request_id: str) ‑> str +
          +
          +

          Get the conjunction search request URL for a given +request ID. This URL can be used for subsequent +pyaurorax.requests function calls. Primarily this method +facilitates delving into details about a set of already-submitted +conjunction searches.

          +

          Args

          +
          +
          request_id
          +
          the request identifier
          +
          +

          Returns

          +

          the request URL
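For example (a sketch; the request ID would come from an earlier search, e.g. s.request_id):

# look up the request URL for an already-submitted conjunction search
url = aurorax.search.conjunctions.get_request_url(s.request_id)
print(url)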

          +
          + +Expand source code + +
          def get_request_url(self, request_id: str) -> str:
          +    """
          +    Get the conjunction search request URL for a given
          +    request ID. This URL can be used for subsequent
          +    pyaurorax.requests function calls. Primarily this method
          +    facilitates delving into details about a set of already-submitted
          +    conjunction searches.
          +
          +    Args:
          +        request_id: the request identifier
          +
          +    Returns:
          +        the request URL
          +    """
          +    return func_get_request_url(self.__aurorax_obj, request_id)
          +
          +
          +
          +def search(self, start: datetime.datetime, end: datetime.datetime, distance: Union[int, float, Dict], ground: Optional[List[Dict]] = [], space: Optional[List[Dict]] = [], events: Optional[List[Dict]] = [], conjunction_types: Optional[List[str]] = [], epoch_search_precision: Optional[int] = 60, response_format: Optional[Dict] = None, poll_interval: Optional[float] = 1.0, return_immediately: Optional[bool] = False, verbose: Optional[bool] = False) ‑> ConjunctionSearch +
          +
          +

          Search for conjunctions between data sources

          +

By default, this function will block and wait until the request completes and +all data is downloaded. If you don't want to wait, set the `return_immediately` +value to True. The Search object will be returned right after the search has been +started, and you can use the helper functions as part of that object to get the +data when it's done.

          +

          Args

          +
          +
          start
          +
          start timestamp of the search (inclusive)
          +
          end
          +
          end timestamp of the search (inclusive)
          +
          distance
          +
          the maximum distance allowed between data sources when searching for +conjunctions. This can either be a number (int or float), or a dictionary +modified from the output of the "get_advanced_distances_combos()" function.
          +
          ground
          +
          +

          list of ground instrument search parameters, defaults to []

          +

          Example:

          +
          [{
          +    "programs": ["themis-asi"],
          +    "platforms": ["gillam", "rabbit lake"],
          +    "instrument_types": ["RGB"],
          +    "ephemeris_metadata_filters": {
          +        "logical_operator": "AND",
          +        "expressions": [
          +            {
          +                "key": "calgary_apa_ml_v1",
          +                "operator": "in",
          +                "values": [ "classified as APA" ]
          +            }
          +        ]
          +    }
          +}]
          +
          +
          +
          space
          +
          +

          list of one or more space instrument search parameters, defaults to []

          +

          Example:

          +
          [{
          +    "programs": ["themis-asi", "swarm"],
          +    "platforms": ["themisa", "swarma"],
          +    "instrument_types": ["footprint"],
          +    "ephemeris_metadata_filters": {
          +        "logical_operator": "AND",
          +        "expressions": [
          +            {
          +                "key": "nbtrace_region",
          +                "operator": "in",
          +                "values": [ "north auroral oval" ]
          +            }
          +        ]
          +    },
          +    "hemisphere": [
          +        "northern"
          +    ]
          +}]
          +
          +
          +
          events
          +
          +

          list of one or more events search parameters, defaults to []

          +

          Example:

          +
          [{
          +    "programs": [ "events" ],
          +    "instrument_types": [ "substorm onsets" ]
          +}]
          +
          +
          +
          conjunction_types
          +
          list of conjunction types, defaults to [] (meaning all conjunction +types). Options are in the pyaurorax.conjunctions module, or at the top level using +the pyaurorax.CONJUNCTION_TYPE_* variables.
          +
          epoch_search_precision
          +
          the time precision to which conjunctions are calculated. Can be +30 or 60 seconds. Defaults to 60 seconds. Note - this parameter is under active +development and still considered "alpha".
          +
          response_format
          +
          JSON representation of desired data response format
          +
          poll_interval
          +
          seconds to wait between polling calls, defaults to +pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME
          +
          return_immediately
          +
          initiate the search and return without waiting for data to +be received, defaults to False
          +
          verbose
          +
show the progress of the request using the request log, defaults to False
          +
          +

          Returns

          +

          a ConjunctionSearch object

          +
          + +Expand source code + +
          def search(self,
          +           start: datetime.datetime,
          +           end: datetime.datetime,
          +           distance: Union[int, float, Dict],
          +           ground: Optional[List[Dict]] = [],
          +           space: Optional[List[Dict]] = [],
          +           events: Optional[List[Dict]] = [],
          +           conjunction_types: Optional[List[str]] = [],
          +           epoch_search_precision: Optional[int] = 60,
          +           response_format: Optional[Dict] = None,
          +           poll_interval: Optional[float] = __STANDARD_POLLING_SLEEP_TIME,
          +           return_immediately: Optional[bool] = False,
          +           verbose: Optional[bool] = False) -> ConjunctionSearch:
          +    """
          +    Search for conjunctions between data sources
          +
          +    By default, this function will block and wait until the request completes and
+    all data is downloaded. If you don't want to wait, set the `return_immediately`
          +    value to True. The Search object will be returned right after the search has been
          +    started, and you can use the helper functions as part of that object to get the
          +    data when it's done.
          +
          +    Args:
          +        start: start timestamp of the search (inclusive)
          +        end: end timestamp of the search (inclusive)
          +        distance: the maximum distance allowed between data sources when searching for
          +            conjunctions. This can either be a number (int or float), or a dictionary
          +            modified from the output of the "get_advanced_distances_combos()" function.
          +        ground: list of ground instrument search parameters, defaults to []
          +
          +            Example:
          +
          +                [{
          +                    "programs": ["themis-asi"],
          +                    "platforms": ["gillam", "rabbit lake"],
          +                    "instrument_types": ["RGB"],
          +                    "ephemeris_metadata_filters": {
          +                        "logical_operator": "AND",
          +                        "expressions": [
          +                            {
          +                                "key": "calgary_apa_ml_v1",
          +                                "operator": "in",
          +                                "values": [ "classified as APA" ]
          +                            }
          +                        ]
          +                    }
          +                }]
          +        space: list of one or more space instrument search parameters, defaults to []
          +
          +            Example:
          +
          +                [{
          +                    "programs": ["themis-asi", "swarm"],
          +                    "platforms": ["themisa", "swarma"],
          +                    "instrument_types": ["footprint"],
          +                    "ephemeris_metadata_filters": {
          +                        "logical_operator": "AND",
          +                        "expressions": [
          +                            {
          +                                "key": "nbtrace_region",
          +                                "operator": "in",
          +                                "values": [ "north auroral oval" ]
          +                            }
          +                        ]
          +                    },
          +                    "hemisphere": [
          +                        "northern"
          +                    ]
          +                }]
          +        events: list of one or more events search parameters, defaults to []
          +
          +            Example:
          +
          +                [{
          +                    "programs": [ "events" ],
          +                    "instrument_types": [ "substorm onsets" ]
          +                }]
          +        conjunction_types: list of conjunction types, defaults to [] (meaning all conjunction
          +            types). Options are in the pyaurorax.conjunctions module, or at the top level using
          +            the pyaurorax.CONJUNCTION_TYPE_* variables.
          +        epoch_search_precision: the time precision to which conjunctions are calculated. Can be
          +            30 or 60 seconds. Defaults to 60 seconds. Note - this parameter is under active
          +            development and still considered "alpha".
          +        response_format: JSON representation of desired data response format
          +        poll_interval: seconds to wait between polling calls, defaults to
          +            pyaurorax.requests.STANDARD_POLLING_SLEEP_TIME
          +        return_immediately: initiate the search and return without waiting for data to
          +            be received, defaults to False
+        verbose: show the progress of the request using the request log, defaults to False
          +
          +    Returns:
          +        a `pyaurorax.search.ConjunctionSearch` object
          +    """
          +    return func_search(
          +        self.__aurorax_obj,
          +        start,
          +        end,
          +        distance,
          +        ground,
          +        space,
          +        events,
          +        conjunction_types,
          +        epoch_search_precision,
          +        response_format,
          +        poll_interval,
          +        return_immediately,
          +        verbose,
          +    )
          +
          +
          +
          +
          +
          +
          +
          + +
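To complement the search() documentation above, here is a brief usage sketch. The instance name `aurorax`, the date range, and the ground/space criteria are illustrative assumptions rather than values taken from the generated docs; the `data` attribute on the returned ConjunctionSearch is also assumed from the helper functions mentioned in the docstring.

    import datetime
    import pyaurorax

    aurorax = pyaurorax.PyAuroraX()

    # block until the request completes, then inspect the returned ConjunctionSearch object
    s = aurorax.search.conjunctions.search(
        start=datetime.datetime(2019, 1, 1, 0, 0, 0),
        end=datetime.datetime(2019, 1, 3, 23, 59, 59),
        distance=500,
        ground=[{"programs": ["themis-asi"]}],
        space=[{"programs": ["swarm"], "hemisphere": ["northern"]}],
        verbose=True,
    )
    print(s.data)  # conjunction results once the request has finished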
+
+
+
\ No newline at end of file
diff --git a/docs/code/pyaurorax_api_reference/pyaurorax/conjunctions/swarmaurora/tools.html b/docs/code/pyaurorax_api_reference/pyaurorax/search/conjunctions/swarmaurora/index.html
similarity index 51%
rename from docs/code/pyaurorax_api_reference/pyaurorax/conjunctions/swarmaurora/tools.html
rename to docs/code/pyaurorax_api_reference/pyaurorax/search/conjunctions/swarmaurora/index.html
index a5af493..c6ac2d3 100644
--- a/docs/code/pyaurorax_api_reference/pyaurorax/conjunctions/swarmaurora/tools.html
+++ b/docs/code/pyaurorax_api_reference/pyaurorax/search/conjunctions/swarmaurora/index.html
@@ -4,8 +4,8 @@
-pyaurorax.conjunctions.swarmaurora.tools API documentation
-
+pyaurorax.search.conjunctions.swarmaurora API documentation
+
@@ -19,108 +19,89 @@
          -

          Module pyaurorax.conjunctions.swarmaurora.tools

          +

          Module pyaurorax.search.conjunctions.swarmaurora

          -

          Functions for using conjunction searches with Swarm-Aurora

          Expand source code -
          """
          -Functions for using conjunction searches with Swarm-Aurora
          -"""
          +
          # Copyright 2024 University of Calgary
          +#
          +# Licensed under the Apache License, Version 2.0 (the "License");
          +# you may not use this file except in compliance with the License.
          +# You may obtain a copy of the License at
          +#
          +#     http://www.apache.org/licenses/LICENSE-2.0
          +#
          +# Unless required by applicable law or agreed to in writing, software
          +# distributed under the License is distributed on an "AS IS" BASIS,
          +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          +# See the License for the specific language governing permissions and
          +# limitations under the License.
           
          -import webbrowser
          -import json
          -from typing import Dict, Union
          -from ...api import AuroraXRequest
          -from ...exceptions import AuroraXException
          -from ..classes.search import Search
          +from typing import Optional, Any
          +from ..classes.search import ConjunctionSearch
          +from ._swarmaurora import create_custom_import_file as func_create_custom_import_file
          +from ._swarmaurora import get_url as func_get_url
          +from ._swarmaurora import open_in_browser as func_open_in_browser
           
          -# pdoc init
          -__pdoc__: Dict = {}
          +__all__ = ["SwarmAuroraManager"]
           
           
          -def get_url(search_obj: Search) -> str:
          +class SwarmAuroraManager:
               """
          -    Get a URL that displays a conjunction search in the Swarm-Aurora
          -    Conjunction Finder
          -
          -    Args:
          -        search_obj: a conjunction search object, must be a completed
          -                    search with the 'request_id' value populated
          -
          -    Returns:
          -        the Swarm-Aurora Conjunction Finder URL for this conjunction search
          +    The SwarmAuroraManager object is initialized within every PyAuroraX object. It acts as a way to access 
          +    the submodules and carry over configuration information in the super class.
               """
          -    return "https://swarm-aurora.com/conjunctionFinder/?aurorax_request_id=%s" % (search_obj.request_id)
           
          +    def __init__(self, aurorax_obj):
          +        self.__aurorax_obj = aurorax_obj
           
          -def open_in_browser(search_obj: Search, browser: str = None) -> None:
          -    """
          -    In a browser, open a conjunction search in the Swarm-Aurora
          -    Conjunction Finder.
          +    def get_url(self, search_obj: ConjunctionSearch) -> str:
          +        """
          +        Get a URL that displays a conjunction search in the Swarm-Aurora
          +        Conjunction Finder
           
          -    Args:
          -        search_obj: a conjunction search object, must be a completed
          -                    search with the 'request_id' value populated
          -        browser: the browser type to load using. Default is your
          -                 default browser. Some common other options are
          -                 "google-chrome", "firefox", or "safari". For all available
          -                 options, refer to https://docs.python.org/3/library/webbrowser.html#webbrowser.get
          -    """
          -    url = get_url(search_obj)
          -    try:
          -        w = webbrowser.get(using=browser)
          -        w.open_new_tab(url)
          -    except Exception as e:
          -        if ("could not locate runnable browser" in str(e)):
          -            raise AuroraXException(("Error: selected browser '%s' not found, please try "
          -                                   "another. For the list of options, refer to "
          -                                    "https://docs.python.org/3/library/webbrowser.html#webbrowser.get") % (browser))
          +        Args:
          +            search_obj: a conjunction search object, must be a completed
          +                        search with the 'request_id' value populated
           
          +        Returns:
          +            the Swarm-Aurora Conjunction Finder URL for this conjunction search
          +        """
          +        return func_get_url(search_obj)
           
          -def create_custom_import_file(search_obj: Search,
          -                              filename: str = None,
          -                              returnDict: bool = False) -> Union[str, Dict]:
          -    """
          -    Generate a Swarm-Aurora custom import file for a given
          -    conjunction search
          -
          -    Args:
          -        search_obj: a conjunction search object, must be a completed
          -                    search with the 'request_id' value populated
          -        filename: the output filename, default is 'swarmaurora_custom_import_file_{requestID}.json'
          -        returnDict: return the custom import file contents as a dictionary
          -                    instead of saving a file, default is False
          +    def open_in_browser(self, search_obj: ConjunctionSearch, browser: Optional[str] = None) -> None:
          +        """
          +        In a browser, open a conjunction search in the Swarm-Aurora Conjunction Finder.
           
          -    Returns:
          -        the filename of the saved custom import file, or a dictionary with the
          -        file contents if `returnDict` is set to True
          -    """
          -    # make request
          -    url = "https://swarm-aurora.com/conjunctionFinder/generate_custom_import_json?aurorax_request_id=%s" % (
          -        search_obj.request_id)
          -    req = AuroraXRequest(method="get",
          -                         url=url,
          -                         body=search_obj.query)
          -    res = req.execute()
          -
          -    # return the contents as a dict if requested
          -    if (returnDict is True):
          -        return res.data
          +        Args:
          +            search_obj: a conjunction search object, must be a completed
          +                        search with the 'request_id' value populated
          +            browser: the browser type to load using. Default is your
          +                    default browser. Some common other options are
          +                    "google-chrome", "firefox", or "safari". For all available
          +                    options, refer to https://docs.python.org/3/library/webbrowser.html#webbrowser.get
          +        """
          +        return func_open_in_browser(search_obj, browser)
           
          -    # set default filename
          -    if (filename is None):
          -        filename = "swarmaurora_custom_import_%s.json" % (search_obj.request_id)
          +    def create_custom_import_file(self, search_obj: ConjunctionSearch, filename: Optional[str] = None, return_dict: bool = False) -> Any:
          +        """
          +        Generate a Swarm-Aurora custom import file for a given conjunction search
           
          -    # save data to file
          -    with open(filename, 'w', encoding='utf-8') as fp:
          -        json.dump(res.data, fp, indent=4)
          +        Args:
          +            search_obj: a conjunction search object, must be a completed
          +                        search with the 'request_id' value populated
          +            filename: the output filename, default is 'swarmaurora_custom_import_file_{requestID}.json'
          +            return_dict: return the custom import file contents as a dictionary
          +                        instead of saving a file, default is False
           
          -    # return
          -    return filename
+        Returns:
+            the filename of the saved custom import file, or a dictionary with the
+            file contents if `return_dict` is set to True
+        """
+        return func_create_custom_import_file(self.__aurorax_obj, search_obj, filename, return_dict)
          @@ -128,14 +109,82 @@

          Module pyaurorax.conjunctions.swarmaurora.tools

          -

          Functions

          +
          +
          +

          Classes

          +
          +
          +class SwarmAuroraManager +(aurorax_obj) +
          +
          +

          The SwarmAuroraManager object is initialized within every PyAuroraX object. It acts as a way to access +the submodules and carry over configuration information in the super class.

          +
          + +Expand source code + +
          class SwarmAuroraManager:
          +    """
          +    The SwarmAuroraManager object is initialized within every PyAuroraX object. It acts as a way to access 
          +    the submodules and carry over configuration information in the super class.
          +    """
          +
          +    def __init__(self, aurorax_obj):
          +        self.__aurorax_obj = aurorax_obj
          +
          +    def get_url(self, search_obj: ConjunctionSearch) -> str:
          +        """
          +        Get a URL that displays a conjunction search in the Swarm-Aurora
          +        Conjunction Finder
          +
          +        Args:
          +            search_obj: a conjunction search object, must be a completed
          +                        search with the 'request_id' value populated
          +
          +        Returns:
          +            the Swarm-Aurora Conjunction Finder URL for this conjunction search
          +        """
          +        return func_get_url(search_obj)
          +
          +    def open_in_browser(self, search_obj: ConjunctionSearch, browser: Optional[str] = None) -> None:
          +        """
          +        In a browser, open a conjunction search in the Swarm-Aurora Conjunction Finder.
          +
          +        Args:
          +            search_obj: a conjunction search object, must be a completed
          +                        search with the 'request_id' value populated
          +            browser: the browser type to load using. Default is your
          +                    default browser. Some common other options are
          +                    "google-chrome", "firefox", or "safari". For all available
          +                    options, refer to https://docs.python.org/3/library/webbrowser.html#webbrowser.get
          +        """
          +        return func_open_in_browser(search_obj, browser)
          +
          +    def create_custom_import_file(self, search_obj: ConjunctionSearch, filename: Optional[str] = None, return_dict: bool = False) -> Any:
          +        """
          +        Generate a Swarm-Aurora custom import file for a given conjunction search
          +
          +        Args:
          +            search_obj: a conjunction search object, must be a completed
          +                        search with the 'request_id' value populated
          +            filename: the output filename, default is 'swarmaurora_custom_import_file_{requestID}.json'
          +            return_dict: return the custom import file contents as a dictionary
          +                        instead of saving a file, default is False
          +
          +        Returns:
          +            the filename of the saved custom import file, or a dictionary with the
          +            file contents if `return_dict` is set to True
          +        """
          +        return func_create_custom_import_file(self.__aurorax_obj, search_obj, filename, return_dict)
          +
          +
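As a rough sketch of how this manager is reached in practice (the attribute path is an assumption based on the manager pattern described in these docs, not something stated on this page):

    import pyaurorax

    aurorax = pyaurorax.PyAuroraX()

    # the SwarmAuroraManager is assumed to hang off the conjunctions manager;
    # its methods (get_url, open_in_browser, create_custom_import_file) are documented below
    swarmaurora = aurorax.search.conjunctions.swarmaurora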

          Methods

          -
          -def create_custom_import_file(search_obj: Search, filename: str = None, returnDict: bool = False) ‑> Union[str, Dict] +
          +def create_custom_import_file(self, search_obj: ConjunctionSearch, filename: Optional[str] = None, return_dict: bool = False) ‑> Any
          -

          Generate a Swarm-Aurora custom import file for a given -conjunction search

          +

          Generate a Swarm-Aurora custom import file for a given conjunction search

          Args

          search_obj
          @@ -143,61 +192,37 @@

          Args

          search with the 'request_id' value populated
          filename
          the output filename, default is 'swarmaurora_custom_import_file_{requestID}.json'
          -
          returnDict
          +
          return_dict
          return the custom import file contents as a dictionary instead of saving a file, default is False

          Returns

          the filename of the saved custom import file, or a dictionary with the -file contents if returnDict is set to True

          +file contents if return_dict is set to True

          Expand source code -
          def create_custom_import_file(search_obj: Search,
          -                              filename: str = None,
          -                              returnDict: bool = False) -> Union[str, Dict]:
          +
          def create_custom_import_file(self, search_obj: ConjunctionSearch, filename: Optional[str] = None, return_dict: bool = False) -> Any:
               """
          -    Generate a Swarm-Aurora custom import file for a given
          -    conjunction search
          +    Generate a Swarm-Aurora custom import file for a given conjunction search
           
               Args:
                   search_obj: a conjunction search object, must be a completed
                               search with the 'request_id' value populated
                   filename: the output filename, default is 'swarmaurora_custom_import_file_{requestID}.json'
          -        returnDict: return the custom import file contents as a dictionary
          +        return_dict: return the custom import file contents as a dictionary
                               instead of saving a file, default is False
           
               Returns:
                   the filename of the saved custom import file, or a dictionary with the
          -        file contents if `returnDict` is set to True
          +        file contents if `return_dict` is set to True
               """
          -    # make request
          -    url = "https://swarm-aurora.com/conjunctionFinder/generate_custom_import_json?aurorax_request_id=%s" % (
          -        search_obj.request_id)
          -    req = AuroraXRequest(method="get",
          -                         url=url,
          -                         body=search_obj.query)
          -    res = req.execute()
          -
          -    # return the contents as a dict if requested
          -    if (returnDict is True):
          -        return res.data
          -
          -    # set default filename
          -    if (filename is None):
          -        filename = "swarmaurora_custom_import_%s.json" % (search_obj.request_id)
          -
          -    # save data to file
          -    with open(filename, 'w', encoding='utf-8') as fp:
          -        json.dump(res.data, fp, indent=4)
          -
          -    # return
          -    return filename
          + return func_create_custom_import_file(self.__aurorax_obj, search_obj, filename, return_dict)
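A short, hypothetical sketch of create_custom_import_file follows; the instance name and search parameters are illustrative assumptions, and the search must have completed so its request_id is populated.

    import datetime
    import pyaurorax

    aurorax = pyaurorax.PyAuroraX()

    # a completed conjunction search (parameters are illustrative)
    s = aurorax.search.conjunctions.search(
        datetime.datetime(2019, 1, 1), datetime.datetime(2019, 1, 2), 500,
        ground=[{"programs": ["themis-asi"]}],
        space=[{"programs": ["swarm"]}],
    )

    # write a Swarm-Aurora custom import file for this search
    filename = aurorax.search.conjunctions.swarmaurora.create_custom_import_file(s)
    print(filename)

    # or retrieve the contents as a dictionary instead of writing a file
    contents = aurorax.search.conjunctions.swarmaurora.create_custom_import_file(s, return_dict=True)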
          -
          -def get_url(search_obj: Search) ‑> str +
          +def get_url(self, search_obj: ConjunctionSearch) ‑> str

Get a URL that displays a conjunction search in the Swarm-Aurora
@@ -214,7 +239,7 @@

          Returns

          Expand source code -
          def get_url(search_obj: Search) -> str:
          +
          def get_url(self, search_obj: ConjunctionSearch) -> str:
               """
               Get a URL that displays a conjunction search in the Swarm-Aurora
               Conjunction Finder
          @@ -226,15 +251,14 @@ 

          Returns

          Returns: the Swarm-Aurora Conjunction Finder URL for this conjunction search """ - return "https://swarm-aurora.com/conjunctionFinder/?aurorax_request_id=%s" % (search_obj.request_id)
          + return func_get_url(search_obj)
          -
          -def open_in_browser(search_obj: Search, browser: str = None) ‑> None +
          +def open_in_browser(self, search_obj: ConjunctionSearch, browser: Optional[str] = None) ‑> None
          -

          In a browser, open a conjunction search in the Swarm-Aurora -Conjunction Finder.

          +

          In a browser, open a conjunction search in the Swarm-Aurora Conjunction Finder.

          Args

          search_obj
          @@ -250,33 +274,24 @@

          Args

          Expand source code -
          def open_in_browser(search_obj: Search, browser: str = None) -> None:
          +
          def open_in_browser(self, search_obj: ConjunctionSearch, browser: Optional[str] = None) -> None:
               """
          -    In a browser, open a conjunction search in the Swarm-Aurora
          -    Conjunction Finder.
          +    In a browser, open a conjunction search in the Swarm-Aurora Conjunction Finder.
           
               Args:
                   search_obj: a conjunction search object, must be a completed
                               search with the 'request_id' value populated
                   browser: the browser type to load using. Default is your
          -                 default browser. Some common other options are
          -                 "google-chrome", "firefox", or "safari". For all available
          -                 options, refer to https://docs.python.org/3/library/webbrowser.html#webbrowser.get
          +                default browser. Some common other options are
          +                "google-chrome", "firefox", or "safari". For all available
          +                options, refer to https://docs.python.org/3/library/webbrowser.html#webbrowser.get
               """
          -    url = get_url(search_obj)
          -    try:
          -        w = webbrowser.get(using=browser)
          -        w.open_new_tab(url)
          -    except Exception as e:
          -        if ("could not locate runnable browser" in str(e)):
          -            raise AuroraXException(("Error: selected browser '%s' not found, please try "
          -                                   "another. For the list of options, refer to "
          -                                    "https://docs.python.org/3/library/webbrowser.html#webbrowser.get") % (browser))
          + return func_open_in_browser(search_obj, browser)
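And a matching sketch for get_url and open_in_browser; again, the instance name, search parameters, and browser choice are assumptions made for illustration.

    import datetime
    import pyaurorax

    aurorax = pyaurorax.PyAuroraX()

    # a completed conjunction search (parameters are illustrative)
    s = aurorax.search.conjunctions.search(
        datetime.datetime(2019, 1, 1), datetime.datetime(2019, 1, 2), 500,
        ground=[{"programs": ["themis-asi"]}],
        space=[{"programs": ["swarm"]}],
    )

    # build the Swarm-Aurora Conjunction Finder URL for the search
    url = aurorax.search.conjunctions.swarmaurora.get_url(s)
    print(url)

    # or open it directly, optionally naming a specific browser
    aurorax.search.conjunctions.swarmaurora.open_in_browser(s, browser="firefox")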
          -
          -
          + +