Skip to content

Commit

Permalink
Minor style improvements, updated configuration for documentation, added reference to Landman et al. (2023)
Browse files Browse the repository at this point in the history
  • Loading branch information
tomasstolker committed Nov 23, 2023
1 parent 07daa00 commit 789cde9
Show file tree
Hide file tree
Showing 4 changed files with 58 additions and 81 deletions.
2 changes: 1 addition & 1 deletion README.rst
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ Documentation can be found at `http://pycrires.readthedocs.io <http://pycrires.r
Attribution
-----------

Please cite `Stolker & Landman (2023) <https://ui.adsabs.harvard.edu/abs/2023ascl.soft07040S/abstract>`_ when *pycrires* is used in a publication and Landman et al. (in prep.) specifically when using the dedicated routines for spatially-resolved sources.
Please cite `Stolker & Landman (2023) <https://ui.adsabs.harvard.edu/abs/2023ascl.soft07040S/abstract>`_ when *pycrires* is used in a publication and `Landman et al. (2023) <https://arxiv.org/abs/2311.13527>`_ specifically when using the dedicated routines for spatially-resolved sources.

Contributing
------------
Expand Down
2 changes: 1 addition & 1 deletion docs/about.rst
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ Contact
Attribution
-----------

Please cite `Stolker & Landman (2023) <https://ui.adsabs.harvard.edu/abs/2023ascl.soft07040S/abstract>`_ when *pycrires* is used in a publication and Landman et al. (in prep.) specifically when using the dedicated routines for spatially-resolved sources.
Please cite `Stolker & Landman (2023) <https://ui.adsabs.harvard.edu/abs/2023ascl.soft07040S/abstract>`_ when *pycrires* is used in a publication and `Landman et al. (2023) <https://arxiv.org/abs/2311.13527>`_ specifically when using the dedicated routines for spatially-resolved sources.

Contributing
------------
Expand Down
32 changes: 8 additions & 24 deletions docs/conf.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,6 @@
import sys
sys.path.insert(0, os.path.abspath('../'))


# -- Project information -----------------------------------------------------

project = 'pycrires'
Expand All @@ -26,6 +25,7 @@
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.

extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
Expand All @@ -36,32 +36,17 @@

numpydoc_show_class_members = False

# Add any paths that contain templates here, relative to this directory.
templates_path = []

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['.ipynb_checkpoints/*']

exclude_patterns = ['_build',
'Thumbs.db',
'.DS_Store',
'.ipynb_checkpoints/*']

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'sphinx_book_theme'
#
# html_theme_options = {
# 'path_to_docs': 'docs',
# 'repository_url': 'https://github.com/tomasstolker/pycrires',
# 'repository_branch': 'main',
# 'use_edit_page_button': True,
# 'use_issues_button': True,
# 'use_repository_button': True,
# 'use_download_button': True,
# }

html_theme = 'pydata_sphinx_theme'

html_theme_options = {
Expand All @@ -76,7 +61,6 @@
"doc_path": "docs",
}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
html_static_path = ['_static']

html_search_language = 'en'
103 changes: 48 additions & 55 deletions pycrires/pipeline.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@

import glob
import json
import logging
import os
import shutil
import socket
Expand Down Expand Up @@ -32,17 +31,18 @@
from PyAstronomy.pyasl import fastRotBroad
from scipy import interpolate, ndimage, optimize, signal
from skimage.restoration import inpaint
from typeguard import config as typeguard_config
from typeguard import typechecked, CollectionCheckStrategy
from typeguard import typechecked

# from typeguard import config as typeguard_config
# from typeguard import CollectionCheckStrategy, typechecked

import pycrires
from pycrires import util


PIXEL_SCALE = 0.056 # (arcsec)

log_book = logging.getLogger(__name__)
typeguard_config.collection_check_strategy = CollectionCheckStrategy.ALL_ITEMS
# typeguard_config.collection_check_strategy = CollectionCheckStrategy.ALL_ITEMS


class Pipeline:
Expand All @@ -58,7 +58,7 @@ def __init__(self, path: Optional[str] = None) -> None:
"""
Parameters
----------
path : str
path : str, None
Path of the main reduction folder. The main folder should
contain a subfolder called ``raw`` where the raw data
(both science and calibration) from the ESO archive are
Expand Down Expand Up @@ -174,12 +174,12 @@ def __init__(self, path: Optional[str] = None) -> None:
# Check if there is a new version available

try:
contents = urllib.request.urlopen(
"https://pypi.org/pypi/pycrires/json", timeout=1.0
).read()
pypi_url = "https://pypi.org/pypi/pycrires/json"

data = json.loads(contents)
latest_version = data["info"]["version"]
with urllib.request.urlopen(pypi_url, timeout=1.0) as open_url:
url_content = open_url.read()
url_data = json.loads(url_content)
latest_version = url_data["info"]["version"]

except (urllib.error.URLError, socket.timeout):
latest_version = None
Expand Down Expand Up @@ -1624,9 +1624,7 @@ def cal_flat(self, verbose: bool = True) -> None:

# Update file dictionary with TraceWave table

fits_files = Path(self.path / "calib").glob(
"cr2res_cal_flat_*tw.fits"
)
fits_files = Path(self.path / "calib").glob("cr2res_cal_flat_*tw.fits")

for item in fits_files:
self._update_files("CAL_FLAT_TW", str(item))
Expand Down Expand Up @@ -2534,9 +2532,7 @@ def util_extract(self, calib_type: str, verbose: bool = True) -> None:

print("Creating SOF file:")

sof_file = Path(
self.path / f"calib/util_extract_{calib_type}/files.sof"
)
sof_file = Path(self.path / f"calib/util_extract_{calib_type}/files.sof")

# Find UTIL_CALIB file

Expand Down Expand Up @@ -2875,7 +2871,9 @@ def util_genlines(self, verbose: bool = True) -> None:
file_found = True

if not file_found:
url = f"https://home.strw.leidenuniv.nl/~stolker/pycrires/{file_tag}.fits"
url = (
f"https://home.strw.leidenuniv.nl/~stolker/pycrires/{file_tag}.fits"
)
line_file = output_dir / f"{file_tag}.fits"

if not os.path.exists(line_file):
Expand Down Expand Up @@ -2908,7 +2906,9 @@ def util_genlines(self, verbose: bool = True) -> None:

if line_file.suffix == (".fits"):
line_data = fits.getdata(line_file, hdu=1)
line_data = np.column_stack([line_data['Wavelength'], line_data['Emission']])
line_data = np.column_stack(
[line_data["Wavelength"], line_data["Emission"]]
)

line_file = output_dir / line_file.with_suffix(".dat").name

Expand Down Expand Up @@ -2963,13 +2963,18 @@ def util_genlines(self, verbose: bool = True) -> None:

print("Output files:")

fits_file = output_dir / line_file.with_suffix('.fits').name
fits_file = output_dir / line_file.with_suffix(".fits").name
self._update_files("EMISSION_LINES", str(fits_file))

indices = np.where(self.header_data["DPR.CATG"] == "SCIENCE")[0]
wlen_id = self.header_data["INS.WLEN.ID"][indices[0]]

fits_file = output_dir / line_file.with_name(line_file.stem + f"_{wlen_id}").with_suffix(".fits").name
fits_file = (
output_dir
/ line_file.with_name(line_file.stem + f"_{wlen_id}")
.with_suffix(".fits")
.name
)
self._update_files("EMISSION_LINES", str(fits_file))

# Write updated dictionary to JSON file
Expand Down Expand Up @@ -3940,9 +3945,7 @@ def obs_nodding(
output_dir / f"cr2res_obs_nodding_extractedB_{count_exp:03d}.fits"
)

spec_file = Path(
output_dir / "cr2res_obs_nodding_extracted_combined.fits"
)
spec_file = Path(output_dir / "cr2res_obs_nodding_extracted_combined.fits")
spec_file.rename(
output_dir
/ f"cr2res_obs_nodding_extracted_combined_{count_exp:03d}.fits"
Expand Down Expand Up @@ -3978,16 +3981,12 @@ def obs_nodding(
output_dir / f"cr2res_obs_nodding_slitfuncB_{count_exp:03d}.fits"
)

spec_file = Path(
output_dir / "cr2res_obs_nodding_trace_wave_A.fits"
)
spec_file = Path(output_dir / "cr2res_obs_nodding_trace_wave_A.fits")
spec_file.rename(
output_dir / f"cr2res_obs_nodding_trace_wave_A_{count_exp:03d}.fits"
)

spec_file = Path(
output_dir / "cr2res_obs_nodding_trace_wave_B.fits"
)
spec_file = Path(output_dir / "cr2res_obs_nodding_trace_wave_B.fits")
spec_file.rename(
output_dir / f"cr2res_obs_nodding_trace_wave_B_{count_exp:03d}.fits"
)
Expand Down Expand Up @@ -4120,9 +4119,11 @@ def obs_nodding_irregular(
print(f"Number of exposures at nod B: {nod_b_count}")

if nod_a_count != nod_b_count and unique_pairs is True:
warnings.warn(f"Nodding counts are unequal ({nod_a_count} "
f"A vs {nod_b_count} B). Reverting to "
"unique_pairs = False.")
warnings.warn(
f"Nodding counts are unequal ({nod_a_count} "
f"A vs {nod_b_count} B). Reverting to "
"unique_pairs = False."
)

unique_pairs = False

Expand Down Expand Up @@ -4154,25 +4155,27 @@ def obs_nodding_irregular(
print(f"Already reduced file {output_file}")
else:
print(f"\nCreating SOF file for {output_file}:")
sof_file = Path(
output_dir / f"files_{count_exp:03d}_{nod_ab}.sof"
)
sof_file = Path(output_dir / f"files_{count_exp:03d}_{nod_ab}.sof")

sof_open = open(sof_file, "w", encoding="utf-8")

file_0 = self.header_data["ORIGFILE"][i_row]

if nod_ab == "A":
if not unique_pairs:
closest_i_diffnod = b_i_rows[np.argmin(np.abs(i_row - b_i_rows))]
closest_i_diffnod = b_i_rows[
np.argmin(np.abs(i_row - b_i_rows))
]

else:
closest_i_diffnod = b_i_rows[B_counter]
B_counter += 1

elif nod_ab == "B":
if not unique_pairs:
closest_i_diffnod = a_i_rows[np.argmin(np.abs(i_row - a_i_rows))]
closest_i_diffnod = a_i_rows[
np.argmin(np.abs(i_row - a_i_rows))
]

else:
closest_i_diffnod = a_i_rows[A_counter]
Expand Down Expand Up @@ -4402,9 +4405,7 @@ def obs_nodding_irregular(
/ f"cr2res_obs_nodding_combined{nod_ab}_{count_exp:03d}.fits"
)

spec_file = Path(
output_dir / f"cr2res_obs_nodding_model{nod_ab}.fits"
)
spec_file = Path(output_dir / f"cr2res_obs_nodding_model{nod_ab}.fits")
spec_file.rename(
output_dir
/ f"cr2res_obs_nodding_model{nod_ab}_{count_exp:03d}.fits"
Expand Down Expand Up @@ -4465,11 +4466,11 @@ def obs_nodding_irregular(
json.dump(self.file_dict, json_file, indent=4)

if unique_pairs and verbose:
print('These were the file IDs of the A frames:')
print("These were the file IDs of the A frames:")
print(a_i_rows)
print('\n These were the file IDs of the B frames:')
print("\n These were the file IDs of the B frames:")
print(b_i_rows)
print('\n This is how they were paired in cr2res_obs_nodding:')
print("\n This is how they were paired in cr2res_obs_nodding:")
print(sequence)

@typechecked
Expand Down Expand Up @@ -5449,9 +5450,7 @@ def correct_wavelengths_2d(
# Save corrected wavelengths to all files for this order
for save_file in fits_files:
out_file = output_dir / (
Path(save_file).stem.replace(
"_corr", ""
)
Path(save_file).stem.replace("_corr", "")
+ "_corr.fits"
)
if det_idx == 0 and order_idx == 0:
Expand Down Expand Up @@ -6189,15 +6188,9 @@ def fit_gaussian(
)

input_folder = self.product_folder / extraction_input
fits_files = Path(input_folder).glob(
f"cr2res_combined{nod_ab}_*_extr2d.fits"
)
fits_files = Path(input_folder).glob(f"cr2res_combined{nod_ab}_*_extr2d.fits")
n_exp = len(
list(
Path(input_folder).glob(
f"cr2res_combined{nod_ab}_*_extr2d.fits"
)
)
list(Path(input_folder).glob(f"cr2res_combined{nod_ab}_*_extr2d.fits"))
)

print_msg = ""
Expand Down

0 comments on commit 789cde9

Please sign in to comment.