Skip to content

Commit

Permalink
Merge branch 'main' into flat_e2e
Browse files Browse the repository at this point in the history
  • Loading branch information
semaphoreP committed Sep 11, 2024
2 parents 30d52b0 + 4722fc5 commit 4736c55
Show file tree
Hide file tree
Showing 21 changed files with 4,630 additions and 135 deletions.
2 changes: 2 additions & 0 deletions corgidrp/check.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
"""
Module to hold input-checking functions to minimize repetition
Copied over from the II&T pipeline
"""
import numbers

Expand Down
37 changes: 25 additions & 12 deletions corgidrp/combine.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,15 +5,16 @@
import corgidrp.data as data


def combine_images(data_subset, err_subset, dq_subset, collapse="lower"):
def combine_images(data_subset, err_subset, dq_subset, collapse, num_frames_scaling):
"""
Combines several images together
Args:
data_subset (np.array): 3-D array of N 2-D images
err_subset (np.array): 4-D array of N 2-D error maps
dq_subset (np.array): 3-D array of N 2-D DQ maps
collapse (str): "mean" or "median". Regardless, the images are scaled by num_frames_per_group to ~conserve photons
collapse (str): "mean" or "median".
num_frames_scaling (bool): Multiply by number of frames in sequence in order to ~conserve photons
Returns:
np.array: 2-D array of combined images
Expand All @@ -30,11 +31,16 @@ def combine_images(data_subset, err_subset, dq_subset, collapse="lower"):
n_samples[bad] = 0
n_samples = np.sum(n_samples, axis=0)
if collapse.lower() == "mean":
data_collapse = np.nanmean(data_subset, axis=0) * tot_frames
err_collapse = np.sqrt(np.nanmean(err_subset**2, axis=0)) * tot_frames/np.sqrt(n_samples) # not sure if this is correct, but good enough for now
data_collapse = np.nanmean(data_subset, axis=0)
err_collapse = np.sqrt(np.nanmean(err_subset**2, axis=0)) /np.sqrt(n_samples) # not sure if this is correct, but good enough for now
elif collapse.lower() == "median":
data_collapse = np.nanmedian(data_subset, axis=0) * tot_frames
err_collapse = np.sqrt(np.nanmean(err_subset**2, axis=0)) * tot_frames/np.sqrt(n_samples) * np.sqrt(np.pi/2) # inflate median error
data_collapse = np.nanmedian(data_subset, axis=0)
err_collapse = np.sqrt(np.nanmean(err_subset**2, axis=0)) /np.sqrt(n_samples) * np.sqrt(np.pi/2) # inflate median error
if num_frames_scaling:
# scale up by the number of frames
data_collapse *= tot_frames
err_collapse *= tot_frames

# dq collpase: keep all flags on
dq_collapse = np.bitwise_or.reduce(dq_subset, axis=0)
# except those pixels that have been replaced
Expand All @@ -44,15 +50,20 @@ def combine_images(data_subset, err_subset, dq_subset, collapse="lower"):



def combine_subexposures(input_dataset, num_frames_per_group=None, collapse="mean"):
def combine_subexposures(input_dataset, num_frames_per_group=None, collapse="mean", num_frames_scaling=True):
"""
Combines a sequence of exposures assuming a constant nubmer of frames per group
The length of the dataset must be divisible by the number of frames per group
Combines a sequence of exposures assuming a constant nubmer of frames per group.
The length of the dataset must be divisible by the number of frames per group.
The combination is done with either the mean or median, but the collapsed image can be scaled
in order to ~conserve the total number of photons in the input dataset (this essentially turns a
median into a sum)
Args:
input_dataset (corgidrp.data.Dataset): input data.
num_frames_per_group (int): number of subexposures per group. If None, combines all images together
collapse (str): "mean" or "median". Regardless, the images are scaled by num_frames_per_group to ~conserve photons
collapse (str): "mean" or "median". (default: mean)
num_frames_scaling (bool): Multiply by number of frames in sequence in order to ~conserve photons (default: True)
Returns:
corgidrp.data.Dataset: dataset after combination of every "num_frames_per_group" frames together
Expand All @@ -73,15 +84,17 @@ def combine_subexposures(input_dataset, num_frames_per_group=None, collapse="mea
err_subset = np.copy(input_dataset.all_err[num_frames_per_group*i:num_frames_per_group*(i+1)])
dq_subset = input_dataset.all_dq[num_frames_per_group*i:num_frames_per_group*(i+1)]

data_collapse, err_collapse, dq_collapse = combine_images(data_subset, err_subset, dq_subset, collapse=collapse)
data_collapse, err_collapse, dq_collapse = combine_images(data_subset, err_subset, dq_subset, collapse=collapse,
num_frames_scaling=num_frames_scaling)

# grab the headers from the first frame in this sub sequence
pri_hdr = input_dataset[num_frames_per_group*i].pri_hdr.copy()
ext_hdr = input_dataset[num_frames_per_group*i].ext_hdr.copy()
err_hdr = input_dataset[num_frames_per_group*i].err_hdr.copy()
dq_hdr = input_dataset[num_frames_per_group*i].err_hdr.copy()
hdulist = input_dataset[num_frames_per_group*i].hdu_list.copy()
new_image = data.Image(data_collapse, pri_hdr=pri_hdr, ext_hdr=ext_hdr, err=err_collapse, dq=dq_collapse, err_hdr=err_hdr,
dq_hdr=dq_hdr, input_hdulist=input_dataset[num_frames_per_group*i].hdu_list)
dq_hdr=dq_hdr, input_hdulist=hdulist)
new_image.filename = input_dataset[num_frames_per_group*i].filename
new_image._record_parent_filenames(input_dataset[num_frames_per_group*i:num_frames_per_group*(i+1)])
new_dataset.append(new_image)
Expand Down
107 changes: 88 additions & 19 deletions corgidrp/data.py
Original file line number Diff line number Diff line change
Expand Up @@ -606,6 +606,7 @@ def add_extension_hdu(self, name, data = None, header=None):
if name in self.hdu_names:
raise ValueError("Extension name already exists in HDU list")
else:
self.hdu_names.append(name)
self.hdu_list.append(new_hdu)


Expand Down Expand Up @@ -740,9 +741,12 @@ class NonLinearityCalibration(Image):
- Row headers (dn counts) must be monotonically increasing
- Column headers (EM gains) must be monotonically increasing
- Data columns (relative gain curves) must straddle 1
- The first row will provide the the Gain axis values (accesssed via gain_ax = non_lin_correction.data[0, 1:])
- The first column will provide the "count" axis value (accessed via count_ax = non_lin_correction.data[1:, 0])
- The rest of the array will be the calibration data (accessed via relgains = non_lin_correction.data[1:, 1:])
- The first row will provide the the Gain axis values (accesssed via
gain_ax = non_lin_correction.data[0, 1:])
- The first column will provide the "count" axis value (accessed via
count_ax = non_lin_correction.data[1:, 0])
- The rest of the array will be the calibration data (accessed via
relgains = non_lin_correction.data[1:, 1:])
For example:
[
Expand All @@ -760,12 +764,18 @@ class NonLinearityCalibration(Image):
[0.900, 0.910, 0.950, 1.000] is the first of the four relative gain curves.
Args:
data_or_filepath (str or np.array): either the filepath to the FITS file to read in OR the 2D calibration data. See above for the required format.
pri_hdr (astropy.io.fits.Header): the primary header (required only if raw 2D data is passed in)
ext_hdr (astropy.io.fits.Header): the image extension header (required only if raw 2D data is passed in)
input_dataset (corgidrp.data.Dataset): the Image files combined together to make this NonLinearityCalibration file (required only if raw 2D data is passed in)
data_or_filepath (str or np.array): either the filepath to the FITS file
to read in OR the 2D calibration data. See above for the required format.
pri_hdr (astropy.io.fits.Header): the primary header (required only if
raw 2D data is passed in)
ext_hdr (astropy.io.fits.Header): the image extension header (required
only if raw 2D data is passed in)
input_dataset (corgidrp.data.Dataset): the Image files combined
together to make this NonLinearityCalibration file (required only if
raw 2D data is passed in)
"""
def __init__(self, data_or_filepath, pri_hdr=None, ext_hdr=None, input_dataset=None):
def __init__(self, data_or_filepath, pri_hdr=None, ext_hdr=None,
input_dataset=None):

# run the image class contructor
super().__init__(data_or_filepath, pri_hdr=pri_hdr, ext_hdr=ext_hdr)
Expand All @@ -774,21 +784,27 @@ def __init__(self, data_or_filepath, pri_hdr=None, ext_hdr=None, input_dataset=N
nonlin_raw = self.data
if nonlin_raw.ndim < 2 or nonlin_raw.shape[0] < 2 or \
nonlin_raw.shape[1] < 2:
raise ValueError('The non-linearity calibration array must be at least 2x2 (room for x '
'and y axes and one data point)')
raise ValueError('The non-linearity calibration array must be at'
'least 2x2 (room for x and y axes and one data'
'point)')
if not np.isnan(nonlin_raw[0, 0]):
raise ValueError('The first value of the non-linearity calibration array (upper left) must be set to '
'"nan"')
raise ValueError('The first value of the non-linearity calibration '
'array (upper left) must be set to "nan"')


# additional bookkeeping for a calibration file
# if this is a new calibration file, we need to bookkeep it in the header
# b/c of logic in the super.__init__, we just need to check this to see if it is a new NonLinearityCalibration file
# b/c of logic in the super.__init__, we just need to check this to see if
# it is a new NonLinearityCalibration file
if ext_hdr is not None:
if input_dataset is None:
# error check. this is required in this case
raise ValueError("This appears to be a new Non Linearity Correction. The dataset of input files needs to be passed in to the input_dataset keyword to record history of this calibration file.")
self.ext_hdr['DATATYPE'] = 'NonLinearityCalibration' # corgidrp specific keyword for saving to disk
raise ValueError("This appears to be a new Non Linearity "
"Correction. The dataset of input files needs"
"to be passed in to the input_dataset keyword"
"to record history of this calibration file.")
# corgidrp specific keyword for saving to disk
self.ext_hdr['DATATYPE'] = 'NonLinearityCalibration'

# log all the data that went into making this calibration file
self._record_parent_filenames(input_dataset)
Expand Down Expand Up @@ -1145,11 +1161,16 @@ def copy(self, copy_data = True):

class DetectorParams(Image):
"""
Class containing detector parameters that may change over time
Class containing detector parameters that may change over time.
To create a new instance of DetectorParams, you only need to pass in the values you would like to change from default values:
new_valid_date = astropy.time.Time("2027-01-01")
new_det_params = DetectorParams({'gmax' : 7500.0 }, date_valid=new_valid_date).
Args:
data_or_filepath (dict or str): either a filepath string or a dictionary of
parameters to modify from default values
data_or_filepath (dict or str): either a filepath string corresponding to an
existing DetectorParams file saved to disk or a
dictionary of parameters to modify from default values
date_valid (astropy.time.Time): date after which these parameters are valid
Attributes:
Expand Down Expand Up @@ -1328,6 +1349,53 @@ def __init__(self, data_or_filepath, pri_hdr=None, ext_hdr=None, input_dataset=N
if 'DATATYPE' not in self.ext_hdr or self.ext_hdr['DATATYPE'] != 'AstrometricCalibration':
raise ValueError("File that was loaded was not an AstrometricCalibration file.")

class TrapCalibration(Image):
    """
    Calibration product describing charge traps that cause charge transfer
    inefficiency, derived from trap-pumped data.

    The data array has shape [n, 10]; each row holds:
    [row, column, sub-electrode location, index number of trap at this
    pixel/electrode, capture time constant, maximum amplitude of the dipole,
    energy level of hole, cross section for holes, R^2 value of fit,
    release time constant]

    Args:
        data_or_filepath (str or np.array): either the filepath to the FITS
            file to read in OR the 2D image data
        pri_hdr (astropy.io.fits.Header): the primary header (required only
            if raw 2D data is passed in)
        ext_hdr (astropy.io.fits.Header): the image extension header
            (required only if raw 2D data is passed in)
        input_dataset (corgidrp.data.Dataset): the Image files combined
            together to make the trap calibration
    """
    def __init__(self, data_or_filepath, pri_hdr=None, ext_hdr=None, input_dataset=None):
        # let the Image constructor handle loading and storing the data
        super().__init__(data_or_filepath, pri_hdr=pri_hdr, ext_hdr=ext_hdr)

        # an explicitly supplied ext_hdr means this is a newly created
        # calibration product, so bookkeep its provenance in the header
        # (same logic pattern as the other calibration classes in this module)
        if ext_hdr is not None:
            if input_dataset is None:
                # required in this case so the calibration's history can be recorded
                raise ValueError("This appears to be a new TrapCalibration. The dataset of input files needs to be "
                                 "passed in to the input_dataset keyword to record history of this TrapCalibration.")
            # corgidrp-specific keyword used when saving to disk
            self.ext_hdr['DATATYPE'] = 'TrapCalibration'

            # log all the frames that went into making this calibration
            self._record_parent_filenames(input_dataset)

            # add to history
            self.ext_hdr['HISTORY'] = "TrapCalibration created from {0} frames".format(self.ext_hdr['DRPNFILE'])

            # default filename: first input file's name (everything before
            # ".fits") with a _trapcal suffix
            base_name = input_dataset[0].filename.split(".fits")[0]
            self.filename = "{0}_trapcal.fits".format(base_name)

        # sanity check: if only a filepath was passed in, any FITS file could
        # have been read, so confirm it really is a TrapCalibration product
        if 'DATATYPE' not in self.ext_hdr or self.ext_hdr['DATATYPE'] != 'TrapCalibration':
            raise ValueError("File that was loaded was not a TrapCalibration file.")

datatypes = { "Image" : Image,
"Dark" : Dark,
"NonLinearityCalibration" : NonLinearityCalibration,
Expand All @@ -1336,7 +1404,8 @@ def __init__(self, data_or_filepath, pri_hdr=None, ext_hdr=None, input_dataset=N
"DetectorNoiseMaps": DetectorNoiseMaps,
"FlatField" : FlatField,
"DetectorParams" : DetectorParams,
"AstrometricCalibration" : AstrometricCalibration }
"AstrometricCalibration" : AstrometricCalibration,
"TrapCalibration": TrapCalibration }

def autoload(filepath):
"""
Expand Down
12 changes: 10 additions & 2 deletions corgidrp/detector.py
Original file line number Diff line number Diff line change
Expand Up @@ -115,7 +115,9 @@ def get_relgains(frame, em_gain, non_lin_correction):
'prescan' : {
'rows': 1200,
'cols': 1088,
'r0c0': [0, 0]
'r0c0': [0, 0],
'col_start': 800,
'col_end': 1000,
},
'prescan_reliable' : {
'rows': 1200,
Expand Down Expand Up @@ -144,7 +146,9 @@ def get_relgains(frame, em_gain, non_lin_correction):
'prescan' : {
'rows': 2200,
'cols': 1088,
'r0c0': [0, 0]
'r0c0': [0, 0],
'col_start': 800,
'col_end': 1000,
},
'prescan_reliable' : {
'rows': 2200,
Expand Down Expand Up @@ -256,6 +260,8 @@ def slice_section(frame, obstype, key, detector_regions=None):
"""
Slice 2d section out of frame
Ported from II&T read_metadata.py
Args:
frame (np.ndarray): Full frame consistent with size given in frame_rows, frame_cols
obstype (str): Keyword referencing the observation type (e.g. 'ENG' or 'SCI')
Expand All @@ -276,6 +282,8 @@ def slice_section(frame, obstype, key, detector_regions=None):
raise Exception('Corners invalid. Tried to slice shape of {0} from {1} to {2} rows and {3} columns'.format(frame.shape, r0c0, rows, cols))
return section



def unpack_geom(obstype, key, detector_regions=None):
"""Safely check format of geom sub-dictionary and return values.
Expand Down
Loading

0 comments on commit 4736c55

Please sign in to comment.