From 3e2a099168d77ec390a43f867175111dd7aeed98 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Fri, 23 Aug 2019 23:17:03 +0200 Subject: [PATCH 001/101] Check that the logic for the offset keyword is appropriate Ticket #71. Documentation has been updated following tests. --- examples/irdis_imaging_reduction.py | 2 +- vltpf/IFS.py | 6 ++++-- vltpf/IRDIS/ImagingReduction.py | 3 ++- vltpf/toolbox.py | 6 ++++-- 4 files changed, 11 insertions(+), 6 deletions(-) diff --git a/examples/irdis_imaging_reduction.py b/examples/irdis_imaging_reduction.py index ddc598b..9d4c341 100644 --- a/examples/irdis_imaging_reduction.py +++ b/examples/irdis_imaging_reduction.py @@ -36,7 +36,7 @@ #%% high-level science processing reduction.sph_ird_star_center(high_pass=False, offset=(0, 0), display=False, save=True) -reduction.sph_ird_combine_data(cpix=True, psf_dim=200, science_dim=200, correct_anamorphism=True, +reduction.sph_ird_combine_data(cpix=True, psf_dim=80, science_dim=200, correct_anamorphism=True, shift_method='interp', manual_center=None, skip_center=False, save_scaled=False) diff --git a/vltpf/IFS.py b/vltpf/IFS.py index 8b8e9e5..c9ac2d7 100644 --- a/vltpf/IFS.py +++ b/vltpf/IFS.py @@ -2125,7 +2125,8 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), displ offset : tuple Apply an (x,y) offset to the default center position, for the waffle centering. - Default is no offset + The offset will move the search box of the waffle spots by the amount of + specified pixels in each direction. Default is no offset display : bool Display the fit of the satelitte spots. Default is False. @@ -2340,7 +2341,8 @@ def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), display=False, sav offset : tuple Apply an (x,y) offset to the default center position, for the waffle centering. - Default is no offset + The offset will move the search box of the waffle spots by the amount of + specified pixels in each direction. Default is no offset display : bool Display the fit of the satelitte spots diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py index d0b3ec0..99b4ab0 100644 --- a/vltpf/IRDIS/ImagingReduction.py +++ b/vltpf/IRDIS/ImagingReduction.py @@ -1132,7 +1132,8 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), display=False, sav offset : tuple Apply an (x,y) offset to the default center position, for the waffle centering. - Default is no offset + The offset will move the search box of the waffle spots by the amount of + specified pixels in each direction. Default is no offset display : bool Display the fit of the satelitte spots diff --git a/vltpf/toolbox.py b/vltpf/toolbox.py index 2a7d8dc..9a179e4 100644 --- a/vltpf/toolbox.py +++ b/vltpf/toolbox.py @@ -621,8 +621,10 @@ def star_centers_from_waffle_img_cube(cube, wave, instrument, waffle_orientation smoothing center_offset : tuple - Apply an (x,y) offset to the default center position. Default is no offset - + Apply an (x,y) offset to the default center position. The offset + will move the search box of the waffle spots by the amount of + specified pixels in each direction. Default is no offset + coro : bool Observation was performed with a coronagraph. 
Default is True From 02b836c735a0fa15ef857a800ca9ac78a3fdcec9 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Fri, 23 Aug 2019 23:26:55 +0200 Subject: [PATCH 002/101] Final unit adjustments for ticket #64 --- vltpf/toolbox.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/vltpf/toolbox.py b/vltpf/toolbox.py index 9a179e4..48f0cf2 100644 --- a/vltpf/toolbox.py +++ b/vltpf/toolbox.py @@ -439,7 +439,7 @@ def star_centers_from_PSF_img_cube(cube, wave, pixel, display=False, save_path=N # loop over images img_centers = np.zeros((nwave, 2)) for idx, (wave, img) in enumerate(zip(wave, cube)): - print(' wave {0:2d}/{1:2d} ({2:.1f} nm)'.format(idx+1, nwave, wave)) + print(' wave {0:2d}/{1:2d} ({2:.0f} nm)'.format(idx+1, nwave, wave)) # remove any NaN img = np.nan_to_num(img) @@ -472,7 +472,7 @@ def star_centers_from_PSF_img_cube(cube, wave, pixel, display=False, save_path=N ax.imshow(img/img.max(), aspect='equal', vmin=1e-6, vmax=1, norm=colors.LogNorm(), interpolation='nearest') ax.plot([cx_final], [cy_final], marker='D', color='red') ax.add_patch(patches.Rectangle((cx-box, cy-box), 2*box, 2*box, ec='white', fc='none')) - ax.set_title(r'Image #{0} - {1:.1f} nm'.format(idx+1, wave)) + ax.set_title(r'Image #{0} - {1:.0f} nm'.format(idx+1, wave)) ext = 1000 / pixel ax.set_xlim(cx_final-ext, cx_final+ext) @@ -693,7 +693,7 @@ def star_centers_from_waffle_img_cube(cube, wave, instrument, waffle_orientation spot_dist = np.zeros((nwave, 6)) img_centers = np.zeros((nwave, 2)) for idx, (wave, img) in enumerate(zip(wave, cube)): - print(' wave {0:2d}/{1:2d} ({2:.1f} nm)'.format(idx+1, nwave, wave)) + print(' wave {0:2d}/{1:2d} ({2:.0f} nm)'.format(idx+1, nwave, wave)) # remove any NaN img = np.nan_to_num(img) @@ -723,7 +723,7 @@ def star_centers_from_waffle_img_cube(cube, wave, instrument, waffle_orientation col = ['red', 'blue', 'magenta', 'purple'] ax = fig.add_subplot(111) ax.imshow(img/img.max(), aspect='equal', vmin=1e-2, vmax=1, norm=colors.LogNorm(), interpolation='nearest') - ax.set_title(r'Image #{0} - {1:.1f} nm'.format(idx+1, wave)) + ax.set_title(r'Image #{0} - {1:.0f} nm'.format(idx+1, wave)) # satelitte spots for s in range(4): From 43f90256841dfa28a75bf3bec2d35e9ef9668a91 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Fri, 23 Aug 2019 23:41:41 +0200 Subject: [PATCH 003/101] Name all figures to improve plotting Ticket #67 Also implement general improvements in the plotting routines --- vltpf/IFS.py | 53 +++++++++++++++++---------------- vltpf/IRDIS/SpectroReduction.py | 5 +++- vltpf/toolbox.py | 51 +++++++++++++++---------------- 3 files changed, 58 insertions(+), 51 deletions(-) diff --git a/vltpf/IFS.py b/vltpf/IFS.py index c9ac2d7..fe9f44e 100644 --- a/vltpf/IFS.py +++ b/vltpf/IFS.py @@ -315,6 +315,7 @@ def fit_peak(x, y, display=False): fit = fitter(g_init, x, y) if display: + plt.figure('Gaussian fit') plt.clf() plt.plot(x, y, color='k') plt.plot(x, fit(x), color='r') @@ -2291,33 +2292,35 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), displ # summary plot # if save or display: - fig = plt.figure(1, figsize=(17, 5.5)) + plt.figure('Wavelength recalibration', figsize=(17, 5.5)) plt.clf() - ax = fig.add_subplot(131) - ax.plot(img_center[:, 0], img_center[:, 1], linestyle='none', marker='+') - ax.set_xlabel('x center [pix]') - ax.set_ylabel('y center [pix]') - ax.set_xlim(img_center[:, 0].mean()+np.array([-3, 3])) - ax.set_ylim(img_center[:, 1].mean()+np.array([-3, 3])) - ax.set_title('Frames centers') - - ax = fig.add_subplot(132) 
- ax.plot(wave_scales, linestyle='dotted') - ax.plot(wave_scale, color='k', label='Mean') - ax.set_xlabel('Spectral channel index') - ax.set_ylabel('Scaling factor') - ax.set_title('Spectral scaling') - ax.legend(loc='upper left') - - ax = fig.add_subplot(133) - ax.plot(wave_drh, wave_flux, linestyle='dotted', color='k', label='Original') - ax.plot(wave_final, wave_flux, color='r', label='Recalibrated') + + plt.subplot(131) + plt.plot(img_center[:, 0], img_center[:, 1], linestyle='none', marker='+') + plt.xlabel('x center [pix]') + plt.ylabel('y center [pix]') + plt.xlim(img_center[:, 0].mean()+np.array([-3, 3])) + plt.ylim(img_center[:, 1].mean()+np.array([-3, 3])) + plt.title('Frames centers') + + plt.subplot(132) + plt.plot(wave_scales, linestyle='dotted') + plt.plot(wave_scale, color='k', label='Mean') + plt.xlabel('Spectral channel index') + plt.ylabel('Scaling factor') + plt.title('Spectral scaling') + plt.legend(loc='upper left') + + plt.subplot(133) + plt.plot(wave_drh, wave_flux, linestyle='dotted', color='k', label='Original') + plt.plot(wave_final, wave_flux, color='r', label='Recalibrated') for w in self._wave_cal_lasers: - ax.axvline(x=w, linestyle='dashed', color='purple') - ax.set_xlabel(r'Wavelength [nm]') - ax.set_ylabel('Flux') - ax.legend(loc='upper right') - ax.set_title('Wavelength calibration') + plt.axvline(x=w, linestyle='dashed', color='purple') + plt.xlabel(r'Wavelength [nm]') + plt.ylabel('Flux') + plt.legend(loc='upper right') + plt.title('Wavelength calibration') + plt.tight_layout() if display: diff --git a/vltpf/IRDIS/SpectroReduction.py b/vltpf/IRDIS/SpectroReduction.py index a1d134f..ed43468 100644 --- a/vltpf/IRDIS/SpectroReduction.py +++ b/vltpf/IRDIS/SpectroReduction.py @@ -1518,8 +1518,9 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, display=False, save # plot if save or display: - plt.figure(0, figsize=(10, 10)) + plt.figure('Wavelength recalibration', figsize=(10, 10)) plt.clf() + plt.subplot(211) plt.axvline(imin, color='k', linestyle='--') plt.plot(pix, wave, label='DRH', color='r', lw=3) @@ -1529,6 +1530,7 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, display=False, save plt.ylabel('Wavelength r[nm]') plt.title('Field #{}'.format(fidx)) plt.xlim(1024, 0) + plt.subplot(212) plt.axvline(imin, color='k', linestyle='--') plt.plot(pix, wave-wave_final_raw) @@ -1536,6 +1538,7 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, display=False, save plt.ylabel('Residuals r[nm]') plt.xlabel('Detector coordinate [pix]') plt.xlim(1024, 0) + plt.tight_layout() if save: diff --git a/vltpf/toolbox.py b/vltpf/toolbox.py index 48f0cf2..c36c649 100644 --- a/vltpf/toolbox.py +++ b/vltpf/toolbox.py @@ -465,18 +465,18 @@ def star_centers_from_PSF_img_cube(cube, wave, pixel, display=False, save_path=N img_centers[idx, 1] = cy_final if save_path or display: - fig = plt.figure(0, figsize=(8, 8)) + plt.figure('PSF center - imaging', figsize=(8, 8)) plt.clf() - ax = fig.add_subplot(111) - ax.imshow(img/img.max(), aspect='equal', vmin=1e-6, vmax=1, norm=colors.LogNorm(), interpolation='nearest') - ax.plot([cx_final], [cy_final], marker='D', color='red') - ax.add_patch(patches.Rectangle((cx-box, cy-box), 2*box, 2*box, ec='white', fc='none')) - ax.set_title(r'Image #{0} - {1:.0f} nm'.format(idx+1, wave)) + plt.subplot(111) + plt.imshow(img/img.max(), aspect='equal', vmin=1e-6, vmax=1, norm=colors.LogNorm(), interpolation='nearest') + plt.plot([cx_final], [cy_final], marker='D', color='red') + 
plt.gca().add_patch(patches.Rectangle((cx-box, cy-box), 2*box, 2*box, ec='white', fc='none')) + plt.title(r'Image #{0} - {1:.0f} nm'.format(idx+1, wave)) ext = 1000 / pixel - ax.set_xlim(cx_final-ext, cx_final+ext) - ax.set_ylim(cy_final-ext, cy_final+ext) + plt.xlim(cx_final-ext, cx_final+ext) + plt.ylim(cy_final-ext, cy_final+ext) plt.tight_layout() @@ -524,7 +524,7 @@ def star_centers_from_PSF_lss_cube(cube, wave_cube, pixel, display=False, save_p # prepare plot if save_path or display: - fig = plt.figure(0, figsize=(7, 12)) + plt.figure('PSF center - spectro', figsize=(7, 12)) plt.clf() # loop over fiels and wavelengths @@ -569,16 +569,16 @@ def star_centers_from_PSF_lss_cube(cube, wave_cube, pixel, display=False, save_p psf_centers[widx, fidx] = cx if save_path or display: - ax = fig.add_subplot(1, 2, fidx+1) + plt.subplot(1, 2, fidx+1) - ax.imshow(img/img.max(), aspect='equal', vmin=1e-3, vmax=1, norm=colors.LogNorm(), interpolation='nearest') - ax.plot(psf_centers[:, fidx], range(1024), marker='.', color='r', linestyle='none', ms=2, alpha=0.5) + plt.imshow(img/img.max(), aspect='equal', vmin=1e-3, vmax=1, norm=colors.LogNorm(), interpolation='nearest') + plt.plot(psf_centers[:, fidx], range(1024), marker='.', color='r', linestyle='none', ms=2, alpha=0.5) - ax.set_title(r'Field #{0}'.format(fidx+1)) + plt.title(r'Field #{0}'.format(fidx+1)) ext = 1000 / pixel - ax.set_xlim(cx_int-ext, cx_int+ext) - ax.set_ylim(0, 1024) + plt.xlim(cx_int-ext, cx_int+ext) + plt.ylim(0, 1024) if display: plt.tight_layout() @@ -718,8 +718,9 @@ def star_centers_from_waffle_img_cube(cube, wave, instrument, waffle_orientation # create plot if needed if save_path or display: - fig = plt.figure(0, figsize=(8, 8)) + fig = plt.figure('Waffle center - imaging', figsize=(8, 8)) plt.clf() + col = ['red', 'blue', 'magenta', 'purple'] ax = fig.add_subplot(111) ax.imshow(img/img.max(), aspect='equal', vmin=1e-2, vmax=1, norm=colors.LogNorm(), interpolation='nearest') @@ -866,7 +867,7 @@ def star_centers_from_waffle_lss_cube(cube_cen, cube_sci, wave_cube, centers, pi # prepare plot if save_path or display: - fig = plt.figure(0, figsize=(7, 12)) + plt.figure('Waffle centering - spectro', figsize=(7, 12)) plt.clf() # subtract science cube if provided @@ -927,17 +928,17 @@ def star_centers_from_waffle_lss_cube(cube_cen, cube_sci, wave_cube, centers, pi img_centers[widx, fidx] = (c0 + c1) / 2 if save_path or display: - ax = fig.add_subplot(1, 2, fidx+1) - ax.imshow(img/img.max(), aspect='equal', vmin=-1e-2, vmax=1e-2, interpolation='nearest') - ax.plot(spot_centers[:, fidx, 0], range(1024), marker='.', color='r', linestyle='none', ms=2, alpha=1) - ax.plot(spot_centers[:, fidx, 1], range(1024), marker='.', color='r', linestyle='none', ms=2, alpha=1) - ax.plot(img_centers[:, fidx], range(1024), marker='.', color='r', linestyle='none', ms=2, alpha=1) + plt.subplot(1, 2, fidx+1) + plt.imshow(img/img.max(), aspect='equal', vmin=-1e-2, vmax=1e-2, interpolation='nearest') + plt.plot(spot_centers[:, fidx, 0], range(1024), marker='.', color='r', linestyle='none', ms=2, alpha=1) + plt.plot(spot_centers[:, fidx, 1], range(1024), marker='.', color='r', linestyle='none', ms=2, alpha=1) + plt.plot(img_centers[:, fidx], range(1024), marker='.', color='r', linestyle='none', ms=2, alpha=1) - ax.set_title(r'Field #{0}'.format(fidx+1)) + plt.title(r'Field #{0}'.format(fidx+1)) ext = 1000 / pixel - ax.set_xlim(cx_int-ext, cx_int+ext) - ax.set_ylim(0, 1024) + plt.xlim(cx_int-ext, cx_int+ext) + plt.ylim(0, 1024) if display: plt.tight_layout() 
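A note on the pattern introduced by the patch above: matplotlib accepts a string as the figure identifier, so calling plt.figure() with the same label from inside a loop reuses the existing window instead of opening a new one for every image. A minimal standalone sketch of the behaviour (illustrative only, not part of the patch series; the figure label and data are arbitrary):

import matplotlib.pyplot as plt
import numpy as np

for idx in range(3):
    # same string label on every iteration => the same window is
    # reused and simply updated with the new image
    plt.figure('PSF center - imaging', figsize=(8, 8))
    plt.clf()
    plt.imshow(np.random.random((64, 64)), interpolation='nearest')
    plt.title('Image #{0}'.format(idx + 1))
    plt.tight_layout()
    plt.pause(1e-3)
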
From fb276d51a04e71c7b94a9200e18cff1dd983c024 Mon Sep 17 00:00:00 2001
From: Arthur Vigan
Date: Fri, 23 Aug 2019 23:44:30 +0200
Subject: [PATCH 004/101] Move sph_ifs_science_cubes() into the preprocessing

---
 examples/ifs_reduction.py | 2 +-
 vltpf/IFS.py              | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/examples/ifs_reduction.py b/examples/ifs_reduction.py
index 61c2deb..ad15072 100644
--- a/examples/ifs_reduction.py
+++ b/examples/ifs_reduction.py
@@ -33,9 +33,9 @@
                                      collapse_science=True, collapse_type='mean', coadd_value=2,
                                      collapse_psf=True, collapse_center=True)
 reduction.sph_ifs_preprocess_wave()
+reduction.sph_ifs_science_cubes(silent=True)
 
 #%% high-level science processing
-reduction.sph_ifs_science_cubes(silent=True)
 reduction.sph_ifs_wavelength_recalibration(high_pass=True, offset=(-5, 0), display=False, save=True)
 reduction.sph_ifs_star_center(high_pass=True, offset=(-5, 0), display=False, save=True)
 reduction.sph_ifs_combine_data(cpix=True, psf_dim=80, science_dim=200, correct_anamorphism=True,
                                shift_method='interp', manual_center=None, skip_center=False,
                                save_scaled=False)

diff --git a/vltpf/IFS.py b/vltpf/IFS.py
index fe9f44e..be86b69 100644
--- a/vltpf/IFS.py
+++ b/vltpf/IFS.py
@@ -567,6 +567,7 @@ def preprocess_science(self):
                                      collapse_psf=config['preproc_collapse_psf'],
                                      collapse_center=config['preproc_collapse_center'])
         self.sph_ifs_preprocess_wave()
+        self.sph_ifs_science_cubes(silent=config['silent'])
 
 
     def process_science(self):
@@ -577,7 +578,6 @@
 
         config = self._config
 
-        self.sph_ifs_science_cubes(silent=config['silent'])
         self.sph_ifs_wavelength_recalibration(high_pass=config['center_high_pass'],
                                               offset=config['center_offset'],
                                               display=config['center_display'],

From 31c7cabcc1466225c8a9fd513fea63ddb18a8291 Mon Sep 17 00:00:00 2001
From: Arthur Vigan
Date: Sat, 24 Aug 2019 10:13:55 +0200
Subject: [PATCH 005/101] Change organisation of the config files

Tickets #69 and #70
- change silent ==> misc_silent_esorex
- remove nwave in IRDIS
- remove center_display
- remove center_save
---
 examples/irdis_spectro_reduction.py |  6 ++---
 vltpf/IFS.py                        | 21 +++++++++--------
 vltpf/IRDIS/ImagingReduction.py     | 32 ++++++++++++++-----------
 vltpf/IRDIS/SpectroReduction.py     | 36 +++++++++++++++++------------
 vltpf/instruments/IFS.ini           |  6 ++---
 vltpf/instruments/IRDIS.ini         | 19 +++++++--------
 6 files changed, 65 insertions(+), 55 deletions(-)

diff --git a/examples/irdis_spectro_reduction.py b/examples/irdis_spectro_reduction.py
index 56b4fee..9cf8de4 100644
--- a/examples/irdis_spectro_reduction.py
+++ b/examples/irdis_spectro_reduction.py
@@ -23,9 +23,9 @@
 reduction.check_files_association()
 
 #%% static calibrations
-reduction.sph_ird_cal_dark()
-reduction.sph_ird_cal_detector_flat()
-reduction.sph_ird_wave_calib()
+reduction.sph_ird_cal_dark(silent=True)
+reduction.sph_ird_cal_detector_flat(silent=True)
+reduction.sph_ird_wave_calib(silent=True)
 
 #%% science pre-processing
 reduction.sph_ird_preprocess_science(subtract_background=True, fix_badpix=True,

diff --git a/vltpf/IFS.py b/vltpf/IFS.py
index be86b69..566a45e 100644
--- a/vltpf/IFS.py
+++ b/vltpf/IFS.py
@@ -394,6 +394,8 @@ def __init__(self, path):
         # instrument
         self._pixel = float(config.get('instrument', 'pixel'))
         self._nwave = int(config.get('instrument', 'nwave'))
+
+        # calibration
         self._wave_cal_lasers = [float(w) for w in config.get('calibration', 'wave_cal_lasers').split(',')]
 
         # reduction
@@ -490,11 +492,12 @@ def show_config(self):
         # dictionary
         dico = self._config
 
-        # silent parameter
+        # misc parameters
         print('{0:<30s}{1}'.format('Parameter', 'Value'))
         print('-'*35) 
- key = 'silent' - print('{0:<30s}{1}'.format(key, dico[key])) + keys = [key for key in dico if key.startswith('misc')] + for key in keys: + print('{0:<30s}{1}'.format(key, dico[key])) # pre-processing print('-'*35) @@ -544,11 +547,11 @@ def create_static_calibrations(self): config = self._config - self.sph_ifs_cal_dark(silent=config['silent']) - self.sph_ifs_cal_detector_flat(silent=config['silent']) - self.sph_ifs_cal_specpos(silent=config['silent']) - self.sph_ifs_cal_wave(silent=config['silent']) - self.sph_ifs_cal_ifu_flat(silent=config['silent']) + self.sph_ifs_cal_dark(silent=config['misc_silent_esorex']) + self.sph_ifs_cal_detector_flat(silent=config['misc_silent_esorex']) + self.sph_ifs_cal_specpos(silent=config['misc_silent_esorex']) + self.sph_ifs_cal_wave(silent=config['misc_silent_esorex']) + self.sph_ifs_cal_ifu_flat(silent=config['misc_silent_esorex']) def preprocess_science(self): @@ -567,7 +570,7 @@ def preprocess_science(self): collapse_psf=config['preproc_collapse_psf'], collapse_center=config['preproc_collapse_center']) self.sph_ifs_preprocess_wave() - self.sph_ifs_science_cubes(silent=config['silent']) + self.sph_ifs_science_cubes(silent=config['misc_silent_esorex']) def process_science(self): diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py index 99b4ab0..10389bf 100644 --- a/vltpf/IRDIS/ImagingReduction.py +++ b/vltpf/IRDIS/ImagingReduction.py @@ -83,17 +83,22 @@ def __init__(self, path): # instrument self._pixel = float(config.get('instrument', 'pixel')) - self._nwave = int(config.get('instrument', 'nwave')) + self._nwave = 2 + + # calibration self._wave_cal_lasers = [float(w) for w in config.get('calibration', 'wave_cal_lasers').split(',')] # reduction - self._config = dict(config.items('reduction-imaging')) - for key, value in self._config.items(): - try: - val = eval(value) - except NameError: - val = value - self._config[key] = val + self._config = {} + for group in ['reduction', 'reduction-spectro']: + items = dict(config.items(group)) + self._config.update(items) + for key, value in items.items(): + try: + val = eval(value) + except NameError: + val = value + self._config[key] = val except configparser.Error as e: raise ValueError('Error reading configuration file for instrument {0}: {1}'.format(self._instrument, e.message)) @@ -169,11 +174,12 @@ def show_config(self): # dictionary dico = self._config - # silent parameter + # misc parameters print('{0:<30s}{1}'.format('Parameter', 'Value')) print('-'*35) - key = 'silent' - print('{0:<30s}{1}'.format(key, dico[key])) + keys = [key for key in dico if key.startswith('misc')] + for key in keys: + print('{0:<30s}{1}'.format(key, dico[key])) # pre-processing print('-'*35) @@ -223,8 +229,8 @@ def create_static_calibrations(self): config = self._config - self.sph_ird_cal_dark(silent=config['silent']) - self.sph_ird_cal_detector_flat(silent=config['silent']) + self.sph_ird_cal_dark(silent=config['misc_silent_esorex']) + self.sph_ird_cal_detector_flat(silent=config['misc_silent_esorex']) def preprocess_science(self): diff --git a/vltpf/IRDIS/SpectroReduction.py b/vltpf/IRDIS/SpectroReduction.py index ed43468..cffcfd4 100644 --- a/vltpf/IRDIS/SpectroReduction.py +++ b/vltpf/IRDIS/SpectroReduction.py @@ -125,17 +125,22 @@ def __init__(self, path): # instrument self._pixel = float(config.get('instrument', 'pixel')) - self._nwave = int(config.get('instrument', 'nwave')) + self._nwave = -1 + + # calibration self._wave_cal_lasers = [float(w) for w in config.get('calibration', 
'wave_cal_lasers').split(',')] # reduction - self._config = dict(config.items('reduction-spectro')) - for key, value in self._config.items(): - try: - val = eval(value) - except NameError: - val = value - self._config[key] = val + self._config = {} + for group in ['reduction', 'reduction-spectro']: + items = dict(config.items(group)) + self._config.update(items) + for key, value in items.items(): + try: + val = eval(value) + except NameError: + val = value + self._config[key] = val except configparser.Error as e: raise ValueError('Error reading configuration file for instrument {0}: {1}'.format(self._instrument, e.message)) @@ -148,7 +153,7 @@ def __init__(self, path): 'sph_ifs_cal_detector_flat': False, 'sph_ird_wave_calib': False } - + # reload any existing data frames self.read_info() @@ -214,11 +219,12 @@ def show_config(self): # dictionary dico = self._config - # silent parameter + # misc parameters print('{0:<30s}{1}'.format('Parameter', 'Value')) print('-'*35) - key = 'silent' - print('{0:<30s}{1}'.format(key, dico[key])) + keys = [key for key in dico if key.startswith('misc')] + for key in keys: + print('{0:<30s}{1}'.format(key, dico[key])) # pre-processing print('-'*35) @@ -274,9 +280,9 @@ def create_static_calibrations(self): config = self._config - self.sph_ird_cal_dark(silent=config['silent']) - self.sph_ird_cal_detector_flat(silent=config['silent']) - self.sph_ird_wave_calib(silent=config['silent']) + self.sph_ird_cal_dark(silent=config['misc_silent_esorex']) + self.sph_ird_cal_detector_flat(silent=config['misc_silent_esorex']) + self.sph_ird_wave_calib(silent=config['misc_silent_esorex']) def preprocess_science(self): diff --git a/vltpf/instruments/IFS.ini b/vltpf/instruments/IFS.ini index 095640f..c22fddd 100644 --- a/vltpf/instruments/IFS.ini +++ b/vltpf/instruments/IFS.ini @@ -17,8 +17,8 @@ wave_cal_lasers = 987.72, 1123.71, 1309.37, 1545.07 # [reduction] -# silent esorex -silent = True +# misc options +misc_silent_esorex = True # pre-processing preproc_subtract_background = True @@ -33,8 +33,6 @@ preproc_collapse_center = True # center center_high_pass = False center_offset = (0, 0) -center_display = False -center_save = True # combine combine_cpix = True diff --git a/vltpf/instruments/IRDIS.ini b/vltpf/instruments/IRDIS.ini index f51fb76..edcb24c 100644 --- a/vltpf/instruments/IRDIS.ini +++ b/vltpf/instruments/IRDIS.ini @@ -4,7 +4,6 @@ [instrument] name = 'IRDIS' pixel = 12.25 -nwave = 2 # # calibration parameters @@ -12,14 +11,19 @@ nwave = 2 [calibration] wave_cal_lasers = 987.72, 1123.71, 1309.37, 1545.07, 1730.23, 2015.33 +# +# general reduction parameters +# +[reduction] + +# misc options +misc_silent_esorex = True + # # default reduction parameters for imaging # [reduction-imaging] -# silent esorex -silent = True - # pre-processing preproc_subtract_background = True preproc_fix_badpix = True @@ -32,8 +36,6 @@ preproc_collapse_center = True # center center_high_pass = False center_offset = (0, 0) -center_display = False -center_save = True # combine combine_cpix = True @@ -55,9 +57,6 @@ clean_delete_products = False # [reduction-spectro] -# silent esorex -silent = True - # pre-processing preproc_subtract_background = True preproc_fix_badpix = True @@ -67,8 +66,6 @@ preproc_collapse_center = True # center center_high_pass = False -center_display = False -center_save = True # wavelength calibration wave_fit_scaling = True From 0d06d47e421a7b2662d26609c595bed7a1d6b7d8 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Sat, 24 Aug 2019 11:39:55 +0200 Subject: [PATCH 
006/101] Simplify plotting option Ticket #69 Plotting now controlled by a single config keyword, misc_plot. All functions have been updated to use it --- examples/ifs_reduction.py | 4 +- examples/irdis_imaging_reduction.py | 2 +- examples/irdis_spectro_reduction.py | 4 +- vltpf/IFS.py | 44 ++++++------------ vltpf/IRDIS/ImagingReduction.py | 21 ++++----- vltpf/IRDIS/SpectroReduction.py | 46 +++++++------------ vltpf/instruments/IFS.ini | 1 + vltpf/instruments/IRDIS.ini | 1 + vltpf/toolbox.py | 71 ++++++++++------------------- 9 files changed, 71 insertions(+), 123 deletions(-) diff --git a/examples/ifs_reduction.py b/examples/ifs_reduction.py index ad15072..3f44099 100644 --- a/examples/ifs_reduction.py +++ b/examples/ifs_reduction.py @@ -36,8 +36,8 @@ reduction.sph_ifs_science_cubes(silent=True) #%% high-level science processing -reduction.sph_ifs_wavelength_recalibration(high_pass=True, offset=(-5, 0), display=False, save=True) -reduction.sph_ifs_star_center(high_pass=True, offset=(-5, 0), display=False, save=True) +reduction.sph_ifs_wavelength_recalibration(high_pass=True, offset=(-5, 0), plot=True) +reduction.sph_ifs_star_center(high_pass=True, offset=(-5, 0), plot=True) reduction.sph_ifs_combine_data(cpix=True, psf_dim=80, science_dim=200, correct_anamorphism=True, shift_method='interp', manual_center=None, skip_center=False, save_scaled=False) diff --git a/examples/irdis_imaging_reduction.py b/examples/irdis_imaging_reduction.py index 9d4c341..839ee17 100644 --- a/examples/irdis_imaging_reduction.py +++ b/examples/irdis_imaging_reduction.py @@ -35,7 +35,7 @@ collapse_psf=True, collapse_center=True) #%% high-level science processing -reduction.sph_ird_star_center(high_pass=False, offset=(0, 0), display=False, save=True) +reduction.sph_ird_star_center(high_pass=False, offset=(0, 0), plot=True) reduction.sph_ird_combine_data(cpix=True, psf_dim=80, science_dim=200, correct_anamorphism=True, shift_method='interp', manual_center=None, skip_center=False, save_scaled=False) diff --git a/examples/irdis_spectro_reduction.py b/examples/irdis_spectro_reduction.py index 9cf8de4..76feac4 100644 --- a/examples/irdis_spectro_reduction.py +++ b/examples/irdis_spectro_reduction.py @@ -33,8 +33,8 @@ collapse_center=True) #%% high-level science processing -reduction.sph_ird_star_center(high_pass=False, display=True, save=True) -reduction.sph_ird_wavelength_recalibration(fit_scaling=True, display=True, save=True) +reduction.sph_ird_star_center(high_pass=False, display=True, plot=True) +reduction.sph_ird_wavelength_recalibration(fit_scaling=True, plot=True) reduction.sph_ird_combine_data(cpix=True, psf_dim=80, science_dim=300, correct_mrs_chromatism=True, split_posang=True, shift_method='fft', manual_center=None, skip_center=False) diff --git a/vltpf/IFS.py b/vltpf/IFS.py index 566a45e..b76253b 100644 --- a/vltpf/IFS.py +++ b/vltpf/IFS.py @@ -583,12 +583,10 @@ def process_science(self): self.sph_ifs_wavelength_recalibration(high_pass=config['center_high_pass'], offset=config['center_offset'], - display=config['center_display'], - save=config['center_save']) + plot=config['misc_plot']) self.sph_ifs_star_center(high_pass=config['center_high_pass'], offset=config['center_offset'], - display=config['center_display'], - save=config['center_save']) + plot=config['misc_plot']) self.sph_ifs_combine_data(cpix=config['combine_cpix'], psf_dim=config['combine_psf_dim'], science_dim=config['combine_science_dim'], @@ -2112,7 +2110,7 @@ def sph_ifs_science_cubes(self, silent=True): 
self._recipe_execution['sph_ifs_science_cubes'] = True - def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), display=False, save=True): + def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot=True): '''Performs a recalibration of the wavelength, if star center frames are available @@ -2132,12 +2130,8 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), displ The offset will move the search box of the waffle spots by the amount of specified pixels in each direction. Default is no offset - display : bool - Display the fit of the satelitte spots. Default is False. - - save : bool - Save the fit of the sattelite spot for quality check. Default is True, - although it is a bit slow. + plot : bool + Display and save diagnostic plot for quality check. Default is True ''' @@ -2195,14 +2189,14 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), displ # compute centers from waffle spots waffle_orientation = hdr['HIERARCH ESO OCS WAFFLE ORIENT'] - if save: + if plot: save_path = os.path.join(path.products, fname+'spots_fitting.pdf') else: save_path = None spot_center, spot_dist, img_center \ = toolbox.star_centers_from_waffle_img_cube(cube, wave_drh, 'IFS', waffle_orientation, high_pass=high_pass, center_offset=offset, - coro=coro, display=display, save_path=save_path) + coro=coro, save_path=save_path) # final scaling wave_scales = spot_dist / np.full((nwave, 6), spot_dist[0]) @@ -2294,7 +2288,7 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), displ # # summary plot # - if save or display: + if plot: plt.figure('Wavelength recalibration', figsize=(17, 5.5)) plt.clf() @@ -2326,17 +2320,13 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), displ plt.tight_layout() - if display: - plt.pause(1e-3) - - if save: plt.savefig(os.path.join(path.products, 'wavelength_recalibration.pdf')) # update recipe execution self._recipe_execution['sph_ifs_wavelength_recalibration'] = True - def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), display=False, save=True): + def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), plot=True): '''Determines the star center for all frames where a center can be determined (OBJECT,CENTER and OBJECT,FLUX) @@ -2350,12 +2340,8 @@ def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), display=False, sav The offset will move the search box of the waffle spots by the amount of specified pixels in each direction. Default is no offset - display : bool - Display the fit of the satelitte spots - - save : bool - Save the fit of the sattelite spot for quality check. Default is True, - although it is a bit slow. + plot : bool + Display and save diagnostic plot for quality check. 
Default is True ''' @@ -2392,11 +2378,11 @@ def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), display=False, sav wave_drh = np.linspace(wave_min, wave_max, nwave) # centers - if save: + if plot: save_path = os.path.join(path.products, fname+'PSF_fitting.pdf') else: save_path = None - img_center = toolbox.star_centers_from_PSF_img_cube(cube, wave_drh, pixel, display=display, save_path=save_path) + img_center = toolbox.star_centers_from_PSF_img_cube(cube, wave_drh, pixel, save_path=save_path) # save fits.writeto(os.path.join(path.preproc, fname+'centers.fits'), img_center, overwrite=True) @@ -2420,14 +2406,14 @@ def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), display=False, sav # centers waffle_orientation = hdr['HIERARCH ESO OCS WAFFLE ORIENT'] - if save: + if plot: save_path = os.path.join(path.products, fname+'spots_fitting.pdf') else: save_path = None spot_center, spot_dist, img_center \ = toolbox.star_centers_from_waffle_img_cube(cube, wave_drh, 'IFS', waffle_orientation, high_pass=high_pass, center_offset=offset, - display=display, save_path=save_path) + save_path=save_path) # save fits.writeto(os.path.join(path.preproc, fname+'centers.fits'), img_center, overwrite=True) diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py index 10389bf..30dd51e 100644 --- a/vltpf/IRDIS/ImagingReduction.py +++ b/vltpf/IRDIS/ImagingReduction.py @@ -259,8 +259,7 @@ def process_science(self): self.sph_ird_star_center(high_pass=config['center_high_pass'], offset=config['center_offset'], - display=config['center_display'], - save=config['center_save']) + plot=config['misc_plot']) self.sph_ird_combine_data(cpix=config['combine_cpix'], psf_dim=config['combine_psf_dim'], science_dim=config['combine_science_dim'], @@ -1126,7 +1125,7 @@ def sph_ird_preprocess_science(self, self._recipe_execution['sph_ird_preprocess_science'] = True - def sph_ird_star_center(self, high_pass=False, offset=(0, 0), display=False, save=True): + def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): '''Determines the star center for all frames where a center can be determined (OBJECT,CENTER and OBJECT,FLUX) @@ -1141,12 +1140,8 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), display=False, sav The offset will move the search box of the waffle spots by the amount of specified pixels in each direction. Default is no offset - display : bool - Display the fit of the satelitte spots - - save : bool - Save the fit of the sattelite spot for quality check. Default is True, - although it is a bit slow. + plot : bool + Display and save diagnostic plot for quality check. 
Default is True ''' @@ -1177,11 +1172,11 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), display=False, sav cube, hdr = fits.getdata(files[0], header=True) # centers - if save: + if plot: save_path = os.path.join(path.products, fname+'_PSF_fitting.pdf') else: save_path = None - img_center = toolbox.star_centers_from_PSF_img_cube(cube, wave, pixel, display=display, save_path=save_path) + img_center = toolbox.star_centers_from_PSF_img_cube(cube, wave, pixel, save_path=save_path) # save fits.writeto(os.path.join(path.preproc, fname+'_centers.fits'), img_center, overwrite=True) @@ -1207,14 +1202,14 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), display=False, sav # centers waffle_orientation = hdr['HIERARCH ESO OCS WAFFLE ORIENT'] - if save: + if plot: save_path = os.path.join(path.products, fname+'_spots_fitting.pdf') else: save_path = None spot_center, spot_dist, img_center \ = toolbox.star_centers_from_waffle_img_cube(cube, wave, 'IRDIS', waffle_orientation, high_pass=high_pass, center_offset=offset, - coro=coro, display=display, save_path=save_path) + coro=coro, save_path=save_path) # save fits.writeto(os.path.join(path.preproc, fname+'_centers.fits'), img_center, overwrite=True) diff --git a/vltpf/IRDIS/SpectroReduction.py b/vltpf/IRDIS/SpectroReduction.py index cffcfd4..ae2cdb7 100644 --- a/vltpf/IRDIS/SpectroReduction.py +++ b/vltpf/IRDIS/SpectroReduction.py @@ -308,9 +308,9 @@ def process_science(self): config = self._config self.sph_ird_star_center(high_pass=config['center_high_pass'], - display=config['center_display'], - save=config['center_save']) - self.sph_ird_wavelength_recalibration(fit_scaling=config['wave_fit_scaling']) + plot=config['misc_plot']) + self.sph_ird_wavelength_recalibration(fit_scaling=config['wave_fit_scaling'], + plot=config['misc_plot']) self.sph_ird_combine_data(cpix=config['combine_cpix'], psf_dim=config['combine_psf_dim'], science_dim=config['combine_science_dim'], @@ -1290,7 +1290,7 @@ def sph_ird_preprocess_science(self, self._recipe_execution['sph_ird_preprocess_science'] = True - def sph_ird_star_center(self, high_pass=False, display=False, save=True): + def sph_ird_star_center(self, high_pass=False, plot=True): '''Determines the star center for all frames where a center can be determined (OBJECT,CENTER and OBJECT,FLUX) @@ -1300,12 +1300,8 @@ def sph_ird_star_center(self, high_pass=False, display=False, save=True): Apply high-pass filter to the image before searching for the satelitte spots. Default is False - display : bool - Display the fit of the satelitte spots - - save : bool - Save the fit of the sattelite spot for quality check. Default is True, - although it is a bit slow. + plot : bool + Display and save diagnostic plot for quality check. 
Default is True ''' @@ -1352,11 +1348,11 @@ def sph_ird_star_center(self, high_pass=False, display=False, save=True): cube, hdr = fits.getdata(files[0], header=True) # centers - if save: + if plot: save_path = os.path.join(path.products, fname+'_PSF_fitting.pdf') else: save_path = None - psf_center = toolbox.star_centers_from_PSF_lss_cube(cube, wave_lin, pixel, display=display, save_path=save_path) + psf_center = toolbox.star_centers_from_PSF_lss_cube(cube, wave_lin, pixel, save_path=save_path) # save fits.writeto(os.path.join(path.preproc, fname+'_centers.fits'), psf_center, overwrite=True) @@ -1384,14 +1380,13 @@ def sph_ird_star_center(self, high_pass=False, display=False, save=True): cube_sci = None # centers - if save: + if plot: save_path = os.path.join(path.products, fname+'_spots_fitting.pdf') else: save_path = None spot_centers, spot_dist, img_centers \ = toolbox.star_centers_from_waffle_lss_cube(cube_cen, cube_sci, wave_lin, centers, pixel, - high_pass=high_pass, display=display, - save_path=save_path) + high_pass=high_pass, save_path=save_path) # save fits.writeto(os.path.join(path.preproc, fname+'_centers.fits'), img_centers, overwrite=True) @@ -1402,7 +1397,7 @@ def sph_ird_star_center(self, high_pass=False, display=False, save=True): self._recipe_execution['sph_ird_star_center'] = True - def sph_ird_wavelength_recalibration(self, fit_scaling=True, display=False, save=True): + def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): '''Performs a recalibration of the wavelength, if star center frames are available. @@ -1419,12 +1414,8 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, display=False, save law. It helps removing high-frequency noise that can result from the waffle fitting. Default is True - display : bool - Display the result of the recalibration. Default is False. - - save : bool - Save the fit of the sattelite spot for quality check. Default is True, - although it is a bit slow. + plot : bool + Display and save diagnostic plot for quality check. 
Default is True ''' @@ -1435,7 +1426,6 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, display=False, save # parameters path = self._path - pixel = self._pixel lasers = self._wave_cal_lasers files_info = self._files_info frames_info = self._frames_info_preproc @@ -1475,7 +1465,7 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, display=False, save fname = '{0}_DIT{1:03d}_preproc_spot_distance'.format(starcen_files.index.values[0][0], starcen_files.index.values[0][1]) spot_dist = fits.getdata(os.path.join(path.preproc, fname+'.fits')) - if save: + if plot: pdf = PdfPages(os.path.join(path.products, 'wavelength_recalibration.pdf')) pix = np.arange(1024) @@ -1523,7 +1513,7 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, display=False, save use_f = '' # plot - if save or display: + if plot: plt.figure('Wavelength recalibration', figsize=(10, 10)) plt.clf() @@ -1547,13 +1537,9 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, display=False, save plt.tight_layout() - if save: pdf.savefig() - if display: - plt.pause(1e-3) - - if save: + if plot: pdf.close() # save diff --git a/vltpf/instruments/IFS.ini b/vltpf/instruments/IFS.ini index c22fddd..16a1e7a 100644 --- a/vltpf/instruments/IFS.ini +++ b/vltpf/instruments/IFS.ini @@ -19,6 +19,7 @@ wave_cal_lasers = 987.72, 1123.71, 1309.37, 1545.07 # misc options misc_silent_esorex = True +misc_plot = True # pre-processing preproc_subtract_background = True diff --git a/vltpf/instruments/IRDIS.ini b/vltpf/instruments/IRDIS.ini index edcb24c..0b5ee11 100644 --- a/vltpf/instruments/IRDIS.ini +++ b/vltpf/instruments/IRDIS.ini @@ -18,6 +18,7 @@ wave_cal_lasers = 987.72, 1123.71, 1309.37, 1545.07, 1730.23, 2015.33 # misc options misc_silent_esorex = True +misc_plot = True # # default reduction parameters for imaging diff --git a/vltpf/toolbox.py b/vltpf/toolbox.py index c36c649..d33747c 100644 --- a/vltpf/toolbox.py +++ b/vltpf/toolbox.py @@ -397,7 +397,7 @@ def lines_intersect(a1, a2, b1, b2): return (num / denom)*db + b1 -def star_centers_from_PSF_img_cube(cube, wave, pixel, display=False, save_path=None): +def star_centers_from_PSF_img_cube(cube, wave, pixel, save_path=None): ''' Compute star center from PSF images (IRDIS CI, IRDIS DBI, IFS) @@ -412,11 +412,9 @@ def star_centers_from_PSF_img_cube(cube, wave, pixel, display=False, save_path=N pixel : float Pixel scale, in mas/pixel - display : bool - Display the fit of the satelitte spots - save_path : str - Path where to save the fit images + Path where to save the fit images. 
Default is None, which means + that the plot is not produced Returns ------- @@ -464,7 +462,7 @@ def star_centers_from_PSF_img_cube(cube, wave, pixel, display=False, save_path=N img_centers[idx, 0] = cx_final img_centers[idx, 1] = cy_final - if save_path or display: + if save_path: plt.figure('PSF center - imaging', figsize=(8, 8)) plt.clf() @@ -480,11 +478,7 @@ def star_centers_from_PSF_img_cube(cube, wave, pixel, display=False, save_path=N plt.tight_layout() - if save_path: - pdf.savefig() - - if display: - plt.pause(1e-3) + pdf.savefig() if save_path: pdf.close() @@ -492,7 +486,7 @@ def star_centers_from_PSF_img_cube(cube, wave, pixel, display=False, save_path=N return img_centers -def star_centers_from_PSF_lss_cube(cube, wave_cube, pixel, display=False, save_path=None): +def star_centers_from_PSF_lss_cube(cube, wave_cube, pixel, save_path=None): ''' Compute star center from PSF LSS spectra (IRDIS LSS) @@ -507,12 +501,10 @@ def star_centers_from_PSF_lss_cube(cube, wave_cube, pixel, display=False, save_p pixel : float Pixel scale, in mas/pixel - display : bool - Display the fit of the satelitte spots - save_path : str - Path where to save the fit images - + Path where to save the fit images. Default is None, which means + that the plot is not produced + Returns ------- psf_centers : array_like @@ -523,7 +515,7 @@ def star_centers_from_PSF_lss_cube(cube, wave_cube, pixel, display=False, save_p box = 20 # prepare plot - if save_path or display: + if save_path: plt.figure('PSF center - spectro', figsize=(7, 12)) plt.clf() @@ -568,7 +560,7 @@ def star_centers_from_PSF_lss_cube(cube, wave_cube, pixel, display=False, save_p psf_centers[widx, fidx] = cx - if save_path or display: + if save_path: plt.subplot(1, 2, fidx+1) plt.imshow(img/img.max(), aspect='equal', vmin=1e-3, vmax=1, norm=colors.LogNorm(), interpolation='nearest') @@ -580,12 +572,9 @@ def star_centers_from_PSF_lss_cube(cube, wave_cube, pixel, display=False, save_p plt.xlim(cx_int-ext, cx_int+ext) plt.ylim(0, 1024) - if display: - plt.tight_layout() - plt.pause(1e-3) + plt.tight_layout() if save_path: - plt.tight_layout() plt.savefig(save_path) return psf_centers @@ -593,7 +582,7 @@ def star_centers_from_PSF_lss_cube(cube, wave_cube, pixel, display=False, save_p def star_centers_from_waffle_img_cube(cube, wave, instrument, waffle_orientation, high_pass=False, center_offset=(0, 0), smooth=0, - coro=True, display=False, save_path=None): + coro=True, save_path=None): ''' Compute star center from waffle images (IRDIS CI, IRDIS DBI, IFS) @@ -628,12 +617,10 @@ def star_centers_from_waffle_img_cube(cube, wave, instrument, waffle_orientation coro : bool Observation was performed with a coronagraph. Default is True - display : bool - Display the fit of the satelitte spots - save_path : str - Path where to save the fit images - + Path where to save the fit images. 
Default is None, which means + that the plot is not produced + Returns ------- spot_centers : array_like @@ -717,7 +704,7 @@ def star_centers_from_waffle_img_cube(cube, wave, instrument, waffle_orientation img *= mask # create plot if needed - if save_path or display: + if save_path: fig = plt.figure('Waffle center - imaging', figsize=(8, 8)) plt.clf() @@ -758,7 +745,7 @@ def star_centers_from_waffle_img_cube(cube, wave, instrument, waffle_orientation spot_centers[idx, s, 1] = cy_final # plot sattelite spots and fit - if save_path or display: + if save_path: ax.plot([cx_final], [cy_final], marker='D', color=col[s]) ax.add_patch(patches.Rectangle((cx-box, cy-box), 2*box, 2*box, ec='white', fc='none')) @@ -787,7 +774,7 @@ def star_centers_from_waffle_img_cube(cube, wave, instrument, waffle_orientation spot_dist[idx, 5] = np.sqrt(np.sum((spot_centers[idx, 2, :] - spot_centers[idx, 3, :])**2)) # finalize plot - if save_path or display: + if save_path: ax.plot([spot_centers[idx, 0, 0], spot_centers[idx, 2, 0]], [spot_centers[idx, 0, 1], spot_centers[idx, 2, 1]], color='w', linestyle='dashed') @@ -806,9 +793,6 @@ def star_centers_from_waffle_img_cube(cube, wave, instrument, waffle_orientation if save_path: pdf.savefig() - if display: - plt.pause(1e-3) - if save_path: pdf.close() @@ -816,7 +800,7 @@ def star_centers_from_waffle_img_cube(cube, wave, instrument, waffle_orientation def star_centers_from_waffle_lss_cube(cube_cen, cube_sci, wave_cube, centers, pixel, high_pass=False, - display=False, save_path=None): + save_path=None): ''' Compute star center from waffle LSS spectra (IRDIS LSS) @@ -841,11 +825,9 @@ def star_centers_from_waffle_lss_cube(cube_cen, cube_sci, wave_cube, centers, pi Apply high-pass filter to the image before searching for the satelitte spots. Default is False - display : bool - Display the fit of the satelitte spots - save_path : str - Path where to save the fit images + Path where to save the fit images. 
Default is None, which means + that the plot is not produced Returns ------- @@ -866,7 +848,7 @@ def star_centers_from_waffle_lss_cube(cube_cen, cube_sci, wave_cube, centers, pi nimg = len(cube_cen) # prepare plot - if save_path or display: + if save_path: plt.figure('Waffle centering - spectro', figsize=(7, 12)) plt.clf() @@ -927,7 +909,7 @@ def star_centers_from_waffle_lss_cube(cube_cen, cube_sci, wave_cube, centers, pi img_centers[widx, fidx] = (c0 + c1) / 2 - if save_path or display: + if save_path: plt.subplot(1, 2, fidx+1) plt.imshow(img/img.max(), aspect='equal', vmin=-1e-2, vmax=1e-2, interpolation='nearest') plt.plot(spot_centers[:, fidx, 0], range(1024), marker='.', color='r', linestyle='none', ms=2, alpha=1) @@ -940,12 +922,9 @@ def star_centers_from_waffle_lss_cube(cube_cen, cube_sci, wave_cube, centers, pi plt.xlim(cx_int-ext, cx_int+ext) plt.ylim(0, 1024) - if display: - plt.tight_layout() - plt.pause(1e-3) + plt.tight_layout() if save_path: - plt.tight_layout() plt.savefig(save_path) return spot_centers, spot_dist, img_centers From b45d83f8bcefe36f6ac156af56daf1b4f8e88c71 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Sat, 24 Aug 2019 11:45:35 +0200 Subject: [PATCH 007/101] Small improvement to code organization in examples --- examples/ifs_reduction.py | 14 +++++++++----- examples/irdis_imaging_reduction.py | 3 ++- examples/irdis_spectro_reduction.py | 3 ++- 3 files changed, 13 insertions(+), 7 deletions(-) diff --git a/examples/ifs_reduction.py b/examples/ifs_reduction.py index 3f44099..73ab795 100644 --- a/examples/ifs_reduction.py +++ b/examples/ifs_reduction.py @@ -5,12 +5,16 @@ # # full reduction # -# reduction.config['preproc_collapse_science'] = True -# reduction.config['preproc_collapse_type'] = 'coadd' -# reduction.config['preproc_coadd_value'] = 2 -# reduction.config['clean'] = True -# reduction.full_reduction() +#%% configuration +reduction.config['preproc_collapse_science'] = True +reduction.config['preproc_collapse_type'] = 'coadd' +reduction.config['preproc_coadd_value'] = 2 +reduction.config['clean'] = True +reduction.show_config() + +#%% reduction +reduction.full_reduction() # # manual reduction diff --git a/examples/irdis_imaging_reduction.py b/examples/irdis_imaging_reduction.py index 839ee17..3e23f1a 100644 --- a/examples/irdis_imaging_reduction.py +++ b/examples/irdis_imaging_reduction.py @@ -12,8 +12,9 @@ # reduction.config['combine_shift_method'] = 'fft' # reduction.config['preproc_collapse_science'] = True # reduction.config['preproc_collapse_type'] = 'mean' +# reduction.show_config() -#%% full reduction +#%% reduction # reduction.full_reduction() # diff --git a/examples/irdis_spectro_reduction.py b/examples/irdis_spectro_reduction.py index 76feac4..12699ee 100644 --- a/examples/irdis_spectro_reduction.py +++ b/examples/irdis_spectro_reduction.py @@ -9,8 +9,9 @@ #%% configuration # reduction.config['combine_science_dim'] = 300 # reduction.config['clean'] = True +# reduction.show_config() -#%% full reduction +#%% reduction # reduction.full_reduction() # From 488f3600323b22ffcb9e1afccf95dee20f72df04 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Sat, 24 Aug 2019 11:45:58 +0200 Subject: [PATCH 008/101] Better printing of configuration --- vltpf/IFS.py | 1 + vltpf/IRDIS/ImagingReduction.py | 1 + vltpf/IRDIS/SpectroReduction.py | 1 + 3 files changed, 3 insertions(+) diff --git a/vltpf/IFS.py b/vltpf/IFS.py index b76253b..a6e70d7 100644 --- a/vltpf/IFS.py +++ b/vltpf/IFS.py @@ -493,6 +493,7 @@ def show_config(self): dico = self._config # misc 
parameters
+        print()
         print('{0:<30s}{1}'.format('Parameter', 'Value'))
         print('-'*35)
         keys = [key for key in dico if key.startswith('misc')]
diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py
index 30dd51e..f5340aa 100644
--- a/vltpf/IRDIS/ImagingReduction.py
+++ b/vltpf/IRDIS/ImagingReduction.py
@@ -175,6 +175,7 @@ def show_config(self):
         dico = self._config
 
         # misc parameters
+        print()
         print('{0:<30s}{1}'.format('Parameter', 'Value'))
         print('-'*35)
         keys = [key for key in dico if key.startswith('misc')]
diff --git a/vltpf/IRDIS/SpectroReduction.py b/vltpf/IRDIS/SpectroReduction.py
index ae2cdb7..d9ea9f9 100644
--- a/vltpf/IRDIS/SpectroReduction.py
+++ b/vltpf/IRDIS/SpectroReduction.py
@@ -220,6 +220,7 @@ def show_config(self):
         dico = self._config
 
         # misc parameters
+        print()
         print('{0:<30s}{1}'.format('Parameter', 'Value'))
         print('-'*35)
         keys = [key for key in dico if key.startswith('misc')]

From 3ab4394dd25b71027b9831fb8447fc29a713b5b2 Mon Sep 17 00:00:00 2001
From: Arthur Vigan
Date: Sat, 24 Aug 2019 12:04:28 +0200
Subject: [PATCH 009/101] Fix reading of configuration

reduction-spectro group was read instead of reduction-imaging
---
 vltpf/IRDIS/ImagingReduction.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py
index f5340aa..5acb377 100644
--- a/vltpf/IRDIS/ImagingReduction.py
+++ b/vltpf/IRDIS/ImagingReduction.py
@@ -90,7 +90,7 @@ def __init__(self, path):
 
         # reduction
         self._config = {}
-        for group in ['reduction', 'reduction-spectro']:
+        for group in ['reduction', 'reduction-imaging']:
             items = dict(config.items(group))
             self._config.update(items)
             for key, value in items.items():

From add2a9c468bfdb8cfe54c069b1e67531164d704d Mon Sep 17 00:00:00 2001
From: Arthur Vigan
Date: Sat, 24 Aug 2019 12:13:55 +0200
Subject: [PATCH 010/101] Improve code organization of examples

---
 examples/ifs_reduction.py           |  5 +++--
 examples/irdis_imaging_reduction.py | 18 +++++++++---------
 examples/irdis_spectro_reduction.py | 12 ++++++------
 3 files changed, 18 insertions(+), 17 deletions(-)

diff --git a/examples/ifs_reduction.py b/examples/ifs_reduction.py
index 73ab795..093e39f 100644
--- a/examples/ifs_reduction.py
+++ b/examples/ifs_reduction.py
@@ -2,7 +2,7 @@
 
 reduction = IFS.Reduction('/Users/avigan/data/VLTPF-test-target/IFS/')
 
-#
+####################################################@
 # full reduction
 #
 
@@ -10,13 +10,14 @@
 reduction.config['preproc_collapse_science'] = True
 reduction.config['preproc_collapse_type'] = 'coadd'
 reduction.config['preproc_coadd_value'] = 2
+reduction.config['center_offset'] = (-5, 0)
 reduction.config['clean'] = True
 reduction.show_config()
 
 #%% reduction
 reduction.full_reduction()
 
-#
+####################################################@
 # manual reduction
 #
 
diff --git a/examples/irdis_imaging_reduction.py b/examples/irdis_imaging_reduction.py
index 3e23f1a..bbb29a2 100644
--- a/examples/irdis_imaging_reduction.py
+++ b/examples/irdis_imaging_reduction.py
@@ -2,22 +2,22 @@
 import vltpf.IRDIS as IRDIS
 
 reduction = IRDIS.ImagingReduction('/Users/avigan/data/VLTPF-test-target/IRD/DBI/')
 
-#
+####################################################@
 # full reduction
 #
 
 #%% configuration
-# reduction.config['combine_psf_dim'] = 80
-# reduction.config['combine_science_dim'] = 400
-# reduction.config['combine_shift_method'] = 'fft'
-# reduction.config['preproc_collapse_science'] = True
-# reduction.config['preproc_collapse_type'] = 'mean'
-# reduction.show_config()
+reduction.config['combine_psf_dim'] = 80 
+reduction.config['combine_science_dim'] = 400 +reduction.config['combine_shift_method'] = 'fft' +reduction.config['preproc_collapse_science'] = True +reduction.config['preproc_collapse_type'] = 'mean' +reduction.show_config() #%% reduction -# reduction.full_reduction() +reduction.full_reduction() -# +####################################################@ # manual reduction # diff --git a/examples/irdis_spectro_reduction.py b/examples/irdis_spectro_reduction.py index 12699ee..d25561a 100644 --- a/examples/irdis_spectro_reduction.py +++ b/examples/irdis_spectro_reduction.py @@ -2,19 +2,19 @@ reduction = IRDIS.SpectroReduction('/Users/avigan/data/VLTPF-test-target/IRD/LSS/') -# +####################################################@ # full reduction # #%% configuration -# reduction.config['combine_science_dim'] = 300 -# reduction.config['clean'] = True -# reduction.show_config() +reduction.config['combine_science_dim'] = 300 +reduction.config['clean'] = True +reduction.show_config() #%% reduction -# reduction.full_reduction() +reduction.full_reduction() -# +####################################################@ # manual reduction # From 7f5bd9dbb11a0fa5e7461d5f3479affcfc83ea07 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Sat, 24 Aug 2019 12:35:57 +0200 Subject: [PATCH 011/101] Add FIXME to make sure that we differentiate IRDIS modes Ticket #72 --- vltpf/SPHERE.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/vltpf/SPHERE.py b/vltpf/SPHERE.py index bf8445c..84befe0 100644 --- a/vltpf/SPHERE.py +++ b/vltpf/SPHERE.py @@ -455,7 +455,6 @@ def full_reduction(self): r.full_reduction() - ################################################## # Class methods ################################################## @@ -481,6 +480,8 @@ def _create_reductions(self): try: arm = hdr['HIERARCH ESO SEQ ARM'] if arm == 'IRDIS': + # FIXME: ticket #72. 
Make sure that we make a difference + # between imaging and spectro reductions instrument = 'IRDIS' reduction = IRDIS.ImagingReduction(reduction_path) self._IRDIS_reductions.append(reduction) From cb7e66d62bb3230e8f3718842871c6f3a7cd45e4 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Sat, 24 Aug 2019 12:36:57 +0200 Subject: [PATCH 012/101] Fix plotting problem in LSS functions Ticket #69 --- vltpf/toolbox.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/vltpf/toolbox.py b/vltpf/toolbox.py index d33747c..b9c0f4f 100644 --- a/vltpf/toolbox.py +++ b/vltpf/toolbox.py @@ -572,9 +572,9 @@ def star_centers_from_PSF_lss_cube(cube, wave_cube, pixel, save_path=None): plt.xlim(cx_int-ext, cx_int+ext) plt.ylim(0, 1024) - plt.tight_layout() if save_path: + plt.tight_layout() plt.savefig(save_path) return psf_centers @@ -922,9 +922,8 @@ def star_centers_from_waffle_lss_cube(cube_cen, cube_sci, wave_cube, centers, pi plt.xlim(cx_int-ext, cx_int+ext) plt.ylim(0, 1024) - plt.tight_layout() - if save_path: + plt.tight_layout() plt.savefig(save_path) return spot_centers, spot_dist, img_centers From 16864f3aa741a9d735afb405b992d40423964481 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Sat, 24 Aug 2019 12:37:30 +0200 Subject: [PATCH 013/101] Code improvement after testing new plot options Tickets #69 and #70 --- examples/ifs_reduction.py | 8 ++++++-- examples/irdis_imaging_reduction.py | 8 ++++++-- examples/irdis_spectro_reduction.py | 10 +++++++--- 3 files changed, 19 insertions(+), 7 deletions(-) diff --git a/examples/ifs_reduction.py b/examples/ifs_reduction.py index 093e39f..8eff4ca 100644 --- a/examples/ifs_reduction.py +++ b/examples/ifs_reduction.py @@ -1,11 +1,12 @@ import vltpf.IFS as IFS -reduction = IFS.Reduction('/Users/avigan/data/VLTPF-test-target/IFS/') - ####################################################@ # full reduction # +#%% init reduction +reduction = IFS.Reduction('/Users/avigan/data/VLTPF-test-target/IFS/') + #%% configuration reduction.config['preproc_collapse_science'] = True reduction.config['preproc_collapse_type'] = 'coadd' @@ -21,6 +22,9 @@ # manual reduction # +#%% init reduction +reduction = IFS.Reduction('/Users/avigan/data/VLTPF-test-target/IFS/') + #%% sorting reduction.sort_files() reduction.sort_frames() diff --git a/examples/irdis_imaging_reduction.py b/examples/irdis_imaging_reduction.py index bbb29a2..d647289 100644 --- a/examples/irdis_imaging_reduction.py +++ b/examples/irdis_imaging_reduction.py @@ -1,11 +1,12 @@ import vltpf.IRDIS as IRDIS -reduction = IRDIS.ImagingReduction('/Users/avigan/data/VLTPF-test-target/IRD/DBI/') - ####################################################@ # full reduction # +#%% init reduction +reduction = IRDIS.ImagingReduction('/Users/avigan/data/VLTPF-test-target/IRD/DBI/') + #%% configuration reduction.config['combine_psf_dim'] = 80 reduction.config['combine_science_dim'] = 400 @@ -21,6 +22,9 @@ # manual reduction # +#%% init reduction +reduction = IRDIS.ImagingReduction('/Users/avigan/data/VLTPF-test-target/IRD/DBI/') + #%% sorting reduction.sort_files() reduction.sort_frames() diff --git a/examples/irdis_spectro_reduction.py b/examples/irdis_spectro_reduction.py index d25561a..9bc3c3a 100644 --- a/examples/irdis_spectro_reduction.py +++ b/examples/irdis_spectro_reduction.py @@ -1,11 +1,12 @@ import vltpf.IRDIS as IRDIS -reduction = IRDIS.SpectroReduction('/Users/avigan/data/VLTPF-test-target/IRD/LSS/') - ####################################################@ # full reduction # +#%% init reduction +reduction 
= IRDIS.SpectroReduction('/Users/avigan/data/VLTPF-test-target/IRD/LSS/') + #%% configuration reduction.config['combine_science_dim'] = 300 reduction.config['clean'] = True @@ -18,6 +19,9 @@ # manual reduction # +#%% init reduction +reduction = IRDIS.SpectroReduction('/Users/avigan/data/VLTPF-test-target/IRD/LSS/') + #%% sorting reduction.sort_files() reduction.sort_frames() @@ -34,7 +38,7 @@ collapse_center=True) #%% high-level science processing -reduction.sph_ird_star_center(high_pass=False, display=True, plot=True) +reduction.sph_ird_star_center(high_pass=False, plot=True) reduction.sph_ird_wavelength_recalibration(fit_scaling=True, plot=True) reduction.sph_ird_combine_data(cpix=True, psf_dim=80, science_dim=300, correct_mrs_chromatism=True, split_posang=True, From e979ed5e27b865cef526f5d76323336214cff1d1 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Sat, 24 Aug 2019 16:07:04 +0200 Subject: [PATCH 014/101] Minor update to documentation --- vltpf/IFS.py | 2 +- vltpf/IRDIS/ImagingReduction.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/vltpf/IFS.py b/vltpf/IFS.py index a6e70d7..451424a 100644 --- a/vltpf/IFS.py +++ b/vltpf/IFS.py @@ -369,7 +369,7 @@ def __init__(self, path): Parameters ---------- path : str - Path to the directory containing the raw data + Path to the directory containing the dataset ''' # expand path diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py index 5acb377..81a9079 100644 --- a/vltpf/IRDIS/ImagingReduction.py +++ b/vltpf/IRDIS/ImagingReduction.py @@ -58,7 +58,7 @@ def __init__(self, path): Parameters ---------- path : str - Path to the directory containing the raw data + Path to the directory containing the dataset ''' From 87bbd82080fcc01a3ef28f0da12c753911f694c7 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Sat, 24 Aug 2019 16:07:47 +0200 Subject: [PATCH 015/101] Implement function to classify an IRDIS dataset Ticket #72 --- vltpf/SPHERE.py | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/vltpf/SPHERE.py b/vltpf/SPHERE.py index 84befe0..2cc64a5 100644 --- a/vltpf/SPHERE.py +++ b/vltpf/SPHERE.py @@ -6,6 +6,7 @@ import glob import shutil import math +import numpy as np import pandas as pd import xml.etree.ElementTree as etree @@ -293,6 +294,53 @@ def sort_files_from_fits(path, silent=True): for file in files: shutil.move(file, path_new) + +def classify_irdis_dataset(path): + '''Classify an IRDIS dataset based on the science files + + path : str + Path to the directory containing the dataset + + ''' + + # expand path + path = os.path.expanduser(os.path.join(path, '')) + + # zeroth-order reduction validation + raw = os.path.join(path, 'raw') + if not os.path.exists(raw): + raise ValueError('No raw/ subdirectory. 
{0} is not a valid reduction path!'.format(path)) + + # list all fits files + files = glob.glob(os.path.join(raw, '*.fits')) + if len(files) == 0: + return None + + # search for science files + modes = [] + for file in files: + hdr = fits.getheader(file) + + dpr_catg = hdr.get('HIERARCH ESO DPR CATG') + mode = hdr.get('HIERARCH ESO INS1 MODE') + + if dpr_catg == 'SCIENCE': + modes.append(mode) + + modes = np.array(modes) + + n_imaging = (modes == 'NIROBS').sum() + (modes == 'EXT').sum() + \ + (modes == 'DBI').sum() + (modes == 'CI').sum() + n_pola = (modes == 'DPI').sum() + n_spectro = (modes == 'LSS').sum() + + if (n_imaging >= n_pola) and (n_imaging >= n_spectro): + return 'imaging' + elif (n_pola >= n_imaging) and (n_pola >= n_spectro): + return 'pola' + else: + return 'spectro' + class Dataset: ''' From 87b6505706260d78d757647ff0963e58284a355e Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Sat, 24 Aug 2019 16:12:27 +0200 Subject: [PATCH 016/101] Example code to test SPHERE.Dataset() Ticket #72 --- examples/sphere_dataset.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 examples/sphere_dataset.py diff --git a/examples/sphere_dataset.py b/examples/sphere_dataset.py new file mode 100644 index 0000000..c92ff03 --- /dev/null +++ b/examples/sphere_dataset.py @@ -0,0 +1,13 @@ +import vltpf.SPHERE as SPHERE + +ds = SPHERE.Dataset('/Users/avigan/data/VLTPF-test-target/') + +print('IRDIS reductions:') +for red in ds.IRDIS_reductions: + print(red) +print() + +print('IFS reductions:') +for red in ds.IFS_reductions: + print(red) +print() From 40316f5e208087968c07cc414a43c76287b4fd5c Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Sat, 24 Aug 2019 16:15:21 +0200 Subject: [PATCH 017/101] Implement classification of IRDIS data sets Concludes implementation of ticket #72 --- vltpf/SPHERE.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/vltpf/SPHERE.py b/vltpf/SPHERE.py index 2cc64a5..d99b25d 100644 --- a/vltpf/SPHERE.py +++ b/vltpf/SPHERE.py @@ -528,10 +528,16 @@ def _create_reductions(self): try: arm = hdr['HIERARCH ESO SEQ ARM'] if arm == 'IRDIS': - # FIXME: ticket #72. 
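
# classify_irdis_dataset() above takes a majority vote over the INS1 MODE
# of the SCIENCE frames, with ties resolved in favour of imaging, then
# pola. The same counting logic condensed with collections.Counter; the
# mode-to-family mapping mirrors the patch, the rest is illustrative.
from collections import Counter

FAMILY = {'NIROBS': 'imaging', 'EXT': 'imaging', 'DBI': 'imaging',
          'CI': 'imaging', 'DPI': 'pola', 'LSS': 'spectro'}

def dominant_irdis_mode(modes):
    votes = Counter(FAMILY[m] for m in modes if m in FAMILY)
    if not votes:
        return None
    best = max(votes.values())
    # tie-breaking order matches the chained >= tests in the patch
    for candidate in ('imaging', 'pola', 'spectro'):
        if votes[candidate] == best:
            return candidate

print(dominant_irdis_mode(['DBI', 'DBI', 'DPI']))   # -> 'imaging'
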
Make sure that we make a difference
- # between imaging and spectro reductions
+ mode = classify_irdis_dataset(reduction_path)
+
 instrument = 'IRDIS'
- reduction = IRDIS.ImagingReduction(reduction_path)
+ if mode == 'imaging':
+ reduction = IRDIS.ImagingReduction(reduction_path)
+ elif mode == 'pola':
+ print('Warning: IRDIS DPI not supported yet')
+ elif mode == 'spectro':
+ reduction = IRDIS.SpectroReduction(reduction_path)
+
 self._IRDIS_reductions.append(reduction)
 elif arm == 'IFS':
 instrument = 'IFS'

From 357e869bbb94ad464b0283a9bb306a32d672999e Mon Sep 17 00:00:00 2001
From: Arthur Vigan
Date: Sat, 24 Aug 2019 16:34:29 +0200
Subject: [PATCH 018/101] Implement instrument mode in the different classes

Ticket #72
---
 vltpf/IFS.py                    | 15 ++++++++++++++-
 vltpf/IRDIS/ImagingReduction.py | 15 ++++++++++++++-
 vltpf/IRDIS/SpectroReduction.py | 15 ++++++++++++++-
 3 files changed, 42 insertions(+), 3 deletions(-)

diff --git a/vltpf/IFS.py b/vltpf/IFS.py
index 451424a..3993d99 100644
--- a/vltpf/IFS.py
+++ b/vltpf/IFS.py
@@ -384,6 +384,9 @@ def __init__(self, path):
 self._path = ReductionPath.Path(path)
 self._instrument = 'IFS'

+ # instrument mode
+ self._mode = 'Unknown'
+
 # configuration
 package_directory = os.path.dirname(os.path.abspath(__file__))
 configfile = os.path.join(package_directory, 'instruments', self._instrument+'.ini')
@@ -435,7 +438,7 @@ def __init__(self, path):
 ##################################################

 def __repr__(self):
- return ''.format(self._instrument, self._path)
+ return ''.format(self._instrument, self._mode, self._path)

 def __format__(self):
 return self.__repr__()
@@ -480,6 +483,10 @@ def recipe_execution(self):
 def config(self):
 return self._config

+ @property
+ def mode(self):
+ return self._mode
+
 ##################################################
 # Generic class methods
 ##################################################
@@ -665,6 +672,9 @@ def read_info(self):
 self._recipe_execution['sph_ifs_cal_wave'] = True
 if np.any(files_info['PRO CATG'] == 'IFS_IFU_FLAT_FIELD'):
 self._recipe_execution['sph_ifs_cal_ifu_flat'] = True
+
+ # update instrument mode
+ self._mode = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS2 MODE'][0]
 else:
 files_info = None

@@ -810,6 +820,9 @@ def sort_files(self):
 files_info['DATE'] = pd.to_datetime(files_info['DATE'], utc=False)
 files_info['DET FRAM UTC'] = pd.to_datetime(files_info['DET FRAM UTC'], utc=False)

+ # update instrument mode
+ self._mode = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS2 MODE'][0]
+
 # sort by acquisition time
 files_info.sort_values(by='DATE-OBS', inplace=True)

diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py
index 81a9079..c8b0737 100644
--- a/vltpf/IRDIS/ImagingReduction.py
+++ b/vltpf/IRDIS/ImagingReduction.py
@@ -74,6 +74,9 @@ def __init__(self, path):
 self._path = ReductionPath.Path(path)
 self._instrument = 'IRDIS'

+ # instrument mode
+ self._mode = 'Unknown'
+
 # configuration
 package_directory = os.path.dirname(os.path.abspath(vltpf.__file__))
 configfile = os.path.join(package_directory, 'instruments', self._instrument+'.ini')
@@ -117,7 +120,7 @@ def __init__(self, path):
 ##################################################

 def __repr__(self):
- return ''.format(self._instrument, self._path)
+ return ''.format(self._instrument, self._mode, self._path)

 def __format__(self):
 return self.__repr__()
@@ -162,6 +165,10 @@ def recipe_execution(self):
 def config(self):
 return self._config

+ @property
+ def mode(self):
+ return self._mode
+
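
# In the dispatch above, `reduction` is only bound by the imaging and
# spectro branches, so the unconditional append can raise a NameError (or
# re-append a stale object) for DPI data. A defensive sketch of the same
# dispatch; the classes are the package's own, the wrapper function is
# illustrative.
import vltpf.IRDIS as IRDIS

def create_irdis_reduction(mode, reduction_path):
    if mode == 'imaging':
        return IRDIS.ImagingReduction(reduction_path)
    elif mode == 'spectro':
        return IRDIS.SpectroReduction(reduction_path)
    elif mode == 'pola':
        print('Warning: IRDIS DPI not supported yet')
        return None
    raise ValueError('Unknown IRDIS mode {0}'.format(mode))

# usage sketch:
#   reduction = create_irdis_reduction(mode, reduction_path)
#   if reduction is not None:
#       self._IRDIS_reductions.append(reduction)
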
################################################## # Generic class methods ################################################## @@ -333,6 +340,9 @@ def read_info(self): self._recipe_execution['sph_ird_cal_dark'] = True if np.any(files_info['PRO CATG'] == 'IRD_FLAT_FIELD'): self._recipe_execution['sph_ird_cal_detector_flat'] = True + + # update instrument mode + self._mode = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS1 MODE'][0] else: files_info = None @@ -462,6 +472,9 @@ def sort_files(self): files_info['DATE'] = pd.to_datetime(files_info['DATE'], utc=False) files_info['DET FRAM UTC'] = pd.to_datetime(files_info['DET FRAM UTC'], utc=False) + # update instrument mode + self._mode = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS1 MODE'][0] + # sort by acquisition time files_info.sort_values(by='DATE-OBS', inplace=True) diff --git a/vltpf/IRDIS/SpectroReduction.py b/vltpf/IRDIS/SpectroReduction.py index d9ea9f9..3f530f9 100644 --- a/vltpf/IRDIS/SpectroReduction.py +++ b/vltpf/IRDIS/SpectroReduction.py @@ -116,6 +116,9 @@ def __init__(self, path): self._path = ReductionPath.Path(path) self._instrument = 'IRDIS' + # instrument mode + self._mode = 'Unknown' + # configuration package_directory = os.path.dirname(os.path.abspath(vltpf.__file__)) configfile = os.path.join(package_directory, 'instruments', self._instrument+'.ini') @@ -162,7 +165,7 @@ def __init__(self, path): ################################################## def __repr__(self): - return ''.format(self._instrument, self._path) + return ''.format(self._instrument, self._mode, self._path) def __format__(self): return self.__repr__() @@ -206,6 +209,10 @@ def recipe_execution(self): @property def config(self): return self._config + + @property + def mode(self): + return self._mode ################################################## # Generic class methods @@ -385,6 +392,9 @@ def read_info(self): self._recipe_execution['sph_ird_cal_detector_flat'] = True if np.any(files_info['PRO CATG'] == 'IRD_WAVECALIB'): self._recipe_execution['sph_ird_wave_calib'] = True + + # update instrument mode + self._mode = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS1 MODE'][0] else: files_info = None @@ -517,6 +527,9 @@ def sort_files(self): files_info['DATE'] = pd.to_datetime(files_info['DATE'], utc=False) files_info['DET FRAM UTC'] = pd.to_datetime(files_info['DET FRAM UTC'], utc=False) + # update instrument mode + self._mode = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS1 MODE'][0] + # sort by acquisition time files_info.sort_values(by='DATE-OBS', inplace=True) From cff06b930d3ba90c609885439f00b4bc636e8495 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Sat, 24 Aug 2019 16:59:05 +0200 Subject: [PATCH 019/101] Use pathlib.Path in ReductionPath class Ticket #62 --- vltpf/ReductionPath.py | 46 ++++++++++++++++++++++-------------------- 1 file changed, 24 insertions(+), 22 deletions(-) diff --git a/vltpf/ReductionPath.py b/vltpf/ReductionPath.py index b6ef3ff..4b5aa54 100644 --- a/vltpf/ReductionPath.py +++ b/vltpf/ReductionPath.py @@ -1,7 +1,9 @@ import os +from pathlib import Path -class Path(object): + +class ReductionPath(object): ''' Reduction path class @@ -13,15 +15,15 @@ class Path(object): ################################################## def __init__(self, path): - self._root = path + self._root = Path(path).expanduser() # update all subpaths - self._raw = os.path.join(self._root, 'raw/') - self._calib = os.path.join(self._root, 'calib/') - self._sof = os.path.join(self._root, 'sof/') - self._tmp = 
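
# The mode updates above end with `...loc[..., 'INS1 MODE'][0]`; Series[0]
# on a string-labelled index relies on pandas' positional fallback (now
# deprecated) and raises when no SCIENCE frame is present. A safer
# equivalent of the same lookup; the demo table is invented.
import pandas as pd

def science_mode(files_info, column='INS1 MODE'):
    modes = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', column]
    return modes.iloc[0] if len(modes) else 'Unknown'

files_info = pd.DataFrame({'DPR CATG': ['CALIB', 'SCIENCE'],
                           'INS1 MODE': ['', 'DBI']},
                          index=['SPHER.file1', 'SPHER.file2'])
print(science_mode(files_info))   # -> 'DBI'
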
os.path.join(self._root, 'tmp/') - self._preproc = os.path.join(self._root, 'preproc/') - self._products = os.path.join(self._root, 'products/') + self._raw = self._root / 'raw' + self._calib = self._root / 'calib' + self._sof = self._root / 'sof' + self._tmp = self._root / 'tmp' + self._preproc = self._root / 'preproc' + self._products = self._root / 'products' # create directories self.create_subdirectories() @@ -31,7 +33,7 @@ def __init__(self, path): ################################################## def __repr__(self): - return self._root + return str(self._root) ################################################## # Properties @@ -43,15 +45,15 @@ def root(self): @root.setter def root(self, path): - self._root = os.path.expanduser(path) + self._root = Path(path).expanduser() # update all subpaths - self._raw = os.path.join(self._root, 'raw/') - self._calib = os.path.join(self._root, 'calib/') - self._sof = os.path.join(self._root, 'sof/') - self._tmp = os.path.join(self._root, 'tmp/') - self._preproc = os.path.join(self._root, 'preproc/') - self._products = os.path.join(self._root, 'products/') + self._raw = self._root / 'raw' + self._calib = self._root / 'calib' + self._sof = self._root / 'sof' + self._tmp = self._root / 'tmp' + self._preproc = self._root / 'preproc' + self._products = self._root / 'products' # create directories self.create_subdirectories() @@ -86,21 +88,21 @@ def products(self): def create_subdirectories(self): # create sub-directories if needed - if not os.path.exists(self._raw): + if not self._raw.exists(): os.makedirs(self._raw) - if not os.path.exists(self._calib): + if not self._calib.exists(): os.makedirs(self._calib) - if not os.path.exists(self._sof): + if not self._sof.exists(): os.makedirs(self._sof) - if not os.path.exists(self._tmp): + if not self._tmp.exists(): os.makedirs(self._tmp) - if not os.path.exists(self._preproc): + if not self._preproc.exists(): os.makedirs(self._preproc) - if not os.path.exists(self._products): + if not self._products.exists(): os.makedirs(self._products) From 52379ef277164e494ccacb5f37a17b4dfbda3673 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Sat, 24 Aug 2019 17:34:07 +0200 Subject: [PATCH 020/101] Rename ReductionPath module --- vltpf/{ReductionPath.py => utils/reduction_path.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename vltpf/{ReductionPath.py => utils/reduction_path.py} (100%) diff --git a/vltpf/ReductionPath.py b/vltpf/utils/reduction_path.py similarity index 100% rename from vltpf/ReductionPath.py rename to vltpf/utils/reduction_path.py From 5777aed987c6eb689b4618ae71fbbb619188e457 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Sat, 24 Aug 2019 18:00:19 +0200 Subject: [PATCH 021/101] Improve ReductionPath class to use only pathlib --- vltpf/__init__.py | 3 --- vltpf/utils/__init__.py | 1 + vltpf/utils/reduction_path.py | 14 ++++++-------- 3 files changed, 7 insertions(+), 11 deletions(-) create mode 100644 vltpf/utils/__init__.py diff --git a/vltpf/__init__.py b/vltpf/__init__.py index e6e3331..563ee73 100644 --- a/vltpf/__init__.py +++ b/vltpf/__init__.py @@ -1,6 +1,3 @@ __author__ = 'avigan' __copyright__ = 'Copyright (C) 2017 Arthur Vigan' __license__ = 'MIT' - - - diff --git a/vltpf/utils/__init__.py b/vltpf/utils/__init__.py new file mode 100644 index 0000000..ba55058 --- /dev/null +++ b/vltpf/utils/__init__.py @@ -0,0 +1 @@ +from .reduction_path import ReductionPath diff --git a/vltpf/utils/reduction_path.py b/vltpf/utils/reduction_path.py index 4b5aa54..58fee33 100644 --- 
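
# The rewrite above swaps os.path string handling for pathlib.Path. The
# core idea in a self-contained sketch; the subdirectory names follow the
# class, MiniReductionPath itself is illustrative.
from pathlib import Path

class MiniReductionPath:
    SUBDIRS = ('raw', 'calib', 'sof', 'tmp', 'preproc', 'products')

    def __init__(self, root):
        self.root = Path(root).expanduser()
        for name in self.SUBDIRS:
            # Path objects compose with '/', replacing os.path.join()
            setattr(self, name, self.root / name)

paths = MiniReductionPath('~/data/target')
print(paths.preproc / 'files.csv')   # e.g. /home/user/data/target/preproc/files.csv
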
a/vltpf/utils/reduction_path.py +++ b/vltpf/utils/reduction_path.py @@ -1,5 +1,3 @@ -import os - from pathlib import Path @@ -89,20 +87,20 @@ def products(self): def create_subdirectories(self): # create sub-directories if needed if not self._raw.exists(): - os.makedirs(self._raw) + self._raw.mkdir(exist_ok=True) if not self._calib.exists(): - os.makedirs(self._calib) + self._calib.mkdir(exist_ok=True) if not self._sof.exists(): - os.makedirs(self._sof) + self._sof.mkdir(exist_ok=True) if not self._tmp.exists(): - os.makedirs(self._tmp) + self._tmp.mkdir(exist_ok=True) if not self._preproc.exists(): - os.makedirs(self._preproc) + self._preproc.mkdir(exist_ok=True) if not self._products.exists(): - os.makedirs(self._products) + self._products.mkdir(exist_ok=True) From 7c4f6273725b3c2851460e8bf9c24806211033f4 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Sat, 24 Aug 2019 18:32:59 +0200 Subject: [PATCH 022/101] Implement use of pathlib in all functions Ticket #62 --- vltpf/IRDIS/ImagingReduction.py | 153 +++++++++++++++----------------- 1 file changed, 73 insertions(+), 80 deletions(-) diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py index c8b0737..6989775 100644 --- a/vltpf/IRDIS/ImagingReduction.py +++ b/vltpf/IRDIS/ImagingReduction.py @@ -1,5 +1,3 @@ -import os -import glob import pandas as pd import subprocess import numpy as np @@ -13,15 +11,16 @@ import matplotlib.colors as colors import configparser +from pathlib import Path from astropy.io import fits from astropy.modeling import models, fitting from matplotlib.backends.backend_pdf import PdfPages import vltpf +import vltpf.utils as utils import vltpf.utils.imutils as imutils import vltpf.utils.aperture as aperture import vltpf.transmission as transmission -import vltpf.ReductionPath as ReductionPath import vltpf.toolbox as toolbox @@ -63,23 +62,22 @@ def __init__(self, path): ''' # expand path - path = os.path.expanduser(os.path.join(path, '')) + path = Path(path).expanduser().resolve() # zeroth-order reduction validation - raw = os.path.join(path, 'raw') - if not os.path.exists(raw): + raw = path / 'raw' + if not raw.exists(): raise ValueError('No raw/ subdirectory. 
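
# After the change above, every mkdir(exist_ok=True) is still wrapped in an
# exists() test. The guard is redundant: exist_ok=True already makes
# mkdir() a no-op on an existing directory, and parents=True also covers a
# missing root. An equivalent condensed sketch:
from pathlib import Path

def create_subdirectories(root):
    for name in ('raw', 'calib', 'sof', 'tmp', 'preproc', 'products'):
        (Path(root) / name).mkdir(parents=True, exist_ok=True)
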
{0} is not a valid reduction path!'.format(path)) # init path and name - self._path = ReductionPath.Path(path) + self._path = utils.ReductionPath(path) self._instrument = 'IRDIS' # instrument mode self._mode = 'Unknown' # configuration - package_directory = os.path.dirname(os.path.abspath(vltpf.__file__)) - configfile = os.path.join(package_directory, 'instruments', self._instrument+'.ini') + configfile = Path(vltpf.__file__).parent / 'instruments' / '{}.ini'.format(self._instrument) config = configparser.ConfigParser() try: config.read(configfile) @@ -325,8 +323,8 @@ def read_info(self): path = self._path # files info - fname = os.path.join(path.preproc, 'files.csv') - if os.path.exists(fname): + fname = path.preproc / 'files.csv' + if fname.exists(): files_info = pd.read_csv(fname, index_col=0) # convert times @@ -346,8 +344,8 @@ def read_info(self): else: files_info = None - fname = os.path.join(path.preproc, 'frames.csv') - if os.path.exists(fname): + fname = path.preproc / 'frames.csv' + if fname.exists(): frames_info = pd.read_csv(fname, index_col=(0, 1)) # convert times @@ -363,8 +361,8 @@ def read_info(self): else: frames_info = None - fname = os.path.join(path.preproc, 'frames_preproc.csv') - if os.path.exists(fname): + fname = path.preproc / 'frames_preproc.csv' + if fname.exists(): frames_info_preproc = pd.read_csv(fname, index_col=(0, 1)) # convert times @@ -388,7 +386,7 @@ def read_info(self): files = frames_info_preproc.index for file, idx in files: fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) - file = glob.glob(os.path.join(path.preproc, fname+'.fits')) + file = list(path.preproc.glob('{}.fits'.format(fname))) done = done and (len(file) == 1) self._recipe_execution['sph_ird_preprocess_science'] = done @@ -397,7 +395,7 @@ def read_info(self): (frames_info_preproc['DPR TYPE'] == 'OBJECT,CENTER')].index for file, idx in files: fname = '{0}_DIT{1:03d}_preproc_centers'.format(file, idx) - file = glob.glob(os.path.join(path.preproc, fname+'.fits')) + file = list(path.preproc.glob('{}.fits'.format(fname))) done = done and (len(file) == 1) self._recipe_execution['sph_ird_star_center'] = done @@ -416,8 +414,8 @@ def sort_files(self): path = self._path # list files - files = glob.glob(os.path.join(path.raw, '*.fits')) - files = [os.path.splitext(os.path.basename(f))[0] for f in files] + files = path.raw.glob('*.fits') + files = [f.stem for f in files] if len(files) == 0: raise ValueError('No raw FITS files in reduction path') @@ -425,9 +423,8 @@ def sort_files(self): print(' * found {0} FITS files in {1}'.format(len(files), path.raw)) # read list of keywords - package_directory = os.path.dirname(os.path.abspath(vltpf.__file__)) keywords = [] - file = open(os.path.join(package_directory, 'instruments', 'keywords.dat'), 'r') + file = open(Path(vltpf.__file__).parent / 'instruments' / 'keywords.dat', 'r') for line in file: line = line.strip() if line: @@ -446,7 +443,7 @@ def sort_files(self): files_info = pd.DataFrame(index=pd.Index(files, name='FILE'), columns=keywords_short, dtype='float') for f in files: - hdu = fits.open(os.path.join(path.raw, f+'.fits')) + hdu = fits.open(path.raw / '{}.fits'.format(f)) hdr = hdu[0].header for k, sk in zip(keywords, keywords_short): @@ -479,7 +476,7 @@ def sort_files(self): files_info.sort_values(by='DATE-OBS', inplace=True) # save files_info - files_info.to_csv(os.path.join(path.preproc, 'files.csv')) + files_info.to_csv(path.preproc / 'files.csv') self._files_info = files_info # update recipe execution @@ -533,7 +530,7 @@ def 
sort_frames(self): toolbox.compute_angles(frames_info) # save - frames_info.to_csv(os.path.join(path.preproc, 'frames.csv')) + frames_info.to_csv(path.preproc / 'frames.csv') self._frames_info = frames_info # update recipe execution @@ -708,10 +705,10 @@ def sph_ird_cal_dark(self, silent=True): print(' * {0} in filter {1} with DIT={2:.2f} sec ({3} files)'.format(ctype, cfilt, DIT, len(cfiles))) # create sof - sof = os.path.join(path.sof, 'dark_filt={0}_DIT={1:.2f}.sof'.format(cfilt, DIT)) + sof = path.sof / 'dark_filt={0}_DIT={1:.2f}.sof'.format(cfilt, DIT) file = open(sof, 'w') for f in files: - file.write('{0}{1}.fits {2}\n'.format(path.raw, f, 'IRD_DARK_RAW')) + file.write('{0}/{1}.fits {2}\n'.format(path.raw, f, 'IRD_DARK_RAW')) file.close() # products @@ -735,8 +732,8 @@ def sph_ird_cal_dark(self, silent=True): '--ird.master_dark.sigma_clip=5.0', '--ird.master_dark.save_addprod=TRUE', '--ird.master_dark.max_acceptable={0}'.format(max_level), - '--ird.master_dark.outfilename={0}{1}.fits'.format(path.calib, dark_file), - '--ird.master_dark.badpixfilename={0}{1}.fits'.format(path.calib, bpm_file), + '--ird.master_dark.outfilename={0}/{1}.fits'.format(path.calib, dark_file), + '--ird.master_dark.badpixfilename={0}/{1}.fits'.format(path.calib, bpm_file), sof] # check esorex @@ -776,7 +773,7 @@ def sph_ird_cal_dark(self, silent=True): files_info.loc[bpm_file, 'PRO CATG'] = 'IRD_STATIC_BADPIXELMAP' # save - files_info.to_csv(os.path.join(path.preproc, 'files.csv')) + files_info.to_csv(path.preproc / 'files.csv') # update recipe execution self._recipe_execution['sph_ird_cal_dark'] = True @@ -814,10 +811,10 @@ def sph_ird_cal_detector_flat(self, silent=True): print(' * filter {0} ({1} files)'.format(cfilt, len(cfiles))) # create sof - sof = os.path.join(path.sof, 'flat_filt={0}.sof'.format(cfilt)) + sof = path.sof / 'flat_filt={0}.sof'.format(cfilt) file = open(sof, 'w') for f in files: - file.write('{0}{1}.fits {2}\n'.format(path.raw, f, 'IRD_FLAT_FIELD_RAW')) + file.write('{0}/{1}.fits {2}\n'.format(path.raw, f, 'IRD_FLAT_FIELD_RAW')) file.close() # products @@ -830,8 +827,8 @@ def sph_ird_cal_detector_flat(self, silent=True): '--no-datamd5=TRUE', 'sph_ird_instrument_flat', '--ird.instrument_flat.save_addprod=TRUE', - '--ird.instrument_flat.outfilename={0}{1}.fits'.format(path.calib, flat_file), - '--ird.instrument_flat.badpixfilename={0}{1}.fits'.format(path.calib, bpm_file), + '--ird.instrument_flat.outfilename={0}/{1}.fits'.format(path.calib, flat_file), + '--ird.instrument_flat.badpixfilename={0}/{1}.fits'.format(path.calib, bpm_file), sof] # check esorex @@ -871,7 +868,7 @@ def sph_ird_cal_detector_flat(self, silent=True): files_info.loc[bpm_file, 'PRO CATG'] = 'IRD_NON_LINEAR_BADPIXELMAP' # save - files_info.to_csv(os.path.join(path.preproc, 'files.csv')) + files_info.to_csv(path.preproc / 'files.csv') # update recipe execution self._recipe_execution['sph_ird_cal_detector_flat'] = True @@ -941,9 +938,9 @@ def sph_ird_preprocess_science(self, frames_info = self._frames_info # clean before we start - files = glob.glob(os.path.join(path.preproc, '*_DIT???_preproc.fits')) + files = path.preproc.glob('*_DIT???_preproc.fits') for file in files: - os.remove(file) + file.unlink() # filter combination filter_comb = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS COMB IFLT'].unique()[0] @@ -952,7 +949,7 @@ def sph_ird_preprocess_science(self, if fix_badpix: bpm_files = files_info[(files_info['PRO CATG'] == 'IRD_STATIC_BADPIXELMAP') | (files_info['PRO CATG'] == 
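
# The esorex recipes above are driven by plain-text SOF files listing one
# "<path> <tag>" pair per line; with pathlib the '{0}/{1}.fits' string
# interpolation can become a single Path expression. Standalone sketch;
# the file names and the IRD_DARK_RAW tag are only examples.
from pathlib import Path

def write_sof(sof_file, raw_dir, stems, tag='IRD_DARK_RAW'):
    with Path(sof_file).open('w') as sof:
        for stem in stems:
            # str.format() renders the Path with native separators
            sof.write('{0} {1}\n'.format(Path(raw_dir) / (stem + '.fits'), tag))

write_sof('dark.sof', '/data/raw', ['SPHER.2019-08-24T10:00:00.000'])
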
'IRD_NON_LINEAR_BADPIXELMAP')].index - bpm_files = [os.path.join(path.calib, f+'.fits') for f in bpm_files] + bpm_files = [path.calib / '{}.fits'.format(f) for f in bpm_files] bpm = toolbox.compute_bad_pixel_map(bpm_files) @@ -968,7 +965,7 @@ def sph_ird_preprocess_science(self, (files_info['INS COMB IFLT'] == filter_comb)] if len(flat_file) != 1: raise ValueError('There should be exactly 1 flat file. Found {0}.'.format(len(flat_file))) - flat = fits.getdata(os.path.join(path.calib, flat_file.index[0]+'.fits')) + flat = fits.getdata(path.calib / '{}.fits'.format(flat_file.index[0])) # final dataframe index = pd.MultiIndex(names=['FILE', 'IMG'], levels=[[], []], codes=[[], []]) @@ -1006,7 +1003,7 @@ def sph_ird_preprocess_science(self, print('Warning: no background has been found. Pre-processing will continue but data quality will likely be affected') bkg = np.zeros((1024, 2048)) elif len(dfiles) == 1: - bkg = fits.getdata(os.path.join(path.calib, dfiles.index[0]+'.fits')) + bkg = fits.getdata(path.calib / '{}.fits'.format(dfiles.index[0])) elif len(dfiles) > 1: # FIXME: handle cases when multiple backgrounds are found? raise ValueError('Unexpected number of background files ({0})'.format(len(dfiles))) @@ -1020,7 +1017,7 @@ def sph_ird_preprocess_science(self, # read data print(' ==> read data') - img, hdr = fits.getdata(os.path.join(path.raw, fname+'.fits'), header=True) + img, hdr = fits.getdata(path.raw / '{}.fits'.format(fname), header=True) # add extra dimension to single images to make cubes if img.ndim == 2: @@ -1122,7 +1119,7 @@ def sph_ird_preprocess_science(self, for f in range(len(img)): frame = nimg[f, ...].squeeze() hdr['HIERARCH ESO DET NDIT'] = 1 - fits.writeto(os.path.join(path.preproc, fname+'_DIT{0:03d}_preproc.fits'.format(f)), frame, hdr, + fits.writeto(path.preproc / '{}_DIT{:03d}_preproc.fits'.format(fname, f), frame, hdr, overwrite=True, output_verify='silentfix') print() @@ -1131,7 +1128,7 @@ def sph_ird_preprocess_science(self, # sort and save final dataframe frames_info_preproc.sort_values(by='TIME', inplace=True) - frames_info_preproc.to_csv(os.path.join(path.preproc, 'frames_preproc.csv')) + frames_info_preproc.to_csv(path.preproc / 'frames_preproc.csv') self._frames_info_preproc = frames_info_preproc @@ -1182,18 +1179,17 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) - files = glob.glob(os.path.join(path.preproc, fname+'.fits')) - cube, hdr = fits.getdata(files[0], header=True) + cube, hdr = fits.getdata(path.preproc / '{}.fits'.format(fname), header=True) # centers if plot: - save_path = os.path.join(path.products, fname+'_PSF_fitting.pdf') + save_path = path.products / '{}_PSF_fitting.pdf'.format(fname) else: save_path = None img_center = toolbox.star_centers_from_PSF_img_cube(cube, wave, pixel, save_path=save_path) # save - fits.writeto(os.path.join(path.preproc, fname+'_centers.fits'), img_center, overwrite=True) + fits.writeto(path.preproc / '{}_centers.fits'.format(fname), img_center, overwrite=True) print() # then OBJECT,CENTER @@ -1204,8 +1200,7 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) - files = glob.glob(os.path.join(path.preproc, fname+'.fits')) - cube, hdr = fits.getdata(files[0], header=True) + cube, hdr = fits.getdata(path.preproc / '{}.fits'.format(fname), header=True) # coronagraph coro_name = starcen_files.loc[(file, idx), 'INS COMB ICOR'] @@ -1217,7 +1212,7 
@@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): # centers waffle_orientation = hdr['HIERARCH ESO OCS WAFFLE ORIENT'] if plot: - save_path = os.path.join(path.products, fname+'_spots_fitting.pdf') + save_path = path.products / '{}_spots_fitting.pdf'.format(fname) else: save_path = None spot_center, spot_dist, img_center \ @@ -1226,7 +1221,7 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): coro=coro, save_path=save_path) # save - fits.writeto(os.path.join(path.preproc, fname+'_centers.fits'), img_center, overwrite=True) + fits.writeto(path.preproc / '{}_centers.fits'.format(fname), img_center, overwrite=True) print() # update recipe execution @@ -1318,7 +1313,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a wave, bandwidth = transmission.wavelength_bandwidth_filter(filter_comb) wave = np.array(wave) - fits.writeto(os.path.join(path.products, 'wavelength.fits'), wave, overwrite=True) + fits.writeto(path.products / 'wavelength.fits', wave, overwrite=True) # max images size if psf_dim > 1024: @@ -1369,9 +1364,8 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) - files = glob.glob(os.path.join(path.preproc, fname+'.fits')) - cube = fits.getdata(files[0]) - centers = fits.getdata(os.path.join(path.preproc, fname+'_centers.fits')) + cube = fits.getdata(path.preproc / '{}.fits'.format(fname)) + centers = fits.getdata(path.preproc / '{}_centers.fits'.format(fname)) # neutral density ND = frames_info.loc[(file, idx), 'INS4 FILT2 NAME'] @@ -1407,12 +1401,12 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a psf_cube_scaled[wave_idx, file_idx] = imutils.scale(nimg, wave[0]/wave[wave_idx], method=shift_method) # save final cubes - flux_files.to_csv(os.path.join(path.products, 'psf_frames.csv')) - fits.writeto(os.path.join(path.products, 'psf_cube.fits'), psf_cube, overwrite=True) - fits.writeto(os.path.join(path.products, 'psf_parang.fits'), psf_parang, overwrite=True) - fits.writeto(os.path.join(path.products, 'psf_derot.fits'), psf_derot, overwrite=True) + flux_files.to_csv(path.products / 'psf_frames.csv') + fits.writeto(path.products / 'psf_cube.fits', psf_cube, overwrite=True) + fits.writeto(path.products / 'psf_parang.fits', psf_parang, overwrite=True) + fits.writeto(path.products / 'psf_derot.fits', psf_derot, overwrite=True) if save_scaled: - fits.writeto(os.path.join(path.products, 'psf_cube_scaled.fits'), psf_cube_scaled, overwrite=True) + fits.writeto(path.products / 'psf_cube_scaled.fits', psf_cube_scaled, overwrite=True) # delete big cubes del psf_cube @@ -1448,9 +1442,8 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) - files = glob.glob(os.path.join(path.preproc, fname+'.fits')) - cube = fits.getdata(files[0]) - centers = fits.getdata(os.path.join(path.preproc, fname+'_centers.fits')) + cube = fits.getdata(path.preproc / '{}.fits'.format(fname)) + centers = fits.getdata(path.preproc / '{}_centers.fits'.format(fname)) # neutral density ND = frames_info.loc[(file, idx), 'INS4 FILT2 NAME'] @@ -1489,12 +1482,12 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a cen_cube_scaled[wave_idx, file_idx] = imutils.scale(nimg, wave[0]/wave[wave_idx], method=shift_method) # save final cubes - starcen_files.to_csv(os.path.join(path.products, 
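
# Before this patch each pre-processed cube was located with glob() and a
# blind files[0]; building the path directly is simpler but fails deep
# inside astropy if the file is missing. A small guard keeps the failure
# explicit; the names mirror the patch, the check is an addition.
from pathlib import Path
from astropy.io import fits

def read_preproc_cube(preproc_dir, file, idx):
    fname = Path(preproc_dir) / '{0}_DIT{1:03d}_preproc.fits'.format(file, idx)
    if not fname.exists():
        raise FileNotFoundError('missing pre-processed frame {0}'.format(fname))
    return fits.getdata(fname, header=True)   # -> (cube, hdr)
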
'starcenter_frames.csv')) - fits.writeto(os.path.join(path.products, 'starcenter_cube.fits'), cen_cube, overwrite=True) - fits.writeto(os.path.join(path.products, 'starcenter_parang.fits'), cen_parang, overwrite=True) - fits.writeto(os.path.join(path.products, 'starcenter_derot.fits'), cen_derot, overwrite=True) + starcen_files.to_csv(path.products / 'starcenter_frames.csv') + fits.writeto(path.products / 'starcenter_cube.fits', cen_cube, overwrite=True) + fits.writeto(path.products / 'starcenter_parang.fits', cen_parang, overwrite=True) + fits.writeto(path.products / 'starcenter_derot.fits', cen_derot, overwrite=True) if save_scaled: - fits.writeto(os.path.join(path.products, 'starcenter_cube_scaled.fits'), cen_cube_scaled, overwrite=True) + fits.writeto(path.products / 'starcenter_cube_scaled.fits', cen_cube_scaled, overwrite=True) # delete big cubes del cen_cube @@ -1530,7 +1523,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a dms_dy_ref = 0 else: fname = '{0}_DIT{1:03d}_preproc_centers.fits'.format(starcen_files.index.values[0][0], starcen_files.index.values[0][1]) - centers = fits.getdata(os.path.join(path.preproc, fname)) + centers = fits.getdata(path.preproc / fname) # Dithering Motion Stage for star center: value is in micron, # and the pixel size is 18 micron @@ -1556,7 +1549,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) - files = glob.glob(os.path.join(path.preproc, fname+'*.fits')) + files = list(path.preproc.glob('{}*.fits'.format(fname))) cube = fits.getdata(files[0]) # neutral density @@ -1599,12 +1592,12 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a sci_cube_scaled[wave_idx, file_idx] = imutils.scale(nimg, wave[0]/wave[wave_idx], method=shift_method) # save final cubes - object_files.to_csv(os.path.join(path.products, 'science_frames.csv')) - fits.writeto(os.path.join(path.products, 'science_cube.fits'), sci_cube, overwrite=True) - fits.writeto(os.path.join(path.products, 'science_parang.fits'), sci_parang, overwrite=True) - fits.writeto(os.path.join(path.products, 'science_derot.fits'), sci_derot, overwrite=True) + object_files.to_csv(path.products / 'science_frames.csv') + fits.writeto(path.products / 'science_cube.fits', sci_cube, overwrite=True) + fits.writeto(path.products / 'science_parang.fits', sci_parang, overwrite=True) + fits.writeto(path.products / 'science_derot.fits', sci_derot, overwrite=True) if save_scaled: - fits.writeto(os.path.join(path.products, 'science_cube_scaled.fits'), sci_cube_scaled, overwrite=True) + fits.writeto(path.products / 'science_cube_scaled.fits', sci_cube_scaled, overwrite=True) # delete big cubes del sci_cube @@ -1634,27 +1627,27 @@ def sph_ird_clean(self, delete_raw=False, delete_products=False): path = self._path # tmp - if os.path.exists(path.tmp): + if path.tmp.exists(): shutil.rmtree(path.tmp, ignore_errors=True) # sof - if os.path.exists(path.sof): + if path.sof.exists(): shutil.rmtree(path.sof, ignore_errors=True) # calib - if os.path.exists(path.calib): + if path.calib.exists(): shutil.rmtree(path.calib, ignore_errors=True) # preproc - if os.path.exists(path.preproc): + if path.preproc.exists(): shutil.rmtree(path.preproc, ignore_errors=True) # raw if delete_raw: - if os.path.exists(path.raw): + if path.raw.exists(): shutil.rmtree(path.raw, ignore_errors=True) # products if delete_products: - if os.path.exists(path.products): + if 
path.products.exists(): shutil.rmtree(path.products, ignore_errors=True) From 3af46b35e8dbc0d346e1362a2951525eb3b6b766 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Sat, 24 Aug 2019 20:04:44 +0200 Subject: [PATCH 023/101] Implement use of pathlib in all functions Ticket #62 --- examples/irdis_spectro_reduction.py | 4 +- vltpf/IRDIS/SpectroReduction.py | 213 +++++++++++++--------------- 2 files changed, 104 insertions(+), 113 deletions(-) diff --git a/examples/irdis_spectro_reduction.py b/examples/irdis_spectro_reduction.py index 9bc3c3a..940ef41 100644 --- a/examples/irdis_spectro_reduction.py +++ b/examples/irdis_spectro_reduction.py @@ -33,14 +33,14 @@ reduction.sph_ird_wave_calib(silent=True) #%% science pre-processing -reduction.sph_ird_preprocess_science(subtract_background=True, fix_badpix=True, +reduction.sph_ird_preprocess_science(subtract_background=True, fix_badpix=True, collapse_science=True, collapse_psf=True, collapse_center=True) #%% high-level science processing reduction.sph_ird_star_center(high_pass=False, plot=True) reduction.sph_ird_wavelength_recalibration(fit_scaling=True, plot=True) -reduction.sph_ird_combine_data(cpix=True, psf_dim=80, science_dim=300, +reduction.sph_ird_combine_data(cpix=True, psf_dim=80, science_dim=300, correct_mrs_chromatism=True, split_posang=True, shift_method='fft', manual_center=None, skip_center=False) diff --git a/vltpf/IRDIS/SpectroReduction.py b/vltpf/IRDIS/SpectroReduction.py index 3f530f9..fb417a9 100644 --- a/vltpf/IRDIS/SpectroReduction.py +++ b/vltpf/IRDIS/SpectroReduction.py @@ -1,5 +1,3 @@ -import os -import glob import pandas as pd import subprocess import numpy as np @@ -13,15 +11,16 @@ import matplotlib.colors as colors import configparser +from pathlib import Path from astropy.io import fits from astropy.modeling import models, fitting from matplotlib.backends.backend_pdf import PdfPages import vltpf +import vltpf.utils as utils import vltpf.utils.imutils as imutils import vltpf.utils.aperture as aperture import vltpf.transmission as transmission -import vltpf.ReductionPath as ReductionPath import vltpf.toolbox as toolbox @@ -105,23 +104,22 @@ def __init__(self, path): ''' # expand path - path = os.path.expanduser(os.path.join(path, '')) + path = Path(path).expanduser().resolve() # zeroth-order reduction validation - raw = os.path.join(path, 'raw') - if not os.path.exists(raw): + raw = path / 'raw' + if not raw.exists(): raise ValueError('No raw/ subdirectory. 
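
# The clean-up code above passes Path objects straight to shutil.rmtree(),
# which accepts any path-like object; with ignore_errors=True the exists()
# guards are optional since a missing tree is silently ignored. Condensed
# sketch of the same clean-up:
import shutil
from pathlib import Path

def clean(root, delete_raw=False, delete_products=False):
    subdirs = ['tmp', 'sof', 'calib', 'preproc']
    subdirs += ['raw'] if delete_raw else []
    subdirs += ['products'] if delete_products else []
    for name in subdirs:
        shutil.rmtree(Path(root) / name, ignore_errors=True)
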
{0} is not a valid reduction path!'.format(path)) # init path and name - self._path = ReductionPath.Path(path) + self._path = utils.ReductionPath(path) self._instrument = 'IRDIS' # instrument mode self._mode = 'Unknown' # configuration - package_directory = os.path.dirname(os.path.abspath(vltpf.__file__)) - configfile = os.path.join(package_directory, 'instruments', self._instrument+'.ini') + configfile = Path(vltpf.__file__).parent / 'instruments' / '{}.ini'.format(self._instrument) config = configparser.ConfigParser() try: config.read(configfile) @@ -375,8 +373,8 @@ def read_info(self): path = self._path # files info - fname = os.path.join(path.preproc, 'files.csv') - if os.path.exists(fname): + fname = path.preproc / 'files.csv' + if fname.exists(): files_info = pd.read_csv(fname, index_col=0) # convert times @@ -398,8 +396,8 @@ def read_info(self): else: files_info = None - fname = os.path.join(path.preproc, 'frames.csv') - if os.path.exists(fname): + fname = path.preproc / 'frames.csv' + if fname.exists(): frames_info = pd.read_csv(fname, index_col=(0, 1)) # convert times @@ -415,8 +413,8 @@ def read_info(self): else: frames_info = None - fname = os.path.join(path.preproc, 'frames_preproc.csv') - if os.path.exists(fname): + fname = path.preproc / 'frames_preproc.csv' + if fname.exists(): frames_info_preproc = pd.read_csv(fname, index_col=(0, 1)) # convert times @@ -437,13 +435,13 @@ def read_info(self): # additional checks to update recipe execution if frames_info_preproc is not None: self._recipe_execution['sph_ird_wavelength_recalibration'] \ - = os.path.exists(os.path.join(path.preproc, 'wavelength_final.fits')) + = (path.preproc / 'wavelength_final.fits').exists() done = True files = frames_info_preproc.index for file, idx in files: fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) - file = glob.glob(os.path.join(path.preproc, fname+'.fits')) + file = list(path.preproc.glob('{}.fits'.format(fname))) done = done and (len(file) == 1) self._recipe_execution['sph_ird_preprocess_science'] = done @@ -452,7 +450,7 @@ def read_info(self): (frames_info_preproc['DPR TYPE'] == 'OBJECT,CENTER')].index for file, idx in files: fname = '{0}_DIT{1:03d}_preproc_centers'.format(file, idx) - file = glob.glob(os.path.join(path.preproc, fname+'.fits')) + file = list(path.preproc.glob('{}.fits'.format(fname))) done = done and (len(file) == 1) self._recipe_execution['sph_ird_star_center'] = done @@ -471,8 +469,8 @@ def sort_files(self): path = self._path # list files - files = glob.glob(os.path.join(path.raw, '*.fits')) - files = [os.path.splitext(os.path.basename(f))[0] for f in files] + files = path.raw.glob('*.fits') + files = [f.stem for f in files] if len(files) == 0: raise ValueError('No raw FITS files in reduction path') @@ -480,9 +478,8 @@ def sort_files(self): print(' * found {0} FITS files in {1}'.format(len(files), path.raw)) # read list of keywords - package_directory = os.path.dirname(os.path.abspath(vltpf.__file__)) keywords = [] - file = open(os.path.join(package_directory, 'instruments', 'keywords.dat'), 'r') + file = open(Path(vltpf.__file__).parent / 'instruments' / 'keywords.dat', 'r') for line in file: line = line.strip() if line: @@ -501,7 +498,7 @@ def sort_files(self): files_info = pd.DataFrame(index=pd.Index(files, name='FILE'), columns=keywords_short, dtype='float') for f in files: - hdu = fits.open(os.path.join(path.raw, f+'.fits')) + hdu = fits.open(path.raw / '{}.fits'.format(f)) hdr = hdu[0].header for k, sk in zip(keywords, keywords_short): @@ -534,7 +531,7 @@ def 
sort_files(self): files_info.sort_values(by='DATE-OBS', inplace=True) # save files_info - files_info.to_csv(os.path.join(path.preproc, 'files.csv')) + files_info.to_csv(path.preproc / 'files.csv') self._files_info = files_info # update recipe execution @@ -588,7 +585,7 @@ def sort_frames(self): toolbox.compute_angles(frames_info) # save - frames_info.to_csv(os.path.join(path.preproc, 'frames.csv')) + frames_info.to_csv(path.preproc / 'frames.csv') self._frames_info = frames_info # update recipe execution @@ -740,7 +737,7 @@ def check_files_association(self): raise ValueError('There is {0} errors that should be solved before proceeding'.format(error_flag)) # save - files_info.to_csv(os.path.join(path.preproc, 'files.csv')) + files_info.to_csv(path.preproc / 'files.csv') self._files_info = files_info @@ -788,10 +785,10 @@ def sph_ird_cal_dark(self, silent=True): print(' * {0} in filter {1} with DIT={2:.2f} sec ({3} files)'.format(ctype, cfilt, DIT, len(cfiles))) # create sof - sof = os.path.join(path.sof, 'dark_filt={0}_DIT={1:.2f}.sof'.format(cfilt, DIT)) + sof = path.sof / 'dark_filt={0}_DIT={1:.2f}.sof'.format(cfilt, DIT) file = open(sof, 'w') for f in files: - file.write('{0}{1}.fits {2}\n'.format(path.raw, f, 'IRD_DARK_RAW')) + file.write('{0}/{1}.fits {2}\n'.format(path.raw, f, 'IRD_DARK_RAW')) file.close() # products @@ -815,8 +812,8 @@ def sph_ird_cal_dark(self, silent=True): '--ird.master_dark.sigma_clip=5.0', '--ird.master_dark.save_addprod=TRUE', '--ird.master_dark.max_acceptable={0}'.format(max_level), - '--ird.master_dark.outfilename={0}{1}.fits'.format(path.calib, dark_file), - '--ird.master_dark.badpixfilename={0}{1}.fits'.format(path.calib, bpm_file), + '--ird.master_dark.outfilename={0}/{1}.fits'.format(path.calib, dark_file), + '--ird.master_dark.badpixfilename={0}/{1}.fits'.format(path.calib, bpm_file), sof] # check esorex @@ -856,7 +853,7 @@ def sph_ird_cal_dark(self, silent=True): files_info.loc[bpm_file, 'PRO CATG'] = 'IRD_STATIC_BADPIXELMAP' # save - files_info.to_csv(os.path.join(path.preproc, 'files.csv')) + files_info.to_csv(path.preproc / 'files.csv') # update recipe execution self._recipe_execution['sph_ird_cal_dark'] = True @@ -893,10 +890,10 @@ def sph_ird_cal_detector_flat(self, silent=True): print(' * filter {0} ({1} files)'.format(cfilt, len(cfiles))) # create sof - sof = os.path.join(path.sof, 'flat_filt={0}.sof'.format(cfilt)) + sof = path.sof / 'flat_filt={0}.sof'.format(cfilt) file = open(sof, 'w') for f in files: - file.write('{0}{1}.fits {2}\n'.format(path.raw, f, 'IRD_FLAT_FIELD_RAW')) + file.write('{0}/{1}.fits {2}\n'.format(path.raw, f, 'IRD_FLAT_FIELD_RAW')) file.close() # products @@ -909,8 +906,8 @@ def sph_ird_cal_detector_flat(self, silent=True): '--no-datamd5=TRUE', 'sph_ird_instrument_flat', '--ird.instrument_flat.save_addprod=TRUE', - '--ird.instrument_flat.outfilename={0}{1}.fits'.format(path.calib, flat_file), - '--ird.instrument_flat.badpixfilename={0}{1}.fits'.format(path.calib, bpm_file), + '--ird.instrument_flat.outfilename={0}/{1}.fits'.format(path.calib, flat_file), + '--ird.instrument_flat.badpixfilename={0}/{1}.fits'.format(path.calib, bpm_file), sof] # check esorex @@ -950,7 +947,7 @@ def sph_ird_cal_detector_flat(self, silent=True): files_info.loc[bpm_file, 'PRO CATG'] = 'IRD_NON_LINEAR_BADPIXELMAP' # save - files_info.to_csv(os.path.join(path.preproc, 'files.csv')) + files_info.to_csv(path.preproc / 'files.csv') # update recipe execution self._recipe_execution['sph_ird_cal_detector_flat'] = True @@ -1001,12 +998,12 @@ def 
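
# sort_files() above builds one pandas row per raw frame and one column per
# header keyword, with the 'HIERARCH ESO ' prefix stripped for the column
# names. The pattern in miniature; the keyword list is shortened and the
# paths are illustrative.
from pathlib import Path
import pandas as pd
from astropy.io import fits

def build_files_info(raw_dir, keywords=('DATE-OBS', 'HIERARCH ESO DPR CATG')):
    stems = [f.stem for f in Path(raw_dir).glob('*.fits')]
    short = [k.replace('HIERARCH ESO ', '') for k in keywords]
    info = pd.DataFrame(index=pd.Index(stems, name='FILE'), columns=short)
    for stem in stems:
        hdr = fits.getheader(Path(raw_dir) / (stem + '.fits'))
        for key, sk in zip(keywords, short):
            info.loc[stem, sk] = hdr.get(key)
    return info.sort_values(by='DATE-OBS')
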
sph_ird_wave_calib(self, silent=True): # esorex parameters if filter_comb == 'S_LR': # create standard sof in LRS - sof = os.path.join(path.sof, 'wave.sof') + sof = path.sof / 'wave.sof' file = open(sof, 'w') - file.write('{0}{1}.fits {2}\n'.format(path.raw, wave_file, 'IRD_WAVECALIB_RAW')) - file.write('{0}{1}.fits {2}\n'.format(path.calib, dark_file.index[0], 'IRD_MASTER_DARK')) - file.write('{0}{1}.fits {2}\n'.format(path.calib, flat_file.index[0], 'IRD_FLAT_FIELD')) - file.write('{0}{1}.fits {2}\n'.format(path.calib, bpm_file.index[0], 'IRD_STATIC_BADPIXELMAP')) + file.write('{0}/{1}.fits {2}\n'.format(path.raw, wave_file, 'IRD_WAVECALIB_RAW')) + file.write('{0}/{1}.fits {2}\n'.format(path.calib, dark_file.index[0], 'IRD_MASTER_DARK')) + file.write('{0}/{1}.fits {2}\n'.format(path.calib, flat_file.index[0], 'IRD_FLAT_FIELD')) + file.write('{0}/{1}.fits {2}\n'.format(path.calib, bpm_file.index[0], 'IRD_STATIC_BADPIXELMAP')) file.close() args = ['esorex', @@ -1017,24 +1014,24 @@ def sph_ird_wave_calib(self, silent=True): '--ird.wave_calib.grism_mode=FALSE', '--ird.wave_calib.threshold=2000', '--ird.wave_calib.number_lines=6', - '--ird.wave_calib.outfilename={0}{1}.fits'.format(path.calib, wav_file), + '--ird.wave_calib.outfilename={0}/{1}.fits'.format(path.calib, wav_file), sof] elif filter_comb == 'S_MR': # masking of second order spectrum in MRS wave_fname = wave_file.index[0] - wave_data, hdr = fits.getdata(os.path.join(path.raw, wave_fname+'.fits'), header=True) + wave_data, hdr = fits.getdata(path.raw / '{}.fits'.format(wave_fname), header=True) wave_data = wave_data.squeeze() wave_data[:60, :] = 0 - fits.writeto(os.path.join(path.preproc, wave_fname+'_masked.fits'), wave_data, hdr, overwrite=True, + fits.writeto(path.preproc / '{}_masked.fits'.format(wave_fname), wave_data, hdr, overwrite=True, output_verify='silentfix') # create sof using the masked file - sof = os.path.join(path.sof, 'wave.sof') + sof = path.sof / 'wave.sof' file = open(sof, 'w') - file.write('{0}{1}_masked.fits {2}\n'.format(path.preproc, wave_fname, 'IRD_WAVECALIB_RAW')) - file.write('{0}{1}.fits {2}\n'.format(path.calib, dark_file.index[0], 'IRD_MASTER_DARK')) - file.write('{0}{1}.fits {2}\n'.format(path.calib, flat_file.index[0], 'IRD_FLAT_FIELD')) - file.write('{0}{1}.fits {2}\n'.format(path.calib, bpm_file.index[0], 'IRD_STATIC_BADPIXELMAP')) + file.write('{0}/{1}_masked.fits {2}\n'.format(path.preproc, wave_fname, 'IRD_WAVECALIB_RAW')) + file.write('{0}/{1}.fits {2}\n'.format(path.calib, dark_file.index[0], 'IRD_MASTER_DARK')) + file.write('{0}/{1}.fits {2}\n'.format(path.calib, flat_file.index[0], 'IRD_FLAT_FIELD')) + file.write('{0}/{1}.fits {2}\n'.format(path.calib, bpm_file.index[0], 'IRD_STATIC_BADPIXELMAP')) file.close() args = ['esorex', @@ -1045,7 +1042,7 @@ def sph_ird_wave_calib(self, silent=True): '--ird.wave_calib.grism_mode=TRUE', '--ird.wave_calib.threshold=1000', '--ird.wave_calib.number_lines=5', - '--ird.wave_calib.outfilename={0}{1}.fits'.format(path.calib, wav_file), + '--ird.wave_calib.outfilename={0}/{1}.fits'.format(path.calib, wav_file), sof] # check esorex @@ -1075,7 +1072,7 @@ def sph_ird_wave_calib(self, silent=True): files_info.loc[wav_file, 'PRO CATG'] = 'IRD_WAVECALIB' # save - files_info.to_csv(os.path.join(path.preproc, 'files.csv')) + files_info.to_csv(path.preproc / 'files.csv') # update recipe execution self._recipe_execution['sph_ird_wave_calib'] = True @@ -1130,9 +1127,9 @@ def sph_ird_preprocess_science(self, frames_info = self._frames_info # clean before we start - 
files = glob.glob(os.path.join(path.preproc, '*_DIT???_preproc.fits')) + files = path.preproc.glob('*_DIT???_preproc.fits') for file in files: - os.remove(file) + file.unlink() # filter combination filter_comb = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS COMB IFLT'].unique()[0] @@ -1141,7 +1138,7 @@ def sph_ird_preprocess_science(self, if fix_badpix: bpm_files = files_info[(files_info['PRO CATG'] == 'IRD_STATIC_BADPIXELMAP') | (files_info['PRO CATG'] == 'IRD_NON_LINEAR_BADPIXELMAP')].index - bpm_files = [os.path.join(path.calib, f+'.fits') for f in bpm_files] + bpm_files = [path.calib / '{}.fits'.format(f) for f in bpm_files] bpm = toolbox.compute_bad_pixel_map(bpm_files) @@ -1157,7 +1154,7 @@ def sph_ird_preprocess_science(self, (files_info['INS COMB IFLT'] == filter_comb)] if len(flat_file) != 1: raise ValueError('There should be exactly 1 flat file. Found {0}.'.format(len(flat_file))) - flat = fits.getdata(os.path.join(path.calib, flat_file.index[0]+'.fits')) + flat = fits.getdata(path.calib / '{}.fits'.format(flat_file.index[0])) # final dataframe index = pd.MultiIndex(names=['FILE', 'IMG'], levels=[[], []], codes=[[], []]) @@ -1195,7 +1192,7 @@ def sph_ird_preprocess_science(self, print('Warning: no background has been found. Pre-processing will continue but data quality will likely be affected') bkg = np.zeros((1024, 2048)) elif len(dfiles) == 1: - bkg = fits.getdata(os.path.join(path.calib, dfiles.index[0]+'.fits')) + bkg = fits.getdata(path.calib / '{}.fits'.format(dfiles.index[0])) elif len(dfiles) > 1: # FIXME: handle cases when multiple backgrounds are found? raise ValueError('Unexpected number of background files ({0})'.format(len(dfiles))) @@ -1209,7 +1206,7 @@ def sph_ird_preprocess_science(self, # read data print(' ==> read data') - img, hdr = fits.getdata(os.path.join(path.raw, fname+'.fits'), header=True) + img, hdr = fits.getdata(path.raw / '{}.fits'.format(fname), header=True) # add extra dimension to single images to make cubes if img.ndim == 2: @@ -1287,7 +1284,7 @@ def sph_ird_preprocess_science(self, for f in range(len(img)): frame = nimg[f, ...].squeeze() hdr['HIERARCH ESO DET NDIT'] = 1 - fits.writeto(os.path.join(path.preproc, fname+'_DIT{0:03d}_preproc.fits'.format(f)), frame, hdr, + fits.writeto(path.preproc / '{}_DIT{:03d}_preproc.fits'.format(fname, f), frame, hdr, overwrite=True, output_verify='silentfix') print() @@ -1296,7 +1293,7 @@ def sph_ird_preprocess_science(self, # sort and save final dataframe frames_info_preproc.sort_values(by='TIME', inplace=True) - frames_info_preproc.to_csv(os.path.join(path.preproc, 'frames_preproc.csv')) + frames_info_preproc.to_csv(path.preproc / 'frames_preproc.csv') self._frames_info_preproc = frames_info_preproc @@ -1347,7 +1344,7 @@ def sph_ird_star_center(self, high_pass=False, plot=True): # wavelength map wave_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IRD_WAVECALIB')] - wave_calib = fits.getdata(os.path.join(path.calib, wave_file.index[0]+'.fits')) + wave_calib = fits.getdata(path.calib / '{}.fits'.format(wave_file.index[0])) wave_lin = get_wavelength_calibration(wave_calib, centers, wave_min, wave_max) # start with OBJECT,FLUX @@ -1358,18 +1355,17 @@ def sph_ird_star_center(self, high_pass=False, plot=True): # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) - files = glob.glob(os.path.join(path.preproc, fname+'.fits')) - cube, hdr = fits.getdata(files[0], header=True) + cube, hdr = fits.getdata(path.preproc / '{}.fits'.format(fname), header=True) # centers if 
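
# Pre-processing above first deletes stale *_DIT???_preproc.fits products;
# Path.glob() yields Path objects, so unlink() replaces os.remove()
# directly. Standalone equivalent:
from pathlib import Path

def remove_stale_preproc(preproc_dir, pattern='*_DIT???_preproc.fits'):
    for file in Path(preproc_dir).glob(pattern):
        file.unlink()   # safe here: glob() only yields existing files
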
plot: - save_path = os.path.join(path.products, fname+'_PSF_fitting.pdf') + save_path = path.products / '{}_PSF_fitting.pdf'.format(fname) else: save_path = None psf_center = toolbox.star_centers_from_PSF_lss_cube(cube, wave_lin, pixel, save_path=save_path) # save - fits.writeto(os.path.join(path.preproc, fname+'_centers.fits'), psf_center, overwrite=True) + fits.writeto(path.preproc / '{}_centers.fits'.format(fname), psf_center, overwrite=True) print() # then OBJECT,CENTER (if any) @@ -1382,20 +1378,18 @@ def sph_ird_star_center(self, high_pass=False, plot=True): # read center data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) - files = glob.glob(os.path.join(path.preproc, fname+'.fits')) - cube_cen, hdr = fits.getdata(files[0], header=True) + cube_cen, hdr = fits.getdata(path.preproc / '{}.fits'.format(fname), header=True) # read science data if len(starsci_files) != 0: fname2 = '{0}_DIT{1:03d}_preproc'.format(starsci_files.index[0][0], idx) - files2 = glob.glob(os.path.join(path.preproc, fname2+'.fits')) - cube_sci, hdr = fits.getdata(files2[0], header=True) + cube_sci, hdr = fits.getdata(path.preproc / '{}.fits'.format(fname2), header=True) else: cube_sci = None # centers if plot: - save_path = os.path.join(path.products, fname+'_spots_fitting.pdf') + save_path = path.products / '{}_spots_fitting.pdf'.format(fname) else: save_path = None spot_centers, spot_dist, img_centers \ @@ -1403,8 +1397,8 @@ def sph_ird_star_center(self, high_pass=False, plot=True): high_pass=high_pass, save_path=save_path) # save - fits.writeto(os.path.join(path.preproc, fname+'_centers.fits'), img_centers, overwrite=True) - fits.writeto(os.path.join(path.preproc, fname+'_spot_distance.fits'), spot_dist, overwrite=True) + fits.writeto(path.preproc / '{}_centers.fits'.format(fname), img_centers, overwrite=True) + fits.writeto(path.preproc / '{}_spot_distance.fits'.format(fname), spot_dist, overwrite=True) print() # update recipe execution @@ -1461,7 +1455,7 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): # wavelength map wave_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IRD_WAVECALIB')] - wave_calib = fits.getdata(os.path.join(path.calib, wave_file.index[0]+'.fits')) + wave_calib = fits.getdata(path.calib / '{}.fits'.format(wave_file.index[0])) wave_lin = get_wavelength_calibration(wave_calib, centers, wave_min, wave_max) # reference wavelength @@ -1473,14 +1467,14 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): if len(starcen_files) == 0: print(' ==> no OBJECT,CENTER file in the data set. Wavelength cannot be recalibrated. 
' + 'The standard wavelength calibrated by the ESO pripeline will be used.') - fits.writeto(os.path.join(path.preproc, 'wavelength_final.fits'), wave_lin, overwrite=True) + fits.writeto(path.preproc / 'wavelength_final.fits', wave_lin, overwrite=True) return fname = '{0}_DIT{1:03d}_preproc_spot_distance'.format(starcen_files.index.values[0][0], starcen_files.index.values[0][1]) - spot_dist = fits.getdata(os.path.join(path.preproc, fname+'.fits')) + spot_dist = fits.getdata(path.preproc / '{}.fits'.format(fname)) if plot: - pdf = PdfPages(os.path.join(path.products, 'wavelength_recalibration.pdf')) + pdf = PdfPages(path.products / 'wavelength_recalibration.pdf') pix = np.arange(1024) wave_final = np.zeros((1024, 2)) @@ -1558,7 +1552,7 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): # save print(' * saving') - fits.writeto(os.path.join(path.preproc, 'wavelength_final.fits'), wave_final, overwrite=True) + fits.writeto(path.preproc / 'wavelength_final.fits', wave_final, overwrite=True) # update recipe execution @@ -1664,7 +1658,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m # wavelength solution: make sure we have the same number of # wave points in each field - wave = fits.getdata(os.path.join(path.preproc, 'wavelength_final.fits')) + wave = fits.getdata(path.preproc / 'wavelength_final.fits') mask = ((wave_min <= wave) & (wave <= wave_max)) iwave0 = np.where(mask[:, 0])[0] iwave1 = np.where(mask[:, 1])[0] @@ -1678,7 +1672,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m final_wave[:, 0] = wave[iwave[:, 0], 0] final_wave[:, 1] = wave[iwave[:, 1], 1] - fits.writeto(os.path.join(path.products, 'wavelength.fits'), final_wave.squeeze().T, overwrite=True) + fits.writeto(path.products / 'wavelength.fits', final_wave.squeeze().T, overwrite=True) # max images size if psf_dim > 1024: @@ -1729,9 +1723,8 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) - files = glob.glob(os.path.join(path.preproc, fname+'.fits')) - cube = fits.getdata(files[0]) - centers = fits.getdata(os.path.join(path.preproc, fname+'_centers.fits')) + cube = fits.getdata(path.preproc / '{}.fits'.format(fname)) + centers = fits.getdata(path.preproc / '{}_centers.fits'.format(fname)) # DIT, angles, etc DIT = frames_info.loc[(file, idx), 'DET SEQ1 DIT'] @@ -1776,18 +1769,18 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m ii = np.where(psf_posang == pa)[0] # save metadata - flux_files[(flux_files['INS4 DROT2 POSANG'] + 90) == pa].to_csv(os.path.join(path.products, 'psf_posang={:06.2f}_frames.csv'.format(pa))) - fits.writeto(os.path.join(path.products, 'psf_posang={:06.2f}_posang.fits'.format(pa)), psf_posang[ii], overwrite=True) + flux_files[(flux_files['INS4 DROT2 POSANG'] + 90) == pa].to_csv(path.products / 'psf_posang={:06.2f}_frames.csv'.format(pa)) + fits.writeto(path.products / 'psf_posang={:06.2f}_posang.fits'.format(pa), psf_posang[ii], overwrite=True) # save final cubes - fits.writeto(os.path.join(path.products, 'psf_posang={:06.2f}_cube.fits'.format(pa)), psf_cube[:, ii], overwrite=True) + fits.writeto(path.products / 'psf_posang={:06.2f}_cube.fits'.format(pa), psf_cube[:, ii], overwrite=True) else: # save metadata - flux_files.to_csv(os.path.join(path.products, 'psf_posang=all_frames.csv')) - fits.writeto(os.path.join(path.products, 'psf_posang=all_posang.fits'), psf_posang, overwrite=True) + 
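
# The recalibration above collects one diagnostic page per fitted frame
# into a single PDF through matplotlib's PdfPages, which also accepts
# pathlib paths. Minimal multi-page sketch with dummy data:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
from pathlib import Path

with PdfPages(Path('wavelength_recalibration.pdf')) as pdf:
    for field in (0, 1):
        plt.figure()
        plt.plot(np.linspace(920, 1870, 1024), label='field {0}'.format(field))
        plt.legend()
        pdf.savefig()   # appends the current figure as a new page
        plt.close()
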
@@ -1776,18 +1769,18 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m
                 ii = np.where(psf_posang == pa)[0]
 
                 # save metadata
-                flux_files[(flux_files['INS4 DROT2 POSANG'] + 90) == pa].to_csv(os.path.join(path.products, 'psf_posang={:06.2f}_frames.csv'.format(pa)))
-                fits.writeto(os.path.join(path.products, 'psf_posang={:06.2f}_posang.fits'.format(pa)), psf_posang[ii], overwrite=True)
+                flux_files[(flux_files['INS4 DROT2 POSANG'] + 90) == pa].to_csv(path.products / 'psf_posang={:06.2f}_frames.csv'.format(pa))
+                fits.writeto(path.products / 'psf_posang={:06.2f}_posang.fits'.format(pa), psf_posang[ii], overwrite=True)
 
                 # save final cubes
-                fits.writeto(os.path.join(path.products, 'psf_posang={:06.2f}_cube.fits'.format(pa)), psf_cube[:, ii], overwrite=True)
+                fits.writeto(path.products / 'psf_posang={:06.2f}_cube.fits'.format(pa), psf_cube[:, ii], overwrite=True)
         else:
             # save metadata
-            flux_files.to_csv(os.path.join(path.products, 'psf_posang=all_frames.csv'))
-            fits.writeto(os.path.join(path.products, 'psf_posang=all_posang.fits'), psf_posang, overwrite=True)
+            flux_files.to_csv(path.products / 'psf_posang=all_frames.csv')
+            fits.writeto(path.products / 'psf_posang=all_posang.fits', psf_posang, overwrite=True)
 
             # save final cubes
-            fits.writeto(os.path.join(path.products, 'psf_posang=all_cube.fits'), psf_cube, overwrite=True)
+            fits.writeto(path.products / 'psf_posang=all_cube.fits', psf_cube, overwrite=True)
 
         # delete big cubes
         del psf_cube
@@ -1818,9 +1811,8 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m
 
             # read data
             fname = '{0}_DIT{1:03d}_preproc'.format(file, idx)
-            files = glob.glob(os.path.join(path.preproc, fname+'.fits'))
-            cube = fits.getdata(files[0])
-            centers = fits.getdata(os.path.join(path.preproc, fname+'_centers.fits'))
+            cube = fits.getdata(path.preproc / '{}.fits'.format(fname))
+            centers = fits.getdata(path.preproc / '{}_centers.fits'.format(fname))
 
             # DIT, angles, etc
             DIT = frames_info.loc[(file, idx), 'DET SEQ1 DIT']
@@ -1865,18 +1857,18 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m
                 ii = np.where(cen_posang == pa)[0]
 
                 # save metadata
-                starcen_files[(starcen_files['INS4 DROT2 POSANG'] + 90) == pa].to_csv(os.path.join(path.products, 'starcenter_posang={:06.2f}_frames.csv'.format(pa)))
-                fits.writeto(os.path.join(path.products, 'starcenter_posang={:06.2f}_posang.fits'.format(pa)), cen_posang[ii], overwrite=True)
+                starcen_files[(starcen_files['INS4 DROT2 POSANG'] + 90) == pa].to_csv(path.products / 'starcenter_posang={:06.2f}_frames.csv'.format(pa))
+                fits.writeto(path.products / 'starcenter_posang={:06.2f}_posang.fits'.format(pa), cen_posang[ii], overwrite=True)
 
                 # save final cubes
-                fits.writeto(os.path.join(path.products, 'starcenter_posang={:06.2f}_cube.fits'.format(pa)), cen_cube[:, ii], overwrite=True)
+                fits.writeto(path.products / 'starcenter_posang={:06.2f}_cube.fits'.format(pa), cen_cube[:, ii], overwrite=True)
         else:
             # save metadata
-            starcen_files.to_csv(os.path.join(path.products, 'starcenter_posang=all_frames.csv'))
-            fits.writeto(os.path.join(path.products, 'starcenter_posang=all_posang.fits'), cen_posang, overwrite=True)
+            starcen_files.to_csv(path.products / 'starcenter_posang=all_frames.csv')
+            fits.writeto(path.products / 'starcenter_posang=all_posang.fits', cen_posang, overwrite=True)
 
             # save final cubes
-            fits.writeto(os.path.join(path.products, 'starcenter_posang=all_cube.fits'), cen_cube, overwrite=True)
+            fits.writeto(path.products / 'starcenter_posang=all_cube.fits', cen_cube, overwrite=True)
 
         # delete big cubes
         del cen_cube
@@ -1910,7 +1902,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m
             centers = centers_default
         else:
             fname = '{0}_DIT{1:03d}_preproc_centers.fits'.format(starcen_files.index.values[0][0], starcen_files.index.values[0][1])
-            centers = fits.getdata(os.path.join(path.preproc, fname))
+            centers = fits.getdata(path.preproc / fname)
 
         # final center
         if cpix:
@@ -1924,8 +1916,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m
 
             # read data
             fname = '{0}_DIT{1:03d}_preproc'.format(file, idx)
-            files = glob.glob(os.path.join(path.preproc, fname+'.fits'))
-            cube = fits.getdata(files[0])
+            cube = fits.getdata(path.preproc / '{}.fits'.format(fname))
 
             # DIT, angles, etc
             DIT = frames_info.loc[(file, idx), 'DET SEQ1 DIT']
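
When a sequence mixes several derotator position angles, the combination step that follows splits the frames by the unique values of INS4 DROT2 POSANG (offset by 90 degrees) and writes one cube per angle, with the angle encoded in the product file name. A minimal sketch of that grouping, using illustrative angles rather than real header values:

    import numpy as np

    # illustrative position angles, one per frame (degrees)
    sci_posang = np.array([0.0, 0.0, 45.0, 45.0, 45.0])

    # one group of frame indices per unique position angle
    for pa in np.unique(sci_posang):
        ii = np.where(sci_posang == pa)[0]
        print('science_posang={:06.2f} -> frames {}'.format(pa, ii))
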
@@ -1970,18 +1961,18 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m
                 ii = np.where(sci_posang == pa)[0]
 
                 # save metadata
-                object_files[(object_files['INS4 DROT2 POSANG'] + 90) == pa].to_csv(os.path.join(path.products, 'science_posang={:06.2f}_frames.csv'.format(pa)))
-                fits.writeto(os.path.join(path.products, 'science_posang={:06.2f}_posang.fits'.format(pa)), sci_posang[ii], overwrite=True)
+                object_files[(object_files['INS4 DROT2 POSANG'] + 90) == pa].to_csv(path.products / 'science_posang={:06.2f}_frames.csv'.format(pa))
+                fits.writeto(path.products / 'science_posang={:06.2f}_posang.fits'.format(pa), sci_posang[ii], overwrite=True)
 
                 # save final cubes
-                fits.writeto(os.path.join(path.products, 'science_posang={:06.2f}_cube.fits'.format(pa)), sci_cube[:, ii], overwrite=True)
+                fits.writeto(path.products / 'science_posang={:06.2f}_cube.fits'.format(pa), sci_cube[:, ii], overwrite=True)
         else:
             # save metadata
-            object_files.to_csv(os.path.join(path.products, 'science_posang=all_frames.csv'))
-            fits.writeto(os.path.join(path.products, 'science_posang=all_posang.fits'), sci_posang, overwrite=True)
+            object_files.to_csv(path.products / 'science_posang=all_frames.csv')
+            fits.writeto(path.products / 'science_posang=all_posang.fits', sci_posang, overwrite=True)
 
             # save final cubes
-            fits.writeto(os.path.join(path.products, 'science_posang=all_cube.fits'), sci_cube, overwrite=True)
+            fits.writeto(path.products / 'science_posang=all_cube.fits', sci_cube, overwrite=True)
 
         # delete big cubes
         del sci_cube
@@ -2009,27 +2000,27 @@ def sph_ird_clean(self, delete_raw=False, delete_products=False):
         path = self._path
 
         # tmp
-        if os.path.exists(path.tmp):
+        if path.tmp.exists():
             shutil.rmtree(path.tmp, ignore_errors=True)
 
         # sof
-        if os.path.exists(path.sof):
+        if path.sof.exists():
             shutil.rmtree(path.sof, ignore_errors=True)
 
         # calib
-        if os.path.exists(path.calib):
+        if path.calib.exists():
             shutil.rmtree(path.calib, ignore_errors=True)
 
         # preproc
-        if os.path.exists(path.preproc):
+        if path.preproc.exists():
             shutil.rmtree(path.preproc, ignore_errors=True)
 
         # raw
         if delete_raw:
-            if os.path.exists(path.raw):
+            if path.raw.exists():
                 shutil.rmtree(path.raw, ignore_errors=True)
 
         # products
         if delete_products:
-            if os.path.exists(path.products):
+            if path.products.exists():
                 shutil.rmtree(path.products, ignore_errors=True)
 

From 5828b2d34aea1a64f740b350171d5e2b5d7e2800 Mon Sep 17 00:00:00 2001
From: Arthur Vigan
Date: Sat, 24 Aug 2019 21:45:41 +0200
Subject: [PATCH 024/101] Implement use of pathlib in all functions

Ticket #62
---
 vltpf/IFS.py | 271 +++++++++++++++++++++++++--------------------------
 1 file changed, 134 insertions(+), 137 deletions(-)

diff --git a/vltpf/IFS.py b/vltpf/IFS.py
index 3993d99..ea1c771 100644
--- a/vltpf/IFS.py
+++ b/vltpf/IFS.py
@@ -1,5 +1,3 @@
-import os
-import glob
 import pandas as pd
 import subprocess
 import numpy as np
@@ -13,14 +11,16 @@
 import matplotlib.colors as colors
 import configparser
 
+from pathlib import Path
 from astropy.io import fits
 from astropy.modeling import models, fitting
 from matplotlib.backends.backend_pdf import PdfPages
 
+import vltpf
+import vltpf.utils as utils
 import vltpf.utils.imutils as imutils
 import vltpf.utils.aperture as aperture
 import vltpf.transmission as transmission
-import vltpf.ReductionPath as ReductionPath
 import vltpf.toolbox as toolbox
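
The rest of this patch repeats a single conversion pattern: os.path and glob calls are replaced by pathlib.Path operations on the reduction sub-directories. A minimal sketch of the equivalences, with a hypothetical reduction root (the path is made up for illustration):

    from pathlib import Path

    # hypothetical reduction root; expanduser()/resolve() mirror the constructor
    root = Path('~/data/reduction').expanduser().resolve()

    raw = root / 'raw'                       # os.path.join(root, 'raw')
    if raw.exists():                         # os.path.exists(raw)
        files = sorted(raw.glob('*.fits'))   # glob.glob(os.path.join(raw, '*.fits'))
        names = [f.stem for f in files]      # os.path.splitext(os.path.basename(f))[0]
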
@@ -99,8 +99,7 @@ def compute_detector_flat(raw_flat_files, bpm_files=[], mask_vignetting=True):
     # apply IFU mask to avoid "edge effects" in the final images,
     # where the lenslets are vignetted
     if mask_vignetting:
-        package_directory = os.path.dirname(os.path.abspath(__file__))
-        ifu_mask = fits.getdata(os.path.join(package_directory, 'data', 'ifu_mask.fits'))
+        ifu_mask = fits.getdata(Path(vltpf.__file__).parent / 'data' / 'ifu_mask.fits')
         flat[ifu_mask == 0] = 1
 
     return flat, bpm
@@ -373,23 +372,22 @@ def __init__(self, path):
         '''
 
         # expand path
-        path = os.path.expanduser(os.path.join(path, ''))
+        path = Path(path).expanduser().resolve()
 
         # zeroth-order reduction validation
-        raw = os.path.join(path, 'raw')
-        if not os.path.exists(raw):
+        raw = path / 'raw'
+        if not raw.exists():
             raise ValueError('No raw/ subdirectory. {0} is not a valid reduction path!'.format(path))
 
         # init path and name
-        self._path = ReductionPath.Path(path)
+        self._path = utils.ReductionPath(path)
         self._instrument = 'IFS'
 
         # instrument mode
         self._mode = 'Unknown'
 
         # configuration
-        package_directory = os.path.dirname(os.path.abspath(__file__))
-        configfile = os.path.join(package_directory, 'instruments', self._instrument+'.ini')
+        configfile = Path(vltpf.__file__).parent / 'instruments' / '{}.ini'.format(self._instrument)
         config = configparser.ConfigParser()
         try:
             config.read(configfile)
@@ -651,8 +649,8 @@ def read_info(self):
         path = self._path
 
         # files info
-        fname = os.path.join(path.preproc, 'files.csv')
-        if os.path.exists(fname):
+        fname = path.preproc / 'files.csv'
+        if fname.exists():
             files_info = pd.read_csv(fname, index_col=0)
 
             # convert times
@@ -678,8 +676,8 @@ def read_info(self):
         else:
             files_info = None
 
-        fname = os.path.join(path.preproc, 'frames.csv')
-        if os.path.exists(fname):
+        fname = path.preproc / 'frames.csv'
+        if fname.exists():
             frames_info = pd.read_csv(fname, index_col=(0, 1))
 
             # convert times
@@ -695,8 +693,8 @@ def read_info(self):
         else:
             frames_info = None
 
-        fname = os.path.join(path.preproc, 'frames_preproc.csv')
-        if os.path.exists(fname):
+        fname = path.preproc / 'frames_preproc.csv'
+        if fname.exists():
             frames_info_preproc = pd.read_csv(fname, index_col=(0, 1))
 
             # convert times
@@ -718,17 +716,17 @@ def read_info(self):
         if frames_info is not None:
             wave_file = files_info[np.logical_not(files_info['PROCESSED']) & (files_info['DPR TYPE'] == 'WAVE,LAMP')]
             self._recipe_execution['sph_ifs_preprocess_wave'] \
-                = os.path.exists(os.path.join(path.preproc, wave_file.index[0]+'_preproc.fits'))
+                = (path.preproc / '{}_preproc.fits'.format(wave_file.index[0])).exists()
 
             self._recipe_execution['sph_ifs_wavelength_recalibration'] \
-                = os.path.exists(os.path.join(path.products, 'wavelength.fits'))
+                = (path.products / 'wavelength.fits').exists()
 
         if frames_info_preproc is not None:
             done = True
             files = frames_info_preproc.index
             for file, idx in files:
                 fname = '{0}_DIT{1:03d}_preproc'.format(file, idx)
-                file = glob.glob(os.path.join(path.preproc, fname+'.fits'))
+                file = list(path.preproc.glob('{}.fits'.format(fname)))
                 done = done and (len(file) == 1)
             self._recipe_execution['sph_ifs_preprocess_science'] = done
 
@@ -736,7 +734,7 @@ def read_info(self):
             files = frames_info_preproc.index
             for file, idx in files:
                 fname = '{0}_DIT{1:03d}_preproc_?????'.format(file, idx)
-                file = glob.glob(os.path.join(path.preproc, fname+'.fits'))
+                file = list(path.preproc.glob('{}.fits'.format(fname)))
                 done = done and (len(file) == 1)
             self._recipe_execution['sph_ifs_science_cubes'] = done
 
@@ -745,7 +743,7 @@ def read_info(self):
                                         (frames_info_preproc['DPR TYPE'] == 'OBJECT,CENTER')].index
             for file, idx in files:
                 fname = '{0}_DIT{1:03d}_preproc_centers'.format(file, idx)
-                file = glob.glob(os.path.join(path.preproc, fname+'.fits'))
+                file = list(path.preproc.glob('{}.fits'.format(fname)))
                 done = done and (len(file) == 1)
             self._recipe_execution['sph_ifs_star_center'] = done
 
 
     def 
sort_files(self):
 
         path = self._path
 
         # list files
-        files = glob.glob(os.path.join(path.raw, '*.fits'))
-        files = [os.path.splitext(os.path.basename(f))[0] for f in files]
+        files = path.raw.glob('*.fits')
+        files = [f.stem for f in files]
 
         if len(files) == 0:
             raise ValueError('No raw FITS files in reduction path')
@@ -773,9 +771,8 @@ def sort_files(self):
         print(' * found {0} FITS files in {1}'.format(len(files), path.raw))
 
         # read list of keywords
-        package_directory = os.path.dirname(os.path.abspath(__file__))
         keywords = []
-        file = open(os.path.join(package_directory, 'instruments', 'keywords.dat'), 'r')
+        file = open(Path(vltpf.__file__).parent / 'instruments' / 'keywords.dat', 'r')
         for line in file:
             line = line.strip()
             if line:
@@ -794,7 +791,7 @@ def sort_files(self):
         files_info = pd.DataFrame(index=pd.Index(files, name='FILE'), columns=keywords_short, dtype='float')
 
         for f in files:
-            hdu = fits.open(os.path.join(path.raw, f+'.fits'))
+            hdu = fits.open(path.raw / '{}.fits'.format(f))
             hdr = hdu[0].header
 
             for k, sk in zip(keywords, keywords_short):
@@ -827,7 +824,7 @@ def sort_files(self):
         files_info.sort_values(by='DATE-OBS', inplace=True)
 
         # save files_info
-        files_info.to_csv(os.path.join(path.preproc, 'files.csv'))
+        files_info.to_csv(path.preproc / 'files.csv')
         self._files_info = files_info
 
         # update recipe execution
@@ -881,7 +878,7 @@ def sort_frames(self):
         toolbox.compute_angles(frames_info)
 
         # save
-        frames_info.to_csv(os.path.join(path.preproc, 'frames.csv'))
+        frames_info.to_csv(path.preproc / 'frames.csv')
         self._frames_info = frames_info
 
         # update recipe execution
@@ -1165,7 +1162,7 @@ def check_files_association(self):
             raise ValueError('There are {0} errors that should be solved before proceeding'.format(error_flag))
 
         # save
-        files_info.to_csv(os.path.join(path.preproc, 'files.csv'))
+        files_info.to_csv(path.preproc / 'files.csv')
         self._files_info = files_info
 
 
@@ -1210,10 +1207,10 @@ def sph_ifs_cal_dark(self, silent=True):
                 print(' * {0} with DIT={1:.2f} sec ({2} files)'.format(ctype, DIT, len(cfiles)))
 
                 # create sof
-                sof = os.path.join(path.sof, 'dark_DIT={0:.2f}.sof'.format(DIT))
+                sof = path.sof / 'dark_DIT={0:.2f}.sof'.format(DIT)
                 file = open(sof, 'w')
                 for f in files:
-                    file.write('{0}{1}.fits {2}\n'.format(path.raw, f, 'IFS_DARK_RAW'))
+                    file.write('{0}/{1}.fits {2}\n'.format(path.raw, f, 'IFS_DARK_RAW'))
                 file.close()
 
                 # products
@@ -1234,8 +1231,8 @@ def sph_ifs_cal_dark(self, silent=True):
                         '--ifs.master_dark.smoothing=5',
                         '--ifs.master_dark.min_acceptable=0.0',
                         '--ifs.master_dark.max_acceptable=2000.0',
-                        '--ifs.master_dark.outfilename={0}{1}.fits'.format(path.calib, dark_file),
-                        '--ifs.master_dark.badpixfilename={0}{1}.fits'.format(path.calib, bpm_file),
+                        '--ifs.master_dark.outfilename={0}/{1}.fits'.format(path.calib, dark_file),
+                        '--ifs.master_dark.badpixfilename={0}/{1}.fits'.format(path.calib, bpm_file),
                         sof]
 
                 # check esorex
@@ -1269,7 +1266,7 @@ def sph_ifs_cal_dark(self, silent=True):
             files_info.loc[bpm_file, 'PRO CATG'] = 'IFS_STATIC_BADPIXELMAP'
 
         # save
-        files_info.to_csv(os.path.join(path.preproc, 'files.csv'))
+        files_info.to_csv(path.preproc / 'files.csv')
 
         # update recipe execution
         self._recipe_execution['sph_ifs_cal_dark'] = True
@@ -1310,7 +1307,7 @@ def sph_ifs_cal_detector_flat(self, silent=True):
 
         # bpm files
         cfiles = files_info[files_info['PRO CATG'] == 'IFS_STATIC_BADPIXELMAP'].index
-        bpm_files = [os.path.join(path.calib, f+'.fits') for f in cfiles]
+        bpm_files = [path.calib / '{}.fits'.format(f) for f in cfiles]
 
         # loop on wavelengths
         waves = [   0, 1020, 
1230, 1300, 1550] @@ -1321,7 +1318,7 @@ def sph_ifs_cal_detector_flat(self, silent=True): print(' * flat for wavelength {0} nm (filter {1}, lamp {2})'.format(wave, comb, lamp)) cfiles = calibs[calibs['INS2 COMB IFS'] == '{0}_{1}'.format(comb, mode_short)] - files = [os.path.join(path.raw, f+'.fits') for f in cfiles.index] + files = [path.raw / '{}.fits'.format(f) for f in cfiles.index] if len(files) == 0: continue @@ -1339,9 +1336,9 @@ def sph_ifs_cal_detector_flat(self, silent=True): flat_file = 'master_detector_flat_{0}_l{1}'.format(wav, lamp) bpm_file = 'dff_badpixelname_{0}_l{1}'.format(wav, lamp) - hdu = fits.open(os.path.join(path.raw, files[0])) - fits.writeto(os.path.join(path.calib, flat_file+'.fits'), flat, header=hdu[0].header, output_verify='silentfix', overwrite=True) - fits.writeto(os.path.join(path.calib, bpm_file+'.fits'), bpm, header=hdu[0].header, output_verify='silentfix', overwrite=True) + hdu = fits.open(path.raw / files[0]) + fits.writeto(path.calib / '{}.fits'.format(flat_file), flat, header=hdu[0].header, output_verify='silentfix', overwrite=True) + fits.writeto(path.calib / '{}.fits'.format(bpm_file), bpm, header=hdu[0].header, output_verify='silentfix', overwrite=True) hdu.close() # store products @@ -1361,7 +1358,7 @@ def sph_ifs_cal_detector_flat(self, silent=True): files_info.loc[bpm_file, 'PRO CATG'] = 'IFS_STATIC_BADPIXELMAP' # save - files_info.to_csv(os.path.join(path.preproc, 'files.csv')) + files_info.to_csv(path.preproc / 'files.csv') # update recipe execution self._recipe_execution['sph_ifs_cal_detector_flat'] = True @@ -1406,10 +1403,10 @@ def sph_ifs_cal_specpos(self, silent=True): raise ValueError('Unknown IFS mode {0}'.format(mode)) # create sof - sof = os.path.join(path.sof, 'specpos.sof') + sof = path.sof / 'specpos.sof' file = open(sof, 'w') - file.write('{0}{1}.fits {2}\n'.format(path.raw, specpos_file.index[0], 'IFS_SPECPOS_RAW')) - file.write('{0}{1}.fits {2}\n'.format(path.calib, dark_file.index[0], 'IFS_MASTER_DARK')) + file.write('{0}/{1}.fits {2}\n'.format(path.raw, specpos_file.index[0], 'IFS_SPECPOS_RAW')) + file.write('{0}/{1}.fits {2}\n'.format(path.calib, dark_file.index[0], 'IFS_MASTER_DARK')) file.close() # products @@ -1421,7 +1418,7 @@ def sph_ifs_cal_specpos(self, silent=True): '--no-datamd5=TRUE', 'sph_ifs_spectra_positions', '--ifs.spectra_positions.hmode={0}'.format(Hmode), - '--ifs.spectra_positions.outfilename={0}{1}.fits'.format(path.calib, specp_file), + '--ifs.spectra_positions.outfilename={0}/{1}.fits'.format(path.calib, specp_file), sof] # check esorex @@ -1448,7 +1445,7 @@ def sph_ifs_cal_specpos(self, silent=True): files_info.loc[specp_file, 'PRO CATG'] = 'IFS_SPECPOS' # save - files_info.to_csv(os.path.join(path.preproc, 'files.csv')) + files_info.to_csv(path.preproc / 'files.csv') # update recipe execution self._recipe_execution['sph_ifs_cal_specpos'] = True @@ -1491,11 +1488,11 @@ def sph_ifs_cal_wave(self, silent=True): mode = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS2 COMB IFS'].unique()[0] # create sof - sof = os.path.join(path.sof, 'wave.sof') + sof = path.sof / 'wave.sof' file = open(sof, 'w') - file.write('{0}{1}.fits {2}\n'.format(path.raw, wave_file.index[0], 'IFS_WAVECALIB_RAW')) - file.write('{0}{1}.fits {2}\n'.format(path.calib, specpos_file.index[0], 'IFS_SPECPOS')) - file.write('{0}{1}.fits {2}\n'.format(path.calib, dark_file.index[0], 'IFS_MASTER_DARK')) + file.write('{0}/{1}.fits {2}\n'.format(path.raw, wave_file.index[0], 'IFS_WAVECALIB_RAW')) + file.write('{0}/{1}.fits 
{2}\n'.format(path.calib, specpos_file.index[0], 'IFS_SPECPOS')) + file.write('{0}/{1}.fits {2}\n'.format(path.calib, dark_file.index[0], 'IFS_MASTER_DARK')) file.close() # products @@ -1511,7 +1508,7 @@ def sph_ifs_cal_wave(self, silent=True): '--ifs.wave_calib.wavelength_line1=0.9877', '--ifs.wave_calib.wavelength_line2=1.1237', '--ifs.wave_calib.wavelength_line3=1.3094', - '--ifs.wave_calib.outfilename={0}{1}.fits'.format(path.calib, wav_file), + '--ifs.wave_calib.outfilename={0}/{1}.fits'.format(path.calib, wav_file), sof] elif mode == 'OBS_H': args = ['esorex', @@ -1523,7 +1520,7 @@ def sph_ifs_cal_wave(self, silent=True): '--ifs.wave_calib.wavelength_line2=1.1237', '--ifs.wave_calib.wavelength_line3=1.3094', '--ifs.wave_calib.wavelength_line4=1.5451', - '--ifs.wave_calib.outfilename={0}{1}.fits'.format(path.calib, wav_file), + '--ifs.wave_calib.outfilename={0}/{1}.fits'.format(path.calib, wav_file), sof] # check esorex @@ -1550,7 +1547,7 @@ def sph_ifs_cal_wave(self, silent=True): files_info.loc[wav_file, 'PRO CATG'] = 'IFS_WAVECALIB' # save - files_info.to_csv(os.path.join(path.preproc, 'files.csv')) + files_info.to_csv(path.preproc / 'files.csv') # update recipe execution self._recipe_execution['sph_ifs_cal_wave'] = True @@ -1626,18 +1623,18 @@ def sph_ifs_cal_ifu_flat(self, silent=True): raise ValueError('There should be exactly 1 1550 nm flat file. Found {0}.'.format(len(flat_1550_file))) # create sof - sof = os.path.join(path.sof, 'ifu_flat.sof') + sof = path.sof / 'ifu_flat.sof' file = open(sof, 'w') - file.write('{0}{1}.fits {2}\n'.format(path.raw, ifu_flat_file.index[0], 'IFS_FLAT_FIELD_RAW')) - file.write('{0}{1}.fits {2}\n'.format(path.calib, wave_file.index[0], 'IFS_WAVECALIB')) - file.write('{0}{1}.fits {2}\n'.format(path.calib, dark_file.index[0], 'IFS_MASTER_DARK')) - file.write('{0}{1}.fits {2}\n'.format(path.calib, flat_white_file.index[0], 'IFS_MASTER_DFF_SHORT')) - file.write('{0}{1}.fits {2}\n'.format(path.calib, flat_white_file.index[0], 'IFS_MASTER_DFF_LONGBB')) - file.write('{0}{1}.fits {2}\n'.format(path.calib, flat_1020_file.index[0], 'IFS_MASTER_DFF_LONG1')) - file.write('{0}{1}.fits {2}\n'.format(path.calib, flat_1230_file.index[0], 'IFS_MASTER_DFF_LONG2')) - file.write('{0}{1}.fits {2}\n'.format(path.calib, flat_1300_file.index[0], 'IFS_MASTER_DFF_LONG3')) + file.write('{0}/{1}.fits {2}\n'.format(path.raw, ifu_flat_file.index[0], 'IFS_FLAT_FIELD_RAW')) + file.write('{0}/{1}.fits {2}\n'.format(path.calib, wave_file.index[0], 'IFS_WAVECALIB')) + file.write('{0}/{1}.fits {2}\n'.format(path.calib, dark_file.index[0], 'IFS_MASTER_DARK')) + file.write('{0}/{1}.fits {2}\n'.format(path.calib, flat_white_file.index[0], 'IFS_MASTER_DFF_SHORT')) + file.write('{0}/{1}.fits {2}\n'.format(path.calib, flat_white_file.index[0], 'IFS_MASTER_DFF_LONGBB')) + file.write('{0}/{1}.fits {2}\n'.format(path.calib, flat_1020_file.index[0], 'IFS_MASTER_DFF_LONG1')) + file.write('{0}/{1}.fits {2}\n'.format(path.calib, flat_1230_file.index[0], 'IFS_MASTER_DFF_LONG2')) + file.write('{0}/{1}.fits {2}\n'.format(path.calib, flat_1300_file.index[0], 'IFS_MASTER_DFF_LONG3')) if mode == 'OBS_H': - file.write('{0}{1}.fits {2}\n'.format(path.calib, flat_1550_file.index[0], 'IFS_MASTER_DFF_LONG4')) + file.write('{0}/{1}.fits {2}\n'.format(path.calib, flat_1550_file.index[0], 'IFS_MASTER_DFF_LONG4')) file.close() # products @@ -1649,7 +1646,7 @@ def sph_ifs_cal_ifu_flat(self, silent=True): '--no-datamd5=TRUE', 'sph_ifs_instrument_flat', '--ifs.instrument_flat.nofit=TRUE', - 
'--ifs.instrument_flat.ifu_filename={0}{1}.fits'.format(path.calib, ifu_file), + '--ifs.instrument_flat.ifu_filename={0}/{1}.fits'.format(path.calib, ifu_file), sof] # check esorex @@ -1676,7 +1673,7 @@ def sph_ifs_cal_ifu_flat(self, silent=True): files_info.loc[ifu_file, 'PRO CATG'] = 'IFS_IFU_FLAT_FIELD' # save - files_info.to_csv(os.path.join(path.preproc, 'files.csv')) + files_info.to_csv(path.preproc / 'files.csv') # update recipe execution self._recipe_execution['sph_ifs_cal_ifu_flat'] = True @@ -1750,14 +1747,14 @@ def sph_ifs_preprocess_science(self, frames_info = self._frames_info # clean before we start - files = glob.glob(os.path.join(path.preproc, '*_DIT???_preproc.fits')) + files = path.preproc.glob('*_DIT???_preproc.fits') for file in files: - os.remove(file) + file.unlink() # bpm if fix_badpix: bpm_files = files_info[files_info['PRO CATG'] == 'IFS_STATIC_BADPIXELMAP'].index - bpm_files = [os.path.join(path.calib, f+'.fits') for f in bpm_files] + bpm_files = [path.calib / '{}.fits'.format(f) for f in bpm_files] bpm = toolbox.compute_bad_pixel_map(bpm_files) @@ -1798,7 +1795,7 @@ def sph_ifs_preprocess_science(self, print('Warning: no background has been found. Pre-processing will continue but data quality will likely be affected') bkg = np.zeros((2048, 2048)) elif len(dfiles) == 1: - bkg = fits.getdata(os.path.join(path.calib, dfiles.index[0]+'.fits')) + bkg = fits.getdata(path.calib / '{}.fits'.format(dfiles.index[0])) elif len(dfiles) > 1: # FIXME: handle cases when multiple backgrounds are found? raise ValueError('Unexpected number of background files ({0})'.format(len(dfiles))) @@ -1812,7 +1809,7 @@ def sph_ifs_preprocess_science(self, # read data print(' ==> read data') - img, hdr = fits.getdata(os.path.join(path.raw, fname+'.fits'), header=True) + img, hdr = fits.getdata(path.raw / '{}.fits'.format(fname), header=True) # add extra dimension to single images to make cubes if img.ndim == 2: @@ -1821,14 +1818,14 @@ def sph_ifs_preprocess_science(self, # collapse if (typ == 'OBJECT,CENTER'): if collapse_center: - print(' ==> collapse: mean') + print(' ==> collapse: mean ({0} -> 1 frame, 0 dropped)'.format(len(img))) img = np.mean(img, axis=0, keepdims=True) frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'mean') else: frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'none') elif (typ == 'OBJECT,FLUX'): if collapse_psf: - print(' ==> collapse: mean') + print(' ==> collapse: mean ({0} -> 1 frame, 0 dropped)'.format(len(img))) img = np.mean(img, axis=0, keepdims=True) frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'mean') else: @@ -1908,7 +1905,7 @@ def sph_ifs_preprocess_science(self, for f in range(len(img)): frame = img[f].squeeze() hdr['HIERARCH ESO DET NDIT'] = 1 - fits.writeto(os.path.join(path.preproc, fname+'_DIT{0:03d}_preproc.fits'.format(f)), frame, hdr, + fits.writeto(path.preproc / '{}_DIT{:03d}_preproc.fits'.format(fname, f), frame, hdr, overwrite=True, output_verify='silentfix') print() @@ -1917,7 +1914,7 @@ def sph_ifs_preprocess_science(self, # sort and save final dataframe frames_info_preproc.sort_values(by='TIME', inplace=True) - frames_info_preproc.to_csv(os.path.join(path.preproc, 'frames_preproc.csv')) + frames_info_preproc.to_csv(path.preproc / 'frames_preproc.csv') self._frames_info_preproc = frames_info_preproc @@ -1942,7 +1939,7 @@ def sph_ifs_preprocess_wave(self): # bpm bpm_files = files_info[files_info['PRO CATG'] == 'IFS_STATIC_BADPIXELMAP'].index - bpm_files = [os.path.join(path.calib, f+'.fits') for f in 
bpm_files] + bpm_files = [path.calib / '{}.fits'.format(f) for f in bpm_files] bpm = toolbox.compute_bad_pixel_map(bpm_files) # dark @@ -1950,7 +1947,7 @@ def sph_ifs_preprocess_wave(self): (files_info['DPR CATG'] == 'CALIB') & (files_info['DET SEQ1 DIT'].round(2) == 1.65)] if len(dark_file) == 0: raise ValueError('There should at least 1 dark file for calibrations. Found none.') - bkg = fits.getdata(os.path.join(path.calib, dark_file.index[0]+'.fits')) + bkg = fits.getdata(path.calib / '{}.fits'.format(dark_file.index[0])) # wavelength calibration wave_file = files_info[np.logical_not(files_info['PROCESSED']) & (files_info['DPR TYPE'] == 'WAVE,LAMP')] @@ -1961,7 +1958,7 @@ def sph_ifs_preprocess_wave(self): # read data print(' * {0}'.format(fname)) print(' ==> read data') - img, hdr = fits.getdata(os.path.join(path.raw, fname+'.fits'), header=True) + img, hdr = fits.getdata(path.raw / '{}.fits'.format(fname), header=True) # collapse print(' ==> collapse: mean') @@ -1984,7 +1981,7 @@ def sph_ifs_preprocess_wave(self): hdr['HIERARCH ESO TEL TARG DELTA'] = -900000.0 # save - fits.writeto(os.path.join(path.preproc, fname+'_preproc.fits'), img, hdr, + fits.writeto(path.preproc / '{}_preproc.fits'.format(fname), img, hdr, overwrite=True, output_verify='silentfix') # update recipe execution @@ -2011,9 +2008,9 @@ def sph_ifs_science_cubes(self, silent=True): files_info = self._files_info # clean before we start - files = glob.glob(os.path.join(path.tmp, '*_DIT???_preproc_?????.fits')) + files = path.preproc.glob('*_DIT???_preproc_?????.fits') for file in files: - os.remove(file) + file.unlink() # IFS obs mode mode = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS2 COMB IFS'].unique()[0] @@ -2025,7 +2022,7 @@ def sph_ifs_science_cubes(self, silent=True): raise ValueError('Unknown IFS mode {0}'.format(mode)) # get list of science files - sci_files = sorted(glob.glob(path.preproc+'*_preproc.fits')) + sci_files = sorted(list(path.preproc.glob('*_preproc.fits'))) print(' * found {0} pre-processed files'.format(len(sci_files))) # get list of calibration files @@ -2067,20 +2064,20 @@ def sph_ifs_science_cubes(self, silent=True): raise ValueError('There should be exactly 1 1550 nm flat file. 
Found {0}.'.format(len(flat_1550_file))) # create sof - sof = os.path.join(path.sof, 'science.sof') + sof = path.sof / 'science.sof' file = open(sof, 'w') for f in sci_files: file.write('{0} {1}\n'.format(f, 'IFS_SCIENCE_DR_RAW')) - file.write('{0}{1}.fits {2}\n'.format(path.calib, ifu_flat_file.index[0], 'IFS_IFU_FLAT_FIELD')) - file.write('{0}{1}.fits {2}\n'.format(path.calib, wave_file.index[0], 'IFS_WAVECALIB')) - file.write('{0}{1}.fits {2}\n'.format(path.calib, flat_white_file.index[0], 'IFS_MASTER_DFF_SHORT')) - file.write('{0}{1}.fits {2}\n'.format(path.calib, flat_white_file.index[0], 'IFS_MASTER_DFF_LONGBB')) - file.write('{0}{1}.fits {2}\n'.format(path.calib, bpm_file.index[0], 'IFS_STATIC_BADPIXELMAP')) - file.write('{0}{1}.fits {2}\n'.format(path.calib, flat_1020_file.index[0], 'IFS_MASTER_DFF_LONG1')) - file.write('{0}{1}.fits {2}\n'.format(path.calib, flat_1230_file.index[0], 'IFS_MASTER_DFF_LONG2')) - file.write('{0}{1}.fits {2}\n'.format(path.calib, flat_1300_file.index[0], 'IFS_MASTER_DFF_LONG3')) + file.write('{0}/{1}.fits {2}\n'.format(path.calib, ifu_flat_file.index[0], 'IFS_IFU_FLAT_FIELD')) + file.write('{0}/{1}.fits {2}\n'.format(path.calib, wave_file.index[0], 'IFS_WAVECALIB')) + file.write('{0}/{1}.fits {2}\n'.format(path.calib, flat_white_file.index[0], 'IFS_MASTER_DFF_SHORT')) + file.write('{0}/{1}.fits {2}\n'.format(path.calib, flat_white_file.index[0], 'IFS_MASTER_DFF_LONGBB')) + file.write('{0}/{1}.fits {2}\n'.format(path.calib, bpm_file.index[0], 'IFS_STATIC_BADPIXELMAP')) + file.write('{0}/{1}.fits {2}\n'.format(path.calib, flat_1020_file.index[0], 'IFS_MASTER_DFF_LONG1')) + file.write('{0}/{1}.fits {2}\n'.format(path.calib, flat_1230_file.index[0], 'IFS_MASTER_DFF_LONG2')) + file.write('{0}/{1}.fits {2}\n'.format(path.calib, flat_1300_file.index[0], 'IFS_MASTER_DFF_LONG3')) if mode == 'OBS_H': - file.write('{0}{1}.fits {2}\n'.format(path.calib, flat_1550_file.index[0], 'IFS_MASTER_DFF_LONG4')) + file.write('{0}/{1}.fits {2}\n'.format(path.calib, flat_1550_file.index[0], 'IFS_MASTER_DFF_LONG4')) file.close() # esorex parameters @@ -2110,7 +2107,7 @@ def sph_ifs_science_cubes(self, silent=True): # post-process print(' * post-processing files') - files = glob.glob(path.tmp+'*_preproc_*.fits') + files = list(path.tmp.glob('*_preproc_*.fits')) for f in files: # read and save only primary extension data, header = fits.getdata(f, header=True) @@ -2118,7 +2115,7 @@ def sph_ifs_science_cubes(self, silent=True): # move files to final directory for file in files: - shutil.move(file, os.path.join(path.preproc, os.path.basename(file))) + shutil.move(file, path.preproc / file.name) # update recipe execution self._recipe_execution['sph_ifs_science_cubes'] = True @@ -2168,7 +2165,7 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot= # get header of any science file science_files = frames_info[frames_info['DPR CATG'] == 'SCIENCE'].index[0] fname = '{0}_DIT{1:03d}_preproc_'.format(science_files[0], science_files[1]) - files = glob.glob(os.path.join(path.preproc, fname+'*[0-9].fits')) + files = list(path.preproc.glob(fname+'*[0-9].fits')) hdr = fits.getheader(files[0]) wave_min = hdr['HIERARCH ESO DRS IFS MIN LAMBDA']*1000 @@ -2185,13 +2182,13 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot= if len(starcen_files) == 0: print(' ==> no OBJECT,CENTER file in the data set. Wavelength cannot be recalibrated. 
' + 'The standard wavelength calibrated by the ESO pipeline will be used.')
-            fits.writeto(os.path.join(path.products, 'wavelength.fits'), wave_drh, overwrite=True)
+            fits.writeto(path.products / 'wavelength.fits', wave_drh, overwrite=True)
             return
 
         ifs_mode = starcen_files['INS2 COMB IFS'].values[0]
         fname = '{0}_DIT{1:03d}_preproc_'.format(starcen_files.index.values[0][0], starcen_files.index.values[0][1])
-        files = glob.glob(os.path.join(path.preproc, fname+'*[0-9].fits'))
+        files = list(path.preproc.glob(fname+'*[0-9].fits'))
         cube, hdr = fits.getdata(files[0], header=True)
 
         # coronagraph
@@ -2204,7 +2201,7 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot=
         # compute centers from waffle spots
         waffle_orientation = hdr['HIERARCH ESO OCS WAFFLE ORIENT']
         if plot:
-            save_path = os.path.join(path.products, fname+'spots_fitting.pdf')
+            save_path = path.products / '{}spots_fitting.pdf'.format(fname)
         else:
             save_path = None
         spot_center, spot_dist, img_center \
@@ -2224,10 +2221,10 @@
         # find wavelength calibration file name
         wave_file = files_info[np.logical_not(files_info['PROCESSED']) & (files_info['DPR TYPE'] == 'WAVE,LAMP')].index[0]
         fname = '{0}_preproc_'.format(wave_file)
-        file = glob.glob(os.path.join(path.preproc, fname+'*.fits'))
+        files = list(path.preproc.glob(fname+'*.fits'))
 
         # read cube and measure mean flux in all channels
-        cube, hdr = fits.getdata(file[0], header=True)
+        cube, hdr = fits.getdata(files[0], header=True)
         wave_flux = np.zeros(nwave)
         aper = aperture.disc(cube.shape[-1], 100, diameter=True)
         mask = aper != 0
@@ -2297,7 +2294,7 @@
 
         # save
         print(' * saving')
-        fits.writeto(os.path.join(path.products, 'wavelength.fits'), wave_final, overwrite=True)
+        fits.writeto(path.products / 'wavelength.fits', wave_final, overwrite=True)
 
         #
         # summary plot
@@ -2334,7 +2331,7 @@
 
         plt.tight_layout()
 
-        plt.savefig(os.path.join(path.products, 'wavelength_recalibration.pdf'))
+        plt.savefig(path.products / 'wavelength_recalibration.pdf')
 
         # update recipe execution
         self._recipe_execution['sph_ifs_wavelength_recalibration'] = True
@@ -2378,7 +2375,7 @@ def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), plot=True):
 
         # read data
         fname = '{0}_DIT{1:03d}_preproc_'.format(file, idx)
-        files = glob.glob(os.path.join(path.preproc, fname+'*[0-9].fits'))
+        files = list(path.preproc.glob(fname+'*[0-9].fits'))
         cube, hdr = fits.getdata(files[0], header=True)
 
         # mask edges (bad pixels can have higher values than the PSF peak)
@@ -2393,13 +2390,13 @@
 
         # centers
         if plot:
-            save_path = os.path.join(path.products, fname+'PSF_fitting.pdf')
+            save_path = path.products / '{}PSF_fitting.pdf'.format(fname)
        else:
             save_path = None
         img_center = toolbox.star_centers_from_PSF_img_cube(cube, wave_drh, pixel, save_path=save_path)
 
         # save
-        fits.writeto(os.path.join(path.preproc, fname+'centers.fits'), img_center, overwrite=True)
+        fits.writeto(path.preproc / '{}centers.fits'.format(fname), img_center, overwrite=True)
         print()
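
For the OBJECT,FLUX frames handled here, the star center comes from a fit of the stellar PSF itself; the actual implementation lives in toolbox.star_centers_from_PSF_img_cube(). A minimal sketch of the idea on a synthetic PSF, using the same astropy fitting machinery already used elsewhere in the module (the image and center values are made up):

    import numpy as np
    from astropy.modeling import models, fitting

    # synthetic PSF with a known center; real data come from the preproc cubes
    y, x = np.mgrid[0:64, 0:64]
    img = np.exp(-((x - 30.2)**2 + (y - 33.7)**2) / (2 * 2.5**2))

    # fit a 2D Gaussian and read the center off the fitted model
    g_init = models.Gaussian2D(amplitude=img.max(), x_mean=32, y_mean=32,
                               x_stddev=3, y_stddev=3)
    fit = fitting.LevMarLSQFitter()(g_init, x, y, img)
    print(fit.x_mean.value, fit.y_mean.value)   # ~30.2, ~33.7
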
        # then OBJECT,CENTER
@@ -2410,7 +2407,7 @@
 
         # read data
         fname = '{0}_DIT{1:03d}_preproc_'.format(file, idx)
-        files = glob.glob(os.path.join(path.preproc, fname+'*[0-9].fits'))
+        files = list(path.preproc.glob(fname+'*[0-9].fits'))
         cube, hdr = fits.getdata(files[0], header=True)
 
         # wavelength
@@ -2421,7 +2418,7 @@
 
         # centers
         waffle_orientation = hdr['HIERARCH ESO OCS WAFFLE ORIENT']
         if plot:
-            save_path = os.path.join(path.products, fname+'spots_fitting.pdf')
+            save_path = path.products / '{}spots_fitting.pdf'.format(fname)
         else:
             save_path = None
         spot_center, spot_dist, img_center \
@@ -2430,7 +2427,7 @@
                save_path=save_path)
 
         # save
-        fits.writeto(os.path.join(path.preproc, fname+'centers.fits'), img_center, overwrite=True)
+        fits.writeto(path.preproc / '{}centers.fits'.format(fname), img_center, overwrite=True)
         print()
 
     # update recipe execution
@@ -2525,8 +2522,8 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a
         frames_info = self._frames_info_preproc
 
         # read final wavelength calibration
-        fname = os.path.join(path.products, 'wavelength.fits')
-        if not os.path.exists(fname):
+        fname = path.products / 'wavelength.fits'
+        if not fname.exists():
             raise FileExistsError('Missing wavelength.fits file. ' +
                                   'You must first run the sph_ifs_wavelength_recalibration() method.')
         wave = fits.getdata(fname)
@@ -2583,10 +2580,10 @@
 
             # read data
             fname = '{0}_DIT{1:03d}_preproc_'.format(file, idx)
-            files = glob.glob(os.path.join(path.preproc, fname+'?????.fits'))
+            files = list(path.preproc.glob(fname+'?????.fits'))
             cube = fits.getdata(files[0])
-            centers = fits.getdata(os.path.join(path.preproc, fname+'centers.fits'))
-
+            centers = fits.getdata(path.preproc / '{}centers.fits'.format(fname))
+
             # mask values outside of IFS FoV
             cube[cube == 0] = np.nan
@@ -2624,12 +2621,12 @@
                 psf_cube_scaled[wave_idx, file_idx] = imutils.scale(nimg, wave[0]/wave[wave_idx], method=shift_method)
 
         # save final cubes
-        flux_files.to_csv(os.path.join(path.products, 'psf_frames.csv'))
-        fits.writeto(os.path.join(path.products, 'psf_cube.fits'), psf_cube, overwrite=True)
-        fits.writeto(os.path.join(path.products, 'psf_parang.fits'), psf_parang, overwrite=True)
-        fits.writeto(os.path.join(path.products, 'psf_derot.fits'), psf_derot, overwrite=True)
+        flux_files.to_csv(path.products / 'psf_frames.csv')
+        fits.writeto(path.products / 'psf_cube.fits', psf_cube, overwrite=True)
+        fits.writeto(path.products / 'psf_parang.fits', psf_parang, overwrite=True)
+        fits.writeto(path.products / 'psf_derot.fits', psf_derot, overwrite=True)
         if save_scaled:
-            fits.writeto(os.path.join(path.products, 'psf_cube_scaled.fits'), psf_cube_scaled, overwrite=True)
+            fits.writeto(path.products / 'psf_cube_scaled.fits', psf_cube_scaled, overwrite=True)
 
         # delete big cubes
         del psf_cube
@@ -2665,9 +2662,9 @@
 
             # read data
             fname = '{0}_DIT{1:03d}_preproc_'.format(file, idx)
-            files = glob.glob(os.path.join(path.preproc, fname+'?????.fits'))
+            files = list(path.preproc.glob(fname+'?????.fits'))
             cube = fits.getdata(files[0])
-            centers = fits.getdata(os.path.join(path.preproc, fname+'centers.fits'))
+            centers = fits.getdata(path.preproc / '{}centers.fits'.format(fname))
 
             # mask values outside of IFS FoV
             cube[cube == 0] = np.nan
@@ -2709,12 +2706,12 @@ 
cen_cube_scaled[wave_idx, file_idx] = imutils.scale(nimg, wave[0]/wave[wave_idx], method=shift_method) # save final cubes - starcen_files.to_csv(os.path.join(path.products, 'starcenter_frames.csv')) - fits.writeto(os.path.join(path.products, 'starcenter_cube.fits'), cen_cube, overwrite=True) - fits.writeto(os.path.join(path.products, 'starcenter_parang.fits'), cen_parang, overwrite=True) - fits.writeto(os.path.join(path.products, 'starcenter_derot.fits'), cen_derot, overwrite=True) + starcen_files.to_csv(path.products / 'starcenter_frames.csv') + fits.writeto(path.products / 'starcenter_cube.fits', cen_cube, overwrite=True) + fits.writeto(path.products / 'starcenter_parang.fits', cen_parang, overwrite=True) + fits.writeto(path.products / 'starcenter_derot.fits', cen_derot, overwrite=True) if save_scaled: - fits.writeto(os.path.join(path.products, 'starcenter_cube_scaled.fits'), cen_cube_scaled, overwrite=True) + fits.writeto(path.products / 'starcenter_cube_scaled.fits', cen_cube_scaled, overwrite=True) # delete big cubes del cen_cube @@ -2746,7 +2743,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a centers = centers_default else: fname = '{0}_DIT{1:03d}_preproc_centers.fits'.format(starcen_files.index.values[0][0], starcen_files.index.values[0][1]) - centers = fits.getdata(os.path.join(path.preproc, fname)) + centers = fits.getdata(path.preproc / fname) # final center if cpix: @@ -2767,7 +2764,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # read data fname = '{0}_DIT{1:03d}_preproc_'.format(file, idx) - files = glob.glob(os.path.join(path.preproc, fname+'*.fits')) + files = list(path.preproc.glob(fname+'*.fits')) cube = fits.getdata(files[0]) # mask values outside of IFS FoV @@ -2804,12 +2801,12 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a sci_cube_scaled[wave_idx, file_idx] = imutils.scale(nimg, wave[0]/wave[wave_idx], method=shift_method) # save final cubes - object_files.to_csv(os.path.join(path.products, 'science_frames.csv')) - fits.writeto(os.path.join(path.products, 'science_cube.fits'), sci_cube, overwrite=True) - fits.writeto(os.path.join(path.products, 'science_parang.fits'), sci_parang, overwrite=True) - fits.writeto(os.path.join(path.products, 'science_derot.fits'), sci_derot, overwrite=True) + object_files.to_csv(path.products / 'science_frames.csv') + fits.writeto(path.products / 'science_cube.fits', sci_cube, overwrite=True) + fits.writeto(path.products / 'science_parang.fits', sci_parang, overwrite=True) + fits.writeto(path.products / 'science_derot.fits', sci_derot, overwrite=True) if save_scaled: - fits.writeto(os.path.join(path.products, 'science_cube_scaled.fits'), sci_cube_scaled, overwrite=True) + fits.writeto(path.products / 'science_cube_scaled.fits', sci_cube_scaled, overwrite=True) # delete big cubes del sci_cube @@ -2839,27 +2836,27 @@ def sph_ifs_clean(self, delete_raw=False, delete_products=False): path = self._path # tmp - if os.path.exists(path.tmp): + if path.tmp.exists(): shutil.rmtree(path.tmp, ignore_errors=True) # sof - if os.path.exists(path.sof): + if path.sof.exists(): shutil.rmtree(path.sof, ignore_errors=True) # calib - if os.path.exists(path.calib): + if path.calib.exists(): shutil.rmtree(path.calib, ignore_errors=True) # preproc - if os.path.exists(path.preproc): + if path.preproc.exists(): shutil.rmtree(path.preproc, ignore_errors=True) # raw if delete_raw: - if os.path.exists(path.raw): + if path.raw.exists(): 
shutil.rmtree(path.raw, ignore_errors=True)
 
         # products
         if delete_products:
-            if os.path.exists(path.products):
+            if path.products.exists():
                 shutil.rmtree(path.products, ignore_errors=True)
 

From 4ad37fcc77299267a121f12b327322e15dc24c6c Mon Sep 17 00:00:00 2001
From: Arthur Vigan
Date: Sat, 24 Aug 2019 21:55:38 +0200
Subject: [PATCH 025/101] Remove trailing white spaces

---
 vltpf/IFS.py                    | 336 +++++++++++++--------------
 vltpf/IRDIS/ImagingReduction.py | 244 ++++++++++----------
 vltpf/IRDIS/SpectroReduction.py | 396 ++++++++++++++++----------------
 vltpf/toolbox.py                | 197 ++++++++--------
 vltpf/transmission.py           |  93 ++++----
 vltpf/utils/reduction_path.py   |  21 +-
 6 files changed, 642 insertions(+), 645 deletions(-)

diff --git a/vltpf/IFS.py b/vltpf/IFS.py
index ea1c771..6cffb70 100644
--- a/vltpf/IFS.py
+++ b/vltpf/IFS.py
@@ -40,7 +40,7 @@ def compute_detector_flat(raw_flat_files, bpm_files=[], mask_vignetting=True):
         Apply a mask on the flats to compensate the optical
         vignetting. The areas of the detector that are vignetted are
         replaced by a value of 1 in the flats. Default is True
-    
+
     Returns
     -------
     flat : array
@@ -68,13 +68,13 @@
     # create master flat
     DIT0 = hdr0['HIERARCH ESO DET SEQ1 DIT']
     DIT1 = hdr1['HIERARCH ESO DET SEQ1 DIT']
-    
+
     if DIT0 > DIT1:
         flat = ff0 - ff1
     else:
         flat = ff1 - ff0
 
-    # bad pixels correction 
+    # bad pixels correction
     flat = imutils.fix_badpix(flat, bpm_in, npix=12, weight=True)
     # flat = imutils.fix_badpix_vip(flat, bpm_in, box=5)
 
@@ -92,7 +92,7 @@
 
     # final products
     flat = flat / np.median(flat)
-    
+
@@ -284,7 +284,7 @@ def wavelength_optimisation(wave_ref, wave_scale, wave_lasers, peak_position_las diff = wave_peaks - wave_lasers return np.max(np.abs(diff)) - + def fit_peak(x, y, display=False): ''' @@ -300,14 +300,14 @@ def fit_peak(x, y, display=False): display : bool Display the result of the fit - + Returns ------- - par + par Fit parameters: Gaussian amplitude, Gaussian mean, Gaussian stddev, line slope, line intercept ''' - + # fit: Gaussian + constant g_init = models.Gaussian1D(amplitude=y.max(), mean=x[np.argmax(y)]) + models.Linear1D(slope=0, intercept=0) fitter = fitting.LevMarLSQFitter() @@ -319,7 +319,7 @@ def fit_peak(x, y, display=False): plt.plot(x, y, color='k') plt.plot(x, fit(x), color='r') plt.tight_layout() - + return fit.parameters @@ -351,16 +351,16 @@ class Reduction(object): 'sph_ifs_cal_specpos', 'sph_ifs_cal_wave', 'sph_ifs_preprocess_science', 'sph_ifs_preprocess_wave'], 'sph_ifs_wavelength_recalibration': ['sort_files', 'sort_frames', 'sph_ifs_preprocess_wave', - 'sph_ifs_science_cubes'], + 'sph_ifs_science_cubes'], 'sph_ifs_star_center': ['sort_files', 'sort_frames', 'sph_ifs_science_cubes'], 'sph_ifs_combine_data': ['sort_files', 'sort_frames', 'sph_ifs_science_cubes', 'sph_ifs_wavelength_recalibration', 'sph_ifs_star_center'] } - + ################################################## # Constructor ################################################## - + def __init__(self, path): ''' Initialization of the IFSReduction @@ -373,19 +373,19 @@ def __init__(self, path): # expand path path = Path(path).expanduser().resolve() - + # zeroth-order reduction validation raw = path / 'raw' if not raw.exists(): raise ValueError('No raw/ subdirectory. {0} is not a valid reduction path!'.format(path)) - + # init path and name self._path = utils.ReductionPath(path) self._instrument = 'IFS' - + # instrument mode self._mode = 'Unknown' - + # configuration configfile = Path(vltpf.__file__).parent / 'instruments' / '{}.ini'.format(self._instrument) config = configparser.ConfigParser() @@ -395,7 +395,7 @@ def __init__(self, path): # instrument self._pixel = float(config.get('instrument', 'pixel')) self._nwave = int(config.get('instrument', 'nwave')) - + # calibration self._wave_cal_lasers = [float(w) for w in config.get('calibration', 'wave_cal_lasers').split(',')] @@ -405,11 +405,11 @@ def __init__(self, path): try: val = eval(value) except NameError: - val = value + val = value self._config[key] = val except configparser.Error as e: raise ValueError('Error reading configuration file for instrument {0}: {1}'.format(self._instrument, e.message)) - + # execution of recipes self._recipe_execution = { 'sort_files': False, @@ -427,24 +427,24 @@ def __init__(self, path): 'sph_ifs_star_center': False, 'sph_ifs_combine_data': False } - + # reload any existing data frames self.read_info() - + ################################################## # Representation ################################################## - + def __repr__(self): return ''.format(self._instrument, self._mode, self._path) def __format__(self): return self.__repr__() - + ################################################## # Properties ################################################## - + @property def instrument(self): return self._instrument @@ -452,11 +452,11 @@ def instrument(self): @property def pixel(self): return self._pixel - + @property def nwave(self): return self._nwave - + @property def path(self): return self._path @@ -464,11 +464,11 @@ def path(self): @property def files_info(self): return self._files_info - 
+ @property def frames_info(self): return self._frames_info - + @property def frames_info_preproc(self): return self._frames_info_preproc @@ -479,7 +479,7 @@ def recipe_execution(self): @property def config(self): - return self._config + return self._config @property def mode(self): @@ -516,7 +516,7 @@ def show_config(self): keys = [key for key in dico if key.startswith('center')] for key in keys: print('{0:<30s}{1}'.format(key, dico[key])) - + # combining print('-'*35) keys = [key for key in dico if key.startswith('combine')] @@ -529,10 +529,10 @@ def show_config(self): for key in keys: print('{0:<30s}{1}'.format(key, dico[key])) print('-'*35) - + print() - + def init_reduction(self): ''' Sort files and frames, perform sanity check @@ -540,25 +540,25 @@ def init_reduction(self): # make sure we have sub-directories self._path.create_subdirectories() - + self.sort_files() self.sort_frames() self.check_files_association() - - + + def create_static_calibrations(self): ''' Create static calibrations, mainly with esorex ''' - + config = self._config - + self.sph_ifs_cal_dark(silent=config['misc_silent_esorex']) self.sph_ifs_cal_detector_flat(silent=config['misc_silent_esorex']) self.sph_ifs_cal_specpos(silent=config['misc_silent_esorex']) self.sph_ifs_cal_wave(silent=config['misc_silent_esorex']) self.sph_ifs_cal_ifu_flat(silent=config['misc_silent_esorex']) - + def preprocess_science(self): ''' @@ -566,7 +566,7 @@ def preprocess_science(self): ''' config = self._config - + self.sph_ifs_preprocess_science(subtract_background=config['preproc_subtract_background'], fix_badpix=config['preproc_fix_badpix'], correct_xtalk=config['preproc_fix_badpix'], @@ -586,7 +586,7 @@ def process_science(self): ''' config = self._config - + self.sph_ifs_wavelength_recalibration(high_pass=config['center_high_pass'], offset=config['center_offset'], plot=config['misc_plot']) @@ -602,25 +602,25 @@ def process_science(self): shift_method=config['combine_shift_method'], save_scaled=config['combine_save_scaled']) - + def clean(self): ''' Clean the reduction directory ''' - + config = self._config - + if config['clean']: self.sph_ifs_clean(delete_raw=config['clean_delete_raw'], delete_products=config['clean_delete_products']) - - + + def full_reduction(self): ''' Performs a full reduction of a data set, from the static calibrations to the final (x,y,time,lambda) cubes ''' - + self.init_reduction() self.create_static_calibrations() self.preprocess_science() @@ -630,7 +630,7 @@ def full_reduction(self): ################################################## # SPHERE/IFS methods ################################################## - + def read_info(self): ''' Read the files, calibs and frames information from disk @@ -657,7 +657,7 @@ def read_info(self): files_info['DATE-OBS'] = pd.to_datetime(files_info['DATE-OBS'], utc=False) files_info['DATE'] = pd.to_datetime(files_info['DATE'], utc=False) files_info['DET FRAM UTC'] = pd.to_datetime(files_info['DET FRAM UTC'], utc=False) - + # update recipe execution self._recipe_execution['sort_files'] = True if np.any(files_info['PRO CATG'] == 'IFS_MASTER_DARK'): @@ -703,7 +703,7 @@ def read_info(self): frames_info_preproc['DET FRAM UTC'] = pd.to_datetime(frames_info_preproc['DET FRAM UTC'], utc=False) frames_info_preproc['TIME START'] = pd.to_datetime(frames_info_preproc['TIME START'], utc=False) frames_info_preproc['TIME'] = pd.to_datetime(frames_info_preproc['TIME'], utc=False) - frames_info_preproc['TIME END'] = pd.to_datetime(frames_info_preproc['TIME END'], utc=False) + 
frames_info_preproc['TIME END'] = pd.to_datetime(frames_info_preproc['TIME END'], utc=False) else: frames_info_preproc = None @@ -729,7 +729,7 @@ def read_info(self): file = list(path.preproc.glob('{}.fits'.format(fname))) done = done and (len(file) == 1) self._recipe_execution['sph_ifs_preprocess_science'] = done - + done = True files = frames_info_preproc.index for file, idx in files: @@ -747,7 +747,7 @@ def read_info(self): done = done and (len(file) == 1) self._recipe_execution['sph_ifs_star_center'] = done - + def sort_files(self): ''' Sort all raw files and save result in a data frame @@ -760,7 +760,7 @@ def sort_files(self): # parameters path = self._path - + # list files files = path.raw.glob('*.fits') files = [f.stem for f in files] @@ -807,7 +807,7 @@ def sort_files(self): instru = files_info['SEQ ARM'].unique() if len(instru) != 1: raise ValueError('Sequence is mixing different instruments: {0}'.format(instru)) - + # processed column files_info.insert(len(files_info.columns), 'PROCESSED', False) files_info.insert(len(files_info.columns), 'PRO CATG', ' ') @@ -819,10 +819,10 @@ def sort_files(self): # update instrument mode self._mode = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS2 MODE'][0] - + # sort by acquisition time files_info.sort_values(by='DATE-OBS', inplace=True) - + # save files_info files_info.to_csv(path.preproc / 'files.csv') self._files_info = files_info @@ -830,7 +830,7 @@ def sort_files(self): # update recipe execution self._recipe_execution['sort_files'] = True - + def sort_frames(self): ''' Extract the frames information from the science files and save @@ -844,18 +844,18 @@ def sort_frames(self): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sort_frames', self.recipe_requirements) - + # parameters path = self._path files_info = self._files_info - + # science files - sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE') & (files_info['DPR TYPE'] != 'SKY')] + sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE') & (files_info['DPR TYPE'] != 'SKY')] # raise error when no science frames are present if len(sci_files) == 0: raise ValueError('This dataset contains no science frame. 
There should be at least one!') - + # build indices files = [] img = [] @@ -869,7 +869,7 @@ def sort_frames(self): frames_info = pd.DataFrame(columns=sci_files.columns, index=pd.MultiIndex.from_arrays([files, img], names=['FILE', 'IMG'])) # expand files_info into frames_info - frames_info = frames_info.align(files_info, level=0)[1] + frames_info = frames_info.align(files_info, level=0)[1] # compute timestamps toolbox.compute_times(frames_info) @@ -890,13 +890,13 @@ def sort_frames(self): cinfo = frames_info[frames_info['DPR TYPE'] == 'OBJECT'] if len(cinfo) == 0: cinfo = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] - + ra_drot = cinfo['INS4 DROT2 RA'][0] ra_drot_h = np.floor(ra_drot/1e4) ra_drot_m = np.floor((ra_drot - ra_drot_h*1e4)/1e2) ra_drot_s = ra_drot - ra_drot_h*1e4 - ra_drot_m*1e2 RA = '{:02.0f}:{:02.0f}:{:02.3f}'.format(ra_drot_h, ra_drot_m, ra_drot_s) - + dec_drot = cinfo['INS4 DROT2 DEC'][0] sign = np.sign(dec_drot) udec_drot = np.abs(dec_drot) @@ -910,9 +910,9 @@ def sort_frames(self): pa_end = cinfo['PARANG'][-1] posang = cinfo['INS4 DROT2 POSANG'].unique() - + date = str(cinfo['DATE'][0])[0:10] - + print(' * Object: {0}'.format(cinfo['OBJECT'][0])) print(' * RA / DEC: {0} / {1}'.format(RA, DEC)) print(' * Date: {0}'.format(date)) @@ -920,7 +920,7 @@ def sort_frames(self): print(' * Derotator: {0}'.format(cinfo['INS4 DROT2 MODE'][0])) print(' * Coronagraph: {0}'.format(cinfo['INS COMB ICOR'][0])) print(' * Mode: {0}'.format(cinfo['INS1 MODE'][0])) - print(' * Filter: {0}'.format(cinfo['INS2 COMB IFS'][0])) + print(' * Filter: {0}'.format(cinfo['INS2 COMB IFS'][0])) print(' * DIT: {0:.2f} sec'.format(cinfo['DET SEQ1 DIT'][0])) print(' * NDIT: {0:.0f}'.format(cinfo['DET NDIT'][0])) print(' * Texp: {0:.2f} min'.format(cinfo['DET SEQ1 DIT'].sum()/60)) @@ -938,18 +938,18 @@ def check_files_association(self): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'check_files_association', self.recipe_requirements) - + print('Performing file association for calibrations') # parameters path = self._path files_info = self._files_info - + # instrument arm arm = files_info['SEQ ARM'].unique() if len(arm) != 1: raise ValueError('Sequence is mixing different instruments: {0}'.format(arm)) - + # IFS obs mode modes = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS2 COMB IFS'].unique() if len(modes) != 1: @@ -986,7 +986,7 @@ def check_files_association(self): # find the two closest to science files sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE')] time_sci = sci_files['DATE-OBS'].min() - time_flat = cfiles['DATE-OBS'] + time_flat = cfiles['DATE-OBS'] time_delta = np.abs(time_sci - time_flat).argsort() # drop the others @@ -1004,7 +1004,7 @@ def check_files_association(self): # find the two closest to science files sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE')] time_sci = sci_files['DATE-OBS'].min() - time_flat = cfiles['DATE-OBS'] + time_flat = cfiles['DATE-OBS'] time_delta = np.abs(time_sci - time_flat).argsort() # drop the others @@ -1022,7 +1022,7 @@ def check_files_association(self): # find the two closest to science files sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE')] time_sci = sci_files['DATE-OBS'].min() - time_flat = cfiles['DATE-OBS'] + time_flat = cfiles['DATE-OBS'] time_delta = np.abs(time_sci - time_flat).argsort() # drop the others @@ -1040,7 +1040,7 @@ def check_files_association(self): # find the two closest to science files sci_files = files_info[(files_info['DPR CATG'] == 
'SCIENCE')] time_sci = sci_files['DATE-OBS'].min() - time_flat = cfiles['DATE-OBS'] + time_flat = cfiles['DATE-OBS'] time_delta = np.abs(time_sci - time_flat).argsort() # drop the others @@ -1059,7 +1059,7 @@ def check_files_association(self): # find the two closest to science files sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE')] time_sci = sci_files['DATE-OBS'].min() - time_flat = cfiles['DATE-OBS'] + time_flat = cfiles['DATE-OBS'] time_delta = np.abs(time_sci - time_flat).argsort() # drop the others @@ -1077,7 +1077,7 @@ def check_files_association(self): # find the two closest to science files sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE')] time_sci = sci_files['DATE-OBS'].min() - time_flat = cfiles['DATE-OBS'] + time_flat = cfiles['DATE-OBS'] time_delta = np.abs(time_sci - time_flat).argsort() # drop the others @@ -1095,7 +1095,7 @@ def check_files_association(self): # find the two closest to science files sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE')] time_sci = sci_files['DATE-OBS'].min() - time_flat = cfiles['DATE-OBS'] + time_flat = cfiles['DATE-OBS'] time_delta = np.abs(time_sci - time_flat).argsort() # drop the others @@ -1113,7 +1113,7 @@ def check_files_association(self): # find the two closest to science files sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE')] time_sci = sci_files['DATE-OBS'].min() - time_flat = cfiles['DATE-OBS'] + time_flat = cfiles['DATE-OBS'] time_delta = np.abs(time_sci - time_flat).argsort() # drop the others @@ -1164,8 +1164,8 @@ def check_files_association(self): # save files_info.to_csv(path.preproc / 'files.csv') self._files_info = files_info - - + + def sph_ifs_cal_dark(self, silent=True): ''' Create the dark and background calibrations @@ -1178,13 +1178,13 @@ def sph_ifs_cal_dark(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_cal_dark', self.recipe_requirements) - + print('Creating darks and backgrounds') # parameters path = self._path files_info = self._files_info - + # get list of files calibs = files_info[np.logical_not(files_info['PROCESSED']) & ((files_info['DPR TYPE'] == 'DARK') | @@ -1196,7 +1196,7 @@ def sph_ifs_cal_dark(self, silent=True): DITs = calibs['DET SEQ1 DIT'].unique().round(2) for ctype in types: - for DIT in DITs: + for DIT in DITs: cfiles = calibs[(calibs['DPR TYPE'] == ctype) & (calibs['DET SEQ1 DIT'].round(2) == DIT)] files = cfiles.index @@ -1221,7 +1221,7 @@ def sph_ifs_cal_dark(self, silent=True): dark_file = 'dark_{0}_DIT={1:.2f}'.format(loc, DIT) bpm_file = 'dark_{0}_bpm_DIT={1:.2f}'.format(loc, DIT) - # esorex parameters + # esorex parameters args = ['esorex', '--no-checksum=TRUE', '--no-datamd5=TRUE', @@ -1284,20 +1284,20 @@ def sph_ifs_cal_detector_flat(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_cal_detector_flat', self.recipe_requirements) - + print('Creating flats') # parameters path = self._path files_info = self._files_info - + # get list of files calibs = files_info[np.logical_not(files_info['PROCESSED']) & ((files_info['DPR TYPE'] == 'FLAT,LAMP') | (files_info['DPR TECH'] == 'IMAGE'))] # IFS obs mode - mode = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS2 COMB IFS'].unique()[0] + mode = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS2 COMB IFS'].unique()[0] if mode == 'OBS_YJ': mode_short = 'YJ' elif mode == 'OBS_H': @@ -1334,7 +1334,7 @@ def sph_ifs_cal_detector_flat(self, silent=True): else: 
wav = str(int(wave)) flat_file = 'master_detector_flat_{0}_l{1}'.format(wav, lamp) - bpm_file = 'dff_badpixelname_{0}_l{1}'.format(wav, lamp) + bpm_file = 'dff_badpixelname_{0}_l{1}'.format(wav, lamp) hdu = fits.open(path.raw / files[0]) fits.writeto(path.calib / '{}.fits'.format(flat_file), flat, header=hdu[0].header, output_verify='silentfix', overwrite=True) @@ -1376,25 +1376,25 @@ def sph_ifs_cal_specpos(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_cal_specpos', self.recipe_requirements) - + print('Creating specpos') # parameters path = self._path files_info = self._files_info - + # get list of files specpos_file = files_info[np.logical_not(files_info['PROCESSED']) & (files_info['DPR TYPE'] == 'SPECPOS,LAMP')] if len(specpos_file) != 1: raise ValueError('There should be exactly 1 raw specpos files. Found {0}.'.format(len(specpos_file))) - dark_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IFS_MASTER_DARK') & + dark_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IFS_MASTER_DARK') & (files_info['DPR CATG'] == 'CALIB') & (files_info['DET SEQ1 DIT'].round(2) == 1.65)] if len(dark_file) == 0: raise ValueError('There should at least 1 dark file for calibrations. Found none.') # IFS obs mode - mode = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS2 COMB IFS'].unique()[0] + mode = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS2 COMB IFS'].unique()[0] if mode == 'OBS_YJ': Hmode = 'FALSE' elif mode == 'OBS_H': @@ -1411,8 +1411,8 @@ def sph_ifs_cal_specpos(self, silent=True): # products specp_file = 'spectra_positions' - - # esorex parameters + + # esorex parameters args = ['esorex', '--no-checksum=TRUE', '--no-datamd5=TRUE', @@ -1425,7 +1425,7 @@ def sph_ifs_cal_specpos(self, silent=True): if shutil.which('esorex') is None: raise NameError('esorex does not appear to be in your PATH. Please make sure ' + 'that the ESO pipeline is properly installed before running VLTPF.') - + # execute esorex if silent: proc = subprocess.run(args, cwd=path.tmp, stdout=subprocess.DEVNULL) @@ -1463,13 +1463,13 @@ def sph_ifs_cal_wave(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_cal_wave', self.recipe_requirements) - + print('Creating wavelength calibration') # parameters path = self._path files_info = self._files_info - + # get list of files wave_file = files_info[np.logical_not(files_info['PROCESSED']) & (files_info['DPR TYPE'] == 'WAVE,LAMP')] if len(wave_file) != 1: @@ -1479,13 +1479,13 @@ def sph_ifs_cal_wave(self, silent=True): if len(specpos_file) != 1: raise ValueError('There should be exactly 1 specpos file. Found {0}.'.format(len(specpos_file))) - dark_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IFS_MASTER_DARK') & + dark_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IFS_MASTER_DARK') & (files_info['DPR CATG'] == 'CALIB') & (files_info['DET SEQ1 DIT'].round(2) == 1.65)] if len(dark_file) == 0: raise ValueError('There should at least 1 dark file for calibrations. 
Found none.') # IFS obs mode - mode = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS2 COMB IFS'].unique()[0] + mode = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS2 COMB IFS'].unique()[0] # create sof sof = path.sof / 'wave.sof' @@ -1497,7 +1497,7 @@ def sph_ifs_cal_wave(self, silent=True): # products wav_file = 'wave_calib' - + # esorex parameters if mode == 'OBS_YJ': args = ['esorex', @@ -1527,7 +1527,7 @@ def sph_ifs_cal_wave(self, silent=True): if shutil.which('esorex') is None: raise NameError('esorex does not appear to be in your PATH. Please make sure ' + 'that the ESO pipeline is properly installed before running VLTPF.') - + # execute esorex if silent: proc = subprocess.run(args, cwd=path.tmp, stdout=subprocess.DEVNULL) @@ -1565,15 +1565,15 @@ def sph_ifs_cal_ifu_flat(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_cal_ifu_flat', self.recipe_requirements) - + print('Creating IFU flat') # parameters path = self._path files_info = self._files_info - + # IFS obs mode - mode = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS2 COMB IFS'].unique()[0] + mode = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS2 COMB IFS'].unique()[0] if mode == 'OBS_YJ': mode_short = 'YJ' elif mode == 'OBS_H': @@ -1653,7 +1653,7 @@ def sph_ifs_cal_ifu_flat(self, silent=True): if shutil.which('esorex') is None: raise NameError('esorex does not appear to be in your PATH. Please make sure ' + 'that the ESO pipeline is properly installed before running VLTPF.') - + # execute esorex if silent: proc = subprocess.run(args, cwd=path.tmp, stdout=subprocess.DEVNULL) @@ -1738,14 +1738,14 @@ def sph_ifs_preprocess_science(self, # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_preprocess_science', self.recipe_requirements) - + print('Pre-processing science files') # parameters path = self._path files_info = self._files_info frames_info = self._frames_info - + # clean before we start files = path.preproc.glob('*_DIT???_preproc.fits') for file in files: @@ -1757,7 +1757,7 @@ def sph_ifs_preprocess_science(self, bpm_files = [path.calib / '{}.fits'.format(f) for f in bpm_files] bpm = toolbox.compute_bad_pixel_map(bpm_files) - + # final dataframe index = pd.MultiIndex(names=['FILE', 'IMG'], levels=[[], []], codes=[[], []]) frames_info_preproc = pd.DataFrame(index=index, columns=frames_info.columns) @@ -1765,13 +1765,13 @@ def sph_ifs_preprocess_science(self, # loop on the different type of science files sci_types = ['OBJECT,CENTER', 'OBJECT,FLUX', 'OBJECT'] dark_types = ['SKY', 'DARK,BACKGROUND', 'DARK'] - for typ in sci_types: + for typ in sci_types: # science files sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE') & (files_info['DPR TYPE'] == typ)] sci_DITs = list(sci_files['DET SEQ1 DIT'].round(2).unique()) if len(sci_files) == 0: - continue + continue for DIT in sci_DITs: sfiles = sci_files[sci_files['DET SEQ1 DIT'].round(2) == DIT] @@ -1784,7 +1784,7 @@ def sph_ifs_preprocess_science(self, dfiles = [] for d in dark_types: dfiles = files_info[(files_info['PRO CATG'] == 'IFS_MASTER_DARK') & - (files_info['DPR TYPE'] == d) & + (files_info['DPR TYPE'] == d) & (files_info['DET SEQ1 DIT'].round(2) == DIT)] if len(dfiles) != 0: break @@ -1831,7 +1831,7 @@ def sph_ifs_preprocess_science(self, else: frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'none') elif (typ == 'OBJECT'): - if collapse_science: + if collapse_science: if collapse_type == 'mean': 
print(' ==> collapse: mean ({0} -> 1 frame, 0 dropped)'.format(len(img))) img = np.mean(img, axis=0, keepdims=True) @@ -1896,14 +1896,14 @@ def sph_ifs_preprocess_science(self, # if not, warn user and add fake one: it could be internal source data if hdr.get('HIERARCH ESO TEL TARG ALPHA') is None: print('Warning: no valid coordinates found in header. Adding fake ones to be able to produce (x,y,lambda) datacubes.') - + hdr['HIERARCH ESO TEL TARG ALPHA'] = 120000.0 hdr['HIERARCH ESO TEL TARG DELTA'] = -900000.0 - + # save DITs individually for f in range(len(img)): - frame = img[f].squeeze() + frame = img[f].squeeze() hdr['HIERARCH ESO DET NDIT'] = 1 fits.writeto(path.preproc / '{}_DIT{:03d}_preproc.fits'.format(fname, f), frame, hdr, overwrite=True, output_verify='silentfix') @@ -1921,7 +1921,7 @@ def sph_ifs_preprocess_science(self, # update recipe execution self._recipe_execution['sph_ifs_preprocess_science'] = True - + def sph_ifs_preprocess_wave(self): ''' Pre-processes the wavelength calibration frame for later @@ -1930,11 +1930,11 @@ def sph_ifs_preprocess_wave(self): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_preprocess_wave', self.recipe_requirements) - + # parameters path = self._path files_info = self._files_info - + print('Pre-processing wavelength calibration file') # bpm @@ -2000,20 +2000,20 @@ def sph_ifs_science_cubes(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_science_cubes', self.recipe_requirements) - + print('Creating the (x,y,lambda) science cubes') # parameters path = self._path files_info = self._files_info - + # clean before we start files = path.preproc.glob('*_DIT???_preproc_?????.fits') for file in files: file.unlink() # IFS obs mode - mode = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS2 COMB IFS'].unique()[0] + mode = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS2 COMB IFS'].unique()[0] if mode == 'OBS_YJ': mode_short = 'YJ' elif mode == 'OBS_H': @@ -2027,7 +2027,7 @@ def sph_ifs_science_cubes(self, silent=True): # get list of calibration files bpm_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IFS_STATIC_BADPIXELMAP') & - (files_info['INS2 COMB IFS'] == 'CAL_BB_2_{0}'.format(mode_short))] + (files_info['INS2 COMB IFS'] == 'CAL_BB_2_{0}'.format(mode_short))] ifu_flat_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IFS_IFU_FLAT_FIELD')] if len(ifu_flat_file) != 1: @@ -2094,7 +2094,7 @@ def sph_ifs_science_cubes(self, silent=True): if shutil.which('esorex') is None: raise NameError('esorex does not appear to be in your PATH. Please make sure ' + 'that the ESO pipeline is properly installed before running VLTPF.') - + # execute esorex if silent: proc = subprocess.run(args, cwd=path.tmp, stdout=subprocess.DEVNULL) @@ -2138,9 +2138,9 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot= offset : tuple Apply an (x,y) offset to the default center position, for the waffle centering. - The offset will move the search box of the waffle spots by the amount of + The offset will move the search box of the waffle spots by the amount of specified pixels in each direction. Default is no offset - + plot : bool Display and save diagnostic plot for quality check. 
Default is True @@ -2148,7 +2148,7 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot= # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_wavelength_recalibration', self.recipe_requirements) - + print('Recalibrating wavelength') # parameters @@ -2197,7 +2197,7 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot= coro = False else: coro = True - + # compute centers from waffle spots waffle_orientation = hdr['HIERARCH ESO OCS WAFFLE ORIENT'] if plot: @@ -2215,7 +2215,7 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot= # # wavelength recalibration - # + # print(' * recalibration') # find wavelength calibration file name @@ -2302,7 +2302,7 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot= if plot: plt.figure('Wavelength recalibration', figsize=(17, 5.5)) plt.clf() - + plt.subplot(131) plt.plot(img_center[:, 0], img_center[:, 1], linestyle='none', marker='+') plt.xlabel('x center [pix]') @@ -2328,11 +2328,11 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot= plt.ylabel('Flux') plt.legend(loc='upper right') plt.title('Wavelength calibration') - + plt.tight_layout() plt.savefig(path.products / 'wavelength_recalibration.pdf') - + # update recipe execution self._recipe_execution['sph_ifs_wavelength_recalibration'] = True @@ -2348,9 +2348,9 @@ def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), plot=True): offset : tuple Apply an (x,y) offset to the default center position, for the waffle centering. - The offset will move the search box of the waffle spots by the amount of + The offset will move the search box of the waffle spots by the amount of specified pixels in each direction. Default is no offset - + plot : bool Display and save diagnostic plot for quality check. 
Default is True @@ -2358,7 +2358,7 @@ def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), plot=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_star_center', self.recipe_requirements) - + print('Star centers determination') # parameters @@ -2366,7 +2366,7 @@ def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), plot=True): nwave = self._nwave pixel = self._pixel frames_info = self._frames_info_preproc - + # start with OBJECT,FLUX flux_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,FLUX'] if len(flux_files) != 0: @@ -2374,7 +2374,7 @@ def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), plot=True): print(' ==> OBJECT,FLUX: {0}'.format(file)) # read data - fname = '{0}_DIT{1:03d}_preproc_'.format(file, idx) + fname = '{0}_DIT{1:03d}_preproc_'.format(file, idx) files = list(path.preproc.glob(fname+'*[0-9].fits')) cube, hdr = fits.getdata(files[0], header=True) @@ -2382,7 +2382,7 @@ def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), plot=True): cube[:, :40, :] = 0 cube[:, :, :25] = 0 cube[:, :, 250:] = 0 - + # wavelength wave_min = hdr['HIERARCH ESO DRS IFS MIN LAMBDA']*1000 wave_max = hdr['HIERARCH ESO DRS IFS MAX LAMBDA']*1000 @@ -2398,7 +2398,7 @@ def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), plot=True): # save fits.writeto(path.preproc / '{}centers.fits'.format(fname), img_center, overwrite=True) print() - + # then OBJECT,CENTER starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] if len(starcen_files) != 0: @@ -2425,7 +2425,7 @@ def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), plot=True): = toolbox.star_centers_from_waffle_img_cube(cube, wave_drh, 'IFS', waffle_orientation, high_pass=high_pass, center_offset=offset, save_path=save_path) - + # save fits.writeto(path.preproc / '{}centers.fits'.format(fname), img_center, overwrite=True) print() @@ -2442,21 +2442,21 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a (OBJECT,FLUX), star centers (OBJECT,CENTER) and standard coronagraphic images (OBJECT). For each type of data, the method saves 3 or 4 different files: - + - *_cube: the (x,y,time,lambda) cube - + - *_parang: the parallactic angle vector - + - *_derot: the derotation angles vector. This vector takes into account the parallactic angle and any instrumental pupil offset. This is the values that need to be used for aligning the images with North up and East left. - + - *_frames: a csv file with all the information for every frames. There is one line by time step in the data cube. - + - *_cube_scaled: the (x,y,time,lambda) cube with images rescaled spectraly. This is useful if you plan to perform spectral differential @@ -2464,7 +2464,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a The method also save a frames.csv file with all the information extracted the raw files headers. - + Parameters ---------- cpix : bool @@ -2475,7 +2475,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a psf_dim : even int Size of the PSF images. Default is 80x80 pixels - science_dim : even int + science_dim : even int Size of the science images (star centers and standard coronagraphic images). Default is 290, 290 pixels @@ -2490,7 +2490,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a skip_center is ignored for the OBJECT,CENTER and OBJECT frames. 
Default is None - skip_center : bool + skip_center : bool Control if images are finely centered or not before being combined. However the images are still roughly centered by shifting them by an integer number of pixel to bring the @@ -2500,33 +2500,33 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a Default is False. Note that if skip_center is True, the save_scaled option is automatically disabled. - + shift_method : str Method to scaling and shifting the images: fft or interp. Default is fft - + save_scaled : bool Also save the wavelength-rescaled cubes. Makes the process much longer. The default is False ''' - + # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_combine_data', self.recipe_requirements) - + print('Combine science data') # parameters path = self._path nwave = self._nwave frames_info = self._frames_info_preproc - + # read final wavelength calibration fname = path.products / 'wavelength.fits' if not fname.exists(): raise FileExistsError('Missing wavelength.fits file. ' + - 'You must first run the sph_ifs_wavelength_recalibration() method.') - wave = fits.getdata(fname) + 'You must first run the sph_ifs_wavelength_recalibration() method.') + wave = fits.getdata(fname) # max images size if psf_dim > 290: @@ -2547,7 +2547,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a manual_center = np.array(manual_center) if (manual_center.shape != (2,)) or (manual_center.shape != (nwave, 2)): raise ValueError('manual_center does not have the right number of dimensions.') - + if manual_center.shape == (2,): manual_center = np.full((nwave, 2), manual_center) @@ -2583,10 +2583,10 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a files = list(path.preproc.glob(fname+'?????.fits')) cube = fits.getdata(files[0]) centers = fits.getdata(path.preproc / '{}centers.fits'.format(fname)) - + # mask values outside of IFS FoV cube[cube == 0] = np.nan - + # neutral density ND = frames_info.loc[(file, idx), 'INS4 FILT2 NAME'] w, attenuation = transmission.transmission_nd(ND, wave=wave) @@ -2614,7 +2614,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a nimg = psf_cube[wave_idx, file_idx] nimg = imutils.scale(nimg, (1.0059, 1.0011), method='interp') psf_cube[wave_idx, file_idx] = nimg - + # wavelength-scaled version if save_scaled: nimg = psf_cube[wave_idx, file_idx] @@ -2668,7 +2668,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # mask values outside of IFS FoV cube[cube == 0] = np.nan - + # neutral density ND = frames_info.loc[(file, idx), 'INS4 FILT2 NAME'] w, attenuation = transmission.transmission_nd(ND, wave=wave) @@ -2699,7 +2699,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a nimg = cen_cube[wave_idx, file_idx] nimg = imutils.scale(nimg, (1.0059, 1.0011), method='interp') cen_cube[wave_idx, file_idx] = nimg - + # wavelength-scaled version if save_scaled: nimg = cen_cube[wave_idx, file_idx] @@ -2769,7 +2769,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # mask values outside of IFS FoV cube[cube == 0] = np.nan - + # neutral density ND = frames_info.loc[(file, idx), 'INS4 FILT2 NAME'] w, attenuation = transmission.transmission_nd(ND, wave=wave) @@ -2794,7 +2794,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a nimg = sci_cube[wave_idx, file_idx] nimg = 
imutils.scale(nimg, (1.0059, 1.0011), method='interp') sci_cube[wave_idx, file_idx] = nimg - + # wavelength-scaled version if save_scaled: nimg = sci_cube[wave_idx, file_idx] @@ -2834,7 +2834,7 @@ def sph_ifs_clean(self, delete_raw=False, delete_products=False): # parameters path = self._path - + # tmp if path.tmp.exists(): shutil.rmtree(path.tmp, ignore_errors=True) diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py index 6989775..5623b66 100644 --- a/vltpf/IRDIS/ImagingReduction.py +++ b/vltpf/IRDIS/ImagingReduction.py @@ -46,11 +46,11 @@ class ImagingReduction(object): 'sph_ird_star_center': ['sort_files', 'sort_frames', 'sph_ird_preprocess_science'], 'sph_ird_combine_data': ['sort_files', 'sort_frames', 'sph_ird_preprocess_science', 'sph_ird_star_center'] } - + ################################################## # Constructor ################################################## - + def __init__(self, path): '''Initialization of the ImagingReduction instances @@ -63,16 +63,16 @@ def __init__(self, path): # expand path path = Path(path).expanduser().resolve() - + # zeroth-order reduction validation raw = path / 'raw' if not raw.exists(): raise ValueError('No raw/ subdirectory. {0} is not a valid reduction path!'.format(path)) - + # init path and name self._path = utils.ReductionPath(path) self._instrument = 'IRDIS' - + # instrument mode self._mode = 'Unknown' @@ -85,7 +85,7 @@ def __init__(self, path): # instrument self._pixel = float(config.get('instrument', 'pixel')) self._nwave = 2 - + # calibration self._wave_cal_lasers = [float(w) for w in config.get('calibration', 'wave_cal_lasers').split(',')] @@ -98,35 +98,35 @@ def __init__(self, path): try: val = eval(value) except NameError: - val = value + val = value self._config[key] = val except configparser.Error as e: raise ValueError('Error reading configuration file for instrument {0}: {1}'.format(self._instrument, e.message)) - + # execution of recipes self._recipe_execution = { 'sort_files': False, 'sort_frames': False, 'check_files_association': False } - + # reload any existing data frames self.read_info() - + ################################################## # Representation ################################################## - + def __repr__(self): return ''.format(self._instrument, self._mode, self._path) - + def __format__(self): return self.__repr__() - + ################################################## # Properties ################################################## - + @property def instrument(self): return self._instrument @@ -134,11 +134,11 @@ def instrument(self): @property def pixel(self): return self._pixel - + @property def nwave(self): return self._nwave - + @property def path(self): return self._path @@ -146,11 +146,11 @@ def path(self): @property def files_info(self): return self._files_info - + @property def frames_info(self): return self._frames_info - + @property def frames_info_preproc(self): return self._frames_info_preproc @@ -158,10 +158,10 @@ def frames_info_preproc(self): @property def recipe_execution(self): return self._recipe_execution - + @property def config(self): - return self._config + return self._config @property def mode(self): @@ -198,7 +198,7 @@ def show_config(self): keys = [key for key in dico if key.startswith('center')] for key in keys: print('{0:<30s}{1}'.format(key, dico[key])) - + # combining print('-'*35) keys = [key for key in dico if key.startswith('combine')] @@ -211,10 +211,10 @@ def show_config(self): for key in keys: print('{0:<30s}{1}'.format(key, 
dico[key])) print('-'*35) - + print() - - + + def init_reduction(self): ''' Sort files and frames, perform sanity check @@ -222,30 +222,30 @@ def init_reduction(self): # make sure we have sub-directories self._path.create_subdirectories() - + self.sort_files() self.sort_frames() self.check_files_association() - - + + def create_static_calibrations(self): ''' Create static calibrations with esorex ''' config = self._config - + self.sph_ird_cal_dark(silent=config['misc_silent_esorex']) self.sph_ird_cal_detector_flat(silent=config['misc_silent_esorex']) - + def preprocess_science(self): ''' Clean and collapse images ''' - + config = self._config - + self.sph_ird_preprocess_science(subtract_background=config['preproc_subtract_background'], fix_badpix=config['preproc_fix_badpix'], collapse_science=config['preproc_collapse_science'], @@ -260,9 +260,9 @@ def process_science(self): Perform star center, combine cubes into final (x,y,time,lambda) cubes, correct anamorphism and scale the images ''' - + config = self._config - + self.sph_ird_star_center(high_pass=config['center_high_pass'], offset=config['center_offset'], plot=config['misc_plot']) @@ -275,26 +275,26 @@ def process_science(self): shift_method=config['combine_shift_method'], save_scaled=config['combine_save_scaled']) - + def clean(self): ''' Clean the reduction directory, leaving only the raw and products sub-directory ''' - + config = self._config if config['clean']: self.sph_ird_clean(delete_raw=config['clean_delete_raw'], delete_products=config['clean_delete_products']) - - + + def full_reduction(self): ''' Performs a full reduction of a data set, from the static calibrations to the final (x,y,time,lambda) cubes ''' - + self.init_reduction() self.create_static_calibrations() self.preprocess_science() @@ -304,7 +304,7 @@ def full_reduction(self): ################################################## # SPHERE/IRDIS methods ################################################## - + def read_info(self): ''' Read the files, calibs and frames information from disk @@ -321,7 +321,7 @@ def read_info(self): # path path = self._path - + # files info fname = path.preproc / 'files.csv' if fname.exists(): @@ -331,7 +331,7 @@ def read_info(self): files_info['DATE-OBS'] = pd.to_datetime(files_info['DATE-OBS'], utc=False) files_info['DATE'] = pd.to_datetime(files_info['DATE'], utc=False) files_info['DET FRAM UTC'] = pd.to_datetime(files_info['DET FRAM UTC'], utc=False) - + # update recipe execution self._recipe_execution['sort_files'] = True if np.any(files_info['PRO CATG'] == 'IRD_MASTER_DARK'): @@ -371,7 +371,7 @@ def read_info(self): frames_info_preproc['DET FRAM UTC'] = pd.to_datetime(frames_info_preproc['DET FRAM UTC'], utc=False) frames_info_preproc['TIME START'] = pd.to_datetime(frames_info_preproc['TIME START'], utc=False) frames_info_preproc['TIME'] = pd.to_datetime(frames_info_preproc['TIME'], utc=False) - frames_info_preproc['TIME END'] = pd.to_datetime(frames_info_preproc['TIME END'], utc=False) + frames_info_preproc['TIME END'] = pd.to_datetime(frames_info_preproc['TIME END'], utc=False) else: frames_info_preproc = None @@ -399,7 +399,7 @@ def read_info(self): done = done and (len(file) == 1) self._recipe_execution['sph_ird_star_center'] = done - + def sort_files(self): ''' Sort all raw files and save result in a data frame @@ -412,7 +412,7 @@ def sort_files(self): # parameters path = self._path - + # list files files = path.raw.glob('*.fits') files = [f.stem for f in files] @@ -438,7 +438,7 @@ def sort_files(self): key = keywords_short[idx] 
if key.find('HIERARCH ESO ') != -1: keywords_short[idx] = key[13:] - + # files table files_info = pd.DataFrame(index=pd.Index(files, name='FILE'), columns=keywords_short, dtype='float') @@ -454,12 +454,12 @@ def sort_files(self): # drop files that are not handled, based on DPR keywords files_info.dropna(subset=['DPR TYPE'], inplace=True) files_info = files_info[(files_info['DPR CATG'] != 'ACQUISITION') & (files_info['DPR TYPE'] != 'OBJECT,AO')] - + # check instruments instru = files_info['SEQ ARM'].unique() if len(instru) != 1: raise ValueError('Sequence is mixing different instruments: {0}'.format(instru)) - + # processed column files_info.insert(len(files_info.columns), 'PROCESSED', False) files_info.insert(len(files_info.columns), 'PRO CATG', ' ') @@ -468,7 +468,7 @@ def sort_files(self): files_info['DATE-OBS'] = pd.to_datetime(files_info['DATE-OBS'], utc=False) files_info['DATE'] = pd.to_datetime(files_info['DATE'], utc=False) files_info['DET FRAM UTC'] = pd.to_datetime(files_info['DET FRAM UTC'], utc=False) - + # update instrument mode self._mode = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS1 MODE'][0] @@ -482,7 +482,7 @@ def sort_files(self): # update recipe execution self._recipe_execution['sort_files'] = True - + def sort_frames(self): ''' Extract the frames information from the science files and save @@ -496,18 +496,18 @@ def sort_frames(self): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sort_frames', self.recipe_requirements) - + # parameters path = self._path files_info = self._files_info - + # science files - sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE') & (files_info['DPR TYPE'] != 'SKY')] + sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE') & (files_info['DPR TYPE'] != 'SKY')] # raise error when no science frames are present if len(sci_files) == 0: raise ValueError('This dataset contains no science frame. 
There should be at least one!') - + # build indices files = [] img = [] @@ -521,7 +521,7 @@ def sort_frames(self): frames_info = pd.DataFrame(columns=sci_files.columns, index=pd.MultiIndex.from_arrays([files, img], names=['FILE', 'IMG'])) # expand files_info into frames_info - frames_info = frames_info.align(files_info, level=0)[1] + frames_info = frames_info.align(files_info, level=0)[1] # compute timestamps toolbox.compute_times(frames_info) @@ -535,20 +535,20 @@ def sort_frames(self): # update recipe execution self._recipe_execution['sort_frames'] = True - + # # print some info # cinfo = frames_info[frames_info['DPR TYPE'] == 'OBJECT'] if len(cinfo) == 0: cinfo = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] - + ra_drot = cinfo['INS4 DROT2 RA'][0] ra_drot_h = np.floor(ra_drot/1e4) ra_drot_m = np.floor((ra_drot - ra_drot_h*1e4)/1e2) ra_drot_s = ra_drot - ra_drot_h*1e4 - ra_drot_m*1e2 RA = '{:02.0f}:{:02.0f}:{:02.3f}'.format(ra_drot_h, ra_drot_m, ra_drot_s) - + dec_drot = cinfo['INS4 DROT2 DEC'][0] sign = np.sign(dec_drot) udec_drot = np.abs(dec_drot) @@ -562,9 +562,9 @@ def sort_frames(self): pa_end = cinfo['PARANG'][-1] posang = cinfo['INS4 DROT2 POSANG'].unique() - + date = str(cinfo['DATE'][0])[0:10] - + print(' * Object: {0}'.format(cinfo['OBJECT'][0])) print(' * RA / DEC: {0} / {1}'.format(RA, DEC)) print(' * Date: {0}'.format(date)) @@ -578,7 +578,7 @@ def sort_frames(self): print(' * Texp: {0:.2f} min'.format(cinfo['DET SEQ1 DIT'].sum()/60)) print(' * PA: {0:.2f}° ==> {1:.2f}° = {2:.2f}°'.format(pa_start, pa_end, np.abs(pa_end-pa_start))) print(' * POSANG: {0}'.format(', '.join(['{:.2f}°'.format(p) for p in posang]))) - + def check_files_association(self): ''' @@ -590,7 +590,7 @@ def check_files_association(self): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'check_files_association', self.recipe_requirements) - + print('Performing file association for calibrations') # parameters @@ -600,7 +600,7 @@ def check_files_association(self): arm = files_info['SEQ ARM'].unique() if len(arm) != 1: raise ValueError('Sequence is mixing different instruments: {0}'.format(arm)) - + # IRDIS obs mode and filter combination modes = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS1 MODE'].unique() if len(modes) != 1: @@ -615,7 +615,7 @@ def check_files_association(self): # keep static calibrations and sky backgrounds calibs = files_info[(files_info['DPR CATG'] == 'CALIB') | ((files_info['DPR CATG'] == 'SCIENCE') & (files_info['DPR TYPE'] == 'SKY'))] - + ############################################### # static calibrations not dependent on DIT ############################################### @@ -660,7 +660,7 @@ def check_files_association(self): if error_flag: raise ValueError('There is {0} errors that should be solved before proceeding'.format(error_flag)) - + def sph_ird_cal_dark(self, silent=True): ''' Create the dark and background calibrations @@ -673,13 +673,13 @@ def sph_ird_cal_dark(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_cal_dark', self.recipe_requirements) - + print('Creating darks and backgrounds') # parameters path = self._path files_info = self._files_info - + # get list of files calibs = files_info[np.logical_not(files_info['PROCESSED']) & ((files_info['DPR TYPE'] == 'DARK') | @@ -723,8 +723,8 @@ def sph_ird_cal_dark(self, silent=True): max_level = 1000 if cfilt in ['DB_K12', 'BB_Ks']: max_level = 15000 - - # esorex parameters + + # esorex 
parameters args = ['esorex', '--no-checksum=TRUE', '--no-datamd5=TRUE', @@ -791,25 +791,25 @@ def sph_ird_cal_detector_flat(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_cal_detector_flat', self.recipe_requirements) - + print('Creating flats') # parameters path = self._path files_info = self._files_info - + # get list of files calibs = files_info[np.logical_not(files_info['PROCESSED']) & ((files_info['DPR TYPE'] == 'FLAT,LAMP') | (files_info['DPR TECH'] == 'IMAGE'))] filter_combs = calibs['INS COMB IFLT'].unique() - + for cfilt in filter_combs: cfiles = calibs[calibs['INS COMB IFLT'] == cfilt] files = cfiles.index print(' * filter {0} ({1} files)'.format(cfilt, len(cfiles))) - + # create sof sof = path.sof / 'flat_filt={0}.sof'.format(cfilt) file = open(sof, 'w') @@ -820,8 +820,8 @@ def sph_ird_cal_detector_flat(self, silent=True): # products flat_file = 'flat_filt={0}'.format(cfilt) bpm_file = 'flat_bpm_filt={0}'.format(cfilt) - - # esorex parameters + + # esorex parameters args = ['esorex', '--no-checksum=TRUE', '--no-datamd5=TRUE', @@ -866,7 +866,7 @@ def sph_ird_cal_detector_flat(self, silent=True): files_info.loc[bpm_file, 'INS1 OPTI2 NAME'] = cfiles['INS1 OPTI2 NAME'][0] files_info.loc[bpm_file, 'PROCESSED'] = True files_info.loc[bpm_file, 'PRO CATG'] = 'IRD_NON_LINEAR_BADPIXELMAP' - + # save files_info.to_csv(path.preproc / 'files.csv') @@ -897,7 +897,7 @@ def sph_ird_preprocess_science(self, The pre-processed frames are saved in the preproc sub-directory and will be combined later. - + Parameters ---------- subtract_background : bool @@ -929,14 +929,14 @@ def sph_ird_preprocess_science(self, # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_preprocess_science', self.recipe_requirements) - + print('Pre-processing science files') # parameters path = self._path files_info = self._files_info frames_info = self._frames_info - + # clean before we start files = path.preproc.glob('*_DIT???_preproc.fits') for file in files: @@ -959,14 +959,14 @@ def sph_ird_preprocess_science(self, bpm[:, :50] = 0 bpm[:, 941:1078] = 0 bpm[:, 1966:] = 0 - - # flat + + # flat flat_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IRD_FLAT_FIELD') & (files_info['INS COMB IFLT'] == filter_comb)] if len(flat_file) != 1: raise ValueError('There should be exactly 1 flat file. Found {0}.'.format(len(flat_file))) flat = fits.getdata(path.calib / '{}.fits'.format(flat_file.index[0])) - + # final dataframe index = pd.MultiIndex(names=['FILE', 'IMG'], levels=[[], []], codes=[[], []]) frames_info_preproc = pd.DataFrame(index=index, columns=frames_info.columns) @@ -980,7 +980,7 @@ def sph_ird_preprocess_science(self, sci_DITs = list(sci_files['DET SEQ1 DIT'].round(2).unique()) if len(sci_files) == 0: - continue + continue for DIT in sci_DITs: sfiles = sci_files[sci_files['DET SEQ1 DIT'].round(2) == DIT] @@ -1018,7 +1018,7 @@ def sph_ird_preprocess_science(self, # read data print(' ==> read data') img, hdr = fits.getdata(path.raw / '{}.fits'.format(fname), header=True) - + # add extra dimension to single images to make cubes if img.ndim == 2: img = img[np.newaxis, ...] 
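For reference, the collapse step that sph_ird_preprocess_science applies to each science cube can be sketched in isolation as below. The masked column ranges and the np.mean call follow the recipe above; the input array itself is a placeholder rather than real IRDIS data.

import numpy as np

# placeholder for a raw IRDIS exposure read with fits.getdata()
img = np.ones((4, 1024, 2048))

# add extra dimension to single images to make cubes
if img.ndim == 2:
    img = img[np.newaxis, ...]

# mask the non-illuminated parts of the detector
img[:, :, :50]      = np.nan
img[:, :, 941:1078] = np.nan
img[:, :, 1966:]    = np.nan

# 'mean' collapse: NDIT frames -> 1 frame
img = np.mean(img, axis=0, keepdims=True)   # shape (1, 1024, 2048)

The 'none' branch of the recipe instead keeps every DIT and only updates the frames bookkeeping through toolbox.collapse_frames_info().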
@@ -1029,7 +1029,7 @@ def sph_ird_preprocess_science(self, img[:, :, :50] = np.nan img[:, :, 941:1078] = np.nan img[:, :, 1966:] = np.nan - + # collapse if (typ == 'OBJECT,CENTER'): if collapse_center: @@ -1046,7 +1046,7 @@ def sph_ird_preprocess_science(self, else: frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'none') elif (typ == 'OBJECT'): - if collapse_science: + if collapse_science: if collapse_type == 'mean': print(' ==> collapse: mean ({0} -> 1 frame, 0 dropped)'.format(len(img))) img = np.mean(img, axis=0, keepdims=True) @@ -1079,7 +1079,7 @@ def sph_ird_preprocess_science(self, frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'none') frames_info_preproc = pd.concat((frames_info_preproc, frames_info_new)) - + # background subtraction if subtract_background: print(' ==> subtract background') @@ -1091,11 +1091,11 @@ def sph_ird_preprocess_science(self, print(' ==> divide by flat field') for f in range(len(img)): img[f] /= flat - + # bad pixels correction if fix_badpix: print(' ==> correct bad pixels') - for f in range(len(img)): + for f in range(len(img)): frame = img[f] frame = imutils.fix_badpix(frame, bpm, npix=12, weight=True) @@ -1114,10 +1114,10 @@ def sph_ird_preprocess_science(self, nimg[f, 0] = img[f, :, 0:1024] nimg[f, 1] = img[f, :, 1024:] img = nimg - + # save DITs individually for f in range(len(img)): - frame = nimg[f, ...].squeeze() + frame = nimg[f, ...].squeeze() hdr['HIERARCH ESO DET NDIT'] = 1 fits.writeto(path.preproc / '{}_DIT{:03d}_preproc.fits'.format(fname, f), frame, hdr, overwrite=True, output_verify='silentfix') @@ -1148,9 +1148,9 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): offset : tuple Apply an (x,y) offset to the default center position, for the waffle centering. - The offset will move the search box of the waffle spots by the amount of + The offset will move the search box of the waffle spots by the amount of specified pixels in each direction. Default is no offset - + plot : bool Display and save diagnostic plot for quality check. Default is True @@ -1158,21 +1158,21 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_star_center', self.recipe_requirements) - + print('Star centers determination') # parameters path = self._path pixel = self._pixel frames_info = self._frames_info_preproc - + # wavelength filter_comb = frames_info['INS COMB IFLT'].unique()[0] - wave, bandwidth = transmission.wavelength_bandwidth_filter(filter_comb) + wave, bandwidth = transmission.wavelength_bandwidth_filter(filter_comb) wave = np.array(wave) - + # start with OBJECT,FLUX - flux_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,FLUX'] + flux_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,FLUX'] if len(flux_files) != 0: for file, idx in flux_files.index: print(' ==> OBJECT,FLUX: {0}'.format(file)) @@ -1208,7 +1208,7 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): coro = False else: coro = True - + # centers waffle_orientation = hdr['HIERARCH ESO OCS WAFFLE ORIENT'] if plot: @@ -1236,11 +1236,11 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a (OBJECT,FLUX), star centers (OBJECT,CENTER) and standard coronagraphic images (OBJECT). 
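Before the list of saved products below, a rough standalone sketch of what this combination step amounts to: every frame is shifted so that its fitted star center lands on a common pixel, then the frames are stacked into a (lambda, time, y, x) cube. All sizes, center values and the interpolation order here are placeholders; the pipeline itself uses its imutils helpers and also folds in the DMS offsets.

import numpy as np
from scipy import ndimage

nwave, nframes, dim = 2, 4, 64                     # placeholder dimensions
frames  = np.random.normal(size=(nwave, nframes, dim, dim))
centers = np.full((nframes, nwave, 2), 31.0)       # fitted (cx, cy) per frame

cc = dim // 2                                      # cpix=True convention
sci_cube = np.empty((nwave, nframes, dim, dim))
for wave_idx in range(nwave):
    for file_idx in range(nframes):
        cx, cy = centers[file_idx, wave_idx]
        # ndimage.shift takes the offset per axis in (y, x) order
        sci_cube[wave_idx, file_idx] = ndimage.shift(frames[wave_idx, file_idx],
                                                     (cc - cy, cc - cx), order=1)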
For each type of data, the method saves 4 or 5 different files:
-        
+
         - *_cube: the (x,y,time,lambda) cube
-        
+
         - *_parang: the parallactic angle vector
-        
+
         - *_derot: the derotation angles vector. This vector takes into
           account the parallactic angle and any instrumental pupil
           offset. These are the values
@@ -1250,12 +1250,12 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a
           that need to be used for aligning the images with
           North up and East left.
 
         - *_frames: a csv file with all the information for every
           frame. There is one line per time step in the data cube.
-        
+
         - *_cube_scaled: the (x,y,time,lambda) cube with images
           rescaled spectrally. This is useful if you plan to perform
           spectral differential
-          imaging in your analysis. 
-        
+          imaging in your analysis.
+
         Parameters
         ----------
         cpix : bool
             If True the images are centered on the pixel at coordinate
             (dim//2,dim//2). If False the images are centered between 4
             pixels, at coordinates ((dim-1)/2,(dim-1)/2). Default is True.
 
         psf_dim : even int
             Size of the PSF images. Default is 80x80 pixels
 
-        science_dim : even int 
+        science_dim : even int
             Size of the science images (star centers and standard
             coronagraphic images). Default is 290x290 pixels
 
         correct_anamorphism : bool
             Correct the optical anamorphism of the instrument. Default is
             True. See user manual for details.
 
-        manual_center : array 
+        manual_center : array
             User provided centers for the OBJECT,CENTER and OBJECT
             frames. This should be an array of 2x2 values (cx,cy for the
             2 wavelengths). If a manual center is provided, the value of
             skip_center is ignored for the OBJECT,CENTER and OBJECT
             frames. Default is None
 
-        skip_center : bool 
+        skip_center : bool
             Control if images are finely centered or not before being
             combined. However the images are still roughly centered by
             shifting them by an integer number of pixels to bring the
             center of the data close to the center of the images. This
             option is useful if fine centering must be done afterwards.
-        
+
         shift_method : str
             Method used to scale and shift the images: fft or interp.
             Default is fft
 
-        save_scaled : bool 
+        save_scaled : bool
             Also save the wavelength-rescaled cubes. Makes the process
             much longer.
The default is False ''' - + # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_combine_data', self.recipe_requirements) - + print('Combine science data') # parameters path = self._path nwave = self._nwave frames_info = self._frames_info_preproc - + # wavelength filter_comb = frames_info['INS COMB IFLT'].unique()[0] - wave, bandwidth = transmission.wavelength_bandwidth_filter(filter_comb) + wave, bandwidth = transmission.wavelength_bandwidth_filter(filter_comb) wave = np.array(wave) fits.writeto(path.products / 'wavelength.fits', wave, overwrite=True) @@ -1334,7 +1334,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a manual_center = np.array(manual_center) if manual_center.shape != (2, 2): raise ValueError('manual_center does not have the right number of dimensions.') - + print('Warning: images will be centered at the user-provided values.') # @@ -1366,11 +1366,11 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) cube = fits.getdata(path.preproc / '{}.fits'.format(fname)) centers = fits.getdata(path.preproc / '{}_centers.fits'.format(fname)) - + # neutral density ND = frames_info.loc[(file, idx), 'INS4 FILT2 NAME'] w, attenuation = transmission.transmission_nd(ND, wave=wave) - + # DIT, angles, etc DIT = frames_info.loc[(file, idx), 'DET SEQ1 DIT'] psf_parang[file_idx] = frames_info.loc[(file, idx), 'PARANG'] @@ -1394,7 +1394,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a nimg = psf_cube[wave_idx, file_idx] nimg = imutils.scale(nimg, (1.0000, 1.0062), method='interp') psf_cube[wave_idx, file_idx] = nimg - + # wavelength-scaled version if save_scaled: nimg = psf_cube[wave_idx, file_idx] @@ -1444,7 +1444,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) cube = fits.getdata(path.preproc / '{}.fits'.format(fname)) centers = fits.getdata(path.preproc / '{}_centers.fits'.format(fname)) - + # neutral density ND = frames_info.loc[(file, idx), 'INS4 FILT2 NAME'] w, attenuation = transmission.transmission_nd(ND, wave=wave) @@ -1475,7 +1475,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a nimg = cen_cube[wave_idx, file_idx] nimg = imutils.scale(nimg, (1.0000, 1.0062), method='interp') cen_cube[wave_idx, file_idx] = nimg - + # wavelength-scaled version if save_scaled: nimg = cen_cube[wave_idx, file_idx] @@ -1565,7 +1565,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # and the pixel size is 18 micron dms_dx = frames_info.loc[(file, idx), 'INS1 PAC X'] / 18 dms_dy = frames_info.loc[(file, idx), 'INS1 PAC Y'] / 18 - + # center frames for wave_idx, img in enumerate(cube): cx, cy = centers[wave_idx, :] @@ -1585,7 +1585,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a nimg = sci_cube[wave_idx, file_idx] nimg = imutils.scale(nimg, (1.0000, 1.0062), method='interp') sci_cube[wave_idx, file_idx] = nimg - + # wavelength-scaled version if save_scaled: nimg = sci_cube[wave_idx, file_idx] @@ -1625,7 +1625,7 @@ def sph_ird_clean(self, delete_raw=False, delete_products=False): # parameters path = self._path - + # tmp if path.tmp.exists(): shutil.rmtree(path.tmp, ignore_errors=True) diff --git a/vltpf/IRDIS/SpectroReduction.py b/vltpf/IRDIS/SpectroReduction.py index fb417a9..51508df 100644 --- 
a/vltpf/IRDIS/SpectroReduction.py +++ b/vltpf/IRDIS/SpectroReduction.py @@ -45,20 +45,20 @@ def get_wavelength_calibration(wave_calib, centers, wave_min, wave_max): Returns ------- wave_lin : array - Array with the linear calibration for each field, as a function + Array with the linear calibration for each field, as a function of pixel coordinate ''' wave_map = np.zeros((2, 1024, 1024)) wave_map[0] = wave_calib[:, 0:1024] wave_map[1] = wave_calib[:, 1024:] wave_map[(wave_map < wave_min) | (wave_max < wave_map)] = np.nan - + wave_ext = 10 wave_lin = np.zeros((2, 1024)) - + wave_lin[0] = np.mean(wave_map[0, :, centers[0, 0]-wave_ext:centers[0, 0]+wave_ext], axis=1) wave_lin[1] = np.mean(wave_map[1, :, centers[1, 0]-wave_ext:centers[1, 0]+wave_ext], axis=1) - + return wave_lin @@ -80,7 +80,7 @@ class SpectroReduction(object): 'sph_ird_cal_dark': ['sort_files'], 'sph_ird_cal_detector_flat': ['sort_files'], 'sph_ird_wave_calib': ['sort_files', 'sph_ird_cal_detector_flat'], - 'sph_ird_preprocess_science': ['sort_files', 'sort_frames', 'sph_ird_cal_dark', + 'sph_ird_preprocess_science': ['sort_files', 'sort_frames', 'sph_ird_cal_dark', 'sph_ird_cal_detector_flat'], 'sph_ird_star_center': ['sort_files', 'sort_frames', 'sph_ird_wave_calib'], 'sph_ird_wavelength_recalibration': ['sort_files', 'sort_frames', 'sph_ird_wave_calib', @@ -88,11 +88,11 @@ class SpectroReduction(object): 'sph_ird_combine_data': ['sort_files', 'sort_frames', 'sph_ird_preprocess_science', 'sph_ird_star_center', 'sph_ird_wavelength_recalibration'] } - + ################################################## # Constructor ################################################## - + def __init__(self, path): '''Initialization of the SpectroReduction instances @@ -105,19 +105,19 @@ def __init__(self, path): # expand path path = Path(path).expanduser().resolve() - + # zeroth-order reduction validation raw = path / 'raw' if not raw.exists(): raise ValueError('No raw/ subdirectory. 
{0} is not a valid reduction path!'.format(path)) - + # init path and name self._path = utils.ReductionPath(path) self._instrument = 'IRDIS' - + # instrument mode self._mode = 'Unknown' - + # configuration configfile = Path(vltpf.__file__).parent / 'instruments' / '{}.ini'.format(self._instrument) config = configparser.ConfigParser() @@ -127,7 +127,7 @@ def __init__(self, path): # instrument self._pixel = float(config.get('instrument', 'pixel')) self._nwave = -1 - + # calibration self._wave_cal_lasers = [float(w) for w in config.get('calibration', 'wave_cal_lasers').split(',')] @@ -140,11 +140,11 @@ def __init__(self, path): try: val = eval(value) except NameError: - val = value + val = value self._config[key] = val except configparser.Error as e: raise ValueError('Error reading configuration file for instrument {0}: {1}'.format(self._instrument, e.message)) - + # execution of recipes self._recipe_execution = { 'sort_files': False, @@ -157,21 +157,21 @@ def __init__(self, path): # reload any existing data frames self.read_info() - + ################################################## # Representation ################################################## - + def __repr__(self): return ''.format(self._instrument, self._mode, self._path) - + def __format__(self): return self.__repr__() - + ################################################## # Properties ################################################## - + @property def instrument(self): return self._instrument @@ -179,11 +179,11 @@ def instrument(self): @property def pixel(self): return self._pixel - + @property def nwave(self): return self._nwave - + @property def path(self): return self._path @@ -191,11 +191,11 @@ def path(self): @property def files_info(self): return self._files_info - + @property def frames_info(self): return self._frames_info - + @property def frames_info_preproc(self): return self._frames_info_preproc @@ -203,11 +203,11 @@ def frames_info_preproc(self): @property def recipe_execution(self): return self._recipe_execution - + @property def config(self): - return self._config - + return self._config + @property def mode(self): return self._mode @@ -249,7 +249,7 @@ def show_config(self): keys = [key for key in dico if key.startswith('wave')] for key in keys: print('{0:<30s}{1}'.format(key, dico[key])) - + # combining print('-'*35) keys = [key for key in dico if key.startswith('combine')] @@ -262,10 +262,10 @@ def show_config(self): for key in keys: print('{0:<30s}{1}'.format(key, dico[key])) print('-'*35) - + print() - - + + def init_reduction(self): ''' Sort files and frames, perform sanity check @@ -273,46 +273,46 @@ def init_reduction(self): # make sure we have sub-directories self._path.create_subdirectories() - + self.sort_files() self.sort_frames() self.check_files_association() - - + + def create_static_calibrations(self): ''' Create static calibrations with esorex ''' config = self._config - + self.sph_ird_cal_dark(silent=config['misc_silent_esorex']) self.sph_ird_cal_detector_flat(silent=config['misc_silent_esorex']) self.sph_ird_wave_calib(silent=config['misc_silent_esorex']) - + def preprocess_science(self): ''' Clean and collapse images ''' - + config = self._config - + self.sph_ird_preprocess_science(subtract_background=config['preproc_subtract_background'], fix_badpix=config['preproc_fix_badpix'], collapse_science=config['preproc_collapse_science'], collapse_psf=config['preproc_collapse_psf'], collapse_center=config['preproc_collapse_center']) - + def process_science(self): ''' Perform star center, combine cubes 
into final (x,y,time,lambda) cubes, correct anamorphism and scale the images ''' - + config = self._config - + self.sph_ird_star_center(high_pass=config['center_high_pass'], plot=config['misc_plot']) self.sph_ird_wavelength_recalibration(fit_scaling=config['wave_fit_scaling'], @@ -325,26 +325,26 @@ def process_science(self): shift_method=config['combine_shift_method'], manual_center=config['combine_manual_center'], skip_center=config['combine_skip_center']) - + def clean(self): ''' Clean the reduction directory, leaving only the raw and products sub-directory ''' - + config = self._config if config['clean']: self.sph_ird_clean(delete_raw=config['clean_delete_raw'], delete_products=config['clean_delete_products']) - - + + def full_reduction(self): ''' Performs a full reduction of a data set, from the static calibrations to the final (x,y,time,lambda) cubes ''' - + self.init_reduction() self.create_static_calibrations() self.preprocess_science() @@ -354,7 +354,7 @@ def full_reduction(self): ################################################## # SPHERE/IRDIS methods ################################################## - + def read_info(self): ''' Read the files, calibs and frames information from disk @@ -371,7 +371,7 @@ def read_info(self): # path path = self._path - + # files info fname = path.preproc / 'files.csv' if fname.exists(): @@ -381,7 +381,7 @@ def read_info(self): files_info['DATE-OBS'] = pd.to_datetime(files_info['DATE-OBS'], utc=False) files_info['DATE'] = pd.to_datetime(files_info['DATE'], utc=False) files_info['DET FRAM UTC'] = pd.to_datetime(files_info['DET FRAM UTC'], utc=False) - + # update recipe execution self._recipe_execution['sort_files'] = True if np.any(files_info['PRO CATG'] == 'IRD_MASTER_DARK'): @@ -390,7 +390,7 @@ def read_info(self): self._recipe_execution['sph_ird_cal_detector_flat'] = True if np.any(files_info['PRO CATG'] == 'IRD_WAVECALIB'): self._recipe_execution['sph_ird_wave_calib'] = True - + # update instrument mode self._mode = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS1 MODE'][0] else: @@ -423,7 +423,7 @@ def read_info(self): frames_info_preproc['DET FRAM UTC'] = pd.to_datetime(frames_info_preproc['DET FRAM UTC'], utc=False) frames_info_preproc['TIME START'] = pd.to_datetime(frames_info_preproc['TIME START'], utc=False) frames_info_preproc['TIME'] = pd.to_datetime(frames_info_preproc['TIME'], utc=False) - frames_info_preproc['TIME END'] = pd.to_datetime(frames_info_preproc['TIME END'], utc=False) + frames_info_preproc['TIME END'] = pd.to_datetime(frames_info_preproc['TIME END'], utc=False) else: frames_info_preproc = None @@ -436,7 +436,7 @@ def read_info(self): if frames_info_preproc is not None: self._recipe_execution['sph_ird_wavelength_recalibration'] \ = (path.preproc / 'wavelength_final.fits').exists() - + done = True files = frames_info_preproc.index for file, idx in files: @@ -454,7 +454,7 @@ def read_info(self): done = done and (len(file) == 1) self._recipe_execution['sph_ird_star_center'] = done - + def sort_files(self): ''' Sort all raw files and save result in a data frame @@ -467,7 +467,7 @@ def sort_files(self): # parameters path = self._path - + # list files files = path.raw.glob('*.fits') files = [f.stem for f in files] @@ -493,7 +493,7 @@ def sort_files(self): key = keywords_short[idx] if key.find('HIERARCH ESO ') != -1: keywords_short[idx] = key[13:] - + # files table files_info = pd.DataFrame(index=pd.Index(files, name='FILE'), columns=keywords_short, dtype='float') @@ -509,12 +509,12 @@ def sort_files(self): # drop files 
that are not handled, based on DPR keywords files_info.dropna(subset=['DPR TYPE'], inplace=True) files_info = files_info[(files_info['DPR CATG'] != 'ACQUISITION') & (files_info['DPR TYPE'] != 'OBJECT,AO')] - + # check instruments instru = files_info['SEQ ARM'].unique() if len(instru) != 1: raise ValueError('Sequence is mixing different instruments: {0}'.format(instru)) - + # processed column files_info.insert(len(files_info.columns), 'PROCESSED', False) files_info.insert(len(files_info.columns), 'PRO CATG', ' ') @@ -526,10 +526,10 @@ def sort_files(self): # update instrument mode self._mode = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS1 MODE'][0] - + # sort by acquisition time files_info.sort_values(by='DATE-OBS', inplace=True) - + # save files_info files_info.to_csv(path.preproc / 'files.csv') self._files_info = files_info @@ -537,7 +537,7 @@ def sort_files(self): # update recipe execution self._recipe_execution['sort_files'] = True - + def sort_frames(self): ''' Extract the frames information from the science files and save @@ -551,18 +551,18 @@ def sort_frames(self): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sort_frames', self.recipe_requirements) - + # parameters path = self._path files_info = self._files_info - + # science files - sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE') & (files_info['DPR TYPE'] != 'SKY')] + sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE') & (files_info['DPR TYPE'] != 'SKY')] # raise error when no science frames are present if len(sci_files) == 0: raise ValueError('This dataset contains no science frame. There should be at least one!') - + # build indices files = [] img = [] @@ -576,7 +576,7 @@ def sort_frames(self): frames_info = pd.DataFrame(columns=sci_files.columns, index=pd.MultiIndex.from_arrays([files, img], names=['FILE', 'IMG'])) # expand files_info into frames_info - frames_info = frames_info.align(files_info, level=0)[1] + frames_info = frames_info.align(files_info, level=0)[1] # compute timestamps toolbox.compute_times(frames_info) @@ -590,20 +590,20 @@ def sort_frames(self): # update recipe execution self._recipe_execution['sort_frames'] = True - + # # print some info # cinfo = frames_info[frames_info['DPR TYPE'] == 'OBJECT'] if len(cinfo) == 0: cinfo = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] - + ra_drot = cinfo['INS4 DROT2 RA'][0] ra_drot_h = np.floor(ra_drot/1e4) ra_drot_m = np.floor((ra_drot - ra_drot_h*1e4)/1e2) ra_drot_s = ra_drot - ra_drot_h*1e4 - ra_drot_m*1e2 RA = '{:02.0f}:{:02.0f}:{:02.3f}'.format(ra_drot_h, ra_drot_m, ra_drot_s) - + dec_drot = cinfo['INS4 DROT2 DEC'][0] sign = np.sign(dec_drot) udec_drot = np.abs(dec_drot) @@ -617,9 +617,9 @@ def sort_frames(self): pa_end = cinfo['PARANG'][-1] posang = cinfo['INS4 DROT2 POSANG'].unique() - + date = str(cinfo['DATE'][0])[0:10] - + print(' * Object: {0}'.format(cinfo['OBJECT'][0])) print(' * RA / DEC: {0} / {1}'.format(RA, DEC)) print(' * Date: {0}'.format(date)) @@ -634,7 +634,7 @@ def sort_frames(self): print(' * PA: {0:.2f}° ==> {1:.2f}° = {2:.2f}°'.format(pa_start, pa_end, np.abs(pa_end-pa_start))) print(' * POSANG: {0}'.format(', '.join(['{:.2f}°'.format(p) for p in posang]))) - + def check_files_association(self): ''' Performs the calibration files association as a sanity check. 
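The association checks that follow all apply the same rule: keep only the calibration closest in time to the science data. A self-contained illustration of that selection is given below; the two-row table is fabricated and stands in for files_info, and an idxmin-based pick stands in for the argsort/drop pattern used in the hunks.

import pandas as pd

# fabricated calibration table; the pipeline works on files_info instead
cfiles = pd.DataFrame({'DATE-OBS': pd.to_datetime(['2019-08-20T10:00:00',
                                                   '2019-08-23T21:00:00'])},
                      index=pd.Index(['flat_1', 'flat_2'], name='FILE'))

time_sci  = pd.Timestamp('2019-08-23T23:00:00')    # earliest science DATE-OBS
time_flat = cfiles['DATE-OBS']

# keep only the calibration closest in time, drop the others
closest = (time_sci - time_flat).abs().idxmin()
cfiles  = cfiles.loc[[closest]]                    # here: 'flat_2'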
@@ -651,17 +651,17 @@ def check_files_association(self): # parameters path = self._path files_info = self._files_info - + # instrument arm arm = files_info['SEQ ARM'].unique() if len(arm) != 1: raise ValueError('Sequence is mixing different instruments: {0}'.format(arm)) - + # IRDIS obs mode and filter combination modes = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS1 MODE'].unique() if len(modes) != 1: raise ValueError('Sequence is mixing different types of observations: {0}'.format(modes)) - + filter_combs = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS COMB IFLT'].unique() if len(filter_combs) != 1: raise ValueError('Sequence is mixing different types of filters combinations: {0}'.format(filter_combs)) @@ -673,7 +673,7 @@ def check_files_association(self): # keep static calibrations and sky backgrounds calibs = files_info[(files_info['DPR CATG'] == 'CALIB') | ((files_info['DPR CATG'] == 'SCIENCE') & (files_info['DPR TYPE'] == 'SKY'))] - + ############################################### # static calibrations not dependent on DIT ############################################### @@ -685,7 +685,7 @@ def check_files_association(self): if len(cfiles) <= 1: error_flag += 1 print(' * Error: there should be more than 1 flat in filter combination {0}'.format(filter_comb)) - + # wave cfiles = calibs[(calibs['DPR TYPE'] == 'LAMP,WAVE') & (calibs['INS COMB IFLT'] == filter_comb)] if len(cfiles) == 0: @@ -698,19 +698,19 @@ def check_files_association(self): # find the two closest to science files sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE')] time_sci = sci_files['DATE-OBS'].min() - time_flat = cfiles['DATE-OBS'] + time_flat = cfiles['DATE-OBS'] time_delta = np.abs(time_sci - time_flat).argsort() # drop the others files_info.drop(time_delta[1:].index, inplace=True) - + ################################################## # static calibrations that depend on science DIT ################################################## - + obj = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'DPR TYPE'].apply(lambda s: s[0:6]) DITs = files_info.loc[(files_info['DPR CATG'] == 'SCIENCE') & (obj == 'OBJECT'), 'DET SEQ1 DIT'].unique().round(2) - + # handle darks in a slightly different way because there might be several different DITs for DIT in DITs: # instrumental backgrounds @@ -740,7 +740,7 @@ def check_files_association(self): files_info.to_csv(path.preproc / 'files.csv') self._files_info = files_info - + def sph_ird_cal_dark(self, silent=True): ''' Create the dark and background calibrations @@ -753,13 +753,13 @@ def sph_ird_cal_dark(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_cal_dark', self.recipe_requirements) - + print('Creating darks and backgrounds') # parameters path = self._path files_info = self._files_info - + # get list of files calibs = files_info[np.logical_not(files_info['PROCESSED']) & ((files_info['DPR TYPE'] == 'DARK') | @@ -803,8 +803,8 @@ def sph_ird_cal_dark(self, silent=True): max_level = 1000 if cfilt in ['S_LR']: max_level = 15000 - - # esorex parameters + + # esorex parameters args = ['esorex', '--no-checksum=TRUE', '--no-datamd5=TRUE', @@ -871,24 +871,24 @@ def sph_ird_cal_detector_flat(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_cal_detector_flat', self.recipe_requirements) - + print('Creating flats') # parameters path = self._path files_info = self._files_info - + # get list of files calibs = 
files_info[np.logical_not(files_info['PROCESSED']) & (files_info['DPR TYPE'] == 'FLAT,LAMP')] filter_combs = calibs['INS COMB IFLT'].unique() - + for cfilt in filter_combs: cfiles = calibs[calibs['INS COMB IFLT'] == cfilt] files = cfiles.index print(' * filter {0} ({1} files)'.format(cfilt, len(cfiles))) - + # create sof sof = path.sof / 'flat_filt={0}.sof'.format(cfilt) file = open(sof, 'w') @@ -899,8 +899,8 @@ def sph_ird_cal_detector_flat(self, silent=True): # products flat_file = 'flat_filt={0}'.format(cfilt) bpm_file = 'flat_bpm_filt={0}'.format(cfilt) - - # esorex parameters + + # esorex parameters args = ['esorex', '--no-checksum=TRUE', '--no-datamd5=TRUE', @@ -945,14 +945,14 @@ def sph_ird_cal_detector_flat(self, silent=True): files_info.loc[bpm_file, 'INS1 OPTI2 NAME'] = cfiles['INS1 OPTI2 NAME'][0] files_info.loc[bpm_file, 'PROCESSED'] = True files_info.loc[bpm_file, 'PRO CATG'] = 'IRD_NON_LINEAR_BADPIXELMAP' - + # save files_info.to_csv(path.preproc / 'files.csv') # update recipe execution self._recipe_execution['sph_ird_cal_detector_flat'] = True - + def sph_ird_wave_calib(self, silent=True): ''' Create the wavelength calibration @@ -965,20 +965,20 @@ def sph_ird_wave_calib(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_wave_calib', self.recipe_requirements) - + print('Creating wavelength calibration') # parameters path = self._path files_info = self._files_info - + # get list of files wave_file = files_info[np.logical_not(files_info['PROCESSED']) & (files_info['DPR TYPE'] == 'LAMP,WAVE')] if len(wave_file) != 1: raise ValueError('There should be exactly 1 raw wavelength calibration file. Found {0}.'.format(len(wave_file))) - + DIT = wave_file['DET SEQ1 DIT'][0] - dark_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IRD_MASTER_DARK') & + dark_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IRD_MASTER_DARK') & (files_info['DPR CATG'] == 'CALIB') & (files_info['DET SEQ1 DIT'].round(2) == DIT)] if len(dark_file) == 0: raise ValueError('There should at least 1 dark file for wavelength calibration. Found none.') @@ -987,14 +987,14 @@ def sph_ird_wave_calib(self, silent=True): flat_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IRD_FLAT_FIELD')] if len(flat_file) == 0: raise ValueError('There should at least 1 flat file for wavelength calibration. Found none.') - + bpm_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IRD_NON_LINEAR_BADPIXELMAP')] if len(flat_file) == 0: raise ValueError('There should at least 1 bad pixel map file for wavelength calibration. 
Found none.') - + # products wav_file = 'wave_calib' - + # esorex parameters if filter_comb == 'S_LR': # create standard sof in LRS @@ -1005,7 +1005,7 @@ def sph_ird_wave_calib(self, silent=True): file.write('{0}/{1}.fits {2}\n'.format(path.calib, flat_file.index[0], 'IRD_FLAT_FIELD')) file.write('{0}/{1}.fits {2}\n'.format(path.calib, bpm_file.index[0], 'IRD_STATIC_BADPIXELMAP')) file.close() - + args = ['esorex', '--no-checksum=TRUE', '--no-datamd5=TRUE', @@ -1016,15 +1016,15 @@ def sph_ird_wave_calib(self, silent=True): '--ird.wave_calib.number_lines=6', '--ird.wave_calib.outfilename={0}/{1}.fits'.format(path.calib, wav_file), sof] - elif filter_comb == 'S_MR': + elif filter_comb == 'S_MR': # masking of second order spectrum in MRS wave_fname = wave_file.index[0] wave_data, hdr = fits.getdata(path.raw / '{}.fits'.format(wave_fname), header=True) wave_data = wave_data.squeeze() wave_data[:60, :] = 0 - fits.writeto(path.preproc / '{}_masked.fits'.format(wave_fname), wave_data, hdr, overwrite=True, + fits.writeto(path.preproc / '{}_masked.fits'.format(wave_fname), wave_data, hdr, overwrite=True, output_verify='silentfix') - + # create sof using the masked file sof = path.sof / 'wave.sof' file = open(sof, 'w') @@ -1049,7 +1049,7 @@ def sph_ird_wave_calib(self, silent=True): if shutil.which('esorex') is None: raise NameError('esorex does not appear to be in your PATH. Please make sure ' + 'that the ESO pipeline is properly installed before running VLTPF.') - + # execute esorex if silent: proc = subprocess.run(args, cwd=path.tmp, stdout=subprocess.DEVNULL) @@ -1070,7 +1070,7 @@ def sph_ird_wave_calib(self, silent=True): files_info.loc[wav_file, 'DET SEQ1 DIT'] = wave_file['DET SEQ1 DIT'][0] files_info.loc[wav_file, 'PROCESSED'] = True files_info.loc[wav_file, 'PRO CATG'] = 'IRD_WAVECALIB' - + # save files_info.to_csv(path.preproc / 'files.csv') @@ -1094,7 +1094,7 @@ def sph_ird_preprocess_science(self, The pre-processed frames are saved in the preproc sub-directory and will be combined later. - + Parameters ---------- subtract_background : bool @@ -1118,14 +1118,14 @@ def sph_ird_preprocess_science(self, # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_preprocess_science', self.recipe_requirements) - + print('Pre-processing science files') # parameters path = self._path files_info = self._files_info frames_info = self._frames_info - + # clean before we start files = path.preproc.glob('*_DIT???_preproc.fits') for file in files: @@ -1148,18 +1148,18 @@ def sph_ird_preprocess_science(self, bpm[:, :50] = 0 bpm[:, 941:1078] = 0 bpm[:, 1966:] = 0 - - # flat + + # flat flat_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IRD_FLAT_FIELD') & (files_info['INS COMB IFLT'] == filter_comb)] if len(flat_file) != 1: raise ValueError('There should be exactly 1 flat file. 
Found {0}.'.format(len(flat_file))) flat = fits.getdata(path.calib / '{}.fits'.format(flat_file.index[0])) - + # final dataframe index = pd.MultiIndex(names=['FILE', 'IMG'], levels=[[], []], codes=[[], []]) frames_info_preproc = pd.DataFrame(index=index, columns=frames_info.columns, dtype='float') - + # loop on the different type of science files sci_types = ['OBJECT,CENTER', 'OBJECT,FLUX', 'OBJECT'] dark_types = ['SKY', 'DARK,BACKGROUND', 'DARK'] @@ -1169,7 +1169,7 @@ def sph_ird_preprocess_science(self, sci_DITs = list(sci_files['DET SEQ1 DIT'].round(2).unique()) if len(sci_files) == 0: - continue + continue for DIT in sci_DITs: sfiles = sci_files[sci_files['DET SEQ1 DIT'].round(2) == DIT] @@ -1207,7 +1207,7 @@ def sph_ird_preprocess_science(self, # read data print(' ==> read data') img, hdr = fits.getdata(path.raw / '{}.fits'.format(fname), header=True) - + # add extra dimension to single images to make cubes if img.ndim == 2: img = img[np.newaxis, ...] @@ -1218,7 +1218,7 @@ def sph_ird_preprocess_science(self, img[:, :, :50] = np.nan img[:, :, 941:1078] = np.nan img[:, :, 1966:] = np.nan - + # collapse if (typ == 'OBJECT,CENTER'): if collapse_center: @@ -1244,7 +1244,7 @@ def sph_ird_preprocess_science(self, frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'none') frames_info_preproc = pd.concat((frames_info_preproc, frames_info_new)) - + # background subtraction if subtract_background: print(' ==> subtract background') @@ -1256,11 +1256,11 @@ def sph_ird_preprocess_science(self, print(' ==> divide by flat field') for f in range(len(img)): img[f] /= flat - + # bad pixels correction if fix_badpix: print(' ==> correct bad pixels') - for f in range(len(img)): + for f in range(len(img)): frame = img[f] frame = imutils.fix_badpix(frame, bpm, npix=12, weight=True) @@ -1279,10 +1279,10 @@ def sph_ird_preprocess_science(self, nimg[f, 0] = img[f, :, 0:1024] nimg[f, 1] = img[f, :, 1024:] img = nimg - + # save DITs individually for f in range(len(img)): - frame = nimg[f, ...].squeeze() + frame = nimg[f, ...].squeeze() hdr['HIERARCH ESO DET NDIT'] = 1 fits.writeto(path.preproc / '{}_DIT{:03d}_preproc.fits'.format(fname, f), frame, hdr, overwrite=True, output_verify='silentfix') @@ -1318,7 +1318,7 @@ def sph_ird_star_center(self, high_pass=False, plot=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_star_center', self.recipe_requirements) - + print('Star centers determination') # parameters @@ -1332,23 +1332,23 @@ def sph_ird_star_center(self, high_pass=False, plot=True): # FIXME: centers should be stored in .ini files and passed to # function when needed (ticket #60) if filter_comb == 'S_LR': - centers = np.array(((484, 496), + centers = np.array(((484, 496), (488, 486))) wave_min = 920 wave_max = 2330 elif filter_comb == 'S_MR': - centers = np.array(((474, 519), + centers = np.array(((474, 519), (479, 509))) wave_min = 940 wave_max = 1820 - + # wavelength map wave_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IRD_WAVECALIB')] wave_calib = fits.getdata(path.calib / '{}.fits'.format(wave_file.index[0])) wave_lin = get_wavelength_calibration(wave_calib, centers, wave_min, wave_max) - + # start with OBJECT,FLUX - flux_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,FLUX'] + flux_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,FLUX'] if len(flux_files) != 0: for file, idx in flux_files.index: print(' ==> OBJECT,FLUX: {0}'.format(file)) @@ -1386,14 +1386,14 @@ def sph_ird_star_center(self, 
high_pass=False, plot=True): cube_sci, hdr = fits.getdata(path.preproc / '{}.fits'.format(fname2), header=True) else: cube_sci = None - + # centers if plot: save_path = path.products / '{}_spots_fitting.pdf'.format(fname) else: save_path = None spot_centers, spot_dist, img_centers \ - = toolbox.star_centers_from_waffle_lss_cube(cube_cen, cube_sci, wave_lin, centers, pixel, + = toolbox.star_centers_from_waffle_lss_cube(cube_cen, cube_sci, wave_lin, centers, pixel, high_pass=high_pass, save_path=save_path) # save @@ -1426,10 +1426,10 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): Display and save diagnostic plot for quality check. Default is True ''' - + # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_wavelength_recalibration', self.recipe_requirements) - + print('Wavelength recalibration') # parameters @@ -1443,16 +1443,16 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): # FIXME: centers should be stored in .ini files and passed to # function when needed (ticket #60) if filter_comb == 'S_LR': - centers = np.array(((484, 496), + centers = np.array(((484, 496), (488, 486))) wave_min = 920 wave_max = 2330 elif filter_comb == 'S_MR': - centers = np.array(((474, 519), + centers = np.array(((474, 519), (479, 509))) wave_min = 940 wave_max = 1820 - + # wavelength map wave_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IRD_WAVECALIB')] wave_calib = fits.getdata(path.calib / '{}.fits'.format(wave_file.index[0])) @@ -1461,7 +1461,7 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): # reference wavelength idx_ref = 3 wave_ref = lasers[idx_ref] - + # get spot distance from the first OBJECT,CENTER in the sequence starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] if len(starcen_files) == 0: @@ -1472,23 +1472,23 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): fname = '{0}_DIT{1:03d}_preproc_spot_distance'.format(starcen_files.index.values[0][0], starcen_files.index.values[0][1]) spot_dist = fits.getdata(path.preproc / '{}.fits'.format(fname)) - + if plot: pdf = PdfPages(path.products / 'wavelength_recalibration.pdf') - + pix = np.arange(1024) wave_final = np.zeros((1024, 2)) for fidx in range(2): print(' field {0:2d}/{1:2d}'.format(fidx+1, 2)) - + wave = wave_lin[fidx] dist = spot_dist[:, fidx] imin = np.nanargmin(np.abs(wave-wave_ref)) - + # scaling factor scaling_raw = dist / dist[imin] - + if filter_comb == 'S_LR': # FIXME: implement smoothing of the scaling factor for # LRS mode @@ -1497,16 +1497,16 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): # linear fit with a 5-degree polynomial good = np.where(np.isfinite(wave)) p = np.polyfit(pix[good], scaling_raw[good], 5) - + scaling_fit = np.polyval(p, pix) - + wave_final_raw = wave[imin] * scaling_raw wave_final_fit = wave[imin] * scaling_fit bad = np.where(np.logical_not(np.isfinite(wave))) wave_final_raw[bad] = np.nan wave_final_fit[bad] = np.nan - + wave_diff = np.abs(wave_final_fit - wave) print(' ==> difference with calibrated wavelength: ' + 'min={0:.1f} nm, max={1:.1f} nm'.format(np.nanmin(wave_diff), np.nanmax(wave_diff))) @@ -1519,12 +1519,12 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): wave_final[:, fidx] = wave_final_raw use_r = ' <==' use_f = '' - + # plot if plot: plt.figure('Wavelength recalibration', figsize=(10, 10)) plt.clf() - + plt.subplot(211) plt.axvline(imin, color='k', 
linestyle='--') plt.plot(pix, wave, label='DRH', color='r', lw=3) @@ -1534,7 +1534,7 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): plt.ylabel('Wavelength r[nm]') plt.title('Field #{}'.format(fidx)) plt.xlim(1024, 0) - + plt.subplot(212) plt.axvline(imin, color='k', linestyle='--') plt.plot(pix, wave-wave_final_raw) @@ -1542,9 +1542,9 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): plt.ylabel('Residuals r[nm]') plt.xlabel('Detector coordinate [pix]') plt.xlim(1024, 0) - + plt.tight_layout() - + pdf.savefig() if plot: @@ -1554,7 +1554,7 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): print(' * saving') fits.writeto(path.preproc / 'wavelength_final.fits', wave_final, overwrite=True) - + # update recipe execution self._recipe_execution['sph_ird_wavelength_recalibration'] = True @@ -1565,18 +1565,18 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m All types of data are combined independently: PSFs (OBJECT,FLUX), star centers (OBJECT,CENTER) and standard - coronagraphic images (OBJECT). + coronagraphic images (OBJECT). Depending on the observing strategy, there can be several position angle positions in the sequence. Images taken at - different position angles can be either kept together or - split into different cubes. In either case a posang vector + different position angles can be either kept together or + split into different cubes. In either case a posang vector is saved alongside the science cube(s). For each type of data, the method saves 3 different files: - + - *_cube: the (x,y,time) cube - + - *_posang: the position angle vector. - *_frames: a csv file with all the information for every @@ -1584,7 +1584,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m data cube. Data are save separately for each field. - + Parameters ---------- cpix : bool @@ -1597,7 +1597,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m Size of the PSF images along in the spatial dimension. Default is 80x pixels - science_dim : even int + science_dim : even int Size of the science images (star centers and standard coronagraphic images) in the spatial dimension. Default is 800 pixels @@ -1608,7 +1608,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m wavelength. Default is True. split_posang : bool - Save data taken at different position angles in separate + Save data taken at different position angles in separate science files. Default is True manual_center : array @@ -1624,16 +1624,16 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m shifting them by an integer number of pixel to bring the center of the data close to the center of the images. This option is useful if fine centering must be done afterwards. - + shift_method : str Method to shifting the images: fft or interp. 
Default is fft ''' - + # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_combine_data', self.recipe_requirements) - + print('Combine science data') # parameters @@ -1646,16 +1646,16 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m # FIXME: centers should be stored in .ini files and passed to # function when needed (ticket #60) if filter_comb == 'S_LR': - centers = np.array(((484, 496), + centers = np.array(((484, 496), (488, 486))) wave_min = 920 wave_max = 2330 elif filter_comb == 'S_MR': - centers = np.array(((474, 519), + centers = np.array(((474, 519), (479, 509))) wave_min = 940 wave_max = 1820 - + # wavelength solution: make sure we have the same number of # wave points in each field wave = fits.getdata(path.preproc / 'wavelength_final.fits') @@ -1663,17 +1663,17 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m iwave0 = np.where(mask[:, 0])[0] iwave1 = np.where(mask[:, 1])[0] nwave = np.min([iwave0.size, iwave1.size]) - - iwave = np.empty((nwave, 2), dtype=np.int) + + iwave = np.empty((nwave, 2), dtype=np.int) iwave[:, 0] = iwave0[:nwave] iwave[:, 1] = iwave1[:nwave] final_wave = np.empty((nwave, 2)) final_wave[:, 0] = wave[iwave[:, 0], 0] final_wave[:, 1] = wave[iwave[:, 1], 1] - + fits.writeto(path.products / 'wavelength.fits', final_wave.squeeze().T, overwrite=True) - + # max images size if psf_dim > 1024: print('Warning: psf_dim cannot be larger than 1024 pix. A value of 1024 will be used.') @@ -1693,12 +1693,12 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m manual_center = np.array(manual_center) if manual_center.shape != (2,): raise ValueError('manual_center does not have the right number of dimensions.') - + print('Warning: images will be centered at the user-provided values.') if correct_mrs_chromatism and (filter_comb == 'S_MR'): print('Warning: fine centering will be done anyway to correct for MRS chromatism') - + # # OBJECT,FLUX # @@ -1725,12 +1725,12 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) cube = fits.getdata(path.preproc / '{}.fits'.format(fname)) centers = fits.getdata(path.preproc / '{}_centers.fits'.format(fname)) - + # DIT, angles, etc DIT = frames_info.loc[(file, idx), 'DET SEQ1 DIT'] psf_posang[file_idx] = frames_info.loc[(file, idx), 'INS4 DROT2 POSANG'] + 90 - # center + # center for field_idx, img in enumerate(cube): # wavelength solution for this field ciwave = iwave[:, field_idx] @@ -1739,26 +1739,26 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m img = img.astype(np.float) for wave_idx, widx in enumerate(ciwave): cx = centers[widx, field_idx] - + line = img[widx, :] nimg = imutils.shift(line, cc-cx, method=shift_method) nimg = nimg / DIT - + psf_cube[field_idx, file_idx, wave_idx] = nimg[:psf_dim] else: if skip_center: cx = centers_default[field_idx] else: cx = centers[ciwave, field_idx].mean() - + img = img.astype(np.float) nimg = imutils.shift(img, (cc-cx, 0), method=shift_method) nimg = nimg / DIT psf_cube[field_idx, file_idx] = nimg[ciwave, :psf_dim] - + # neutral density - cwave = final_wave[:, field_idx] + cwave = final_wave[:, field_idx] ND = frames_info.loc[(file, idx), 'INS4 FILT2 NAME'] w, attenuation = transmission.transmission_nd(ND, wave=cwave) psf_cube[field_idx, file_idx] = (psf_cube[field_idx, file_idx].T / attenuation).T @@ -1767,7 +1767,7 @@ def 
sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m pas = np.unique(psf_posang) for pa in pas: ii = np.where(psf_posang == pa)[0] - + # save metadata flux_files[(flux_files['INS4 DROT2 POSANG'] + 90) == pa].to_csv(path.products / 'psf_posang={:06.2f}_frames.csv'.format(pa)) fits.writeto(path.products / 'psf_posang={:06.2f}_posang.fits'.format(pa), psf_posang[ii], overwrite=True) @@ -1818,8 +1818,8 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m DIT = frames_info.loc[(file, idx), 'DET SEQ1 DIT'] cen_posang[file_idx] = frames_info.loc[(file, idx), 'INS4 DROT2 POSANG'] + 90 - # center - for field_idx, img in enumerate(cube): + # center + for field_idx, img in enumerate(cube): # wavelength solution for this field ciwave = iwave[:, field_idx] @@ -1827,35 +1827,35 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m img = img.astype(np.float) for wave_idx, widx in enumerate(ciwave): cx = centers[widx, field_idx] - + line = img[widx, :] nimg = imutils.shift(line, cc-cx, method=shift_method) nimg = nimg / DIT - + cen_cube[field_idx, file_idx, wave_idx] = nimg[:science_dim] else: if skip_center: cx = centers_default[field_idx] else: cx = centers[ciwave, field_idx].mean() - + img = img.astype(np.float) nimg = imutils.shift(img, (cc-cx, 0), method=shift_method) nimg = nimg / DIT cen_cube[field_idx, file_idx] = nimg[ciwave, :science_dim] - + # neutral density - cwave = final_wave[:, field_idx] + cwave = final_wave[:, field_idx] ND = frames_info.loc[(file, idx), 'INS4 FILT2 NAME'] w, attenuation = transmission.transmission_nd(ND, wave=cwave) cen_cube[field_idx, file_idx] = (cen_cube[field_idx, file_idx].T / attenuation).T - + if split_posang: pas = np.unique(cen_posang) for pa in pas: ii = np.where(cen_posang == pa)[0] - + # save metadata starcen_files[(starcen_files['INS4 DROT2 POSANG'] + 90) == pa].to_csv(path.products / 'starcenter_posang={:06.2f}_frames.csv'.format(pa)) fits.writeto(path.products / 'starcenter_posang={:06.2f}_posang.fits'.format(pa), cen_posang[ii], overwrite=True) @@ -1903,7 +1903,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m else: fname = '{0}_DIT{1:03d}_preproc_centers.fits'.format(starcen_files.index.values[0][0], starcen_files.index.values[0][1]) centers = fits.getdata(path.preproc / fname) - + # final center if cpix: cc = science_dim // 2 @@ -1922,8 +1922,8 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m DIT = frames_info.loc[(file, idx), 'DET SEQ1 DIT'] sci_posang[file_idx] = frames_info.loc[(file, idx), 'INS4 DROT2 POSANG'] + 90 - # center - for field_idx, img in enumerate(cube): + # center + for field_idx, img in enumerate(cube): # wavelength solution for this field ciwave = iwave[:, field_idx] @@ -1931,35 +1931,35 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m img = img.astype(np.float) for wave_idx, widx in enumerate(ciwave): cx = centers[widx, field_idx] - + line = img[widx, :] nimg = imutils.shift(line, cc-cx, method=shift_method) nimg = nimg / DIT - + sci_cube[field_idx, file_idx, wave_idx] = nimg[:science_dim] else: if skip_center: cx = centers_default[field_idx] else: cx = centers[ciwave, field_idx].mean() - + img = img.astype(np.float) nimg = imutils.shift(img, (cc-cx, 0), method=shift_method) nimg = nimg / DIT sci_cube[field_idx, file_idx] = nimg[ciwave, :science_dim] - + # neutral density - cwave = final_wave[:, field_idx] + cwave = final_wave[:, field_idx] ND = 
frames_info.loc[(file, idx), 'INS4 FILT2 NAME'] w, attenuation = transmission.transmission_nd(ND, wave=cwave) sci_cube[field_idx, file_idx] = (sci_cube[field_idx, file_idx].T / attenuation).T - + if split_posang: pas = np.unique(sci_posang) for pa in pas: ii = np.where(sci_posang == pa)[0] - + # save metadata object_files[(object_files['INS4 DROT2 POSANG'] + 90) == pa].to_csv(path.products / 'science_posang={:06.2f}_frames.csv'.format(pa)) fits.writeto(path.products / 'science_posang={:06.2f}_posang.fits'.format(pa), sci_posang[ii], overwrite=True) @@ -1977,12 +1977,12 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m # delete big cubes del sci_cube - print() + print() # update recipe execution self._recipe_execution['sph_ird_combine_data'] = True - + def sph_ird_clean(self, delete_raw=False, delete_products=False): ''' Clean everything except for raw data and science products (by default) @@ -1998,7 +1998,7 @@ def sph_ird_clean(self, delete_raw=False, delete_products=False): # parameters path = self._path - + # tmp if path.tmp.exists(): shutil.rmtree(path.tmp, ignore_errors=True) diff --git a/vltpf/toolbox.py b/vltpf/toolbox.py index b9c0f4f..1e4b222 100644 --- a/vltpf/toolbox.py +++ b/vltpf/toolbox.py @@ -30,7 +30,7 @@ def check_recipe_execution(recipe_execution, recipe_name, recipe_requirements): recipe_requirements : dict Dictionary providing the recipe requirements - + Returns ------- execute_recipe : bool @@ -52,7 +52,7 @@ def check_recipe_execution(recipe_execution, recipe_name, recipe_requirements): return execute_recipe - + def parallatic_angle(ha, dec, geolat): ''' Parallactic angle of a source in degrees @@ -78,7 +78,7 @@ def parallatic_angle(ha, dec, geolat): if (dec >= geolat): pa[ha < 0] += 360*units.degree - + return np.degrees(pa) @@ -111,13 +111,13 @@ def compute_times(frames_info): utc = Time(ts_start.astype(str), scale='utc', location=(geolon, geolat, geoelev)) mjd_start = utc.mjd - + utc = Time(ts.astype(str), scale='utc', location=(geolon, geolat, geoelev)) mjd = utc.mjd utc = Time(ts_end.astype(str), scale='utc', location=(geolon, geolat, geoelev)) mjd_end = utc.mjd - + # update frames_info frames_info['TIME START'] = ts_start frames_info['TIME'] = ts @@ -147,7 +147,7 @@ def compute_angles(frames_info): pa_correction = np.degrees(np.arctan(np.tan(np.radians(alt-2.*drot2)))) else: pa_correction = 0 - + # RA/DEC ra_drot = frames_info['INS4 DROT2 RA'].values.astype(np.float) ra_drot_h = np.floor(ra_drot/1e4) @@ -166,24 +166,24 @@ def compute_angles(frames_info): dec_drot_d *= sign dec = coord.Angle((dec_drot_d, dec_drot_m, dec_drot_s), units.degree) frames_info['DEC'] = dec - + # calculate parallactic angles geolon = coord.Angle(frames_info['TEL GEOLON'].values[0], units.degree) geolat = coord.Angle(frames_info['TEL GEOLAT'].values[0], units.degree) geoelev = frames_info['TEL GEOELEV'].values[0] - + utc = Time(frames_info['TIME START'].values.astype(str), scale='utc', location=(geolon, geolat, geoelev)) lst = utc.sidereal_time('apparent') ha = lst - ra_hour - pa = parallatic_angle(ha, dec[0], geolat) + pa = parallatic_angle(ha, dec[0], geolat) frames_info['PARANG START'] = pa.value + pa_correction frames_info['HOUR ANGLE START'] = ha frames_info['LST START'] = lst - + utc = Time(frames_info['TIME'].values.astype(str), scale='utc', location=(geolon, geolat, geoelev)) lst = utc.sidereal_time('apparent') ha = lst - ra_hour - pa = parallatic_angle(ha, dec[0], geolat) + pa = parallatic_angle(ha, dec[0], geolat) frames_info['PARANG'] = 
pa.value + pa_correction frames_info['HOUR ANGLE'] = ha frames_info['LST'] = lst @@ -191,7 +191,7 @@ def compute_angles(frames_info): utc = Time(frames_info['TIME END'].values.astype(str), scale='utc', location=(geolon, geolat, geoelev)) lst = utc.sidereal_time('apparent') ha = lst - ra_hour - pa = parallatic_angle(ha, dec[0], geolat) + pa = parallatic_angle(ha, dec[0], geolat) frames_info['PARANG END'] = pa.value + pa_correction frames_info['HOUR ANGLE END'] = ha frames_info['LST END'] = lst @@ -203,7 +203,7 @@ def compute_angles(frames_info): # PUP_OFFSET = -135.99 ± 0.11 # INSTRUMENT_OFFSET # IFS = +100.48 ± 0.10 - # IRD = 0.00 ± 0.00 + # IRD = 0.00 ± 0.00 # instru = frames_info['SEQ ARM'].unique() if len(instru) != 1: @@ -214,7 +214,7 @@ def compute_angles(frames_info): instru_offset = 0.0 else: raise ValueError('Unkown instrument {0}'.format(instru)) - + drot_mode = frames_info['INS4 DROT2 MODE'].unique() if len(drot_mode) != 1: raise ValueError('Derotator mode has several values in the sequence') @@ -231,7 +231,7 @@ def compute_angles(frames_info): # final derotation value frames_info['DEROT ANGLE'] = frames_info['PARANG'] + pupoff - + def compute_bad_pixel_map(bpm_files, dtype=np.uint8): ''' @@ -244,7 +244,7 @@ def compute_bad_pixel_map(bpm_files, dtype=np.uint8): dtype : data type Data type for the final bpm - + Returns bpm : array_like Combined bad pixel map @@ -253,13 +253,13 @@ def compute_bad_pixel_map(bpm_files, dtype=np.uint8): # check that we have files if len(bpm_files) == 0: raise ValueError('No bad pixel map files provided') - + # get shape shape = fits.getdata(bpm_files[0]).shape - + # star with empty bpm bpm = np.zeros((shape[-2], shape[-1]), dtype=np.uint8) - + # fill if files are provided for f in bpm_files: data = fits.getdata(f) @@ -281,7 +281,7 @@ def collapse_frames_info(finfo, fname, collapse_type, coadd_value=2): fname : str The name of the current file - + collapse_type : str Type of collapse. Possible values are mean or coadd. Default is mean. @@ -295,9 +295,9 @@ def collapse_frames_info(finfo, fname, collapse_type, coadd_value=2): nfinfo : dataframe Collapsed data frame ''' - + print(' ==> collapse frames information') - + nfinfo = None if collapse_type == 'none': nfinfo = finfo @@ -311,14 +311,14 @@ def collapse_frames_info(finfo, fname, collapse_type, coadd_value=2): # copy data nfinfo.loc[(fname, 0)] = finfo.loc[(fname, imin)] - + # update time values nfinfo.loc[(fname, 0), 'DET NDIT'] = 1 nfinfo.loc[(fname, 0), 'TIME START'] = finfo.loc[(fname, imin), 'TIME START'] nfinfo.loc[(fname, 0), 'TIME END'] = finfo.loc[(fname, imax), 'TIME END'] nfinfo.loc[(fname, 0), 'TIME'] = finfo.loc[(fname, imin), 'TIME START'] + \ (finfo.loc[(fname, imax), 'TIME END'] - finfo.loc[(fname, imin), 'TIME START']) / 2 - + # recompute angles compute_angles(nfinfo) elif collapse_type == 'coadd': @@ -347,10 +347,10 @@ def collapse_frames_info(finfo, fname, collapse_type, coadd_value=2): # recompute angles compute_angles(nfinfo) else: - raise ValueError('Unknown collapse type {0}'.format(collapse_type)) - + raise ValueError('Unknown collapse type {0}'.format(collapse_type)) + return nfinfo - + def lines_intersect(a1, a2, b1, b2): ''' @@ -358,16 +358,16 @@ def lines_intersect(a1, a2, b1, b2): (a1,a2) and (b1,b2). 
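The computation below is the standard 2D cross-product formulation: with da = a2 - a1, db = b2 - b1, dp = a1 - b1 and pda the vector perpendicular to da, the intersection point is b1 + ((pda.dp) / (pda.db)) * db. A quick worked check of that formula (a sketch assuming numpy, not part of the module):

import numpy as np

a1, a2 = np.array([0., 0.]), np.array([2., 2.])    # line 1: y = x
b1, b2 = np.array([0., 2.]), np.array([2., 0.])    # line 2: y = 2 - x
da, db, dp = a2 - a1, b2 - b1, a1 - b1
pda = np.array([-da[1], da[0]])                    # perpendicular to da
print(((pda*dp).sum() / (pda*db).sum()) * db + b1) # -> [1. 1.]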
See https://stackoverflow.com/questions/3252194/numpy-and-line-intersections - + Parameters ---------- - + a, b : 2D tuples Coordinates of points on line 1 - + c, d : 2D tuples Coordinates of points on line 2 - + Returns ------- val @@ -379,14 +379,14 @@ def lines_intersect(a1, a2, b1, b2): a2 = np.array(a2) b1 = np.array(b1) b2 = np.array(b2) - + # test lines da = a2 - a1 # vector from A1 to A2 db = b2 - b1 # vector from B1 to B2 dp = a1 - b1 pda = [-da[1], da[0]] # perpendicular to A1-A2 vector - # parallel lines + # parallel lines if (pda*db).sum() == 0: return None @@ -411,29 +411,29 @@ def star_centers_from_PSF_img_cube(cube, wave, pixel, save_path=None): pixel : float Pixel scale, in mas/pixel - + save_path : str Path where to save the fit images. Default is None, which means that the plot is not produced - + Returns ------- img_centers : array_like The star center in each frame of the cube ''' - + # standard parameters nwave = wave.size loD = wave*1e-9/8 * 180/np.pi * 3600*1000/pixel box = 30 - + # spot fitting xx, yy = np.meshgrid(np.arange(2*box), np.arange(2*box)) # multi-page PDF to save result if save_path is not None: pdf = PdfPages(save_path) - + # loop over images img_centers = np.zeros((nwave, 2)) for idx, (wave, img) in enumerate(zip(wave, cube)): @@ -441,7 +441,7 @@ def star_centers_from_PSF_img_cube(cube, wave, pixel, save_path=None): # remove any NaN img = np.nan_to_num(img) - + # center guess cy, cx = np.unravel_index(np.argmax(img), img.shape) @@ -461,12 +461,12 @@ def star_centers_from_PSF_img_cube(cube, wave, pixel, save_path=None): img_centers[idx, 0] = cx_final img_centers[idx, 1] = cy_final - + if save_path: plt.figure('PSF center - imaging', figsize=(8, 8)) plt.clf() - - plt.subplot(111) + + plt.subplot(111) plt.imshow(img/img.max(), aspect='equal', vmin=1e-6, vmax=1, norm=colors.LogNorm(), interpolation='nearest') plt.plot([cx_final], [cy_final], marker='D', color='red') plt.gca().add_patch(patches.Rectangle((cx-box, cy-box), 2*box, 2*box, ec='white', fc='none')) @@ -475,7 +475,7 @@ def star_centers_from_PSF_img_cube(cube, wave, pixel, save_path=None): ext = 1000 / pixel plt.xlim(cx_final-ext, cx_final+ext) plt.ylim(cy_final-ext, cy_final+ext) - + plt.tight_layout() pdf.savefig() @@ -500,7 +500,7 @@ def star_centers_from_PSF_lss_cube(cube, wave_cube, pixel, save_path=None): pixel : float Pixel scale, in mas/pixel - + save_path : str Path where to save the fit images. 
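A note on the recurring loD expression in these centering routines: wave*1e-9/8 * 180/np.pi * 3600*1000/pixel converts lambda/D into detector pixels, i.e. the wavelength in meters over the 8 m telescope aperture assumed by the code, then radians to degrees to arcsec to mas, divided by the pixel scale in mas/pixel. A quick numerical check (a sketch assuming numpy and the IRDIS pixel scale used elsewhere in this patch):

import numpy as np

wave, pixel = 1600.0, 12.25    # nm, mas/pixel
loD = wave*1e-9/8 * 180/np.pi * 3600*1000/pixel
# loD is ~3.4 pixels per lambda/D at 1600 nm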
Default is None, which means that the plot is not produced @@ -510,7 +510,7 @@ def star_centers_from_PSF_lss_cube(cube, wave_cube, pixel, save_path=None): psf_centers : array_like The star center in each frame and wavelength of the cube ''' - + # standard parameters box = 20 @@ -518,24 +518,24 @@ def star_centers_from_PSF_lss_cube(cube, wave_cube, pixel, save_path=None): if save_path: plt.figure('PSF center - spectro', figsize=(7, 12)) plt.clf() - + # loop over fiels and wavelengths nimg = len(cube) psf_centers = np.full((1024, nimg), np.nan) for fidx, img in enumerate(cube): print(' field {0:2d}/{1:2d}'.format(fidx+1, nimg)) - + # remove any NaN img = np.nan_to_num(cube[fidx]) - + # approximate center prof = np.sum(img, axis=0) cx_int = np.int(np.argmax(prof)) - + # sub-image sub = img[:, cx_int-box:cx_int+box] xx = np.arange(2*box) - + # wavelengths for this field wave = wave_cube[fidx] @@ -543,29 +543,29 @@ def star_centers_from_PSF_lss_cube(cube, wave_cube, pixel, save_path=None): for widx in good: # lambda/D loD = wave[widx]*1e-9/8 * 180/np.pi * 3600*1000/pixel - + # current profile prof = sub[widx, :] - + # gaussian fit imax = np.argmax(prof) - + g_init = models.Gaussian1D(amplitude=prof.max(), mean=imax, stddev=loD) + \ models.Const1D(amplitude=0) - + fit_g = fitting.LevMarLSQFitter() par = fit_g(g_init, xx, prof) - + cx = par[0].mean.value - box + cx_int - + psf_centers[widx, fidx] = cx - + if save_path: plt.subplot(1, 2, fidx+1) plt.imshow(img/img.max(), aspect='equal', vmin=1e-3, vmax=1, norm=colors.LogNorm(), interpolation='nearest') plt.plot(psf_centers[:, fidx], range(1024), marker='.', color='r', linestyle='none', ms=2, alpha=0.5) - + plt.title(r'Field #{0}'.format(fidx+1)) ext = 1000 / pixel @@ -596,22 +596,22 @@ def star_centers_from_waffle_img_cube(cube, wave, instrument, waffle_orientation instrument : str Instrument, IFS or IRDIS - + waffle_orientation : str String giving the waffle orientation '+' or 'x' - high_pass : bool + high_pass : bool Apply high-pass filter to the image before searching for the satelitte spots. Default is False - smooth : int + smooth : int Apply a gaussian smoothing to the images to reduce noise. The value is the sigma of the gaussian in pixel. Default is no smoothing - + center_offset : tuple - Apply an (x,y) offset to the default center position. The offset - will move the search box of the waffle spots by the amount of + Apply an (x,y) offset to the default center position. The offset + will move the search box of the waffle spots by the amount of specified pixels in each direction. 
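In the spectroscopic routines above, each wavelength row is centered by fitting a 1D Gaussian plus a constant to its spatial profile. Condensed into a standalone sketch (the helper name is illustrative; assumes numpy and astropy, the same fitting API used in the module):

import numpy as np
from astropy.modeling import models, fitting

def fit_row_center(prof, sigma_guess):
    # prof: spatial intensity profile of a single wavelength row
    xx = np.arange(prof.size)
    g_init = models.Gaussian1D(amplitude=prof.max(), mean=np.argmax(prof),
                               stddev=sigma_guess) + models.Const1D(amplitude=0)
    par = fitting.LevMarLSQFitter()(g_init, xx, prof)
    return par[0].mean.value    # sub-pixel center of the row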
Default is no offset coro : bool @@ -633,7 +633,7 @@ def star_centers_from_waffle_img_cube(cube, wave, instrument, waffle_orientation The star center in each frame of the cube ''' - + # instrument # FIXME: pixel size should be stored in .ini files and passed to # function when needed (ticket #60) @@ -645,12 +645,12 @@ def star_centers_from_waffle_img_cube(cube, wave, instrument, waffle_orientation offset = 0 else: raise ValueError('Unknown instrument {0}'.format(instrument)) - + # standard parameters dim = cube.shape[-1] nwave = wave.size loD = wave*1e-9/8 * 180/np.pi * 3600*1000/pixel - + # waffle parameters freq = 10 * np.sqrt(2) * 0.97 box = 8 @@ -672,9 +672,9 @@ def star_centers_from_waffle_img_cube(cube, wave, instrument, waffle_orientation if instrument == 'IFS': center_guess = np.full((nwave, 2), ((dim // 2)+3, (dim // 2)-1)) elif instrument == 'IRDIS': - center_guess = np.array(((485, 520), + center_guess = np.array(((485, 520), (486, 508))) - + # loop over images spot_centers = np.zeros((nwave, 4, 2)) spot_dist = np.zeros((nwave, 6)) @@ -684,7 +684,7 @@ def star_centers_from_waffle_img_cube(cube, wave, instrument, waffle_orientation # remove any NaN img = np.nan_to_num(img) - + # center guess (+offset) cx_int = int(center_guess[idx, 0]) + center_offset[0] cy_int = int(center_guess[idx, 1]) + center_offset[1] @@ -702,17 +702,17 @@ def star_centers_from_waffle_img_cube(cube, wave, instrument, waffle_orientation mask = aperture.disc(cube[0].shape[-1], 5*loD[idx], diameter=False, center=(cx_int, cy_int), invert=True) img *= mask - + # create plot if needed if save_path: fig = plt.figure('Waffle center - imaging', figsize=(8, 8)) plt.clf() - + col = ['red', 'blue', 'magenta', 'purple'] ax = fig.add_subplot(111) ax.imshow(img/img.max(), aspect='equal', vmin=1e-2, vmax=1, norm=colors.LogNorm(), interpolation='nearest') ax.set_title(r'Image #{0} - {1:.0f} nm'.format(idx+1, wave)) - + # satelitte spots for s in range(4): cx = int(cx_int + freq*loD[idx] * np.cos(orient + np.pi/2*s)) @@ -721,14 +721,14 @@ def star_centers_from_waffle_img_cube(cube, wave, instrument, waffle_orientation sub = img[cy-box:cy+box, cx-box:cx+box] # bounds for fitting: spots slightly outside of the box are allowed - gbounds = { + gbounds = { 'amplitude': (0.0, None), 'x_mean': (-2.0, box*2+2), 'y_mean': (-2.0, box*2+2), 'x_stddev': (1.0, 20.0), 'y_stddev': (1.0, 20.0) } - + # fit: Gaussian + constant imax = np.unravel_index(np.argmax(sub), sub.shape) g_init = models.Gaussian2D(amplitude=sub.max(), x_mean=imax[1], y_mean=imax[0], @@ -748,7 +748,7 @@ def star_centers_from_waffle_img_cube(cube, wave, instrument, waffle_orientation if save_path: ax.plot([cx_final], [cy_final], marker='D', color=col[s]) ax.add_patch(patches.Rectangle((cx-box, cy-box), 2*box, 2*box, ec='white', fc='none')) - + axs = fig.add_axes((0.17+s*0.2, 0.17, 0.1, 0.1)) axs.imshow(sub, aspect='equal', vmin=0, vmax=sub.max(), interpolation='nearest') axs.plot([par[0].x_mean], [par[0].y_mean], marker='D', color=col[s]) @@ -764,7 +764,7 @@ def star_centers_from_waffle_img_cube(cube, wave, instrument, waffle_orientation intersect = lines_intersect(spot_centers[idx, 0, :], spot_centers[idx, 2, :], spot_centers[idx, 1, :], spot_centers[idx, 3, :]) img_centers[idx] = intersect - + # scaling spot_dist[idx, 0] = np.sqrt(np.sum((spot_centers[idx, 0, :] - spot_centers[idx, 2, :])**2)) spot_dist[idx, 1] = np.sqrt(np.sum((spot_centers[idx, 1, :] - spot_centers[idx, 3, :])**2)) @@ -787,7 +787,7 @@ def star_centers_from_waffle_img_cube(cube, wave, instrument, 
waffle_orientation ext = 1000 / pixel ax.set_xlim(intersect[0]-ext, intersect[0]+ext) ax.set_ylim(intersect[1]-ext, intersect[1]+ext) - + plt.tight_layout() if save_path: @@ -820,15 +820,15 @@ def star_centers_from_waffle_lss_cube(cube_cen, cube_sci, wave_cube, centers, pi pixel : float Pixel scale, in mas/pixel - - high_pass : bool + + high_pass : bool Apply high-pass filter to the image before searching for the satelitte spots. Default is False save_path : str Path where to save the fit images. Default is None, which means that the plot is not produced - + Returns ------- spot_centers : array_like @@ -843,7 +843,7 @@ def star_centers_from_waffle_lss_cube(cube_cen, cube_sci, wave_cube, centers, pi # standard parameters box = 120 - + # loop over fiels and wavelengths nimg = len(cube_cen) @@ -851,29 +851,29 @@ def star_centers_from_waffle_lss_cube(cube_cen, cube_sci, wave_cube, centers, pi if save_path: plt.figure('Waffle centering - spectro', figsize=(7, 12)) plt.clf() - + # subtract science cube if provided if cube_sci is not None: print(' ==> subtract science cube') cube_cen -= cube_sci - + spot_centers = np.full((1024, 2, 2), np.nan) spot_dist = np.full((1024, nimg), np.nan) img_centers = np.full((1024, nimg), np.nan) for fidx, img in enumerate(cube_cen): print(' field {0:2d}/{1:2d}'.format(fidx+1, nimg)) - + # remove any NaN img = np.nan_to_num(cube_cen[fidx]) - + if high_pass: img = img - ndimage.median_filter(img, 15, mode='mirror') - + # sub-image cx_int = centers[fidx, 0] sub = img[:, cx_int-box:cx_int+box] xx = np.arange(2*box) - + # wavelengths for this field wave = wave_cube[fidx] @@ -881,41 +881,41 @@ def star_centers_from_waffle_lss_cube(cube_cen, cube_sci, wave_cube, centers, pi for widx in good: # lambda/D loD = wave[widx]*1e-9/8 * 180/np.pi * 3600*1000/pixel - + # first waffle prof = sub[widx] * (xx < box).astype(np.int) imax = np.argmax(prof) g_init = models.Gaussian1D(amplitude=prof.max(), mean=imax, stddev=loD) + \ - models.Const1D(amplitude=0) + models.Const1D(amplitude=0) fit_g = fitting.LevMarLSQFitter() par = fit_g(g_init, xx, prof) - + c0 = par[0].mean.value - box + cx_int - + # second waffle prof = sub[widx] * (xx > box).astype(np.int) imax = np.argmax(prof) g_init = models.Gaussian1D(amplitude=prof.max(), mean=imax, stddev=loD) + \ - models.Const1D(amplitude=0) + models.Const1D(amplitude=0) fit_g = fitting.LevMarLSQFitter() par = fit_g(g_init, xx, prof) - + c1 = par[0].mean.value - box + cx_int spot_centers[widx, fidx, 0] = c0 spot_centers[widx, fidx, 1] = c1 - + spot_dist[widx, fidx] = np.abs(c1-c0) - + img_centers[widx, fidx] = (c0 + c1) / 2 - + if save_path: plt.subplot(1, 2, fidx+1) plt.imshow(img/img.max(), aspect='equal', vmin=-1e-2, vmax=1e-2, interpolation='nearest') plt.plot(spot_centers[:, fidx, 0], range(1024), marker='.', color='r', linestyle='none', ms=2, alpha=1) plt.plot(spot_centers[:, fidx, 1], range(1024), marker='.', color='r', linestyle='none', ms=2, alpha=1) plt.plot(img_centers[:, fidx], range(1024), marker='.', color='r', linestyle='none', ms=2, alpha=1) - + plt.title(r'Field #{0}'.format(fidx+1)) ext = 1000 / pixel @@ -927,4 +927,3 @@ def star_centers_from_waffle_lss_cube(cube_cen, cube_sci, wave_cube, centers, pi plt.savefig(save_path) return spot_centers, spot_dist, img_centers - diff --git a/vltpf/transmission.py b/vltpf/transmission.py index 9923d5b..a941206 100644 --- a/vltpf/transmission.py +++ b/vltpf/transmission.py @@ -47,7 +47,7 @@ 'DP_0_NB_ContK1': {'CFW': 'N_ContK1', 'DFW': 'P0-90', 'Wavelength': (2091, 2091), 'Bandwidth': (34, 
34)}, 'DP_0_NB_ContK2': {'CFW': 'N_ContK2', 'DFW': 'P0-90', 'Wavelength': (2266, 2266), 'Bandwidth': (32, 32)}, 'DP_0_NB_FeII': {'CFW': 'N_FeII', 'DFW': 'P0-90', 'Wavelength': (1642, 1642), 'Bandwidth': (24, 24)}, - 'DP_0_NB_H2': {'CFW': 'N_H2', 'DFW': 'P0-90', 'Wavelength': (2124, 2124), 'Bandwidth': (31, 31)}, + 'DP_0_NB_H2': {'CFW': 'N_H2', 'DFW': 'P0-90', 'Wavelength': (2124, 2124), 'Bandwidth': (31, 31)}, 'DP_0_NB_HeI': {'CFW': 'N_HeI', 'DFW': 'P0-90', 'Wavelength': (1085, 1085), 'Bandwidth': (14, 14)}, 'DP_0_NB_PaB': {'CFW': 'N_PaB', 'DFW': 'P0-90', 'Wavelength': (1283, 1283), 'Bandwidth': (18, 18)}, @@ -63,7 +63,7 @@ 'DP_45_NB_ContK1': {'CFW': 'N_ContK1', 'DFW': 'P45-135', 'Wavelength': (2091, 2091), 'Bandwidth': (34, 34)}, 'DP_45_NB_ContK2': {'CFW': 'N_ContK2', 'DFW': 'P45-135', 'Wavelength': (2266, 2266), 'Bandwidth': (32, 32)}, 'DP_45_NB_FeII': {'CFW': 'N_FeII', 'DFW': 'P45-135', 'Wavelength': (1642, 1642), 'Bandwidth': (24, 24)}, - 'DP_45_NB_H2': {'CFW': 'N_H2', 'DFW': 'P45-135', 'Wavelength': (2124, 2124), 'Bandwidth': (31, 31)}, + 'DP_45_NB_H2': {'CFW': 'N_H2', 'DFW': 'P45-135', 'Wavelength': (2124, 2124), 'Bandwidth': (31, 31)}, 'DP_45_NB_HeI': {'CFW': 'N_HeI', 'DFW': 'P45-135', 'Wavelength': (1085, 1085), 'Bandwidth': (14, 14)}, 'DP_45_NB_PaB': {'CFW': 'N_PaB', 'DFW': 'P45-135', 'Wavelength': (1283, 1283), 'Bandwidth': (18, 18)} } @@ -84,10 +84,10 @@ def _reinterpolate(tr, wave, new_wave): wave : array_like Wavelengths vector, in nanometers - + new_wave : array_like New wavelengths vector, in nanometers - + Returns ------- tr_regular : array_like @@ -106,7 +106,7 @@ def _load(type, name): Parameters ---------- - type : str + type : str Type of transmission curve to read. Possible values are 'ndf', 'cfw', 'dfw' and 'ird_ndf'. @@ -122,7 +122,7 @@ def _load(type, name): if type == 'ndf': # transmission for NDF - + # find file package_directory = os.path.dirname(os.path.abspath(__file__)) filter_file = os.path.join(package_directory, 'data', 'SPHERE_CPI_ND.txt') @@ -139,7 +139,7 @@ def _load(type, name): return transmissions[name] elif type == 'cfw': # transmission for CFW - + # find file package_directory = os.path.dirname(os.path.abspath(__file__)) filter_file = os.path.join(package_directory, 'data', 'SPHERE_IRDIS_{0}.txt'.format(name)) @@ -150,28 +150,28 @@ def _load(type, name): # save for later calls transmissions[name] = _reinterpolate(cfw_tr[1], cfw_tr[0], wave_grid) - return transmissions[name] + return transmissions[name] elif type == 'ird_ndf': # transmission for IRDIS ND - + # find file package_directory = os.path.dirname(os.path.abspath(__file__)) filter_file = os.path.join(package_directory, 'data', 'SPHERE_IRDIS_ND.txt') - + # load data ird_ndf_tr = np.loadtxt(filter_file, unpack=False).T - + # save for later calls transmissions['IRD-ND'] = _reinterpolate(ird_ndf_tr[1], ird_ndf_tr[0], wave_grid) - + return transmissions['IRD-ND'] elif type == 'dfw': # transmission for DFW - + # find file package_directory = os.path.dirname(os.path.abspath(__file__)) filter_file = os.path.join(package_directory, 'data', 'SPHERE_IRDIS_{0}.txt'.format(name)) - + # load data dfw_tr_tmp = np.loadtxt(filter_file, unpack=False).T @@ -179,21 +179,21 @@ def _load(type, name): dfw_tr[0] = _reinterpolate(dfw_tr_tmp[1], dfw_tr_tmp[0], wave_grid) dfw_tr[1] = _reinterpolate(dfw_tr_tmp[2], dfw_tr_tmp[0], wave_grid) - # save for later calls + # save for later calls transmissions[name] = dfw_tr return transmissions[name] else: raise ValueError('Unknown type {0}'.format(type)) - - + + def 
irdis_nd(combination, nd_filter): ''' Provides the IRDIS transmission for a given neutral density and filter combination Description - ----------- + ----------- This function works for all the IRDIS broad-band (BB), dual-band (DB) and narrow-band (NB) filters. The names of the filter combinations are provided below. @@ -231,15 +231,15 @@ def irdis_nd(combination, nd_filter): transmission curve of the CPI neutral density filter. The transmission of each filter is read from text files stored on disk. - + Parameters ---------- - combination : str + combination : str Name of the filter combination. This parameter can be read directly from the header of any SPHERE/IRDIS raw file with the keyword 'HIERARCH ESO INS COMB IFLT' - nd_filter : str + nd_filter : str Name of the neutral density filter. This parameter can be read directly from the header of any SPHERE/IRDIS raw file with the keyword 'HIERARCH ESO INS4 FILT2 NAME' @@ -270,12 +270,12 @@ def irdis_nd(combination, nd_filter): ndf_tr = transmissions.get(ndf) if ndf_tr is None: ndf_tr = _load('ndf', ndf) - + # transmissions for CFW cfw_tr = transmissions.get(cfw) if cfw_tr is None: cfw_tr = _load('cfw', cfw) - + # transmission for IRDIS ND if cfw == 'B_ND-H': ird_ndf_tr = transmissions.get('IRD-ND') @@ -283,7 +283,7 @@ def irdis_nd(combination, nd_filter): ird_ndf_tr = _load('ird_ndf', None) else: ird_ndf_tr = np.ones(wave_grid.size) - + # get transmissions for DFW if dfw is not None: dfw_tr = transmissions.get(dfw) @@ -310,16 +310,16 @@ def transmission_nd(nd_filter, wave=None): Provides the transmission for a given neutral density Description - ----------- + ----------- The function provides the transmission curve of a given CPI neutral density filter. The user can provide an array of wavelengths. It works for both IRDIS and IFS, within the range of wavalength covered by SPHERE (0.95-2.3 microns). - + Parameters ---------- - nd_filter : str + nd_filter : str Name of the neutral density filter. This parameter can be read directly from the header of any SPHERE/IRDIS raw file with the keyword 'HIERARCH ESO INS4 FILT2 NAME' @@ -327,13 +327,13 @@ def transmission_nd(nd_filter, wave=None): wave : array_like, optional Wavelengths at which the transmission is needed, in nanometers. Default is None - + Returns ------- wave : array_like Wavelength, in nanometers - - tr : array_like + + tr : array_like Transmission of the neutral density filter ''' @@ -350,10 +350,10 @@ def transmission_nd(nd_filter, wave=None): ndf_tr = _load('ndf', ndf) if wave is None: - wave = wave_grid + wave = wave_grid else: - ndf_tr = _reinterpolate(ndf_tr, wave_grid, wave) - + ndf_tr = _reinterpolate(ndf_tr, wave_grid, wave) + return wave, ndf_tr @@ -362,7 +362,7 @@ def transmission_filter(combination): Provides the IRDIS transmission curve for a given filter combination Description - ----------- + ----------- This function works for all the IRDIS broad-band (BB), dual-band (DB) and narrow-band (NB) filters. The names of the filter combinations are provided below. @@ -395,10 +395,10 @@ def transmission_filter(combination): - NB_Hel - NB_PaB - + Parameters ---------- - combination : str + combination : str Name of the filter combination. 
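For reference, the transmission curves returned by these functions are element-wise products of the individual filter curves, all resampled onto the module's common wavelength grid. Schematically, reusing the internal variable names from the function bodies:

tr_0 = cfw_tr * dfw_tr[0] * ird_ndf_tr    # left IRDIS field of view
tr_1 = cfw_tr * dfw_tr[1] * ird_ndf_tr    # right IRDIS field of view

where cfw_tr is the common filter wheel curve, dfw_tr holds the two dual-filter wheel curves (ones in BB/NB mode), and ird_ndf_tr is the IRDIS neutral density curve (ones unless B_ND-H is used).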
This parameter can be read directly from the header of any SPHERE/IRDIS raw file with the keyword 'HIERARCH ESO INS COMB IFLT' @@ -407,7 +407,7 @@ def transmission_filter(combination): ------- wave : array_like Wavelength, in nanometers - + tr_0, tr_1 : array_like The transmissions of the instrument on the left and right IRDIS fields of view @@ -427,7 +427,7 @@ def transmission_filter(combination): cfw_tr = transmissions.get(cfw) if cfw_tr is None: cfw_tr = _load('cfw', cfw) - + # transmission for IRDIS ND if cfw == 'B_ND-H': ird_ndf_tr = transmissions.get('IRD-ND') @@ -435,7 +435,7 @@ def transmission_filter(combination): ird_ndf_tr = _load('ird_ndf', None) else: ird_ndf_tr = np.ones(wave_grid.size) - + # get transmissions for DFW if dfw is not None: dfw_tr = transmissions.get(dfw) @@ -444,7 +444,7 @@ def transmission_filter(combination): else: # if BB or NB mode, just use 1 for DBF dfw_tr = np.ones((2, wave_grid.size)) - + # integrated transmission value tr_0 = cfw_tr * dfw_tr[0] * ird_ndf_tr tr_1 = cfw_tr * dfw_tr[1] * ird_ndf_tr @@ -458,7 +458,7 @@ def wavelength_bandwidth_filter(combination): combination Description - ----------- + ----------- This function works for all the IRDIS broad-band (BB), dual-band (DB) and narrow-band (NB) filters. The names of the filter combinations are provided below. @@ -491,10 +491,10 @@ def wavelength_bandwidth_filter(combination): - NB_Hel - NB_PaB - + Parameters ---------- - combination : str + combination : str Name of the filter combination. This parameter can be read directly from the header of any SPHERE/IRDIS raw file with the keyword 'HIERARCH ESO INS COMB IFLT' @@ -503,17 +503,16 @@ def wavelength_bandwidth_filter(combination): ------- wave : array_like Tuple of central wavelengths, in nanometers - + bandwidth : array_like - Tuple of bandwidth, in nanometers + Tuple of bandwidth, in nanometers ''' setup = combinations.get(combination) if setup is None: raise ValueError('Unknown filter combination {0}'.format(combination)) - + wave = setup['Wavelength'] bandwidth = setup['Bandwidth'] return wave, bandwidth - diff --git a/vltpf/utils/reduction_path.py b/vltpf/utils/reduction_path.py index 58fee33..0696fb8 100644 --- a/vltpf/utils/reduction_path.py +++ b/vltpf/utils/reduction_path.py @@ -11,7 +11,7 @@ class ReductionPath(object): ################################################## # Constructor ################################################## - + def __init__(self, path): self._root = Path(path).expanduser() @@ -29,14 +29,14 @@ def __init__(self, path): ################################################## # Representation ################################################## - + def __repr__(self): return str(self._root) - + ################################################## # Properties ################################################## - + @property def root(self): return self._root @@ -59,23 +59,23 @@ def root(self, path): @property def raw(self): return self._raw - + @property def calib(self): return self._calib - + @property def sof(self): return self._sof - + @property def tmp(self): return self._tmp - + @property def preproc(self): return self._preproc - + @property def products(self): return self._products @@ -83,7 +83,7 @@ def products(self): ################################################## # Methods ################################################## - + def create_subdirectories(self): # create sub-directories if needed if not self._raw.exists(): @@ -103,4 +103,3 @@ def create_subdirectories(self): if not self._products.exists(): 
self._products.mkdir(exist_ok=True) - From 4df85615e13ef55691ce96cd553c2121743e54ef Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Sun, 25 Aug 2019 13:02:31 +0200 Subject: [PATCH 026/101] Added FIXME comment in preparation for ticket #60 --- vltpf/IFS.py | 3 +++ vltpf/IRDIS/ImagingReduction.py | 1 + vltpf/IRDIS/SpectroReduction.py | 2 ++ 3 files changed, 6 insertions(+) diff --git a/vltpf/IFS.py b/vltpf/IFS.py index 6cffb70..9434ebe 100644 --- a/vltpf/IFS.py +++ b/vltpf/IFS.py @@ -1500,6 +1500,7 @@ def sph_ifs_cal_wave(self, silent=True): # esorex parameters if mode == 'OBS_YJ': + # FIXME: use wave_cal_lasers in config args = ['esorex', '--no-checksum=TRUE', '--no-datamd5=TRUE', @@ -1511,6 +1512,7 @@ def sph_ifs_cal_wave(self, silent=True): '--ifs.wave_calib.outfilename={0}/{1}.fits'.format(path.calib, wav_file), sof] elif mode == 'OBS_H': + # FIXME: use wave_cal_lasers in config args = ['esorex', '--no-checksum=TRUE', '--no-datamd5=TRUE', @@ -2538,6 +2540,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a science_dim = 290 # centering + # FIXME: store default center in IFS.ini? centers_default = np.full((nwave, 2), 290//2) if skip_center: print('Warning: images will not be fine centered. They will just be combined.') diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py index 5623b66..b85142b 100644 --- a/vltpf/IRDIS/ImagingReduction.py +++ b/vltpf/IRDIS/ImagingReduction.py @@ -1325,6 +1325,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a science_dim = 1024 # centering + # FIXME: store default center in IRDIS.ini? centers_default = np.array([[484, 517], [486, 508]]) if skip_center: print('Warning: images will not be fine centered. They will just be combined.') diff --git a/vltpf/IRDIS/SpectroReduction.py b/vltpf/IRDIS/SpectroReduction.py index 51508df..6c839a3 100644 --- a/vltpf/IRDIS/SpectroReduction.py +++ b/vltpf/IRDIS/SpectroReduction.py @@ -1006,6 +1006,7 @@ def sph_ird_wave_calib(self, silent=True): file.write('{0}/{1}.fits {2}\n'.format(path.calib, bpm_file.index[0], 'IRD_STATIC_BADPIXELMAP')) file.close() + # FIXME: use wave_lasers from .ini file args = ['esorex', '--no-checksum=TRUE', '--no-datamd5=TRUE', @@ -1034,6 +1035,7 @@ def sph_ird_wave_calib(self, silent=True): file.write('{0}/{1}.fits {2}\n'.format(path.calib, bpm_file.index[0], 'IRD_STATIC_BADPIXELMAP')) file.close() + # FIXME: use wave_lasers from .ini file args = ['esorex', '--no-checksum=TRUE', '--no-datamd5=TRUE', From 66ce18b617bbe9f8743e5565b6acb5ae00675b19 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Sun, 25 Aug 2019 13:03:05 +0200 Subject: [PATCH 027/101] Store additional parameters in .ini files Ticket #60 --- vltpf/instruments/IFS.ini | 10 +++++++++- vltpf/instruments/IRDIS.ini | 22 ++++++++++++++++++++++ 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/vltpf/instruments/IFS.ini b/vltpf/instruments/IFS.ini index 16a1e7a..9737e60 100644 --- a/vltpf/instruments/IFS.ini +++ b/vltpf/instruments/IFS.ini @@ -10,7 +10,15 @@ nwave = 39 # calibration parameters # [calibration] -wave_cal_lasers = 987.72, 1123.71, 1309.37, 1545.07 + +# wavelength calibration lasers +wave_cal_lasers = 987.72, 1123.71, 1309.37, 1545.07, 1730.23, 2015.33 + +# default center of images +default_center = 145, 145 + +# field orientation offset +orientation_offset = 102 # # default reduction parameters diff --git a/vltpf/instruments/IRDIS.ini b/vltpf/instruments/IRDIS.ini index 0b5ee11..2f27889 100644 --- 
a/vltpf/instruments/IRDIS.ini +++ b/vltpf/instruments/IRDIS.ini @@ -9,8 +9,30 @@ pixel = 12.25 # calibration parameters # [calibration] + +# wavelength calibration lasers wave_cal_lasers = 987.72, 1123.71, 1309.37, 1545.07, 1730.23, 2015.33 +# field orientation offset +orientation_offset = 0 + +[calibration-imaging] + +# default center of images +default_center = ((485, 520), (486, 508)) + +[calibration-spectro] + +# LRS parameters +default_center_lrs = ((484, 496), (488, 486)) +wave_min_lrs = 940 +wave_max_lrs = 2330 + +# MRS parameters +default_center_mrs = ((474, 519), (479, 509)) +wave_min_mrs = 940 +wave_max_mrs = 1820 + # # general reduction parameters # From f890805a430bc0f3521be128b71485c329cc7291 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Sun, 25 Aug 2019 13:16:21 +0200 Subject: [PATCH 028/101] Read new parameters from .ini files Ticket #60 --- vltpf/IFS.py | 6 ++++-- vltpf/IRDIS/ImagingReduction.py | 8 ++++++-- vltpf/IRDIS/SpectroReduction.py | 15 ++++++++++++--- vltpf/instruments/IFS.ini | 4 ++-- vltpf/instruments/IRDIS.ini | 8 ++++---- 5 files changed, 28 insertions(+), 13 deletions(-) diff --git a/vltpf/IFS.py b/vltpf/IFS.py index 9434ebe..eb6485a 100644 --- a/vltpf/IFS.py +++ b/vltpf/IFS.py @@ -397,9 +397,11 @@ def __init__(self, path): self._nwave = int(config.get('instrument', 'nwave')) # calibration - self._wave_cal_lasers = [float(w) for w in config.get('calibration', 'wave_cal_lasers').split(',')] + self._wave_cal_lasers = eval(config.get('calibration', 'wave_cal_lasers')) + self._default_center = eval(config.get('calibration', 'default_center')) + self._orientation_offset = eval(config.get('calibration', 'orientation_offset')) - # reduction + # reduction parameters self._config = dict(config.items('reduction')) for key, value in self._config.items(): try: diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py index b85142b..a1ab879 100644 --- a/vltpf/IRDIS/ImagingReduction.py +++ b/vltpf/IRDIS/ImagingReduction.py @@ -87,9 +87,13 @@ def __init__(self, path): self._nwave = 2 # calibration - self._wave_cal_lasers = [float(w) for w in config.get('calibration', 'wave_cal_lasers').split(',')] + self._wave_cal_lasers = eval(config.get('calibration', 'wave_cal_lasers')) + + # imaging calibration + self._default_center = eval(config.get('calibration-imaging', 'default_center')) + self._orientation_offset = eval(config.get('calibration-imaging', 'orientation_offset')) - # reduction + # reduction parameters self._config = {} for group in ['reduction', 'reduction-imaging']: items = dict(config.items(group)) diff --git a/vltpf/IRDIS/SpectroReduction.py b/vltpf/IRDIS/SpectroReduction.py index 6c839a3..8140113 100644 --- a/vltpf/IRDIS/SpectroReduction.py +++ b/vltpf/IRDIS/SpectroReduction.py @@ -129,9 +129,18 @@ def __init__(self, path): self._nwave = -1 # calibration - self._wave_cal_lasers = [float(w) for w in config.get('calibration', 'wave_cal_lasers').split(',')] - - # reduction + self._wave_cal_lasers = eval(config.get('calibration', 'wave_cal_lasers')) + + # spectro calibration + self._default_center_lrs = eval(config.get('calibration-spectro', 'default_center_lrs')) + self._wave_min_lrs = eval(config.get('calibration-spectro', 'wave_min_lrs')) + self._wave_max_lrs = eval(config.get('calibration-spectro', 'wave_max_lrs')) + + self._default_center_mrs = eval(config.get('calibration-spectro', 'default_center_mrs')) + self._wave_min_mrs = eval(config.get('calibration-spectro', 'wave_min_mrs')) + self._wave_max_mrs = 
eval(config.get('calibration-spectro', 'wave_max_mrs')) + + # reduction parameters self._config = {} for group in ['reduction', 'reduction-spectro']: items = dict(config.items(group)) diff --git a/vltpf/instruments/IFS.ini b/vltpf/instruments/IFS.ini index 9737e60..a6e904e 100644 --- a/vltpf/instruments/IFS.ini +++ b/vltpf/instruments/IFS.ini @@ -12,10 +12,10 @@ nwave = 39 [calibration] # wavelength calibration lasers -wave_cal_lasers = 987.72, 1123.71, 1309.37, 1545.07, 1730.23, 2015.33 +wave_cal_lasers = (987.72, 1123.71, 1309.37, 1545.07, 1730.23, 2015.33) # default center of images -default_center = 145, 145 +default_center = (290//2, 290//2) # field orientation offset orientation_offset = 102 diff --git a/vltpf/instruments/IRDIS.ini b/vltpf/instruments/IRDIS.ini index 2f27889..fcb861e 100644 --- a/vltpf/instruments/IRDIS.ini +++ b/vltpf/instruments/IRDIS.ini @@ -11,16 +11,16 @@ pixel = 12.25 [calibration] # wavelength calibration lasers -wave_cal_lasers = 987.72, 1123.71, 1309.37, 1545.07, 1730.23, 2015.33 - -# field orientation offset -orientation_offset = 0 +wave_cal_lasers = (987.72, 1123.71, 1309.37, 1545.07, 1730.23, 2015.33) [calibration-imaging] # default center of images default_center = ((485, 520), (486, 508)) +# field orientation offset +orientation_offset = 0 + [calibration-spectro] # LRS parameters From 6d349bf5455c1136e480a8a4a8779293c343f401 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Sun, 25 Aug 2019 13:31:42 +0200 Subject: [PATCH 029/101] Use laser wavelengths from the IRDIS.ini file Ticket #60 --- vltpf/IRDIS/SpectroReduction.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/vltpf/IRDIS/SpectroReduction.py b/vltpf/IRDIS/SpectroReduction.py index 8140113..5a3a641 100644 --- a/vltpf/IRDIS/SpectroReduction.py +++ b/vltpf/IRDIS/SpectroReduction.py @@ -1004,6 +1004,9 @@ def sph_ird_wave_calib(self, silent=True): # products wav_file = 'wave_calib' + # laser wavelengths + wave_lasers = self._wave_cal_lasers + # esorex parameters if filter_comb == 'S_LR': # create standard sof in LRS @@ -1015,7 +1018,6 @@ def sph_ird_wave_calib(self, silent=True): file.write('{0}/{1}.fits {2}\n'.format(path.calib, bpm_file.index[0], 'IRD_STATIC_BADPIXELMAP')) file.close() - # FIXME: use wave_lasers from .ini file args = ['esorex', '--no-checksum=TRUE', '--no-datamd5=TRUE', @@ -1024,6 +1026,12 @@ def sph_ird_wave_calib(self, silent=True): '--ird.wave_calib.grism_mode=FALSE', '--ird.wave_calib.threshold=2000', '--ird.wave_calib.number_lines=6', + '--ird.wave_calib.wavelength_line1={:.2f}'.format(wave_lasers[0]), + '--ird.wave_calib.wavelength_line2={:.2f}'.format(wave_lasers[1]), + '--ird.wave_calib.wavelength_line3={:.2f}'.format(wave_lasers[2]), + '--ird.wave_calib.wavelength_line4={:.2f}'.format(wave_lasers[3]), + '--ird.wave_calib.wavelength_line5={:.2f}'.format(wave_lasers[4]), + '--ird.wave_calib.wavelength_line6={:.2f}'.format(wave_lasers[5]), '--ird.wave_calib.outfilename={0}/{1}.fits'.format(path.calib, wav_file), sof] elif filter_comb == 'S_MR': @@ -1044,7 +1052,6 @@ def sph_ird_wave_calib(self, silent=True): file.write('{0}/{1}.fits {2}\n'.format(path.calib, bpm_file.index[0], 'IRD_STATIC_BADPIXELMAP')) file.close() - # FIXME: use wave_lasers from .ini file args = ['esorex', '--no-checksum=TRUE', '--no-datamd5=TRUE', @@ -1053,6 +1060,11 @@ def sph_ird_wave_calib(self, silent=True): '--ird.wave_calib.grism_mode=TRUE', '--ird.wave_calib.threshold=1000', '--ird.wave_calib.number_lines=5', + 
'--ird.wave_calib.wavelength_line1={:.2f}'.format(wave_lasers[0]), + '--ird.wave_calib.wavelength_line2={:.2f}'.format(wave_lasers[1]), + '--ird.wave_calib.wavelength_line3={:.2f}'.format(wave_lasers[2]), + '--ird.wave_calib.wavelength_line4={:.2f}'.format(wave_lasers[3]), + '--ird.wave_calib.wavelength_line5={:.2f}'.format(wave_lasers[4]), '--ird.wave_calib.outfilename={0}/{1}.fits'.format(path.calib, wav_file), sof] From 4e6de64e9a4c75074d31e4aecace5e6e3a0df6cb Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Sun, 25 Aug 2019 13:34:53 +0200 Subject: [PATCH 030/101] Use only parameters loaded from IRDIS.ini file Ticket #60. No values are hard-coded any more --- vltpf/IRDIS/SpectroReduction.py | 58 +++++++++++++-------------------- 1 file changed, 23 insertions(+), 35 deletions(-) diff --git a/vltpf/IRDIS/SpectroReduction.py b/vltpf/IRDIS/SpectroReduction.py index 5a3a641..0b279e9 100644 --- a/vltpf/IRDIS/SpectroReduction.py +++ b/vltpf/IRDIS/SpectroReduction.py @@ -1350,25 +1350,21 @@ def sph_ird_star_center(self, high_pass=False, plot=True): files_info = self._files_info frames_info = self._frames_info_preproc - # filter combination + # resolution-specific parameters filter_comb = frames_info['INS COMB IFLT'].unique()[0] - # FIXME: centers should be stored in .ini files and passed to - # function when needed (ticket #60) if filter_comb == 'S_LR': - centers = np.array(((484, 496), - (488, 486))) - wave_min = 920 - wave_max = 2330 + centers = self._default_center_lrs + wave_min = self._wave_min_lrs + wave_max = self._wave_max_lrs elif filter_comb == 'S_MR': - centers = np.array(((474, 519), - (479, 509))) - wave_min = 940 - wave_max = 1820 + centers = self._default_center_mrs + wave_min = self._wave_min_mrs + wave_max = self._wave_max_mrs # wavelength map wave_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IRD_WAVECALIB')] wave_calib = fits.getdata(path.calib / '{}.fits'.format(wave_file.index[0])) - wave_lin = get_wavelength_calibration(wave_calib, centers, wave_min, wave_max) + wave_lin = get_wavelength_calibration(wave_calib, centers, wave_min, wave_max) # start with OBJECT,FLUX flux_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,FLUX'] @@ -1461,25 +1457,21 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): files_info = self._files_info frames_info = self._frames_info_preproc - # filter combination + # resolution-specific parameters filter_comb = frames_info['INS COMB IFLT'].unique()[0] - # FIXME: centers should be stored in .ini files and passed to - # function when needed (ticket #60) if filter_comb == 'S_LR': - centers = np.array(((484, 496), - (488, 486))) - wave_min = 920 - wave_max = 2330 + centers = self._default_center_lrs + wave_min = self._wave_min_lrs + wave_max = self._wave_max_lrs elif filter_comb == 'S_MR': - centers = np.array(((474, 519), - (479, 509))) - wave_min = 940 - wave_max = 1820 + centers = self._default_center_mrs + wave_min = self._wave_min_mrs + wave_max = self._wave_max_mrs # wavelength map wave_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IRD_WAVECALIB')] wave_calib = fits.getdata(path.calib / '{}.fits'.format(wave_file.index[0])) - wave_lin = get_wavelength_calibration(wave_calib, centers, wave_min, wave_max) + wave_lin = get_wavelength_calibration(wave_calib, centers, wave_min, wave_max) # reference wavelength idx_ref = 3 @@ -1664,20 +1656,16 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m nwave = self._nwave frames_info = 
self._frames_info_preproc - # filter combination + # resolution-specific parameters filter_comb = frames_info['INS COMB IFLT'].unique()[0] - # FIXME: centers should be stored in .ini files and passed to - # function when needed (ticket #60) if filter_comb == 'S_LR': - centers = np.array(((484, 496), - (488, 486))) - wave_min = 920 - wave_max = 2330 + centers = self._default_center_lrs + wave_min = self._wave_min_lrs + wave_max = self._wave_max_lrs elif filter_comb == 'S_MR': - centers = np.array(((474, 519), - (479, 509))) - wave_min = 940 - wave_max = 1820 + centers = self._default_center_mrs + wave_min = self._wave_min_mrs + wave_max = self._wave_max_mrs # wavelength solution: make sure we have the same number of # wave points in each field From ac9a7f993154a4daae2bd3b2427fded63aee7b5a Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Sun, 25 Aug 2019 14:00:29 +0200 Subject: [PATCH 031/101] Instrumental parameters moved out of toolbox.star_centers_from_waffle_img_cube() Implement ticket #60 for IRDIS --- vltpf/IRDIS/ImagingReduction.py | 11 ++++++---- vltpf/toolbox.py | 37 ++++++++++++--------------------- 2 files changed, 20 insertions(+), 28 deletions(-) diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py index a1ab879..9c0bca1 100644 --- a/vltpf/IRDIS/ImagingReduction.py +++ b/vltpf/IRDIS/ImagingReduction.py @@ -1168,7 +1168,9 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): # parameters path = self._path pixel = self._pixel - frames_info = self._frames_info_preproc + orientation_offset = self._orientation_offset + center_guess = np.array(self._default_center) + frames_info = self._frames_info_preproc # wavelength filter_comb = frames_info['INS COMB IFLT'].unique()[0] @@ -1221,8 +1223,9 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): save_path = None spot_center, spot_dist, img_center \ = toolbox.star_centers_from_waffle_img_cube(cube, wave, 'IRDIS', waffle_orientation, - high_pass=high_pass, center_offset=offset, - coro=coro, save_path=save_path) + pixel, orientation_offset, center_guess, + high_pass=high_pass, center_offset=offset, + coro=coro, save_path=save_path) # save fits.writeto(path.preproc / '{}_centers.fits'.format(fname), img_center, overwrite=True) @@ -1330,7 +1333,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # centering # FIXME: store default center in IRDIS.ini? - centers_default = np.array([[484, 517], [486, 508]]) + centers_default = self._default_center if skip_center: print('Warning: images will not be fine centered. They will just be combined.') shift_method = 'roll' diff --git a/vltpf/toolbox.py b/vltpf/toolbox.py index 1e4b222..b7e9ec8 100644 --- a/vltpf/toolbox.py +++ b/vltpf/toolbox.py @@ -581,6 +581,7 @@ def star_centers_from_PSF_lss_cube(cube, wave_cube, pixel, save_path=None): def star_centers_from_waffle_img_cube(cube, wave, instrument, waffle_orientation, + pixel, orientation_offset, center_guess, high_pass=False, center_offset=(0, 0), smooth=0, coro=True, save_path=None): ''' @@ -600,6 +601,16 @@ def star_centers_from_waffle_img_cube(cube, wave, instrument, waffle_orientation waffle_orientation : str String giving the waffle orientation '+' or 'x' + pixel : float + Pixel scale, in mas/pixel + + orientation_offset : float + Field orientation offset, in degrees + + center_guess : array + Estimation of the image center as a function of wavelength. + This should be an array of shape nwave*2. 
+ high_pass : bool Apply high-pass filter to the image before searching for the satelitte spots. Default is False @@ -634,20 +645,7 @@ def star_centers_from_waffle_img_cube(cube, wave, instrument, waffle_orientation ''' - # instrument - # FIXME: pixel size should be stored in .ini files and passed to - # function when needed (ticket #60) - if instrument == 'IFS': - pixel = 7.46 - offset = 102 - elif instrument == 'IRDIS': - pixel = 12.25 - offset = 0 - else: - raise ValueError('Unknown instrument {0}'.format(instrument)) - # standard parameters - dim = cube.shape[-1] nwave = wave.size loD = wave*1e-9/8 * 180/np.pi * 3600*1000/pixel @@ -655,9 +653,9 @@ def star_centers_from_waffle_img_cube(cube, wave, instrument, waffle_orientation freq = 10 * np.sqrt(2) * 0.97 box = 8 if waffle_orientation == '+': - orient = offset * np.pi / 180 + orient = orientation_offset * np.pi / 180 elif waffle_orientation == 'x': - orient = offset * np.pi / 180 + np.pi / 4 + orient = orientation_offset * np.pi / 180 + np.pi / 4 # spot fitting xx, yy = np.meshgrid(np.arange(2*box), np.arange(2*box)) @@ -666,15 +664,6 @@ def star_centers_from_waffle_img_cube(cube, wave, instrument, waffle_orientation if save_path is not None: pdf = PdfPages(save_path) - # center guess - # FIXME: centers should be stored in .ini files and passed to - # function when needed (ticket #60) - if instrument == 'IFS': - center_guess = np.full((nwave, 2), ((dim // 2)+3, (dim // 2)-1)) - elif instrument == 'IRDIS': - center_guess = np.array(((485, 520), - (486, 508))) - # loop over images spot_centers = np.zeros((nwave, 4, 2)) spot_dist = np.zeros((nwave, 6)) From b039707b640db75e928e2d14576073184e73a7c6 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Sun, 25 Aug 2019 15:33:52 +0200 Subject: [PATCH 032/101] Use the new toolbox.star_centers_from_waffle_img_cube() Implement ticket #60 for the IFS. 
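For reference, the IFS recipes now call the shared toolbox routine along these
lines (a minimal sketch: the cube, wavelengths and center guess below are
placeholder values, not the ones computed by the pipeline):

    import numpy as np
    import vltpf.toolbox as toolbox

    nwave = 39
    cube = np.zeros((nwave, 290, 290))           # placeholder IFS waffle cube
    wave_drh = np.linspace(950, 1350, nwave)     # placeholder wavelengths, in nm
    center_guess = np.full((nwave, 2), (290//2, 290//2))

    spot_center, spot_dist, img_center = \
        toolbox.star_centers_from_waffle_img_cube(cube, wave_drh, 'IFS', 'x',
                                                  7.46, 102, center_guess,
                                                  high_pass=True, center_offset=(-3, 0),
                                                  coro=True, save_path=None)

The pixel scale (7.46 mas/pixel) and field orientation offset (102 deg) are now
read from IFS.ini instead of being hard-coded inside the toolbox.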
--- examples/ifs_reduction.py | 5 ++--- vltpf/IFS.py | 11 +++++++++-- vltpf/instruments/IFS.ini | 2 +- 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/examples/ifs_reduction.py b/examples/ifs_reduction.py index 8eff4ca..c14907c 100644 --- a/examples/ifs_reduction.py +++ b/examples/ifs_reduction.py @@ -11,7 +11,6 @@ reduction.config['preproc_collapse_science'] = True reduction.config['preproc_collapse_type'] = 'coadd' reduction.config['preproc_coadd_value'] = 2 -reduction.config['center_offset'] = (-5, 0) reduction.config['clean'] = True reduction.show_config() @@ -45,8 +44,8 @@ reduction.sph_ifs_science_cubes(silent=True) #%% high-level science processing -reduction.sph_ifs_wavelength_recalibration(high_pass=True, offset=(-5, 0), plot=True) -reduction.sph_ifs_star_center(high_pass=True, offset=(-5, 0), plot=True) +reduction.sph_ifs_wavelength_recalibration(high_pass=True, offset=(-3, 0), plot=True) +reduction.sph_ifs_star_center(high_pass=True, offset=(-3, 0), plot=True) reduction.sph_ifs_combine_data(cpix=True, psf_dim=80, science_dim=200, correct_anamorphism=True, shift_method='interp', manual_center=None, skip_center=False, save_scaled=False) diff --git a/vltpf/IFS.py b/vltpf/IFS.py index eb6485a..b6e6de9 100644 --- a/vltpf/IFS.py +++ b/vltpf/IFS.py @@ -2158,6 +2158,9 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot= # parameters path = self._path nwave = self._nwave + pixel = self._pixel + orientation_offset = self._orientation_offset + center_guess = np.full((nwave, 2), self._default_center) files_info = self._files_info frames_info = self._frames_info_preproc @@ -2210,6 +2213,7 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot= save_path = None spot_center, spot_dist, img_center \ = toolbox.star_centers_from_waffle_img_cube(cube, wave_drh, 'IFS', waffle_orientation, + pixel, orientation_offset, center_guess, high_pass=high_pass, center_offset=offset, coro=coro, save_path=save_path) @@ -2329,6 +2333,7 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot= for w in self._wave_cal_lasers: plt.axvline(x=w, linestyle='dashed', color='purple') plt.xlabel(r'Wavelength [nm]') + plt.xlim(wave_min-50, wave_max+50) plt.ylabel('Flux') plt.legend(loc='upper right') plt.title('Wavelength calibration') @@ -2369,6 +2374,8 @@ def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), plot=True): path = self._path nwave = self._nwave pixel = self._pixel + orientation_offset = self._orientation_offset + center_guess = np.full((nwave, 2), self._default_center) frames_info = self._frames_info_preproc # start with OBJECT,FLUX @@ -2427,6 +2434,7 @@ def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), plot=True): save_path = None spot_center, spot_dist, img_center \ = toolbox.star_centers_from_waffle_img_cube(cube, wave_drh, 'IFS', waffle_orientation, + pixel, orientation_offset, center_guess, high_pass=high_pass, center_offset=offset, save_path=save_path) @@ -2542,8 +2550,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a science_dim = 290 # centering - # FIXME: store default center in IFS.ini? - centers_default = np.full((nwave, 2), 290//2) + centers_default = np.full((nwave, 2), self._default_center) if skip_center: print('Warning: images will not be fine centered. 
They will just be combined.') shift_method = 'roll' diff --git a/vltpf/instruments/IFS.ini b/vltpf/instruments/IFS.ini index a6e904e..2f87576 100644 --- a/vltpf/instruments/IFS.ini +++ b/vltpf/instruments/IFS.ini @@ -41,7 +41,7 @@ preproc_collapse_center = True # center center_high_pass = False -center_offset = (0, 0) +center_offset = (-3, 0) # combine combine_cpix = True From f9c98fdc722354c3c50ffd5188e859369e97c385 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Sun, 25 Aug 2019 15:57:10 +0200 Subject: [PATCH 033/101] Final details for implementation of ticket #60 - change order of parameters in star_centers_from_waffle_img_cube() - make sure that centers and wavelengths are stored as numpy arrays --- examples/ifs_reduction.py | 2 +- vltpf/IFS.py | 22 ++++++++++------------ vltpf/IRDIS/ImagingReduction.py | 13 ++++++------- vltpf/IRDIS/SpectroReduction.py | 6 +++--- vltpf/toolbox.py | 30 +++++++++++++----------------- 5 files changed, 33 insertions(+), 40 deletions(-) diff --git a/examples/ifs_reduction.py b/examples/ifs_reduction.py index c14907c..bebd435 100644 --- a/examples/ifs_reduction.py +++ b/examples/ifs_reduction.py @@ -17,7 +17,7 @@ #%% reduction reduction.full_reduction() -####################################################@ +#################################################### # manual reduction # diff --git a/vltpf/IFS.py b/vltpf/IFS.py index b6e6de9..cd1ceab 100644 --- a/vltpf/IFS.py +++ b/vltpf/IFS.py @@ -397,8 +397,8 @@ def __init__(self, path): self._nwave = int(config.get('instrument', 'nwave')) # calibration - self._wave_cal_lasers = eval(config.get('calibration', 'wave_cal_lasers')) - self._default_center = eval(config.get('calibration', 'default_center')) + self._wave_cal_lasers = np.array(eval(config.get('calibration', 'wave_cal_lasers'))) + self._default_center = np.array(eval(config.get('calibration', 'default_center'))) self._orientation_offset = eval(config.get('calibration', 'orientation_offset')) # reduction parameters @@ -2212,10 +2212,9 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot= else: save_path = None spot_center, spot_dist, img_center \ - = toolbox.star_centers_from_waffle_img_cube(cube, wave_drh, 'IFS', waffle_orientation, - pixel, orientation_offset, center_guess, - high_pass=high_pass, center_offset=offset, - coro=coro, save_path=save_path) + = toolbox.star_centers_from_waffle_img_cube(cube, wave_drh, waffle_orientation, center_guess, + pixel, orientation_offset, high_pass=high_pass, + center_offset=offset, coro=coro, save_path=save_path) # final scaling wave_scales = spot_dist / np.full((nwave, 6), spot_dist[0]) @@ -2325,7 +2324,7 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot= plt.xlabel('Spectral channel index') plt.ylabel('Scaling factor') plt.title('Spectral scaling') - plt.legend(loc='upper left') + plt.legend(loc='upper left', fontsize='x-small') plt.subplot(133) plt.plot(wave_drh, wave_flux, linestyle='dotted', color='k', label='Original') @@ -2335,7 +2334,7 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot= plt.xlabel(r'Wavelength [nm]') plt.xlim(wave_min-50, wave_max+50) plt.ylabel('Flux') - plt.legend(loc='upper right') + plt.legend(loc='upper right', fontsize='x-small') plt.title('Wavelength calibration') plt.tight_layout() @@ -2433,10 +2432,9 @@ def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), plot=True): else: save_path = None spot_center, spot_dist, img_center \ - = toolbox.star_centers_from_waffle_img_cube(cube, 
wave_drh, 'IFS', waffle_orientation, - pixel, orientation_offset, center_guess, - high_pass=high_pass, center_offset=offset, - save_path=save_path) + = toolbox.star_centers_from_waffle_img_cube(cube, wave_drh, waffle_orientation, center_guess, + pixel, orientation_offset, high_pass=high_pass, + center_offset=offset, save_path=save_path) # save fits.writeto(path.preproc / '{}centers.fits'.format(fname), img_center, overwrite=True) diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py index 9c0bca1..657437d 100644 --- a/vltpf/IRDIS/ImagingReduction.py +++ b/vltpf/IRDIS/ImagingReduction.py @@ -87,10 +87,10 @@ def __init__(self, path): self._nwave = 2 # calibration - self._wave_cal_lasers = eval(config.get('calibration', 'wave_cal_lasers')) + self._wave_cal_lasers = np.array(eval(config.get('calibration', 'wave_cal_lasers'))) # imaging calibration - self._default_center = eval(config.get('calibration-imaging', 'default_center')) + self._default_center = np.array(eval(config.get('calibration-imaging', 'default_center'))) self._orientation_offset = eval(config.get('calibration-imaging', 'orientation_offset')) # reduction parameters @@ -1169,7 +1169,7 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): path = self._path pixel = self._pixel orientation_offset = self._orientation_offset - center_guess = np.array(self._default_center) + center_guess = self._default_center frames_info = self._frames_info_preproc # wavelength @@ -1222,10 +1222,9 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): else: save_path = None spot_center, spot_dist, img_center \ - = toolbox.star_centers_from_waffle_img_cube(cube, wave, 'IRDIS', waffle_orientation, - pixel, orientation_offset, center_guess, - high_pass=high_pass, center_offset=offset, - coro=coro, save_path=save_path) + = toolbox.star_centers_from_waffle_img_cube(cube, wave, waffle_orientation, center_guess, + pixel, orientation_offset, high_pass=high_pass, + center_offset=offset, coro=coro, save_path=save_path) # save fits.writeto(path.preproc / '{}_centers.fits'.format(fname), img_center, overwrite=True) diff --git a/vltpf/IRDIS/SpectroReduction.py b/vltpf/IRDIS/SpectroReduction.py index 0b279e9..fd6aa28 100644 --- a/vltpf/IRDIS/SpectroReduction.py +++ b/vltpf/IRDIS/SpectroReduction.py @@ -129,14 +129,14 @@ def __init__(self, path): self._nwave = -1 # calibration - self._wave_cal_lasers = eval(config.get('calibration', 'wave_cal_lasers')) + self._wave_cal_lasers = np.array(eval(config.get('calibration', 'wave_cal_lasers'))) # spectro calibration - self._default_center_lrs = eval(config.get('calibration-spectro', 'default_center_lrs')) + self._default_center_lrs = np.array(eval(config.get('calibration-spectro', 'default_center_lrs'))) self._wave_min_lrs = eval(config.get('calibration-spectro', 'wave_min_lrs')) self._wave_max_lrs = eval(config.get('calibration-spectro', 'wave_max_lrs')) - self._default_center_mrs = eval(config.get('calibration-spectro', 'default_center_mrs')) + self._default_center_mrs = np.array(eval(config.get('calibration-spectro', 'default_center_mrs'))) self._wave_min_mrs = eval(config.get('calibration-spectro', 'wave_min_mrs')) self._wave_max_mrs = eval(config.get('calibration-spectro', 'wave_max_mrs')) diff --git a/vltpf/toolbox.py b/vltpf/toolbox.py index b7e9ec8..80767cc 100644 --- a/vltpf/toolbox.py +++ b/vltpf/toolbox.py @@ -580,37 +580,33 @@ def star_centers_from_PSF_lss_cube(cube, wave_cube, pixel, save_path=None): return psf_centers -def 
star_centers_from_waffle_img_cube(cube, wave, instrument, waffle_orientation, - pixel, orientation_offset, center_guess, - high_pass=False, center_offset=(0, 0), smooth=0, - coro=True, save_path=None): +def star_centers_from_waffle_img_cube(cube_cen, wave, waffle_orientation, center_guess, pixel, + orientation_offset, high_pass=False, center_offset=(0, 0), + smooth=0, coro=True, save_path=None): ''' Compute star center from waffle images (IRDIS CI, IRDIS DBI, IFS) Parameters ---------- - cube : array_like + cube_cen : array_like IRDIFS waffle cube wave : array_like Wavelength values, in nanometers - instrument : str - Instrument, IFS or IRDIS - waffle_orientation : str String giving the waffle orientation '+' or 'x' + center_guess : array + Estimation of the image center as a function of wavelength. + This should be an array of shape nwave*2. + pixel : float Pixel scale, in mas/pixel orientation_offset : float Field orientation offset, in degrees - center_guess : array - Estimation of the image center as a function of wavelength. - This should be an array of shape nwave*2. - high_pass : bool Apply high-pass filter to the image before searching for the satelitte spots. Default is False @@ -668,7 +664,7 @@ def star_centers_from_waffle_img_cube(cube, wave, instrument, waffle_orientation spot_centers = np.zeros((nwave, 4, 2)) spot_dist = np.zeros((nwave, 6)) img_centers = np.zeros((nwave, 2)) - for idx, (wave, img) in enumerate(zip(wave, cube)): + for idx, (wave, img) in enumerate(zip(wave, cube_cen)): print(' wave {0:2d}/{1:2d} ({2:.0f} nm)'.format(idx+1, nwave, wave)) # remove any NaN @@ -688,7 +684,7 @@ def star_centers_from_waffle_img_cube(cube, wave, instrument, waffle_orientation # mask for non-coronagraphic observations if not coro: - mask = aperture.disc(cube[0].shape[-1], 5*loD[idx], diameter=False, + mask = aperture.disc(cube_cen[0].shape[-1], 5*loD[idx], diameter=False, center=(cx_int, cy_int), invert=True) img *= mask @@ -788,7 +784,7 @@ def star_centers_from_waffle_img_cube(cube, wave, instrument, waffle_orientation return spot_centers, spot_dist, img_centers -def star_centers_from_waffle_lss_cube(cube_cen, cube_sci, wave_cube, centers, pixel, high_pass=False, +def star_centers_from_waffle_lss_cube(cube_cen, cube_sci, wave_cube, center_guess, pixel, high_pass=False, save_path=None): ''' Compute star center from waffle LSS spectra (IRDIS LSS) @@ -804,7 +800,7 @@ def star_centers_from_waffle_lss_cube(cube_cen, cube_sci, wave_cube, centers, pi wave_cube : array_like Wavelength values for each field, in nanometers - centers : tupple + center_guess : tupple Approximate center of the two fields pixel : float @@ -859,7 +855,7 @@ def star_centers_from_waffle_lss_cube(cube_cen, cube_sci, wave_cube, centers, pi img = img - ndimage.median_filter(img, 15, mode='mirror') # sub-image - cx_int = centers[fidx, 0] + cx_int = center_guess[fidx, 0] sub = img[:, cx_int-box:cx_int+box] xx = np.arange(2*box) From 386c19d43c8494134bfc52af7ddc6f20a9898db2 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Sun, 25 Aug 2019 16:35:03 +0200 Subject: [PATCH 034/101] Improve plots for centering of IRDIFS imaging data --- vltpf/toolbox.py | 59 ++++++++++++++++++++++++++++++++---------------- 1 file changed, 40 insertions(+), 19 deletions(-) diff --git a/vltpf/toolbox.py b/vltpf/toolbox.py index 80767cc..333ae45 100644 --- a/vltpf/toolbox.py +++ b/vltpf/toolbox.py @@ -15,6 +15,8 @@ from astropy.modeling import models, fitting from matplotlib.backends.backend_pdf import PdfPages +global_cmap = 'inferno' + def 
check_recipe_execution(recipe_execution, recipe_name, recipe_requirements): ''' @@ -463,20 +465,23 @@ def star_centers_from_PSF_img_cube(cube, wave, pixel, save_path=None): img_centers[idx, 1] = cy_final if save_path: - plt.figure('PSF center - imaging', figsize=(8, 8)) + plt.figure('PSF center - imaging', figsize=(8.3, 8)) plt.clf() plt.subplot(111) - plt.imshow(img/img.max(), aspect='equal', vmin=1e-6, vmax=1, norm=colors.LogNorm(), interpolation='nearest') - plt.plot([cx_final], [cy_final], marker='D', color='red') + plt.imshow(img/img.max(), aspect='equal', vmin=1e-6, vmax=1, norm=colors.LogNorm(), + interpolation='nearest', cmap=global_cmap) + plt.plot([cx_final], [cy_final], marker='D', color='blue') plt.gca().add_patch(patches.Rectangle((cx-box, cy-box), 2*box, 2*box, ec='white', fc='none')) plt.title(r'Image #{0} - {1:.0f} nm'.format(idx+1, wave)) ext = 1000 / pixel plt.xlim(cx_final-ext, cx_final+ext) + plt.xlabel('x position [pix]') plt.ylim(cy_final-ext, cy_final+ext) + plt.ylabel('y position [pix]') - plt.tight_layout() + plt.subplots_adjust(left=0.1, right=0.98, bottom=0.1, top=0.95) pdf.savefig() @@ -563,8 +568,10 @@ def star_centers_from_PSF_lss_cube(cube, wave_cube, pixel, save_path=None): if save_path: plt.subplot(1, 2, fidx+1) - plt.imshow(img/img.max(), aspect='equal', vmin=1e-3, vmax=1, norm=colors.LogNorm(), interpolation='nearest') - plt.plot(psf_centers[:, fidx], range(1024), marker='.', color='r', linestyle='none', ms=2, alpha=0.5) + plt.imshow(img/img.max(), aspect='equal', vmin=1e-3, vmax=1, norm=colors.LogNorm(), + interpolation='nearest', cmap=global_cmap) + plt.plot(psf_centers[:, fidx], range(1024), marker='.', color='r', linestyle='none', + ms=2, alpha=0.5) plt.title(r'Field #{0}'.format(fidx+1)) @@ -690,13 +697,21 @@ def star_centers_from_waffle_img_cube(cube_cen, wave, waffle_orientation, center # create plot if needed if save_path: - fig = plt.figure('Waffle center - imaging', figsize=(8, 8)) + fig = plt.figure('Waffle center - imaging', figsize=(8.3, 8)) plt.clf() - col = ['red', 'blue', 'magenta', 'purple'] + if high_pass: + norm = colors.SymLogNorm(1e-4) + else: + norm = colors.LogNorm() + + col = ['green', 'blue', 'deepskyblue', 'purple'] ax = fig.add_subplot(111) - ax.imshow(img/img.max(), aspect='equal', vmin=1e-2, vmax=1, norm=colors.LogNorm(), interpolation='nearest') + ax.imshow(img/img.max(), aspect='equal', vmin=1e-2, vmax=1, norm=norm, + interpolation='nearest', cmap=global_cmap) ax.set_title(r'Image #{0} - {1:.0f} nm'.format(idx+1, wave)) + ax.set_xlabel('x position [pix]') + ax.set_ylabel('y position [pix]') # satelitte spots for s in range(4): @@ -731,17 +746,19 @@ def star_centers_from_waffle_img_cube(cube_cen, wave, waffle_orientation, center # plot sattelite spots and fit if save_path: - ax.plot([cx_final], [cy_final], marker='D', color=col[s]) + ax.plot([cx_final], [cy_final], marker='D', color=col[s], zorder=1000) ax.add_patch(patches.Rectangle((cx-box, cy-box), 2*box, 2*box, ec='white', fc='none')) axs = fig.add_axes((0.17+s*0.2, 0.17, 0.1, 0.1)) - axs.imshow(sub, aspect='equal', vmin=0, vmax=sub.max(), interpolation='nearest') + axs.imshow(sub, aspect='equal', vmin=0, vmax=sub.max(), interpolation='nearest', + cmap=global_cmap) axs.plot([par[0].x_mean], [par[0].y_mean], marker='D', color=col[s]) axs.set_xticks([]) axs.set_yticks([]) axs = fig.add_axes((0.17+s*0.2, 0.06, 0.1, 0.1)) - axs.imshow(fit, aspect='equal', vmin=0, vmax=sub.max(), interpolation='nearest') + axs.imshow(fit, aspect='equal', vmin=0, vmax=sub.max(), 
interpolation='nearest', + cmap=global_cmap) axs.set_xticks([]) axs.set_yticks([]) @@ -762,10 +779,10 @@ def star_centers_from_waffle_img_cube(cube_cen, wave, waffle_orientation, center if save_path: ax.plot([spot_centers[idx, 0, 0], spot_centers[idx, 2, 0]], [spot_centers[idx, 0, 1], spot_centers[idx, 2, 1]], - color='w', linestyle='dashed') + color='w', linestyle='dashed', zorder=900) ax.plot([spot_centers[idx, 1, 0], spot_centers[idx, 3, 0]], [spot_centers[idx, 1, 1], spot_centers[idx, 3, 1]], - color='w', linestyle='dashed') + color='w', linestyle='dashed', zorder=900) ax.plot([intersect[0]], [intersect[1]], marker='+', color='w', ms=15) @@ -773,7 +790,7 @@ def star_centers_from_waffle_img_cube(cube_cen, wave, waffle_orientation, center ax.set_xlim(intersect[0]-ext, intersect[0]+ext) ax.set_ylim(intersect[1]-ext, intersect[1]+ext) - plt.tight_layout() + plt.subplots_adjust(left=0.1, right=0.98, bottom=0.1, top=0.95) if save_path: pdf.savefig() @@ -896,10 +913,14 @@ def star_centers_from_waffle_lss_cube(cube_cen, cube_sci, wave_cube, center_gues if save_path: plt.subplot(1, 2, fidx+1) - plt.imshow(img/img.max(), aspect='equal', vmin=-1e-2, vmax=1e-2, interpolation='nearest') - plt.plot(spot_centers[:, fidx, 0], range(1024), marker='.', color='r', linestyle='none', ms=2, alpha=1) - plt.plot(spot_centers[:, fidx, 1], range(1024), marker='.', color='r', linestyle='none', ms=2, alpha=1) - plt.plot(img_centers[:, fidx], range(1024), marker='.', color='r', linestyle='none', ms=2, alpha=1) + plt.imshow(img/img.max(), aspect='equal', vmin=-1e-2, vmax=1e-2, interpolation='nearest', + cmap=global_cmap) + plt.plot(spot_centers[:, fidx, 0], range(1024), marker='.', color='r', linestyle='none', + ms=2, alpha=1) + plt.plot(spot_centers[:, fidx, 1], range(1024), marker='.', color='r', linestyle='none', + ms=2, alpha=1) + plt.plot(img_centers[:, fidx], range(1024), marker='.', color='r', linestyle='none', + ms=2, alpha=1) plt.title(r'Field #{0}'.format(fidx+1)) From 86617ca5b7485ff25483abcf5c61310c4ca596df Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Sun, 25 Aug 2019 17:14:06 +0200 Subject: [PATCH 035/101] Improve plots for centering of IRDIS LSS data --- vltpf/toolbox.py | 54 +++++++++++++++++++++++++++++++++--------------- 1 file changed, 37 insertions(+), 17 deletions(-) diff --git a/vltpf/toolbox.py b/vltpf/toolbox.py index 333ae45..74f00d0 100644 --- a/vltpf/toolbox.py +++ b/vltpf/toolbox.py @@ -521,7 +521,7 @@ def star_centers_from_PSF_lss_cube(cube, wave_cube, pixel, save_path=None): # prepare plot if save_path: - plt.figure('PSF center - spectro', figsize=(7, 12)) + plt.figure('PSF center - spectro', figsize=(6, 12)) plt.clf() # loop over fiels and wavelengths @@ -568,20 +568,25 @@ def star_centers_from_PSF_lss_cube(cube, wave_cube, pixel, save_path=None): if save_path: plt.subplot(1, 2, fidx+1) - plt.imshow(img/img.max(), aspect='equal', vmin=1e-3, vmax=1, norm=colors.LogNorm(), + plt.imshow(img/img.max(), aspect='equal', vmin=1e-6, vmax=1, norm=colors.LogNorm(), interpolation='nearest', cmap=global_cmap) - plt.plot(psf_centers[:, fidx], range(1024), marker='.', color='r', linestyle='none', + plt.plot(psf_centers[:, fidx], range(1024), marker='.', color='dodgerblue', linestyle='none', ms=2, alpha=0.5) plt.title(r'Field #{0}'.format(fidx+1)) ext = 1000 / pixel plt.xlim(cx_int-ext, cx_int+ext) + plt.xlabel('x position [pix]') + plt.ylim(0, 1024) - + if fidx == 0: + plt.ylabel('y position [pix]') + else: + plt.gca().yaxis.set_ticklabels([]) if save_path: - plt.tight_layout() + 
plt.subplots_adjust(left=0.15, right=0.98, bottom=0.07, top=0.965, wspace=0.05)
         plt.savefig(save_path)
 
     return psf_centers
@@ -851,7 +856,7 @@ def star_centers_from_waffle_lss_cube(cube_cen, cube_sci, wave_cube, center_gues
 
     # prepare plot
     if save_path:
-        plt.figure('Waffle centering - spectro', figsize=(7, 12))
+        plt.figure('Waffle centering - spectro', figsize=(6, 12))
         plt.clf()
 
     # subtract science cube if provided
@@ -910,26 +915,41 @@ def star_centers_from_waffle_lss_cube(cube_cen, cube_sci, wave_cube, center_gues
             spot_dist[widx, fidx] = np.abs(c1-c0)
 
             img_centers[widx, fidx] = (c0 + c1) / 2
-
-        if save_path:
+
+        if save_path:
+            if high_pass or (cube_sci is not None):
+                norm = colors.PowerNorm(gamma=1)
+                vmin = -1e-2
+                vmax = 1e-2
+            else:
+                norm = colors.LogNorm()
+                vmin = 1e-5
+                vmax = 1
+
             plt.subplot(1, 2, fidx+1)
-            plt.imshow(img/img.max(), aspect='equal', vmin=-1e-2, vmax=1e-2, interpolation='nearest',
-                       cmap=global_cmap)
-            plt.plot(spot_centers[:, fidx, 0], range(1024), marker='.', color='r', linestyle='none',
-                     ms=2, alpha=1)
-            plt.plot(spot_centers[:, fidx, 1], range(1024), marker='.', color='r', linestyle='none',
-                     ms=2, alpha=1)
-            plt.plot(img_centers[:, fidx], range(1024), marker='.', color='r', linestyle='none',
-                     ms=2, alpha=1)
+            plt.imshow(img/img.max(), aspect='equal', vmin=vmin, vmax=vmax, interpolation='nearest',
+                       cmap=global_cmap, norm=norm)
+            plt.plot(spot_centers[:, fidx, 0], range(1024), marker='.', color='dodgerblue',
+                     linestyle='none', ms=2, alpha=1)
+            plt.plot(spot_centers[:, fidx, 1], range(1024), marker='.', color='dodgerblue',
+                     linestyle='none', ms=2, alpha=1)
+            plt.plot(img_centers[:, fidx], range(1024), marker='.', color='dodgerblue',
+                     linestyle='none', ms=2, alpha=1)
 
             plt.title(r'Field #{0}'.format(fidx+1))
 
             ext = 1000 / pixel
             plt.xlim(cx_int-ext, cx_int+ext)
+            plt.xlabel('x position [pix]')
             plt.ylim(0, 1024)
+            if fidx == 0:
+                plt.ylabel('y position [pix]')
+            else:
+                plt.gca().yaxis.set_ticklabels([])
 
     if save_path:
-        plt.tight_layout()
+        plt.subplots_adjust(left=0.15, right=0.98, bottom=0.07, top=0.965, wspace=0.05)
         plt.savefig(save_path)
 
     return spot_centers, spot_dist, img_centers
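This patch and the next one settle on the same display rule: high-pass filtered
(or science-subtracted) images are roughly zero-mean, so a linear PowerNorm
stretch around zero replaces the logarithmic stretch used for positive-only
coronagraphic images. A standalone sketch of that rule (matplotlib only; the
zero-mean test image is a placeholder):

    import numpy as np
    import matplotlib.pyplot as plt
    import matplotlib.colors as colors

    high_pass = True
    img = np.random.normal(0, 0.03, (128, 128))   # placeholder zero-mean image

    if high_pass:
        # high-pass filtered images contain negative values: linear stretch
        norm = colors.PowerNorm(gamma=1, vmin=-1e-1, vmax=1e-1)
    else:
        # raw coronagraphic images are positive-only: logarithmic stretch
        norm = colors.LogNorm(vmin=1e-2, vmax=1)

    plt.imshow(img, norm=norm, cmap='inferno', interpolation='nearest')
    plt.colorbar()
    plt.show()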
From eba55a8ea7aac74b8051e822f760c79edcbde4fe Mon Sep 17 00:00:00 2001
From: Arthur Vigan
Date: Sun, 25 Aug 2019 17:19:40 +0200
Subject: [PATCH 037/101] Improve plots for IRDIFS imaging data

---
 examples/irdis_imaging_reduction.py |  2 +-
 vltpf/toolbox.py                    | 12 ++++++++----
 2 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/examples/irdis_imaging_reduction.py b/examples/irdis_imaging_reduction.py
index d647289..a38f216 100644
--- a/examples/irdis_imaging_reduction.py
+++ b/examples/irdis_imaging_reduction.py
@@ -40,7 +40,7 @@
                                collapse_psf=True, collapse_center=True)
 
 #%% high-level science processing
-reduction.sph_ird_star_center(high_pass=False, offset=(0, 0), plot=True)
+reduction.sph_ird_star_center(high_pass=True, offset=(0, 0), plot=True)
 reduction.sph_ird_combine_data(cpix=True, psf_dim=80, science_dim=200, correct_anamorphism=True,
                                shift_method='interp', manual_center=None, skip_center=False,
                                save_scaled=False)
diff --git a/vltpf/toolbox.py b/vltpf/toolbox.py
index 74f00d0..11c403a 100644
--- a/vltpf/toolbox.py
+++ b/vltpf/toolbox.py
@@ -706,13 +706,17 @@ def star_centers_from_waffle_img_cube(cube_cen, wave, waffle_orientation, center
             plt.clf()
 
             if high_pass:
-                norm = colors.SymLogNorm(1e-4)
+                norm = colors.PowerNorm(gamma=1)
+                vmin = -1e-1
+                vmax = 1e-1
             else:
                 norm = colors.LogNorm()
+                vmin = 1e-2
+                vmax = 1
 
             col = ['green', 'blue', 'deepskyblue', 'purple']
             ax = fig.add_subplot(111)
-            ax.imshow(img/img.max(), aspect='equal', vmin=1e-2, vmax=1, norm=norm,
+            ax.imshow(img/img.max(), aspect='equal', vmin=vmin, vmax=vmax, norm=norm,
                       interpolation='nearest', cmap=global_cmap)
             ax.set_title(r'Image #{0} - {1:.0f} nm'.format(idx+1, wave))
             ax.set_xlabel('x position [pix]')
@@ -919,8 +923,8 @@ def star_centers_from_waffle_lss_cube(cube_cen, cube_sci, wave_cube, center_gues
         if save_path:
             if high_pass or (cube_sci is not None):
                 norm = colors.PowerNorm(gamma=1)
-                vmin = -1e-2
-                vmax = 1e-2
+                vmin = -1e-1
+                vmax = 1e-1
             else:
                 norm = colors.LogNorm()
                 vmin = 1e-5
From cf3f2b3cc342a0501632891de0334301032f59a4 Mon Sep 17 00:00:00 2001
From: Arthur Vigan
Date: Sun, 25 Aug 2019 17:31:08 +0200
Subject: [PATCH 038/101] Improve wavelength recalibration plot for IRDIS LSS

---
 vltpf/IRDIS/SpectroReduction.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/vltpf/IRDIS/SpectroReduction.py b/vltpf/IRDIS/SpectroReduction.py
index fd6aa28..68e8301 100644
--- a/vltpf/IRDIS/SpectroReduction.py
+++ b/vltpf/IRDIS/SpectroReduction.py
@@ -1549,6 +1549,7 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True):
                 plt.ylabel('Wavelength r[nm]')
                 plt.title('Field #{}'.format(fidx))
                 plt.xlim(1024, 0)
+                plt.gca().xaxis.set_ticklabels([])
 
                 plt.subplot(212)
                 plt.axvline(imin, color='k', linestyle='--')
@@ -1558,7 +1559,7 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True):
                 plt.xlabel('Detector coordinate [pix]')
                 plt.xlim(1024, 0)
 
-                plt.tight_layout()
+                plt.subplots_adjust(left=0.13, right=0.97, bottom=0.08, top=0.96, hspace=0.05)
 
                 pdf.savefig()
From 13d8534a5567f913555212749e5350a32504a4fd Mon Sep 17 00:00:00 2001
From: Arthur Vigan
Date: Tue, 27 Aug 2019 19:48:10 +0200
Subject: [PATCH 039/101] Start implementation of better centering keywords

Ticket #68
- 
changed skip_center ==> coarse centering - added FIXME comments in preparation for future work --- examples/ifs_reduction.py | 2 +- examples/irdis_imaging_reduction.py | 2 +- examples/irdis_spectro_reduction.py | 2 +- vltpf/IFS.py | 24 ++++++++++++---------- vltpf/IRDIS/ImagingReduction.py | 26 ++++++++++++++---------- vltpf/IRDIS/SpectroReduction.py | 31 +++++++++++++++++++---------- vltpf/instruments/IFS.ini | 2 +- vltpf/instruments/IRDIS.ini | 4 ++-- 8 files changed, 56 insertions(+), 37 deletions(-) diff --git a/examples/ifs_reduction.py b/examples/ifs_reduction.py index bebd435..8aff319 100644 --- a/examples/ifs_reduction.py +++ b/examples/ifs_reduction.py @@ -47,7 +47,7 @@ reduction.sph_ifs_wavelength_recalibration(high_pass=True, offset=(-3, 0), plot=True) reduction.sph_ifs_star_center(high_pass=True, offset=(-3, 0), plot=True) reduction.sph_ifs_combine_data(cpix=True, psf_dim=80, science_dim=200, correct_anamorphism=True, - shift_method='interp', manual_center=None, skip_center=False, + shift_method='interp', manual_center=None, coarse_centering=False, save_scaled=False) #%% cleaning diff --git a/examples/irdis_imaging_reduction.py b/examples/irdis_imaging_reduction.py index a38f216..97ac150 100644 --- a/examples/irdis_imaging_reduction.py +++ b/examples/irdis_imaging_reduction.py @@ -42,7 +42,7 @@ #%% high-level science processing reduction.sph_ird_star_center(high_pass=True, offset=(0, 0), plot=True) reduction.sph_ird_combine_data(cpix=True, psf_dim=80, science_dim=200, correct_anamorphism=True, - shift_method='interp', manual_center=None, skip_center=False, + shift_method='interp', manual_center=None, coarse_centering=False, save_scaled=False) #%% cleaning diff --git a/examples/irdis_spectro_reduction.py b/examples/irdis_spectro_reduction.py index 940ef41..599ebd5 100644 --- a/examples/irdis_spectro_reduction.py +++ b/examples/irdis_spectro_reduction.py @@ -42,7 +42,7 @@ reduction.sph_ird_wavelength_recalibration(fit_scaling=True, plot=True) reduction.sph_ird_combine_data(cpix=True, psf_dim=80, science_dim=300, correct_mrs_chromatism=True, split_posang=True, - shift_method='fft', manual_center=None, skip_center=False) + shift_method='fft', manual_center=None, coarse_centering=False) #%% cleaning reduction.sph_ird_clean(delete_raw=False, delete_products=False) diff --git a/vltpf/IFS.py b/vltpf/IFS.py index cd1ceab..e0841e3 100644 --- a/vltpf/IFS.py +++ b/vltpf/IFS.py @@ -600,7 +600,7 @@ def process_science(self): science_dim=config['combine_science_dim'], correct_anamorphism=config['combine_correct_anamorphism'], manual_center=config['combine_manual_center'], - skip_center=config['combine_skip_center'], + coarse_centering=config['combine_coarse_centering'], shift_method=config['combine_shift_method'], save_scaled=config['combine_save_scaled']) @@ -2445,7 +2445,7 @@ def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), plot=True): def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_anamorphism=True, - shift_method='fft', manual_center=None, skip_center=False, save_scaled=False): + shift_method='fft', manual_center=None, coarse_centering=False, save_scaled=False): '''Combine and save the science data into final cubes All types of data are combined independently: PSFs @@ -2472,8 +2472,8 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a plan to perform spectral differential imaging in your analysis. - The method also save a frames.csv file with all the - information extracted the raw files headers. 
+ FIXME: proper documentation for centering. Ticket #68 + Centering: by default data are finely centered Parameters ---------- @@ -2497,10 +2497,10 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a User provided centers for the OBJECT,CENTER and OBJECT frames. This should be an array of either 2 or nwavex2 values. If a manual center is provided, the value of - skip_center is ignored for the OBJECT,CENTER and OBJECT + coarse_centering is ignored for the OBJECT,CENTER and OBJECT frames. Default is None - skip_center : bool + coarse_centering : bool Control if images are finely centered or not before being combined. However the images are still roughly centered by shifting them by an integer number of pixel to bring the @@ -2508,7 +2508,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a option is useful if fine centering must be done afterwards. - Default is False. Note that if skip_center is + Default is False. Note that if coarse_centering is True, the save_scaled option is automatically disabled. shift_method : str @@ -2548,8 +2548,9 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a science_dim = 290 # centering + # FIXME: better handling and documentation of centering keywords. Ticket #68 centers_default = np.full((nwave, 2), self._default_center) - if skip_center: + if coarse_centering: print('Warning: images will not be fine centered. They will just be combined.') shift_method = 'roll' @@ -2608,7 +2609,8 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # center frames for wave_idx, img in enumerate(cube): - if skip_center: + # FIXME: better handling and documentation of centering keywords. Ticket #68 + if coarse_centering: cx, cy = centers_default[wave_idx, :] else: cx, cy = centers[wave_idx, :] @@ -2690,10 +2692,11 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # center frames for wave_idx, img in enumerate(cube): + # FIXME: better handling and documentation of centering keywords. Ticket #68 if manual_center is not None: cx, cy = manual_center[wave_idx, :] else: - if skip_center: + if coarse_centering: cx, cy = centers_default[wave_idx, :] else: cx, cy = centers[wave_idx, :] @@ -2747,6 +2750,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a 'They will just be combined.') # choose between manual center or default centers + # FIXME: better handling and documentation of centering keywords. 
Ticket #68
         if manual_center is not None:
             centers = manual_center
         else:
diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py
index 657437d..e016eb1 100644
--- a/vltpf/IRDIS/ImagingReduction.py
+++ b/vltpf/IRDIS/ImagingReduction.py
@@ -18,7 +18,7 @@
 import vltpf
 import vltpf.utils as utils
 import vltpf.utils.imutils as imutils
 import vltpf.utils.aperture as aperture
 import vltpf.transmission as transmission
 import vltpf.toolbox as toolbox
@@ -275,7 +275,7 @@ def process_science(self):
                               science_dim=config['combine_science_dim'],
                               correct_anamorphism=config['combine_correct_anamorphism'],
                               manual_center=config['combine_manual_center'],
-                              skip_center=config['combine_skip_center'],
+                              coarse_centering=config['combine_coarse_centering'],
                               shift_method=config['combine_shift_method'],
                               save_scaled=config['combine_save_scaled'])
@@ -1235,7 +1235,7 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True):
 
     def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_anamorphism=True,
-                             shift_method='fft', manual_center=None, skip_center=False, save_scaled=False):
+                             shift_method='fft', manual_center=None, coarse_centering=False, save_scaled=False):
         '''Combine and save the science data into final cubes
 
         All types of data are combined independently: PSFs
@@ -1262,6 +1262,9 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a
         plan to perform spectral differential imaging in your
         analysis.
 
+        FIXME: proper documentation for centering. Ticket #68
+        Centering: by default data are finely centered
+
         Parameters
         ----------
         cpix : bool
@@ -1284,10 +1287,10 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a
             User provided centers for the OBJECT,CENTER and OBJECT
             frames. This should be an array of 2x2 values (cx,cy for
             the 2 wavelengths). If a manual center is provided, the
-            value of skip_center is ignored for the OBJECT,CENTER and
+            value of coarse_centering is ignored for the OBJECT,CENTER and
             OBJECT frames. Default is None
 
-        skip_center : bool
+        coarse_centering : bool
             Control if images are finely centered or not before being
             combined. However the images are still roughly centered by
             shifting them by an integer number of pixel to bring the
@@ -1331,9 +1334,9 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a
             science_dim = 1024
 
         # centering
-        # FIXME: store default center in IRDIS.ini?
+        # FIXME: better handling and documentation of centering keywords. Ticket #68
         centers_default = self._default_center
-        if skip_center:
+        if coarse_centering:
             print('Warning: images will not be fine centered. They will just be combined.')
             shift_method = 'roll'
@@ -1385,7 +1388,8 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a
 
                 # center frames
                 for wave_idx, img in enumerate(cube):
-                    if skip_center:
+                    # FIXME: better handling and documentation of centering keywords. Ticket #68
+                    if coarse_centering:
                         cx, cy = centers_default[wave_idx, :]
                     else:
                         cx, cy = centers[wave_idx, :]
@@ -1463,10 +1467,11 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a
 
                 # center frames
                 for wave_idx, img in enumerate(cube):
+                    # FIXME: better handling and documentation of centering keywords.
Ticket #68 if manual_center is not None: cx, cy = manual_center[wave_idx, :] else: - if skip_center: + if coarse_centering: cx, cy = centers_default[wave_idx, :] else: cx, cy = centers[wave_idx, :] @@ -1515,11 +1520,12 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # in the sequence, but it would be better to be able to # select which CENTER to use starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] - if (len(starcen_files) == 0) or skip_center or (manual_center is not None): + if (len(starcen_files) == 0) or coarse_centering or (manual_center is not None): print('Warning: no OBJECT,CENTER file in the data set. Images cannot be accurately centred. ' + 'They will just be combined.') # choose between manual center or default centers + # FIXME: better handling and documentation of centering keywords. Ticket #68 if manual_center is not None: centers = manual_center else: diff --git a/vltpf/IRDIS/SpectroReduction.py b/vltpf/IRDIS/SpectroReduction.py index 68e8301..84ce15b 100644 --- a/vltpf/IRDIS/SpectroReduction.py +++ b/vltpf/IRDIS/SpectroReduction.py @@ -333,7 +333,7 @@ def process_science(self): split_posang=config['combine_split_posang'], shift_method=config['combine_shift_method'], manual_center=config['combine_manual_center'], - skip_center=config['combine_skip_center']) + coarse_centering=config['combine_coarse_centering']) def clean(self): ''' @@ -1576,7 +1576,7 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_mrs_chromatism=True, - split_posang=True, shift_method='fft', manual_center=None, skip_center=False): + split_posang=True, shift_method='fft', manual_center=None, coarse_centering=False): '''Combine and save the science data into final cubes All types of data are combined independently: PSFs @@ -1591,7 +1591,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m For each type of data, the method saves 3 different files: - - *_cube: the (x,y,time) cube + - *_cube: the (x,y,time,nfield) cube - *_posang: the position angle vector. @@ -1599,7 +1599,8 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m frames. There is one line by time step in the data cube. - Data are save separately for each field. + FIXME: proper documentation for centering. Ticket #68 + Centering: by default data are finely centered Parameters ---------- @@ -1631,10 +1632,10 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m User provided spatial center for the OBJECT,CENTER and OBJECT frames. This should be an array of 2 values (cx for the 2 IRDIS fields). If a manual center is provided, the - value of skip_center is ignored for the OBJECT,CENTER and + value of coarse_centering is ignored for the OBJECT,CENTER and OBJECT frames. Default is None - skip_center : bool + coarse_centering : bool Control if images are finely centered or not before being combined. However the images are still roughly centered by shifting them by an integer number of pixel to bring the @@ -1696,8 +1697,9 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m science_dim = 1024 # centering + # FIXME: better handling and documentation of centering keywords. Ticket #68 centers_default = centers[:, 0] - if skip_center: + if coarse_centering: print('Warning: images will not be fine centered. 
They will just be combined.') shift_method = 'roll' @@ -1748,6 +1750,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m ciwave = iwave[:, field_idx] if correct_mrs_chromatism and (filter_comb == 'S_MR'): + # FIXME: better handling and documentation of centering keywords. Ticket #68 img = img.astype(np.float) for wave_idx, widx in enumerate(ciwave): cx = centers[widx, field_idx] @@ -1758,7 +1761,8 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m psf_cube[field_idx, file_idx, wave_idx] = nimg[:psf_dim] else: - if skip_center: + # FIXME: better handling and documentation of centering keywords. Ticket #68 + if coarse_centering: cx = centers_default[field_idx] else: cx = centers[ciwave, field_idx].mean() @@ -1836,6 +1840,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m ciwave = iwave[:, field_idx] if correct_mrs_chromatism and (filter_comb == 'S_MR'): + # FIXME: better handling and documentation of centering keywords. Ticket #68 img = img.astype(np.float) for wave_idx, widx in enumerate(ciwave): cx = centers[widx, field_idx] @@ -1846,7 +1851,8 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m cen_cube[field_idx, file_idx, wave_idx] = nimg[:science_dim] else: - if skip_center: + # FIXME: better handling and documentation of centering keywords. Ticket #68 + if coarse_centering: cx = centers_default[field_idx] else: cx = centers[ciwave, field_idx].mean() @@ -1903,11 +1909,12 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m # in the sequence, but it would be better to be able to # select which CENTER to use starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] - if (len(starcen_files) == 0) or skip_center or (manual_center is not None): + if (len(starcen_files) == 0) or coarse_centering or (manual_center is not None): print('Warning: no OBJECT,CENTER file in the data set. Images cannot be accurately centred. ' + 'They will just be combined.') # choose between manual center or default centers + # FIXME: better handling and documentation of centering keywords. Ticket #68 if manual_center is not None: centers = manual_center else: @@ -1940,6 +1947,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m ciwave = iwave[:, field_idx] if correct_mrs_chromatism and (filter_comb == 'S_MR'): + # FIXME: better handling and documentation of centering keywords. Ticket #68 img = img.astype(np.float) for wave_idx, widx in enumerate(ciwave): cx = centers[widx, field_idx] @@ -1950,7 +1958,8 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m sci_cube[field_idx, file_idx, wave_idx] = nimg[:science_dim] else: - if skip_center: + # FIXME: better handling and documentation of centering keywords. 
Ticket #68 + if coarse_centering: cx = centers_default[field_idx] else: cx = centers[ciwave, field_idx].mean() diff --git a/vltpf/instruments/IFS.ini b/vltpf/instruments/IFS.ini index 2f87576..17ad14e 100644 --- a/vltpf/instruments/IFS.ini +++ b/vltpf/instruments/IFS.ini @@ -49,7 +49,7 @@ combine_psf_dim = 80 combine_science_dim = 290 combine_correct_anamorphism = True combine_manual_center = None -combine_skip_center = False +combine_coarse_centering = False combine_shift_method = fft combine_save_scaled = False diff --git a/vltpf/instruments/IRDIS.ini b/vltpf/instruments/IRDIS.ini index fcb861e..70c368c 100644 --- a/vltpf/instruments/IRDIS.ini +++ b/vltpf/instruments/IRDIS.ini @@ -66,7 +66,7 @@ combine_psf_dim = 100 combine_science_dim = 800 combine_correct_anamorphism = True combine_manual_center = None -combine_skip_center = False +combine_coarse_centering = False combine_shift_method = fft combine_save_scaled = False @@ -100,7 +100,7 @@ combine_science_dim = 800 combine_correct_mrs_chromatism = True combine_split_posang = True combine_manual_center = None -combine_skip_center = False +combine_coarse_centering = False combine_shift_method = fft # clean From 3e2061acc32a1a967c8c75a7079f1b0db4714c9f Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Mon, 2 Sep 2019 20:38:30 +0200 Subject: [PATCH 040/101] Improve wavelength calibration files handling --- vltpf/IRDIS/SpectroReduction.py | 46 ++++++++++++++++++++++++++++----- 1 file changed, 40 insertions(+), 6 deletions(-) diff --git a/vltpf/IRDIS/SpectroReduction.py b/vltpf/IRDIS/SpectroReduction.py index 84ce15b..4395da5 100644 --- a/vltpf/IRDIS/SpectroReduction.py +++ b/vltpf/IRDIS/SpectroReduction.py @@ -85,6 +85,11 @@ class SpectroReduction(object): 'sph_ird_star_center': ['sort_files', 'sort_frames', 'sph_ird_wave_calib'], 'sph_ird_wavelength_recalibration': ['sort_files', 'sort_frames', 'sph_ird_wave_calib', 'sph_ird_star_center'], + # FIXME: sph_ird_star_center and + # sph_ird_wavelength_recalibration could probably be removed + # from this list as they are not strictly required to combine + # the data at least at a basic level. 
To be tested + Ticket #68 'sph_ird_combine_data': ['sort_files', 'sort_frames', 'sph_ird_preprocess_science', 'sph_ird_star_center', 'sph_ird_wavelength_recalibration'] } @@ -161,7 +166,11 @@ def __init__(self, path): 'check_files_association': False, 'sph_ifs_cal_dark': False, 'sph_ifs_cal_detector_flat': False, - 'sph_ird_wave_calib': False + 'sph_ird_wave_calib': False, + 'sph_ird_preprocess_science': False, + 'sph_ird_star_center': False, + 'sph_ird_wavelength_recalibration': False, + 'sph_ird_combine_data': False } # reload any existing data frames @@ -444,7 +453,10 @@ def read_info(self): # additional checks to update recipe execution if frames_info_preproc is not None: self._recipe_execution['sph_ird_wavelength_recalibration'] \ - = (path.preproc / 'wavelength_final.fits').exists() + = (path.preproc / 'wavelength_default.fits').exists() + + self._recipe_execution['sph_ird_wavelength_recalibration'] \ + = (path.preproc / 'wavelength_recalibrated.fits').exists() done = True files = frames_info_preproc.index @@ -1457,6 +1469,15 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): files_info = self._files_info frames_info = self._frames_info_preproc + # remove old files + wfile = path.preproc / 'wavelength_default.fits' + if wfile.exists(): + wfile.unlink() + + wfile = path.preproc / 'wavelength_recalibrated.fits' + if wfile.exists(): + wfile.unlink() + # resolution-specific parameters filter_comb = frames_info['INS COMB IFLT'].unique()[0] if filter_comb == 'S_LR': @@ -1482,7 +1503,11 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): if len(starcen_files) == 0: print(' ==> no OBJECT,CENTER file in the data set. Wavelength cannot be recalibrated. ' + 'The standard wavelength calibrated by the ESO pipeline will be used.') - fits.writeto(path.preproc / 'wavelength_final.fits', wave_lin, overwrite=True) + fits.writeto(path.preproc / 'wavelength_default.fits', wave_lin, overwrite=True) + + # update recipe execution + self._recipe_execution['sph_ird_wavelength_recalibration'] = True + return fname = '{0}_DIT{1:03d}_preproc_spot_distance'.format(starcen_files.index.values[0][0], starcen_files.index.values[0][1]) @@ -1568,8 +1593,7 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): # save print(' * saving') - fits.writeto(path.preproc / 'wavelength_final.fits', wave_final, overwrite=True) - + fits.writeto(path.preproc / 'wavelength_recalibrated.fits', wave_final, overwrite=True) # update recipe execution self._recipe_execution['sph_ird_wavelength_recalibration'] = True @@ -1669,9 +1693,19 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m wave_min = self._wave_min_mrs wave_max = self._wave_max_mrs + # read final wavelength calibration + wfile = path.preproc / 'wavelength_recalibrated.fits' + if wfile.exists(): + wave = fits.getdata(wfile) + else: + wfile = path.preproc / 'wavelength_default.fits' + if wfile.exists(): + wave = fits.getdata(wfile) + else: + raise FileNotFoundError('Missing wavelength_default.fits or wavelength_recalibrated.fits files.
You must first run the sph_ird_wavelength_recalibration() method.') + # wavelength solution: make sure we have the same number of # wave points in each field - wave = fits.getdata(path.preproc / 'wavelength_final.fits') mask = ((wave_min <= wave) & (wave <= wave_max)) iwave0 = np.where(mask[:, 0])[0] iwave1 = np.where(mask[:, 1])[0] From 021a2fe7f7a2b006bee6d6632306f05f13d59a56 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Mon, 2 Sep 2019 22:19:18 +0200 Subject: [PATCH 041/101] Major update of the centering keywords - improve keywords, documentation and code implementation for centering (ticket #68) - update recipe requirements - handling of wavelength calibration files --- vltpf/IFS.py | 186 +++++++++++++++++++++++++++++++++------------ 1 file changed, 121 insertions(+), 65 deletions(-) diff --git a/vltpf/IFS.py b/vltpf/IFS.py index e0841e3..b006ffb 100644 --- a/vltpf/IFS.py +++ b/vltpf/IFS.py @@ -352,9 +352,9 @@ class Reduction(object): 'sph_ifs_preprocess_science', 'sph_ifs_preprocess_wave'], 'sph_ifs_wavelength_recalibration': ['sort_files', 'sort_frames', 'sph_ifs_preprocess_wave', 'sph_ifs_science_cubes'], - 'sph_ifs_star_center': ['sort_files', 'sort_frames', 'sph_ifs_science_cubes'], + 'sph_ifs_star_center': ['sort_files', 'sort_frames', 'sph_ifs_science_cubes'], 'sph_ifs_combine_data': ['sort_files', 'sort_frames', 'sph_ifs_science_cubes', - 'sph_ifs_wavelength_recalibration', 'sph_ifs_star_center'] + 'sph_ifs_wavelength_recalibration'] } ################################################## @@ -721,7 +721,7 @@ def read_info(self): = (path.preproc / '{}_preproc.fits'.format(wave_file.index[0])).exists() self._recipe_execution['sph_ifs_wavelength_recalibration'] \ - = (path.products / 'wavelength.fits').exists() + = (path.preproc / 'wavelength_recalibrated.fits').exists() if frames_info_preproc is not None: done = True @@ -2164,6 +2164,15 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), displ files_info = self._files_info frames_info = self._frames_info_preproc + # remove old files + wfile = path.preproc / 'wavelength_default.fits' + if wfile.exists(): + wfile.unlink() + + wfile = path.preproc / 'wavelength_recalibrated.fits' + if wfile.exists(): + wfile.unlink() + # # DRH wavelength # @@ -2189,7 +2198,11 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), displ if len(starcen_files) == 0: print(' ==> no OBJECT,CENTER file in the data set. Wavelength cannot be recalibrated. ' + 'The standard wavelength calibrated by the ESO pipeline will be used.') - fits.writeto(path.products / 'wavelength.fits', wave_drh, overwrite=True) + fits.writeto(path.preproc / 'wavelength_default.fits', wave_drh, overwrite=True) + + # update recipe execution + self._recipe_execution['sph_ifs_wavelength_recalibration'] = True + return ifs_mode = starcen_files['INS2 COMB IFS'].values[0] @@ -2301,7 +2314,7 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), displ # save print(' * saving') - fits.writeto(path.products / 'wavelength.fits', wave_final, overwrite=True) + fits.writeto(path.preproc / 'wavelength_recalibrated.fits', wave_final, overwrite=True) # # summary plot # @@ -2472,15 +2485,37 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a plan to perform spectral differential imaging in your analysis. - FIXME: proper documentation for centering.
Ticket #68 - Centering: by default data are finely centered + Centering + --------- + + By default, a fine (sub-pixel) centering is performed if + an OBJECT,CENTER frame was acquired in the sequence or if + there is a valid user-provided center. However, if the + coarse_centering keyword is set to True, only a "coarse + centering" is performed, which requires no interpolation: + + - only integer shifts (shift_method='roll') + - centering on an integer pixel (cpix=True) + - no correction of the anamorphism (correct_anamorphism=False) + - no saving of the rescaled frames (save_scaled=False) + + This option is useful if the user wants to perform a + posteriori centering of the frames, e.g. to fully preserve + photometry. + + If there was no OBJECT,CENTER acquired in the sequence, then + the centering will be performed with respect to a default, + pre-defined center that a representative of the typical center + of the coronagraph. Parameters ---------- cpix : bool If True the images are centered on the pixel at coordinate - (dim//2,dim//2). If False the images are centered between 4 - pixels, at coordinates ((dim-1)/2,(dim-1)/2). Default is True. + (dim//2,dim//2). If False the images are centered between + 4 pixels, at coordinates ((dim-1)/2,(dim-1)/2). The value + of cpix is automatically set to True when coarse_centering + is set to True. Default is True. psf_dim : even int Size of the PSF images. Default is 80x80 pixels science_dim : even int Size of the science images (star centers and standard coronagraphic images). Default is 290, 290 pixels correct_anamorphism : bool - Correct the optical anamorphism of the instrument. Default - is True. See user manual for details. + Correct the optical anamorphism of the instrument (see + user manual for details). The value of correct_anamorphism + is automatically set to False when coarse_centering is set + to True. Default is True. manual_center : array User provided centers for the OBJECT,CENTER and OBJECT - frames. This should be an array of either 2 or nwavex2 - values. If a manual center is provided, the value of - coarse_centering is ignored for the OBJECT,CENTER and OBJECT - frames. Default is None + frames. This should be an array of either 2 or nwavex2 + values. For OBJECT,FLUX frames, the PSF is always + recentered. Default is None coarse_centering : bool Control if images are finely centered or not before being combined. However the images are still roughly centered by shifting them by an integer number of pixel to bring the center of the data close to the center of the images. This option is useful if fine centering must be done - afterwards. - - Default is False. Note that if coarse_centering is - True, the save_scaled option is automatically disabled. + afterwards. Default is False. shift_method : str Method to scaling and shifting the images: fft or interp. save_scaled : bool Also save the wavelength-rescaled cubes. Makes the process - much longer. The default is False + much longer. The value of save_scaled is automatically set + to False when coarse_centering is set to True. The default + is False ''' # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_combine_data', self.recipe_requirements) print('Combining data') path = self._path nwave = self._nwave frames_info = self._frames_info_preproc # read final wavelength calibration - fname = path.products / 'wavelength.fits' - if not fname.exists(): - raise FileExistsError('Missing wavelength.fits file. ' + - 'You must first run the sph_ifs_wavelength_recalibration() method.') - wave = fits.getdata(fname) - + wfile = path.preproc / 'wavelength_recalibrated.fits' + if wfile.exists(): + wave = fits.getdata(wfile) + else: + wfile = path.preproc / 'wavelength_default.fits' + if wfile.exists(): + wave = fits.getdata(wfile) + else: + raise FileNotFoundError('Missing wavelength_default.fits or wavelength_recalibrated.fits files.
You must first run the sph_ifs_wavelength_recalibration() method.') + fits.writeto(path.products / 'wavelength.fits', wave, overwrite=True) + # max images size if psf_dim > 290: print('Warning: psf_dim cannot be larger than 290 pix. A value of 290 will be used.') @@ -2547,22 +2590,24 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a print('Warning: science_dim cannot be larger than 290 pix. A value of 290 will be used.') science_dim = 290 - # centering - # FIXME: better handling and documentation of centering keywords. Ticket #68 - centers_default = np.full((nwave, 2), self._default_center) + # centering configuration if coarse_centering: - print('Warning: images will not be fine centered. They will just be combined.') + print('Warning: images will be coarsely centered without any interpolation. Automatic settings for coarse centering: shift_method=\'roll\', cpix=True, correct_anamorphism=False, save_scaled=False') shift_method = 'roll' + cpix = True + correct_anamorphism = False + save_scaled = False if manual_center is not None: manual_center = np.array(manual_center) - if (manual_center.shape != (2,)) or (manual_center.shape != (nwave, 2)): + + if (manual_center.shape != (2,)) and (manual_center.shape != (nwave, 2)): raise ValueError('manual_center does not have the right number of dimensions.') if manual_center.shape == (2,): manual_center = np.full((nwave, 2), manual_center) - print('Warning: images will be centered at the user-provided values.') + print('Warning: images will be centered using the user-provided center ({},{})'.format(*manual_center[0])) # # OBJECT,FLUX # flux_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,FLUX'] nfiles = len(flux_files) if nfiles != 0: print(' * OBJECT,FLUX data') # final arrays psf_cube = np.zeros((nwave, nfiles, psf_dim, psf_dim)) psf_parang = np.zeros(nfiles) psf_derot = np.zeros(nfiles) if save_scaled: psf_cube_scaled = np.zeros((nwave, nfiles, psf_dim, psf_dim)) # final center if cpix: cc = psf_dim // 2 else: cc = (psf_dim - 1) / 2 # read and combine files for file_idx, (file, idx) in enumerate(flux_files.index): print(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(flux_files), file, idx)) # read data fname = '{0}_DIT{1:03d}_preproc_'.format(file, idx) files = list(path.preproc.glob(fname+'?????.fits')) cube = fits.getdata(files[0]) - centers = fits.getdata(path.preproc / '{}centers.fits'.format(fname)) + + # centers + cfile = path.preproc / '{}centers.fits'.format(fname) + if cfile.exists(): + centers = fits.getdata(cfile) + else: + print('Warning: sph_ifs_star_center() has not been executed. Images will be centered using default center ({},{})'.format(*self._default_center)) + centers = np.full((nwave, 2), self._default_center) + # make sure we have only integers if user wants coarse centering + if coarse_centering: + centers = centers.astype(np.int) + # mask values outside of IFS FoV cube[cube == 0] = np.nan # neutral density ND = frames_info.loc[(file, idx), 'INS4 FILT2 NAME'] w, attenuation = transmission.transmission_nd(ND, wave=wave) # DIT, angles, etc DIT = frames_info.loc[(file, idx), 'DET SEQ1 DIT'] psf_parang[file_idx] = frames_info.loc[(file, idx), 'PARANG'] psf_derot[file_idx] = frames_info.loc[(file, idx), 'DEROT ANGLE'] # center frames for wave_idx, img in enumerate(cube): - # FIXME: better handling and documentation of centering keywords.
Ticket #68 - if coarse_centering: - cx, cy = centers_default[wave_idx, :] - else: - cx, cy = centers[wave_idx, :] + cx, cy = centers[wave_idx, :].astype(np.int) img = img[:-1, :-1].astype(np.float) nimg = imutils.shift(img, (cc-cx, cc-cy), method=shift_method) @@ -2646,7 +2698,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a del psf_cube_scaled print() - + # # OBJECT,CENTER # @@ -2672,12 +2724,20 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a for file_idx, (file, idx) in enumerate(starcen_files.index): print(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(starcen_files), file, idx)) - # read data - fname = '{0}_DIT{1:03d}_preproc_'.format(file, idx) - files = list(path.preproc.glob(fname+'?????.fits')) - cube = fits.getdata(files[0]) - centers = fits.getdata(path.preproc / '{}centers.fits'.format(fname)) - + # use manual center if explicitely requested + if manual_center is not None: + centers = manual_center + else: + # otherwise read center data + fname = '{0}_DIT{1:03d}_preproc_'.format(file, idx) + files = list(path.preproc.glob(fname+'?????.fits')) + cube = fits.getdata(files[0]) + centers = fits.getdata(path.preproc / '{}centers.fits'.format(fname)) + + # make sure we have only integers if user wants coarse centering + if coarse_centering: + centers = centers.astype(np.int) + # mask values outside of IFS FoV cube[cube == 0] = np.nan @@ -2692,14 +2752,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # center frames for wave_idx, img in enumerate(cube): - # FIXME: better handling and documentation of centering keywords. Ticket #68 - if manual_center is not None: - cx, cy = manual_center[wave_idx, :] - else: - if coarse_centering: - cx, cy = centers_default[wave_idx, :] - else: - cx, cy = centers[wave_idx, :] + cx, cy = centers[wave_idx, :] img = img[:-1, :-1].astype(np.float) nimg = imutils.shift(img, (cc-cx, cc-cy), method=shift_method) @@ -2741,23 +2794,26 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a if nfiles != 0: print(' * OBJECT data') - # FIXME: ticket #12. Use first DIT of first OBJECT,CENTER - # in the sequence, but it would be better to be able to - # select which CENTER to use - starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] - if len(starcen_files) == 0 or (manual_center is not None): - print('Warning: no OBJECT,CENTER file in the data set. Images cannot be accurately centred. ' + - 'They will just be combined.') - - # choose between manual center or default centers - # FIXME: better handling and documentation of centering keywords. Ticket #68 - if manual_center is not None: - centers = manual_center - else: - centers = centers_default + # use manual center if explicitely requested + if manual_center is not None: + centers = manual_center else: - fname = '{0}_DIT{1:03d}_preproc_centers.fits'.format(starcen_files.index.values[0][0], starcen_files.index.values[0][1]) - centers = fits.getdata(path.preproc / fname) + # otherwise, look whether we have an OBJECT,CENTER frame + + # FIXME: ticket #12. Use first DIT of first OBJECT,CENTER + # in the sequence, but it would be better to be able to + # select which CENTER to use + starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] + if len(starcen_files) == 0: + print('Warning: no OBJECT,CENTER file in the dataset. 
Images will be centered using default center ({},{})'.format(*self._default_center)) + centers = np.full((nwave, 2), self._default_center) + else: + fname = '{0}_DIT{1:03d}_preproc_centers.fits'.format(starcen_files.index.values[0][0], starcen_files.index.values[0][1]) + centers = fits.getdata(path.preproc / fname) + + # make sure we have only integers if user wants coarse centering + if coarse_centering: + centers = centers.astype(np.int) # final center if cpix: From e0c3dba8dada2416bb7ca18e0a7c4342c206b785 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Mon, 2 Sep 2019 22:26:37 +0200 Subject: [PATCH 042/101] Update missing recipe names in recipe_execution --- vltpf/IRDIS/ImagingReduction.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py index e016eb1..a285575 100644 --- a/vltpf/IRDIS/ImagingReduction.py +++ b/vltpf/IRDIS/ImagingReduction.py @@ -111,7 +111,12 @@ def __init__(self, path): self._recipe_execution = { 'sort_files': False, 'sort_frames': False, - 'check_files_association': False + 'check_files_association': False, + 'sph_ird_cal_dark': False, + 'sph_ird_cal_detector_flat': False, + 'sph_ird_preprocess_science': False, + 'sph_ird_star_center': False, + 'sph_ird_combine_data': False } # reload any existing data frames From c8799e8462d4ac4d95e6f456b59fdb8b59a10c62 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Mon, 2 Sep 2019 22:31:37 +0200 Subject: [PATCH 043/101] Fix missing recipe execution check --- vltpf/IFS.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/vltpf/IFS.py b/vltpf/IFS.py index b006ffb..891423b 100644 --- a/vltpf/IFS.py +++ b/vltpf/IFS.py @@ -720,6 +720,9 @@ def read_info(self): self._recipe_execution['sph_ifs_preprocess_wave'] \ = (path.preproc / '{}_preproc.fits'.format(wave_file.index[0])).exists() + self._recipe_execution['sph_ifs_wavelength_recalibration'] \ + = (path.preproc / 'wavelength_default.fits').exists() + self._recipe_execution['sph_ifs_wavelength_recalibration'] \ = (path.preproc / 'wavelength_recalibrated.fits').exists() From bebeb93ccdbd4bba58048983e7b58ec31298ada2 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Tue, 3 Sep 2019 09:02:54 +0200 Subject: [PATCH 044/101] Fix documentation Ticket #68 --- vltpf/IFS.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/vltpf/IFS.py b/vltpf/IFS.py index 891423b..644a56b 100644 --- a/vltpf/IFS.py +++ b/vltpf/IFS.py @@ -2545,10 +2545,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a shifting them by an integer number of pixel to bring the center of the data close to the center of the images. This option is useful if fine centering must be done - afterwards. - - Default is False. Note that if coarse_centering is - True, the save_scaled option is automatically disabled. + afterwards. Default is False. shift_method : str Method to scaling and shifting the images: fft or interp. 
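For reference, the centering keywords reworked by the patches above are driven from a user script. A minimal usage sketch, assuming an IFS Reduction object created as in the package examples (the path and all keyword values below are placeholders rather than recommendations):

import vltpf.IFS as IFS

# create the reduction object (path is a placeholder)
reduction = IFS.Reduction('/path/to/target/IFS/')

# default behaviour: fine, sub-pixel centering with FFT-based shifts
reduction.sph_ifs_combine_data(cpix=True, psf_dim=80, science_dim=290,
                               correct_anamorphism=True, shift_method='fft',
                               manual_center=None, coarse_centering=False,
                               save_scaled=False)

# coarse centering: integer shifts only; shift_method, cpix,
# correct_anamorphism and save_scaled are then forced internally
reduction.sph_ifs_combine_data(science_dim=290, coarse_centering=True)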
From 2c529d8801a250ba389642294f6cfa4a974f5e0b Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Tue, 3 Sep 2019 09:33:24 +0200 Subject: [PATCH 045/101] Fix wrong conversion to int of the centers --- vltpf/IFS.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vltpf/IFS.py b/vltpf/IFS.py index 644a56b..a9243bb 100644 --- a/vltpf/IFS.py +++ b/vltpf/IFS.py @@ -2665,7 +2665,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # center frames for wave_idx, img in enumerate(cube): - cx, cy = centers[wave_idx, :].astype(np.int) + cx, cy = centers[wave_idx, :] img = img[:-1, :-1].astype(np.float) nimg = imutils.shift(img, (cc-cx, cc-cy), method=shift_method) From 22f9b900a7d87c0ccd3653e99531f85d65317d4d Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Tue, 3 Sep 2019 09:33:53 +0200 Subject: [PATCH 046/101] Fix misplaced code to read the data cubes --- vltpf/IFS.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/vltpf/IFS.py b/vltpf/IFS.py index a9243bb..f1366ca 100644 --- a/vltpf/IFS.py +++ b/vltpf/IFS.py @@ -2724,14 +2724,16 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a for file_idx, (file, idx) in enumerate(starcen_files.index): print(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(starcen_files), file, idx)) + # read data + fname = '{0}_DIT{1:03d}_preproc_'.format(file, idx) + files = list(path.preproc.glob(fname+'?????.fits')) + cube = fits.getdata(files[0]) + # use manual center if explicitely requested if manual_center is not None: centers = manual_center else: # otherwise read center data - fname = '{0}_DIT{1:03d}_preproc_'.format(file, idx) - files = list(path.preproc.glob(fname+'?????.fits')) - cube = fits.getdata(files[0]) centers = fits.getdata(path.preproc / '{}centers.fits'.format(fname)) # make sure we have only integers if user wants coarse centering From bec73eace26c585275f019e84688e6477da88ba6 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Tue, 3 Sep 2019 09:36:09 +0200 Subject: [PATCH 047/101] Major update of the centering keywords Improve keywords, documentation and code implementation for centering (ticket #68) --- vltpf/IRDIS/ImagingReduction.py | 175 +++++++++++++++++++++----------- 1 file changed, 113 insertions(+), 62 deletions(-) diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py index a285575..5a324c9 100644 --- a/vltpf/IRDIS/ImagingReduction.py +++ b/vltpf/IRDIS/ImagingReduction.py @@ -18,7 +18,7 @@ import vltpf import vltpf.utils as utils -import vltpf.utils.imutils as imutilséé +import vltpf.utils.imutils as imutils import vltpf.utils.aperture as aperture import vltpf.transmission as transmission import vltpf.toolbox as toolbox @@ -42,9 +42,10 @@ class ImagingReduction(object): 'check_files_association': ['sort_files'], 'sph_ird_cal_dark': ['sort_files'], 'sph_ird_cal_detector_flat': ['sort_files'], - 'sph_ird_preprocess_science': ['sort_files', 'sort_frames', 'sph_ird_cal_dark', 'sph_ird_cal_detector_flat'], + 'sph_ird_preprocess_science': ['sort_files', 'sort_frames', 'sph_ird_cal_dark', + 'sph_ird_cal_detector_flat'], 'sph_ird_star_center': ['sort_files', 'sort_frames', 'sph_ird_preprocess_science'], - 'sph_ird_combine_data': ['sort_files', 'sort_frames', 'sph_ird_preprocess_science', 'sph_ird_star_center'] + 'sph_ird_combine_data': ['sort_files', 'sort_frames', 'sph_ird_preprocess_science'] } ################################################## @@ -1267,15 +1268,37 @@ def sph_ird_combine_data(self, cpix=True, 
psf_dim=80, science_dim=290, correct_a plan to perform spectral differential imaging in your analysis. - FIXME: proper documentation for centering. Ticket #68 - Centering: by default data are finely centered + Centering + --------- + + By default, a fine (sub-pixel) centering is performed if + an OBJECT,CENTER frame was acquired in the sequence or if + there is a valid user-provided center. However, if the + coarse_centering keyword is set to True, only a "coarse + centering" is performed, which requires no interpolation: + + - only integer shifts (shift_method='roll') + - centering on an integer pixel (cpix=True) + - no correction of the anamorphism (correct_anamorphism=False) + - no saving of the rescaled frames (save_scaled=False) + + This option is useful if the user wants to perform a + posteriori centering of the frames, e.g. to fully preserve + photometry. + + If there was no OBJECT,CENTER acquired in the sequence, then + the centering will be performed with respect to a default, + pre-defined center that a representative of the typical center + of the coronagraph. Parameters ---------- cpix : bool If True the images are centered on the pixel at coordinate - (dim//2,dim//2). If False the images are centered between 4 - pixels, at coordinates ((dim-1)/2,(dim-1)/2). Default is True. + (dim//2,dim//2). If False the images are centered between + 4 pixels, at coordinates ((dim-1)/2,(dim-1)/2). The value + of cpix is automatically set to True when coarse_centering + is set to True. Default is True. psf_dim : even int Size of the PSF images. Default is 80x80 pixels science_dim : even int Size of the science images (star centers and standard coronagraphic images). Default is 290, 290 pixels correct_anamorphism : bool - Correct the optical anamorphism of the instrument. Default - is True. See user manual for details. + Correct the optical anamorphism of the instrument (see + user manual for details). The value of correct_anamorphism + is automatically set to False when coarse_centering is set + to True. Default is True. manual_center : array User provided centers for the OBJECT,CENTER and OBJECT - frames. This should be an array of 2x2 values (cx,cy for - the 2 wavelengths). If a manual center is provided, the - value of coarse_centering is ignored for the OBJECT,CENTER and - OBJECT frames. Default is None + frames. This should be an array of either 2 or nwavex2 + values. For OBJECT,FLUX frames, the PSF is always + recentered. Default is None coarse_centering : bool Control if images are finely centered or not before being combined. However the images are still roughly centered by shifting them by an integer number of pixel to bring the center of the data close to the center of the images. This - option is useful if fine centering must be done afterwards. + option is useful if fine centering must be done + afterwards. Default is False. shift_method : str Method to scaling and shifting the images: fft or interp. save_scaled : bool Also save the wavelength-rescaled cubes. Makes the process - much longer. The default is False - + much longer. The value of save_scaled is automatically set + to False when coarse_centering is set to True. The default + is False ''' # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_combine_data', self.recipe_requirements) print('Combining data') path = self._path nwave = self._nwave frames_info = self._frames_info_preproc # filter combination filter_comb = frames_info['INS COMB IFLT'].unique()[0] wave, bandwidth = transmission.wavelength_bandwidth_filter(filter_comb) wave = np.array(wave) / 1000. # max images size if psf_dim > 1024: print('Warning: psf_dim cannot be larger than 1024 pix. A value of 1024 will be used.') psf_dim = 1024 if science_dim > 1024: print('Warning: science_dim cannot be larger than 1024 pix.
A value of 1024 will be used.') science_dim = 1024 - # centering - # FIXME: better handling and documentation of centering keywords. Ticket #68 - centers_default = self._default_center + # centering configuration if coarse_centering: - print('Warning: images will not be fine centered. They will just be combined.') + print('Warning: images will be coarsely centered without any interpolation. Automatic settings for coarse centering: shift_method=\'roll\', cpix=True, correct_anamorphism=False, save_scaled=False') shift_method = 'roll' + cpix = True + correct_anamorphism = False + save_scaled = False if manual_center is not None: manual_center = np.array(manual_center) - if manual_center.shape != (2, 2): + + if (manual_center.shape != (2,)) and (manual_center.shape != (nwave, 2)): raise ValueError('manual_center does not have the right number of dimensions.') - print('Warning: images will be centered at the user-provided values.') + if manual_center.shape == (2,): + manual_center = np.full((nwave, 2), manual_center) + + print('Warning: images will be centered using the user-provided center ({},{})'.format(*manual_center[0])) # # OBJECT,FLUX @@ -1380,7 +1411,17 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) cube = fits.getdata(path.preproc / '{}.fits'.format(fname)) - centers = fits.getdata(path.preproc / '{}_centers.fits'.format(fname)) + + cfile = path.preproc / '{}_centers.fits'.format(fname) + if cfile.exists(): + centers = fits.getdata(cfile) + else: + print('Warning: sph_ifs_star_center() has not been executed. Images will be centered using default center ({},{})'.format(*self._default_center)) + centers = np.full((nwave, 2), self._default_center) + + # make sure we have only integers if user wants coarse centering + if coarse_centering: + centers = centers.astype(np.int) # neutral density ND = frames_info.loc[(file, idx), 'INS4 FILT2 NAME'] @@ -1393,11 +1434,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # center frames for wave_idx, img in enumerate(cube): - # FIXME: better handling and documentation of centering keywords. Ticket #68 - if coarse_centering: - cx, cy = centers_default[wave_idx, :] - else: - cx, cy = centers[wave_idx, :] + cx, cy = centers[wave_idx, :] img = img.astype(np.float) nimg = imutils.shift(img, (cc-cx, cc-cy), method=shift_method) @@ -1459,8 +1496,18 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) cube = fits.getdata(path.preproc / '{}.fits'.format(fname)) - centers = fits.getdata(path.preproc / '{}_centers.fits'.format(fname)) + # use manual center if explicitely requested + if manual_center is not None: + centers = manual_center + else: + # otherwise read center data + centers = fits.getdata(path.preproc / '{}_centers.fits'.format(fname)) + + # make sure we have only integers if user wants coarse centering + if coarse_centering: + centers = centers.astype(np.int) + # neutral density ND = frames_info.loc[(file, idx), 'INS4 FILT2 NAME'] w, attenuation = transmission.transmission_nd(ND, wave=wave) @@ -1472,14 +1519,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # center frames for wave_idx, img in enumerate(cube): - # FIXME: better handling and documentation of centering keywords. 
Ticket #68 - if manual_center is not None: - cx, cy = manual_center[wave_idx, :] - else: - if coarse_centering: - cx, cy = centers_default[wave_idx, :] - else: - cx, cy = centers[wave_idx, :] + cx, cy = centers[wave_idx, :] img = img.astype(np.float) nimg = imutils.shift(img, (cc-cx, cc-cy), method=shift_method) @@ -1521,32 +1561,36 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a if nfiles != 0: print(' * OBJECT data') - # FIXME: ticket #12. Use first DIT of first OBJECT,CENTER - # in the sequence, but it would be better to be able to - # select which CENTER to use - starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] - if (len(starcen_files) == 0) or coarse_centering or (manual_center is not None): - print('Warning: no OBJECT,CENTER file in the data set. Images cannot be accurately centred. ' + - 'They will just be combined.') - - # choose between manual center or default centers - # FIXME: better handling and documentation of centering keywords. Ticket #68 - if manual_center is not None: - centers = manual_center - else: - centers = centers_default - - # null value for Dithering Motion Stage - dms_dx_ref = 0 - dms_dy_ref = 0 + # use manual center if explicitely requested + if manual_center is not None: + centers = manual_center else: - fname = '{0}_DIT{1:03d}_preproc_centers.fits'.format(starcen_files.index.values[0][0], starcen_files.index.values[0][1]) - centers = fits.getdata(path.preproc / fname) - - # Dithering Motion Stage for star center: value is in micron, - # and the pixel size is 18 micron - dms_dx_ref = starcen_files['INS1 PAC X'][0] / 18 - dms_dy_ref = starcen_files['INS1 PAC Y'][0] / 18 + # otherwise, look whether we have an OBJECT,CENTER frame + + # FIXME: ticket #12. Use first DIT of first OBJECT,CENTER + # in the sequence, but it would be better to be able to + # select which CENTER to use + starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] + if len(starcen_files) == 0: + print('Warning: no OBJECT,CENTER file in the dataset. 
Images will be centered using default center ({},{})'.format(*self._default_center)) + centers = np.full((nwave, 2), self._default_center) + + # null value for Dithering Motion Stage + dms_dx_ref = 0 + dms_dy_ref = 0 + else: + fname = '{0}_DIT{1:03d}_preproc_centers.fits'.format(starcen_files.index.values[0][0], starcen_files.index.values[0][1]) + centers = fits.getdata(path.preproc / fname) + fname = '{0}_DIT{1:03d}_preproc_centers.fits'.format(starcen_files.index.values[0][0], starcen_files.index.values[0][1]) + + # Dithering Motion Stage for star center: value is in micron, + # and the pixel size is 18 micron + dms_dx_ref = starcen_files['INS1 PAC X'][0] / 18 + dms_dy_ref = starcen_files['INS1 PAC Y'][0] / 18 + + # make sure we have only integers if user wants coarse centering + if coarse_centering: + centers = centers.astype(np.int) # final center if cpix: cc = science_dim // 2 else: cc = (science_dim - 1) / 2 # loop on science files for file_idx, (file, idx) in enumerate(object_files.index): print(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(object_files), file, idx)) # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) cube = fits.getdata(path.preproc / '{}.fits'.format(fname)) # neutral density ND = frames_info.loc[(file, idx), 'INS4 FILT2 NAME'] w, attenuation = transmission.transmission_nd(ND, wave=wave) # DIT, angles, etc DIT = frames_info.loc[(file, idx), 'DET SEQ1 DIT'] sci_parang[file_idx] = frames_info.loc[(file, idx), 'PARANG'] sci_derot[file_idx] = frames_info.loc[(file, idx), 'DEROT ANGLE'] # Dithering Motion Stage for star center: value is in micron, # and the pixel size is 18 micron dms_dx = frames_info.loc[(file, idx), 'INS1 PAC X'] / 18 dms_dy = frames_info.loc[(file, idx), 'INS1 PAC Y'] / 18 + # make sure we have only integers if user wants coarse centering + if coarse_centering: + dms_dx = np.int(dms_dx) + dms_dy = np.int(dms_dy) + # center frames for wave_idx, img in enumerate(cube): cx, cy = centers[wave_idx, :] # DMS contribution cx = cx + dms_dx_ref + dms_dx cy = cy + dms_dy_ref + dms_dy + print(cx, cy) + img = img.astype(np.float) nimg = imutils.shift(img, (cc-cx, cc-cy), method=shift_method) nimg = nimg / DIT / attenuation[wave_idx] From 29abefb262f3e3a2e3b959bc97dc74f1fb460746 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Tue, 3 Sep 2019 09:49:55 +0200 Subject: [PATCH 048/101] Fix documentation Ticket #68 --- vltpf/IFS.py | 4 ++-- vltpf/IRDIS/ImagingReduction.py | 5 ++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/vltpf/IFS.py b/vltpf/IFS.py index f1366ca..31f99e9 100644 --- a/vltpf/IFS.py +++ b/vltpf/IFS.py @@ -2508,8 +2508,8 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a If there was no OBJECT,CENTER acquired in the sequence, then the centering will be performed with respect to a default, - pre-defined center that a representative of the typical center - of the coronagraph. + pre-defined center that is representative of the typical + center of the coronagraph. Parameters ---------- diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py index 5a324c9..fc551e2 100644 --- a/vltpf/IRDIS/ImagingReduction.py +++ b/vltpf/IRDIS/ImagingReduction.py @@ -1288,8 +1288,8 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a If there was no OBJECT,CENTER acquired in the sequence, then the centering will be performed with respect to a default, - pre-defined center that a representative of the typical center - of the coronagraph. + pre-defined center that is representative of the typical + center of the coronagraph. Parameters ---------- @@ -1336,6 +1336,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a much longer. The value of save_scaled is automatically set to False when coarse_centering is set to True.
The default is False + ''' # check if recipe can be executed From 96e19d863aae3889a6174a834f434d34bf44cdff Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Tue, 3 Sep 2019 11:09:59 +0200 Subject: [PATCH 049/101] Fix small issues with the new centering scheme Ticket #68 --- vltpf/IFS.py | 10 +++++++-- vltpf/IRDIS/ImagingReduction.py | 37 +++++++++++++++++++-------------- 2 files changed, 29 insertions(+), 18 deletions(-) diff --git a/vltpf/IFS.py b/vltpf/IFS.py index 31f99e9..6357da8 100644 --- a/vltpf/IFS.py +++ b/vltpf/IFS.py @@ -2704,7 +2704,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] nfiles = len(starcen_files) - if nfiles != 0: + if (nfiles != 0) and (self._recipe_execution['sph_ird_star_center']): print(' * OBJECT,CENTER data') # final arrays @@ -2811,7 +2811,13 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a centers = np.full((nwave, 2), self._default_center) else: fname = '{0}_DIT{1:03d}_preproc_centers.fits'.format(starcen_files.index.values[0][0], starcen_files.index.values[0][1]) - centers = fits.getdata(path.preproc / fname) + fpath = path.preproc / fname + + if fpath.exists(): + centers = fits.getdata(fpath) + else: + print('Warning: sph_ifs_star_center() has not been executed. Images will be centered using default center ({},{})'.format(*self._default_center)) + centers = np.full((nwave, 2), self._default_center) # make sure we have only integers if user wants coarse centering if coarse_centering: diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py index fc551e2..136698b 100644 --- a/vltpf/IRDIS/ImagingReduction.py +++ b/vltpf/IRDIS/ImagingReduction.py @@ -1417,8 +1417,8 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a if cfile.exists(): centers = fits.getdata(cfile) else: - print('Warning: sph_ifs_star_center() has not been executed. Images will be centered using default center ({},{})'.format(*self._default_center)) - centers = np.full((nwave, 2), self._default_center) + print('Warning: sph_ird_star_center() has not been executed. Images will be centered using default center ({},{})'.format(*self._default_center)) + centers = self._default_center # make sure we have only integers if user wants coarse centering if coarse_centering: @@ -1474,7 +1474,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] nfiles = len(starcen_files) - if nfiles != 0: + if (nfiles != 0) and (self._recipe_execution['sph_ird_star_center']): print(' * OBJECT,CENTER data') # final arrays @@ -1562,6 +1562,10 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a if nfiles != 0: print(' * OBJECT data') + # null value for Dithering Motion Stage by default + dms_dx_ref = 0 + dms_dy_ref = 0 + # use manual center if explicitely requested if manual_center is not None: centers = manual_center @@ -1574,24 +1578,27 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] if len(starcen_files) == 0: print('Warning: no OBJECT,CENTER file in the dataset. 
Images will be centered using default center ({},{})'.format(*self._default_center)) - centers = np.full((nwave, 2), self._default_center) - - # null value for Dithering Motion Stage - dms_dx_ref = 0 - dms_dy_ref = 0 + centers = self._default_center else: fname = '{0}_DIT{1:03d}_preproc_centers.fits'.format(starcen_files.index.values[0][0], starcen_files.index.values[0][1]) - centers = fits.getdata(path.preproc / fname) - fname = '{0}_DIT{1:03d}_preproc_centers.fits'.format(starcen_files.index.values[0][0], starcen_files.index.values[0][1]) + fpath = path.preproc / fname + if fpath.exists(): + centers = fits.getdata(fpath) - # Dithering Motion Stage for star center: value is in micron, - # and the pixel size is 18 micron - dms_dx_ref = starcen_files['INS1 PAC X'][0] / 18 - dms_dy_ref = starcen_files['INS1 PAC Y'][0] / 18 + # Dithering Motion Stage for star center: value is in micron, + # and the pixel size is 18 micron + dms_dx_ref = starcen_files['INS1 PAC X'][0] / 18 + dms_dy_ref = starcen_files['INS1 PAC Y'][0] / 18 + else: + print('Warning: sph_ird_star_center() has not been executed. Images will be centered using default center ({},{})'.format(*self._default_center)) + centers = self._default_center + # make sure we have only integers if user wants coarse centering if coarse_centering: centers = centers.astype(np.int) + dms_dx_ref = np.int(dms_dx_ref) + dms_dy_ref = np.int(dms_dy_ref) # final center if cpix: @@ -1642,8 +1649,6 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a cx = cx + dms_dx_ref + dms_dx cy = cy + dms_dy_ref + dms_dy - print(cx, cy) - img = img.astype(np.float) nimg = imutils.shift(img, (cc-cx, cc-cy), method=shift_method) nimg = nimg / DIT / attenuation[wave_idx] From ce133d6be4f378974f7492d56caf9f354aca4b66 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Tue, 3 Sep 2019 11:53:34 +0200 Subject: [PATCH 050/101] New version of module imutils --- vltpf/utils/imutils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vltpf/utils/imutils.py b/vltpf/utils/imutils.py index 0c8e475..0bb1cdc 100644 --- a/vltpf/utils/imutils.py +++ b/vltpf/utils/imutils.py @@ -154,7 +154,7 @@ def shift(array, shift_value, method='fft', mode='constant', cval=0): elif isinstance(shift_value, (int, float)): shift_value = np.full(Ndim, shift_value) else: - raise ValueError('Shift value of type \'{0}\' is not allowed'.format(type(shift).__name__)) + raise ValueError('Shift value of type \'{0}\' is not allowed'.format(type(shift_value).__name__)) # check if shift values are int and automatically change method in case they are if (shift_value.dtype.kind == 'i'): From 25b155a81dffe42a2093ffbcc34875a72188f0d9 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Tue, 3 Sep 2019 13:57:19 +0200 Subject: [PATCH 051/101] Make sure manual_center is of type np.float Ticket #68 --- vltpf/IFS.py | 8 ++++---- vltpf/IRDIS/ImagingReduction.py | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/vltpf/IFS.py b/vltpf/IFS.py index 6357da8..6574329 100644 --- a/vltpf/IFS.py +++ b/vltpf/IFS.py @@ -2605,7 +2605,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a raise ValueError('manual_center does not have the right number of dimensions.') if manual_center.shape == (2,): - manual_center = np.full((nwave, 2), manual_center) + manual_center = np.full((nwave, 2), manual_center, dtype=np.float) print('Warning: images will be centered using the user-provided center ({},{})'.format(*manual_center[0])) @@ -2645,7 +2645,7 @@ 
def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a centers = fits.getdata(cfile) else: print('Warning: sph_ifs_star_center() has not been executed. Images will be centered using default center ({},{})'.format(*self._default_center)) - centers = np.full((nwave, 2), self._default_center) + centers = np.full((nwave, 2), self._default_center, dtype=np.float) # make sure we have only integers if user wants coarse centering if coarse_centering: @@ -2808,7 +2808,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] if len(starcen_files) == 0: print('Warning: no OBJECT,CENTER file in the dataset. Images will be centered using default center ({},{})'.format(*self._default_center)) - centers = np.full((nwave, 2), self._default_center) + centers = np.full((nwave, 2), self._default_center, dtype=np.float) else: fname = '{0}_DIT{1:03d}_preproc_centers.fits'.format(starcen_files.index.values[0][0], starcen_files.index.values[0][1]) fpath = path.preproc / fname @@ -2817,7 +2817,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a centers = fits.getdata(fpath) else: print('Warning: sph_ifs_star_center() has not been executed. Images will be centered using default center ({},{})'.format(*self._default_center)) - centers = np.full((nwave, 2), self._default_center) + centers = np.full((nwave, 2), self._default_center, dtype=np.float) # make sure we have only integers if user wants coarse centering if coarse_centering: diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py index 136698b..8b7de46 100644 --- a/vltpf/IRDIS/ImagingReduction.py +++ b/vltpf/IRDIS/ImagingReduction.py @@ -1380,7 +1380,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a raise ValueError('manual_center does not have the right number of dimensions.') if manual_center.shape == (2,): - manual_center = np.full((nwave, 2), manual_center) + manual_center = np.full((nwave, 2), manual_center, dtype=np.float) print('Warning: images will be centered using the user-provided center ({},{})'.format(*manual_center[0])) From 636715b59ddaf92c4d18339d6a55a7e437fbaae7 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Tue, 3 Sep 2019 13:58:02 +0200 Subject: [PATCH 052/101] Remove check of recipe execution Ticket #68 --- vltpf/IFS.py | 2 +- vltpf/IRDIS/ImagingReduction.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/vltpf/IFS.py b/vltpf/IFS.py index 6574329..ab34b4c 100644 --- a/vltpf/IFS.py +++ b/vltpf/IFS.py @@ -2704,7 +2704,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] nfiles = len(starcen_files) - if (nfiles != 0) and (self._recipe_execution['sph_ird_star_center']): + if nfiles != 0: print(' * OBJECT,CENTER data') # final arrays diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py index 8b7de46..07fa744 100644 --- a/vltpf/IRDIS/ImagingReduction.py +++ b/vltpf/IRDIS/ImagingReduction.py @@ -1474,7 +1474,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] nfiles = len(starcen_files) - if (nfiles != 0) and (self._recipe_execution['sph_ird_star_center']): + if nfiles != 0: print(' * OBJECT,CENTER data') # final arrays From 2833b81c0fcf98a6fed7186fd969810520eee90b Mon 
Sep 17 00:00:00 2001 From: Arthur Vigan Date: Tue, 3 Sep 2019 13:58:42 +0200 Subject: [PATCH 053/101] Major update of the centering keywords Improve keywords, documentation and code implementation for centering (ticket #68) --- vltpf/IRDIS/SpectroReduction.py | 150 ++++++++++++++++++-------------- 1 file changed, 85 insertions(+), 65 deletions(-) diff --git a/vltpf/IRDIS/SpectroReduction.py b/vltpf/IRDIS/SpectroReduction.py index 4395da5..de36fa5 100644 --- a/vltpf/IRDIS/SpectroReduction.py +++ b/vltpf/IRDIS/SpectroReduction.py @@ -83,15 +83,9 @@ class SpectroReduction(object): 'sph_ird_preprocess_science': ['sort_files', 'sort_frames', 'sph_ird_cal_dark', 'sph_ird_cal_detector_flat'], 'sph_ird_star_center': ['sort_files', 'sort_frames', 'sph_ird_wave_calib'], - 'sph_ird_wavelength_recalibration': ['sort_files', 'sort_frames', 'sph_ird_wave_calib', - 'sph_ird_star_center'], - # FIXME: sph_ird_star_center and - # sph_ird_wavelength_recalibration could probably be removed - # from this list as they are not strictly required to combine - # the data at least at a basic level. To be tested - # Ticket #68 + 'sph_ird_wavelength_recalibration': ['sort_files', 'sort_frames', 'sph_ird_wave_calib'], 'sph_ird_combine_data': ['sort_files', 'sort_frames', 'sph_ird_preprocess_science', - 'sph_ird_star_center', 'sph_ird_wavelength_recalibration'] + 'sph_ird_wavelength_recalibration'] } ################################################## @@ -1401,9 +1395,10 @@ def sph_ird_star_center(self, high_pass=False, plot=True): # then OBJECT,CENTER (if any) starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] - DIT = starcen_files['DET SEQ1 DIT'].round(2)[0] - starsci_files = frames_info[(frames_info['DPR TYPE'] == 'OBJECT') & (frames_info['DET SEQ1 DIT'].round(2) == DIT)] if len(starcen_files) != 0: + DIT = starcen_files['DET SEQ1 DIT'].round(2)[0] + starsci_files = frames_info[(frames_info['DPR TYPE'] == 'OBJECT') & (frames_info['DET SEQ1 DIT'].round(2) == DIT)] + for file, idx in starcen_files.index: print(' ==> OBJECT,CENTER: {0}'.format(file)) @@ -1438,7 +1433,7 @@ def sph_ird_star_center(self, high_pass=False, plot=True): def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): '''Performs a recalibration of the wavelength, if star center frames - are available. + are available. Otherwise simply uses the ESO pipeline-calibrated law. It follows a similar process to that used for the IFS data. The method for the IFS is described in Vigan et @@ -1503,7 +1498,7 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): if len(starcen_files) == 0: print(' ==> no OBJECT,CENTER file in the data set. Wavelength cannot be recalibrated. ' + 'The standard wavelength calibrated by the ESO pripeline will be used.') - fits.writeto(path.preproc / 'wavelength_default.fits', wave_lin, overwrite=True) + fits.writeto(path.preproc / 'wavelength_default.fits', wave_lin.T, overwrite=True) # update recipe execution self._recipe_execution['sph_ird_wavelength_recalibration'] = True @@ -1623,8 +1618,27 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m frames. There is one line by time step in the data cube. - FIXME: proper documentation for centering. Ticket #68 - Centering: by default data are finely centered + Centering + --------- + + By default, a fine (sub-pixel) centering is performed if + an OBJECT,CENTER frame was acquired in the sequence or if + there is a valid user-provided center.
However, if the + coarse_centering keyword is set to True, only a "coarse + centering" is performed, which requires no interpolation: + + - only integer shifts (shift_method='roll') + - centering on an integer pixel (cpix=True) + - no correction of the MRS chromatism (correct_mrs_chromatism=False) + + This option is useful if the user wants to perform a + posteriori centering of the spectrum, e.g. to fully preserve + photometry. + + If there was no OBJECT,CENTER acquired in the sequence, then + the centering will be performed with respect to a default, + pre-defined center that is representative of the typical center + of the coronagraph. Parameters ---------- @@ -1685,11 +1699,11 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m # resolution-specific parameters filter_comb = frames_info['INS COMB IFLT'].unique()[0] if filter_comb == 'S_LR': - centers = self._default_center_lrs + default_center = self._default_center_lrs wave_min = self._wave_min_lrs wave_max = self._wave_max_lrs elif filter_comb == 'S_MR': - centers = self._default_center_mrs + default_center = self._default_center_mrs wave_min = self._wave_min_mrs wave_max = self._wave_max_mrs @@ -1710,7 +1724,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m iwave0 = np.where(mask[:, 0])[0] iwave1 = np.where(mask[:, 1])[0] nwave = np.min([iwave0.size, iwave1.size]) - + iwave = np.empty((nwave, 2), dtype=np.int) iwave[:, 0] = iwave0[:nwave] iwave[:, 1] = iwave1[:nwave] @@ -1718,7 +1732,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m final_wave = np.empty((nwave, 2)) final_wave[:, 0] = wave[iwave[:, 0], 0] final_wave[:, 1] = wave[iwave[:, 1], 1] - + fits.writeto(path.products / 'wavelength.fits', final_wave.squeeze().T, overwrite=True) # max images size @@ -1730,22 +1744,22 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m print('Warning: science_dim cannot be larger than 1024 pix. A value of 1024 will be used.') science_dim = 1024 - # centering - # FIXME: better handling and documentation of centering keywords. Ticket #68 - centers_default = centers[:, 0] + # centering configuration if coarse_centering: - print('Warning: images will not be fine centered. They will just be combined.') + print('Warning: images will be coarsely centered without any interpolation. 
Automatic settings for coarse centering: shift_method=\'roll\', cpix=True, correct_mrs_chromatism=False') shift_method = 'roll' - + cpix = True + correct_mrs_chromatism = False + if manual_center is not None: manual_center = np.array(manual_center) + if manual_center.shape != (2,): raise ValueError('manual_center does not have the right number of dimensions.') - print('Warning: images will be centered at the user-provided values.') - - if correct_mrs_chromatism and (filter_comb == 'S_MR'): - print('Warning: fine centering will be done anyway to correct for MRS chromatism') + print('Warning: images will be centered using the user-provided center ({},{})'.format(*manual_center)) + + manual_center = np.full((1024, 2), manual_center, dtype=np.float) # # OBJECT,FLUX @@ -1772,7 +1786,17 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) cube = fits.getdata(path.preproc / '{}.fits'.format(fname)) - centers = fits.getdata(path.preproc / '{}_centers.fits'.format(fname)) + + cfile = path.preproc / '{}_centers.fits'.format(fname) + if cfile.exists(): + centers = fits.getdata(cfile) + else: + print('Warning: sph_ird_star_center() has not been executed. Images will be centered using default centers ({}, {})'.format(*default_center[:, 0])) + centers = np.full((1024, 2), default_center[:, 0], dtype=np.float) + + # make sure we have only integers if user wants coarse centering + if coarse_centering: + centers = centers.astype(np.int) # DIT, angles, etc DIT = frames_info.loc[(file, idx), 'DET SEQ1 DIT'] @@ -1782,24 +1806,20 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m for field_idx, img in enumerate(cube): # wavelength solution for this field ciwave = iwave[:, field_idx] - + if correct_mrs_chromatism and (filter_comb == 'S_MR'): - # FIXME: better handling and documentation of centering keywords. Ticket #68 img = img.astype(np.float) for wave_idx, widx in enumerate(ciwave): cx = centers[widx, field_idx] line = img[widx, :] + nimg = imutils.shift(line, cc-cx, method=shift_method) nimg = nimg / DIT psf_cube[field_idx, file_idx, wave_idx] = nimg[:psf_dim] else: - # FIXME: better handling and documentation of centering keywords. Ticket #68 - if coarse_centering: - cx = centers_default[field_idx] - else: - cx = centers[ciwave, field_idx].mean() + cx = centers[ciwave, field_idx].mean() img = img.astype(np.float) nimg = imutils.shift(img, (cc-cx, 0), method=shift_method) @@ -1862,7 +1882,16 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) cube = fits.getdata(path.preproc / '{}.fits'.format(fname)) - centers = fits.getdata(path.preproc / '{}_centers.fits'.format(fname)) + + # use manual center if explicitely requested + if manual_center is not None: + centers = manual_center + else: + centers = fits.getdata(path.preproc / '{}_centers.fits'.format(fname)) + + # make sure we have only integers if user wants coarse centering + if coarse_centering: + centers = centers.astype(np.int) # DIT, angles, etc DIT = frames_info.loc[(file, idx), 'DET SEQ1 DIT'] @@ -1874,7 +1903,6 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m ciwave = iwave[:, field_idx] if correct_mrs_chromatism and (filter_comb == 'S_MR'): - # FIXME: better handling and documentation of centering keywords. 
img = img.astype(np.float) for wave_idx, widx in enumerate(ciwave): cx = centers[widx, field_idx] @@ -1885,11 +1913,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m cen_cube[field_idx, file_idx, wave_idx] = nimg[:science_dim] else: - # FIXME: better handling and documentation of centering keywords. Ticket #68 - if coarse_centering: - cx = centers_default[field_idx] - else: - cx = centers[ciwave, field_idx].mean() + cx = centers[ciwave, field_idx].mean() img = img.astype(np.float) nimg = imutils.shift(img, (cc-cx, 0), method=shift_method) @@ -1939,24 +1963,25 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m sci_cube = np.zeros((2, nfiles, nwave, science_dim)) sci_posang = np.zeros(nfiles) - # FIXME: ticket #12. Use first DIT of first OBJECT,CENTER - # in the sequence, but it would be better to be able to - # select which CENTER to use - starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] - if (len(starcen_files) == 0) or coarse_centering or (manual_center is not None): - print('Warning: no OBJECT,CENTER file in the data set. Images cannot be accurately centred. ' + - 'They will just be combined.') - - # choose between manual center or default centers - # FIXME: better handling and documentation of centering keywords. Ticket #68 - if manual_center is not None: - centers = manual_center - else: - centers = centers_default + # use manual center if explicitly requested + if manual_center is not None: + centers = np.full((1024, 2), manual_center, dtype=np.float) else: - fname = '{0}_DIT{1:03d}_preproc_centers.fits'.format(starcen_files.index.values[0][0], starcen_files.index.values[0][1]) - centers = fits.getdata(path.preproc / fname) + # FIXME: ticket #12. Use first DIT of first OBJECT,CENTER + # in the sequence, but it would be better to be able to + # select which CENTER to use + starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] + if len(starcen_files) == 0: + print('Warning: no OBJECT,CENTER file in the data set. Images will be centered using default center ({},{})'.format(*default_center[:, 0])) + centers = np.full((1024, 2), default_center[:, 0], dtype=np.float) + else: + fname = '{0}_DIT{1:03d}_preproc_centers.fits'.format(starcen_files.index.values[0][0], starcen_files.index.values[0][1]) + centers = fits.getdata(path.preproc / fname) + # make sure we have only integers if user wants coarse centering + if coarse_centering: + centers = centers.astype(np.int) + # final center if cpix: cc = science_dim // 2 @@ -1981,7 +2006,6 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m ciwave = iwave[:, field_idx] if correct_mrs_chromatism and (filter_comb == 'S_MR'): - # FIXME: better handling and documentation of centering keywords. Ticket #68 img = img.astype(np.float) for wave_idx, widx in enumerate(ciwave): cx = centers[widx, field_idx] @@ -1992,11 +2016,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m sci_cube[field_idx, file_idx, wave_idx] = nimg[:science_dim] else: - # FIXME: better handling and documentation of centering keywords. Ticket #68 - if coarse_centering: - cx = centers_default[field_idx] - else: - cx = centers[ciwave, field_idx].mean() + cx = centers[ciwave, field_idx].mean() img = img.astype(np.float) nimg = imutils.shift(img, (cc-cx, 0), method=shift_method) @@ -2023,7 +2043,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m fits.writeto(path.products / 'science_posang={:06.2f}_cube.fits'.format(pa), sci_cube[:, ii], overwrite=True) else: # save metadata - object_files.to_csv(path.products, 'science_posang=all_frames.csv') + object_files.to_csv(path.products / 'science_posang=all_frames.csv') fits.writeto(path.products / 'science_posang=all_posang.fits', sci_posang, overwrite=True) # save final cubes
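For reference, the coarse-centering logic introduced in this patch reduces to casting the measured centers to integers and shifting with shift_method='roll', so frames only ever move by whole pixels and no flux is redistributed by interpolation. A minimal, self-contained sketch of the idea (the coarse_shift helper and the toy data are hypothetical, not part of the pipeline):

    import numpy as np

    def coarse_shift(line, cc, cx):
        # integer shift only: the equivalent of shift_method='roll' with
        # centers cast via astype(int); no interpolation is involved
        return np.roll(line, int(round(cc - cx)))

    line = np.zeros(16)
    line[5:8] = [1.0, 3.0, 1.0]               # toy spectral profile centered near pixel 6
    shifted = coarse_shift(line, cc=8, cx=6)  # bring the profile to the nominal center
    assert shifted.sum() == line.sum()        # photometry is exactly preserved

This is why coarse centering "fully preserves photometry" for a posteriori recentering: an fft or interp shift would spread flux over neighbouring pixels, while a roll does not.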
From 8750269de0d5e254b80c17a2285667f3bad7f7ad Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Tue, 3 Sep 2019 14:11:48 +0200 Subject: [PATCH 054/101] Update of documentation Finalization of ticket #68 --- vltpf/IFS.py | 5 ++--- vltpf/IRDIS/ImagingReduction.py | 5 ++--- vltpf/IRDIS/SpectroReduction.py | 16 +++++++++------- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/vltpf/IFS.py index ab34b4c..4b03856 100644 --- a/vltpf/IFS.py +++ b/vltpf/IFS.py @@ -2535,9 +2535,8 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a manual_center : array User provided centers for the OBJECT,CENTER and OBJECT - frames. This should be an array of either 2 or nwavex2 - values. For OBJECT,FLUX frames, the PSF is always - recentered. Default is None + frames. This should be an array of either 2 or nwave*2 + values. Default is None coarse_centering : bool Control if images are finely centered or not before being diff --git a/vltpf/IRDIS/ImagingReduction.py index 07fa744..c744b43 100644 --- a/vltpf/IRDIS/ImagingReduction.py +++ b/vltpf/IRDIS/ImagingReduction.py @@ -1315,9 +1315,8 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a manual_center : array User provided centers for the OBJECT,CENTER and OBJECT - frames. This should be an array of either 2 or nwavex2 - values. For OBJECT,FLUX frames, the PSF is always - recentered. Default is None + frames. This should be an array of either 2 or nwave*2 + values. Default is None coarse_centering : bool Control if images are finely centered or not before being diff --git a/vltpf/IRDIS/SpectroReduction.py index de36fa5..8f731a1 100644 --- a/vltpf/IRDIS/SpectroReduction.py +++ b/vltpf/IRDIS/SpectroReduction.py @@ -1645,8 +1645,9 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m cpix : bool If True the images are centered on the pixel at coordinate dim//2 in the spatial dimension. If False the images are - centered between 2 pixels, at coordinates - (dim-1)/2. Default is True. + centered between 2 pixels, at coordinates (dim-1)/2. The + value of cpix is automatically set to True when + coarse_centering is set to True. Default is True. psf_dim : even int Size of the PSF images along the spatial @@ -1660,7 +1661,9 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m correct_mrs_chromatism : bool Correct for the slight chromatism in the MRS mode. This chromatism induces a slight shift of the PSF center with - wavelength. Default is True. + wavelength. The value of correct_mrs_chromatism is + automatically set to False when coarse_centering is set to + True. Default is True. split_posang : bool Save data taken at different position angles in separate @@ -1669,16 +1672,15 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m manual_center : array User provided spatial center for the OBJECT,CENTER and OBJECT frames. This should be an array of 2 values (cx for - the 2 IRDIS fields). If a manual center is provided, the - value of coarse_centering is ignored for the OBJECT,CENTER and - OBJECT frames. Default is None + the 2 IRDIS fields). Default is None coarse_centering : bool Control if images are finely centered or not before being combined. However the images are still roughly centered by shifting them by an integer number of pixels to bring the center of the data close to the center of the images. This - option is useful if fine centering must be done afterwards. + option is useful if fine centering must be done + afterwards. Default is False. shift_method : str Method for shifting the images: fft or interp. Default is
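The manual_center values documented above may be given either as a single (x,y) pair or as one pair per wavelength; the pipeline normalizes both forms inline with np.full, as seen in the hunks of the previous patch. A short sketch of that normalization, using a hypothetical helper name and made-up coordinates:

    import numpy as np

    def normalize_manual_center(manual_center, nwave):
        # accept 2 values or nwave*2 values, always return shape (nwave, 2)
        manual_center = np.array(manual_center, dtype=float)
        if manual_center.shape == (2,):
            manual_center = np.full((nwave, 2), manual_center)
        if manual_center.shape != (nwave, 2):
            raise ValueError('manual_center must contain 2 or nwave*2 values')
        return manual_center

    print(normalize_manual_center((484, 517), nwave=3))   # -> three identical (x, y) rows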
From 52841e14f35c4ecb7395bd76d4d76f478d5340ee Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Tue, 3 Sep 2019 16:52:10 +0200 Subject: [PATCH 055/101] Change how the default wavelength calibration is stored and handled Ticket #75 --- examples/ifs_reduction.py | 2 +- vltpf/IFS.py | 24 ++++++++++++------------ 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/examples/ifs_reduction.py index 8aff319..9fe2ec0 100644 --- a/examples/ifs_reduction.py +++ b/examples/ifs_reduction.py @@ -51,4 +51,4 @@ save_scaled=False) #%% cleaning -reduction.sph_ifs_clean(delete_raw=False, delete_products=False) +reduction.sph_ifs_clean(delete_raw=False, delete_products=True) diff --git a/vltpf/IFS.py index 4b03856..048e378 100644 --- a/vltpf/IFS.py +++ b/vltpf/IFS.py @@ -47,6 +47,7 @@ def compute_detector_flat(raw_flat_files, bpm_files=[], mask_vignetting=True): Master detector flat bpm : array + Bad pixel map from flat ''' @@ -353,8 +354,7 @@ class Reduction(object): 'sph_ifs_wavelength_recalibration': ['sort_files', 'sort_frames', 'sph_ifs_preprocess_wave', 'sph_ifs_science_cubes'], 'sph_ifs_star_center': ['sort_files', 'sort_frames', 'sph_ifs_science_cubes'], - 'sph_ifs_combine_data': ['sort_files', 'sort_frames', 'sph_ifs_science_cubes', - 'sph_ifs_wavelength_recalibration'] + 'sph_ifs_combine_data': ['sort_files', 'sort_frames', 'sph_ifs_science_cubes'] } ################################################## @@ -1556,6 +1556,15 @@ def sph_ifs_cal_wave(self, silent=True): # save files_info.to_csv(path.preproc / 'files.csv') + # store default wavelength calibration in preproc + hdr = fits.getheader(path.calib / '{}.fits'.format(wav_file)) + + wave_min = hdr['HIERARCH ESO DRS IFS MIN LAMBDA']*1000 + wave_max = hdr['HIERARCH ESO DRS IFS MAX LAMBDA']*1000 + wave_drh = np.linspace(wave_min, wave_max, self._nwave) + + fits.writeto(path.preproc / 'wavelength_default.fits', wave_drh, overwrite=True) + # update recipe execution self._recipe_execution['sph_ifs_cal_wave'] = True @@ -2167,11 +2176,7 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot= files_info = self._files_info frames_info = self._frames_info_preproc - # remove old files - wfile = path.preproc / 'wavelength_default.fits' - if wfile.exists(): - wfile.unlink() - + # remove old file wfile = path.preproc / 'wavelength_recalibrated.fits' if wfile.exists(): wfile.unlink() @@ -2201,11 +2206,6 @@ if len(starcen_files) == 0: print(' ==> no OBJECT,CENTER file in the data set. Wavelength cannot be recalibrated. ' + 'The standard wavelength calibrated by the ESO pipeline will be used.') - fits.writeto(path.preproc / 'wavelength_default.fits', wave_drh, overwrite=True) - - # update recipe execution - self._recipe_execution['sph_ifs_wavelength_recalibration'] = True - return ifs_mode = starcen_files['INS2 COMB IFS'].values[0]
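The default wavelength grid stored above by sph_ifs_cal_wave() is simply a linear ramp between the min/max wavelengths written by the ESO DRS into the calibration product. A standalone illustration with placeholder values (0.95-1.35 micron and 39 channels are assumptions for this sketch, not taken from a real calibration file):

    import numpy as np
    from astropy.io import fits

    hdr = fits.Header()
    hdr['HIERARCH ESO DRS IFS MIN LAMBDA'] = 0.95   # placeholder, microns
    hdr['HIERARCH ESO DRS IFS MAX LAMBDA'] = 1.35   # placeholder, microns
    nwave = 39                                      # placeholder channel count

    wave_min = hdr['HIERARCH ESO DRS IFS MIN LAMBDA'] * 1000   # convert to nm
    wave_max = hdr['HIERARCH ESO DRS IFS MAX LAMBDA'] * 1000
    wave_drh = np.linspace(wave_min, wave_max, nwave)          # default calibration grid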
From f22f9c1f34fc1f24d6fe8a3c0b34bee1b05f9212 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Tue, 3 Sep 2019 17:26:33 +0200 Subject: [PATCH 056/101] More explicit warning regarding wavelength calibration used --- vltpf/IFS.py | 3 ++- vltpf/IRDIS/SpectroReduction.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/vltpf/IFS.py index 048e378..e154651 100644 --- a/vltpf/IFS.py +++ b/vltpf/IFS.py @@ -2575,9 +2575,10 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a else: wfile = path.preproc / 'wavelength_default.fits' if wfile.exists(): + print('Warning: using default wavelength calibration.') wave = fits.getdata(wfile) else: - raise FileExistsError('Missing wavelength_default.fits or wavelength_recalibrated.fits files. You must first run the sph_ifs_wavelength_recalibration() method first.') + raise FileExistsError('Missing default or recalibrated wavelength calibration. You must first run either sph_ifs_cal_wave() or sph_ifs_wavelength_recalibration().') fits.writeto(path.products / 'wavelength.fits', wave, overwrite=True) # max images size diff --git a/vltpf/IRDIS/SpectroReduction.py index 8f731a1..4fa0ad4 100644 --- a/vltpf/IRDIS/SpectroReduction.py +++ b/vltpf/IRDIS/SpectroReduction.py @@ -1716,9 +1716,10 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m else: wfile = path.preproc / 'wavelength_default.fits' if wfile.exists(): + print('Warning: using default wavelength calibration.') wave = fits.getdata(wfile) else: - raise FileExistsError('Missing wavelength_default.fits or wavelength_recalibrated.fits files. You must first run the sph_ird_wavelength_recalibration() method first.') + raise FileExistsError('Missing default or recalibrated wavelength calibration. You must first run either sph_ird_wave_calib() or sph_ird_wavelength_recalibration().') # wavelength solution: make sure we have the same number of # wave points in each field From f43093ba5825565052e0c5beffb5d10e1bc93f77 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Tue, 3 Sep 2019 17:27:21 +0200 Subject: [PATCH 057/101] Change how the default wavelength calibration is stored and handled Ticket #75.
Implementation for IRDIS spectro --- examples/irdis_spectro_reduction.py | 2 +- vltpf/IRDIS/SpectroReduction.py | 27 ++++++++++++++++----------- 2 files changed, 17 insertions(+), 12 deletions(-) diff --git a/examples/irdis_spectro_reduction.py b/examples/irdis_spectro_reduction.py index 599ebd5..cd20ec6 100644 --- a/examples/irdis_spectro_reduction.py +++ b/examples/irdis_spectro_reduction.py @@ -45,4 +45,4 @@ shift_method='fft', manual_center=None, coarse_centering=False) #%% cleaning -reduction.sph_ird_clean(delete_raw=False, delete_products=False) +reduction.sph_ird_clean(delete_raw=False, delete_products=True) diff --git a/vltpf/IRDIS/SpectroReduction.py b/vltpf/IRDIS/SpectroReduction.py index 4fa0ad4..6e20727 100644 --- a/vltpf/IRDIS/SpectroReduction.py +++ b/vltpf/IRDIS/SpectroReduction.py @@ -84,8 +84,7 @@ class SpectroReduction(object): 'sph_ird_cal_detector_flat'], 'sph_ird_star_center': ['sort_files', 'sort_frames', 'sph_ird_wave_calib'], 'sph_ird_wavelength_recalibration': ['sort_files', 'sort_frames', 'sph_ird_wave_calib'], - 'sph_ird_combine_data': ['sort_files', 'sort_frames', 'sph_ird_preprocess_science', - 'sph_ird_wavelength_recalibration'] + 'sph_ird_combine_data': ['sort_files', 'sort_frames', 'sph_ird_preprocess_science'] } ################################################## @@ -1103,6 +1102,21 @@ def sph_ird_wave_calib(self, silent=True): # save files_info.to_csv(path.preproc / 'files.csv') + # store default wavelength calibration in preproc + if filter_comb == 'S_LR': + centers = self._default_center_lrs + wave_min = self._wave_min_lrs + wave_max = self._wave_max_lrs + elif filter_comb == 'S_MR': + centers = self._default_center_mrs + wave_min = self._wave_min_mrs + wave_max = self._wave_max_mrs + + wave_calib = fits.getdata(path.calib / '{}.fits'.format(wav_file)) + wave_lin = get_wavelength_calibration(wave_calib, centers, wave_min, wave_max) + + fits.writeto(path.preproc / 'wavelength_default.fits', wave_lin.T, overwrite=True) + # update recipe execution self._recipe_execution['sph_ird_wave_calib'] = True @@ -1465,10 +1479,6 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): frames_info = self._frames_info_preproc # remove old files - wfile = path.preproc / 'wavelength_default.fits' - if wfile.exists(): - wfile.unlink() - wfile = path.preproc / 'wavelength_recalibrated.fits' if wfile.exists(): wfile.unlink() @@ -1498,11 +1508,6 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): if len(starcen_files) == 0: print(' ==> no OBJECT,CENTER file in the data set. Wavelength cannot be recalibrated. 
' + 'The standard wavelength calibrated by the ESO pipeline will be used.') - fits.writeto(path.preproc / 'wavelength_default.fits', wave_lin.T, overwrite=True) - - # update recipe execution - self._recipe_execution['sph_ird_wavelength_recalibration'] = True - return fname = '{0}_DIT{1:03d}_preproc_spot_distance'.format(starcen_files.index.values[0][0], starcen_files.index.values[0][1]) From ae1e84d33123a7d741b0d48d9f70345a827f315d Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Tue, 3 Sep 2019 22:22:15 +0200 Subject: [PATCH 058/101] Implement high-level logging in all modules Ticket #63 --- vltpf/IFS.py | 224 ++++++++++++------------------ vltpf/IRDIS/ImagingReduction.py | 144 +++++++++----------- vltpf/IRDIS/SpectroReduction.py | 157 ++++++++++------------ vltpf/SPHERE.py | 100 ++++++-------- vltpf/__init__.py | 23 +++- vltpf/toolbox.py | 19 +-- 6 files changed, 305 insertions(+), 362 deletions(-) diff --git a/vltpf/IFS.py index e154651..3a7e246 100644 --- a/vltpf/IFS.py +++ b/vltpf/IFS.py @@ -1,5 +1,6 @@ import pandas as pd import subprocess +import logging import numpy as np import scipy.ndimage as ndimage import scipy.interpolate as interp @@ -23,6 +24,8 @@ import vltpf.transmission as transmission import vltpf.toolbox as toolbox +_log = logging.getLogger(__name__) + def compute_detector_flat(raw_flat_files, bpm_files=[], mask_vignetting=True): ''' @@ -761,7 +764,7 @@ def sort_files(self): Data frame with the information on raw files ''' - print('Sorting raw files') + _log.info('Sorting raw files') # parameters path = self._path @@ -773,7 +776,7 @@ def sort_files(self): if len(files) == 0: raise ValueError('No raw FITS files in reduction path') - print(' * found {0} FITS files in {1}'.format(len(files), path.raw)) + _log.info(' * found {0} FITS files in {1}'.format(len(files), path.raw)) # read list of keywords keywords = [] @@ -845,7 +848,7 @@ def sort_frames(self): A data frame with the information on all frames ''' - print('Extracting frames information') + _log.info('Extracting frames information') # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sort_frames', self.recipe_requirements) @@ -918,19 +921,19 @@ def sort_frames(self): date = str(cinfo['DATE'][0])[0:10] - print(' * Object: {0}'.format(cinfo['OBJECT'][0])) - print(' * RA / DEC: {0} / {1}'.format(RA, DEC)) - print(' * Date: {0}'.format(date)) - print(' * Instrument: {0}'.format(cinfo['SEQ ARM'][0])) - print(' * Derotator: {0}'.format(cinfo['INS4 DROT2 MODE'][0])) - print(' * Coronagraph: {0}'.format(cinfo['INS COMB ICOR'][0])) - print(' * Mode: {0}'.format(cinfo['INS1 MODE'][0])) - print(' * Filter: {0}'.format(cinfo['INS2 COMB IFS'][0])) - print(' * DIT: {0:.2f} sec'.format(cinfo['DET SEQ1 DIT'][0])) - print(' * NDIT: {0:.0f}'.format(cinfo['DET NDIT'][0])) - print(' * Texp: {0:.2f} min'.format(cinfo['DET SEQ1 DIT'].sum()/60)) - print(' * PA: {0:.2f}° ==> {1:.2f}° = {2:.2f}°'.format(pa_start, pa_end, np.abs(pa_end-pa_start))) - print(' * POSANG: {0}'.format(', '.join(['{:.2f}°'.format(p) for p in posang]))) + _log.info(' * Object: {0}'.format(cinfo['OBJECT'][0])) + _log.info(' * RA / DEC: {0} / {1}'.format(RA, DEC)) + _log.info(' * Date: {0}'.format(date)) + _log.info(' * Instrument: {0}'.format(cinfo['SEQ ARM'][0])) + _log.info(' * Derotator: {0}'.format(cinfo['INS4 DROT2 MODE'][0])) + _log.info(' * Coronagraph: {0}'.format(cinfo['INS COMB ICOR'][0])) + _log.info(' * Mode: {0}'.format(cinfo['INS1 MODE'][0])) + _log.info(' * Filter: {0}'.format(cinfo['INS2 COMB 
IFS'][0])) + _log.info(' * DIT: {0:.2f} sec'.format(cinfo['DET SEQ1 DIT'][0])) + _log.info(' * NDIT: {0:.0f}'.format(cinfo['DET NDIT'][0])) + _log.info(' * Texp: {0:.2f} min'.format(cinfo['DET SEQ1 DIT'].sum()/60)) + _log.info(' * PA: {0:.2f}° ==> {1:.2f}° = {2:.2f}°'.format(pa_start, pa_end, np.abs(pa_end-pa_start))) + _log.info(' * POSANG: {0}'.format(', '.join(['{:.2f}°'.format(p) for p in posang]))) def check_files_association(self): @@ -944,7 +947,7 @@ def check_files_association(self): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'check_files_association', self.recipe_requirements) - print('Performing file association for calibrations') + _log.info('Performing file association for calibrations') # parameters path = self._path @@ -983,10 +986,10 @@ def check_files_association(self): cfiles = calibs[(calibs['DPR TYPE'] == 'FLAT,LAMP') & (calibs['INS2 COMB IFS'] == 'CAL_BB_2_{0}'.format(mode_short))] if len(cfiles) < 2: error_flag += 1 - print(' * Error: there should be 2 flat files for white lamp, found {0}'.format(len(cfiles))) + _log.error(' * there should be 2 flat files for white lamp, found {0}'.format(len(cfiles))) elif len(cfiles) > 2: warning_flag += 1 - print(' * Warning: there should be 2 flat files for white lamp, found {0}. Using the closest from science.'.format(len(cfiles))) + _log.warning(' * there should be 2 flat files for white lamp, found {0}. Using the closest from science.'.format(len(cfiles))) # find the two closest to science files sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE')] @@ -1001,10 +1004,10 @@ def check_files_association(self): cfiles = calibs[(calibs['DPR TYPE'] == 'FLAT,LAMP') & (calibs['INS2 COMB IFS'] == 'CAL_NB1_1_{0}'.format(mode_short))] if len(cfiles) < 2: error_flag += 1 - print(' * Error: there should be 2 flat files for 1020 nm filter, found {0}'.format(len(cfiles))) + _log.error(' * there should be 2 flat files for 1020 nm filter, found {0}'.format(len(cfiles))) elif len(cfiles) > 2: warning_flag += 1 - print(' * Warning: there should be 2 flat files for 1020 nm filter, found {0}. Using the closest from science.'.format(len(cfiles))) + _log.warning(' * there should be 2 flat files for 1020 nm filter, found {0}. Using the closest from science.'.format(len(cfiles))) # find the two closest to science files sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE')] @@ -1019,10 +1022,10 @@ def check_files_association(self): cfiles = calibs[(calibs['DPR TYPE'] == 'FLAT,LAMP') & (calibs['INS2 COMB IFS'] == 'CAL_NB2_1_{0}'.format(mode_short))] if len(cfiles) < 2: error_flag += 1 - print(' * Error: there should be 2 flat files for 1230 nm filter, found {0}'.format(len(cfiles))) + _log.error(' * there should be 2 flat files for 1230 nm filter, found {0}'.format(len(cfiles))) elif len(cfiles) > 2: warning_flag += 1 - print(' * Warning: there should be 2 flat files for 1230 nm filter, found {0}. Using the closest from science.'.format(len(cfiles))) + _log.warning(' * there should be 2 flat files for 1230 nm filter, found {0}. 
Using the closest from science.'.format(len(cfiles))) # find the two closest to science files sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE')] @@ -1037,10 +1040,10 @@ def check_files_association(self): cfiles = calibs[(calibs['DPR TYPE'] == 'FLAT,LAMP') & (calibs['INS2 COMB IFS'] == 'CAL_NB3_1_{0}'.format(mode_short))] if len(cfiles) < 2: error_flag += 1 - print(' * Error: there should be 2 flat files for 1300 nm filter, found {0}'.format(len(cfiles))) + _log.error(' * there should be 2 flat files for 1300 nm filter, found {0}'.format(len(cfiles))) elif len(cfiles) > 2: warning_flag += 1 - print(' * Warning: there should be 2 flat files for 1300 nm filter, found {0}. Using the closest from science.'.format(len(cfiles))) + _log.warning(' * there should be 2 flat files for 1300 nm filter, found {0}. Using the closest from science.'.format(len(cfiles))) # find the two closest to science files sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE')] @@ -1056,10 +1059,10 @@ def check_files_association(self): cfiles = calibs[(calibs['DPR TYPE'] == 'FLAT,LAMP') & (calibs['INS2 COMB IFS'] == 'CAL_NB4_2_{0}'.format(mode_short))] if len(cfiles) < 2: error_flag += 1 - print(' * Error: there should be 2 flat files for 1550 nm filter, found {0}'.format(len(cfiles))) + _log.error(' * there should be 2 flat files for 1550 nm filter, found {0}'.format(len(cfiles))) elif len(cfiles) > 2: warning_flag += 1 - print(' * Warning: there should be 2 flat files for 1550 nm filter, found {0}. Using the closest from science.'.format(len(cfiles))) + _log.warning(' * there should be 2 flat files for 1550 nm filter, found {0}. Using the closest from science.'.format(len(cfiles))) # find the two closest to science files sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE')] @@ -1074,10 +1077,10 @@ def check_files_association(self): cfiles = calibs[(calibs['DPR TYPE'] == 'SPECPOS,LAMP') & (calibs['INS2 COMB IFS'] == mode)] if len(cfiles) == 0: error_flag += 1 - print(' * Error: there should be 1 spectra position file, found none.') + _log.error(' * there should be 1 spectra position file, found none.') elif len(cfiles) > 1: warning_flag += 1 - print(' * Warning: there should be 1 spectra position file, found {0}. Using the closest from science.'.format(len(cfiles))) + _log.warning(' * there should be 1 spectra position file, found {0}. Using the closest from science.'.format(len(cfiles))) # find the two closest to science files sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE')] @@ -1092,10 +1095,10 @@ def check_files_association(self): cfiles = calibs[(calibs['DPR TYPE'] == 'WAVE,LAMP') & (calibs['INS2 COMB IFS'] == mode)] if len(cfiles) == 0: error_flag += 1 - print(' * Error: there should be 1 wavelength calibration file, found none.') + _log.error(' * there should be 1 wavelength calibration file, found none.') elif len(cfiles) > 1: warning_flag += 1 - print(' * Warning: there should be 1 wavelength calibration file, found {0}. Using the closest from science.'.format(len(cfiles))) + _log.warning(' * there should be 1 wavelength calibration file, found {0}. 
Using the closest from science.'.format(len(cfiles))) # find the two closest to science files sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE')] @@ -1110,10 +1113,10 @@ def check_files_association(self): cfiles = calibs[(calibs['DPR TYPE'] == 'FLAT,LAMP') & (calibs['INS2 COMB IFS'] == mode)] if len(cfiles) == 0: error_flag += 1 - print(' * Error: there should be 1 IFU flat file, found none') + _log.error(' * there should be 1 IFU flat file, found none') elif len(cfiles) > 1: warning_flag += 1 - print(' * Warning: there should be 1 IFU flat file, found {0}. Using the closest from science.'.format(len(cfiles))) + _log.warning(' * there should be 1 IFU flat file, found {0}. Using the closest from science.'.format(len(cfiles))) # find the two closest to science files sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE')] @@ -1129,10 +1132,7 @@ def check_files_association(self): (calibs['DET SEQ1 DIT'].round(2) == 1.65)] if len(cfiles) == 0: error_flag += 1 - print(' * Error: there is no dark/background for the basic calibrations (DIT=1.65 sec). ' + - 'It is mandatory to include one to obtain the best data reduction. ' + - 'A single dark/background file is sufficient, and it can easily be downloaded ' + - 'from the ESO archive') + _log.error(' * there is no dark/background for the basic calibrations (DIT=1.65 sec). It is mandatory to include one to obtain the best data reduction. A single dark/background file is sufficient, and it can easily be downloaded from the ESO archive') ################################################## # static calibrations that depend on science DIT @@ -1148,21 +1148,16 @@ def check_files_association(self): (calibs['DET SEQ1 DIT'].round(2) == DIT)] if len(cfiles) == 0: warning_flag += 1 - print(' * Warning: there is no dark/background for science files with DIT={0} sec. '.format(DIT) + - 'It is *highly recommended* to include one to obtain the best data reduction. ' + - 'A single dark/background file is sufficient, and it can easily be downloaded ' + - 'from the ESO archive') + _log.warning(' * there is no dark/background for science files with DIT={0} sec. It is *highly recommended* to include one to obtain the best data reduction. A single dark/background file is sufficient, and it can easily be downloaded from the ESO archive'.format(DIT)) # sky backgrounds cfiles = files_info[(files_info['DPR TYPE'] == 'SKY') & (files_info['DET SEQ1 DIT'].round(2) == DIT)] if len(cfiles) == 0: warning_flag += 1 - print(' * Warning: there is no sky background for science files with DIT={0} sec. '.format(DIT) + - 'Using a sky background instead of an internal instrumental background can ' + - 'usually provide a cleaner data reduction') + _log.warning(' * there is no sky background for science files with DIT={0} sec. 
Using a sky background instead of an internal instrumental background can usually provide a cleaner data reduction'.format(DIT)) # error reporting - print('There are {0} warning(s) and {1} error(s) in the classification of files'.format(warning_flag, error_flag)) + _log.warning('There are {0} warning(s) and {1} error(s) in the classification of files'.format(warning_flag, error_flag)) if error_flag: raise ValueError('There are {0} error(s) that should be solved before proceeding'.format(error_flag)) @@ -1184,7 +1179,7 @@ def sph_ifs_cal_dark(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_cal_dark', self.recipe_requirements) - print('Creating darks and backgrounds') + _log.info('Creating darks and backgrounds') # parameters path = self._path @@ -1209,7 +1204,7 @@ def sph_ifs_cal_dark(self, silent=True): if len(cfiles) == 0: continue - print(' * {0} with DIT={1:.2f} sec ({2} files)'.format(ctype, DIT, len(cfiles))) + _log.info(' * {0} with DIT={1:.2f} sec ({2} files)'.format(ctype, DIT, len(cfiles))) # create sof sof = path.sof / 'dark_DIT={0:.2f}.sof'.format(DIT) @@ -1290,7 +1285,7 @@ def sph_ifs_cal_detector_flat(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_cal_detector_flat', self.recipe_requirements) - print('Creating flats') + _log.info('Creating flats') # parameters path = self._path @@ -1320,7 +1315,7 @@ def sph_ifs_cal_detector_flat(self, silent=True): lamps = [ 5, 1, 2, 3, 4] for wave, comb, lamp in zip(waves, combs, lamps): - print(' * flat for wavelength {0} nm (filter {1}, lamp {2})'.format(wave, comb, lamp)) + _log.info(' * flat for wavelength {0} nm (filter {1}, lamp {2})'.format(wave, comb, lamp)) cfiles = calibs[calibs['INS2 COMB IFS'] == '{0}_{1}'.format(comb, mode_short)] files = [path.raw / '{}.fits'.format(f) for f in cfiles.index] @@ -1382,7 +1377,7 @@ def sph_ifs_cal_specpos(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_cal_specpos', self.recipe_requirements) - print('Creating specpos') + _log.info('Creating specpos') # parameters path = self._path @@ -1428,8 +1423,7 @@ def sph_ifs_cal_specpos(self, silent=True): # check esorex if shutil.which('esorex') is None: - raise NameError('esorex does not appear to be in your PATH. Please make sure ' + - 'that the ESO pipeline is properly installed before running VLTPF.') + raise NameError('esorex does not appear to be in your PATH. Please make sure that the ESO pipeline is properly installed before running VLTPF.') # execute esorex if silent: @@ -1469,7 +1463,7 @@ def sph_ifs_cal_wave(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_cal_wave', self.recipe_requirements) - print('Creating wavelength calibration') + _log.info('Creating wavelength calibration') # parameters path = self._path @@ -1582,7 +1576,7 @@ def sph_ifs_cal_ifu_flat(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_cal_ifu_flat', self.recipe_requirements) - print('Creating IFU flat') + _log.info('Creating IFU flat') # parameters path = self._path @@ -1667,8 +1661,7 @@ def sph_ifs_cal_ifu_flat(self, silent=True): # check esorex if shutil.which('esorex') is None: - raise NameError('esorex does not appear to be in your PATH. 
Please make sure ' + - 'that the ESO pipeline is properly installed before running VLTPF.') + raise NameError('esorex does not appear to be in your PATH. Please make sure that the ESO pipeline is properly installed before running VLTPF.') # execute esorex if silent: @@ -1755,7 +1748,7 @@ def sph_ifs_preprocess_science(self, # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_preprocess_science', self.recipe_requirements) - print('Pre-processing science files') + _log.info('Pre-processing science files') # parameters path = self._path @@ -1792,7 +1785,7 @@ def sph_ifs_preprocess_science(self, for DIT in sci_DITs: sfiles = sci_files[sci_files['DET SEQ1 DIT'].round(2) == DIT] - print('{0} files of type {1} with DIT={2} sec'.format(len(sfiles), typ, DIT)) + _log.info('{0} files of type {1} with DIT={2} sec'.format(len(sfiles), typ, DIT)) if subtract_background: # look for sky, then background, then darks @@ -1804,11 +1797,11 @@ def sph_ifs_preprocess_science(self, (files_info['DET SEQ1 DIT'].round(2) == DIT)] if len(dfiles) != 0: break - print(' ==> found {0} corresponding {1} file'.format(len(dfiles), d)) + _log.info(' ==> found {0} corresponding {1} file'.format(len(dfiles), d)) if len(dfiles) == 0: # issue a warning if absolutely no background is found - print('Warning: no background has been found. Pre-processing will continue but data quality will likely be affected') + _log.warning('No background has been found. Pre-processing will continue but data quality will likely be affected') bkg = np.zeros((2048, 2048)) elif len(dfiles) == 1: bkg = fits.getdata(path.calib / '{}.fits'.format(dfiles.index[0])) @@ -1821,10 +1814,10 @@ def sph_ifs_preprocess_science(self, # frames_info extract finfo = frames_info.loc[(fname, slice(None)), :] - print(' * file {0}/{1}: {2}, NDIT={3}'.format(idx+1, len(sfiles), fname, len(finfo))) + _log.info(' * file {0}/{1}: {2}, NDIT={3}'.format(idx+1, len(sfiles), fname, len(finfo))) # read data - print(' ==> read data') + _log.info(' ==> read data') img, hdr = fits.getdata(path.raw / '{}.fits'.format(fname), header=True) # add extra dimension to single images to make cubes @@ -1834,14 +1827,14 @@ def sph_ifs_preprocess_science(self, # collapse if (typ == 'OBJECT,CENTER'): if collapse_center: - print(' ==> collapse: mean ({0} -> 1 frame, 0 dropped)'.format(len(img))) + _log.info(' ==> collapse: mean ({0} -> 1 frame, 0 dropped)'.format(len(img))) img = np.mean(img, axis=0, keepdims=True) frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'mean') else: frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'none') elif (typ == 'OBJECT,FLUX'): if collapse_psf: - print(' ==> collapse: mean ({0} -> 1 frame, 0 dropped)'.format(len(img))) + _log.info(' ==> collapse: mean ({0} -> 1 frame, 0 dropped)'.format(len(img))) img = np.mean(img, axis=0, keepdims=True) frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'mean') else: @@ -1849,7 +1842,7 @@ def sph_ifs_preprocess_science(self, elif (typ == 'OBJECT'): if collapse_science: if collapse_type == 'mean': - print(' ==> collapse: mean ({0} -> 1 frame, 0 dropped)'.format(len(img))) + _log.info(' ==> collapse: mean ({0} -> 1 frame, 0 dropped)'.format(len(img))) img = np.mean(img, axis=0, keepdims=True) frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'mean') @@ -1865,7 +1858,7 @@ def sph_ifs_preprocess_science(self, if coadd_value > NDIT: raise ValueError('coadd_value ({0}) must be < NDIT ({1})'.format(coadd_value, NDIT)) - print(' ==> collapse: 
coadd by {0} ({1} -> {2} frames, {3} dropped)'.format(coadd_value, NDIT, NDIT_new, dropped)) + _log.info(' ==> collapse: coadd by {0} ({1} -> {2} frames, {3} dropped)'.format(coadd_value, NDIT, NDIT_new, dropped)) # coadd frames nimg = np.empty((NDIT_new, 2048, 2048), dtype=img.dtype) @@ -1884,13 +1877,13 @@ def sph_ifs_preprocess_science(self, # background subtraction if subtract_background: - print(' ==> subtract background') + _log.info(' ==> subtract background') for f in range(len(img)): img[f] -= bkg # bad pixels correction if fix_badpix: - print(' ==> correct bad pixels') + _log.info(' ==> correct bad pixels') for f in range(len(img)): frame = img[f] @@ -1902,7 +1895,7 @@ def sph_ifs_preprocess_science(self, # spectral crosstalk correction if correct_xtalk: - print(' ==> correct spectral crosstalk') + _log.info(' ==> correct spectral crosstalk') for f in range(len(img)): frame = img[f] frame = sph_ifs_correct_spectral_xtalk(frame) @@ -1911,7 +1904,7 @@ def sph_ifs_preprocess_science(self, # check presence of coordinates # if not, warn user and add fake one: it could be internal source data if hdr.get('HIERARCH ESO TEL TARG ALPHA') is None: - print('Warning: no valid coordinates found in header. Adding fake ones to be able to produce (x,y,lambda) datacubes.') + _log.warning('No valid coordinates found in header. Adding fake ones to be able to produce (x,y,lambda) datacubes.') hdr['HIERARCH ESO TEL TARG ALPHA'] = 120000.0 hdr['HIERARCH ESO TEL TARG DELTA'] = -900000.0 @@ -1924,10 +1917,6 @@ def sph_ifs_preprocess_science(self, fits.writeto(path.preproc / '{}_DIT{:03d}_preproc.fits'.format(fname, f), frame, hdr, overwrite=True, output_verify='silentfix') - print() - - print() - # sort and save final dataframe frames_info_preproc.sort_values(by='TIME', inplace=True) frames_info_preproc.to_csv(path.preproc / 'frames_preproc.csv') @@ -1951,7 +1940,7 @@ def sph_ifs_preprocess_wave(self): path = self._path files_info = self._files_info - print('Pre-processing wavelength calibration file') + _log.info('Pre-processing wavelength calibration file') # bpm bpm_files = files_info[files_info['PRO CATG'] == 'IFS_STATIC_BADPIXELMAP'].index @@ -1972,24 +1961,24 @@ def sph_ifs_preprocess_wave(self): fname = wave_file.index[0] # read data - print(' * {0}'.format(fname)) - print(' ==> read data') + _log.info(' * {0}'.format(fname)) + _log.info(' ==> read data') img, hdr = fits.getdata(path.raw / '{}.fits'.format(fname), header=True) # collapse - print(' ==> collapse: mean') + _log.info(' ==> collapse: mean') img = np.mean(img, axis=0, keepdims=False) # background subtraction - print(' ==> subtract background') + _log.info(' ==> subtract background') img -= bkg # bad pixels correction - print(' ==> correct bad pixels') + _log.info(' ==> correct bad pixels') img = sph_ifs_fix_badpix(img, bpm) # spectral crosstalk correction - print(' ==> correct spectral crosstalk') + _log.info(' ==> correct spectral crosstalk') img = sph_ifs_correct_spectral_xtalk(img) # add fake coordinates
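The pattern applied throughout this commit: each module gets a module-level logger and the print() calls become _log.info/_log.warning/_log.error, leaving the choice of handlers and verbosity to the calling application. A minimal sketch of the same pattern outside the pipeline (the sort_files stub below is an illustration, not the actual vltpf function):

    import logging

    _log = logging.getLogger(__name__)

    def sort_files():
        # library code only emits records; it never configures handlers
        _log.info('Sorting raw files')

    if __name__ == '__main__':
        # the application decides where records go and at what level
        logging.basicConfig(level=logging.INFO, format='%(levelname)s %(message)s')
        sort_files()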
@@ -2017,7 +2006,7 @@ def sph_ifs_science_cubes(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_science_cubes', self.recipe_requirements) - print('Creating the (x,y,lambda) science cubes') + _log.info('Creating the (x,y,lambda) science cubes') # parameters path = self._path @@ -2039,7 +2028,7 @@ def sph_ifs_science_cubes(self, silent=True): # get list of science files sci_files = sorted(list(path.preproc.glob('*_preproc.fits'))) - print(' * found {0} pre-processed files'.format(len(sci_files))) + _log.info(' * found {0} pre-processed files'.format(len(sci_files))) # get list of calibration files bpm_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IFS_STATIC_BADPIXELMAP') & @@ -2097,7 +2086,7 @@ def sph_ifs_science_cubes(self, silent=True): file.close() # esorex parameters - print(' * starting esorex') + _log.info(' * starting esorex') args = ['esorex', '--no-checksum=TRUE', '--no-datamd5=TRUE', @@ -2108,8 +2097,7 @@ def sph_ifs_science_cubes(self, silent=True): # check esorex if shutil.which('esorex') is None: - raise NameError('esorex does not appear to be in your PATH. Please make sure ' + - 'that the ESO pipeline is properly installed before running VLTPF.') + raise NameError('esorex does not appear to be in your PATH. Please make sure that the ESO pipeline is properly installed before running VLTPF.') # execute esorex if silent: @@ -2119,10 +2107,10 @@ def sph_ifs_science_cubes(self, silent=True): if proc.returncode != 0: # raise ValueError('esorex process was not successful') - print('Error: esorex was not successful. Trying to process some of the frames...') + _log.error('esorex was not successful. Trying to process some of the frames...') # post-process - print(' * post-processing files') + _log.info(' * post-processing files') files = list(path.tmp.glob('*_preproc_*.fits')) for f in files: # read and save only primary extension @@ -2165,7 +2153,7 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot= # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_wavelength_recalibration', self.recipe_requirements) - print('Recalibrating wavelength') + _log.info('Recalibrating wavelength') # parameters path = self._path @@ -2184,7 +2172,7 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot= # # DRH wavelength # - print(' * extracting calibrated wavelength') + _log.info(' * extracting calibrated wavelength') # get header of any science file science_files = frames_info[frames_info['DPR CATG'] == 'SCIENCE'].index[0] @@ -2199,13 +2187,12 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot= # # star center # - print(' * fitting satellite spots') + _log.info(' * fitting satellite spots') # get first DIT of first OBJECT,CENTER in the sequence starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] if len(starcen_files) == 0: - print(' ==> no OBJECT,CENTER file in the data set. Wavelength cannot be recalibrated. ' + - 'The standard wavelength calibrated by the ESO pipeline will be used.') + _log.info(' ==> no OBJECT,CENTER file in the data set. Wavelength cannot be recalibrated. The standard wavelength calibrated by the ESO pipeline will be used.')
return ifs_mode = starcen_files['INS2 COMB IFS'].values[0] @@ -2239,7 +2226,7 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot= # # wavelength recalibration # - print(' * recalibration') + _log.info(' * recalibration') # find wavelength calibration file name wave_file = files_info[np.logical_not(files_info['PROCESSED']) & (files_info['DPR TYPE'] == 'WAVE,LAMP')].index[0] @@ -2312,11 +2299,10 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot= wave_final = np.full(nwave, res.x) * wave_scale wave_diff = np.abs(wave_final - wave_drh)*1000 - print(' ==> difference with calibrated wavelength: ' + - 'min={0:.1f} nm, max={1:.1f} nm'.format(wave_diff.min(), wave_diff.max())) + _log.info(' ==> difference with calibrated wavelength: min={0:.1f} nm, max={1:.1f} nm'.format(wave_diff.min(), wave_diff.max())) # save - print(' * saving') + _log.info(' * saving') fits.writeto(path.preproc / 'wavelength_recalibrated.fits', wave_final, overwrite=True) # @@ -2383,7 +2369,7 @@ def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), plot=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_star_center', self.recipe_requirements) - print('Star centers determination') + _log.info('Star centers determination') # parameters path = self._path @@ -2397,7 +2383,7 @@ def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), plot=True): flux_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,FLUX'] if len(flux_files) != 0: for file, idx in flux_files.index: - print(' ==> OBJECT,FLUX: {0}'.format(file)) + _log.info(' ==> OBJECT,FLUX: {0}'.format(file)) # read data fname = '{0}_DIT{1:03d}_preproc_'.format(file, idx) @@ -2423,13 +2409,12 @@ def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), plot=True): # save fits.writeto(path.preproc / '{}centers.fits'.format(fname), img_center, overwrite=True) - print() # then OBJECT,CENTER starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] if len(starcen_files) != 0: for file, idx in starcen_files.index: - print(' ==> OBJECT,CENTER: {0}'.format(file)) + _log.info(' ==> OBJECT,CENTER: {0}'.format(file)) # read data fname = '{0}_DIT{1:03d}_preproc_'.format(file, idx) @@ -2454,7 +2439,6 @@ def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), plot=True): # save fits.writeto(path.preproc / '{}centers.fits'.format(fname), img_center, overwrite=True) - print() # update recipe execution self._recipe_execution['sph_ifs_star_center'] = True @@ -2561,7 +2545,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_combine_data', self.recipe_requirements) - print('Combine science data') + _log.info('Combine science data') # parameters path = self._path @@ -2575,7 +2559,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a else: wfile = path.preproc / 'wavelength_default.fits' if wfile.exists(): - print('Warning: using default wavelength calibration.') + _log.warning('Using default wavelength calibration.') wave = fits.getdata(wfile) else: raise FileExistsError('Missing default or recalibrated wavelength calibration. You must first run either sph_ifs_cal_wave() or sph_ifs_wavelength_recalibration().')
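The fallback order implemented just above: use wavelength_recalibrated.fits when sph_ifs_wavelength_recalibration() produced one, fall back to the wavelength_default.fits written by the wavelength calibration, and fail otherwise. A compact sketch of the same lookup (pick_wave_file is a hypothetical helper; the pipeline itself does this inline and raises FileExistsError):

    from pathlib import Path

    def pick_wave_file(preproc: Path) -> Path:
        # recalibrated solution first, then the default one, else give up
        for name in ('wavelength_recalibrated.fits', 'wavelength_default.fits'):
            wfile = preproc / name
            if wfile.exists():
                return wfile
        raise FileNotFoundError('Missing default or recalibrated wavelength calibration')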
fits.writeto(path.products / 'wavelength.fits', wave, overwrite=True) # max images size if psf_dim > 290: - print('Warning: psf_dim cannot be larger than 290 pix. A value of 290 will be used.') + _log.warning('psf_dim cannot be larger than 290 pix. A value of 290 will be used.') psf_dim = 290 if science_dim > 290: - print('Warning: science_dim cannot be larger than 290 pix. A value of 290 will be used.') + _log.warning('science_dim cannot be larger than 290 pix. A value of 290 will be used.') science_dim = 290 # centering configuration if coarse_centering: - print('Warning: images will be coarsely centered without any interpolation. Automatic settings for coarse centering: shift_method=\'roll\', cpix=True, correct_anamorphism=False, save_scaled=False') + _log.warning('Images will be coarsely centered without any interpolation. Automatic settings for coarse centering: shift_method=\'roll\', cpix=True, correct_anamorphism=False, save_scaled=False') shift_method = 'roll' cpix = True correct_anamorphism = False @@ -2607,7 +2591,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a if manual_center.shape == (2,): manual_center = np.full((nwave, 2), manual_center, dtype=np.float) - print('Warning: images will be centered using the user-provided center ({},{})'.format(*manual_center[0])) + _log.warning('Images will be centered using the user-provided center ({},{})'.format(*manual_center[0])) # # OBJECT,FLUX # flux_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,FLUX'] nfiles = len(flux_files) if nfiles != 0: - print(' * OBJECT,FLUX data') + _log.info(' * OBJECT,FLUX data') # final arrays psf_cube = np.zeros((nwave, nfiles, psf_dim, psf_dim)) @@ -2632,7 +2616,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # read and combine files for file_idx, (file, idx) in enumerate(flux_files.index): - print(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(flux_files), file, idx)) + _log.info(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(flux_files), file, idx)) # read data fname = '{0}_DIT{1:03d}_preproc_'.format(file, idx) @@ -2644,7 +2628,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a if cfile.exists(): centers = fits.getdata(cfile) else: - print('Warning: sph_ifs_star_center() has not been executed. Images will be centered using default center ({},{})'.format(*self._default_center)) + _log.warning('sph_ifs_star_center() has not been executed. 
Images will be centered using default center ({},{})'.format(*self._default_center)) centers = np.full((nwave, 2), self._default_center, dtype=np.float) # make sure we have only integers if user wants coarse centering @@ -2697,15 +2681,13 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a if save_scaled: del psf_cube_scaled - print() - # # OBJECT,CENTER # starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] nfiles = len(starcen_files) if nfiles != 0: - print(' * OBJECT,CENTER data') + _log.info(' * OBJECT,CENTER data') # final arrays cen_cube = np.zeros((nwave, nfiles, science_dim, science_dim)) @@ -2722,7 +2704,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # read and combine files for file_idx, (file, idx) in enumerate(starcen_files.index): - print(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(starcen_files), file, idx)) + _log.info(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(starcen_files), file, idx)) # read data fname = '{0}_DIT{1:03d}_preproc_'.format(file, idx) @@ -2786,15 +2768,13 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a if save_scaled: del cen_cube_scaled - print() - # # OBJECT # object_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT'] nfiles = len(object_files) if nfiles != 0: - print(' * OBJECT data') + _log.info(' * OBJECT data') # use manual center if explicitely requested if manual_center is not None: @@ -2807,7 +2787,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # select which CENTER to use starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] if len(starcen_files) == 0: - print('Warning: no OBJECT,CENTER file in the dataset. Images will be centered using default center ({},{})'.format(*self._default_center)) + _log.warning('No OBJECT,CENTER file in the dataset. Images will be centered using default center ({},{})'.format(*self._default_center)) centers = np.full((nwave, 2), self._default_center, dtype=np.float) else: fname = '{0}_DIT{1:03d}_preproc_centers.fits'.format(starcen_files.index.values[0][0], starcen_files.index.values[0][1]) @@ -2816,7 +2796,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a if fpath.exists(): centers = fits.getdata(fpath) else: - print('Warning: sph_ifs_star_center() has not been executed. Images will be centered using default center ({},{})'.format(*self._default_center)) + _log.warning('sph_ifs_star_center() has not been executed. 
Images will be centered using default center ({},{})'.format(*self._default_center)) centers = np.full((nwave, 2), self._default_center, dtype=np.float) # make sure we have only integers if user wants coarse centering @@ -2838,7 +2818,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # read and combine files for file_idx, (file, idx) in enumerate(object_files.index): - print(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(object_files), file, idx)) + _log.info(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(object_files), file, idx)) # read data fname = '{0}_DIT{1:03d}_preproc_'.format(file, idx) @@ -2891,8 +2871,6 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a if save_scaled: del sci_cube_scaled - print() - # update recipe execution self._recipe_execution['sph_ifs_combine_data'] = True diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py index c744b43..2b2ab61 100644 --- a/vltpf/IRDIS/ImagingReduction.py +++ b/vltpf/IRDIS/ImagingReduction.py @@ -1,20 +1,16 @@ import pandas as pd import subprocess +import logging import numpy as np import scipy.ndimage as ndimage import scipy.interpolate as interp import scipy.optimize as optim import shutil -import matplotlib -import matplotlib.pyplot as plt -import matplotlib.patches as patches -import matplotlib.colors as colors import configparser from pathlib import Path from astropy.io import fits from astropy.modeling import models, fitting -from matplotlib.backends.backend_pdf import PdfPages import vltpf import vltpf.utils as utils @@ -23,6 +19,8 @@ import vltpf.transmission as transmission import vltpf.toolbox as toolbox +_log = logging.getLogger(__name__) + class ImagingReduction(object): ''' @@ -418,7 +416,7 @@ def sort_files(self): Data frame with the information on raw files ''' - print('Sorting raw files') + _log.info('Sorting raw files') # parameters path = self._path @@ -430,7 +428,7 @@ def sort_files(self): if len(files) == 0: raise ValueError('No raw FITS files in reduction path') - print(' * found {0} FITS files in {1}'.format(len(files), path.raw)) + _log.info(' * found {0} FITS files in {1}'.format(len(files), path.raw)) # read list of keywords keywords = [] @@ -502,7 +500,7 @@ def sort_frames(self): A data frame with the information on all frames ''' - print('Extracting frames information') + _log.info('Extracting frames information') # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sort_frames', self.recipe_requirements) @@ -575,19 +573,19 @@ def sort_frames(self): date = str(cinfo['DATE'][0])[0:10] - print(' * Object: {0}'.format(cinfo['OBJECT'][0])) - print(' * RA / DEC: {0} / {1}'.format(RA, DEC)) - print(' * Date: {0}'.format(date)) - print(' * Instrument: {0}'.format(cinfo['SEQ ARM'][0])) - print(' * Derotator: {0}'.format(cinfo['INS4 DROT2 MODE'][0])) - print(' * Coronagraph: {0}'.format(cinfo['INS COMB ICOR'][0])) - print(' * Mode: {0}'.format(cinfo['INS1 MODE'][0])) - print(' * Filter: {0}'.format(cinfo['INS COMB IFLT'][0])) - print(' * DIT: {0:.2f} sec'.format(cinfo['DET SEQ1 DIT'][0])) - print(' * NDIT: {0:.0f}'.format(cinfo['DET NDIT'][0])) - print(' * Texp: {0:.2f} min'.format(cinfo['DET SEQ1 DIT'].sum()/60)) - print(' * PA: {0:.2f}° ==> {1:.2f}° = {2:.2f}°'.format(pa_start, pa_end, np.abs(pa_end-pa_start))) - print(' * POSANG: {0}'.format(', '.join(['{:.2f}°'.format(p) for p in posang]))) + _log.info(' * Object: {0}'.format(cinfo['OBJECT'][0])) + _log.info(' * RA / 
DEC: {0} / {1}'.format(RA, DEC)) + _log.info(' * Date: {0}'.format(date)) + _log.info(' * Instrument: {0}'.format(cinfo['SEQ ARM'][0])) + _log.info(' * Derotator: {0}'.format(cinfo['INS4 DROT2 MODE'][0])) + _log.info(' * Coronagraph: {0}'.format(cinfo['INS COMB ICOR'][0])) + _log.info(' * Mode: {0}'.format(cinfo['INS1 MODE'][0])) + _log.info(' * Filter: {0}'.format(cinfo['INS COMB IFLT'][0])) + _log.info(' * DIT: {0:.2f} sec'.format(cinfo['DET SEQ1 DIT'][0])) + _log.info(' * NDIT: {0:.0f}'.format(cinfo['DET NDIT'][0])) + _log.info(' * Texp: {0:.2f} min'.format(cinfo['DET SEQ1 DIT'].sum()/60)) + _log.info(' * PA: {0:.2f}° ==> {1:.2f}° = {2:.2f}°'.format(pa_start, pa_end, np.abs(pa_end-pa_start))) + _log.info(' * POSANG: {0}'.format(', '.join(['{:.2f}°'.format(p) for p in posang]))) def check_files_association(self): @@ -601,7 +599,7 @@ def check_files_association(self): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'check_files_association', self.recipe_requirements) - print('Performing file association for calibrations') + _log.info('Performing file association for calibrations') # parameters files_info = self._files_info @@ -636,7 +634,7 @@ def check_files_association(self): cfiles = calibs[(calibs['DPR TYPE'] == 'FLAT,LAMP') & (calibs['INS COMB IFLT'] == filter_comb)] if len(cfiles) <= 1: error_flag += 1 - print(' * Error: there should be more than 1 flat in filter combination {0}'.format(filter_comb)) + _log.error(' * there should be more than 1 flat in filter combination {0}'.format(filter_comb)) ################################################## # static calibrations that depend on science DIT @@ -652,21 +650,16 @@ def check_files_association(self): (calibs['DET SEQ1 DIT'].round(2) == DIT)] if len(cfiles) == 0: warning_flag += 1 - print(' * Warning: there is no dark/background for science files with DIT={0} sec. '.format(DIT) + - 'It is *highly recommended* to include one to obtain the best data reduction. ' + - 'A single dark/background file is sufficient, and it can easily be downloaded ' + - 'from the ESO archive') + _log.warning(' * there is no dark/background for science files with DIT={0} sec. It is *highly recommended* to include one to obtain the best data reduction. A single dark/background file is sufficient, and it can easily be downloaded from the ESO archive'.format(DIT)) # sky backgrounds cfiles = files_info[(files_info['DPR TYPE'] == 'SKY') & (files_info['DET SEQ1 DIT'].round(2) == DIT)] if len(cfiles) == 0: warning_flag += 1 - print(' * Warning: there is no sky background for science files with DIT={0} sec. '.format(DIT) + - 'Using a sky background instead of an internal instrumental background can ' + - 'usually provide a cleaner data reduction, especially in K-band') + _log.warning(' * there is no sky background for science files with DIT={0} sec. 
Using a sky background instead of an internal instrumental background can usually provide a cleaner data reduction, especially in K-band'.format(DIT)) # error reporting - print('There are {0} warning(s) and {1} error(s) in the classification of files'.format(warning_flag, error_flag)) + _log.info('There are {0} warning(s) and {1} error(s) in the classification of files'.format(warning_flag, error_flag)) if error_flag: raise ValueError('There are {0} error(s) that should be solved before proceeding'.format(error_flag)) @@ -684,7 +677,7 @@ def sph_ird_cal_dark(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_cal_dark', self.recipe_requirements) - print('Creating darks and backgrounds') + _log.info('Creating darks and backgrounds') # parameters path = self._path @@ -712,7 +705,7 @@ def sph_ird_cal_dark(self, silent=True): if len(cfiles) == 0: continue - print(' * {0} in filter {1} with DIT={2:.2f} sec ({3} files)'.format(ctype, cfilt, DIT, len(cfiles))) + _log.info(' * {0} in filter {1} with DIT={2:.2f} sec ({3} files)'.format(ctype, cfilt, DIT, len(cfiles))) # create sof sof = path.sof / 'dark_filt={0}_DIT={1:.2f}.sof'.format(cfilt, DIT) @@ -748,8 +741,7 @@ def sph_ird_cal_dark(self, silent=True): # check esorex if shutil.which('esorex') is None: - raise NameError('esorex does not appear to be in your PATH. Please make sure ' + - 'that the ESO pipeline is properly installed before running VLTPF.') + raise NameError('esorex does not appear to be in your PATH. Please make sure that the ESO pipeline is properly installed before running VLTPF.') # execute esorex if silent: @@ -802,7 +794,7 @@ def sph_ird_cal_detector_flat(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_cal_detector_flat', self.recipe_requirements) - print('Creating flats') + _log.info('Creating flats') # parameters path = self._path @@ -818,7 +810,7 @@ def sph_ird_cal_detector_flat(self, silent=True): cfiles = calibs[calibs['INS COMB IFLT'] == cfilt] files = cfiles.index - print(' * filter {0} ({1} files)'.format(cfilt, len(cfiles))) + _log.info(' * filter {0} ({1} files)'.format(cfilt, len(cfiles))) # create sof sof = path.sof / 'flat_filt={0}.sof'.format(cfilt) @@ -843,8 +835,7 @@ def sph_ird_cal_detector_flat(self, silent=True): # check esorex if shutil.which('esorex') is None: - raise NameError('esorex does not appear to be in your PATH. 
Please make sure that the ESO pipeline is properly installed before running VLTPF.') # execute esorex if silent: @@ -940,7 +931,7 @@ def sph_ird_preprocess_science(self, # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_preprocess_science', self.recipe_requirements) - print('Pre-processing science files') + _log.info('Pre-processing science files') # parameters path = self._path @@ -995,7 +986,7 @@ def sph_ird_preprocess_science(self, for DIT in sci_DITs: sfiles = sci_files[sci_files['DET SEQ1 DIT'].round(2) == DIT] - print('{0} files of type {1} with DIT={2} sec'.format(len(sfiles), typ, DIT)) + _log.info('{0} files of type {1} with DIT={2} sec'.format(len(sfiles), typ, DIT)) if subtract_background: # look for sky, then background, then darks @@ -1006,11 +997,11 @@ def sph_ird_preprocess_science(self, (files_info['DPR TYPE'] == d) & (files_info['DET SEQ1 DIT'].round(2) == DIT)] if len(dfiles) != 0: break - print(' ==> found {0} corresponding {1} file'.format(len(dfiles), d)) + _log.info(' ==> found {0} corresponding {1} file'.format(len(dfiles), d)) if len(dfiles) == 0: # issue a warning if absolutely no background is found - print('Warning: no background has been found. Pre-processing will continue but data quality will likely be affected') + _log.warning('No background has been found. Pre-processing will continue but data quality will likely be affected') bkg = np.zeros((1024, 2048)) elif len(dfiles) == 1: bkg = fits.getdata(path.calib / '{}.fits'.format(dfiles.index[0])) @@ -1023,10 +1014,10 @@ def sph_ird_preprocess_science(self, # frames_info extract finfo = frames_info.loc[(fname, slice(None)), :] - print(' * file {0}/{1}: {2}, NDIT={3}'.format(idx+1, len(sfiles), fname, len(finfo))) + _log.info(' * file {0}/{1}: {2}, NDIT={3}'.format(idx+1, len(sfiles), fname, len(finfo))) # read data - print(' ==> read data') + _log.info(' ==> read data') img, hdr = fits.getdata(path.raw / '{}.fits'.format(fname), header=True) # add extra dimension to single images to make cubes @@ -1043,14 +1034,14 @@ def sph_ird_preprocess_science(self, # collapse if (typ == 'OBJECT,CENTER'): if collapse_center: - print(' ==> collapse: mean') + _log.info(' ==> collapse: mean') img = np.mean(img, axis=0, keepdims=True) frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'mean') else: frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'none') elif (typ == 'OBJECT,FLUX'): if collapse_psf: - print(' ==> collapse: mean') + _log.info(' ==> collapse: mean') img = np.mean(img, axis=0, keepdims=True) frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'mean') else: @@ -1058,7 +1049,7 @@ def sph_ird_preprocess_science(self, elif (typ == 'OBJECT'): if collapse_science: if collapse_type == 'mean': - print(' ==> collapse: mean ({0} -> 1 frame, 0 dropped)'.format(len(img))) + _log.info(' ==> collapse: mean ({0} -> 1 frame, 0 dropped)'.format(len(img))) img = np.mean(img, axis=0, keepdims=True) frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'mean') @@ -1074,7 +1065,7 @@ def sph_ird_preprocess_science(self, if coadd_value > NDIT: raise ValueError('coadd_value ({0}) must be < NDIT ({1})'.format(coadd_value, NDIT)) - print(' ==> collapse: coadd by {0} ({1} -> {2} frames, {3} dropped)'.format(coadd_value, NDIT, NDIT_new, dropped)) + _log.info(' ==> collapse: coadd by {0} ({1} -> {2} frames, {3} dropped)'.format(coadd_value, NDIT, NDIT_new, dropped)) # coadd frames nimg = np.empty((NDIT_new, 1024, 2048), dtype=img.dtype) @@ -1092,19 +1083,19 
@@ def sph_ird_preprocess_science(self, # background subtraction if subtract_background: - print(' ==> subtract background') + _log.info(' ==> subtract background') for f in range(len(img)): img[f] -= bkg # divide flat if subtract_background: - print(' ==> divide by flat field') + _log.info(' ==> divide by flat field') for f in range(len(img)): img[f] /= flat # bad pixels correction if fix_badpix: - print(' ==> correct bad pixels') + _log.info(' ==> correct bad pixels') for f in range(len(img)): frame = img[f] frame = imutils.fix_badpix(frame, bpm, npix=12, weight=True) @@ -1117,7 +1108,7 @@ def sph_ird_preprocess_science(self, img[f] = frame # reshape data - print(' ==> reshape data') + _log.info(' ==> reshape data') NDIT = img.shape[0] nimg = np.zeros((NDIT, 2, 1024, 1024)) for f in range(len(img)): @@ -1132,10 +1123,6 @@ def sph_ird_preprocess_science(self, fits.writeto(path.preproc / '{}_DIT{:03d}_preproc.fits'.format(fname, f), frame, hdr, overwrite=True, output_verify='silentfix') - print() - - print() - # sort and save final dataframe frames_info_preproc.sort_values(by='TIME', inplace=True) frames_info_preproc.to_csv(path.preproc / 'frames_preproc.csv') @@ -1169,7 +1156,7 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_star_center', self.recipe_requirements) - print('Star centers determination') + _log.info('Star centers determination') # parameters path = self._path @@ -1187,7 +1174,7 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): flux_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,FLUX'] if len(flux_files) != 0: for file, idx in flux_files.index: - print(' ==> OBJECT,FLUX: {0}'.format(file)) + _log.info(' ==> OBJECT,FLUX: {0}'.format(file)) # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) @@ -1202,13 +1189,12 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): # save fits.writeto(path.preproc / '{}_centers.fits'.format(fname), img_center, overwrite=True) - print() # then OBJECT,CENTER starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] if len(starcen_files) != 0: for file, idx in starcen_files.index: - print(' ==> OBJECT,CENTER: {0}'.format(file)) + _log.info(' ==> OBJECT,CENTER: {0}'.format(file)) # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) @@ -1234,7 +1220,6 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): # save fits.writeto(path.preproc / '{}_centers.fits'.format(fname), img_center, overwrite=True) - print() # update recipe execution self._recipe_execution['sph_ird_star_center'] = True @@ -1341,7 +1326,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_combine_data', self.recipe_requirements) - print('Combine science data') + _log.info('Combine science data') # parameters path = self._path @@ -1357,16 +1342,16 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # max images size if psf_dim > 1024: - print('Warning: psf_dim cannot be larger than 1024 pix. A value of 1024 will be used.') + _log.warning('psf_dim cannot be larger than 1024 pix. A value of 1024 will be used.') psf_dim = 1024 if science_dim > 1024: - print('Warning: science_dim cannot be larger than 1024 pix. 
A value of 1024 will be used.') + _log.warning('science_dim cannot be larger than 1024 pix. A value of 1024 will be used.') science_dim = 1024 # centering configuration if coarse_centering: - print('Warning: images will be coarsely centered without any interpolation. Automatic settings for coarse centering: shift_method=\'roll\', cpix=True, correct_anamorphism=False, save_scaled=False') + _log.warning('Images will be coarsely centered without any interpolation. Automatic settings for coarse centering: shift_method=\'roll\', cpix=True, correct_anamorphism=False, save_scaled=False') shift_method = 'roll' cpix = True correct_anamorphism = False @@ -1381,7 +1366,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a if manual_center.shape == (2,): manual_center = np.full((nwave, 2), manual_center, dtype=np.float) - print('Warning: images will be centered using the user-provided center ({},{})'.format(*manual_center[0])) + _log.warning('Images will be centered using the user-provided center ({},{})'.format(*manual_center[0])) # # OBJECT,FLUX @@ -1389,7 +1374,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a flux_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,FLUX'] nfiles = len(flux_files) if nfiles != 0: - print(' * OBJECT,FLUX data') + _log.info(' * OBJECT,FLUX data') # final arrays psf_cube = np.zeros((nwave, nfiles, psf_dim, psf_dim)) @@ -1406,7 +1391,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # read and combine files for file_idx, (file, idx) in enumerate(flux_files.index): - print(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(flux_files), file, idx)) + _log.info(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(flux_files), file, idx)) # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) @@ -1416,7 +1401,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a if cfile.exists(): centers = fits.getdata(cfile) else: - print('Warning: sph_ird_star_center() has not been executed. Images will be centered using default center ({},{})'.format(*self._default_center)) + _log.warning('sph_ird_star_center() has not been executed. 
Images will be centered using default center ({},{})'.format(*self._default_center)) centers = self._default_center # make sure we have only integers if user wants coarse centering @@ -1466,15 +1451,13 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a if save_scaled: del psf_cube_scaled - print() - # # OBJECT,CENTER # starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] nfiles = len(starcen_files) if nfiles != 0: - print(' * OBJECT,CENTER data') + _log.info(' * OBJECT,CENTER data') # final arrays cen_cube = np.zeros((nwave, nfiles, science_dim, science_dim)) @@ -1491,7 +1474,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # read and combine files for file_idx, (file, idx) in enumerate(starcen_files.index): - print(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(starcen_files), file, idx)) + _log.info(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(starcen_files), file, idx)) # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) @@ -1551,15 +1534,13 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a if save_scaled: del cen_cube_scaled - print() - # # OBJECT # object_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT'] nfiles = len(object_files) if nfiles != 0: - print(' * OBJECT data') + _log.info(' * OBJECT data') # null value for Dithering Motion Stage by default dms_dx_ref = 0 @@ -1576,7 +1557,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # select which CENTER to use starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] if len(starcen_files) == 0: - print('Warning: no OBJECT,CENTER file in the dataset. Images will be centered using default center ({},{})'.format(*self._default_center)) + _log.warning('No OBJECT,CENTER file in the dataset. Images will be centered using default center ({},{})'.format(*self._default_center)) centers = self._default_center else: fname = '{0}_DIT{1:03d}_preproc_centers.fits'.format(starcen_files.index.values[0][0], starcen_files.index.values[0][1]) @@ -1589,9 +1570,8 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a dms_dx_ref = starcen_files['INS1 PAC X'][0] / 18 dms_dy_ref = starcen_files['INS1 PAC Y'][0] / 18 else: - print('Warning: sph_ird_star_center() has not been executed. Images will be centered using default center ({},{})'.format(*self._default_center)) + _log.warning('sph_ird_star_center() has not been executed. 
Images will be centered using default center ({},{})'.format(*self._default_center)) centers = self._default_center - # make sure we have only integers if user wants coarse centering if coarse_centering: @@ -1614,7 +1594,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # read and combine files for file_idx, (file, idx) in enumerate(object_files.index): - print(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(object_files), file, idx)) + _log.info(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(object_files), file, idx)) # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) @@ -1678,8 +1658,6 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a if save_scaled: del sci_cube_scaled - print() - # update recipe execution self._recipe_execution['sph_ird_combine_data'] = True diff --git a/vltpf/IRDIS/SpectroReduction.py b/vltpf/IRDIS/SpectroReduction.py index 6e20727..9937d6c 100644 --- a/vltpf/IRDIS/SpectroReduction.py +++ b/vltpf/IRDIS/SpectroReduction.py @@ -1,5 +1,6 @@ import pandas as pd import subprocess +import logging import numpy as np import scipy.ndimage as ndimage import scipy.interpolate as interp @@ -23,6 +24,8 @@ import vltpf.transmission as transmission import vltpf.toolbox as toolbox +_log = logging.getLogger(__name__) + def get_wavelength_calibration(wave_calib, centers, wave_min, wave_max): ''' @@ -477,7 +480,7 @@ def sort_files(self): Data frame with the information on raw files ''' - print('Sorting raw files') + _log.info('Sorting raw files') # parameters path = self._path @@ -489,7 +492,7 @@ def sort_files(self): if len(files) == 0: raise ValueError('No raw FITS files in reduction path') - print(' * found {0} FITS files in {1}'.format(len(files), path.raw)) + _log.info(' * found {0} FITS files in {1}'.format(len(files), path.raw)) # read list of keywords keywords = [] @@ -561,7 +564,7 @@ def sort_frames(self): A data frame with the information on all frames ''' - print('Extracting frames information') + _log.info('Extracting frames information') # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sort_frames', self.recipe_requirements) @@ -634,19 +637,19 @@ def sort_frames(self): date = str(cinfo['DATE'][0])[0:10] - print(' * Object: {0}'.format(cinfo['OBJECT'][0])) - print(' * RA / DEC: {0} / {1}'.format(RA, DEC)) - print(' * Date: {0}'.format(date)) - print(' * Instrument: {0}'.format(cinfo['SEQ ARM'][0])) - print(' * Derotator: {0}'.format(cinfo['INS4 DROT2 MODE'][0])) - print(' * Coronagraph: {0}'.format(cinfo['INS COMB ICOR'][0])) - print(' * Mode: {0}'.format(cinfo['INS1 MODE'][0])) - print(' * Filter: {0}'.format(cinfo['INS COMB IFLT'][0])) - print(' * DIT: {0:.2f} sec'.format(cinfo['DET SEQ1 DIT'][0])) - print(' * NDIT: {0:.0f}'.format(cinfo['DET NDIT'][0])) - print(' * Texp: {0:.2f} min'.format(cinfo['DET SEQ1 DIT'].sum()/60)) - print(' * PA: {0:.2f}° ==> {1:.2f}° = {2:.2f}°'.format(pa_start, pa_end, np.abs(pa_end-pa_start))) - print(' * POSANG: {0}'.format(', '.join(['{:.2f}°'.format(p) for p in posang]))) + _log.info(' * Object: {0}'.format(cinfo['OBJECT'][0])) + _log.info(' * RA / DEC: {0} / {1}'.format(RA, DEC)) + _log.info(' * Date: {0}'.format(date)) + _log.info(' * Instrument: {0}'.format(cinfo['SEQ ARM'][0])) + _log.info(' * Derotator: {0}'.format(cinfo['INS4 DROT2 MODE'][0])) + _log.info(' * Coronagraph: {0}'.format(cinfo['INS COMB ICOR'][0])) + _log.info(' * Mode: {0}'.format(cinfo['INS1 MODE'][0])) + _log.info(' * Filter: 
{0}'.format(cinfo['INS COMB IFLT'][0])) + _log.info(' * DIT: {0:.2f} sec'.format(cinfo['DET SEQ1 DIT'][0])) + _log.info(' * NDIT: {0:.0f}'.format(cinfo['DET NDIT'][0])) + _log.info(' * Texp: {0:.2f} min'.format(cinfo['DET SEQ1 DIT'].sum()/60)) + _log.info(' * PA: {0:.2f}° ==> {1:.2f}° = {2:.2f}°'.format(pa_start, pa_end, np.abs(pa_end-pa_start))) + _log.info(' * POSANG: {0}'.format(', '.join(['{:.2f}°'.format(p) for p in posang]))) def check_files_association(self): @@ -660,7 +663,7 @@ def check_files_association(self): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'check_files_association', self.recipe_requirements) - print('Performing file association for calibrations') + _log.info('Performing file association for calibrations') # parameters path = self._path @@ -698,16 +701,16 @@ def check_files_association(self): cfiles = calibs[(calibs['DPR TYPE'] == 'FLAT,LAMP') & (calibs['INS COMB IFLT'] == filter_comb)] if len(cfiles) <= 1: error_flag += 1 - print(' * Error: there should be more than 1 flat in filter combination {0}'.format(filter_comb)) + _log.error(' * there should be more than 1 flat in filter combination {0}'.format(filter_comb)) # wave cfiles = calibs[(calibs['DPR TYPE'] == 'LAMP,WAVE') & (calibs['INS COMB IFLT'] == filter_comb)] if len(cfiles) == 0: error_flag += 1 - print(' * Error: there should be 1 wavelength calibration file, found none.') + _log.error(' * there should be 1 wavelength calibration file, found none.') elif len(cfiles) > 1: warning_flag += 1 - print(' * Warning: there should be 1 wavelength calibration file, found {0}. Using the closest from science.'.format(len(cfiles))) + _log.warning(' * there should be 1 wavelength calibration file, found {0}. Using the closest from science.'.format(len(cfiles))) # find the two closest to science files sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE')] @@ -732,21 +735,16 @@ def check_files_association(self): (calibs['DET SEQ1 DIT'].round(2) == DIT)] if len(cfiles) == 0: warning_flag += 1 - print(' * Warning: there is no dark/background for science files with DIT={0} sec. '.format(DIT) + - 'It is *highly recommended* to include one to obtain the best data reduction. ' + - 'A single dark/background file is sufficient, and it can easily be downloaded ' + - 'from the ESO archive') + _log.warning(' * there is no dark/background for science files with DIT={0} sec. It is *highly recommended* to include one to obtain the best data reduction. A single dark/background file is sufficient, and it can easily be downloaded from the ESO archive'.format(DIT)) # sky backgrounds cfiles = files_info[(files_info['DPR TYPE'] == 'SKY') & (files_info['DET SEQ1 DIT'].round(2) == DIT)] if len(cfiles) == 0: warning_flag += 1 - print(' * Warning: there is no sky background for science files with DIT={0} sec. '.format(DIT) + - 'Using a sky background instead of an internal instrumental background can ' + - 'usually provide a cleaner data reduction') + _log.warning(' * there is no sky background for science files with DIT={0} sec. 
Using a sky background instead of an internal instrumental background can usually provide a cleaner data reduction'.format(DIT)) # error reporting - print('There are {0} warning(s) and {1} error(s) in the classification of files'.format(warning_flag, error_flag)) + _log.info('There are {0} warning(s) and {1} error(s) in the classification of files'.format(warning_flag, error_flag)) if error_flag: raise ValueError('There is {0} errors that should be solved before proceeding'.format(error_flag)) @@ -768,7 +766,7 @@ def sph_ird_cal_dark(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_cal_dark', self.recipe_requirements) - print('Creating darks and backgrounds') + _log.info('Creating darks and backgrounds') # parameters path = self._path @@ -796,7 +794,7 @@ def sph_ird_cal_dark(self, silent=True): if len(cfiles) == 0: continue - print(' * {0} in filter {1} with DIT={2:.2f} sec ({3} files)'.format(ctype, cfilt, DIT, len(cfiles))) + _log.info(' * {0} in filter {1} with DIT={2:.2f} sec ({3} files)'.format(ctype, cfilt, DIT, len(cfiles))) # create sof sof = path.sof / 'dark_filt={0}_DIT={1:.2f}.sof'.format(cfilt, DIT) @@ -832,8 +830,7 @@ def sph_ird_cal_dark(self, silent=True): # check esorex if shutil.which('esorex') is None: - raise NameError('esorex does not appear to be in your PATH. Please make sure ' + - 'that the ESO pipeline is properly installed before running VLTPF.') + raise NameError('esorex does not appear to be in your PATH. Please make sure that the ESO pipeline is properly installed before running VLTPF.') # execute esorex if silent: @@ -886,7 +883,7 @@ def sph_ird_cal_detector_flat(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_cal_detector_flat', self.recipe_requirements) - print('Creating flats') + _log.info('Creating flats') # parameters path = self._path @@ -901,7 +898,7 @@ def sph_ird_cal_detector_flat(self, silent=True): cfiles = calibs[calibs['INS COMB IFLT'] == cfilt] files = cfiles.index - print(' * filter {0} ({1} files)'.format(cfilt, len(cfiles))) + _log.info(' * filter {0} ({1} files)'.format(cfilt, len(cfiles))) # create sof sof = path.sof / 'flat_filt={0}.sof'.format(cfilt) @@ -926,8 +923,7 @@ def sph_ird_cal_detector_flat(self, silent=True): # check esorex if shutil.which('esorex') is None: - raise NameError('esorex does not appear to be in your PATH. Please make sure ' + - 'that the ESO pipeline is properly installed before running VLTPF.') + raise NameError('esorex does not appear to be in your PATH. Please make sure that the ESO pipeline is properly installed before running VLTPF.') # execute esorex if silent: @@ -980,7 +976,7 @@ def sph_ird_wave_calib(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_wave_calib', self.recipe_requirements) - print('Creating wavelength calibration') + _log.info('Creating wavelength calibration') # parameters path = self._path @@ -1075,8 +1071,7 @@ def sph_ird_wave_calib(self, silent=True): # check esorex if shutil.which('esorex') is None: - raise NameError('esorex does not appear to be in your PATH. Please make sure ' + - 'that the ESO pipeline is properly installed before running VLTPF.') + raise NameError('esorex does not appear to be in your PATH. 
Please make sure that the ESO pipeline is properly installed before running VLTPF.') # execute esorex if silent: @@ -1162,7 +1157,7 @@ def sph_ird_preprocess_science(self, # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_preprocess_science', self.recipe_requirements) - print('Pre-processing science files') + _log.info('Pre-processing science files') # parameters path = self._path @@ -1217,7 +1212,7 @@ def sph_ird_preprocess_science(self, for DIT in sci_DITs: sfiles = sci_files[sci_files['DET SEQ1 DIT'].round(2) == DIT] - print('{0} files of type {1} with DIT={2} sec'.format(len(sfiles), typ, DIT)) + _log.info('{0} files of type {1} with DIT={2} sec'.format(len(sfiles), typ, DIT)) if subtract_background: # look for sky, then background, then darks @@ -1228,11 +1223,11 @@ def sph_ird_preprocess_science(self, (files_info['DPR TYPE'] == d) & (files_info['DET SEQ1 DIT'].round(2) == DIT)] if len(dfiles) != 0: break - print(' ==> found {0} corresponding {1} file'.format(len(dfiles), d)) + _log.info(' ==> found {0} corresponding {1} file'.format(len(dfiles), d)) if len(dfiles) == 0: # issue a warning if absolutely no background is found - print('Warning: no background has been found. Pre-processing will continue but data quality will likely be affected') + _log.warning('No background has been found. Pre-processing will continue but data quality will likely be affected') bkg = np.zeros((1024, 2048)) elif len(dfiles) == 1: bkg = fits.getdata(path.calib / '{}.fits'.format(dfiles.index[0])) @@ -1245,10 +1240,10 @@ def sph_ird_preprocess_science(self, # frames_info extract finfo = frames_info.loc[(fname, slice(None)), :] - print(' * file {0}/{1}: {2}, NDIT={3}'.format(idx+1, len(sfiles), fname, len(finfo))) + _log.info(' * file {0}/{1}: {2}, NDIT={3}'.format(idx+1, len(sfiles), fname, len(finfo))) # read data - print(' ==> read data') + _log.info(' ==> read data') img, hdr = fits.getdata(path.raw / '{}.fits'.format(fname), header=True) # add extra dimension to single images to make cubes @@ -1265,21 +1260,21 @@ def sph_ird_preprocess_science(self, # collapse if (typ == 'OBJECT,CENTER'): if collapse_center: - print(' ==> collapse: mean') + _log.info(' ==> collapse: mean') img = np.mean(img, axis=0, keepdims=True) frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'mean') else: frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'none') elif (typ == 'OBJECT,FLUX'): if collapse_psf: - print(' ==> collapse: mean') + _log.info(' ==> collapse: mean') img = np.mean(img, axis=0, keepdims=True) frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'mean') else: frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'none') elif (typ == 'OBJECT'): if collapse_science: - print(' ==> collapse: mean ({0} -> 1 frame, 0 dropped)'.format(len(img))) + _log.info(' ==> collapse: mean ({0} -> 1 frame, 0 dropped)'.format(len(img))) img = np.mean(img, axis=0, keepdims=True) frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'mean') @@ -1290,19 +1285,19 @@ def sph_ird_preprocess_science(self, # background subtraction if subtract_background: - print(' ==> subtract background') + _log.info(' ==> subtract background') for f in range(len(img)): img[f] -= bkg # divide flat if subtract_background: - print(' ==> divide by flat field') + _log.info(' ==> divide by flat field') for f in range(len(img)): img[f] /= flat # bad pixels correction if fix_badpix: - print(' ==> correct bad pixels') + _log.info(' ==> correct bad pixels') for f in 
range(len(img)): frame = img[f] frame = imutils.fix_badpix(frame, bpm, npix=12, weight=True) @@ -1315,7 +1310,7 @@ def sph_ird_preprocess_science(self, img[f] = frame # reshape data - print(' ==> reshape data') + _log.info(' ==> reshape data') NDIT = img.shape[0] nimg = np.zeros((NDIT, 2, 1024, 1024)) for f in range(len(img)): @@ -1330,10 +1325,6 @@ def sph_ird_preprocess_science(self, fits.writeto(path.preproc / '{}_DIT{:03d}_preproc.fits'.format(fname, f), frame, hdr, overwrite=True, output_verify='silentfix') - print() - - print() - # sort and save final dataframe frames_info_preproc.sort_values(by='TIME', inplace=True) frames_info_preproc.to_csv(path.preproc / 'frames_preproc.csv') @@ -1362,7 +1353,7 @@ def sph_ird_star_center(self, high_pass=False, plot=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_star_center', self.recipe_requirements) - print('Star centers determination') + _log.info('Star centers determination') # parameters path = self._path @@ -1390,7 +1381,7 @@ def sph_ird_star_center(self, high_pass=False, plot=True): flux_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,FLUX'] if len(flux_files) != 0: for file, idx in flux_files.index: - print(' ==> OBJECT,FLUX: {0}'.format(file)) + _log.info(' ==> OBJECT,FLUX: {0}'.format(file)) # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) @@ -1405,7 +1396,6 @@ def sph_ird_star_center(self, high_pass=False, plot=True): # save fits.writeto(path.preproc / '{}_centers.fits'.format(fname), psf_center, overwrite=True) - print() # then OBJECT,CENTER (if any) starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] @@ -1414,7 +1404,7 @@ def sph_ird_star_center(self, high_pass=False, plot=True): starsci_files = frames_info[(frames_info['DPR TYPE'] == 'OBJECT') & (frames_info['DET SEQ1 DIT'].round(2) == DIT)] for file, idx in starcen_files.index: - print(' ==> OBJECT,CENTER: {0}'.format(file)) + _log.info(' ==> OBJECT,CENTER: {0}'.format(file)) # read center data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) @@ -1439,7 +1429,6 @@ def sph_ird_star_center(self, high_pass=False, plot=True): # save fits.writeto(path.preproc / '{}_centers.fits'.format(fname), img_centers, overwrite=True) fits.writeto(path.preproc / '{}_spot_distance.fits'.format(fname), spot_dist, overwrite=True) - print() # update recipe execution self._recipe_execution['sph_ird_star_center'] = True @@ -1470,7 +1459,7 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_wavelength_recalibration', self.recipe_requirements) - print('Wavelength recalibration') + _log.info('Wavelength recalibration') # parameters path = self._path @@ -1506,8 +1495,7 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): # get spot distance from the first OBJECT,CENTER in the sequence starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] if len(starcen_files) == 0: - print(' ==> no OBJECT,CENTER file in the data set. Wavelength cannot be recalibrated. ' + - 'The standard wavelength calibrated by the ESO pripeline will be used.') + _log.info(' ==> no OBJECT,CENTER file in the data set. Wavelength cannot be recalibrated. 
The standard wavelength calibration from the ESO pipeline will be used.')
             return
 
         fname = '{0}_DIT{1:03d}_preproc_spot_distance'.format(starcen_files.index.values[0][0], starcen_files.index.values[0][1])
@@ -1519,7 +1507,7 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True):
         pix = np.arange(1024)
         wave_final = np.zeros((1024, 2))
         for fidx in range(2):
-            print(' field {0:2d}/{1:2d}'.format(fidx+1, 2))
+            _log.info(' field {0:2d}/{1:2d}'.format(fidx+1, 2))
 
             wave = wave_lin[fidx]
             dist = spot_dist[:, fidx]
@@ -1548,8 +1536,7 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True):
             wave_final_fit[bad] = np.nan
 
             wave_diff = np.abs(wave_final_fit - wave)
-            print(' ==> difference with calibrated wavelength: ' +
-                  'min={0:.1f} nm, max={1:.1f} nm'.format(np.nanmin(wave_diff), np.nanmax(wave_diff)))
+            _log.info(' ==> difference with calibrated wavelength: min={0:.1f} nm, max={1:.1f} nm'.format(np.nanmin(wave_diff), np.nanmax(wave_diff)))
 
             if fit_scaling:
                 wave_final[:, fidx] = wave_final_fit
@@ -1592,7 +1579,7 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True):
             pdf.close()
 
         # save
-        print(' * saving')
+        _log.info(' * saving')
         fits.writeto(path.preproc / 'wavelength_recalibrated.fits', wave_final, overwrite=True)
 
         # update recipe execution
@@ -1696,7 +1683,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m
         # check if recipe can be executed
         toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_combine_data', self.recipe_requirements)
 
-        print('Combine science data')
+        _log.info('Combine science data')
 
         # parameters
         path = self._path
@@ -1721,7 +1708,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m
         else:
             wfile = path.preproc / 'wavelength_default.fits'
             if wfile.exists():
-                print('Warning: using default wavelength calibration.')
+                _log.warning('Using default wavelength calibration.')
                 wave = fits.getdata(wfile)
             else:
                 raise FileExistsError('Missing default or recalibrated wavelength calibration. You must first run either sph_ird_wave_calib or sph_ird_wavelength_recalibration().')
@@ -1745,16 +1732,16 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m
 
         # max images size
         if psf_dim > 1024:
-            print('Warning: psf_dim cannot be larger than 1024 pix. A value of 1024 will be used.')
+            _log.warning('psf_dim cannot be larger than 1024 pix. A value of 1024 will be used.')
             psf_dim = 1024
 
         if science_dim > 1024:
-            print('Warning: science_dim cannot be larger than 1024 pix. A value of 1024 will be used.')
+            _log.warning('science_dim cannot be larger than 1024 pix. A value of 1024 will be used.')
             science_dim = 1024
 
         # centering configuration
         if coarse_centering:
-            print('Warning: images will be coarsely centered without any interpolation. Automatic settings for coarse centering: shift_method=\'roll\', cpix=True, correct_mrs_chromatism=False')
+            _log.warning('Images will be coarsely centered without any interpolation. 
Automatic settings for coarse centering: shift_method=\'roll\', cpix=True, correct_mrs_chromatism=False') shift_method = 'roll' cpix = True correct_mrs_chromatism = False @@ -1765,7 +1752,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m if manual_center.shape != (2,): raise ValueError('manual_center does not have the right number of dimensions.') - print('Warning: images will be centered using the user-provided center ({},{})'.format(*manual_center)) + _log.warning('Images will be centered using the user-provided center ({},{})'.format(*manual_center)) manual_center = np.full((1024, 2), manual_center, dtype=np.float) @@ -1775,7 +1762,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m flux_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,FLUX'] nfiles = len(flux_files) if nfiles != 0: - print(' * OBJECT,FLUX data') + _log.info(' * OBJECT,FLUX data') # final arrays psf_cube = np.zeros((2, nfiles, nwave, psf_dim)) @@ -1789,7 +1776,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m # read and combine files for file_idx, (file, idx) in enumerate(flux_files.index): - print(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(flux_files), file, idx)) + _log.info(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(flux_files), file, idx)) # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) @@ -1799,7 +1786,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m if cfile.exists(): centers = fits.getdata(cfile) else: - print('Warning: sph_ird_star_center() has not been executed. Images will be centered using default centers ({}, {})'.format(*default_center[:, 0])) + _log.warning('sph_ird_star_center() has not been executed. 
Images will be centered using default centers ({}, {})'.format(*default_center[:, 0])) centers = np.full((1024, 2), default_center[:, 0], dtype=np.float) # make sure we have only integers if user wants coarse centering @@ -1863,15 +1850,13 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m # delete big cubes del psf_cube - print() - # # OBJECT,CENTER # starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] nfiles = len(starcen_files) if nfiles != 0: - print(' * OBJECT,CENTER data') + _log.info(' * OBJECT,CENTER data') # final arrays cen_cube = np.zeros((2, nfiles, nwave, science_dim)) @@ -1885,7 +1870,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m # read and combine files for file_idx, (file, idx) in enumerate(starcen_files.index): - print(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(starcen_files), file, idx)) + _log.info(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(starcen_files), file, idx)) # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) @@ -1957,15 +1942,13 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m # delete big cubes del cen_cube - print() - # # OBJECT # object_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT'] nfiles = len(object_files) if nfiles != 0: - print(' * OBJECT data') + _log.info(' * OBJECT data') # final arrays sci_cube = np.zeros((2, nfiles, nwave, science_dim)) @@ -1980,7 +1963,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m # select which CENTER to use starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] if len(starcen_files) == 0: - print('Warning: no OBJECT,CENTER file in the data set. Images will be centered using default center ({},{})'.format(*default_center[:, 0])) + _log.warning('No OBJECT,CENTER file in the data set. 
Images will be centered using default center ({},{})'.format(*default_center[:, 0]))
             centers = np.full((1024, 2), default_center[:, 0], dtype=np.float)
         else:
             fname = '{0}_DIT{1:03d}_preproc_centers.fits'.format(starcen_files.index.values[0][0], starcen_files.index.values[0][1])
@@ -1998,7 +1981,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m
 
         # read and combine files
         for file_idx, (file, idx) in enumerate(object_files.index):
-            print(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(object_files), file, idx))
+            _log.info(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(object_files), file, idx))
 
             # read data
             fname = '{0}_DIT{1:03d}_preproc'.format(file, idx)
@@ -2060,8 +2043,6 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m
         # delete big cubes
         del sci_cube
 
-        print()
-
         # update recipe execution
         self._recipe_execution['sph_ird_combine_data'] = True
 
diff --git a/vltpf/SPHERE.py b/vltpf/SPHERE.py
index d99b25d..0fd6b64 100644
--- a/vltpf/SPHERE.py
+++ b/vltpf/SPHERE.py
@@ -1,11 +1,8 @@
-'''
-VLT/SPHERE primary module
-'''
-
 import os
 import glob
 import shutil
 import math
+import logging
 import numpy as np
 import pandas as pd
 import xml.etree.ElementTree as etree
@@ -16,6 +13,8 @@
 from astropy.io import fits
 from astropy.time import Time
 
+_log = logging.getLogger(__name__)
+
 
 def process_mainFiles(mainFiles, files, silent=True):
     '''
@@ -38,7 +37,7 @@ def process_mainFiles(mainFiles, files, silent=True):
         files.append(fname)
 
         if not silent:
-            print(' ==> {0}'.format(fname))
+            _log.info(' ==> {0}'.format(fname))
 
 
 def process_association(tree, files, silent=True):
@@ -59,25 +58,25 @@ def process_association(tree, files, silent=True):
     catg = tree.attrib['category']
 
     if not silent:
-        print(catg)
+        _log.info(catg)
 
     # skip unused calibrations
     if (catg == 'IFS_STD_ASTROM') or (catg == 'IFS_STD_PHOT') or \
        (catg == 'IFS_DIST') or (catg == 'IRD_CLI_PHOT') or \
        (catg == 'IRD_DIST'):
         if not silent:
-            print(' ==> skipping')
+            _log.info(' ==> skipping')
         return
 
     # process differently mainFiles from associatedFiles
     for elt in tree:
         if elt.tag == 'mainFiles':
             if not silent:
-                print('mainFiles')
+                _log.info('mainFiles')
             process_mainFiles(elt, files)
         elif elt.tag == 'associatedFiles':
             if not silent:
-                print('associatedFiles')
+                _log.info('associatedFiles')
             for nelt in elt:
                 process_association(nelt, files, silent=silent)
 
@@ -114,15 +113,15 @@ def sort_files_from_xml(path, silent=True):
 
     xml_files = glob.glob(path+'*.xml')
 
-    print('Sort data based on XML files (ESO automated calibration selection)')
-    print(' ==> {0} XML files\n'.format(len(xml_files)))
+    _log.info('Sort data based on XML files (ESO automated calibration selection)')
+    _log.info(' ==> {0} XML files\n'.format(len(xml_files)))
 
     # sort files
     for file in xml_files:
         tree = etree.parse(file)
         root = tree.getroot()
 
-        print(os.path.basename(file))
+        _log.info(os.path.basename(file))
 
         # process only IFS and IRDIS science data
         catg = root.attrib['category']
@@ -138,7 +137,7 @@ def sort_files_from_xml(path, silent=True):
         filename = filename.replace(':', '_')
 
         if not os.path.exists(path+filename+'.fits'):
-            print(' ==> file {} does not exsist. Skipping'.format(filename))
+            _log.info(' ==> file {} does not exist. Skipping.'.format(filename))
             continue
 
         hdr = fits.getheader(path+filename+'.fits')
@@ -184,7 +183,7 @@ def sort_files_from_xml(path, silent=True):
 
             # check if file actually exists
             if not os.path.exists(fpath):
-                print(' ==> file {} does not exist. 
Skipping.'.format(fpath)) + _log.info(' ==> file {} does not exist. Skipping.'.format(fpath)) continue # copy if needed @@ -194,10 +193,9 @@ def sort_files_from_xml(path, silent=True): # print status if not silent: - print('{0} - id={1}'.format(target, obs_id)) - print(' ==> found {0} files'.format(len(files))) - print(' ==> copied to {0}'.format(target_path)) - print() + _log.info('{0} - id={1}'.format(target, obs_id)) + _log.info(' ==> found {0} files'.format(len(files))) + _log.info(' ==> copied to {0}'.format(target_path)) # move all files path_new = os.path.join(path, 'all_files') @@ -237,8 +235,8 @@ def sort_files_from_fits(path, silent=True): fits_files = glob.glob(path+'*.fits') - print('Sort data based on FITS files') - print(' ==> {0} FITS files\n'.format(len(fits_files))) + _log.info('Sort data based on FITS files') + _log.info(' ==> {0} FITS files\n'.format(len(fits_files))) # sort files for file in fits_files: @@ -277,9 +275,8 @@ def sort_files_from_fits(path, silent=True): # print status if not silent: - print('{0} - id={1}'.format(target, obs_id)) - print(' ==> copied to {0}'.format(target_path)) - print() + _log.info('{0} - id={1}'.format(target, obs_id)) + _log.info(' ==> copied to {0}'.format(target_path)) # move all files path_new = os.path.join(path, 'unsorted_files') @@ -417,11 +414,9 @@ def init_reduction(self): ''' for r in self._reductions: - print() - print('*') - print('* Initialization of {0} reduction at path {1}'.format(r.instrument, r.path)) - print('*') - print() + _log.info('*') + _log.info('* Initialization of {0} reduction at path {1}'.format(r.instrument, r.path)) + _log.info('*') r.init_reduction() @@ -432,11 +427,9 @@ def create_static_calibrations(self): ''' for r in self._reductions: - print() - print('*') - print('* Static calibrations for {0} at path {1}'.format(r.instrument, r.path)) - print('*') - print() + _log.info('*') + _log.info('* Static calibrations for {0} at path {1}'.format(r.instrument, r.path)) + _log.info('*') r.create_static_calibrations() @@ -447,11 +440,9 @@ def preprocess_science(self): ''' for r in self._reductions: - print() - print('*') - print('* Pre-process data for {0} at path {1}'.format(r.instrument, r.path)) - print('*') - print() + _log.info('*') + _log.info('* Pre-process data for {0} at path {1}'.format(r.instrument, r.path)) + _log.info('*') r.preprocess_science() @@ -463,11 +454,9 @@ def process_science(self): ''' for r in self._reductions: - print() - print('*') - print('* Process data for {0} at path {1}'.format(r.instrument, r.path)) - print('*') - print() + _log.info('*') + _log.info('* Process data for {0} at path {1}'.format(r.instrument, r.path)) + _log.info('*') r.process_science() @@ -479,11 +468,9 @@ def clean(self): ''' for r in self._reductions: - print() - print('*') - print('* Clean {0} reduction at path {1}'.format(r.instrument, r.path)) - print('*') - print() + _log.info('*') + _log.info('* Clean {0} reduction at path {1}'.format(r.instrument, r.path)) + _log.info('*') r.clean() @@ -495,11 +482,9 @@ def full_reduction(self): ''' for r in self._reductions: - print() - print('*') - print('* Full {0} reduction at path {1}'.format(r.instrument, r.path)) - print('*') - print() + _log.info('*') + _log.info('* Full {0} reduction at path {1}'.format(r.instrument, r.path)) + _log.info('*') r.full_reduction() @@ -512,7 +497,7 @@ def _create_reductions(self): Detect and create valid reductions in path ''' - print('Create reductions from available data') + _log.info('Create reductions from available data') wpath 
= os.walk(self._path)
 
         for w in wpath:
@@ -534,7 +519,7 @@ def _create_reductions(self):
                     if mode == 'imaging':
                         reduction = IRDIS.ImagingReduction(reduction_path)
                     elif mode == 'polar':
-                        print('Warning: IRDIS DPI not supported yet')
+                        _log.warning('IRDIS DPI not supported yet')
                     elif mode == 'spectro':
                         reduction = IRDIS.SpectroReduction(reduction_path)
 
@@ -548,9 +533,8 @@ def _create_reductions(self):
                 except:
                     continue
 
-                print(reduction_path)
-                print(' ==> {0}, {1} files'.format(instrument, len(fits_files)))
-                print()
+                _log.info(reduction_path)
+                _log.info(' ==> {0}, {1} files'.format(instrument, len(fits_files)))
 
         # merge all reductions into a single list
         self._reductions = self._IFS_reductions + self._IRDIS_reductions
diff --git a/vltpf/__init__.py b/vltpf/__init__.py
index 563ee73..6b5a711 100644
--- a/vltpf/__init__.py
+++ b/vltpf/__init__.py
@@ -1,3 +1,24 @@
 __author__ = 'avigan'
-__copyright__ = 'Copyright (C) 2017 Arthur Vigan'
+__copyright__ = 'Copyright (C) 2017-2019 Arthur Vigan'
 __license__ = 'MIT'
+
+import logging
+
+# define logging format for module
+logging.basicConfig(format='[%(levelname)-7s] %(message)s')
+_log = logging.getLogger(__name__)
+_log.setLevel(logging.DEBUG)
+_log.info('VLTPF init')
+
+
+def set_loglevel(level):
+    '''
+    Set the logging level for the module
+
+    Parameters
+    ----------
+    level : {"notset", "debug", "info", "warning", "error", "critical"}
+        The log level of the handler
+    '''
+
+    _log.setLevel(level.upper())
diff --git a/vltpf/toolbox.py b/vltpf/toolbox.py
index 11c403a..baf8341 100644
--- a/vltpf/toolbox.py
+++ b/vltpf/toolbox.py
@@ -7,6 +7,7 @@
 import matplotlib.pyplot as plt
 import matplotlib.patches as patches
 import matplotlib.colors as colors
+import logging
 
 import vltpf.utils.aperture as aperture
 
@@ -17,6 +18,8 @@
 
 global_cmap = 'inferno'
 
+_log = logging.getLogger(__name__)
+
 
 def check_recipe_execution(recipe_execution, recipe_name, recipe_requirements):
     '''
@@ -48,9 +51,7 @@ def check_recipe_execution(recipe_execution, recipe_name, recipe_requirements):
             missing.append(r)
 
     if not execute_recipe:
-        raise ValueError('{0} cannot executed because some files have been '.format(recipe_name) +
-                         'removed from the reduction directory ' +
-                         'or the following recipes have not been executed: {0}. '.format(missing))
+        raise ValueError('{0} cannot be executed because some files have been removed from the reduction directory or the following recipes have not been executed: {1}. 
'.format(recipe_name, missing))
 
     return execute_recipe
 
@@ -298,7 +299,7 @@ def collapse_frames_info(finfo, fname, collapse_type, coadd_value=2):
         Collapsed data frame
     '''
 
-    print(' ==> collapse frames information')
+    _log.info(' ==> collapse frames information')
 
     nfinfo = None
     if collapse_type == 'none':
@@ -439,7 +440,7 @@ def star_centers_from_PSF_img_cube(cube, wave, pixel, save_path=None):
     # loop over images
     img_centers = np.zeros((nwave, 2))
     for idx, (wave, img) in enumerate(zip(wave, cube)):
-        print(' wave {0:2d}/{1:2d} ({2:.0f} nm)'.format(idx+1, nwave, wave))
+        _log.info(' wave {0:2d}/{1:2d} ({2:.0f} nm)'.format(idx+1, nwave, wave))
 
         # remove any NaN
         img = np.nan_to_num(img)
@@ -528,7 +529,7 @@ def star_centers_from_PSF_lss_cube(cube, wave_cube, pixel, save_path=None):
     nimg = len(cube)
     psf_centers = np.full((1024, nimg), np.nan)
     for fidx, img in enumerate(cube):
-        print(' field {0:2d}/{1:2d}'.format(fidx+1, nimg))
+        _log.info(' field {0:2d}/{1:2d}'.format(fidx+1, nimg))
 
         # remove any NaN
         img = np.nan_to_num(cube[fidx])
@@ -677,7 +678,7 @@ def star_centers_from_waffle_img_cube(cube_cen, wave, waffle_orientation, center
     spot_dist = np.zeros((nwave, 6))
     img_centers = np.zeros((nwave, 2))
     for idx, (wave, img) in enumerate(zip(wave, cube_cen)):
-        print(' wave {0:2d}/{1:2d} ({2:.0f} nm)'.format(idx+1, nwave, wave))
+        _log.info(' wave {0:2d}/{1:2d} ({2:.0f} nm)'.format(idx+1, nwave, wave))
 
         # remove any NaN
         img = np.nan_to_num(img)
@@ -865,14 +866,14 @@ def star_centers_from_waffle_lss_cube(cube_cen, cube_sci, wave_cube, center_gues
 
     # subtract science cube if provided
     if cube_sci is not None:
-        print(' ==> subtract science cube')
+        _log.info(' ==> subtract science cube')
         cube_cen -= cube_sci
 
     spot_centers = np.full((1024, 2, 2), np.nan)
     spot_dist = np.full((1024, nimg), np.nan)
     img_centers = np.full((1024, nimg), np.nan)
     for fidx, img in enumerate(cube_cen):
-        print(' field {0:2d}/{1:2d}'.format(fidx+1, nimg))
+        _log.info(' field {0:2d}/{1:2d}'.format(fidx+1, nimg))
 
         # remove any NaN
         img = np.nan_to_num(cube_cen[fidx])

From 3aca6100f376acc2761d3e83f019ac7157e5d7ad Mon Sep 17 00:00:00 2001
From: Arthur Vigan
Date: Fri, 6 Sep 2019 10:18:07 +0200
Subject: [PATCH 059/101] Implement a first object-level logging

This solution does not seem to work properly as it does not log the
messages from other external modules such as toolbox.py
---
 examples/ifs_reduction.py |   1 -
 vltpf/IFS.py              | 212 +++++++++++++++++++++-----------------
 vltpf/__init__.py         |   2 +-
 3 files changed, 116 insertions(+), 99 deletions(-)

diff --git a/examples/ifs_reduction.py b/examples/ifs_reduction.py
index 9fe2ec0..493be0c 100644
--- a/examples/ifs_reduction.py
+++ b/examples/ifs_reduction.py
@@ -11,7 +11,6 @@
 reduction.config['preproc_collapse_science'] = True
 reduction.config['preproc_collapse_type'] = 'coadd'
 reduction.config['preproc_coadd_value'] = 2
-reduction.config['clean'] = True
 
 reduction.show_config()
 
 #%% reduction
diff --git a/vltpf/IFS.py b/vltpf/IFS.py
index 3a7e246..09d00ce 100644
--- a/vltpf/IFS.py
+++ b/vltpf/IFS.py
@@ -364,7 +364,7 @@ class Reduction(object):
     # Constructor
     ##################################################
 
-    def __init__(self, path):
+    def __init__(self, path, log_level='info'):
         '''
         Initialization of the IFSReduction
 
@@ -372,8 +372,11 @@ def __init__(self, path):
         ----------
         path : str
             Path to the directory containing the dataset
-        '''
 
+        log_level : {'debug', 'info', 'warning', 'error', 'critical'}
+            The log level of the handler
+        '''
+
         # expand path
         path = Path(path).expanduser().resolve()
 
@@ -389,6 +392,21 @@ def __init__(self, path):
         # instrument mode
         self._mode = 'Unknown'
 
+        # configure logging
+        logger = logging.getLogger(str(path))
+        logger.setLevel(log_level.upper())
+        if logger.hasHandlers():
+            for hdlr in list(logger.handlers):
+                logger.removeHandler(hdlr)
+
+        handler = logging.FileHandler(self._path.products / 'reduction.log', mode='w', encoding='utf-8')
+        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)8s - %(message)s')
+        formatter.default_msec_format = '%s.%03d'
+        handler.setFormatter(formatter)
+        logger.addHandler(handler)
+
+        self._logger = logger
+
         # configuration
         configfile = Path(vltpf.__file__).parent / 'instruments' / '{}.ini'.format(self._instrument)
         config = configparser.ConfigParser()
@@ -764,7 +782,7 @@ def sort_files(self):
             Data frame with the information on raw files
         '''
 
-        _log.info('Sorting raw files')
+        self._logger.info('Sorting raw files')
 
         # parameters
         path = self._path
@@ -776,7 +794,7 @@ def sort_files(self):
         if len(files) == 0:
             raise ValueError('No raw FITS files in reduction path')
 
-        _log.info(' * found {0} FITS files in {1}'.format(len(files), path.raw))
+        self._logger.info(' * found {0} FITS files in {1}'.format(len(files), path.raw))
 
         # read list of keywords
         keywords = []
@@ -848,7 +866,7 @@ def sort_frames(self):
             A data frame with the information on all frames
         '''
 
-        _log.info('Extracting frames information')
+        self._logger.info('Extracting frames information')
 
         # check if recipe can be executed
         toolbox.check_recipe_execution(self._recipe_execution, 'sort_frames', self.recipe_requirements)
@@ -921,19 +939,19 @@ def sort_frames(self):
 
         date = str(cinfo['DATE'][0])[0:10]
 
-        _log.info(' * Object: {0}'.format(cinfo['OBJECT'][0]))
-        _log.info(' * RA / DEC: {0} / {1}'.format(RA, DEC))
-        _log.info(' * Date: {0}'.format(date))
-        _log.info(' * Instrument: {0}'.format(cinfo['SEQ ARM'][0]))
-        _log.info(' * Derotator: {0}'.format(cinfo['INS4 DROT2 MODE'][0]))
-        _log.info(' * Coronagraph: {0}'.format(cinfo['INS COMB ICOR'][0]))
-        _log.info(' * Mode: {0}'.format(cinfo['INS1 MODE'][0]))
-        _log.info(' * Filter: {0}'.format(cinfo['INS2 COMB IFS'][0]))
-        _log.info(' * DIT: {0:.2f} sec'.format(cinfo['DET SEQ1 DIT'][0]))
-        _log.info(' * NDIT: {0:.0f}'.format(cinfo['DET NDIT'][0]))
-        _log.info(' * Texp: {0:.2f} min'.format(cinfo['DET SEQ1 DIT'].sum()/60))
-        _log.info(' * PA: {0:.2f}° ==> {1:.2f}° = {2:.2f}°'.format(pa_start, pa_end, np.abs(pa_end-pa_start)))
-        _log.info(' * POSANG: {0}'.format(', '.join(['{:.2f}°'.format(p) for p in posang])))
+        self._logger.info(' * Object: {0}'.format(cinfo['OBJECT'][0]))
+        self._logger.info(' * RA / DEC: {0} / {1}'.format(RA, DEC))
+        self._logger.info(' * Date: {0}'.format(date))
+        self._logger.info(' * Instrument: {0}'.format(cinfo['SEQ ARM'][0]))
+        self._logger.info(' * Derotator: {0}'.format(cinfo['INS4 DROT2 MODE'][0]))
+        self._logger.info(' * Coronagraph: {0}'.format(cinfo['INS COMB ICOR'][0]))
+        self._logger.info(' * Mode: {0}'.format(cinfo['INS1 MODE'][0]))
+        self._logger.info(' * Filter: {0}'.format(cinfo['INS2 COMB IFS'][0]))
+        self._logger.info(' * DIT: {0:.2f} sec'.format(cinfo['DET SEQ1 DIT'][0]))
+        self._logger.info(' * NDIT: {0:.0f}'.format(cinfo['DET NDIT'][0]))
+        self._logger.info(' * Texp: {0:.2f} min'.format(cinfo['DET SEQ1 DIT'].sum()/60))
+        self._logger.info(' * PA: {0:.2f}° ==> {1:.2f}° = {2:.2f}°'.format(pa_start, pa_end, np.abs(pa_end-pa_start)))
+        self._logger.info(' * POSANG: {0}'.format(', '.join(['{:.2f}°'.format(p) for p in posang])))
 
 
@@ -947,7 +965,7 @@ def 
check_files_association(self): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'check_files_association', self.recipe_requirements) - _log.info('Performing file association for calibrations') + self._logger.info('Performing file association for calibrations') # parameters path = self._path @@ -986,10 +1004,10 @@ def check_files_association(self): cfiles = calibs[(calibs['DPR TYPE'] == 'FLAT,LAMP') & (calibs['INS2 COMB IFS'] == 'CAL_BB_2_{0}'.format(mode_short))] if len(cfiles) < 2: error_flag += 1 - _log.error(' * there should be 2 flat files for white lamp, found {0}'.format(len(cfiles))) + self._logger.error(' * there should be 2 flat files for white lamp, found {0}'.format(len(cfiles))) elif len(cfiles) > 2: warning_flag += 1 - _log.warning(' * there should be 2 flat files for white lamp, found {0}. Using the closest from science.'.format(len(cfiles))) + self._logger.warning(' * there should be 2 flat files for white lamp, found {0}. Using the closest from science.'.format(len(cfiles))) # find the two closest to science files sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE')] @@ -1004,10 +1022,10 @@ def check_files_association(self): cfiles = calibs[(calibs['DPR TYPE'] == 'FLAT,LAMP') & (calibs['INS2 COMB IFS'] == 'CAL_NB1_1_{0}'.format(mode_short))] if len(cfiles) < 2: error_flag += 1 - _log.error(' * there should be 2 flat files for 1020 nm filter, found {0}'.format(len(cfiles))) + self._logger.error(' * there should be 2 flat files for 1020 nm filter, found {0}'.format(len(cfiles))) elif len(cfiles) > 2: warning_flag += 1 - _log.warning(' * there should be 2 flat files for 1020 nm filter, found {0}. Using the closest from science.'.format(len(cfiles))) + self._logger.warning(' * there should be 2 flat files for 1020 nm filter, found {0}. Using the closest from science.'.format(len(cfiles))) # find the two closest to science files sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE')] @@ -1022,10 +1040,10 @@ def check_files_association(self): cfiles = calibs[(calibs['DPR TYPE'] == 'FLAT,LAMP') & (calibs['INS2 COMB IFS'] == 'CAL_NB2_1_{0}'.format(mode_short))] if len(cfiles) < 2: error_flag += 1 - _log.error(' * there should be 2 flat files for 1230 nm filter, found {0}'.format(len(cfiles))) + self._logger.error(' * there should be 2 flat files for 1230 nm filter, found {0}'.format(len(cfiles))) elif len(cfiles) > 2: warning_flag += 1 - _log.warning(' * there should be 2 flat files for 1230 nm filter, found {0}. Using the closest from science.'.format(len(cfiles))) + self._logger.warning(' * there should be 2 flat files for 1230 nm filter, found {0}. Using the closest from science.'.format(len(cfiles))) # find the two closest to science files sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE')] @@ -1040,10 +1058,10 @@ def check_files_association(self): cfiles = calibs[(calibs['DPR TYPE'] == 'FLAT,LAMP') & (calibs['INS2 COMB IFS'] == 'CAL_NB3_1_{0}'.format(mode_short))] if len(cfiles) < 2: error_flag += 1 - _log.error(' * there should be 2 flat files for 1300 nm filter, found {0}'.format(len(cfiles))) + self._logger.error(' * there should be 2 flat files for 1300 nm filter, found {0}'.format(len(cfiles))) elif len(cfiles) > 2: warning_flag += 1 - _log.warning(' * there should be 2 flat files for 1300 nm filter, found {0}. Using the closest from science.'.format(len(cfiles))) + self._logger.warning(' * there should be 2 flat files for 1300 nm filter, found {0}. 
Using the closest from science.'.format(len(cfiles))) # find the two closest to science files sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE')] @@ -1059,10 +1077,10 @@ def check_files_association(self): cfiles = calibs[(calibs['DPR TYPE'] == 'FLAT,LAMP') & (calibs['INS2 COMB IFS'] == 'CAL_NB4_2_{0}'.format(mode_short))] if len(cfiles) < 2: error_flag += 1 - _log.error(' * there should be 2 flat files for 1550 nm filter, found {0}'.format(len(cfiles))) + self._logger.error(' * there should be 2 flat files for 1550 nm filter, found {0}'.format(len(cfiles))) elif len(cfiles) > 2: warning_flag += 1 - _log.warning(' * there should be 2 flat files for 1550 nm filter, found {0}. Using the closest from science.'.format(len(cfiles))) + self._logger.warning(' * there should be 2 flat files for 1550 nm filter, found {0}. Using the closest from science.'.format(len(cfiles))) # find the two closest to science files sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE')] @@ -1077,10 +1095,10 @@ def check_files_association(self): cfiles = calibs[(calibs['DPR TYPE'] == 'SPECPOS,LAMP') & (calibs['INS2 COMB IFS'] == mode)] if len(cfiles) == 0: error_flag += 1 - _log.error(' * there should be 1 spectra position file, found none.') + self._logger.error(' * there should be 1 spectra position file, found none.') elif len(cfiles) > 1: warning_flag += 1 - _log.warning(' * there should be 1 spectra position file, found {0}. Using the closest from science.'.format(len(cfiles))) + self._logger.warning(' * there should be 1 spectra position file, found {0}. Using the closest from science.'.format(len(cfiles))) # find the two closest to science files sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE')] @@ -1095,10 +1113,10 @@ def check_files_association(self): cfiles = calibs[(calibs['DPR TYPE'] == 'WAVE,LAMP') & (calibs['INS2 COMB IFS'] == mode)] if len(cfiles) == 0: error_flag += 1 - _log.error(' * there should be 1 wavelength calibration file, found none.') + self._logger.error(' * there should be 1 wavelength calibration file, found none.') elif len(cfiles) > 1: warning_flag += 1 - _log.warning(' * there should be 1 wavelength calibration file, found {0}. Using the closest from science.'.format(len(cfiles))) + self._logger.warning(' * there should be 1 wavelength calibration file, found {0}. Using the closest from science.'.format(len(cfiles))) # find the two closest to science files sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE')] @@ -1113,10 +1131,10 @@ def check_files_association(self): cfiles = calibs[(calibs['DPR TYPE'] == 'FLAT,LAMP') & (calibs['INS2 COMB IFS'] == mode)] if len(cfiles) == 0: error_flag += 1 - _log.error(' * there should be 1 IFU flat file, found none') + self._logger.error(' * there should be 1 IFU flat file, found none') elif len(cfiles) > 1: warning_flag += 1 - _log.warning(' * there should be 1 IFU flat file, found {0}. Using the closest from science.'.format(len(cfiles))) + self._logger.warning(' * there should be 1 IFU flat file, found {0}. Using the closest from science.'.format(len(cfiles))) # find the two closest to science files sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE')] @@ -1132,7 +1150,7 @@ def check_files_association(self): (calibs['DET SEQ1 DIT'].round(2) == 1.65)] if len(cfiles) == 0: error_flag += 1 - _log.info(' * Error: there is no dark/background for the basic calibrations (DIT=1.65 sec). It is mandatory to include one to obtain the best data reduction. 
A single dark/background file is sufficient, and it can easily be downloaded from the ESO archive') + self._logger.info(' * Error: there is no dark/background for the basic calibrations (DIT=1.65 sec). It is mandatory to include one to obtain the best data reduction. A single dark/background file is sufficient, and it can easily be downloaded from the ESO archive') ################################################## # static calibrations that depend on science DIT @@ -1148,16 +1166,16 @@ def check_files_association(self): (calibs['DET SEQ1 DIT'].round(2) == DIT)] if len(cfiles) == 0: warning_flag += 1 - _log.warning(' * there is no dark/background for science files with DIT={0} sec. It is *highly recommended* to include one to obtain the best data reduction. A single dark/background file is sufficient, and it can easily be downloaded from the ESO archive'.format(DIT)) + self._logger.warning(' * there is no dark/background for science files with DIT={0} sec. It is *highly recommended* to include one to obtain the best data reduction. A single dark/background file is sufficient, and it can easily be downloaded from the ESO archive'.format(DIT)) # sky backgrounds cfiles = files_info[(files_info['DPR TYPE'] == 'SKY') & (files_info['DET SEQ1 DIT'].round(2) == DIT)] if len(cfiles) == 0: warning_flag += 1 - _log.warning(' * there is no sky background for science files with DIT={0} sec. Using a sky background instead of an internal instrumental background can usually provide a cleaner data reduction'.format(DIT)) + self._logger.warning(' * there is no sky background for science files with DIT={0} sec. Using a sky background instead of an internal instrumental background can usually provide a cleaner data reduction'.format(DIT)) # error reporting - _log.warning('There are {0} warning(s) and {1} error(s) in the classification of files'.format(warning_flag, error_flag)) + self._logger.warning('There are {0} warning(s) and {1} error(s) in the classification of files'.format(warning_flag, error_flag)) if error_flag: raise ValueError('There is {0} errors that should be solved before proceeding'.format(error_flag)) @@ -1179,7 +1197,7 @@ def sph_ifs_cal_dark(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_cal_dark', self.recipe_requirements) - _log.info('Creating darks and backgrounds') + self._logger.info('Creating darks and backgrounds') # parameters path = self._path @@ -1204,7 +1222,7 @@ def sph_ifs_cal_dark(self, silent=True): if len(cfiles) == 0: continue - _log.info(' * {0} with DIT={1:.2f} sec ({2} files)'.format(ctype, DIT, len(cfiles))) + self._logger.info(' * {0} with DIT={1:.2f} sec ({2} files)'.format(ctype, DIT, len(cfiles))) # create sof sof = path.sof / 'dark_DIT={0:.2f}.sof'.format(DIT) @@ -1285,7 +1303,7 @@ def sph_ifs_cal_detector_flat(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_cal_detector_flat', self.recipe_requirements) - _log.info('Creating flats') + self._logger.info('Creating flats') # parameters path = self._path @@ -1315,7 +1333,7 @@ def sph_ifs_cal_detector_flat(self, silent=True): lamps = [ 5, 1, 2, 3, 4] for wave, comb, lamp in zip(waves, combs, lamps): - _log.info(' * flat for wavelength {0} nm (filter {1}, lamp {2})'.format(wave, comb, lamp)) + self._logger.info(' * flat for wavelength {0} nm (filter {1}, lamp {2})'.format(wave, comb, lamp)) cfiles = calibs[calibs['INS2 COMB IFS'] == '{0}_{1}'.format(comb, mode_short)] files = 
[path.raw / '{}.fits'.format(f) for f in cfiles.index] @@ -1377,7 +1395,7 @@ def sph_ifs_cal_specpos(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_cal_specpos', self.recipe_requirements) - _log.info('Creating specpos') + self._logger.info('Creating specpos') # parameters path = self._path @@ -1463,7 +1481,7 @@ def sph_ifs_cal_wave(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_cal_wave', self.recipe_requirements) - _log.info('Creating wavelength calibration') + self._logger.info('Creating wavelength calibration') # parameters path = self._path @@ -1576,7 +1594,7 @@ def sph_ifs_cal_ifu_flat(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_cal_ifu_flat', self.recipe_requirements) - _log.info('Creating IFU flat') + self._logger.info('Creating IFU flat') # parameters path = self._path @@ -1748,7 +1766,7 @@ def sph_ifs_preprocess_science(self, # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_preprocess_science', self.recipe_requirements) - _log.info('Pre-processing science files') + self._logger.info('Pre-processing science files') # parameters path = self._path @@ -1785,7 +1803,7 @@ def sph_ifs_preprocess_science(self, for DIT in sci_DITs: sfiles = sci_files[sci_files['DET SEQ1 DIT'].round(2) == DIT] - _log.info('{0} files of type {1} with DIT={2} sec'.format(len(sfiles), typ, DIT)) + self._logger.info('{0} files of type {1} with DIT={2} sec'.format(len(sfiles), typ, DIT)) if subtract_background: # look for sky, then background, then darks @@ -1797,11 +1815,11 @@ def sph_ifs_preprocess_science(self, (files_info['DET SEQ1 DIT'].round(2) == DIT)] if len(dfiles) != 0: break - _log.info(' ==> found {0} corresponding {1} file'.format(len(dfiles), d)) + self._logger.info(' ==> found {0} corresponding {1} file'.format(len(dfiles), d)) if len(dfiles) == 0: # issue a warning if absolutely no background is found - _log.warning('No background has been found. Pre-processing will continue but data quality will likely be affected') + self._logger.warning('No background has been found. 
Pre-processing will continue but data quality will likely be affected') bkg = np.zeros((2048, 2048)) elif len(dfiles) == 1: bkg = fits.getdata(path.calib / '{}.fits'.format(dfiles.index[0])) @@ -1814,10 +1832,10 @@ def sph_ifs_preprocess_science(self, # frames_info extract finfo = frames_info.loc[(fname, slice(None)), :] - _log.info(' * file {0}/{1}: {2}, NDIT={3}'.format(idx+1, len(sfiles), fname, len(finfo))) + self._logger.info(' * file {0}/{1}: {2}, NDIT={3}'.format(idx+1, len(sfiles), fname, len(finfo))) # read data - _log.info(' ==> read data') + self._logger.info(' ==> read data') img, hdr = fits.getdata(path.raw / '{}.fits'.format(fname), header=True) # add extra dimension to single images to make cubes @@ -1827,14 +1845,14 @@ def sph_ifs_preprocess_science(self, # collapse if (typ == 'OBJECT,CENTER'): if collapse_center: - _log.info(' ==> collapse: mean ({0} -> 1 frame, 0 dropped)'.format(len(img))) + self._logger.info(' ==> collapse: mean ({0} -> 1 frame, 0 dropped)'.format(len(img))) img = np.mean(img, axis=0, keepdims=True) frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'mean') else: frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'none') elif (typ == 'OBJECT,FLUX'): if collapse_psf: - _log.info(' ==> collapse: mean ({0} -> 1 frame, 0 dropped)'.format(len(img))) + self._logger.info(' ==> collapse: mean ({0} -> 1 frame, 0 dropped)'.format(len(img))) img = np.mean(img, axis=0, keepdims=True) frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'mean') else: @@ -1842,7 +1860,7 @@ def sph_ifs_preprocess_science(self, elif (typ == 'OBJECT'): if collapse_science: if collapse_type == 'mean': - _log.info(' ==> collapse: mean ({0} -> 1 frame, 0 dropped)'.format(len(img))) + self._logger.info(' ==> collapse: mean ({0} -> 1 frame, 0 dropped)'.format(len(img))) img = np.mean(img, axis=0, keepdims=True) frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'mean') @@ -1858,7 +1876,7 @@ def sph_ifs_preprocess_science(self, if coadd_value > NDIT: raise ValueError('coadd_value ({0}) must be < NDIT ({1})'.format(coadd_value, NDIT)) - _log.info(' ==> collapse: coadd by {0} ({1} -> {2} frames, {3} dropped)'.format(coadd_value, NDIT, NDIT_new, dropped)) + self._logger.info(' ==> collapse: coadd by {0} ({1} -> {2} frames, {3} dropped)'.format(coadd_value, NDIT, NDIT_new, dropped)) # coadd frames nimg = np.empty((NDIT_new, 2048, 2048), dtype=img.dtype) @@ -1877,13 +1895,13 @@ def sph_ifs_preprocess_science(self, # background subtraction if subtract_background: - _log.info(' ==> subtract background') + self._logger.info(' ==> subtract background') for f in range(len(img)): img[f] -= bkg # bad pixels correction if fix_badpix: - _log.info(' ==> correct bad pixels') + self._logger.info(' ==> correct bad pixels') for f in range(len(img)): frame = img[f] @@ -1895,7 +1913,7 @@ def sph_ifs_preprocess_science(self, # spectral crosstalk correction if correct_xtalk: - _log.info(' ==> correct spectral crosstalk') + self._logger.info(' ==> correct spectral crosstalk') for f in range(len(img)): frame = img[f] frame = sph_ifs_correct_spectral_xtalk(frame) @@ -1904,7 +1922,7 @@ def sph_ifs_preprocess_science(self, # check prensence of coordinates # if not, warn user and add fake one: it could be internal source data if hdr.get('HIERARCH ESO TEL TARG ALPHA') is None: - _log.warning('No valid coordinates found in header. Adding fake ones to be able to produce (x,y,lambda) datacubes.') + self._logger.warning('No valid coordinates found in header. 
Adding fake ones to be able to produce (x,y,lambda) datacubes.') hdr['HIERARCH ESO TEL TARG ALPHA'] = 120000.0 hdr['HIERARCH ESO TEL TARG DELTA'] = -900000.0 @@ -1940,7 +1958,7 @@ def sph_ifs_preprocess_wave(self): path = self._path files_info = self._files_info - _log.info('Pre-processing wavelength calibration file') + self._logger.info('Pre-processing wavelength calibration file') # bpm bpm_files = files_info[files_info['PRO CATG'] == 'IFS_STATIC_BADPIXELMAP'].index @@ -1961,24 +1979,24 @@ def sph_ifs_preprocess_wave(self): fname = wave_file.index[0] # read data - _log.info(' * {0}'.format(fname)) - _log.info(' ==> read data') + self._logger.info(' * {0}'.format(fname)) + self._logger.info(' ==> read data') img, hdr = fits.getdata(path.raw / '{}.fits'.format(fname), header=True) # collapse - _log.info(' ==> collapse: mean') + self._logger.info(' ==> collapse: mean') img = np.mean(img, axis=0, keepdims=False) # background subtraction - _log.info(' ==> subtract background') + self._logger.info(' ==> subtract background') img -= bkg # bad pixels correction - _log.info(' ==> correct bad pixels') + self._logger.info(' ==> correct bad pixels') img = sph_ifs_fix_badpix(img, bpm) # spectral crosstalk correction - _log.info(' ==> correct spectral crosstalk') + self._logger.info(' ==> correct spectral crosstalk') img = sph_ifs_correct_spectral_xtalk(img) # add fake coordinates @@ -2006,7 +2024,7 @@ def sph_ifs_science_cubes(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_science_cubes', self.recipe_requirements) - _log.info('Creating the (x,y,lambda) science cubes') + self._logger.info('Creating the (x,y,lambda) science cubes') # parameters path = self._path @@ -2028,7 +2046,7 @@ def sph_ifs_science_cubes(self, silent=True): # get list of science files sci_files = sorted(list(path.preproc.glob('*_preproc.fits'))) - _log.info(' * found {0} pre-processed files'.format(len(sci_files))) + self._logger.info(' * found {0} pre-processed files'.format(len(sci_files))) # get list of calibration files bpm_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IFS_STATIC_BADPIXELMAP') & @@ -2086,7 +2104,7 @@ def sph_ifs_science_cubes(self, silent=True): file.close() # esorex parameters - _log.info(' * starting esorex') + self._logger.info(' * starting esorex') args = ['esorex', '--no-checksum=TRUE', '--no-datamd5=TRUE', @@ -2107,10 +2125,10 @@ def sph_ifs_science_cubes(self, silent=True): if proc.returncode != 0: # raise ValueError('esorex process was not successful') - _log.error('esorex was not successful. Trying to process some of the frames...') + self._logger.error('esorex was not successful. 
Trying to process some of the frames...')

         # post-process
-        _log.info(' * post-processing files')
+        self._logger.info(' * post-processing files')
         files = list(path.tmp.glob('*_preproc_*.fits'))
         for f in files:
             # read and save only primary extension
@@ -2153,7 +2171,7 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot=
         # check if recipe can be executed
         toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_wavelength_recalibration', self.recipe_requirements)

-        _log.info('Recalibrating wavelength')
+        self._logger.info('Recalibrating wavelength')

         # parameters
         path = self._path
@@ -2172,7 +2190,7 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot=
         #
         # DRH wavelength
        #
-        _log.info(' * extracting calibrated wavelength')
+        self._logger.info(' * extracting calibrated wavelength')

         # get header of any science file
         science_files = frames_info[frames_info['DPR CATG'] == 'SCIENCE'].index[0]
@@ -2187,12 +2205,12 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot=
         #
         # star center
         #
-        _log.info(' * fitting satelitte spots')
+        self._logger.info(' * fitting satellite spots')

         # get first DIT of first OBJECT,CENTER in the sequence
         starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER']
         if len(starcen_files) == 0:
-            _log.info(' ==> no OBJECT,CENTER file in the data set. Wavelength cannot be recalibrated. The standard wavelength calibrated by the ESO pripeline will be used.')
+            self._logger.info(' ==> no OBJECT,CENTER file in the data set. Wavelength cannot be recalibrated. The standard wavelength calibrated by the ESO pipeline will be used.')
             return

         ifs_mode = starcen_files['INS2 COMB IFS'].values[0]
@@ -2226,7 +2244,7 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot=
         #
         # wavelength recalibration
         #
-        _log.info(' * recalibration')
+        self._logger.info(' * recalibration')

         # find wavelength calibration file name
         wave_file = files_info[np.logical_not(files_info['PROCESSED']) & (files_info['DPR TYPE'] == 'WAVE,LAMP')].index[0]
@@ -2299,10 +2317,10 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot=
         wave_final = np.full(nwave, res.x) * wave_scale

         wave_diff = np.abs(wave_final - wave_drh)*1000
-        _log.info(' ==> difference with calibrated wavelength: min={0:.1f} nm, max={1:.1f} nm'.format(wave_diff.min(), wave_diff.max()))
+        self._logger.info(' ==> difference with calibrated wavelength: min={0:.1f} nm, max={1:.1f} nm'.format(wave_diff.min(), wave_diff.max()))

         # save
-        _log.info(' * saving')
+        self._logger.info(' * saving')
         fits.writeto(path.preproc / 'wavelength_recalibrated.fits', wave_final, overwrite=True)

         #
@@ -2369,7 +2387,7 @@ def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), plot=True):
         # check if recipe can be executed
         toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_star_center', self.recipe_requirements)

-        _log.info('Star centers determination')
+        self._logger.info('Star centers determination')

         # parameters
         path = self._path
@@ -2383,7 +2401,7 @@ def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), plot=True):
         flux_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,FLUX']
         if len(flux_files) != 0:
             for file, idx in flux_files.index:
-                _log.info(' ==> OBJECT,FLUX: {0}'.format(file))
+                self._logger.info(' ==> OBJECT,FLUX: {0}'.format(file))

                 # read data
                 fname = '{0}_DIT{1:03d}_preproc_'.format(file, idx)
@@ -2414,7 +2432,7 @@ def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), 
plot=True): starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] if len(starcen_files) != 0: for file, idx in starcen_files.index: - _log.info(' ==> OBJECT,CENTER: {0}'.format(file)) + self._logger.info(' ==> OBJECT,CENTER: {0}'.format(file)) # read data fname = '{0}_DIT{1:03d}_preproc_'.format(file, idx) @@ -2545,7 +2563,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_combine_data', self.recipe_requirements) - _log.info('Combine science data') + self._logger.info('Combine science data') # parameters path = self._path @@ -2559,7 +2577,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a else: wfile = path.preproc / 'wavelength_default.fits' if wfile.exists(): - _log.warning('Using default wavelength calibration.') + self._logger.warning('Using default wavelength calibration.') wave = fits.getdata(wfile) else: raise FileExistsError('Missing default or recalibrated wavelength calibration. You must first run either sph_ifs_wave_calib or sph_ifs_wavelength_recalibration().') @@ -2567,16 +2585,16 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # max images size if psf_dim > 290: - _log.warning('psf_dim cannot be larger than 290 pix. A value of 290 will be used.') + self._logger.warning('psf_dim cannot be larger than 290 pix. A value of 290 will be used.') psf_dim = 290 if science_dim > 290: - _log.warning('science_dim cannot be larger than 290 pix. A value of 290 will be used.') + self._logger.warning('science_dim cannot be larger than 290 pix. A value of 290 will be used.') science_dim = 290 # centering configuration if coarse_centering: - _log.warning('Images will be coarsely centered without any interpolation. Automatic settings for coarse centering: shift_method=\'roll\', cpix=True, correct_anamorphism=False, save_scaled=False') + self._logger.warning('Images will be coarsely centered without any interpolation. 
Automatic settings for coarse centering: shift_method=\'roll\', cpix=True, correct_anamorphism=False, save_scaled=False') shift_method = 'roll' cpix = True correct_anamorphism = False @@ -2591,7 +2609,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a if manual_center.shape == (2,): manual_center = np.full((nwave, 2), manual_center, dtype=np.float) - _log.warning('Images will be centered using the user-provided center ({},{})'.format(*manual_center[0])) + self._logger.warning('Images will be centered using the user-provided center ({},{})'.format(*manual_center[0])) # # OBJECT,FLUX @@ -2599,7 +2617,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a flux_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,FLUX'] nfiles = len(flux_files) if nfiles != 0: - _log.info(' * OBJECT,FLUX data') + self._logger.info(' * OBJECT,FLUX data') # final arrays psf_cube = np.zeros((nwave, nfiles, psf_dim, psf_dim)) @@ -2616,7 +2634,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # read and combine files for file_idx, (file, idx) in enumerate(flux_files.index): - _log.info(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(flux_files), file, idx)) + self._logger.info(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(flux_files), file, idx)) # read data fname = '{0}_DIT{1:03d}_preproc_'.format(file, idx) @@ -2628,7 +2646,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a if cfile.exists(): centers = fits.getdata(cfile) else: - _log.warning('sph_ifs_star_center() has not been executed. Images will be centered using default center ({},{})'.format(*self._default_center)) + self._logger.warning('sph_ifs_star_center() has not been executed. Images will be centered using default center ({},{})'.format(*self._default_center)) centers = np.full((nwave, 2), self._default_center, dtype=np.float) # make sure we have only integers if user wants coarse centering @@ -2687,7 +2705,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] nfiles = len(starcen_files) if nfiles != 0: - _log.info(' * OBJECT,CENTER data') + self._logger.info(' * OBJECT,CENTER data') # final arrays cen_cube = np.zeros((nwave, nfiles, science_dim, science_dim)) @@ -2704,7 +2722,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # read and combine files for file_idx, (file, idx) in enumerate(starcen_files.index): - _log.info(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(starcen_files), file, idx)) + self._logger.info(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(starcen_files), file, idx)) # read data fname = '{0}_DIT{1:03d}_preproc_'.format(file, idx) @@ -2774,7 +2792,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a object_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT'] nfiles = len(object_files) if nfiles != 0: - _log.info(' * OBJECT data') + self._logger.info(' * OBJECT data') # use manual center if explicitely requested if manual_center is not None: @@ -2787,7 +2805,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # select which CENTER to use starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] if len(starcen_files) == 0: - _log.warning('No OBJECT,CENTER file in the dataset. 
Images will be centered using default center ({},{})'.format(*self._default_center))
                 centers = np.full((nwave, 2), self._default_center, dtype=np.float)
             else:
                 fname = '{0}_DIT{1:03d}_preproc_centers.fits'.format(starcen_files.index.values[0][0], starcen_files.index.values[0][1])
@@ -2796,7 +2814,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a
                 if fpath.exists():
                     centers = fits.getdata(fpath)
                 else:
-                    _log.warning('sph_ifs_star_center() has not been executed. Images will be centered using default center ({},{})'.format(*self._default_center))
+                    self._logger.warning('sph_ifs_star_center() has not been executed. Images will be centered using default center ({},{})'.format(*self._default_center))
                     centers = np.full((nwave, 2), self._default_center, dtype=np.float)

             # make sure we have only integers if user wants coarse centering
@@ -2818,7 +2836,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a

             # read and combine files
             for file_idx, (file, idx) in enumerate(object_files.index):
-                _log.info(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(object_files), file, idx))
+                self._logger.info(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(object_files), file, idx))

                 # read data
                 fname = '{0}_DIT{1:03d}_preproc_'.format(file, idx)
diff --git a/vltpf/__init__.py b/vltpf/__init__.py
index 6b5a711..c975ab7 100644
--- a/vltpf/__init__.py
+++ b/vltpf/__init__.py
@@ -5,7 +5,7 @@
 import logging

 # define logging format for module
-logging.basicConfig(format='[%(levelname)-7s] %(message)s')
+logging.basicConfig(format='[%(name)s - %(levelname)-8s] %(message)s')
 _log = logging.getLogger(__name__)
 _log.setLevel(logging.DEBUG)
 _log.info('VLTPF init')
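The hunks above move every message in the IFS reduction methods off the module-wide _log and onto the object's own self._logger. The isolation this buys rests on a property of Python's logging module: loggers are keyed by name, and each reduction uses its own path as that name. A minimal sketch of the behaviour being relied on (the paths below are hypothetical):

    import logging

    # logging.getLogger() keeps a single logger object per name, so two
    # reductions rooted at different paths get independent loggers
    log_a = logging.getLogger('/data/targetA/IFS')
    log_b = logging.getLogger('/data/targetB/IFS')
    assert log_a is not log_b

    # asking again for the same path returns the very same object, which is
    # why the reduction constructors clear stale handlers before adding theirs
    assert logging.getLogger('/data/targetA/IFS') is log_a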
From a94470117a167bb0ffa27dcc333df8b699a39d02 Mon Sep 17 00:00:00 2001
From: Arthur Vigan
Date: Sun, 8 Sep 2019 20:07:46 +0200
Subject: [PATCH 060/101] Implement per-reduction object logging in IFS

Ticket #63
---
 vltpf/IFS.py      | 27 ++++++++++++++-------------
 vltpf/__init__.py |  2 +-
 vltpf/toolbox.py  | 24 ++++++++++++------------
 3 files changed, 27 insertions(+), 26 deletions(-)

diff --git a/vltpf/IFS.py b/vltpf/IFS.py
index 09d00ce..651a915 100644
--- a/vltpf/IFS.py
+++ b/vltpf/IFS.py
@@ -24,8 +24,6 @@ import vltpf.transmission as transmission
 import vltpf.toolbox as toolbox

-_log = logging.getLogger(__name__)
-


 def compute_detector_flat(raw_flat_files, bpm_files=[], mask_vignetting=True):
     '''
@@ -400,7 +398,7 @@ def __init__(self, path, log_level='info'):
             logger.removeHandler(hdlr)

         handler = logging.FileHandler(self._path.products / 'reduction.log', mode='w', encoding='utf-8')
-        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)8s - %(message)s')
+        formatter = logging.Formatter('%(asctime)s %(levelname)8s %(message)s')
         formatter.default_msec_format = '%s.%03d'
         handler.setFormatter(formatter)
         logger.addHandler(handler)
@@ -1847,23 +1845,23 @@ def sph_ifs_preprocess_science(self,
                     if collapse_center:
                         self._logger.info(' ==> collapse: mean ({0} -> 1 frame, 0 dropped)'.format(len(img)))
                         img = np.mean(img, axis=0, keepdims=True)
-                        frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'mean')
+                        frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'mean', logger=self._logger)
                     else:
-                        frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'none')
+                        frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'none', logger=self._logger)
                 elif (typ == 'OBJECT,FLUX'):
                     if collapse_psf:
                         self._logger.info(' ==> collapse: mean ({0} -> 1 frame, 0 dropped)'.format(len(img)))
                         img = np.mean(img, axis=0, keepdims=True)
-                        frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'mean')
+                        frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'mean', logger=self._logger)
                     else:
-                        frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'none')
+                        frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'none', logger=self._logger)
                 elif (typ == 'OBJECT'):
                     if collapse_science:
                         if collapse_type == 'mean':
                             self._logger.info(' ==> collapse: mean ({0} -> 1 frame, 0 dropped)'.format(len(img)))
                             img = np.mean(img, axis=0, keepdims=True)
-                            frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'mean')
+                            frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'mean', logger=self._logger)
                         elif collapse_type == 'coadd':
                             if (not isinstance(coadd_value, int)) or (coadd_value <= 1):
                                 raise TypeError('coadd_value must be an integer >1')
@@ -1884,11 +1882,11 @@ def sph_ifs_preprocess_science(self,
                                 nimg[f] = np.mean(img[f*coadd_value:(f+1)*coadd_value], axis=0)
                             img = nimg

-                            frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'coadd', coadd_value=coadd_value)
+                            frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'coadd', coadd_value=coadd_value, logger=self._logger)
                         else:
                             raise ValueError('Unknown collapse type {0}'.format(collapse_type))
                     else:
-                        frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'none')
+                        frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'none', logger=self._logger)

             # merge collapse collapsed frames_info
             frames_info_preproc = pd.concat((frames_info_preproc, frames_info_new))
@@ -2235,7 +2233,8 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot=
         spot_center, spot_dist, img_center \
             = toolbox.star_centers_from_waffle_img_cube(cube, wave_drh, waffle_orientation, center_guess,
                                                         pixel, orientation_offset, high_pass=high_pass,
-                                                        center_offset=offset, coro=coro, save_path=save_path)
+                                                        center_offset=offset, coro=coro, save_path=save_path,
+                                                        logger=self._logger)

         # final scaling
         wave_scales = spot_dist / np.full((nwave, 6), spot_dist[0])
@@ -2423,7 +2422,8 @@ def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), plot=True):
                     save_path = path.products / '{}PSF_fitting.pdf'.format(fname)
                 else:
                     save_path = None
-                img_center = toolbox.star_centers_from_PSF_img_cube(cube, wave_drh, pixel, save_path=save_path)
+                img_center = toolbox.star_centers_from_PSF_img_cube(cube, wave_drh, pixel,
+                                                                    save_path=save_path, logger=self._logger)

                 # save
                 fits.writeto(path.preproc / '{}centers.fits'.format(fname), img_center, overwrite=True)
@@ -2453,7 +2453,8 @@ def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), plot=True):
                 spot_center, spot_dist, img_center \
                     = toolbox.star_centers_from_waffle_img_cube(cube, wave_drh, waffle_orientation, center_guess,
                                                                 pixel, orientation_offset, high_pass=high_pass,
-                                                                center_offset=offset, save_path=save_path)
+                                                                center_offset=offset, save_path=save_path,
+                                                                logger=self._logger)

                 # save
                 fits.writeto(path.preproc / '{}centers.fits'.format(fname), img_center, overwrite=True)
diff --git a/vltpf/__init__.py b/vltpf/__init__.py
index c975ab7..563cd92 100644
--- a/vltpf/__init__.py
+++ b/vltpf/__init__.py
@@ -5,7 +5,7 @@
 import logging

 # define logging format for module
-logging.basicConfig(format='[%(name)s - %(levelname)-8s] %(message)s')
+logging.basicConfig(format='[%(levelname)8s] %(message)s')
 _log = 
logging.getLogger(__name__) _log.setLevel(logging.DEBUG) _log.info('VLTPF init') diff --git a/vltpf/toolbox.py b/vltpf/toolbox.py index baf8341..a5862c7 100644 --- a/vltpf/toolbox.py +++ b/vltpf/toolbox.py @@ -21,7 +21,7 @@ _log = logging.getLogger(__name__) -def check_recipe_execution(recipe_execution, recipe_name, recipe_requirements): +def check_recipe_execution(recipe_execution, recipe_name, recipe_requirements, logger=_log): ''' Check execution of previous recipes for a given recipe. @@ -273,7 +273,7 @@ def compute_bad_pixel_map(bpm_files, dtype=np.uint8): return bpm -def collapse_frames_info(finfo, fname, collapse_type, coadd_value=2): +def collapse_frames_info(finfo, fname, collapse_type, coadd_value=2, logger=_log): ''' Collapse frame info to match the collapse operated on the data @@ -299,7 +299,7 @@ def collapse_frames_info(finfo, fname, collapse_type, coadd_value=2): Collapsed data frame ''' - _log.info(' ==> collapse frames information') + logger.info(' ==> collapse frames information') nfinfo = None if collapse_type == 'none': @@ -400,7 +400,7 @@ def lines_intersect(a1, a2, b1, b2): return (num / denom)*db + b1 -def star_centers_from_PSF_img_cube(cube, wave, pixel, save_path=None): +def star_centers_from_PSF_img_cube(cube, wave, pixel, save_path=None, logger=_log): ''' Compute star center from PSF images (IRDIS CI, IRDIS DBI, IFS) @@ -440,7 +440,7 @@ def star_centers_from_PSF_img_cube(cube, wave, pixel, save_path=None): # loop over images img_centers = np.zeros((nwave, 2)) for idx, (wave, img) in enumerate(zip(wave, cube)): - _log.info(' wave {0:2d}/{1:2d} ({2:.0f} nm)'.format(idx+1, nwave, wave)) + logger.info(' wave {0:2d}/{1:2d} ({2:.0f} nm)'.format(idx+1, nwave, wave)) # remove any NaN img = np.nan_to_num(img) @@ -492,7 +492,7 @@ def star_centers_from_PSF_img_cube(cube, wave, pixel, save_path=None): return img_centers -def star_centers_from_PSF_lss_cube(cube, wave_cube, pixel, save_path=None): +def star_centers_from_PSF_lss_cube(cube, wave_cube, pixel, save_path=None, logger=_log): ''' Compute star center from PSF LSS spectra (IRDIS LSS) @@ -529,7 +529,7 @@ def star_centers_from_PSF_lss_cube(cube, wave_cube, pixel, save_path=None): nimg = len(cube) psf_centers = np.full((1024, nimg), np.nan) for fidx, img in enumerate(cube): - _log.info(' field {0:2d}/{1:2d}'.format(fidx+1, nimg)) + logger.info(' field {0:2d}/{1:2d}'.format(fidx+1, nimg)) # remove any NaN img = np.nan_to_num(cube[fidx]) @@ -595,7 +595,7 @@ def star_centers_from_PSF_lss_cube(cube, wave_cube, pixel, save_path=None): def star_centers_from_waffle_img_cube(cube_cen, wave, waffle_orientation, center_guess, pixel, orientation_offset, high_pass=False, center_offset=(0, 0), - smooth=0, coro=True, save_path=None): + smooth=0, coro=True, save_path=None, logger=_log): ''' Compute star center from waffle images (IRDIS CI, IRDIS DBI, IFS) @@ -678,7 +678,7 @@ def star_centers_from_waffle_img_cube(cube_cen, wave, waffle_orientation, center spot_dist = np.zeros((nwave, 6)) img_centers = np.zeros((nwave, 2)) for idx, (wave, img) in enumerate(zip(wave, cube_cen)): - _log.info(' wave {0:2d}/{1:2d} ({2:.0f} nm)'.format(idx+1, nwave, wave)) + logger.info(' wave {0:2d}/{1:2d} ({2:.0f} nm)'.format(idx+1, nwave, wave)) # remove any NaN img = np.nan_to_num(img) @@ -812,7 +812,7 @@ def star_centers_from_waffle_img_cube(cube_cen, wave, waffle_orientation, center def star_centers_from_waffle_lss_cube(cube_cen, cube_sci, wave_cube, center_guess, pixel, high_pass=False, - save_path=None): + save_path=None, logger=_log): ''' Compute star 
center from waffle LSS spectra (IRDIS LSS)
@@ -866,14 +866,14 @@ def star_centers_from_waffle_lss_cube(cube_cen, cube_sci, wave_cube, center_gues

     # subtract science cube if provided
     if cube_sci is not None:
-        _log.info(' ==> subtract science cube')
+        logger.info(' ==> subtract science cube')
         cube_cen -= cube_sci

     spot_centers = np.full((1024, 2, 2), np.nan)
     spot_dist = np.full((1024, nimg), np.nan)
     img_centers = np.full((1024, nimg), np.nan)
     for fidx, img in enumerate(cube_cen):
-        _log.info(' field {0:2d}/{1:2d}'.format(fidx+1, nimg))
+        logger.info(' field {0:2d}/{1:2d}'.format(fidx+1, nimg))

         # remove any NaN
         img = np.nan_to_num(cube_cen[fidx])
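Condensed from the constructor context visible in this patch, the logging setup follows one pattern in all three reduction classes: a logger named after the reduction path, a file handler writing reduction.log, and a compact formatter. A stand-alone sketch under those assumptions (make_reduction_logger is a hypothetical helper; in the pipeline the equivalent code runs inline in each reduction's __init__ and writes under path.products):

    import logging
    from pathlib import Path

    def make_reduction_logger(path, log_level='info'):
        # one named logger per reduction, keyed by its path
        logger = logging.getLogger(str(path))
        logger.setLevel(log_level.upper())

        # drop handlers left over from a previous run on the same path
        for hdlr in list(logger.handlers):
            logger.removeHandler(hdlr)

        # every message for this reduction ends up in its own log file
        handler = logging.FileHandler(Path(path) / 'reduction.log', mode='w', encoding='utf-8')
        formatter = logging.Formatter('%(asctime)s %(levelname)8s %(message)s')
        formatter.default_msec_format = '%s.%03d'
        handler.setFormatter(formatter)
        logger.addHandler(handler)

        return logger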
From 5a3568861294f62be29e09ff75ee0b0f4b3a225a Mon Sep 17 00:00:00 2001
From: Arthur Vigan
Date: Mon, 9 Sep 2019 10:08:59 +0200
Subject: [PATCH 061/101] Implement per-reduction object logging in IRDIS imaging

Ticket #63
---
 vltpf/IRDIS/ImagingReduction.py | 153 ++++++++++++++++++--------------
 1 file changed, 85 insertions(+), 68 deletions(-)

diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py
index 2b2ab61..daa3281 100644
--- a/vltpf/IRDIS/ImagingReduction.py
+++ b/vltpf/IRDIS/ImagingReduction.py
@@ -19,8 +19,6 @@ import vltpf.transmission as transmission
 import vltpf.toolbox as toolbox

-_log = logging.getLogger(__name__)
-


 class ImagingReduction(object):
     '''
@@ -50,7 +48,7 @@ class ImagingReduction(object):
     # Constructor
     ##################################################

-    def __init__(self, path):
+    def __init__(self, path, log_level='info'):
         '''Initialization of the ImagingReduction instances

         Parameters
         ----------
         path : str
             Path to the directory containing the dataset

+        log_level : {'debug', 'info', 'warning', 'error', 'critical'}
+            The log level of the handler
         '''

         # expand path
@@ -75,6 +75,21 @@ def __init__(self, path):
         # instrument mode
         self._mode = 'Unknown'

+        # configure logging
+        logger = logging.getLogger(str(path))
+        logger.setLevel(log_level.upper())
+        if logger.hasHandlers():
+            for hdlr in logger.handlers:
+                logger.removeHandler(hdlr)
+
+        handler = logging.FileHandler(self._path.products / 'reduction.log', mode='w', encoding='utf-8')
+        formatter = logging.Formatter('%(asctime)s %(levelname)8s %(message)s')
+        formatter.default_msec_format = '%s.%03d'
+        handler.setFormatter(formatter)
+        logger.addHandler(handler)
+
+        self._logger = logger
+
         # configuration
         configfile = Path(vltpf.__file__).parent / 'instruments' / '{}.ini'.format(self._instrument)
         config = configparser.ConfigParser()
@@ -416,7 +431,7 @@ def sort_files(self):
             Data frame with the information on raw files
         '''

-        _log.info('Sorting raw files')
+        self._logger.info('Sorting raw files')

         # parameters
         path = self._path
@@ -428,7 +443,7 @@ def sort_files(self):

         if len(files) == 0:
             raise ValueError('No raw FITS files in reduction path')

-        _log.info(' * found {0} FITS files in {1}'.format(len(files), path.raw))
+        self._logger.info(' * found {0} FITS files in {1}'.format(len(files), path.raw))

         # read list of keywords
         keywords = []
@@ -500,7 +515,7 @@ def sort_frames(self):
             A data frame with the information on all frames
         '''

-        _log.info('Extracting frames information')
+        self._logger.info('Extracting frames information')

         # check if recipe can be executed
         toolbox.check_recipe_execution(self._recipe_execution, 'sort_frames', self.recipe_requirements)
@@ -573,19 +588,19 @@ def sort_frames(self):

         date = str(cinfo['DATE'][0])[0:10]

-        _log.info(' * Object: {0}'.format(cinfo['OBJECT'][0]))
-        _log.info(' * RA / DEC: {0} / {1}'.format(RA, DEC))
-        _log.info(' * Date: {0}'.format(date))
-        _log.info(' * Instrument: {0}'.format(cinfo['SEQ ARM'][0]))
-        _log.info(' * Derotator: {0}'.format(cinfo['INS4 DROT2 MODE'][0]))
-        _log.info(' * Coronagraph: {0}'.format(cinfo['INS COMB ICOR'][0]))
-        _log.info(' * Mode: {0}'.format(cinfo['INS1 MODE'][0]))
-        _log.info(' * Filter: {0}'.format(cinfo['INS COMB IFLT'][0]))
-        _log.info(' * DIT: {0:.2f} sec'.format(cinfo['DET SEQ1 DIT'][0]))
-        _log.info(' * NDIT: {0:.0f}'.format(cinfo['DET NDIT'][0]))
-        _log.info(' * Texp: {0:.2f} min'.format(cinfo['DET SEQ1 DIT'].sum()/60))
-        _log.info(' * PA: {0:.2f}° ==> {1:.2f}° = {2:.2f}°'.format(pa_start, pa_end, np.abs(pa_end-pa_start)))
-        _log.info(' * POSANG: {0}'.format(', '.join(['{:.2f}°'.format(p) for p in posang])))
+        self._logger.info(' * Object: {0}'.format(cinfo['OBJECT'][0]))
+        self._logger.info(' * RA / DEC: {0} / {1}'.format(RA, DEC))
+        self._logger.info(' * Date: {0}'.format(date))
+        self._logger.info(' * Instrument: {0}'.format(cinfo['SEQ ARM'][0]))
+        self._logger.info(' * Derotator: {0}'.format(cinfo['INS4 DROT2 MODE'][0]))
+        self._logger.info(' * Coronagraph: {0}'.format(cinfo['INS COMB ICOR'][0]))
+        self._logger.info(' * Mode: {0}'.format(cinfo['INS1 MODE'][0]))
+        self._logger.info(' * Filter: {0}'.format(cinfo['INS COMB IFLT'][0]))
+        self._logger.info(' * DIT: {0:.2f} sec'.format(cinfo['DET SEQ1 DIT'][0]))
+        self._logger.info(' * NDIT: {0:.0f}'.format(cinfo['DET NDIT'][0]))
+        self._logger.info(' * Texp: {0:.2f} min'.format(cinfo['DET SEQ1 DIT'].sum()/60))
+        self._logger.info(' * PA: {0:.2f}° ==> {1:.2f}° = {2:.2f}°'.format(pa_start, pa_end, np.abs(pa_end-pa_start)))
+        self._logger.info(' * POSANG: {0}'.format(', '.join(['{:.2f}°'.format(p) for p in posang])))


     def check_files_association(self):
@@ -599,7 +614,7 @@ def check_files_association(self):
         # check if recipe can be executed
         toolbox.check_recipe_execution(self._recipe_execution, 'check_files_association', self.recipe_requirements)

-        _log.info('Performing file association for calibrations')
+        self._logger.info('Performing file association for calibrations')

         # parameters
         files_info = self._files_info
@@ -634,7 +649,7 @@ def check_files_association(self):
             cfiles = calibs[(calibs['DPR TYPE'] == 'FLAT,LAMP') & (calibs['INS COMB IFLT'] == filter_comb)]
             if len(cfiles) <= 1:
                 error_flag += 1
-                _log.error(' * there should be more than 1 flat in filter combination {0}'.format(filter_comb))
+                self._logger.error(' * there should be more than 1 flat in filter combination {0}'.format(filter_comb))

         ##################################################
         # static calibrations that depend on science DIT
@@ -650,16 +665,16 @@ def check_files_association(self):
                                 (calibs['DET SEQ1 DIT'].round(2) == DIT)]
             if len(cfiles) == 0:
                 warning_flag += 1
-                _log.warning(' * there is no dark/background for science files with DIT={0} sec. It is *highly recommended* to include one to obtain the best data reduction. A single dark/background file is sufficient, and it can easily be downloaded from the ESO archive'.format(DIT))
+                self._logger.warning(' * there is no dark/background for science files with DIT={0} sec. It is *highly recommended* to include one to obtain the best data reduction. 
A single dark/background file is sufficient, and it can easily be downloaded from the ESO archive'.format(DIT)) # sky backgrounds cfiles = files_info[(files_info['DPR TYPE'] == 'SKY') & (files_info['DET SEQ1 DIT'].round(2) == DIT)] if len(cfiles) == 0: warning_flag += 1 - _log.warning(' * there is no sky background for science files with DIT={0} sec. Using a sky background instead of an internal instrumental background can usually provide a cleaner data reduction, especially in K-band'.format(DIT)) + self._logger.warning(' * there is no sky background for science files with DIT={0} sec. Using a sky background instead of an internal instrumental background can usually provide a cleaner data reduction, especially in K-band'.format(DIT)) # error reporting - _log.info('There are {0} warning(s) and {1} error(s) in the classification of files'.format(warning_flag, error_flag)) + self._logger.info('There are {0} warning(s) and {1} error(s) in the classification of files'.format(warning_flag, error_flag)) if error_flag: raise ValueError('There is {0} errors that should be solved before proceeding'.format(error_flag)) @@ -677,7 +692,7 @@ def sph_ird_cal_dark(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_cal_dark', self.recipe_requirements) - _log.info('Creating darks and backgrounds') + self._logger.info('Creating darks and backgrounds') # parameters path = self._path @@ -705,7 +720,7 @@ def sph_ird_cal_dark(self, silent=True): if len(cfiles) == 0: continue - _log.info(' * {0} in filter {1} with DIT={2:.2f} sec ({3} files)'.format(ctype, cfilt, DIT, len(cfiles))) + self._logger.info(' * {0} in filter {1} with DIT={2:.2f} sec ({3} files)'.format(ctype, cfilt, DIT, len(cfiles))) # create sof sof = path.sof / 'dark_filt={0}_DIT={1:.2f}.sof'.format(cfilt, DIT) @@ -794,7 +809,7 @@ def sph_ird_cal_detector_flat(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_cal_detector_flat', self.recipe_requirements) - _log.info('Creating flats') + self._logger.info('Creating flats') # parameters path = self._path @@ -810,7 +825,7 @@ def sph_ird_cal_detector_flat(self, silent=True): cfiles = calibs[calibs['INS COMB IFLT'] == cfilt] files = cfiles.index - _log.info(' * filter {0} ({1} files)'.format(cfilt, len(cfiles))) + self._logger.info(' * filter {0} ({1} files)'.format(cfilt, len(cfiles))) # create sof sof = path.sof / 'flat_filt={0}.sof'.format(cfilt) @@ -931,7 +946,7 @@ def sph_ird_preprocess_science(self, # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_preprocess_science', self.recipe_requirements) - _log.info('Pre-processing science files') + self._logger.info('Pre-processing science files') # parameters path = self._path @@ -986,7 +1001,7 @@ def sph_ird_preprocess_science(self, for DIT in sci_DITs: sfiles = sci_files[sci_files['DET SEQ1 DIT'].round(2) == DIT] - _log.info('{0} files of type {1} with DIT={2} sec'.format(len(sfiles), typ, DIT)) + self._logger.info('{0} files of type {1} with DIT={2} sec'.format(len(sfiles), typ, DIT)) if subtract_background: # look for sky, then background, then darks @@ -997,11 +1012,11 @@ def sph_ird_preprocess_science(self, (files_info['DPR TYPE'] == d) & (files_info['DET SEQ1 DIT'].round(2) == DIT)] if len(dfiles) != 0: break - _log.info(' ==> found {0} corresponding {1} file'.format(len(dfiles), d)) + self._logger.info(' ==> found {0} corresponding {1} file'.format(len(dfiles), d)) if 
len(dfiles) == 0: # issue a warning if absolutely no background is found - _log.warning('No background has been found. Pre-processing will continue but data quality will likely be affected') + self._logger.warning('No background has been found. Pre-processing will continue but data quality will likely be affected') bkg = np.zeros((1024, 2048)) elif len(dfiles) == 1: bkg = fits.getdata(path.calib / '{}.fits'.format(dfiles.index[0])) @@ -1014,10 +1029,10 @@ def sph_ird_preprocess_science(self, # frames_info extract finfo = frames_info.loc[(fname, slice(None)), :] - _log.info(' * file {0}/{1}: {2}, NDIT={3}'.format(idx+1, len(sfiles), fname, len(finfo))) + self._logger.info(' * file {0}/{1}: {2}, NDIT={3}'.format(idx+1, len(sfiles), fname, len(finfo))) # read data - _log.info(' ==> read data') + self._logger.info(' ==> read data') img, hdr = fits.getdata(path.raw / '{}.fits'.format(fname), header=True) # add extra dimension to single images to make cubes @@ -1034,25 +1049,25 @@ def sph_ird_preprocess_science(self, # collapse if (typ == 'OBJECT,CENTER'): if collapse_center: - _log.info(' ==> collapse: mean') + self._logger.info(' ==> collapse: mean') img = np.mean(img, axis=0, keepdims=True) - frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'mean') + frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'mean', logger=self._logger) else: - frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'none') + frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'none', logger=self._logger) elif (typ == 'OBJECT,FLUX'): if collapse_psf: - _log.info(' ==> collapse: mean') + self._logger.info(' ==> collapse: mean') img = np.mean(img, axis=0, keepdims=True) - frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'mean') + frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'mean', logger=self._logger) else: - frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'none') + frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'none', logger=self._logger) elif (typ == 'OBJECT'): if collapse_science: if collapse_type == 'mean': - _log.info(' ==> collapse: mean ({0} -> 1 frame, 0 dropped)'.format(len(img))) + self._logger.info(' ==> collapse: mean ({0} -> 1 frame, 0 dropped)'.format(len(img))) img = np.mean(img, axis=0, keepdims=True) - frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'mean') + frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'mean', logger=self._logger) elif collapse_type == 'coadd': if (not isinstance(coadd_value, int)) or (coadd_value <= 1): raise TypeError('coadd_value must be an integer >1') @@ -1065,7 +1080,7 @@ def sph_ird_preprocess_science(self, if coadd_value > NDIT: raise ValueError('coadd_value ({0}) must be < NDIT ({1})'.format(coadd_value, NDIT)) - _log.info(' ==> collapse: coadd by {0} ({1} -> {2} frames, {3} dropped)'.format(coadd_value, NDIT, NDIT_new, dropped)) + self._logger.info(' ==> collapse: coadd by {0} ({1} -> {2} frames, {3} dropped)'.format(coadd_value, NDIT, NDIT_new, dropped)) # coadd frames nimg = np.empty((NDIT_new, 1024, 2048), dtype=img.dtype) @@ -1073,29 +1088,29 @@ def sph_ird_preprocess_science(self, nimg[f] = np.mean(img[f*coadd_value:(f+1)*coadd_value], axis=0) img = nimg - frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'coadd', coadd_value=coadd_value) + frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'coadd', coadd_value=coadd_value, logger=self._logger) else: raise ValueError('Unknown collapse type {0}'.format(collapse_type)) else: - 
frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'none') + frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'none', logger=self._logger) frames_info_preproc = pd.concat((frames_info_preproc, frames_info_new)) # background subtraction if subtract_background: - _log.info(' ==> subtract background') + self._logger.info(' ==> subtract background') for f in range(len(img)): img[f] -= bkg # divide flat if subtract_background: - _log.info(' ==> divide by flat field') + self._logger.info(' ==> divide by flat field') for f in range(len(img)): img[f] /= flat # bad pixels correction if fix_badpix: - _log.info(' ==> correct bad pixels') + self._logger.info(' ==> correct bad pixels') for f in range(len(img)): frame = img[f] frame = imutils.fix_badpix(frame, bpm, npix=12, weight=True) @@ -1108,7 +1123,7 @@ def sph_ird_preprocess_science(self, img[f] = frame # reshape data - _log.info(' ==> reshape data') + self._logger.info(' ==> reshape data') NDIT = img.shape[0] nimg = np.zeros((NDIT, 2, 1024, 1024)) for f in range(len(img)): @@ -1156,7 +1171,7 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_star_center', self.recipe_requirements) - _log.info('Star centers determination') + self._logger.info('Star centers determination') # parameters path = self._path @@ -1174,7 +1189,7 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): flux_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,FLUX'] if len(flux_files) != 0: for file, idx in flux_files.index: - _log.info(' ==> OBJECT,FLUX: {0}'.format(file)) + self._logger.info(' ==> OBJECT,FLUX: {0}'.format(file)) # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) @@ -1185,7 +1200,8 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): save_path = path.products / '{}_PSF_fitting.pdf'.format(fname) else: save_path = None - img_center = toolbox.star_centers_from_PSF_img_cube(cube, wave, pixel, save_path=save_path) + img_center = toolbox.star_centers_from_PSF_img_cube(cube, wave, pixel, + save_path=save_path, logger=self._logger) # save fits.writeto(path.preproc / '{}_centers.fits'.format(fname), img_center, overwrite=True) @@ -1194,7 +1210,7 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] if len(starcen_files) != 0: for file, idx in starcen_files.index: - _log.info(' ==> OBJECT,CENTER: {0}'.format(file)) + self._logger.info(' ==> OBJECT,CENTER: {0}'.format(file)) # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) @@ -1216,7 +1232,8 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): spot_center, spot_dist, img_center \ = toolbox.star_centers_from_waffle_img_cube(cube, wave, waffle_orientation, center_guess, pixel, orientation_offset, high_pass=high_pass, - center_offset=offset, coro=coro, save_path=save_path) + center_offset=offset, coro=coro, save_path=save_path, + logger=self._logger) # save fits.writeto(path.preproc / '{}_centers.fits'.format(fname), img_center, overwrite=True) @@ -1326,7 +1343,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_combine_data', self.recipe_requirements) - _log.info('Combine science data') + self._logger.info('Combine science data') # parameters path = 
self._path @@ -1342,16 +1359,16 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # max images size if psf_dim > 1024: - _log.warning('psf_dim cannot be larger than 1024 pix. A value of 1024 will be used.') + self._logger.warning('psf_dim cannot be larger than 1024 pix. A value of 1024 will be used.') psf_dim = 1024 if science_dim > 1024: - _log.warning('science_dim cannot be larger than 1024 pix. A value of 1024 will be used.') + self._logger.warning('science_dim cannot be larger than 1024 pix. A value of 1024 will be used.') science_dim = 1024 # centering configuration if coarse_centering: - _log.warning('Images will be coarsely centered without any interpolation. Automatic settings for coarse centering: shift_method=\'roll\', cpix=True, correct_anamorphism=False, save_scaled=False') + self._logger.warning('Images will be coarsely centered without any interpolation. Automatic settings for coarse centering: shift_method=\'roll\', cpix=True, correct_anamorphism=False, save_scaled=False') shift_method = 'roll' cpix = True correct_anamorphism = False @@ -1366,7 +1383,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a if manual_center.shape == (2,): manual_center = np.full((nwave, 2), manual_center, dtype=np.float) - _log.warning('Images will be centered using the user-provided center ({},{})'.format(*manual_center[0])) + self._logger.warning('Images will be centered using the user-provided center ({},{})'.format(*manual_center[0])) # # OBJECT,FLUX @@ -1374,7 +1391,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a flux_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,FLUX'] nfiles = len(flux_files) if nfiles != 0: - _log.info(' * OBJECT,FLUX data') + self._logger.info(' * OBJECT,FLUX data') # final arrays psf_cube = np.zeros((nwave, nfiles, psf_dim, psf_dim)) @@ -1391,7 +1408,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # read and combine files for file_idx, (file, idx) in enumerate(flux_files.index): - _log.info(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(flux_files), file, idx)) + self._logger.info(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(flux_files), file, idx)) # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) @@ -1401,7 +1418,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a if cfile.exists(): centers = fits.getdata(cfile) else: - _log.warning('sph_ird_star_center() has not been executed. Images will be centered using default center ({},{})'.format(*self._default_center)) + self._logger.warning('sph_ird_star_center() has not been executed. 
Images will be centered using default center ({},{})'.format(*self._default_center)) centers = self._default_center # make sure we have only integers if user wants coarse centering @@ -1457,7 +1474,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] nfiles = len(starcen_files) if nfiles != 0: - _log.info(' * OBJECT,CENTER data') + self._logger.info(' * OBJECT,CENTER data') # final arrays cen_cube = np.zeros((nwave, nfiles, science_dim, science_dim)) @@ -1474,7 +1491,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # read and combine files for file_idx, (file, idx) in enumerate(starcen_files.index): - _log.info(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(starcen_files), file, idx)) + self._logger.info(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(starcen_files), file, idx)) # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) @@ -1540,7 +1557,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a object_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT'] nfiles = len(object_files) if nfiles != 0: - _log.info(' * OBJECT data') + self._logger.info(' * OBJECT data') # null value for Dithering Motion Stage by default dms_dx_ref = 0 @@ -1557,7 +1574,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # select which CENTER to use starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] if len(starcen_files) == 0: - _log.warning('No OBJECT,CENTER file in the dataset. Images will be centered using default center ({},{})'.format(*self._default_center)) + self._logger.warning('No OBJECT,CENTER file in the dataset. Images will be centered using default center ({},{})'.format(*self._default_center)) centers = self._default_center else: fname = '{0}_DIT{1:03d}_preproc_centers.fits'.format(starcen_files.index.values[0][0], starcen_files.index.values[0][1]) @@ -1570,7 +1587,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a dms_dx_ref = starcen_files['INS1 PAC X'][0] / 18 dms_dy_ref = starcen_files['INS1 PAC Y'][0] / 18 else: - _log.warning('sph_ird_star_center() has not been executed. Images will be centered using default center ({},{})'.format(*self._default_center)) + self._logger.warning('sph_ird_star_center() has not been executed. 
Images will be centered using default center ({},{})'.format(*self._default_center))
                 centers = self._default_center

         # make sure we have only integers if user wants coarse centering
@@ -1594,7 +1611,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a

         # read and combine files
         for file_idx, (file, idx) in enumerate(object_files.index):
-            _log.info(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(object_files), file, idx))
+            self._logger.info(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(object_files), file, idx))

             # read data
             fname = '{0}_DIT{1:03d}_preproc'.format(file, idx)
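The counterpart of the per-object logger is the calling convention used by the shared helpers in vltpf.toolbox, introduced in patch 060 and exercised by the IRDIS patches: the module logger stays as the default value of a logger keyword, and reduction objects override it per call with logger=self._logger. A sketch of the signature pattern, with the body elided:

    import logging

    _log = logging.getLogger(__name__)

    def star_centers_from_PSF_img_cube(cube, wave, pixel, save_path=None, logger=_log):
        # the module-level _log is only a fallback for direct calls;
        # reductions pass their own logger so messages reach reduction.log
        for idx, w in enumerate(wave):
            logger.info(' wave {0:2d}/{1:2d} ({2:.0f} nm)'.format(idx + 1, len(wave), w))
        ...  # the actual PSF fitting is elided here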
_log.info(' * Coronagraph: {0}'.format(cinfo['INS COMB ICOR'][0])) - _log.info(' * Mode: {0}'.format(cinfo['INS1 MODE'][0])) - _log.info(' * Filter: {0}'.format(cinfo['INS COMB IFLT'][0])) - _log.info(' * DIT: {0:.2f} sec'.format(cinfo['DET SEQ1 DIT'][0])) - _log.info(' * NDIT: {0:.0f}'.format(cinfo['DET NDIT'][0])) - _log.info(' * Texp: {0:.2f} min'.format(cinfo['DET SEQ1 DIT'].sum()/60)) - _log.info(' * PA: {0:.2f}° ==> {1:.2f}° = {2:.2f}°'.format(pa_start, pa_end, np.abs(pa_end-pa_start))) - _log.info(' * POSANG: {0}'.format(', '.join(['{:.2f}°'.format(p) for p in posang]))) + self._logger.info(' * Object: {0}'.format(cinfo['OBJECT'][0])) + self._logger.info(' * RA / DEC: {0} / {1}'.format(RA, DEC)) + self._logger.info(' * Date: {0}'.format(date)) + self._logger.info(' * Instrument: {0}'.format(cinfo['SEQ ARM'][0])) + self._logger.info(' * Derotator: {0}'.format(cinfo['INS4 DROT2 MODE'][0])) + self._logger.info(' * Coronagraph: {0}'.format(cinfo['INS COMB ICOR'][0])) + self._logger.info(' * Mode: {0}'.format(cinfo['INS1 MODE'][0])) + self._logger.info(' * Filter: {0}'.format(cinfo['INS COMB IFLT'][0])) + self._logger.info(' * DIT: {0:.2f} sec'.format(cinfo['DET SEQ1 DIT'][0])) + self._logger.info(' * NDIT: {0:.0f}'.format(cinfo['DET NDIT'][0])) + self._logger.info(' * Texp: {0:.2f} min'.format(cinfo['DET SEQ1 DIT'].sum()/60)) + self._logger.info(' * PA: {0:.2f}° ==> {1:.2f}° = {2:.2f}°'.format(pa_start, pa_end, np.abs(pa_end-pa_start))) + self._logger.info(' * POSANG: {0}'.format(', '.join(['{:.2f}°'.format(p) for p in posang]))) def check_files_association(self): @@ -663,7 +680,7 @@ def check_files_association(self): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'check_files_association', self.recipe_requirements) - _log.info('Performing file association for calibrations') + self._logger.info('Performing file association for calibrations') # parameters path = self._path @@ -701,16 +718,16 @@ def check_files_association(self): cfiles = calibs[(calibs['DPR TYPE'] == 'FLAT,LAMP') & (calibs['INS COMB IFLT'] == filter_comb)] if len(cfiles) <= 1: error_flag += 1 - _log.error(' * there should be more than 1 flat in filter combination {0}'.format(filter_comb)) + self._logger.error(' * there should be more than 1 flat in filter combination {0}'.format(filter_comb)) # wave cfiles = calibs[(calibs['DPR TYPE'] == 'LAMP,WAVE') & (calibs['INS COMB IFLT'] == filter_comb)] if len(cfiles) == 0: error_flag += 1 - _log.error(' * there should be 1 wavelength calibration file, found none.') + self._logger.error(' * there should be 1 wavelength calibration file, found none.') elif len(cfiles) > 1: warning_flag += 1 - _log.warning(' * there should be 1 wavelength calibration file, found {0}. Using the closest from science.'.format(len(cfiles))) + self._logger.warning(' * there should be 1 wavelength calibration file, found {0}. Using the closest from science.'.format(len(cfiles))) # find the two closest to science files sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE')] @@ -735,16 +752,16 @@ def check_files_association(self): (calibs['DET SEQ1 DIT'].round(2) == DIT)] if len(cfiles) == 0: warning_flag += 1 - _log.warning(' * there is no dark/background for science files with DIT={0} sec. It is *highly recommended* to include one to obtain the best data reduction. 
A single dark/background file is sufficient, and it can easily be downloaded from the ESO archive'.format(DIT)) + self._logger.warning(' * there is no dark/background for science files with DIT={0} sec. It is *highly recommended* to include one to obtain the best data reduction. A single dark/background file is sufficient, and it can easily be downloaded from the ESO archive'.format(DIT)) # sky backgrounds cfiles = files_info[(files_info['DPR TYPE'] == 'SKY') & (files_info['DET SEQ1 DIT'].round(2) == DIT)] if len(cfiles) == 0: warning_flag += 1 - _log.warning(' * there is no sky background for science files with DIT={0} sec. Using a sky background instead of an internal instrumental background can usually provide a cleaner data reduction'.format(DIT)) + self._logger.warning(' * there is no sky background for science files with DIT={0} sec. Using a sky background instead of an internal instrumental background can usually provide a cleaner data reduction'.format(DIT)) # error reporting - _log.info('There are {0} warning(s) and {1} error(s) in the classification of files'.format(warning_flag, error_flag)) + self._logger.info('There are {0} warning(s) and {1} error(s) in the classification of files'.format(warning_flag, error_flag)) if error_flag: raise ValueError('There is {0} errors that should be solved before proceeding'.format(error_flag)) @@ -766,7 +783,7 @@ def sph_ird_cal_dark(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_cal_dark', self.recipe_requirements) - _log.info('Creating darks and backgrounds') + self._logger.info('Creating darks and backgrounds') # parameters path = self._path @@ -794,7 +811,7 @@ def sph_ird_cal_dark(self, silent=True): if len(cfiles) == 0: continue - _log.info(' * {0} in filter {1} with DIT={2:.2f} sec ({3} files)'.format(ctype, cfilt, DIT, len(cfiles))) + self._logger.info(' * {0} in filter {1} with DIT={2:.2f} sec ({3} files)'.format(ctype, cfilt, DIT, len(cfiles))) # create sof sof = path.sof / 'dark_filt={0}_DIT={1:.2f}.sof'.format(cfilt, DIT) @@ -883,7 +900,7 @@ def sph_ird_cal_detector_flat(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_cal_detector_flat', self.recipe_requirements) - _log.info('Creating flats') + self._logger.info('Creating flats') # parameters path = self._path @@ -898,7 +915,7 @@ def sph_ird_cal_detector_flat(self, silent=True): cfiles = calibs[calibs['INS COMB IFLT'] == cfilt] files = cfiles.index - _log.info(' * filter {0} ({1} files)'.format(cfilt, len(cfiles))) + self._logger.info(' * filter {0} ({1} files)'.format(cfilt, len(cfiles))) # create sof sof = path.sof / 'flat_filt={0}.sof'.format(cfilt) @@ -976,7 +993,7 @@ def sph_ird_wave_calib(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_wave_calib', self.recipe_requirements) - _log.info('Creating wavelength calibration') + self._logger.info('Creating wavelength calibration') # parameters path = self._path @@ -1157,7 +1174,7 @@ def sph_ird_preprocess_science(self, # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_preprocess_science', self.recipe_requirements) - _log.info('Pre-processing science files') + self._logger.info('Pre-processing science files') # parameters path = self._path @@ -1212,7 +1229,7 @@ def sph_ird_preprocess_science(self, for DIT in sci_DITs: sfiles = sci_files[sci_files['DET SEQ1 DIT'].round(2) == DIT] 
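For reference, the per-reduction logging introduced by PATCH 062 boils down to the following self-contained sketch. The Reduction class and the flat log-file location are illustrative assumptions; only the logger construction and the instance-level calls mirror the diff above.

    import logging
    from pathlib import Path

    class Reduction:
        # illustrative stand-in for SpectroReduction: only the logging
        # logic reflects the patch, everything else is omitted
        def __init__(self, path, log_level='info'):
            self._path = Path(path)

            # one logger per reduction path, so that several reductions
            # running in the same Python session keep separate log files
            logger = logging.getLogger(str(self._path))
            logger.setLevel(log_level.upper())

            # drop handlers left over from a previous object on the same path
            for hdlr in list(logger.handlers):
                logger.removeHandler(hdlr)

            # the pipeline writes to <path>/products/reduction.log; a flat
            # location keeps this sketch runnable without sub-directories
            handler = logging.FileHandler(self._path / 'reduction.log', mode='w', encoding='utf-8')
            handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)8s %(message)s'))
            logger.addHandler(handler)

            self._logger = logger

        def sort_files(self):
            # replaces the former module-level call _log.info('Sorting raw files')
            self._logger.info('Sorting raw files')

Keying the logger on the path is what makes the pattern safe for concurrent reductions: logging.getLogger() returns the same object for the same name, and the handler clean-up avoids duplicated log lines when a reduction object is re-created on the same directory.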
- _log.info('{0} files of type {1} with DIT={2} sec'.format(len(sfiles), typ, DIT)) + self._logger.info('{0} files of type {1} with DIT={2} sec'.format(len(sfiles), typ, DIT)) if subtract_background: # look for sky, then background, then darks @@ -1223,11 +1240,11 @@ def sph_ird_preprocess_science(self, (files_info['DPR TYPE'] == d) & (files_info['DET SEQ1 DIT'].round(2) == DIT)] if len(dfiles) != 0: break - _log.info(' ==> found {0} corresponding {1} file'.format(len(dfiles), d)) + self._logger.info(' ==> found {0} corresponding {1} file'.format(len(dfiles), d)) if len(dfiles) == 0: # issue a warning if absolutely no background is found - _log.warning('No background has been found. Pre-processing will continue but data quality will likely be affected') + self._logger.warning('No background has been found. Pre-processing will continue but data quality will likely be affected') bkg = np.zeros((1024, 2048)) elif len(dfiles) == 1: bkg = fits.getdata(path.calib / '{}.fits'.format(dfiles.index[0])) @@ -1240,10 +1257,10 @@ def sph_ird_preprocess_science(self, # frames_info extract finfo = frames_info.loc[(fname, slice(None)), :] - _log.info(' * file {0}/{1}: {2}, NDIT={3}'.format(idx+1, len(sfiles), fname, len(finfo))) + self._logger.info(' * file {0}/{1}: {2}, NDIT={3}'.format(idx+1, len(sfiles), fname, len(finfo))) # read data - _log.info(' ==> read data') + self._logger.info(' ==> read data') img, hdr = fits.getdata(path.raw / '{}.fits'.format(fname), header=True) # add extra dimension to single images to make cubes @@ -1260,44 +1277,44 @@ def sph_ird_preprocess_science(self, # collapse if (typ == 'OBJECT,CENTER'): if collapse_center: - _log.info(' ==> collapse: mean') + self._logger.info(' ==> collapse: mean') img = np.mean(img, axis=0, keepdims=True) - frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'mean') + frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'mean', logger=self._logger) else: - frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'none') + frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'none', logger=self._logger) elif (typ == 'OBJECT,FLUX'): if collapse_psf: - _log.info(' ==> collapse: mean') + self._logger.info(' ==> collapse: mean') img = np.mean(img, axis=0, keepdims=True) - frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'mean') + frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'mean', logger=self._logger) else: - frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'none') + frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'none', logger=self._logger) elif (typ == 'OBJECT'): if collapse_science: - _log.info(' ==> collapse: mean ({0} -> 1 frame, 0 dropped)'.format(len(img))) + self._logger.info(' ==> collapse: mean ({0} -> 1 frame, 0 dropped)'.format(len(img))) img = np.mean(img, axis=0, keepdims=True) - frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'mean') + frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'mean', logger=self._logger) else: - frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'none') + frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'none', logger=self._logger) frames_info_preproc = pd.concat((frames_info_preproc, frames_info_new)) # background subtraction if subtract_background: - _log.info(' ==> subtract background') + self._logger.info(' ==> subtract background') for f in range(len(img)): img[f] -= bkg # divide flat if subtract_background: - _log.info(' ==> divide by flat field') + self._logger.info(' ==> 
divide by flat field') for f in range(len(img)): img[f] /= flat # bad pixels correction if fix_badpix: - _log.info(' ==> correct bad pixels') + self._logger.info(' ==> correct bad pixels') for f in range(len(img)): frame = img[f] frame = imutils.fix_badpix(frame, bpm, npix=12, weight=True) @@ -1310,7 +1327,7 @@ def sph_ird_preprocess_science(self, img[f] = frame # reshape data - _log.info(' ==> reshape data') + self._logger.info(' ==> reshape data') NDIT = img.shape[0] nimg = np.zeros((NDIT, 2, 1024, 1024)) for f in range(len(img)): @@ -1353,7 +1370,7 @@ def sph_ird_star_center(self, high_pass=False, plot=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_star_center', self.recipe_requirements) - _log.info('Star centers determination') + self._logger.info('Star centers determination') # parameters path = self._path @@ -1381,7 +1398,7 @@ def sph_ird_star_center(self, high_pass=False, plot=True): flux_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,FLUX'] if len(flux_files) != 0: for file, idx in flux_files.index: - _log.info(' ==> OBJECT,FLUX: {0}'.format(file)) + self._logger.info(' ==> OBJECT,FLUX: {0}'.format(file)) # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) @@ -1392,7 +1409,8 @@ def sph_ird_star_center(self, high_pass=False, plot=True): save_path = path.products / '{}_PSF_fitting.pdf'.format(fname) else: save_path = None - psf_center = toolbox.star_centers_from_PSF_lss_cube(cube, wave_lin, pixel, save_path=save_path) + psf_center = toolbox.star_centers_from_PSF_lss_cube(cube, wave_lin, pixel, save_path=save_path, + logger=self._logger) # save fits.writeto(path.preproc / '{}_centers.fits'.format(fname), psf_center, overwrite=True) @@ -1404,7 +1422,7 @@ def sph_ird_star_center(self, high_pass=False, plot=True): starsci_files = frames_info[(frames_info['DPR TYPE'] == 'OBJECT') & (frames_info['DET SEQ1 DIT'].round(2) == DIT)] for file, idx in starcen_files.index: - _log.info(' ==> OBJECT,CENTER: {0}'.format(file)) + self._logger.info(' ==> OBJECT,CENTER: {0}'.format(file)) # read center data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) @@ -1424,7 +1442,8 @@ def sph_ird_star_center(self, high_pass=False, plot=True): save_path = None spot_centers, spot_dist, img_centers \ = toolbox.star_centers_from_waffle_lss_cube(cube_cen, cube_sci, wave_lin, centers, pixel, - high_pass=high_pass, save_path=save_path) + high_pass=high_pass, save_path=save_path, + logger=self._logger) # save fits.writeto(path.preproc / '{}_centers.fits'.format(fname), img_centers, overwrite=True) @@ -1459,7 +1478,7 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_wavelength_recalibration', self.recipe_requirements) - _log.info('Wavelength recalibration') + self._logger.info('Wavelength recalibration') # parameters path = self._path @@ -1495,7 +1514,7 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): # get spot distance from the first OBJECT,CENTER in the sequence starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] if len(starcen_files) == 0: - _log.info(' ==> no OBJECT,CENTER file in the data set. Wavelength cannot be recalibrated. The standard wavelength calibrated by the ESO pripeline will be used.') + self._logger.info(' ==> no OBJECT,CENTER file in the data set. Wavelength cannot be recalibrated. 
The standard wavelength calibrated by the ESO pipeline will be used.') return fname = '{0}_DIT{1:03d}_preproc_spot_distance'.format(starcen_files.index.values[0][0], starcen_files.index.values[0][1]) @@ -1507,7 +1526,7 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): pix = np.arange(1024) wave_final = np.zeros((1024, 2)) for fidx in range(2): - _log.info(' field {0:2d}/{1:2d}'.format(fidx+1, 2)) + self._logger.info(' field {0:2d}/{1:2d}'.format(fidx+1, 2)) wave = wave_lin[fidx] dist = spot_dist[:, fidx] @@ -1536,7 +1555,7 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): wave_final_fit[bad] = np.nan wave_diff = np.abs(wave_final_fit - wave) - _log.info(' ==> difference with calibrated wavelength: min={0:.1f} nm, max={1:.1f} nm'.format(np.nanmin(wave_diff), np.nanmax(wave_diff))) + self._logger.info(' ==> difference with calibrated wavelength: min={0:.1f} nm, max={1:.1f} nm'.format(np.nanmin(wave_diff), np.nanmax(wave_diff))) if fit_scaling: wave_final[:, fidx] = wave_final_fit @@ -1579,7 +1598,7 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): pdf.close() # save - _log.info(' * saving') + self._logger.info(' * saving') fits.writeto(path.preproc / 'wavelength_recalibrated.fits', wave_final, overwrite=True) # update recipe execution @@ -1683,7 +1702,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_combine_data', self.recipe_requirements) - _log.info('Combine science data') + self._logger.info('Combine science data') # parameters path = self._path @@ -1708,7 +1727,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m else: wfile = path.preproc / 'wavelength_default.fits' if wfile.exists(): - _log.warning('Using default wavelength calibration.') + self._logger.warning('Using default wavelength calibration.') wave = fits.getdata(wfile) else: raise FileExistsError('Missing default or recalibrated wavelength calibration. You must first run either sph_ird_wave_calib or sph_ird_wavelength_recalibration().') @@ -1732,16 +1751,16 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m # max images size if psf_dim > 1024: - _log.warning('psf_dim cannot be larger than 1024 pix. A value of 1024 will be used.') + self._logger.warning('psf_dim cannot be larger than 1024 pix. A value of 1024 will be used.') psf_dim = 1024 if science_dim > 1024: - _log.warning('science_dim cannot be larger than 1024 pix. A value of 1024 will be used.') + self._logger.warning('science_dim cannot be larger than 1024 pix. A value of 1024 will be used.') science_dim = 1024 # centering configuration if coarse_centering: - _log.warning('Images will be coarsely centered without any interpolation. Automatic settings for coarse centering: shift_method=\'roll\', cpix=True, correct_mrs_chromatism=False') + self._logger.warning('Images will be coarsely centered without any interpolation. 
Automatic settings for coarse centering: shift_method=\'roll\', cpix=True, correct_mrs_chromatism=False') shift_method = 'roll' cpix = True correct_mrs_chromatism = False @@ -1752,7 +1771,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m if manual_center.shape != (2,): raise ValueError('manual_center does not have the right number of dimensions.') - _log.warning('Images will be centered using the user-provided center ({},{})'.format(*manual_center)) + self._logger.warning('Images will be centered using the user-provided center ({},{})'.format(*manual_center)) manual_center = np.full((1024, 2), manual_center, dtype=np.float) @@ -1762,7 +1781,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m flux_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,FLUX'] nfiles = len(flux_files) if nfiles != 0: - _log.info(' * OBJECT,FLUX data') + self._logger.info(' * OBJECT,FLUX data') # final arrays psf_cube = np.zeros((2, nfiles, nwave, psf_dim)) @@ -1776,7 +1795,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m # read and combine files for file_idx, (file, idx) in enumerate(flux_files.index): - _log.info(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(flux_files), file, idx)) + self._logger.info(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(flux_files), file, idx)) # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) @@ -1786,7 +1805,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m if cfile.exists(): centers = fits.getdata(cfile) else: - _log.warning('sph_ird_star_center() has not been executed. Images will be centered using default centers ({}, {})'.format(*default_center[:, 0])) + self._logger.warning('sph_ird_star_center() has not been executed. Images will be centered using default centers ({}, {})'.format(*default_center[:, 0])) centers = np.full((1024, 2), default_center[:, 0], dtype=np.float) # make sure we have only integers if user wants coarse centering @@ -1856,7 +1875,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] nfiles = len(starcen_files) if nfiles != 0: - _log.info(' * OBJECT,CENTER data') + self._logger.info(' * OBJECT,CENTER data') # final arrays cen_cube = np.zeros((2, nfiles, nwave, science_dim)) @@ -1870,7 +1889,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m # read and combine files for file_idx, (file, idx) in enumerate(starcen_files.index): - _log.info(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(starcen_files), file, idx)) + self._logger.info(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(starcen_files), file, idx)) # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) @@ -1948,7 +1967,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m object_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT'] nfiles = len(object_files) if nfiles != 0: - _log.info(' * OBJECT data') + self._logger.info(' * OBJECT data') # final arrays sci_cube = np.zeros((2, nfiles, nwave, science_dim)) @@ -1963,7 +1982,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m # select which CENTER to use starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] if len(starcen_files) == 0: - _log.warning('No OBJECT,CENTER file in the data set. 
Images will be centered using default center ({},{})'.format(*default_center[:, 0])) + self._logger.warning('No OBJECT,CENTER file in the data set. Images will be centered using default center ({},{})'.format(*default_center[:, 0])) centers = np.full((1024, 2), default_center[:, 0], dtype=np.float) else: fname = '{0}_DIT{1:03d}_preproc_centers.fits'.format(starcen_files.index.values[0][0], starcen_files.index.values[0][1]) @@ -1981,7 +2000,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m # read and combine files for file_idx, (file, idx) in enumerate(object_files.index): - _log.info(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(object_files), file, idx)) + self._logger.info(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(object_files), file, idx)) # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) From a0fb9ae243dba9396f44f63c7b498d875a3ce1db Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Mon, 9 Sep 2019 10:18:03 +0200 Subject: [PATCH 063/101] Added log in cleanup function Ticket #63 --- vltpf/IFS.py | 2 ++ vltpf/IRDIS/ImagingReduction.py | 2 ++ vltpf/IRDIS/SpectroReduction.py | 2 ++ 3 files changed, 6 insertions(+) diff --git a/vltpf/IFS.py b/vltpf/IFS.py index 651a915..f14f2c9 100644 --- a/vltpf/IFS.py +++ b/vltpf/IFS.py @@ -2907,6 +2907,8 @@ def sph_ifs_clean(self, delete_raw=False, delete_products=False): Delete science products. Default is False ''' + self._logger.info('Cleaning') + # parameters path = self._path diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py index daa3281..7eb382d 100644 --- a/vltpf/IRDIS/ImagingReduction.py +++ b/vltpf/IRDIS/ImagingReduction.py @@ -1692,6 +1692,8 @@ def sph_ird_clean(self, delete_raw=False, delete_products=False): Delete science products. Default is False ''' + self._logger.info('Cleaning') + # parameters path = self._path diff --git a/vltpf/IRDIS/SpectroReduction.py b/vltpf/IRDIS/SpectroReduction.py index dcc656a..a2d87bb 100644 --- a/vltpf/IRDIS/SpectroReduction.py +++ b/vltpf/IRDIS/SpectroReduction.py @@ -2079,6 +2079,8 @@ def sph_ird_clean(self, delete_raw=False, delete_products=False): Delete science products. 
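From the user side, the only visible change from PATCH 062/063 is the new log_level argument on the reduction constructors. A usage sketch follows; the data path is a placeholder, and the import assumes vltpf.IRDIS re-exports the reduction classes as in the package's bundled examples.

    import vltpf.IRDIS as IRDIS

    # 'debug' keeps the most verbose trace; the log ends up in
    # <path>/products/reduction.log and is overwritten at each creation
    reduction = IRDIS.SpectroReduction('/data/sphere/HR4796/', log_level='debug')
    reduction.init_reduction()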
Default is False ''' + self._logger.info('Cleaning') + # parameters path = self._path From 5aab75fd0142e51148f68652785c6f25a17a2959 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Mon, 9 Sep 2019 16:21:38 +0200 Subject: [PATCH 064/101] Add log messages and update existing ones Ticket #63 --- vltpf/IFS.py | 41 +++++++++++++++++++++++---------- vltpf/IRDIS/ImagingReduction.py | 29 ++++++++++++++++++----- vltpf/IRDIS/SpectroReduction.py | 31 +++++++++++++++++++------ 3 files changed, 76 insertions(+), 25 deletions(-) diff --git a/vltpf/IFS.py b/vltpf/IFS.py index f14f2c9..e1753cf 100644 --- a/vltpf/IFS.py +++ b/vltpf/IFS.py @@ -404,11 +404,14 @@ def __init__(self, path, log_level='info'): logger.addHandler(handler) self._logger = logger + + self._logger.info('Creating IFS reduction at path {}'.format(path)) # configuration configfile = Path(vltpf.__file__).parent / 'instruments' / '{}.ini'.format(self._instrument) config = configparser.ConfigParser() try: + self._logger.debug('Read configuration') config.read(configfile) # instrument @@ -559,6 +562,8 @@ def init_reduction(self): Sort files and frames, perform sanity check ''' + self._logger.info('====> Init <====') + # make sure we have sub-directories self._path.create_subdirectories() @@ -572,6 +577,8 @@ def create_static_calibrations(self): Create static calibrations, mainly with esorex ''' + self._logger.info('====> Static calibrations <====') + config = self._config self.sph_ifs_cal_dark(silent=config['misc_silent_esorex']) @@ -586,6 +593,8 @@ def preprocess_science(self): Collapse and correct raw IFU images ''' + self._logger.info('====> Science pre-processing <====') + config = self._config self.sph_ifs_preprocess_science(subtract_background=config['preproc_subtract_background'], @@ -606,6 +615,8 @@ def process_science(self): center and combine cubes into final (x,y,time,lambda) cubes ''' + self._logger.info('====> Science processing <====') + config = self._config self.sph_ifs_wavelength_recalibration(high_pass=config['center_high_pass'], @@ -629,6 +640,8 @@ def clean(self): Clean the reduction directory ''' + self._logger.info('====> Clean-up <====') + config = self._config if config['clean']: @@ -641,6 +654,8 @@ def full_reduction(self): Performs a full reduction of a data set, from the static calibrations to the final (x,y,time,lambda) cubes ''' + + self._logger.info('====> Full reduction <====') self.init_reduction() self.create_static_calibrations() @@ -666,6 +681,8 @@ def read_info(self): The data frame with all the information on science frames after pre-processing ''' + self._logger.info('Read existing reduction information') + # path path = self._path @@ -780,7 +797,7 @@ def sort_files(self): Data frame with the information on raw files ''' - self._logger.info('Sorting raw files') + self._logger.info('Sort raw files') # parameters path = self._path @@ -864,7 +881,7 @@ def sort_frames(self): A data frame with the information on all frames ''' - self._logger.info('Extracting frames information') + self._logger.info('Extract frames information') # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sort_frames', self.recipe_requirements) @@ -963,7 +980,7 @@ def check_files_association(self): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'check_files_association', self.recipe_requirements) - self._logger.info('Performing file association for calibrations') + self._logger.info('File association for calibrations') # parameters path = self._path @@ 
-1195,7 +1212,7 @@ def sph_ifs_cal_dark(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_cal_dark', self.recipe_requirements) - self._logger.info('Creating darks and backgrounds') + self._logger.info('Darks and backgrounds') # parameters path = self._path @@ -1301,7 +1318,7 @@ def sph_ifs_cal_detector_flat(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_cal_detector_flat', self.recipe_requirements) - self._logger.info('Creating flats') + self._logger.info('Detector flats') # parameters path = self._path @@ -1393,7 +1410,7 @@ def sph_ifs_cal_specpos(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_cal_specpos', self.recipe_requirements) - self._logger.info('Creating specpos') + self._logger.info('Microspectra positions') # parameters path = self._path @@ -1479,7 +1496,7 @@ def sph_ifs_cal_wave(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_cal_wave', self.recipe_requirements) - self._logger.info('Creating wavelength calibration') + self._logger.info('Wavelength calibration') # parameters path = self._path @@ -1592,7 +1609,7 @@ def sph_ifs_cal_ifu_flat(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_cal_ifu_flat', self.recipe_requirements) - self._logger.info('Creating IFU flat') + self._logger.info('Integral-field unit flat') # parameters path = self._path @@ -1764,7 +1781,7 @@ def sph_ifs_preprocess_science(self, # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_preprocess_science', self.recipe_requirements) - self._logger.info('Pre-processing science files') + self._logger.info('Pre-process science files') # parameters path = self._path @@ -1956,7 +1973,7 @@ def sph_ifs_preprocess_wave(self): path = self._path files_info = self._files_info - self._logger.info('Pre-processing wavelength calibration file') + self._logger.info('Pre-process wavelength calibration file') # bpm bpm_files = files_info[files_info['PRO CATG'] == 'IFS_STATIC_BADPIXELMAP'].index @@ -2022,7 +2039,7 @@ def sph_ifs_science_cubes(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_science_cubes', self.recipe_requirements) - self._logger.info('Creating the (x,y,lambda) science cubes') + self._logger.info('Create science cubes') # parameters path = self._path @@ -2169,7 +2186,7 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot= # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_wavelength_recalibration', self.recipe_requirements) - self._logger.info('Recalibrating wavelength') + self._logger.info('Wavelength recalibration') # parameters path = self._path diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py index 7eb382d..92248c2 100644 --- a/vltpf/IRDIS/ImagingReduction.py +++ b/vltpf/IRDIS/ImagingReduction.py @@ -90,10 +90,13 @@ def __init__(self, path, log_level='info'): self._logger = logger + self._logger.info('Creating IRDIS imaging reduction at path {}'.format(path)) + # configuration configfile = Path(vltpf.__file__).parent / 'instruments' / '{}.ini'.format(self._instrument) config = configparser.ConfigParser() try: + self._logger.debug('Read 
configuration') config.read(configfile) # instrument @@ -243,6 +246,8 @@ def init_reduction(self): Sort files and frames, perform sanity check ''' + self._logger.info('====> Init <====') + # make sure we have sub-directories self._path.create_subdirectories() @@ -255,6 +260,8 @@ def create_static_calibrations(self): ''' Create static calibrations with esorex ''' + + self._logger.info('====> Static calibrations <====') config = self._config @@ -267,6 +274,8 @@ def preprocess_science(self): Clean and collapse images ''' + self._logger.info('====> Science pre-processing <====') + config = self._config self.sph_ird_preprocess_science(subtract_background=config['preproc_subtract_background'], @@ -284,6 +293,8 @@ def process_science(self): cubes, correct anamorphism and scale the images ''' + self._logger.info('====> Science processing <====') + config = self._config self.sph_ird_star_center(high_pass=config['center_high_pass'], @@ -305,6 +316,8 @@ def clean(self): sub-directory ''' + self._logger.info('====> Clean-up <====') + config = self._config if config['clean']: @@ -318,6 +331,8 @@ def full_reduction(self): calibrations to the final (x,y,time,lambda) cubes ''' + self._logger.info('====> Full reduction <====') + self.init_reduction() self.create_static_calibrations() self.preprocess_science() @@ -342,6 +357,8 @@ def read_info(self): The data frame with all the information on science frames after pre-processing ''' + self._logger.info('Read existing reduction information') + # path path = self._path @@ -431,7 +448,7 @@ def sort_files(self): Data frame with the information on raw files ''' - self._logger.info('Sorting raw files') + self._logger.info('Sort raw files') # parameters path = self._path @@ -515,7 +532,7 @@ def sort_frames(self): A data frame with the information on all frames ''' - self._logger.info('Extracting frames information') + self._logger.info('Extract frames information') # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sort_frames', self.recipe_requirements) @@ -614,7 +631,7 @@ def check_files_association(self): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'check_files_association', self.recipe_requirements) - self._logger.info('Performing file association for calibrations') + self._logger.info('File association for calibrations') # parameters files_info = self._files_info @@ -692,7 +709,7 @@ def sph_ird_cal_dark(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_cal_dark', self.recipe_requirements) - self._logger.info('Creating darks and backgrounds') + self._logger.info('Darks and backgrounds') # parameters path = self._path @@ -809,7 +826,7 @@ def sph_ird_cal_detector_flat(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_cal_detector_flat', self.recipe_requirements) - self._logger.info('Creating flats') + self._logger.info('Instrument flats') # parameters path = self._path @@ -946,7 +963,7 @@ def sph_ird_preprocess_science(self, # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_preprocess_science', self.recipe_requirements) - self._logger.info('Pre-processing science files') + self._logger.info('Pre-process science files') # parameters path = self._path diff --git a/vltpf/IRDIS/SpectroReduction.py b/vltpf/IRDIS/SpectroReduction.py index a2d87bb..d9cdc38 100644 --- a/vltpf/IRDIS/SpectroReduction.py +++ 
b/vltpf/IRDIS/SpectroReduction.py @@ -136,10 +136,13 @@ def __init__(self, path, log_level='info'): self._logger = logger + self._logger.info('Creating IRDIS spectroscopy reduction at path {}'.format(path)) + # configuration configfile = Path(vltpf.__file__).parent / 'instruments' / '{}.ini'.format(self._instrument) config = configparser.ConfigParser() try: + self._logger.debug('Read configuration') config.read(configfile) # instrument @@ -302,6 +305,8 @@ def init_reduction(self): Sort files and frames, perform sanity check ''' + self._logger.info('====> Init <====') + # make sure we have sub-directories self._path.create_subdirectories() @@ -315,6 +320,8 @@ def create_static_calibrations(self): Create static calibrations with esorex ''' + self._logger.info('====> Static calibrations <====') + config = self._config self.sph_ird_cal_dark(silent=config['misc_silent_esorex']) @@ -327,6 +334,8 @@ def preprocess_science(self): Clean and collapse images ''' + self._logger.info('====> Science pre-processing <====') + config = self._config self.sph_ird_preprocess_science(subtract_background=config['preproc_subtract_background'], @@ -341,6 +350,8 @@ def process_science(self): Perform star center, combine cubes into final (x,y,time,lambda) cubes, correct anamorphism and scale the images ''' + + self._logger.info('====> Science processing <====') config = self._config @@ -363,6 +374,8 @@ def clean(self): sub-directory ''' + self._logger.info('====> Clean-up <====') + config = self._config if config['clean']: @@ -376,6 +389,8 @@ def full_reduction(self): calibrations to the final (x,y,time,lambda) cubes ''' + self._logger.info('====> Full reduction <====') + self.init_reduction() self.create_static_calibrations() self.preprocess_science() @@ -400,6 +415,8 @@ def read_info(self): The data frame with all the information on science frames after pre-processing ''' + self._logger.info('Read existing reduction information') + # path path = self._path @@ -497,7 +514,7 @@ def sort_files(self): Data frame with the information on raw files ''' - self._logger.info('Sorting raw files') + self._logger.info('Sort raw files') # parameters path = self._path @@ -581,7 +598,7 @@ def sort_frames(self): A data frame with the information on all frames ''' - self._logger.info('Extracting frames information') + self._logger.info('Extract frames information') # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sort_frames', self.recipe_requirements) @@ -680,7 +697,7 @@ def check_files_association(self): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'check_files_association', self.recipe_requirements) - self._logger.info('Performing file association for calibrations') + self._logger.info('File association for calibrations') # parameters path = self._path @@ -783,7 +800,7 @@ def sph_ird_cal_dark(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_cal_dark', self.recipe_requirements) - self._logger.info('Creating darks and backgrounds') + self._logger.info('Darks and backgrounds') # parameters path = self._path @@ -900,7 +917,7 @@ def sph_ird_cal_detector_flat(self, silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_cal_detector_flat', self.recipe_requirements) - self._logger.info('Creating flats') + self._logger.info('Instrument flats') # parameters path = self._path @@ -993,7 +1010,7 @@ def sph_ird_wave_calib(self, 
silent=True): # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_wave_calib', self.recipe_requirements) - self._logger.info('Creating wavelength calibration') + self._logger.info('Wavelength calibration') # parameters path = self._path @@ -1174,7 +1191,7 @@ def sph_ird_preprocess_science(self, # check if recipe can be executed toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_preprocess_science', self.recipe_requirements) - self._logger.info('Pre-processing science files') + self._logger.info('Pre-process science files') # parameters path = self._path From 152b3456f6211adfc73ec6db44218b44e0774be2 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Mon, 9 Sep 2019 19:45:51 +0200 Subject: [PATCH 065/101] Minor updates to logging Ticket #63 --- vltpf/IFS.py | 2 +- vltpf/IRDIS/ImagingReduction.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/vltpf/IFS.py b/vltpf/IFS.py index e1753cf..103d477 100644 --- a/vltpf/IFS.py +++ b/vltpf/IFS.py @@ -2924,7 +2924,7 @@ def sph_ifs_clean(self, delete_raw=False, delete_products=False): Delete science products. Default is False ''' - self._logger.info('Cleaning') + self._logger.info('Clean reduction data') # parameters path = self._path diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py index 92248c2..b13c6fe 100644 --- a/vltpf/IRDIS/ImagingReduction.py +++ b/vltpf/IRDIS/ImagingReduction.py @@ -1709,7 +1709,7 @@ def sph_ird_clean(self, delete_raw=False, delete_products=False): Delete science products. Default is False ''' - self._logger.info('Cleaning') + self._logger.info('Clean reduction data') # parameters path = self._path From 3e6d3b225236114fa17125ba0868a197df422e5e Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Mon, 9 Sep 2019 19:50:11 +0200 Subject: [PATCH 066/101] Change file logging format Ticket #63 --- vltpf/IFS.py | 2 +- vltpf/IRDIS/ImagingReduction.py | 2 +- vltpf/IRDIS/SpectroReduction.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/vltpf/IFS.py b/vltpf/IFS.py index 103d477..36d3835 100644 --- a/vltpf/IFS.py +++ b/vltpf/IFS.py @@ -398,7 +398,7 @@ def __init__(self, path, log_level='info'): logger.removeHandler(hdlr) handler = logging.FileHandler(self._path.products / 'reduction.log', mode='w', encoding='utf-8') - formatter = logging.Formatter('%(asctime)s %(levelname)8s %(message)s') + formatter = logging.Formatter('%(asctime)s\t%(levelname)8s\t%(message)s') formatter.default_msec_format = '%s.%03d' handler.setFormatter(formatter) logger.addHandler(handler) diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py index b13c6fe..fdb58fa 100644 --- a/vltpf/IRDIS/ImagingReduction.py +++ b/vltpf/IRDIS/ImagingReduction.py @@ -83,7 +83,7 @@ def __init__(self, path, log_level='info'): logger.removeHandler(hdlr) handler = logging.FileHandler(self._path.products / 'reduction.log', mode='w', encoding='utf-8') - formatter = logging.Formatter('%(asctime)s %(levelname)8s %(message)s') + formatter = logging.Formatter('%(asctime)s\t%(levelname)8s\t%(message)s') formatter.default_msec_format = '%s.%03d' handler.setFormatter(formatter) logger.addHandler(handler) diff --git a/vltpf/IRDIS/SpectroReduction.py b/vltpf/IRDIS/SpectroReduction.py index d9cdc38..f3e8937 100644 --- a/vltpf/IRDIS/SpectroReduction.py +++ b/vltpf/IRDIS/SpectroReduction.py @@ -129,7 +129,7 @@ def __init__(self, path, log_level='info'): logger.removeHandler(hdlr) handler = logging.FileHandler(self._path.products / 'reduction.log', 
mode='w', encoding='utf-8') - formatter = logging.Formatter('%(asctime)s %(levelname)8s %(message)s') + formatter = logging.Formatter('%(asctime)s\t%(levelname)8s\t%(message)s') formatter.default_msec_format = '%s.%03d' handler.setFormatter(formatter) logger.addHandler(handler) From d0f5e8c93923e7b047bacd019f8abf22c5837a54 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Mon, 9 Sep 2019 19:55:23 +0200 Subject: [PATCH 067/101] Improve logging messages Ticket #63 --- vltpf/IFS.py | 8 +++++--- vltpf/IRDIS/ImagingReduction.py | 8 +++++--- vltpf/IRDIS/SpectroReduction.py | 11 +++++++---- 3 files changed, 17 insertions(+), 10 deletions(-) diff --git a/vltpf/IFS.py b/vltpf/IFS.py index 36d3835..9d59a17 100644 --- a/vltpf/IFS.py +++ b/vltpf/IFS.py @@ -2652,7 +2652,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # read and combine files for file_idx, (file, idx) in enumerate(flux_files.index): - self._logger.info(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(flux_files), file, idx)) + self._logger.info(' ==> file {0}/{1}: {2}, DIT #{3}'.format(file_idx+1, len(flux_files), file, idx)) # read data fname = '{0}_DIT{1:03d}_preproc_'.format(file, idx) @@ -2740,7 +2740,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # read and combine files for file_idx, (file, idx) in enumerate(starcen_files.index): - self._logger.info(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(starcen_files), file, idx)) + self._logger.info(' ==> file {0}/{1}: {2}, DIT #{3}'.format(file_idx+1, len(starcen_files), file, idx)) # read data fname = '{0}_DIT{1:03d}_preproc_'.format(file, idx) @@ -2854,7 +2854,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # read and combine files for file_idx, (file, idx) in enumerate(object_files.index): - self._logger.info(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(object_files), file, idx)) + self._logger.info(' ==> file {0}/{1}: {2}, DIT #{3}'.format(file_idx+1, len(object_files), file, idx)) # read data fname = '{0}_DIT{1:03d}_preproc_'.format(file, idx) @@ -2948,9 +2948,11 @@ def sph_ifs_clean(self, delete_raw=False, delete_products=False): # raw if delete_raw: if path.raw.exists(): + self._logger.warning(' ==> delete raw files') shutil.rmtree(path.raw, ignore_errors=True) # products if delete_products: if path.products.exists(): + self._logger.warning(' ==> delete products') shutil.rmtree(path.products, ignore_errors=True) diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py index fdb58fa..ed8368f 100644 --- a/vltpf/IRDIS/ImagingReduction.py +++ b/vltpf/IRDIS/ImagingReduction.py @@ -1425,7 +1425,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # read and combine files for file_idx, (file, idx) in enumerate(flux_files.index): - self._logger.info(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(flux_files), file, idx)) + self._logger.info(' ==> file {0}/{1}: {2}, DIT #{3}'.format(file_idx+1, len(flux_files), file, idx)) # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) @@ -1508,7 +1508,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # read and combine files for file_idx, (file, idx) in enumerate(starcen_files.index): - self._logger.info(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(starcen_files), file, idx)) + self._logger.info(' ==> file {0}/{1}: {2}, DIT #{3}'.format(file_idx+1, len(starcen_files), file, idx)) # read 
data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) @@ -1628,7 +1628,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # read and combine files for file_idx, (file, idx) in enumerate(object_files.index): - self._logger.info(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(object_files), file, idx)) + self._logger.info(' ==> file {0}/{1}: {2}, DIT #{3}'.format(file_idx+1, len(object_files), file, idx)) # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) @@ -1733,9 +1733,11 @@ def sph_ird_clean(self, delete_raw=False, delete_products=False): # raw if delete_raw: if path.raw.exists(): + self._logger.warning(' ==> delete raw files') shutil.rmtree(path.raw, ignore_errors=True) # products if delete_products: if path.products.exists(): + self._logger.warning(' ==> delete products') shutil.rmtree(path.products, ignore_errors=True) diff --git a/vltpf/IRDIS/SpectroReduction.py b/vltpf/IRDIS/SpectroReduction.py index f3e8937..0984d0b 100644 --- a/vltpf/IRDIS/SpectroReduction.py +++ b/vltpf/IRDIS/SpectroReduction.py @@ -1812,7 +1812,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m # read and combine files for file_idx, (file, idx) in enumerate(flux_files.index): - self._logger.info(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(flux_files), file, idx)) + self._logger.info(' ==> file {0}/{1}: {2}, DIT #{3}'.format(file_idx+1, len(flux_files), file, idx)) # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) @@ -1906,7 +1906,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m # read and combine files for file_idx, (file, idx) in enumerate(starcen_files.index): - self._logger.info(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(starcen_files), file, idx)) + self._logger.info(' ==> file {0}/{1}: {2}, DIT #{3}'.format(file_idx+1, len(starcen_files), file, idx)) # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) @@ -2017,7 +2017,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m # read and combine files for file_idx, (file, idx) in enumerate(object_files.index): - self._logger.info(' ==> file {0}/{1}: {2}, DIT={3}'.format(file_idx+1, len(object_files), file, idx)) + self._logger.info(' ==> file {0}/{1}: {2}, DIT #{3}'.format(file_idx+1, len(object_files), file, idx)) # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) @@ -2096,7 +2096,7 @@ def sph_ird_clean(self, delete_raw=False, delete_products=False): Delete science products. 
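Side note on the format change of PATCH 066 above: with tab separators and a dotted millisecond suffix, every record in reduction.log becomes a regular three-column line that is easy to parse. A standalone sketch of the formatter, where the console handler and the sample output line are illustrative only:

    import logging
    import sys

    handler = logging.StreamHandler(sys.stdout)
    formatter = logging.Formatter('%(asctime)s\t%(levelname)8s\t%(message)s')
    formatter.default_msec_format = '%s.%03d'   # dot-separated milliseconds instead of the default comma
    handler.setFormatter(formatter)

    logger = logging.getLogger('format-demo')
    logger.addHandler(handler)
    logger.warning(' ==> delete raw files')
    # e.g. "2019-09-09 19:50:11.123	 WARNING	 ==> delete raw files"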
Default is False ''' - self._logger.info('Cleaning') + self._logger.info('Clean reduction data') # parameters path = self._path @@ -2120,9 +2120,12 @@ def sph_ird_clean(self, delete_raw=False, delete_products=False): # raw if delete_raw: if path.raw.exists(): + self._logger.warning(' ==> delete raw files') shutil.rmtree(path.raw, ignore_errors=True) # products if delete_products: if path.products.exists(): + self._logger.warning(' ==> delete products') shutil.rmtree(path.products, ignore_errors=True) + From ffc5e0162a8074f07ec9d703ee8c964f74e8917a Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Mon, 9 Sep 2019 21:53:18 +0200 Subject: [PATCH 068/101] Major update to the formatting of log messages Ticket #63 --- vltpf/IFS.py | 24 +++++++++++++----------- vltpf/IRDIS/ImagingReduction.py | 20 +++++++++++--------- vltpf/IRDIS/SpectroReduction.py | 20 +++++++++++--------- vltpf/toolbox.py | 10 +++++----- 4 files changed, 40 insertions(+), 34 deletions(-) diff --git a/vltpf/IFS.py b/vltpf/IFS.py index 9d59a17..2c6a856 100644 --- a/vltpf/IFS.py +++ b/vltpf/IFS.py @@ -809,7 +809,7 @@ def sort_files(self): if len(files) == 0: raise ValueError('No raw FITS files in reduction path') - self._logger.info(' * found {0} FITS files in {1}'.format(len(files), path.raw)) + self._logger.info(' * found {0} raw FITS files'.format(len(files))) # read list of keywords keywords = [] @@ -1190,9 +1190,11 @@ def check_files_association(self): self._logger.warning(' * there is no sky background for science files with DIT={0} sec. Using a sky background instead of an internal instrumental background can usually provide a cleaner data reduction'.format(DIT)) # error reporting - self._logger.warning('There are {0} warning(s) and {1} error(s) in the classification of files'.format(warning_flag, error_flag)) if error_flag: + self._logger.error('There are {0} warning(s) and {1} error(s) in the classification of files'.format(warning_flag, error_flag)) raise ValueError('There is {0} errors that should be solved before proceeding'.format(error_flag)) + else: + self._logger.warning('There are {0} warning(s) and {1} error(s) in the classification of files'.format(warning_flag, error_flag)) # save files_info.to_csv(path.preproc / 'files.csv') @@ -2220,12 +2222,12 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot= # # star center # - self._logger.info(' * fitting satelitte spots') + self._logger.info(' * fitting satellite spots') # get first DIT of first OBJECT,CENTER in the sequence starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] if len(starcen_files) == 0: - self._logger.info(' ==> no OBJECT,CENTER file in the data set. Wavelength cannot be recalibrated. 
The standard wavelength calibrated by the ESO pripeline will be used.') return ifs_mode = starcen_files['INS2 COMB IFS'].values[0] @@ -2417,7 +2419,7 @@ def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), plot=True): flux_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,FLUX'] if len(flux_files) != 0: for file, idx in flux_files.index: - self._logger.info(' ==> OBJECT,FLUX: {0}'.format(file)) + self._logger.info(' * OBJECT,FLUX: {0}'.format(file)) # read data fname = '{0}_DIT{1:03d}_preproc_'.format(file, idx) @@ -2449,7 +2451,7 @@ def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), plot=True): starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] if len(starcen_files) != 0: for file, idx in starcen_files.index: - self._logger.info(' ==> OBJECT,CENTER: {0}'.format(file)) + self._logger.info(' * OBJECT,CENTER: {0}'.format(file)) # read data fname = '{0}_DIT{1:03d}_preproc_'.format(file, idx) @@ -2652,7 +2654,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # read and combine files for file_idx, (file, idx) in enumerate(flux_files.index): - self._logger.info(' ==> file {0}/{1}: {2}, DIT #{3}'.format(file_idx+1, len(flux_files), file, idx)) + self._logger.info(' ==> file {0}/{1}: {2}, DIT #{3}'.format(file_idx+1, len(flux_files), file, idx)) # read data fname = '{0}_DIT{1:03d}_preproc_'.format(file, idx) @@ -2740,7 +2742,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # read and combine files for file_idx, (file, idx) in enumerate(starcen_files.index): - self._logger.info(' ==> file {0}/{1}: {2}, DIT #{3}'.format(file_idx+1, len(starcen_files), file, idx)) + self._logger.info(' ==> file {0}/{1}: {2}, DIT #{3}'.format(file_idx+1, len(starcen_files), file, idx)) # read data fname = '{0}_DIT{1:03d}_preproc_'.format(file, idx) @@ -2854,7 +2856,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # read and combine files for file_idx, (file, idx) in enumerate(object_files.index): - self._logger.info(' ==> file {0}/{1}: {2}, DIT #{3}'.format(file_idx+1, len(object_files), file, idx)) + self._logger.info(' ==> file {0}/{1}: {2}, DIT #{3}'.format(file_idx+1, len(object_files), file, idx)) # read data fname = '{0}_DIT{1:03d}_preproc_'.format(file, idx) @@ -2948,11 +2950,11 @@ def sph_ifs_clean(self, delete_raw=False, delete_products=False): # raw if delete_raw: if path.raw.exists(): - self._logger.warning(' ==> delete raw files') + self._logger.warning(' ==> delete raw files') shutil.rmtree(path.raw, ignore_errors=True) # products if delete_products: if path.products.exists(): - self._logger.warning(' ==> delete products') + self._logger.warning(' ==> delete products') shutil.rmtree(path.products, ignore_errors=True) diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py index ed8368f..fcc379b 100644 --- a/vltpf/IRDIS/ImagingReduction.py +++ b/vltpf/IRDIS/ImagingReduction.py @@ -460,7 +460,7 @@ def sort_files(self): if len(files) == 0: raise ValueError('No raw FITS files in reduction path') - self._logger.info(' * found {0} FITS files in {1}'.format(len(files), path.raw)) + self._logger.info(' * found {0} raw FITS files'.format(len(files))) # read list of keywords keywords = [] @@ -691,9 +691,11 @@ def check_files_association(self): self._logger.warning(' * there is no sky background for science files with DIT={0} sec. 
Using a sky background instead of an internal instrumental background can usually provide a cleaner data reduction, especially in K-band'.format(DIT)) # error reporting - self._logger.info('There are {0} warning(s) and {1} error(s) in the classification of files'.format(warning_flag, error_flag)) if error_flag: + self._logger.error('There are {0} warning(s) and {1} error(s) in the classification of files'.format(warning_flag, error_flag)) raise ValueError('There is {0} errors that should be solved before proceeding'.format(error_flag)) + else: + self._logger.warning('There are {0} warning(s) and {1} error(s) in the classification of files'.format(warning_flag, error_flag)) def sph_ird_cal_dark(self, silent=True): @@ -1206,7 +1208,7 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): flux_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,FLUX'] if len(flux_files) != 0: for file, idx in flux_files.index: - self._logger.info(' ==> OBJECT,FLUX: {0}'.format(file)) + self._logger.info(' * OBJECT,FLUX: {0}'.format(file)) # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) @@ -1227,7 +1229,7 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] if len(starcen_files) != 0: for file, idx in starcen_files.index: - self._logger.info(' ==> OBJECT,CENTER: {0}'.format(file)) + self._logger.info(' * OBJECT,CENTER: {0}'.format(file)) # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) @@ -1425,7 +1427,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # read and combine files for file_idx, (file, idx) in enumerate(flux_files.index): - self._logger.info(' ==> file {0}/{1}: {2}, DIT #{3}'.format(file_idx+1, len(flux_files), file, idx)) + self._logger.info(' ==> file {0}/{1}: {2}, DIT #{3}'.format(file_idx+1, len(flux_files), file, idx)) # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) @@ -1508,7 +1510,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # read and combine files for file_idx, (file, idx) in enumerate(starcen_files.index): - self._logger.info(' ==> file {0}/{1}: {2}, DIT #{3}'.format(file_idx+1, len(starcen_files), file, idx)) + self._logger.info(' ==> file {0}/{1}: {2}, DIT #{3}'.format(file_idx+1, len(starcen_files), file, idx)) # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) @@ -1628,7 +1630,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # read and combine files for file_idx, (file, idx) in enumerate(object_files.index): - self._logger.info(' ==> file {0}/{1}: {2}, DIT #{3}'.format(file_idx+1, len(object_files), file, idx)) + self._logger.info(' ==> file {0}/{1}: {2}, DIT #{3}'.format(file_idx+1, len(object_files), file, idx)) # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) @@ -1733,11 +1735,11 @@ def sph_ird_clean(self, delete_raw=False, delete_products=False): # raw if delete_raw: if path.raw.exists(): - self._logger.warning(' ==> delete raw files') + self._logger.warning(' ==> delete raw files') shutil.rmtree(path.raw, ignore_errors=True) # products if delete_products: if path.products.exists(): - self._logger.warning(' ==> delete products') + self._logger.warning(' ==> delete products') shutil.rmtree(path.products, ignore_errors=True) diff --git a/vltpf/IRDIS/SpectroReduction.py b/vltpf/IRDIS/SpectroReduction.py index 0984d0b..dddc80c 100644 --- a/vltpf/IRDIS/SpectroReduction.py +++ 
b/vltpf/IRDIS/SpectroReduction.py @@ -526,7 +526,7 @@ def sort_files(self): if len(files) == 0: raise ValueError('No raw FITS files in reduction path') - self._logger.info(' * found {0} FITS files in {1}'.format(len(files), path.raw)) + self._logger.info(' * found {0} raw FITS files'.format(len(files))) # read list of keywords keywords = [] @@ -778,9 +778,11 @@ def check_files_association(self): self._logger.warning(' * there is no sky background for science files with DIT={0} sec. Using a sky background instead of an internal instrumental background can usually provide a cleaner data reduction'.format(DIT)) # error reporting - self._logger.info('There are {0} warning(s) and {1} error(s) in the classification of files'.format(warning_flag, error_flag)) if error_flag: + self._logger.error('There are {0} warning(s) and {1} error(s) in the classification of files'.format(warning_flag, error_flag)) raise ValueError('There is {0} errors that should be solved before proceeding'.format(error_flag)) + else: + self._logger.warning('There are {0} warning(s) and {1} error(s) in the classification of files'.format(warning_flag, error_flag)) # save files_info.to_csv(path.preproc / 'files.csv') @@ -1415,7 +1417,7 @@ def sph_ird_star_center(self, high_pass=False, plot=True): flux_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,FLUX'] if len(flux_files) != 0: for file, idx in flux_files.index: - self._logger.info(' ==> OBJECT,FLUX: {0}'.format(file)) + self._logger.info(' * OBJECT,FLUX: {0}'.format(file)) # read data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) @@ -1439,7 +1441,7 @@ def sph_ird_star_center(self, high_pass=False, plot=True): starsci_files = frames_info[(frames_info['DPR TYPE'] == 'OBJECT') & (frames_info['DET SEQ1 DIT'].round(2) == DIT)] for file, idx in starcen_files.index: - self._logger.info(' ==> OBJECT,CENTER: {0}'.format(file)) + self._logger.info(' * OBJECT,CENTER: {0}'.format(file)) # read center data fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) @@ -1531,7 +1533,7 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): # get spot distance from the first OBJECT,CENTER in the sequence starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] if len(starcen_files) == 0: - self._logger.info(' ==> no OBJECT,CENTER file in the data set. Wavelength cannot be recalibrated. The standard wavelength calibrated by the ESO pripeline will be used.') + self._logger.info(' ==> no OBJECT,CENTER file in the data set. Wavelength cannot be recalibrated. 
The standard wavelength calibrated by the ESO pripeline will be used.') return fname = '{0}_DIT{1:03d}_preproc_spot_distance'.format(starcen_files.index.values[0][0], starcen_files.index.values[0][1]) @@ -1543,7 +1545,7 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): pix = np.arange(1024) wave_final = np.zeros((1024, 2)) for fidx in range(2): - self._logger.info(' field {0:2d}/{1:2d}'.format(fidx+1, 2)) + self._logger.info(' ==> field {0:2d}/{1:2d}'.format(fidx+1, 2)) wave = wave_lin[fidx] dist = spot_dist[:, fidx] @@ -2008,7 +2010,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m # make sure we have only integers if user wants coarse centering if coarse_centering: centers = centers.astype(np.int) - + # final center if cpix: cc = science_dim // 2 @@ -2120,12 +2122,12 @@ def sph_ird_clean(self, delete_raw=False, delete_products=False): # raw if delete_raw: if path.raw.exists(): - self._logger.warning(' ==> delete raw files') + self._logger.warning(' ==> delete raw files') shutil.rmtree(path.raw, ignore_errors=True) # products if delete_products: if path.products.exists(): - self._logger.warning(' ==> delete products') + self._logger.warning(' ==> delete products') shutil.rmtree(path.products, ignore_errors=True) diff --git a/vltpf/toolbox.py b/vltpf/toolbox.py index a5862c7..7b6969b 100644 --- a/vltpf/toolbox.py +++ b/vltpf/toolbox.py @@ -440,7 +440,7 @@ def star_centers_from_PSF_img_cube(cube, wave, pixel, save_path=None, logger=_lo # loop over images img_centers = np.zeros((nwave, 2)) for idx, (wave, img) in enumerate(zip(wave, cube)): - logger.info(' wave {0:2d}/{1:2d} ({2:.0f} nm)'.format(idx+1, nwave, wave)) + logger.info(' ==> wave {0:2d}/{1:2d} ({2:.0f} nm)'.format(idx+1, nwave, wave)) # remove any NaN img = np.nan_to_num(img) @@ -529,7 +529,7 @@ def star_centers_from_PSF_lss_cube(cube, wave_cube, pixel, save_path=None, logge nimg = len(cube) psf_centers = np.full((1024, nimg), np.nan) for fidx, img in enumerate(cube): - logger.info(' field {0:2d}/{1:2d}'.format(fidx+1, nimg)) + logger.info(' ==> field {0:2d}/{1:2d}'.format(fidx+1, nimg)) # remove any NaN img = np.nan_to_num(cube[fidx]) @@ -678,7 +678,7 @@ def star_centers_from_waffle_img_cube(cube_cen, wave, waffle_orientation, center spot_dist = np.zeros((nwave, 6)) img_centers = np.zeros((nwave, 2)) for idx, (wave, img) in enumerate(zip(wave, cube_cen)): - logger.info(' wave {0:2d}/{1:2d} ({2:.0f} nm)'.format(idx+1, nwave, wave)) + logger.info(' ==> wave {0:2d}/{1:2d} ({2:.0f} nm)'.format(idx+1, nwave, wave)) # remove any NaN img = np.nan_to_num(img) @@ -866,14 +866,14 @@ def star_centers_from_waffle_lss_cube(cube_cen, cube_sci, wave_cube, center_gues # subtract science cube if provided if cube_sci is not None: - logger.info(' ==> subtract science cube') + logger.info(' ==> subtract science cube') cube_cen -= cube_sci spot_centers = np.full((1024, 2, 2), np.nan) spot_dist = np.full((1024, nimg), np.nan) img_centers = np.full((1024, nimg), np.nan) for fidx, img in enumerate(cube_cen): - logger.info(' field {0:2d}/{1:2d}'.format(fidx+1, nimg)) + logger.info(' ==> field {0:2d}/{1:2d}'.format(fidx+1, nimg)) # remove any NaN img = np.nan_to_num(cube_cen[fidx]) From 0edc2813e02012d9106bd33c0387148fb57d1632 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Tue, 10 Sep 2019 12:24:18 +0200 Subject: [PATCH 069/101] Add debug log messages in IRDIS imaging Ticket #63 --- examples/irdis_imaging_reduction.py | 4 +- vltpf/IRDIS/ImagingReduction.py | 123 +++++++++++++++++++++++++--- 
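When coarse_centering is requested, the combine step truncates the star centers to integers before shifting. The point of whole-pixel shifts is that they move the data losslessly, whereas the 'interp' and 'fft' methods resample the image. A toy illustration of why, with np.roll and a hypothetical shift_integer helper standing in for the pipeline's imutils.shift:

    import numpy as np

    def shift_integer(img, dx, dy):
        # whole-pixel shift: lossless, no interpolation noise
        return np.roll(np.roll(img, dy, axis=0), dx, axis=1)

    img = np.zeros((64, 64))
    img[30, 28] = 1.0                      # toy star

    cx, cy = 28.4, 30.2                    # measured (sub-pixel) star center
    dx, dy = int(round(32 - cx)), int(round(32 - cy))
    shifted = shift_integer(img, dx, dy)
    assert shifted.sum() == img.sum()      # flux strictly preserved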
vltpf/toolbox.py | 57 +++++++++++-- 3 files changed, 163 insertions(+), 21 deletions(-) diff --git a/examples/irdis_imaging_reduction.py b/examples/irdis_imaging_reduction.py index 97ac150..b05c94e 100644 --- a/examples/irdis_imaging_reduction.py +++ b/examples/irdis_imaging_reduction.py @@ -5,7 +5,7 @@ # #%% init reduction -reduction = IRDIS.ImagingReduction('/Users/avigan/data/VLTPF-test-target/IRD/DBI/') +reduction = IRDIS.ImagingReduction('/Users/avigan/data/VLTPF-test-target/IRD/DBI/', log_level='debug') #%% configuration reduction.config['combine_psf_dim'] = 80 @@ -23,7 +23,7 @@ # #%% init reduction -reduction = IRDIS.ImagingReduction('/Users/avigan/data/VLTPF-test-target/IRD/DBI/') +reduction = IRDIS.ImagingReduction('/Users/avigan/data/VLTPF-test-target/IRD/DBI/', log_level='debug') #%% sorting reduction.sort_files() diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py index fcc379b..0f13157 100644 --- a/vltpf/IRDIS/ImagingReduction.py +++ b/vltpf/IRDIS/ImagingReduction.py @@ -96,7 +96,7 @@ def __init__(self, path, log_level='info'): configfile = Path(vltpf.__file__).parent / 'instruments' / '{}.ini'.format(self._instrument) config = configparser.ConfigParser() try: - self._logger.debug('Read configuration') + self._logger.debug('> read default configuration') config.read(configfile) # instrument @@ -365,6 +365,8 @@ def read_info(self): # files info fname = path.preproc / 'files.csv' if fname.exists(): + self._logger.debug('> read files.csv') + files_info = pd.read_csv(fname, index_col=0) # convert times @@ -386,6 +388,8 @@ def read_info(self): fname = path.preproc / 'frames.csv' if fname.exists(): + self._logger.debug('> read frames.csv') + frames_info = pd.read_csv(fname, index_col=(0, 1)) # convert times @@ -403,6 +407,8 @@ def read_info(self): fname = path.preproc / 'frames_preproc.csv' if fname.exists(): + self._logger.debug('> read frames_preproc.csv') + frames_info_preproc = pd.read_csv(fname, index_col=(0, 1)) # convert times @@ -429,6 +435,7 @@ def read_info(self): file = list(path.preproc.glob('{}.fits'.format(fname))) done = done and (len(file) == 1) self._recipe_execution['sph_ird_preprocess_science'] = done + self._logger.debug('> sph_ird_preprocess_science status = {}'.format(done)) done = True files = frames_info_preproc[(frames_info_preproc['DPR TYPE'] == 'OBJECT,FLUX') | @@ -438,6 +445,7 @@ def read_info(self): file = list(path.preproc.glob('{}.fits'.format(fname))) done = done and (len(file) == 1) self._recipe_execution['sph_ird_star_center'] = done + self._logger.debug('> sph_ird_star_center status = {}'.format(done)) def sort_files(self): @@ -463,6 +471,7 @@ def sort_files(self): self._logger.info(' * found {0} raw FITS files'.format(len(files))) # read list of keywords + self._logger.debug('> read keyword list') keywords = [] file = open(Path(vltpf.__file__).parent / 'instruments' / 'keywords.dat', 'r') for line in file: @@ -473,6 +482,7 @@ def sort_files(self): file.close() # short keywords + self._logger.debug('> translate into short keywords') keywords_short = keywords.copy() for idx in range(len(keywords_short)): key = keywords_short[idx] @@ -480,8 +490,10 @@ def sort_files(self): keywords_short[idx] = key[13:] # files table + self._logger.debug('> create files_info data frame') files_info = pd.DataFrame(index=pd.Index(files, name='FILE'), columns=keywords_short, dtype='float') + self._logger.debug('> read FITS keywords') for f in files: hdu = fits.open(path.raw / '{}.fits'.format(f)) hdr = hdu[0].header @@ -492,6 +504,7 @@ def 
sort_files(self): hdu.close() # drop files that are not handled, based on DPR keywords + self._logger.debug('> drop unsupported file types') files_info.dropna(subset=['DPR TYPE'], inplace=True) files_info = files_info[(files_info['DPR CATG'] != 'ACQUISITION') & (files_info['DPR TYPE'] != 'OBJECT,AO')] @@ -505,6 +518,7 @@ def sort_files(self): files_info.insert(len(files_info.columns), 'PRO CATG', ' ') # convert times + self._logger.debug('> convert times') files_info['DATE-OBS'] = pd.to_datetime(files_info['DATE-OBS'], utc=False) files_info['DATE'] = pd.to_datetime(files_info['DATE'], utc=False) files_info['DET FRAM UTC'] = pd.to_datetime(files_info['DET FRAM UTC'], utc=False) @@ -516,10 +530,12 @@ def sort_files(self): files_info.sort_values(by='DATE-OBS', inplace=True) # save files_info + self._logger.debug('> save files.csv') files_info.to_csv(path.preproc / 'files.csv') self._files_info = files_info # update recipe execution + self._logger.debug('> update recipe execution') self._recipe_execution['sort_files'] = True @@ -535,7 +551,8 @@ def sort_frames(self): self._logger.info('Extract frames information') # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sort_frames', self.recipe_requirements) + toolbox.check_recipe_execution(self._recipe_execution, 'sort_frames', self.recipe_requirements, + logger=self._logger) # parameters path = self._path @@ -558,27 +575,31 @@ def sort_frames(self): img.extend(list(np.arange(NDIT))) # create new dataframe + self._logger.debug('> create and fill frames_info data frame') frames_info = pd.DataFrame(columns=sci_files.columns, index=pd.MultiIndex.from_arrays([files, img], names=['FILE', 'IMG'])) # expand files_info into frames_info frames_info = frames_info.align(files_info, level=0)[1] # compute timestamps - toolbox.compute_times(frames_info) + toolbox.compute_times(frames_info, logger=self._logger) # compute angles (ra, dec, parang) - toolbox.compute_angles(frames_info) + toolbox.compute_angles(frames_info, logger=self._logger) # save + self._logger.debug('> save frames.csv') frames_info.to_csv(path.preproc / 'frames.csv') self._frames_info = frames_info # update recipe execution + self._logger.debug('> update recipe execution') self._recipe_execution['sort_frames'] = True # # print some info # + self._logger.debug('> print observation info') cinfo = frames_info[frames_info['DPR TYPE'] == 'OBJECT'] if len(cinfo) == 0: cinfo = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] @@ -629,7 +650,8 @@ def check_files_association(self): ''' # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'check_files_association', self.recipe_requirements) + toolbox.check_recipe_execution(self._recipe_execution, 'check_files_association', self.recipe_requirements, + logger=self._logger) self._logger.info('File association for calibrations') @@ -653,6 +675,7 @@ def check_files_association(self): # specific data frame for calibrations # keep static calibrations and sky backgrounds + self._logger.debug('> select calib files') calibs = files_info[(files_info['DPR CATG'] == 'CALIB') | ((files_info['DPR CATG'] == 'SCIENCE') & (files_info['DPR TYPE'] == 'SKY'))] @@ -663,6 +686,7 @@ def check_files_association(self): warning_flag = 0 # flat + self._logger.debug('> check instrument flat requirements') cfiles = calibs[(calibs['DPR TYPE'] == 'FLAT,LAMP') & (calibs['INS COMB IFLT'] == filter_comb)] if len(cfiles) <= 1: error_flag += 1 @@ -672,10 +696,12 @@ def check_files_association(self): # 
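sort_files builds the files_info table by reading one FITS header per raw file and copying a fixed keyword list into a pandas DataFrame indexed by file name, with the 'HIERARCH ESO ' prefix stripped to form short column names. A condensed sketch of that pattern; the file name is hypothetical and the keyword list is truncated:

    import pandas as pd
    from astropy.io import fits

    files = ['SPHER.2019-08-23T23:17:03.000']   # hypothetical raw frame
    keywords = ['HIERARCH ESO DPR CATG', 'HIERARCH ESO DPR TYPE', 'DATE-OBS']
    short = [k[13:] if k.startswith('HIERARCH ESO ') else k for k in keywords]

    files_info = pd.DataFrame(index=pd.Index(files, name='FILE'), columns=short)
    for f in files:
        hdr = fits.getheader('{}.fits'.format(f))
        for key, col in zip(keywords, short):
            files_info.loc[f, col] = hdr.get(key)

    files_info['DATE-OBS'] = pd.to_datetime(files_info['DATE-OBS'], utc=False)
    files_info.sort_values(by='DATE-OBS', inplace=True)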
static calibrations that depend on science DIT ################################################## + self._logger.debug('> select science files') obj = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'DPR TYPE'].apply(lambda s: s[0:6]) DITs = files_info.loc[(files_info['DPR CATG'] == 'SCIENCE') & (obj == 'OBJECT'), 'DET SEQ1 DIT'].unique().round(2) # handle darks in a slightly different way because there might be several different DITs + self._logger.debug('> check dark/background requirements') for DIT in DITs: # instrumental backgrounds cfiles = calibs[((calibs['DPR TYPE'] == 'DARK') | (calibs['DPR TYPE'] == 'DARK,BACKGROUND')) & @@ -691,11 +717,16 @@ def check_files_association(self): self._logger.warning(' * there is no sky background for science files with DIT={0} sec. Using a sky background instead of an internal instrumental background can usually provide a cleaner data reduction, especially in K-band'.format(DIT)) # error reporting + self._logger.debug('> report status') if error_flag: self._logger.error('There are {0} warning(s) and {1} error(s) in the classification of files'.format(warning_flag, error_flag)) raise ValueError('There is {0} errors that should be solved before proceeding'.format(error_flag)) else: self._logger.warning('There are {0} warning(s) and {1} error(s) in the classification of files'.format(warning_flag, error_flag)) + + # update recipe execution + self._logger.debug('> update recipe execution') + self._recipe_execution['check_files_association'] = True def sph_ird_cal_dark(self, silent=True): @@ -709,7 +740,8 @@ def sph_ird_cal_dark(self, silent=True): ''' # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_cal_dark', self.recipe_requirements) + toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_cal_dark', self.recipe_requirements, + logger=self._logger) self._logger.info('Darks and backgrounds') @@ -742,6 +774,7 @@ def sph_ird_cal_dark(self, silent=True): self._logger.info(' * {0} in filter {1} with DIT={2:.2f} sec ({3} files)'.format(ctype, cfilt, DIT, len(cfiles))) # create sof + self._logger.debug('> create sof file') sof = path.sof / 'dark_filt={0}_DIT={1:.2f}.sof'.format(cfilt, DIT) file = open(sof, 'w') for f in files: @@ -778,6 +811,7 @@ def sph_ird_cal_dark(self, silent=True): raise NameError('esorex does not appear to be in your PATH. 
Please make sure that the ESO pipeline is properly installed before running VLTPF.') # execute esorex + self._logger.debug('> execute esorex') if silent: proc = subprocess.run(args, cwd=path.tmp, stdout=subprocess.DEVNULL) else: @@ -787,6 +821,7 @@ def sph_ird_cal_dark(self, silent=True): raise ValueError('esorex process was not successful') # store products + self._logger.debug('> update files_info data frame') files_info.loc[dark_file, 'DPR CATG'] = cfiles['DPR CATG'][0] files_info.loc[dark_file, 'DPR TYPE'] = cfiles['DPR TYPE'][0] files_info.loc[dark_file, 'INS COMB IFLT'] = cfiles['INS COMB IFLT'][0] @@ -809,9 +844,11 @@ def sph_ird_cal_dark(self, silent=True): files_info.loc[bpm_file, 'PRO CATG'] = 'IRD_STATIC_BADPIXELMAP' # save + self._logger.debug('> save files.csv') files_info.to_csv(path.preproc / 'files.csv') # update recipe execution + self._logger.debug('> update recipe execution') self._recipe_execution['sph_ird_cal_dark'] = True @@ -826,7 +863,8 @@ def sph_ird_cal_detector_flat(self, silent=True): ''' # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_cal_detector_flat', self.recipe_requirements) + toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_cal_detector_flat', self.recipe_requirements, + logger=self._logger) self._logger.info('Instrument flats') @@ -847,6 +885,7 @@ def sph_ird_cal_detector_flat(self, silent=True): self._logger.info(' * filter {0} ({1} files)'.format(cfilt, len(cfiles))) # create sof + self._logger.debug('> create sof file') sof = path.sof / 'flat_filt={0}.sof'.format(cfilt) file = open(sof, 'w') for f in files: @@ -872,6 +911,7 @@ def sph_ird_cal_detector_flat(self, silent=True): raise NameError('esorex does not appear to be in your PATH. Please make sure that the ESO pipeline is properly installed before running VLTPF.') # execute esorex + self._logger.debug('> execute esorex') if silent: proc = subprocess.run(args, cwd=path.tmp, stdout=subprocess.DEVNULL) else: @@ -881,6 +921,7 @@ def sph_ird_cal_detector_flat(self, silent=True): raise ValueError('esorex process was not successful') # store products + self._logger.debug('> update files_info data frame') files_info.loc[flat_file, 'DPR CATG'] = cfiles['DPR CATG'][0] files_info.loc[flat_file, 'DPR TYPE'] = cfiles['DPR TYPE'][0] files_info.loc[flat_file, 'INS COMB IFLT'] = cfiles['INS COMB IFLT'][0] @@ -903,9 +944,11 @@ def sph_ird_cal_detector_flat(self, silent=True): files_info.loc[bpm_file, 'PRO CATG'] = 'IRD_NON_LINEAR_BADPIXELMAP' # save + self._logger.debug('> save files.csv') files_info.to_csv(path.preproc / 'files.csv') # update recipe execution + self._logger.debug('> update recipe execution') self._recipe_execution['sph_ird_cal_detector_flat'] = True @@ -963,7 +1006,8 @@ def sph_ird_preprocess_science(self, ''' # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_preprocess_science', self.recipe_requirements) + toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_preprocess_science', self.recipe_requirements, + logger=self._logger) self._logger.info('Pre-process science files') @@ -973,6 +1017,7 @@ def sph_ird_preprocess_science(self, frames_info = self._frames_info # clean before we start + self._logger.debug('> remove old preproc files') files = path.preproc.glob('*_DIT???_preproc.fits') for file in files: file.unlink() @@ -986,7 +1031,7 @@ def sph_ird_preprocess_science(self, (files_info['PRO CATG'] == 'IRD_NON_LINEAR_BADPIXELMAP')].index bpm_files = [path.calib / 
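The dark and flat recipes both follow the same esorex convention: write a sof file listing each input frame with its DO classification tag, run the recipe with subprocess in a working directory, and treat a non-zero return code as failure. A schematic sketch of that sequence; the recipe name, option and tags are illustrative placeholders, not necessarily the exact ones used by these recipes:

    import subprocess
    from pathlib import Path

    Path('sof').mkdir(exist_ok=True)
    Path('tmp').mkdir(exist_ok=True)

    # sof file: one input frame and its DO classification tag per line
    sof = Path('sof') / 'dark.sof'
    with open(sof, 'w') as f:
        f.write('raw/dark_001.fits IRD_DARK_RAW\n')
        f.write('raw/dark_002.fits IRD_DARK_RAW\n')

    args = ['esorex', '--msg-level=debug', 'sph_ird_master_dark',
            '--ird.master_dark.outfilename=../calib/dark.fits',
            str(sof.resolve())]
    proc = subprocess.run(args, cwd='tmp', stdout=subprocess.DEVNULL)
    if proc.returncode != 0:
        raise ValueError('esorex process was not successful')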
'{}.fits'.format(f) for f in bpm_files] - bpm = toolbox.compute_bad_pixel_map(bpm_files) + bpm = toolbox.compute_bad_pixel_map(bpm_files, logger=self._logger) # mask dead regions bpm[:15, :] = 0 @@ -1003,6 +1048,7 @@ def sph_ird_preprocess_science(self, flat = fits.getdata(path.calib / '{}.fits'.format(flat_file.index[0])) # final dataframe + self._logger.debug('> create frames_info_preproc data frame') index = pd.MultiIndex(names=['FILE', 'IMG'], levels=[[], []], codes=[[], []]) frames_info_preproc = pd.DataFrame(index=index, columns=frames_info.columns) @@ -1151,6 +1197,7 @@ def sph_ird_preprocess_science(self, img = nimg # save DITs individually + self._logger.debug('> save pre-processed images') for f in range(len(img)): frame = nimg[f, ...].squeeze() hdr['HIERARCH ESO DET NDIT'] = 1 @@ -1158,12 +1205,14 @@ def sph_ird_preprocess_science(self, overwrite=True, output_verify='silentfix') # sort and save final dataframe + self._logger.debug('> save frames_preproc.csv') frames_info_preproc.sort_values(by='TIME', inplace=True) frames_info_preproc.to_csv(path.preproc / 'frames_preproc.csv') self._frames_info_preproc = frames_info_preproc # update recipe execution + self._logger.debug('> update recipe execution') self._recipe_execution['sph_ird_preprocess_science'] = True @@ -1188,7 +1237,8 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): ''' # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_star_center', self.recipe_requirements) + toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_star_center', self.recipe_requirements, + logger=self._logger) self._logger.info('Star centers determination') @@ -1211,6 +1261,7 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): self._logger.info(' * OBJECT,FLUX: {0}'.format(file)) # read data + self._logger.debug('> read data') fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) cube, hdr = fits.getdata(path.preproc / '{}.fits'.format(fname), header=True) @@ -1232,6 +1283,7 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): self._logger.info(' * OBJECT,CENTER: {0}'.format(file)) # read data + self._logger.debug('> read data') fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) cube, hdr = fits.getdata(path.preproc / '{}.fits'.format(fname), header=True) @@ -1258,6 +1310,7 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): fits.writeto(path.preproc / '{}_centers.fits'.format(fname), img_center, overwrite=True) # update recipe execution + self._logger.debug('> update recipe execution') self._recipe_execution['sph_ird_star_center'] = True @@ -1360,7 +1413,8 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a ''' # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_combine_data', self.recipe_requirements) + toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_combine_data', self.recipe_requirements, + logger=self._logger) self._logger.info('Combine science data') @@ -1374,6 +1428,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a wave, bandwidth = transmission.wavelength_bandwidth_filter(filter_comb) wave = np.array(wave) + self._logger.debug('> save final wavelength') fits.writeto(path.products / 'wavelength.fits', wave, overwrite=True) # max images size @@ -1430,9 +1485,11 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a self._logger.info(' 
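toolbox.compute_bad_pixel_map, now called with a logger keyword, reduces a list of individual bad-pixel maps to one master map in which a pixel is flagged if any input flags it. A self-contained sketch with in-memory arrays instead of FITS files and a hypothetical combine_bpm helper:

    import numpy as np

    def combine_bpm(maps, dtype=np.uint8):
        # a pixel is bad if it is flagged in any of the input maps
        bpm = np.zeros_like(maps[0], dtype=bool)
        for m in maps:
            bpm |= (m != 0)
        return bpm.astype(dtype)

    m1 = np.array([[0, 1], [0, 0]], dtype=np.uint8)
    m2 = np.array([[0, 0], [1, 0]], dtype=np.uint8)
    print(combine_bpm([m1, m2]))   # [[0 1]
                                   #  [1 0]]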
==> file {0}/{1}: {2}, DIT #{3}'.format(file_idx+1, len(flux_files), file, idx)) # read data + self._logger.debug('> read data') fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) cube = fits.getdata(path.preproc / '{}.fits'.format(fname)) + self._logger.debug('> read centers') cfile = path.preproc / '{}_centers.fits'.format(fname) if cfile.exists(): centers = fits.getdata(cfile) @@ -1441,22 +1498,26 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a centers = self._default_center # make sure we have only integers if user wants coarse centering - if coarse_centering: + if coarse_centering: centers = centers.astype(np.int) # neutral density + self._logger.debug('> read neutral density information') ND = frames_info.loc[(file, idx), 'INS4 FILT2 NAME'] w, attenuation = transmission.transmission_nd(ND, wave=wave) # DIT, angles, etc + self._logger.debug('> read angles') DIT = frames_info.loc[(file, idx), 'DET SEQ1 DIT'] psf_parang[file_idx] = frames_info.loc[(file, idx), 'PARANG'] psf_derot[file_idx] = frames_info.loc[(file, idx), 'DEROT ANGLE'] # center frames for wave_idx, img in enumerate(cube): + self._logger.debug('> wave {}'.format(wave_idx)) cx, cy = centers[wave_idx, :] + self._logger.debug('> shift and normalize') img = img.astype(np.float) nimg = imutils.shift(img, (cc-cx, cc-cy), method=shift_method) nimg = nimg / DIT / attenuation[wave_idx] @@ -1465,24 +1526,29 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # correct anamorphism if correct_anamorphism: + self._logger.debug('> correct anamorphism') nimg = psf_cube[wave_idx, file_idx] nimg = imutils.scale(nimg, (1.0000, 1.0062), method='interp') psf_cube[wave_idx, file_idx] = nimg # wavelength-scaled version if save_scaled: + self._logger.debug('> spatial scaling') nimg = psf_cube[wave_idx, file_idx] psf_cube_scaled[wave_idx, file_idx] = imutils.scale(nimg, wave[0]/wave[wave_idx], method=shift_method) # save final cubes + self._logger.debug('> save final cubes and metadata') flux_files.to_csv(path.products / 'psf_frames.csv') fits.writeto(path.products / 'psf_cube.fits', psf_cube, overwrite=True) fits.writeto(path.products / 'psf_parang.fits', psf_parang, overwrite=True) fits.writeto(path.products / 'psf_derot.fits', psf_derot, overwrite=True) if save_scaled: + self._logger.debug('> save scaled cubes') fits.writeto(path.products / 'psf_cube_scaled.fits', psf_cube_scaled, overwrite=True) # delete big cubes + self._logger.debug('> free memory') del psf_cube if save_scaled: del psf_cube_scaled @@ -1513,10 +1579,12 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a self._logger.info(' ==> file {0}/{1}: {2}, DIT #{3}'.format(file_idx+1, len(starcen_files), file, idx)) # read data + self._logger.debug('> read data') fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) cube = fits.getdata(path.preproc / '{}.fits'.format(fname)) # use manual center if explicitely requested + self._logger.debug('> read centers') if manual_center is not None: centers = manual_center else: @@ -1528,18 +1596,22 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a centers = centers.astype(np.int) # neutral density + self._logger.debug('> read neutral density information') ND = frames_info.loc[(file, idx), 'INS4 FILT2 NAME'] w, attenuation = transmission.transmission_nd(ND, wave=wave) # DIT, angles, etc + self._logger.debug('> read angles') DIT = frames_info.loc[(file, idx), 'DET SEQ1 DIT'] cen_parang[file_idx] = frames_info.loc[(file, 
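Each flux and star-center frame is re-centered on the reference pixel, then divided by its DIT and by the neutral-density transmission, so all products share the same counts-per-second flux scale. A compact sketch of the per-frame operation, using scipy.ndimage.shift in place of the pipeline's imutils.shift and made-up numbers:

    import numpy as np
    from scipy import ndimage

    def center_and_normalize(img, center, cc, dit, attenuation):
        cx, cy = center
        # bring the star to the reference pixel; scipy shifts are (dy, dx)
        nimg = ndimage.shift(img.astype(float), (cc - cy, cc - cx), order=3)
        # counts / second, corrected for the neutral-density transmission
        return nimg / dit / attenuation

    img = np.random.rand(128, 128)                  # toy frame
    out = center_and_normalize(img, (60.3, 65.8), cc=64,
                               dit=16.0, attenuation=0.01)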
idx), 'PARANG'] cen_derot[file_idx] = frames_info.loc[(file, idx), 'DEROT ANGLE'] # center frames for wave_idx, img in enumerate(cube): + self._logger.debug('> wave {}'.format(wave_idx)) cx, cy = centers[wave_idx, :] + self._logger.debug('> shift and normalize') img = img.astype(np.float) nimg = imutils.shift(img, (cc-cx, cc-cy), method=shift_method) nimg = nimg / DIT / attenuation[wave_idx] @@ -1548,24 +1620,29 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # correct anamorphism if correct_anamorphism: + self._logger.debug('> correct anamorphism') nimg = cen_cube[wave_idx, file_idx] nimg = imutils.scale(nimg, (1.0000, 1.0062), method='interp') cen_cube[wave_idx, file_idx] = nimg # wavelength-scaled version if save_scaled: + self._logger.debug('> spatial scaling') nimg = cen_cube[wave_idx, file_idx] cen_cube_scaled[wave_idx, file_idx] = imutils.scale(nimg, wave[0]/wave[wave_idx], method=shift_method) # save final cubes + self._logger.debug('> save final cubes and metadata') starcen_files.to_csv(path.products / 'starcenter_frames.csv') fits.writeto(path.products / 'starcenter_cube.fits', cen_cube, overwrite=True) fits.writeto(path.products / 'starcenter_parang.fits', cen_parang, overwrite=True) fits.writeto(path.products / 'starcenter_derot.fits', cen_derot, overwrite=True) if save_scaled: + self._logger.debug('> save scaled cubes') fits.writeto(path.products / 'starcenter_cube_scaled.fits', cen_cube_scaled, overwrite=True) # delete big cubes + self._logger.debug('> free memory') del cen_cube if save_scaled: del cen_cube_scaled @@ -1583,6 +1660,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a dms_dy_ref = 0 # use manual center if explicitely requested + self._logger.debug('> read centers') if manual_center is not None: centers = manual_center else: @@ -1633,21 +1711,25 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a self._logger.info(' ==> file {0}/{1}: {2}, DIT #{3}'.format(file_idx+1, len(object_files), file, idx)) # read data + self._logger.debug('> read data') fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) files = list(path.preproc.glob('{}*.fits'.format(fname))) cube = fits.getdata(files[0]) # neutral density + self._logger.debug('> read neutral density information') ND = frames_info.loc[(file, idx), 'INS4 FILT2 NAME'] w, attenuation = transmission.transmission_nd(ND, wave=wave) # DIT, angles, etc + self._logger.debug('> read angles') DIT = frames_info.loc[(file, idx), 'DET SEQ1 DIT'] sci_parang[file_idx] = frames_info.loc[(file, idx), 'PARANG'] sci_derot[file_idx] = frames_info.loc[(file, idx), 'DEROT ANGLE'] # Dithering Motion Stage for star center: value is in micron, # and the pixel size is 18 micron + self._logger.debug('> read DMS position') dms_dx = frames_info.loc[(file, idx), 'INS1 PAC X'] / 18 dms_dy = frames_info.loc[(file, idx), 'INS1 PAC Y'] / 18 @@ -1658,12 +1740,14 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # center frames for wave_idx, img in enumerate(cube): + self._logger.debug('> wave {}'.format(wave_idx)) cx, cy = centers[wave_idx, :] # DMS contribution cx = cx + dms_dx_ref + dms_dx cy = cy + dms_dy_ref + dms_dy + self._logger.debug('> shift and normalize') img = img.astype(np.float) nimg = imutils.shift(img, (cc-cx, cc-cy), method=shift_method) nimg = nimg / DIT / attenuation[wave_idx] @@ -1672,16 +1756,19 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # correct anamorphism if 
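The anamorphism correction applies a fixed relative stretch of 1.0062 along the y axis only, compensating the IRDIS optical distortion. A sketch with scipy.ndimage.zoom standing in for imutils.scale; note that zoom changes the array shape, so a faithful implementation would resample back onto the original grid:

    import numpy as np
    from scipy import ndimage

    def correct_anamorphism(img, factor=1.0062):
        # stretch the y axis only; zoom factors are (y, x)
        return ndimage.zoom(img, (factor, 1.0), order=1)

    img = np.ones((1000, 1000))
    out = correct_anamorphism(img)
    print(img.shape, '->', out.shape)   # (1000, 1000) -> (1006, 1000)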
correct_anamorphism: + self._logger.debug('> correct anamorphism') nimg = sci_cube[wave_idx, file_idx] nimg = imutils.scale(nimg, (1.0000, 1.0062), method='interp') sci_cube[wave_idx, file_idx] = nimg # wavelength-scaled version if save_scaled: + self._logger.debug('> spatial scaling') nimg = sci_cube[wave_idx, file_idx] sci_cube_scaled[wave_idx, file_idx] = imutils.scale(nimg, wave[0]/wave[wave_idx], method=shift_method) # save final cubes + self._logger.debug('> save final cubes and metadata') object_files.to_csv(path.products / 'science_frames.csv') fits.writeto(path.products / 'science_cube.fits', sci_cube, overwrite=True) fits.writeto(path.products / 'science_parang.fits', sci_parang, overwrite=True) @@ -1690,11 +1777,13 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a fits.writeto(path.products / 'science_cube_scaled.fits', sci_cube_scaled, overwrite=True) # delete big cubes + self._logger.debug('> free memory') del sci_cube if save_scaled: del sci_cube_scaled # update recipe execution + self._logger.debug('> update recipe execution') self._recipe_execution['sph_ird_combine_data'] = True @@ -1718,28 +1807,38 @@ def sph_ird_clean(self, delete_raw=False, delete_products=False): # tmp if path.tmp.exists(): + self._logger.debug('> remove {}'.format(path.tmp)) shutil.rmtree(path.tmp, ignore_errors=True) # sof if path.sof.exists(): + self._logger.debug('> remove {}'.format(path.sof)) shutil.rmtree(path.sof, ignore_errors=True) # calib if path.calib.exists(): + self._logger.debug('> remove {}'.format(path.calib)) shutil.rmtree(path.calib, ignore_errors=True) # preproc if path.preproc.exists(): + self._logger.debug('> remove {}'.format(path.preproc)) shutil.rmtree(path.preproc, ignore_errors=True) # raw if delete_raw: if path.raw.exists(): + self._logger.debug('> remove {}'.format(path.raw)) self._logger.warning(' ==> delete raw files') shutil.rmtree(path.raw, ignore_errors=True) # products if delete_products: if path.products.exists(): + self._logger.debug('> remove {}'.format(path.products)) self._logger.warning(' ==> delete products') shutil.rmtree(path.products, ignore_errors=True) + + # update recipe execution + self._logger.debug('> update recipe execution') + self._recipe_execution['sph_ird_clean'] = True diff --git a/vltpf/toolbox.py b/vltpf/toolbox.py index 7b6969b..74d3728 100644 --- a/vltpf/toolbox.py +++ b/vltpf/toolbox.py @@ -36,6 +36,9 @@ def check_recipe_execution(recipe_execution, recipe_name, recipe_requirements, l recipe_requirements : dict Dictionary providing the recipe requirements + logger : logHandler object + Log handler for the reduction. Default is root logger + Returns ------- execute_recipe : bool @@ -53,6 +56,8 @@ def check_recipe_execution(recipe_execution, recipe_name, recipe_requirements, l if not execute_recipe: raise ValueError('{0} cannot be executed because some files have been removed from the reduction directory or the following recipes have not been executed: {1}. 
'.format(recipe_name, missing)) + logger.debug('> execution requirements check for {}: {}'.format(recipe_name, execute_recipe)) + return execute_recipe @@ -85,7 +90,7 @@ def parallatic_angle(ha, dec, geolat): return np.degrees(pa) -def compute_times(frames_info): +def compute_times(frames_info, logger=_log): ''' Compute the various timestamps associated to frames @@ -93,8 +98,14 @@ def compute_times(frames_info): ---------- frames_info : dataframe The data frame with all the information on science frames + + logger : logHandler object + Log handler for the reduction. Default is root logger + ''' + logger.debug('> compute time stamps') + # get necessary values time_start = frames_info['DATE-OBS'].values time_end = frames_info['DET FRAM UTC'].values @@ -131,7 +142,7 @@ def compute_times(frames_info): frames_info['MJD END'] = mjd_end -def compute_angles(frames_info): +def compute_angles(frames_info, logger=_log): ''' Compute the various angles associated to frames: RA, DEC, parang, pupil offset, final derotation angle @@ -140,8 +151,14 @@ def compute_angles(frames_info): ---------- frames_info : dataframe The data frame with all the information on science frames + + logger : logHandler object + Log handler for the reduction. Default is root logger + ''' + logger.debug('> compute angles') + # derotator drift check and correction date_fix = Time('2016-07-12') if np.any(frames_info['MJD'].values <= date_fix.mjd): @@ -236,7 +253,7 @@ def compute_angles(frames_info): frames_info['DEROT ANGLE'] = frames_info['PARANG'] + pupoff -def compute_bad_pixel_map(bpm_files, dtype=np.uint8): +def compute_bad_pixel_map(bpm_files, dtype=np.uint8, logger=_log): ''' Compute a combined bad pixel map provided a list of files @@ -248,7 +265,11 @@ def compute_bad_pixel_map(bpm_files, dtype=np.uint8): dtype : data type Data type for the final bpm + logger : logHandler object + Log handler for the reduction. Default is root logger + Returns + ------- bpm : array_like Combined bad pixel map ''' @@ -257,6 +278,8 @@ def compute_bad_pixel_map(bpm_files, dtype=np.uint8): if len(bpm_files) == 0: raise ValueError('No bad pixel map files provided') + logger.debug('> compute master bad pixel map from {} files'.format(len(bpm_files))) + # get shape shape = fits.getdata(bpm_files[0]).shape @@ -293,6 +316,9 @@ def collapse_frames_info(finfo, fname, collapse_type, coadd_value=2, logger=_log Number of consecutive frames to be coadded when collapse_type is coadd. Default is 2 + logger : logHandler object + Log handler for the reduction. 
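check_recipe_execution, whose docstring gains the logger entry above, reduces to a dictionary lookup: every requirement of the recipe must already be marked as executed. A self-contained sketch of the same logic, using the corrected error message:

    def check_recipe_execution(recipe_execution, recipe_name, recipe_requirements):
        # requirements of recipe_name that have not been executed yet
        missing = [r for r in recipe_requirements[recipe_name]
                   if not recipe_execution.get(r, False)]
        if missing:
            raise ValueError('{0} cannot be executed because the following recipes '
                             'have not been executed: {1}'.format(recipe_name, missing))
        return True

    execution = {'sort_files': True, 'sort_frames': False}
    requirements = {'sph_ird_cal_dark': ['sort_files', 'sort_frames']}
    check_recipe_execution(execution, 'sph_ird_cal_dark', requirements)   # raises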
Default is root logger + Returns ------- nfinfo : dataframe @@ -304,10 +330,13 @@ def collapse_frames_info(finfo, fname, collapse_type, coadd_value=2, logger=_log nfinfo = None if collapse_type == 'none': nfinfo = finfo + logger.debug('> type=none: copy input data frame') elif collapse_type == 'mean': index = pd.MultiIndex.from_arrays([[fname], [0]], names=['FILE', 'IMG']) nfinfo = pd.DataFrame(columns=finfo.columns, index=index, dtype=np.float) + logger.debug('> type=mean: extract min/max values') + # get min/max indices imin = finfo.index.get_level_values(1).min() imax = finfo.index.get_level_values(1).max() @@ -321,14 +350,16 @@ def collapse_frames_info(finfo, fname, collapse_type, coadd_value=2, logger=_log nfinfo.loc[(fname, 0), 'TIME END'] = finfo.loc[(fname, imax), 'TIME END'] nfinfo.loc[(fname, 0), 'TIME'] = finfo.loc[(fname, imin), 'TIME START'] + \ (finfo.loc[(fname, imax), 'TIME END'] - finfo.loc[(fname, imin), 'TIME START']) / 2 - + # recompute angles - compute_angles(nfinfo) + compute_angles(nfinfo, logger=logger) elif collapse_type == 'coadd': coadd_value = int(coadd_value) NDIT = len(finfo) NDIT_new = NDIT // coadd_value + logger.debug('> type=coadd: extract sub-groups of {} frames'.format(coadd_value)) + index = pd.MultiIndex.from_arrays([np.full(NDIT_new, fname), np.arange(NDIT_new)], names=['FILE', 'IMG']) nfinfo = pd.DataFrame(columns=finfo.columns, index=index, dtype=np.float) @@ -346,9 +377,9 @@ def collapse_frames_info(finfo, fname, collapse_type, coadd_value=2, logger=_log nfinfo.loc[(fname, f), 'TIME END'] = finfo.loc[(fname, imax), 'TIME END'] nfinfo.loc[(fname, f), 'TIME'] = finfo.loc[(fname, imin), 'TIME START'] + \ (finfo.loc[(fname, imax), 'TIME END'] - finfo.loc[(fname, imin), 'TIME START']) / 2 - + # recompute angles - compute_angles(nfinfo) + compute_angles(nfinfo, logger=logger) else: raise ValueError('Unknown collapse type {0}'.format(collapse_type)) @@ -419,6 +450,9 @@ def star_centers_from_PSF_img_cube(cube, wave, pixel, save_path=None, logger=_lo Path where to save the fit images. Default is None, which means that the plot is not produced + logger : logHandler object + Log handler for the reduction. Default is root logger + Returns ------- img_centers : array_like @@ -511,6 +545,9 @@ def star_centers_from_PSF_lss_cube(cube, wave_cube, pixel, save_path=None, logge Path where to save the fit images. Default is None, which means that the plot is not produced + logger : logHandler object + Log handler for the reduction. Default is root logger + Returns ------- psf_centers : array_like @@ -641,6 +678,9 @@ def star_centers_from_waffle_img_cube(cube_cen, wave, waffle_orientation, center Path where to save the fit images. Default is None, which means that the plot is not produced + logger : logHandler object + Log handler for the reduction. Default is root logger + Returns ------- spot_centers : array_like @@ -841,6 +881,9 @@ def star_centers_from_waffle_lss_cube(cube_cen, cube_sci, wave_cube, center_gues Path where to save the fit images. Default is None, which means that the plot is not produced + logger : logHandler object + Log handler for the reduction. 
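In the coadd branch, NDIT frames are grouped into NDIT // coadd_value consecutive blocks; each block inherits the start time of its first frame, the end time of its last, and the midpoint as its representative TIME, after which the angles are recomputed. A sketch of the timestamp bookkeeping with plain numpy datetimes instead of a DataFrame and a hypothetical coadd_times helper:

    import numpy as np

    def coadd_times(start, end, coadd_value=2):
        # start/end: per-DIT timestamps (numpy datetime64 arrays)
        ndit_new = len(start) // coadd_value
        rows = []
        for f in range(ndit_new):
            imin, imax = f * coadd_value, (f + 1) * coadd_value - 1
            t0, t1 = start[imin], end[imax]
            rows.append((t0, t1, t0 + (t1 - t0) / 2))   # start, end, midpoint
        return rows

    start = np.array(['2019-08-23T23:17:00', '2019-08-23T23:18:00'], dtype='datetime64[s]')
    end = start + np.timedelta64(50, 's')
    print(coadd_times(start, end))   # one collapsed frame from two DITs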
Default is root logger + Returns ------- spot_centers : array_like From c08af6873d70c4cee74fac571b4baa4afeafbe2c Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Tue, 10 Sep 2019 12:25:34 +0200 Subject: [PATCH 070/101] Add missing recipe in _recipe_execution --- vltpf/IRDIS/ImagingReduction.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py index 0f13157..fee1647 100644 --- a/vltpf/IRDIS/ImagingReduction.py +++ b/vltpf/IRDIS/ImagingReduction.py @@ -133,7 +133,8 @@ def __init__(self, path, log_level='info'): 'sph_ird_cal_detector_flat': False, 'sph_ird_preprocess_science': False, 'sph_ird_star_center': False, - 'sph_ird_combine_data': False + 'sph_ird_combine_data': False, + 'sph_ird_clean': False } # reload any existing data frames @@ -575,7 +576,7 @@ def sort_frames(self): img.extend(list(np.arange(NDIT))) # create new dataframe - self._logger.debug('> create and fill frames_info data frame') + self._logger.debug('> create frames_info data frame') frames_info = pd.DataFrame(columns=sci_files.columns, index=pd.MultiIndex.from_arrays([files, img], names=['FILE', 'IMG'])) # expand files_info into frames_info @@ -1205,7 +1206,7 @@ def sph_ird_preprocess_science(self, overwrite=True, output_verify='silentfix') # sort and save final dataframe - self._logger.debug('> save frames_preproc.csv') + self._logger.debug('> save frames_info_preproc.csv') frames_info_preproc.sort_values(by='TIME', inplace=True) frames_info_preproc.to_csv(path.preproc / 'frames_preproc.csv') @@ -1274,6 +1275,7 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): save_path=save_path, logger=self._logger) # save + self._logger.debug('> save centers') fits.writeto(path.preproc / '{}_centers.fits'.format(fname), img_center, overwrite=True) # then OBJECT,CENTER @@ -1307,6 +1309,7 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): logger=self._logger) # save + self._logger.debug('> save centers') fits.writeto(path.preproc / '{}_centers.fits'.format(fname), img_center, overwrite=True) # update recipe execution From 374c56e95ec943484947e94ae87d0d7aa470a49e Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Tue, 10 Sep 2019 13:21:47 +0200 Subject: [PATCH 071/101] Add debug log messages in IRDIS spectro Ticket #63 --- examples/irdis_spectro_reduction.py | 6 +- vltpf/IRDIS/SpectroReduction.py | 161 ++++++++++++++++++++++++---- 2 files changed, 146 insertions(+), 21 deletions(-) diff --git a/examples/irdis_spectro_reduction.py b/examples/irdis_spectro_reduction.py index cd20ec6..959715b 100644 --- a/examples/irdis_spectro_reduction.py +++ b/examples/irdis_spectro_reduction.py @@ -5,7 +5,7 @@ # #%% init reduction -reduction = IRDIS.SpectroReduction('/Users/avigan/data/VLTPF-test-target/IRD/LSS/') +reduction = IRDIS.SpectroReduction('/Users/avigan/data/VLTPF-test-target/IRD/LSS/', log_level='debug') #%% configuration reduction.config['combine_science_dim'] = 300 @@ -20,7 +20,7 @@ # #%% init reduction -reduction = IRDIS.SpectroReduction('/Users/avigan/data/VLTPF-test-target/IRD/LSS/') +reduction = IRDIS.SpectroReduction('/Users/avigan/data/VLTPF-test-target/IRD/LSS/', log_level='debug') #%% sorting reduction.sort_files() @@ -45,4 +45,4 @@ shift_method='fft', manual_center=None, coarse_centering=False) #%% cleaning -reduction.sph_ird_clean(delete_raw=False, delete_products=True) +reduction.sph_ird_clean(delete_raw=False, delete_products=False) diff --git a/vltpf/IRDIS/SpectroReduction.py 
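The examples now construct reductions with log_level='debug', and every toolbox function accepts a logger keyword defaulting to the module-level logger, so debug messages from helpers carry the reduction's own logger name. A minimal sketch of that plumbing with the standard logging module; the logger names are illustrative:

    import logging

    _log = logging.getLogger(__name__)

    def compute_times(frames_info, logger=_log):
        # toolbox functions take the reduction logger, else fall back to the module logger
        logger.debug('> compute time stamps')

    logging.basicConfig(level=logging.DEBUG)
    reduction_logger = logging.getLogger('vltpf.IRDIS.ImagingReduction')
    reduction_logger.setLevel(logging.DEBUG)       # what log_level='debug' amounts to
    compute_times(None, logger=reduction_logger)   # DEBUG:...:> compute time stamps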
b/vltpf/IRDIS/SpectroReduction.py index dddc80c..3f4e29f 100644 --- a/vltpf/IRDIS/SpectroReduction.py +++ b/vltpf/IRDIS/SpectroReduction.py @@ -142,7 +142,7 @@ def __init__(self, path, log_level='info'): configfile = Path(vltpf.__file__).parent / 'instruments' / '{}.ini'.format(self._instrument) config = configparser.ConfigParser() try: - self._logger.debug('Read configuration') + self._logger.debug('> read configuration') config.read(configfile) # instrument @@ -186,7 +186,8 @@ def __init__(self, path, log_level='info'): 'sph_ird_preprocess_science': False, 'sph_ird_star_center': False, 'sph_ird_wavelength_recalibration': False, - 'sph_ird_combine_data': False + 'sph_ird_combine_data': False, + 'sph_ird_clean': False } # reload any existing data frames @@ -423,6 +424,8 @@ def read_info(self): # files info fname = path.preproc / 'files.csv' if fname.exists(): + self._logger.debug('> read files.csv') + files_info = pd.read_csv(fname, index_col=0) # convert times @@ -446,6 +449,8 @@ def read_info(self): fname = path.preproc / 'frames.csv' if fname.exists(): + self._logger.debug('> read frames.csv') + frames_info = pd.read_csv(fname, index_col=(0, 1)) # convert times @@ -463,6 +468,8 @@ def read_info(self): fname = path.preproc / 'frames_preproc.csv' if fname.exists(): + self._logger.debug('> read frames_preproc.csv') + frames_info_preproc = pd.read_csv(fname, index_col=(0, 1)) # convert times @@ -482,11 +489,13 @@ def read_info(self): # additional checks to update recipe execution if frames_info_preproc is not None: - self._recipe_execution['sph_ird_wavelength_recalibration'] \ - = (path.preproc / 'wavelength_default.fits').exists() + done = (path.preproc / 'wavelength_default.fits').exists() + self._recipe_execution['sph_ird_wave_calib'] = done + self._logger.debug('> sph_ird_wave_calib status = {}'.format(done)) - self._recipe_execution['sph_ird_wavelength_recalibration'] \ - = (path.preproc / 'wavelength_recalibrated.fits').exists() + done = (path.preproc / 'wavelength_recalibrated.fits').exists() + self._recipe_execution['sph_ird_wavelength_recalibration'] = done + self._logger.debug('> sph_ird_wavelength_recalibration status = {}'.format(done)) done = True files = frames_info_preproc.index @@ -495,6 +504,7 @@ def read_info(self): file = list(path.preproc.glob('{}.fits'.format(fname))) done = done and (len(file) == 1) self._recipe_execution['sph_ird_preprocess_science'] = done + self._logger.debug('> sph_ird_preprocess_science status = {}'.format(done)) done = True files = frames_info_preproc[(frames_info_preproc['DPR TYPE'] == 'OBJECT,FLUX') | @@ -504,6 +514,7 @@ def read_info(self): file = list(path.preproc.glob('{}.fits'.format(fname))) done = done and (len(file) == 1) self._recipe_execution['sph_ird_star_center'] = done + self._logger.debug('> sph_ird_star_center status = {}'.format(done)) def sort_files(self): @@ -529,6 +540,7 @@ def sort_files(self): self._logger.info(' * found {0} raw FITS files'.format(len(files))) # read list of keywords + self._logger.debug('> read keyword list') keywords = [] file = open(Path(vltpf.__file__).parent / 'instruments' / 'keywords.dat', 'r') for line in file: @@ -539,6 +551,7 @@ def sort_files(self): file.close() # short keywords + self._logger.debug('> translate into short keywords') keywords_short = keywords.copy() for idx in range(len(keywords_short)): key = keywords_short[idx] @@ -546,8 +559,10 @@ def sort_files(self): keywords_short[idx] = key[13:] # files table + self._logger.debug('> create files_info data frame') files_info = 
pd.DataFrame(index=pd.Index(files, name='FILE'), columns=keywords_short, dtype='float') + self._logger.debug('> read FITS keywords') for f in files: hdu = fits.open(path.raw / '{}.fits'.format(f)) hdr = hdu[0].header @@ -558,6 +573,7 @@ def sort_files(self): hdu.close() # drop files that are not handled, based on DPR keywords + self._logger.debug('> drop unsupported file types') files_info.dropna(subset=['DPR TYPE'], inplace=True) files_info = files_info[(files_info['DPR CATG'] != 'ACQUISITION') & (files_info['DPR TYPE'] != 'OBJECT,AO')] @@ -571,6 +587,7 @@ def sort_files(self): files_info.insert(len(files_info.columns), 'PRO CATG', ' ') # convert times + self._logger.debug('> convert times') files_info['DATE-OBS'] = pd.to_datetime(files_info['DATE-OBS'], utc=False) files_info['DATE'] = pd.to_datetime(files_info['DATE'], utc=False) files_info['DET FRAM UTC'] = pd.to_datetime(files_info['DET FRAM UTC'], utc=False) @@ -582,10 +599,12 @@ def sort_files(self): files_info.sort_values(by='DATE-OBS', inplace=True) # save files_info + self._logger.debug('> save files.csv') files_info.to_csv(path.preproc / 'files.csv') self._files_info = files_info # update recipe execution + self._logger.debug('> update recipe execution') self._recipe_execution['sort_files'] = True @@ -601,7 +620,8 @@ def sort_frames(self): self._logger.info('Extract frames information') # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sort_frames', self.recipe_requirements) + toolbox.check_recipe_execution(self._recipe_execution, 'sort_frames', self.recipe_requirements, + logger=self._logger) # parameters path = self._path @@ -624,27 +644,31 @@ def sort_frames(self): img.extend(list(np.arange(NDIT))) # create new dataframe + self._logger.debug('> create frames_info data frame') frames_info = pd.DataFrame(columns=sci_files.columns, index=pd.MultiIndex.from_arrays([files, img], names=['FILE', 'IMG'])) # expand files_info into frames_info frames_info = frames_info.align(files_info, level=0)[1] # compute timestamps - toolbox.compute_times(frames_info) + toolbox.compute_times(frames_info, logger=self._logger) # compute angles (ra, dec, parang) - toolbox.compute_angles(frames_info) + toolbox.compute_angles(frames_info, logger=self._logger) # save + self._logger.debug('> save frames.csv') frames_info.to_csv(path.preproc / 'frames.csv') self._frames_info = frames_info # update recipe execution + self._logger.debug('> update recipe execution') self._recipe_execution['sort_frames'] = True # # print some info # + self._logger.debug('> print observation info') cinfo = frames_info[frames_info['DPR TYPE'] == 'OBJECT'] if len(cinfo) == 0: cinfo = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] @@ -695,7 +719,8 @@ def check_files_association(self): ''' # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'check_files_association', self.recipe_requirements) + toolbox.check_recipe_execution(self._recipe_execution, 'check_files_association', self.recipe_requirements, + logger=self._logger) self._logger.info('File association for calibrations') @@ -722,6 +747,7 @@ def check_files_association(self): # specific data frame for calibrations # keep static calibrations and sky backgrounds + self._logger.debug('> select calib files') calibs = files_info[(files_info['DPR CATG'] == 'CALIB') | ((files_info['DPR CATG'] == 'SCIENCE') & (files_info['DPR TYPE'] == 'SKY'))] @@ -732,12 +758,14 @@ def check_files_association(self): warning_flag = 0 # flat + self._logger.debug('> 
check instrument flat requirements') cfiles = calibs[(calibs['DPR TYPE'] == 'FLAT,LAMP') & (calibs['INS COMB IFLT'] == filter_comb)] if len(cfiles) <= 1: error_flag += 1 self._logger.error(' * there should be more than 1 flat in filter combination {0}'.format(filter_comb)) # wave + self._logger.debug('> check wavelength calibration requirements') cfiles = calibs[(calibs['DPR TYPE'] == 'LAMP,WAVE') & (calibs['INS COMB IFLT'] == filter_comb)] if len(cfiles) == 0: error_flag += 1 @@ -759,10 +787,12 @@ def check_files_association(self): # static calibrations that depend on science DIT ################################################## + self._logger.debug('> select science files') obj = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'DPR TYPE'].apply(lambda s: s[0:6]) DITs = files_info.loc[(files_info['DPR CATG'] == 'SCIENCE') & (obj == 'OBJECT'), 'DET SEQ1 DIT'].unique().round(2) # handle darks in a slightly different way because there might be several different DITs + self._logger.debug('> check dark/background requirements') for DIT in DITs: # instrumental backgrounds cfiles = calibs[((calibs['DPR TYPE'] == 'DARK') | (calibs['DPR TYPE'] == 'DARK,BACKGROUND')) & @@ -778,6 +808,7 @@ def check_files_association(self): self._logger.warning(' * there is no sky background for science files with DIT={0} sec. Using a sky background instead of an internal instrumental background can usually provide a cleaner data reduction'.format(DIT)) # error reporting + self._logger.debug('> report status') if error_flag: self._logger.error('There are {0} warning(s) and {1} error(s) in the classification of files'.format(warning_flag, error_flag)) raise ValueError('There is {0} errors that should be solved before proceeding'.format(error_flag)) @@ -788,6 +819,10 @@ def check_files_association(self): files_info.to_csv(path.preproc / 'files.csv') self._files_info = files_info + # update recipe execution + self._logger.debug('> update recipe execution') + self._recipe_execution['check_files_association'] = True + def sph_ird_cal_dark(self, silent=True): ''' @@ -800,7 +835,8 @@ def sph_ird_cal_dark(self, silent=True): ''' # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_cal_dark', self.recipe_requirements) + toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_cal_dark', self.recipe_requirements, + logger=self._logger) self._logger.info('Darks and backgrounds') @@ -833,6 +869,7 @@ def sph_ird_cal_dark(self, silent=True): self._logger.info(' * {0} in filter {1} with DIT={2:.2f} sec ({3} files)'.format(ctype, cfilt, DIT, len(cfiles))) # create sof + self._logger.debug('> create sof file') sof = path.sof / 'dark_filt={0}_DIT={1:.2f}.sof'.format(cfilt, DIT) file = open(sof, 'w') for f in files: @@ -869,6 +906,7 @@ def sph_ird_cal_dark(self, silent=True): raise NameError('esorex does not appear to be in your PATH. 
Please make sure that the ESO pipeline is properly installed before running VLTPF.') # execute esorex + self._logger.debug('> execute esorex') if silent: proc = subprocess.run(args, cwd=path.tmp, stdout=subprocess.DEVNULL) else: @@ -878,6 +916,7 @@ def sph_ird_cal_dark(self, silent=True): raise ValueError('esorex process was not successful') # store products + self._logger.debug('> update files_info data frame') files_info.loc[dark_file, 'DPR CATG'] = cfiles['DPR CATG'][0] files_info.loc[dark_file, 'DPR TYPE'] = cfiles['DPR TYPE'][0] files_info.loc[dark_file, 'INS COMB IFLT'] = cfiles['INS COMB IFLT'][0] @@ -900,9 +939,11 @@ def sph_ird_cal_dark(self, silent=True): files_info.loc[bpm_file, 'PRO CATG'] = 'IRD_STATIC_BADPIXELMAP' # save + self._logger.debug('> save files.csv') files_info.to_csv(path.preproc / 'files.csv') # update recipe execution + self._logger.debug('> update recipe execution') self._recipe_execution['sph_ird_cal_dark'] = True @@ -917,7 +958,8 @@ def sph_ird_cal_detector_flat(self, silent=True): ''' # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_cal_detector_flat', self.recipe_requirements) + toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_cal_detector_flat', self.recipe_requirements, + logger=self._logger) self._logger.info('Instrument flats') @@ -937,6 +979,7 @@ def sph_ird_cal_detector_flat(self, silent=True): self._logger.info(' * filter {0} ({1} files)'.format(cfilt, len(cfiles))) # create sof + self._logger.debug('> create sof file') sof = path.sof / 'flat_filt={0}.sof'.format(cfilt) file = open(sof, 'w') for f in files: @@ -962,6 +1005,7 @@ def sph_ird_cal_detector_flat(self, silent=True): raise NameError('esorex does not appear to be in your PATH. Please make sure that the ESO pipeline is properly installed before running VLTPF.') # execute esorex + self._logger.debug('> execute esorex') if silent: proc = subprocess.run(args, cwd=path.tmp, stdout=subprocess.DEVNULL) else: @@ -971,6 +1015,7 @@ def sph_ird_cal_detector_flat(self, silent=True): raise ValueError('esorex process was not successful') # store products + self._logger.debug('> update files_info data frame') files_info.loc[flat_file, 'DPR CATG'] = cfiles['DPR CATG'][0] files_info.loc[flat_file, 'DPR TYPE'] = cfiles['DPR TYPE'][0] files_info.loc[flat_file, 'INS COMB IFLT'] = cfiles['INS COMB IFLT'][0] @@ -993,9 +1038,11 @@ def sph_ird_cal_detector_flat(self, silent=True): files_info.loc[bpm_file, 'PRO CATG'] = 'IRD_NON_LINEAR_BADPIXELMAP' # save + self._logger.debug('> save files.csv') files_info.to_csv(path.preproc / 'files.csv') # update recipe execution + self._logger.debug('> update recipe execution') self._recipe_execution['sph_ird_cal_detector_flat'] = True @@ -1010,7 +1057,8 @@ def sph_ird_wave_calib(self, silent=True): ''' # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_wave_calib', self.recipe_requirements) + toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_wave_calib', self.recipe_requirements, + logger=self._logger) self._logger.info('Wavelength calibration') @@ -1045,8 +1093,10 @@ def sph_ird_wave_calib(self, silent=True): wave_lasers = self._wave_cal_lasers # esorex parameters + self._logger.debug('> filter combination is {}'.format(filter_comb)) if filter_comb == 'S_LR': # create standard sof in LRS + self._logger.debug('> create sof file') sof = path.sof / 'wave.sof' file = open(sof, 'w') file.write('{0}/{1}.fits {2}\n'.format(path.raw, wave_file, 
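get_wavelength_calibration turns the esorex wavelength calibration product into one wavelength vector per IRDIS field, bounded by the mode-dependent wave_min and wave_max; the result stored as wavelength_default.fits is this default solution. A toy stand-in under the assumption of a purely linear law, with made-up LRS bounds:

    import numpy as np

    def default_wavelength(wave_min, wave_max, npix=1024):
        # linear wavelength law along the 1024-pixel spectral axis
        pix = np.arange(npix)
        wave = wave_min + pix * (wave_max - wave_min) / (npix - 1)
        return np.stack([wave, wave])   # same default for both IRDIS fields

    wave_lin = default_wavelength(920.0, 2330.0)   # made-up LRS bounds, in nm
    print(wave_lin.shape)                          # (2, 1024)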
'IRD_WAVECALIB_RAW')) @@ -1073,6 +1123,7 @@ def sph_ird_wave_calib(self, silent=True): sof] elif filter_comb == 'S_MR': # masking of second order spectrum in MRS + self._logger.debug('> masking second order') wave_fname = wave_file.index[0] wave_data, hdr = fits.getdata(path.raw / '{}.fits'.format(wave_fname), header=True) wave_data = wave_data.squeeze() @@ -1081,6 +1132,7 @@ def sph_ird_wave_calib(self, silent=True): output_verify='silentfix') # create sof using the masked file + self._logger.debug('> create sof file') sof = path.sof / 'wave.sof' file = open(sof, 'w') file.write('{0}/{1}_masked.fits {2}\n'.format(path.preproc, wave_fname, 'IRD_WAVECALIB_RAW')) @@ -1110,6 +1162,7 @@ def sph_ird_wave_calib(self, silent=True): raise NameError('esorex does not appear to be in your PATH. Please make sure that the ESO pipeline is properly installed before running VLTPF.') # execute esorex + self._logger.debug('> execute esorex') if silent: proc = subprocess.run(args, cwd=path.tmp, stdout=subprocess.DEVNULL) else: @@ -1119,6 +1172,7 @@ def sph_ird_wave_calib(self, silent=True): raise ValueError('esorex process was not successful') # store products + self._logger.debug('> update files_info data frame') files_info.loc[wav_file, 'DPR CATG'] = wave_file['DPR CATG'][0] files_info.loc[wav_file, 'DPR TYPE'] = wave_file['DPR TYPE'][0] files_info.loc[wav_file, 'INS COMB IFLT'] = wave_file['INS COMB IFLT'][0] @@ -1131,9 +1185,11 @@ def sph_ird_wave_calib(self, silent=True): files_info.loc[wav_file, 'PRO CATG'] = 'IRD_WAVECALIB' # save + self._logger.debug('> save files.csv') files_info.to_csv(path.preproc / 'files.csv') # store default wavelength calibration in preproc + self._logger.debug('> compute default wavelength calibration') if filter_comb == 'S_LR': centers = self._default_center_lrs wave_min = self._wave_min_lrs @@ -1146,9 +1202,11 @@ def sph_ird_wave_calib(self, silent=True): wave_calib = fits.getdata(path.calib / '{}.fits'.format(wav_file)) wave_lin = get_wavelength_calibration(wave_calib, centers, wave_min, wave_max) + self._logger.debug('> save default wavelength calibration') fits.writeto(path.preproc / 'wavelength_default.fits', wave_lin.T, overwrite=True) # update recipe execution + self._logger.debug('> update recipe execution') self._recipe_execution['sph_ird_wave_calib'] = True @@ -1191,7 +1249,8 @@ def sph_ird_preprocess_science(self, ''' # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_preprocess_science', self.recipe_requirements) + toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_preprocess_science', self.recipe_requirements, + logger=self._logger) self._logger.info('Pre-process science files') @@ -1201,6 +1260,7 @@ def sph_ird_preprocess_science(self, frames_info = self._frames_info # clean before we start + self._logger.debug('> remove old preproc files') files = path.preproc.glob('*_DIT???_preproc.fits') for file in files: file.unlink() @@ -1214,7 +1274,7 @@ def sph_ird_preprocess_science(self, (files_info['PRO CATG'] == 'IRD_NON_LINEAR_BADPIXELMAP')].index bpm_files = [path.calib / '{}.fits'.format(f) for f in bpm_files] - bpm = toolbox.compute_bad_pixel_map(bpm_files) + bpm = toolbox.compute_bad_pixel_map(bpm_files, logger=self._logger) # mask dead regions bpm[:15, :] = 0 @@ -1231,6 +1291,7 @@ def sph_ird_preprocess_science(self, flat = fits.getdata(path.calib / '{}.fits'.format(flat_file.index[0])) # final dataframe + self._logger.debug('> create frames_info_preproc data frame') index = 
pd.MultiIndex(names=['FILE', 'IMG'], levels=[[], []], codes=[[], []]) frames_info_preproc = pd.DataFrame(index=index, columns=frames_info.columns, dtype='float') @@ -1355,6 +1416,7 @@ def sph_ird_preprocess_science(self, img = nimg # save DITs individually + self._logger.debug('> save pre-processed images') for f in range(len(img)): frame = nimg[f, ...].squeeze() hdr['HIERARCH ESO DET NDIT'] = 1 @@ -1362,12 +1424,14 @@ def sph_ird_preprocess_science(self, overwrite=True, output_verify='silentfix') # sort and save final dataframe + self._logger.debug('> save frames_info_preproc.csv') frames_info_preproc.sort_values(by='TIME', inplace=True) frames_info_preproc.to_csv(path.preproc / 'frames_preproc.csv') self._frames_info_preproc = frames_info_preproc # update recipe execution + self._logger.debug('> update recipe execution') self._recipe_execution['sph_ird_preprocess_science'] = True @@ -1387,7 +1451,8 @@ def sph_ird_star_center(self, high_pass=False, plot=True): ''' # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_star_center', self.recipe_requirements) + toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_star_center', self.recipe_requirements, + logger=self._logger) self._logger.info('Star centers determination') @@ -1409,6 +1474,7 @@ def sph_ird_star_center(self, high_pass=False, plot=True): wave_max = self._wave_max_mrs # wavelength map + self._logger.debug('> compute default wavelength calibration') wave_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IRD_WAVECALIB')] wave_calib = fits.getdata(path.calib / '{}.fits'.format(wave_file.index[0])) wave_lin = get_wavelength_calibration(wave_calib, centers, wave_min, wave_max) @@ -1420,6 +1486,7 @@ def sph_ird_star_center(self, high_pass=False, plot=True): self._logger.info(' * OBJECT,FLUX: {0}'.format(file)) # read data + self._logger.debug('> read data') fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) cube, hdr = fits.getdata(path.preproc / '{}.fits'.format(fname), header=True) @@ -1432,6 +1499,7 @@ def sph_ird_star_center(self, high_pass=False, plot=True): logger=self._logger) # save + self._logger.debug('> save centers') fits.writeto(path.preproc / '{}_centers.fits'.format(fname), psf_center, overwrite=True) # then OBJECT,CENTER (if any) @@ -1444,11 +1512,13 @@ def sph_ird_star_center(self, high_pass=False, plot=True): self._logger.info(' * OBJECT,CENTER: {0}'.format(file)) # read center data + self._logger.debug('> read data') fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) cube_cen, hdr = fits.getdata(path.preproc / '{}.fits'.format(fname), header=True) # read science data + self._logger.debug('> read matching science data') if len(starsci_files) != 0: fname2 = '{0}_DIT{1:03d}_preproc'.format(starsci_files.index[0][0], idx) cube_sci, hdr = fits.getdata(path.preproc / '{}.fits'.format(fname2), header=True) else: @@ -1465,10 +1536,12 @@ def sph_ird_star_center(self, high_pass=False, plot=True): logger=self._logger) # save + self._logger.debug('> save centers') fits.writeto(path.preproc / '{}_centers.fits'.format(fname), img_centers, overwrite=True) fits.writeto(path.preproc / '{}_spot_distance.fits'.format(fname), spot_dist, overwrite=True) # update recipe execution + self._logger.debug('> update recipe execution') self._recipe_execution['sph_ird_star_center'] = True @@ -1495,7 +1568,8 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): ''' # check if recipe can be 
executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_wavelength_recalibration', self.recipe_requirements) + toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_wavelength_recalibration', self.recipe_requirements, + logger=self._logger) self._logger.info('Wavelength recalibration')
@@ -1506,6 +1580,7 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): frames_info = self._frames_info_preproc # remove old files + self._logger.debug('> remove old recalibrated wavelength calibration') wfile = path.preproc / 'wavelength_recalibrated.fits' if wfile.exists(): wfile.unlink()
@@ -1522,6 +1597,7 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): wave_max = self._wave_max_mrs # wavelength map + self._logger.debug('> compute default wavelength calibration') wave_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IRD_WAVECALIB')] wave_calib = fits.getdata(path.calib / '{}.fits'.format(wave_file.index[0])) wave_lin = get_wavelength_calibration(wave_calib, centers, wave_min, wave_max)
@@ -1531,6 +1607,7 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): wave_ref = lasers[idx_ref] # get spot distance from the first OBJECT,CENTER in the sequence + self._logger.debug('> read saved spot distances') starcen_files = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] if len(starcen_files) == 0: self._logger.info(' ==> no OBJECT,CENTER file in the data set. Wavelength cannot be recalibrated. The standard wavelength calibrated by the ESO pipeline will be used.')
@@ -1553,8 +1630,10 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): imin = np.nanargmin(np.abs(wave-wave_ref)) # scaling factor + self._logger.debug('> compute wavelength scaling factor') scaling_raw = dist / dist[imin] + self._logger.debug('> polynomial fit for recalibration') if filter_comb == 'S_LR': # FIXME: implement smoothing of the scaling factor for # LRS mode
@@ -1577,10 +1656,12 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): self._logger.info(' ==> difference with calibrated wavelength: min={0:.1f} nm, max={1:.1f} nm'.format(np.nanmin(wave_diff), np.nanmax(wave_diff))) if fit_scaling: + self._logger.debug('> use fitted scaling factor') wave_final[:, fidx] = wave_final_fit use_r = '' use_f = ' <==' else: + self._logger.debug('> use raw scaling factor') wave_final[:, fidx] = wave_final_raw use_r = ' <==' use_f = ''
@@ -1621,6 +1702,7 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): fits.writeto(path.preproc / 'wavelength_recalibrated.fits', wave_final, overwrite=True) # update recipe execution + self._logger.debug('> update recipe execution') self._recipe_execution['sph_ird_wavelength_recalibration'] = True
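
[Editor's note: the recalibration above chooses between the raw spot-distance scaling (fit_scaling=False) and a smoothed polynomial fit of it (fit_scaling=True). A minimal, self-contained numpy sketch of that smoothing step follows; all array values and the reference wavelength are illustrative stand-ins, not pipeline data.]

    import numpy as np

    # illustrative stand-in for the per-channel scaling factor,
    # i.e. scaling_raw = dist / dist[imin] in the recipe above
    nwave = 1024
    wave_ref = 1545.07  # nm, reference calibration wavelength (value illustrative)
    scaling_raw = 1.0 + 0.05*np.linspace(-1, 1, nwave) + 0.002*np.random.randn(nwave)

    # a low-order polynomial fit smooths out channel-to-channel noise
    pix = np.arange(nwave)
    scaling_fit = np.polyval(np.polyfit(pix, scaling_raw, deg=2), pix)

    wave_final_raw = wave_ref * scaling_raw   # used when fit_scaling=False
    wave_final_fit = wave_ref * scaling_fit   # used when fit_scaling=True
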
@@ -1719,7 +1801,8 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m ''' # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_combine_data', self.recipe_requirements) + toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_combine_data', self.recipe_requirements, + logger=self._logger) self._logger.info('Combine science data')
@@ -1740,6 +1823,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m wave_max = self._wave_max_mrs # read final wavelength calibration + self._logger.debug('> read final wavelength') wfile = path.preproc / 'wavelength_recalibrated.fits' if wfile.exists(): wave = fits.getdata(wfile)
@@ -1817,9 +1901,11 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m self._logger.info(' ==> file {0}/{1}: {2}, DIT #{3}'.format(file_idx+1, len(flux_files), file, idx)) # read data + self._logger.debug('> read data') fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) cube = fits.getdata(path.preproc / '{}.fits'.format(fname)) + self._logger.debug('> read centers') cfile = path.preproc / '{}_centers.fits'.format(fname) if cfile.exists(): centers = fits.getdata(cfile)
@@ -1832,16 +1918,20 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m centers = centers.astype(np.int) # DIT, angles, etc + self._logger.debug('> read angles') DIT = frames_info.loc[(file, idx), 'DET SEQ1 DIT'] psf_posang[file_idx] = frames_info.loc[(file, idx), 'INS4 DROT2 POSANG'] + 90 # center for field_idx, img in enumerate(cube): + self._logger.debug('> field {}'.format(field_idx)) # wavelength solution for this field ciwave = iwave[:, field_idx] if correct_mrs_chromatism and (filter_comb == 'S_MR'): + self._logger.debug('> correct MRS chromatism') img = img.astype(np.float) + self._logger.debug('> shift and normalize') for wave_idx, widx in enumerate(ciwave): cx = centers[widx, field_idx]
@@ -1854,6 +1944,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m else: cx = centers[ciwave, field_idx].mean() + self._logger.debug('> shift and normalize') img = img.astype(np.float) nimg = imutils.shift(img, (cc-cx, 0), method=shift_method) nimg = nimg / DIT
@@ -1861,12 +1952,15 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m psf_cube[field_idx, file_idx] = nimg[ciwave, :psf_dim] # neutral density + self._logger.debug('> compensate for neutral density') cwave = final_wave[:, field_idx] ND = frames_info.loc[(file, idx), 'INS4 FILT2 NAME'] w, attenuation = transmission.transmission_nd(ND, wave=cwave) psf_cube[field_idx, file_idx] = (psf_cube[field_idx, file_idx].T / attenuation).T + self._logger.debug('> save final cubes and metadata') if split_posang: + self._logger.debug('> split position angles') pas = np.unique(psf_posang) for pa in pas: ii = np.where(psf_posang == pa)[0]
@@ -1886,6 +1980,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m fits.writeto(path.products / 'psf_posang=all_cube.fits', psf_cube, overwrite=True) # delete big cubes + self._logger.debug('> free memory') del psf_cube #
@@ -1911,10 +2006,12 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m self._logger.info(' ==> file {0}/{1}: {2}, DIT #{3}'.format(file_idx+1, len(starcen_files), file, idx)) # read data + self._logger.debug('> read data') fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) cube = fits.getdata(path.preproc / '{}.fits'.format(fname)) # use manual center if explicitly requested + self._logger.debug('> read centers') if manual_center is not None: centers = manual_center else:
@@ -1925,16 +2022,20 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m centers = centers.astype(np.int) # DIT, angles, etc + self._logger.debug('> read angles') DIT = frames_info.loc[(file, idx), 'DET SEQ1 DIT'] cen_posang[file_idx] = frames_info.loc[(file, idx), 'INS4 DROT2 POSANG'] + 90 # center for field_idx, img in enumerate(cube): + self._logger.debug('> field {}'.format(field_idx)) # wavelength solution for this field ciwave = iwave[:, field_idx] if correct_mrs_chromatism and (filter_comb == 'S_MR'): + self._logger.debug('> correct
MRS chromatism') img = img.astype(np.float) + self._logger.debug('> shift and normalize') for wave_idx, widx in enumerate(ciwave): cx = centers[widx, field_idx]
@@ -1946,6 +2047,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m else: cx = centers[ciwave, field_idx].mean() + self._logger.debug('> shift and normalize') img = img.astype(np.float) nimg = imutils.shift(img, (cc-cx, 0), method=shift_method) nimg = nimg / DIT
@@ -1953,12 +2055,15 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m cen_cube[field_idx, file_idx] = nimg[ciwave, :science_dim] # neutral density + self._logger.debug('> compensate for neutral density') cwave = final_wave[:, field_idx] ND = frames_info.loc[(file, idx), 'INS4 FILT2 NAME'] w, attenuation = transmission.transmission_nd(ND, wave=cwave) cen_cube[field_idx, file_idx] = (cen_cube[field_idx, file_idx].T / attenuation).T + self._logger.debug('> save final cubes and metadata') if split_posang: + self._logger.debug('> split position angles') pas = np.unique(cen_posang) for pa in pas: ii = np.where(cen_posang == pa)[0]
@@ -1993,6 +2098,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m sci_posang = np.zeros(nfiles) # use manual center if explicitly requested + self._logger.debug('> read centers') if manual_center is not None: centers = np.full((1024, 2), manual_center, dtype=np.float) else:
@@ -2022,20 +2128,25 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m self._logger.info(' ==> file {0}/{1}: {2}, DIT #{3}'.format(file_idx+1, len(object_files), file, idx)) # read data + self._logger.debug('> read data') fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) cube = fits.getdata(path.preproc / '{}.fits'.format(fname)) # DIT, angles, etc + self._logger.debug('> read angles') DIT = frames_info.loc[(file, idx), 'DET SEQ1 DIT'] sci_posang[file_idx] = frames_info.loc[(file, idx), 'INS4 DROT2 POSANG'] + 90 # center for field_idx, img in enumerate(cube): + self._logger.debug('> field {}'.format(field_idx)) # wavelength solution for this field ciwave = iwave[:, field_idx] if correct_mrs_chromatism and (filter_comb == 'S_MR'): + self._logger.debug('> correct MRS chromatism') img = img.astype(np.float) + self._logger.debug('> shift and normalize') for wave_idx, widx in enumerate(ciwave): cx = centers[widx, field_idx]
@@ -2047,6 +2158,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m else: cx = centers[ciwave, field_idx].mean() + self._logger.debug('> shift and normalize') img = img.astype(np.float) nimg = imutils.shift(img, (cc-cx, 0), method=shift_method) nimg = nimg / DIT
@@ -2054,12 +2166,15 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m sci_cube[field_idx, file_idx] = nimg[ciwave, :science_dim] # neutral density + self._logger.debug('> compensate for neutral density') cwave = final_wave[:, field_idx] ND = frames_info.loc[(file, idx), 'INS4 FILT2 NAME'] w, attenuation = transmission.transmission_nd(ND, wave=cwave) sci_cube[field_idx, file_idx] = (sci_cube[field_idx, file_idx].T / attenuation).T + self._logger.debug('> save final cubes and metadata') if split_posang: + self._logger.debug('> split position angles') pas = np.unique(sci_posang) for pa in pas: ii = np.where(sci_posang == pa)[0]
@@ -2082,6 +2197,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m del sci_cube # update recipe execution + self._logger.debug('> update recipe execution') self._recipe_execution['sph_ird_combine_data'] = True
@@ -2105,29 +2221,38 @@ def sph_ird_clean(self, delete_raw=False, delete_products=False): # tmp if path.tmp.exists(): + self._logger.debug('> remove {}'.format(path.tmp)) shutil.rmtree(path.tmp, ignore_errors=True) # sof if path.sof.exists(): + self._logger.debug('> remove {}'.format(path.sof)) shutil.rmtree(path.sof, ignore_errors=True) # calib if path.calib.exists(): + self._logger.debug('> remove {}'.format(path.calib)) shutil.rmtree(path.calib, ignore_errors=True) # preproc if path.preproc.exists(): + self._logger.debug('> remove {}'.format(path.preproc)) shutil.rmtree(path.preproc, ignore_errors=True) # raw if delete_raw: if path.raw.exists(): + self._logger.debug('> remove {}'.format(path.raw)) self._logger.warning(' ==> delete raw files') shutil.rmtree(path.raw, ignore_errors=True) # products if delete_products: if path.products.exists(): + self._logger.debug('> remove {}'.format(path.products)) self._logger.warning(' ==> delete products') shutil.rmtree(path.products, ignore_errors=True) + + # update recipe execution + self._logger.debug('> update recipe execution') + self._recipe_execution['sph_ird_clean'] = True
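
[Editor's note: nearly every recipe in the patch above now passes logger=self._logger into toolbox.check_recipe_execution(). The package's actual implementation is not shown in this series; the behaviour the recipes rely on can be sketched as follows — a minimal, hypothetical version, not the real toolbox code.]

    import logging

    _log = logging.getLogger(__name__)

    def check_recipe_execution(recipe_execution, recipe_name, recipe_requirements, logger=_log):
        # verify that all recipes required by recipe_name were already executed
        missing = [r for r in recipe_requirements[recipe_name]
                   if not recipe_execution.get(r, False)]
        if missing:
            logger.error('{0} cannot be executed: missing requirements {1}'.format(recipe_name, missing))
            raise ValueError('Missing requirements for {}'.format(recipe_name))

[Threading the logger keyword through, as these patches do, means that errors raised during the dependency check land in the same log as the rest of the reduction.]
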
From d4d8e7b8c61c229f6714bc4f37e8d0551cae97bf Mon Sep 17 00:00:00 2001
From: Arthur Vigan
Date: Tue, 10 Sep 2019 15:04:26 +0200
Subject: [PATCH 072/101] Add missing debug log message

Ticket #63
---
 vltpf/IRDIS/ImagingReduction.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py
index fee1647..cccd9ba 100644
--- a/vltpf/IRDIS/ImagingReduction.py
+++ b/vltpf/IRDIS/ImagingReduction.py
@@ -1777,6 +1777,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a fits.writeto(path.products / 'science_parang.fits', sci_parang, overwrite=True) fits.writeto(path.products / 'science_derot.fits', sci_derot, overwrite=True) if save_scaled: + self._logger.debug('> save scaled cubes') fits.writeto(path.products / 'science_cube_scaled.fits', sci_cube_scaled, overwrite=True) # delete big cubes

From 469b2c998c811996be33b5e4ecd4ed90b4b8d94d Mon Sep 17 00:00:00 2001
From: Arthur Vigan
Date: Tue, 10 Sep 2019 15:34:54 +0200
Subject: [PATCH 073/101] Add waffle orientation debug log message

Ticket #63
---
 vltpf/IRDIS/ImagingReduction.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py
index cccd9ba..8e7499e 100644
--- a/vltpf/IRDIS/ImagingReduction.py
+++ b/vltpf/IRDIS/ImagingReduction.py
@@ -1298,6 +1298,7 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): # centers waffle_orientation = hdr['HIERARCH ESO OCS WAFFLE ORIENT'] + self._logger.debug('> waffle orientation: {}'.format(waffle_orientation)) if plot: save_path = path.products / '{}_spots_fitting.pdf'.format(fname) else:
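
[Editor's note: the IFS patch that follows adds its '> ...' messages at DEBUG level, so they stay hidden at the default log_level='info' visible in the __init__ signature below. A hypothetical user session that surfaces them might look like this, assuming the reduction class is exposed as vltpf.IFS.Reduction and the data path is illustrative.]

    import vltpf.IFS as IFS

    # 'debug' makes the '> ...' messages visible; the default 'info'
    # only shows the high-level recipe messages
    reduction = IFS.Reduction('/data/my_target/', log_level='debug')
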
From d4cf94e0264cb725c31d628158c6fd6d802212db Mon Sep 17 00:00:00 2001
From: Arthur Vigan
Date: Tue, 10 Sep 2019 15:37:06 +0200
Subject: [PATCH 074/101] Add debug log messages in IFS

Ticket #63
---
 vltpf/IFS.py | 209 ++++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 181 insertions(+), 28 deletions(-)

diff --git a/vltpf/IFS.py b/vltpf/IFS.py
index 2c6a856..8cc4844 100644
--- a/vltpf/IFS.py
+++ b/vltpf/IFS.py
@@ -408,6 +408,7 @@ def __init__(self, path, log_level='info'): self._logger.info('Creating IFS reduction at path {}'.format(path)) # configuration + self._logger.debug('> read default configuration') configfile = Path(vltpf.__file__).parent / 'instruments' / '{}.ini'.format(self._instrument) config = configparser.ConfigParser() try:
@@ -449,7 +450,8 @@ def __init__(self, path, log_level='info'): 'sph_ifs_science_cubes': False, 'sph_ifs_wavelength_recalibration': False, 'sph_ifs_star_center': False, - 'sph_ifs_combine_data': False + 'sph_ifs_combine_data': False, + 'sph_ifs_clean': False } # reload any existing data frames
@@ -689,6 +691,8 @@ def read_info(self): # files info fname = path.preproc / 'files.csv' if fname.exists(): + self._logger.debug('> read files.csv') + files_info = pd.read_csv(fname, index_col=0) # convert times
@@ -716,6 +720,8 @@ def read_info(self): fname = path.preproc / 'frames.csv' if fname.exists(): + self._logger.debug('> read frames.csv') + frames_info = pd.read_csv(fname, index_col=(0, 1)) # convert times
@@ -733,6 +739,8 @@ def read_info(self): fname = path.preproc / 'frames_preproc.csv' if fname.exists(): + self._logger.debug('> read frames_preproc.csv') + frames_info_preproc = pd.read_csv(fname, index_col=(0, 1)) # convert times
@@ -753,14 +761,17 @@ def read_info(self): # additional checks to update recipe execution if frames_info is not None: wave_file = files_info[np.logical_not(files_info['PROCESSED']) & (files_info['DPR TYPE'] == 'WAVE,LAMP')] - self._recipe_execution['sph_ifs_preprocess_wave'] \ = (path.preproc / '{}_preproc.fits'.format(wave_file.index[0])).exists() + done = (path.preproc / '{}_preproc.fits'.format(wave_file.index[0])).exists() + self._recipe_execution['sph_ifs_preprocess_wave'] = done + self._logger.debug('> sph_ifs_preprocess_wave status = {}'.format(done)) - self._recipe_execution['sph_ifs_wavelength_recalibration'] \ = (path.preproc / 'wavelength_default.fits').exists() + done = (path.preproc / 'wavelength_default.fits').exists() + self._recipe_execution['sph_ifs_cal_wave'] = done + self._logger.debug('> sph_ifs_cal_wave status = {}'.format(done)) - self._recipe_execution['sph_ifs_wavelength_recalibration'] \ = (path.preproc / 'wavelength_recalibrated.fits').exists() + done = (path.preproc / 'wavelength_recalibrated.fits').exists() + self._recipe_execution['sph_ifs_wavelength_recalibration'] = done + self._logger.debug('> sph_ifs_wavelength_recalibration status = {}'.format(done)) if frames_info_preproc is not None: done = True
@@ -770,7 +781,8 @@ def read_info(self): file = list(path.preproc.glob('{}.fits'.format(fname))) done = done and (len(file) == 1) self._recipe_execution['sph_ifs_preprocess_science'] = done - + self._logger.debug('> sph_ifs_preprocess_science status = {}'.format(done)) + done = True files = frames_info_preproc.index for file, idx in files:
@@ -778,6 +790,7 @@ def read_info(self): file = list(path.preproc.glob('{}.fits'.format(fname))) done = done and (len(file) == 1) self._recipe_execution['sph_ifs_science_cubes'] = done + self._logger.debug('> sph_ifs_science_cubes status = {}'.format(done)) done = True files = frames_info_preproc[(frames_info_preproc['DPR TYPE'] == 'OBJECT,FLUX') |
@@ -787,6 +800,7 @@ def read_info(self): file = list(path.preproc.glob('{}.fits'.format(fname))) done = done and (len(file) == 1) self._recipe_execution['sph_ifs_star_center'] = done + self._logger.debug('> sph_ifs_star_center status = {}'.format(done)) def sort_files(self):
@@ -812,6 +826,7 @@ def sort_files(self): self._logger.info(' * found {0} raw FITS files'.format(len(files))) # read list of keywords + self._logger.debug('> read keyword list') keywords = [] file = open(Path(vltpf.__file__).parent / 'instruments' / 'keywords.dat', 'r') for line in
file: @@ -822,6 +837,7 @@ def sort_files(self): file.close() # short keywords + self._logger.debug('> translate into short keywords') keywords_short = keywords.copy() for idx in range(len(keywords_short)): key = keywords_short[idx] @@ -829,8 +845,10 @@ def sort_files(self): keywords_short[idx] = key[13:] # files table + self._logger.debug('> create files_info data frame') files_info = pd.DataFrame(index=pd.Index(files, name='FILE'), columns=keywords_short, dtype='float') + self._logger.debug('> read FITS keywords') for f in files: hdu = fits.open(path.raw / '{}.fits'.format(f)) hdr = hdu[0].header @@ -841,6 +859,7 @@ def sort_files(self): hdu.close() # drop files that are not handled, based on DPR keywords + self._logger.debug('> drop unsupported file types') files_info.dropna(subset=['DPR TYPE'], inplace=True) files_info = files_info[(files_info['DPR CATG'] != 'ACQUISITION') & (files_info['DPR TYPE'] != 'OBJECT,AO')] @@ -854,6 +873,7 @@ def sort_files(self): files_info.insert(len(files_info.columns), 'PRO CATG', ' ') # convert times + self._logger.debug('> convert times') files_info['DATE-OBS'] = pd.to_datetime(files_info['DATE-OBS'], utc=False) files_info['DATE'] = pd.to_datetime(files_info['DATE'], utc=False) files_info['DET FRAM UTC'] = pd.to_datetime(files_info['DET FRAM UTC'], utc=False) @@ -865,10 +885,12 @@ def sort_files(self): files_info.sort_values(by='DATE-OBS', inplace=True) # save files_info + self._logger.debug('> save files.csv') files_info.to_csv(path.preproc / 'files.csv') self._files_info = files_info # update recipe execution + self._logger.debug('> update recipe execution') self._recipe_execution['sort_files'] = True @@ -884,7 +906,8 @@ def sort_frames(self): self._logger.info('Extract frames information') # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sort_frames', self.recipe_requirements) + toolbox.check_recipe_execution(self._recipe_execution, 'sort_frames', self.recipe_requirements, + logger=self._logger) # parameters path = self._path @@ -907,27 +930,31 @@ def sort_frames(self): img.extend(list(np.arange(NDIT))) # create new dataframe + self._logger.debug('> create frames_info data frame') frames_info = pd.DataFrame(columns=sci_files.columns, index=pd.MultiIndex.from_arrays([files, img], names=['FILE', 'IMG'])) # expand files_info into frames_info frames_info = frames_info.align(files_info, level=0)[1] # compute timestamps - toolbox.compute_times(frames_info) + toolbox.compute_times(frames_info, logger=self._logger) # compute angles (ra, dec, parang) - toolbox.compute_angles(frames_info) + toolbox.compute_angles(frames_info, logger=self._logger) # save + self._logger.debug('> save frames.csv') frames_info.to_csv(path.preproc / 'frames.csv') self._frames_info = frames_info # update recipe execution + self._logger.debug('> update recipe execution') self._recipe_execution['sort_frames'] = True # # print some info # + self._logger.debug('> print observation info') cinfo = frames_info[frames_info['DPR TYPE'] == 'OBJECT'] if len(cinfo) == 0: cinfo = frames_info[frames_info['DPR TYPE'] == 'OBJECT,CENTER'] @@ -978,7 +1005,8 @@ def check_files_association(self): ''' # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'check_files_association', self.recipe_requirements) + toolbox.check_recipe_execution(self._recipe_execution, 'check_files_association', self.recipe_requirements, + logger=self._logger) self._logger.info('File association for calibrations') @@ -1006,6 +1034,7 @@ def 
check_files_association(self): # specific data frame for calibrations # keep static calibrations and sky backgrounds + self._logger.debug('> select calib files') calibs = files_info[(files_info['DPR CATG'] == 'CALIB') | ((files_info['DPR CATG'] == 'SCIENCE') & (files_info['DPR TYPE'] == 'SKY'))] @@ -1016,6 +1045,7 @@ def check_files_association(self): warning_flag = 0 # white flat + self._logger.debug('> check white flat requirements') cfiles = calibs[(calibs['DPR TYPE'] == 'FLAT,LAMP') & (calibs['INS2 COMB IFS'] == 'CAL_BB_2_{0}'.format(mode_short))] if len(cfiles) < 2: error_flag += 1 @@ -1034,6 +1064,7 @@ def check_files_association(self): files_info.drop(time_delta[2:].index, inplace=True) # 1020 nm flat + self._logger.debug('> check 1020 nm flat requirements') cfiles = calibs[(calibs['DPR TYPE'] == 'FLAT,LAMP') & (calibs['INS2 COMB IFS'] == 'CAL_NB1_1_{0}'.format(mode_short))] if len(cfiles) < 2: error_flag += 1 @@ -1052,6 +1083,7 @@ def check_files_association(self): files_info.drop(time_delta[2:].index, inplace=True) # 1230 nm flat + self._logger.debug('> check 1230 nm flat requirements') cfiles = calibs[(calibs['DPR TYPE'] == 'FLAT,LAMP') & (calibs['INS2 COMB IFS'] == 'CAL_NB2_1_{0}'.format(mode_short))] if len(cfiles) < 2: error_flag += 1 @@ -1070,6 +1102,7 @@ def check_files_association(self): files_info.drop(time_delta[2:].index, inplace=True) # 1300 nm flat + self._logger.debug('> check 1300 nm flat requirements') cfiles = calibs[(calibs['DPR TYPE'] == 'FLAT,LAMP') & (calibs['INS2 COMB IFS'] == 'CAL_NB3_1_{0}'.format(mode_short))] if len(cfiles) < 2: error_flag += 1 @@ -1089,6 +1122,7 @@ def check_files_association(self): # 1550 nm flat (YJH mode only) if mode_short == 'YJH': + self._logger.debug('> check 1550 nm flat requirements') cfiles = calibs[(calibs['DPR TYPE'] == 'FLAT,LAMP') & (calibs['INS2 COMB IFS'] == 'CAL_NB4_2_{0}'.format(mode_short))] if len(cfiles) < 2: error_flag += 1 @@ -1107,6 +1141,7 @@ def check_files_association(self): files_info.drop(time_delta[2:].index, inplace=True) # spectra position + self._logger.debug('> check specpos requirements') cfiles = calibs[(calibs['DPR TYPE'] == 'SPECPOS,LAMP') & (calibs['INS2 COMB IFS'] == mode)] if len(cfiles) == 0: error_flag += 1 @@ -1125,6 +1160,7 @@ def check_files_association(self): files_info.drop(time_delta[1:].index, inplace=True) # wavelength + self._logger.debug('> check wavelength calibration requirements') cfiles = calibs[(calibs['DPR TYPE'] == 'WAVE,LAMP') & (calibs['INS2 COMB IFS'] == mode)] if len(cfiles) == 0: error_flag += 1 @@ -1143,6 +1179,7 @@ def check_files_association(self): files_info.drop(time_delta[1:].index, inplace=True) # IFU flat + self._logger.debug('> check IFU flat requirements') cfiles = calibs[(calibs['DPR TYPE'] == 'FLAT,LAMP') & (calibs['INS2 COMB IFS'] == mode)] if len(cfiles) == 0: error_flag += 1 @@ -1161,6 +1198,7 @@ def check_files_association(self): files_info.drop(time_delta[1:].index, inplace=True) # calibs dark file + self._logger.debug('> check calibration dark requirements') cfiles = calibs[((calibs['DPR TYPE'] == 'DARK') | (calibs['DPR TYPE'] == 'DARK,BACKGROUND')) & (calibs['DET SEQ1 DIT'].round(2) == 1.65)] if len(cfiles) == 0: @@ -1171,10 +1209,12 @@ def check_files_association(self): # static calibrations that depend on science DIT ################################################## + self._logger.debug('> select science files') obj = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'DPR TYPE'].apply(lambda s: s[0:6]) DITs = files_info.loc[(files_info['DPR CATG'] == 
'SCIENCE') & (obj == 'OBJECT'), 'DET SEQ1 DIT'].unique().round(2) # handle darks in a slightly different way because there might be several different DITs + self._logger.debug('> check dark/background requirements') for DIT in DITs: # instrumental backgrounds cfiles = calibs[((calibs['DPR TYPE'] == 'DARK') | (calibs['DPR TYPE'] == 'DARK,BACKGROUND')) & @@ -1190,6 +1230,7 @@ def check_files_association(self): self._logger.warning(' * there is no sky background for science files with DIT={0} sec. Using a sky background instead of an internal instrumental background can usually provide a cleaner data reduction'.format(DIT)) # error reporting + self._logger.debug('> report status') if error_flag: self._logger.error('There are {0} warning(s) and {1} error(s) in the classification of files'.format(warning_flag, error_flag)) raise ValueError('There is {0} errors that should be solved before proceeding'.format(error_flag)) @@ -1197,9 +1238,14 @@ def check_files_association(self): self._logger.warning('There are {0} warning(s) and {1} error(s) in the classification of files'.format(warning_flag, error_flag)) # save + self._logger.debug('> save files.csv') files_info.to_csv(path.preproc / 'files.csv') self._files_info = files_info - + + # update recipe execution + self._logger.debug('> update recipe execution') + self._recipe_execution['check_files_association'] = True + def sph_ifs_cal_dark(self, silent=True): ''' @@ -1212,7 +1258,8 @@ def sph_ifs_cal_dark(self, silent=True): ''' # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_cal_dark', self.recipe_requirements) + toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_cal_dark', self.recipe_requirements, + logger=self._logger) self._logger.info('Darks and backgrounds') @@ -1242,6 +1289,7 @@ def sph_ifs_cal_dark(self, silent=True): self._logger.info(' * {0} with DIT={1:.2f} sec ({2} files)'.format(ctype, DIT, len(cfiles))) # create sof + self._logger.debug('> create sof file') sof = path.sof / 'dark_DIT={0:.2f}.sof'.format(DIT) file = open(sof, 'w') for f in files: @@ -1276,6 +1324,7 @@ def sph_ifs_cal_dark(self, silent=True): 'that the ESO pipeline is properly installed before running VLTPF.') # execute esorex + self._logger.debug('> execute esorex') if silent: proc = subprocess.run(args, cwd=path.tmp, stdout=subprocess.DEVNULL) else: @@ -1285,6 +1334,7 @@ def sph_ifs_cal_dark(self, silent=True): raise ValueError('esorex process was not successful') # store products + self._logger.debug('> update files_info data frame') files_info.loc[dark_file, 'DPR CATG'] = cfiles['DPR CATG'][0] files_info.loc[dark_file, 'DPR TYPE'] = cfiles['DPR TYPE'][0] files_info.loc[dark_file, 'INS2 MODE'] = cfiles['INS2 MODE'][0] @@ -1301,9 +1351,11 @@ def sph_ifs_cal_dark(self, silent=True): files_info.loc[bpm_file, 'PRO CATG'] = 'IFS_STATIC_BADPIXELMAP' # save + self._logger.debug('> save files.csv') files_info.to_csv(path.preproc / 'files.csv') # update recipe execution + self._logger.debug('> update recipe execution') self._recipe_execution['sph_ifs_cal_dark'] = True @@ -1318,7 +1370,8 @@ def sph_ifs_cal_detector_flat(self, silent=True): ''' # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_cal_detector_flat', self.recipe_requirements) + toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_cal_detector_flat', self.recipe_requirements, + logger=self._logger) self._logger.info('Detector flats') @@ -1377,6 +1430,7 @@ def sph_ifs_cal_detector_flat(self, 
silent=True): hdu.close() # store products + self._logger.debug('> update files_info data frame') files_info.loc[flat_file, 'DPR CATG'] = cfiles['DPR CATG'][0] files_info.loc[flat_file, 'DPR TYPE'] = cfiles['DPR TYPE'][0] files_info.loc[flat_file, 'INS2 MODE'] = cfiles['INS2 MODE'][0] @@ -1393,9 +1447,11 @@ def sph_ifs_cal_detector_flat(self, silent=True): files_info.loc[bpm_file, 'PRO CATG'] = 'IFS_STATIC_BADPIXELMAP' # save + self._logger.debug('> save files.csv') files_info.to_csv(path.preproc / 'files.csv') # update recipe execution + self._logger.debug('> update recipe execution') self._recipe_execution['sph_ifs_cal_detector_flat'] = True @@ -1410,7 +1466,8 @@ def sph_ifs_cal_specpos(self, silent=True): ''' # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_cal_specpos', self.recipe_requirements) + toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_cal_specpos', self.recipe_requirements, + logger=self._logger) self._logger.info('Microspectra positions') @@ -1438,6 +1495,7 @@ def sph_ifs_cal_specpos(self, silent=True): raise ValueError('Unknown IFS mode {0}'.format(mode)) # create sof + self._logger.debug('> create sof file') sof = path.sof / 'specpos.sof' file = open(sof, 'w') file.write('{0}/{1}.fits {2}\n'.format(path.raw, specpos_file.index[0], 'IFS_SPECPOS_RAW')) @@ -1461,6 +1519,7 @@ def sph_ifs_cal_specpos(self, silent=True): raise NameError('esorex does not appear to be in your PATH. Please make sure that the ESO pipeline is properly installed before running VLTPF.') # execute esorex + self._logger.debug('> execute esorex') if silent: proc = subprocess.run(args, cwd=path.tmp, stdout=subprocess.DEVNULL) else: @@ -1470,6 +1529,7 @@ def sph_ifs_cal_specpos(self, silent=True): raise ValueError('esorex process was not successful') # store products + self._logger.debug('> update files_info data frame') files_info.loc[specp_file, 'DPR CATG'] = specpos_file['DPR CATG'][0] files_info.loc[specp_file, 'DPR TYPE'] = specpos_file['DPR TYPE'][0] files_info.loc[specp_file, 'INS2 MODE'] = specpos_file['INS2 MODE'][0] @@ -1479,9 +1539,11 @@ def sph_ifs_cal_specpos(self, silent=True): files_info.loc[specp_file, 'PRO CATG'] = 'IFS_SPECPOS' # save + self._logger.debug('> save files.csv') files_info.to_csv(path.preproc / 'files.csv') # update recipe execution + self._logger.debug('> update recipe execution') self._recipe_execution['sph_ifs_cal_specpos'] = True @@ -1496,7 +1558,8 @@ def sph_ifs_cal_wave(self, silent=True): ''' # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_cal_wave', self.recipe_requirements) + toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_cal_wave', self.recipe_requirements, + logger=self._logger) self._logger.info('Wavelength calibration') @@ -1522,6 +1585,7 @@ def sph_ifs_cal_wave(self, silent=True): mode = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS2 COMB IFS'].unique()[0] # create sof + self._logger.debug('> create sof file') sof = path.sof / 'wave.sof' file = open(sof, 'w') file.write('{0}/{1}.fits {2}\n'.format(path.raw, wave_file.index[0], 'IFS_WAVECALIB_RAW')) @@ -1533,8 +1597,8 @@ def sph_ifs_cal_wave(self, silent=True): wav_file = 'wave_calib' # esorex parameters + self._logger.debug('> IFS mode is {}'.format(mode)) if mode == 'OBS_YJ': - # FIXME: use wave_cal_lasers in config args = ['esorex', '--no-checksum=TRUE', '--no-datamd5=TRUE', @@ -1546,7 +1610,6 @@ def sph_ifs_cal_wave(self, silent=True): 
'--ifs.wave_calib.outfilename={0}/{1}.fits'.format(path.calib, wav_file), sof] elif mode == 'OBS_H': - # FIXME: use wave_cal_lasers in config args = ['esorex', '--no-checksum=TRUE', '--no-datamd5=TRUE', @@ -1565,6 +1628,7 @@ def sph_ifs_cal_wave(self, silent=True): 'that the ESO pipeline is properly installed before running VLTPF.') # execute esorex + self._logger.debug('> execute esorex') if silent: proc = subprocess.run(args, cwd=path.tmp, stdout=subprocess.DEVNULL) else: @@ -1574,6 +1638,7 @@ def sph_ifs_cal_wave(self, silent=True): raise ValueError('esorex process was not successful') # store products + self._logger.debug('> update files_info data frame') files_info.loc[wav_file, 'DPR CATG'] = wave_file['DPR CATG'][0] files_info.loc[wav_file, 'DPR TYPE'] = wave_file['DPR TYPE'][0] files_info.loc[wav_file, 'INS2 MODE'] = wave_file['INS2 MODE'][0] @@ -1583,18 +1648,22 @@ def sph_ifs_cal_wave(self, silent=True): files_info.loc[wav_file, 'PRO CATG'] = 'IFS_WAVECALIB' # save + self._logger.debug('> save files.csv') files_info.to_csv(path.preproc / 'files.csv') # store default wavelength calibration in preproc + self._logger.debug('> compute default wavelength calibration') hdr = fits.getheader(path.calib / '{}.fits'.format(wav_file)) wave_min = hdr['HIERARCH ESO DRS IFS MIN LAMBDA']*1000 wave_max = hdr['HIERARCH ESO DRS IFS MAX LAMBDA']*1000 wave_drh = np.linspace(wave_min, wave_max, self._nwave) + self._logger.debug('> save default wavelength calibration') fits.writeto(path.preproc / 'wavelength_default.fits', wave_drh, overwrite=True) # update recipe execution + self._logger.debug('> update recipe execution') self._recipe_execution['sph_ifs_cal_wave'] = True @@ -1609,7 +1678,8 @@ def sph_ifs_cal_ifu_flat(self, silent=True): ''' # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_cal_ifu_flat', self.recipe_requirements) + toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_cal_ifu_flat', self.recipe_requirements, + logger=self._logger) self._logger.info('Integral-field unit flat') @@ -1668,6 +1738,7 @@ def sph_ifs_cal_ifu_flat(self, silent=True): raise ValueError('There should be exactly 1 1550 nm flat file. Found {0}.'.format(len(flat_1550_file))) # create sof + self._logger.debug('> create sof file') sof = path.sof / 'ifu_flat.sof' file = open(sof, 'w') file.write('{0}/{1}.fits {2}\n'.format(path.raw, ifu_flat_file.index[0], 'IFS_FLAT_FIELD_RAW')) @@ -1699,6 +1770,7 @@ def sph_ifs_cal_ifu_flat(self, silent=True): raise NameError('esorex does not appear to be in your PATH. 
Please make sure that the ESO pipeline is properly installed before running VLTPF.') # execute esorex + self._logger.debug('> execute esorex') if silent: proc = subprocess.run(args, cwd=path.tmp, stdout=subprocess.DEVNULL) else: @@ -1708,6 +1780,7 @@ def sph_ifs_cal_ifu_flat(self, silent=True): raise ValueError('esorex process was not successful') # store products + self._logger.debug('> update files_info data frame') files_info.loc[ifu_file, 'DPR CATG'] = ifu_flat_file['DPR CATG'][0] files_info.loc[ifu_file, 'DPR TYPE'] = ifu_flat_file['DPR TYPE'][0] files_info.loc[ifu_file, 'INS2 MODE'] = ifu_flat_file['INS2 MODE'][0] @@ -1717,9 +1790,11 @@ def sph_ifs_cal_ifu_flat(self, silent=True): files_info.loc[ifu_file, 'PRO CATG'] = 'IFS_IFU_FLAT_FIELD' # save + self._logger.debug('> save files.csv') files_info.to_csv(path.preproc / 'files.csv') # update recipe execution + self._logger.debug('> update recipe execution') self._recipe_execution['sph_ifs_cal_ifu_flat'] = True @@ -1781,7 +1856,8 @@ def sph_ifs_preprocess_science(self, ''' # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_preprocess_science', self.recipe_requirements) + toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_preprocess_science', self.recipe_requirements, + logger=self._logger) self._logger.info('Pre-process science files') @@ -1791,6 +1867,7 @@ def sph_ifs_preprocess_science(self, frames_info = self._frames_info # clean before we start + self._logger.debug('> remove old preproc files') files = path.preproc.glob('*_DIT???_preproc.fits') for file in files: file.unlink() @@ -1800,9 +1877,10 @@ def sph_ifs_preprocess_science(self, bpm_files = files_info[files_info['PRO CATG'] == 'IFS_STATIC_BADPIXELMAP'].index bpm_files = [path.calib / '{}.fits'.format(f) for f in bpm_files] - bpm = toolbox.compute_bad_pixel_map(bpm_files) + bpm = toolbox.compute_bad_pixel_map(bpm_files, logger=self._logger) # final dataframe + self._logger.debug('> create frames_info_preproc data frame') index = pd.MultiIndex(names=['FILE', 'IMG'], levels=[[], []], codes=[[], []]) frames_info_preproc = pd.DataFrame(index=index, columns=frames_info.columns) @@ -1946,6 +2024,7 @@ def sph_ifs_preprocess_science(self, # save DITs individually + self._logger.debug('> save pre-processed images') for f in range(len(img)): frame = img[f].squeeze() hdr['HIERARCH ESO DET NDIT'] = 1 @@ -1953,12 +2032,14 @@ def sph_ifs_preprocess_science(self, overwrite=True, output_verify='silentfix') # sort and save final dataframe + self._logger.debug('> save frames_preproc.csv') frames_info_preproc.sort_values(by='TIME', inplace=True) frames_info_preproc.to_csv(path.preproc / 'frames_preproc.csv') self._frames_info_preproc = frames_info_preproc # update recipe execution + self._logger.debug('> update recipe execution') self._recipe_execution['sph_ifs_preprocess_science'] = True @@ -1969,7 +2050,8 @@ def sph_ifs_preprocess_wave(self): ''' # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_preprocess_wave', self.recipe_requirements) + toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_preprocess_wave', self.recipe_requirements, + logger=self._logger) # parameters path = self._path @@ -1980,7 +2062,7 @@ def sph_ifs_preprocess_wave(self): # bpm bpm_files = files_info[files_info['PRO CATG'] == 'IFS_STATIC_BADPIXELMAP'].index bpm_files = [path.calib / '{}.fits'.format(f) for f in bpm_files] - bpm = toolbox.compute_bad_pixel_map(bpm_files) + bpm = 
toolbox.compute_bad_pixel_map(bpm_files, logger=self._logger) # dark dark_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IFS_MASTER_DARK') & @@ -2017,6 +2099,7 @@ def sph_ifs_preprocess_wave(self): img = sph_ifs_correct_spectral_xtalk(img) # add fake coordinates + self._logger.debug('> add fake coordinates') hdr['HIERARCH ESO TEL TARG ALPHA'] = 120000.0 hdr['HIERARCH ESO TEL TARG DELTA'] = -900000.0 @@ -2025,6 +2108,7 @@ def sph_ifs_preprocess_wave(self): overwrite=True, output_verify='silentfix') # update recipe execution + self._logger.debug('> update recipe execution') self._recipe_execution['sph_ifs_preprocess_wave'] = True @@ -2039,7 +2123,8 @@ def sph_ifs_science_cubes(self, silent=True): ''' # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_science_cubes', self.recipe_requirements) + toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_science_cubes', self.recipe_requirements, + logger=self._logger) self._logger.info('Create science cubes') @@ -2048,6 +2133,7 @@ def sph_ifs_science_cubes(self, silent=True): files_info = self._files_info # clean before we start + self._logger.debug('> remove old preproc files') files = path.preproc.glob('*_DIT???_preproc_?????.fits') for file in files: file.unlink() @@ -2104,6 +2190,7 @@ def sph_ifs_science_cubes(self, silent=True): raise ValueError('There should be exactly 1 1550 nm flat file. Found {0}.'.format(len(flat_1550_file))) # create sof + self._logger.debug('> create sof file') sof = path.sof / 'science.sof' file = open(sof, 'w') for f in sci_files: @@ -2135,6 +2222,7 @@ def sph_ifs_science_cubes(self, silent=True): raise NameError('esorex does not appear to be in your PATH. Please make sure that the ESO pipeline is properly installed before running VLTPF.') # execute esorex + self._logger.debug('> execute esorex') if silent: proc = subprocess.run(args, cwd=path.tmp, stdout=subprocess.DEVNULL) else: @@ -2153,10 +2241,12 @@ def sph_ifs_science_cubes(self, silent=True): fits.writeto(f, data, header, overwrite=True, output_verify='silentfix') # move files to final directory + self._logger.debug('> move data cubes') for file in files: shutil.move(file, path.preproc / file.name) # update recipe execution + self._logger.debug('> update recipe execution') self._recipe_execution['sph_ifs_science_cubes'] = True @@ -2186,7 +2276,8 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot= ''' # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_wavelength_recalibration', self.recipe_requirements) + toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_wavelength_recalibration', self.recipe_requirements, + logger=self._logger) self._logger.info('Wavelength recalibration') @@ -2200,6 +2291,7 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot= frames_info = self._frames_info_preproc # remove old file + self._logger.debug('> remove old recalibrated wavelength calibration') wfile = path.preproc / 'wavelength_recalibrated.fits' if wfile.exists(): wfile.unlink() @@ -2215,6 +2307,7 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot= files = list(path.preproc.glob(fname+'*[0-9].fits')) hdr = fits.getheader(files[0]) + self._logger.debug('> compute default wavelength calibration') wave_min = hdr['HIERARCH ESO DRS IFS MIN LAMBDA']*1000 wave_max = hdr['HIERARCH ESO DRS IFS MAX LAMBDA']*1000 wave_drh = np.linspace(wave_min, wave_max, 
nwave)
@@ -2245,6 +2338,7 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot= # compute centers from waffle spots waffle_orientation = hdr['HIERARCH ESO OCS WAFFLE ORIENT'] + self._logger.debug('> waffle orientation: {}'.format(waffle_orientation)) if plot: save_path = path.products / '{}spots_fitting.pdf'.format(fname) else:
@@ -2270,6 +2364,7 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot= files = list(path.preproc.glob(fname+'*.fits')) # read cube and measure mean flux in all channels + self._logger.debug('> read data') cube, hdr = fits.getdata(files[0], header=True) wave_flux = np.zeros(nwave) aper = aperture.disc(cube.shape[-1], 100, diameter=True)
@@ -2278,6 +2373,7 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot= wave_flux[w] = f[mask].mean() # fit + self._logger.debug('> fit individual peaks') wave_idx = np.arange(nwave, dtype=np.float) peak_position_lasers = [] if ifs_mode == 'OBS_YJ':
@@ -2329,6 +2425,7 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot= # wavelengths wave_lasers = self._wave_cal_lasers[0:4] + self._logger.debug('> fit new wavelength solution') res = optim.minimize(wavelength_optimisation, 0.9, method='Nelder-Mead', args=(wave_scale, wave_lasers, peak_position_lasers))
@@ -2380,6 +2477,7 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot= plt.savefig(path.products / 'wavelength_recalibration.pdf') # update recipe execution + self._logger.debug('> update recipe execution') self._recipe_execution['sph_ifs_wavelength_recalibration'] = True
@@ -2403,7 +2501,8 @@ def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), plot=True): ''' # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_star_center', self.recipe_requirements) + toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_star_center', self.recipe_requirements, + logger=self._logger) self._logger.info('Star centers determination')
@@ -2422,6 +2521,7 @@ def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), plot=True): self._logger.info(' * OBJECT,FLUX: {0}'.format(file)) # read data + self._logger.debug('> read data') fname = '{0}_DIT{1:03d}_preproc_'.format(file, idx) files = list(path.preproc.glob(fname+'*[0-9].fits')) cube, hdr = fits.getdata(files[0], header=True)
@@ -2432,6 +2532,7 @@ def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), plot=True): cube[:, :, 250:] = 0 # wavelength + self._logger.debug('> compute default wavelength calibration') wave_min = hdr['HIERARCH ESO DRS IFS MIN LAMBDA']*1000 wave_max = hdr['HIERARCH ESO DRS IFS MAX LAMBDA']*1000 wave_drh = np.linspace(wave_min, wave_max, nwave)
@@ -2445,6 +2546,7 @@ def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), plot=True): save_path=save_path, logger=self._logger) # save + self._logger.debug('> save centers') fits.writeto(path.preproc / '{}centers.fits'.format(fname), img_center, overwrite=True) # then OBJECT,CENTER
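
[Editor's note: the laser peaks fitted above use the same Gaussian-plus-linear model as the fit_peak() helper patched later in this series. A self-contained sketch of that fit on synthetic data; all numerical values are illustrative.]

    import numpy as np
    from astropy.modeling import models, fitting

    # synthetic flux profile: one emission peak on a linear background
    x = np.arange(40, dtype=float)
    y = 100*np.exp(-0.5*((x - 18.5)/2.0)**2) + 0.5*x + 10

    # Gaussian + linear trend, as in fit_peak()
    g_init = models.Gaussian1D(amplitude=y.max(), mean=x[np.argmax(y)]) + models.Linear1D(slope=0, intercept=0)
    fitter = fitting.LevMarLSQFitter()
    fit = fitter(g_init, x, y)

    # fitted peak position, i.e. par[1] in fit_peak()
    print(fit.mean_0.value)
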
@@ -2454,17 +2556,20 @@ def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), plot=True): self._logger.info(' * OBJECT,CENTER: {0}'.format(file)) # read data + self._logger.debug('> read data') fname = '{0}_DIT{1:03d}_preproc_'.format(file, idx) files = list(path.preproc.glob(fname+'*[0-9].fits')) cube, hdr = fits.getdata(files[0], header=True) # wavelength + self._logger.debug('> compute default wavelength calibration') wave_min = hdr['HIERARCH ESO DRS IFS MIN LAMBDA']*1000 wave_max = hdr['HIERARCH ESO DRS IFS MAX LAMBDA']*1000 wave_drh = np.linspace(wave_min, wave_max, nwave) # centers waffle_orientation = hdr['HIERARCH ESO OCS WAFFLE ORIENT'] + self._logger.debug('> waffle orientation: {}'.format(waffle_orientation)) if plot: save_path = path.products / '{}spots_fitting.pdf'.format(fname) else:
@@ -2476,9 +2581,11 @@ def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), plot=True): logger=self._logger) # save + self._logger.debug('> save centers') fits.writeto(path.preproc / '{}centers.fits'.format(fname), img_center, overwrite=True) # update recipe execution + self._logger.debug('> update recipe execution') self._recipe_execution['sph_ifs_star_center'] = True
@@ -2581,7 +2688,8 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a ''' # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_combine_data', self.recipe_requirements) + toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_combine_data', self.recipe_requirements, + logger=self._logger) self._logger.info('Combine science data')
@@ -2591,6 +2699,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a frames_info = self._frames_info_preproc # read final wavelength calibration + self._logger.debug('> read final wavelength') wfile = path.preproc / 'wavelength_recalibrated.fits' if wfile.exists(): wave = fits.getdata(wfile)
@@ -2657,11 +2766,13 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a self._logger.info(' ==> file {0}/{1}: {2}, DIT #{3}'.format(file_idx+1, len(flux_files), file, idx)) # read data + self._logger.debug('> read data') fname = '{0}_DIT{1:03d}_preproc_'.format(file, idx) files = list(path.preproc.glob(fname+'?????.fits')) cube = fits.getdata(files[0]) # centers + self._logger.debug('> read centers') cfile = path.preproc / '{}centers.fits'.format(fname) if cfile.exists(): centers = fits.getdata(cfile)
@@ -2677,18 +2788,22 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a cube[cube == 0] = np.nan # neutral density + self._logger.debug('> read neutral density information') ND = frames_info.loc[(file, idx), 'INS4 FILT2 NAME'] w, attenuation = transmission.transmission_nd(ND, wave=wave) # DIT, angles, etc + self._logger.debug('> read angles') DIT = frames_info.loc[(file, idx), 'DET SEQ1 DIT'] psf_parang[file_idx] = frames_info.loc[(file, idx), 'PARANG'] psf_derot[file_idx] = frames_info.loc[(file, idx), 'DEROT ANGLE'] # center frames for wave_idx, img in enumerate(cube): + self._logger.debug('> wave {}'.format(wave_idx)) cx, cy = centers[wave_idx, :] + self._logger.debug('> shift and normalize') img = img[:-1, :-1].astype(np.float) nimg = imutils.shift(img, (cc-cx, cc-cy), method=shift_method) nimg = nimg / DIT / attenuation[wave_idx]
@@ -2697,24 +2812,29 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # correct anamorphism if correct_anamorphism: + self._logger.debug('> correct anamorphism') nimg = psf_cube[wave_idx, file_idx] nimg = imutils.scale(nimg, (1.0059, 1.0011), method='interp') psf_cube[wave_idx, file_idx] = nimg # wavelength-scaled version if save_scaled: + self._logger.debug('> spatial scaling') nimg = psf_cube[wave_idx, file_idx] psf_cube_scaled[wave_idx, file_idx] = imutils.scale(nimg, wave[0]/wave[wave_idx], method=shift_method) # save final cubes + self._logger.debug('> save final cubes and metadata') flux_files.to_csv(path.products
/ 'psf_frames.csv') fits.writeto(path.products / 'psf_cube.fits', psf_cube, overwrite=True) fits.writeto(path.products / 'psf_parang.fits', psf_parang, overwrite=True) fits.writeto(path.products / 'psf_derot.fits', psf_derot, overwrite=True) if save_scaled: + self._logger.debug('> save scaled cubes') fits.writeto(path.products / 'psf_cube_scaled.fits', psf_cube_scaled, overwrite=True) # delete big cubes + self._logger.debug('> free memory') del psf_cube if save_scaled: del psf_cube_scaled
@@ -2745,11 +2865,13 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a self._logger.info(' ==> file {0}/{1}: {2}, DIT #{3}'.format(file_idx+1, len(starcen_files), file, idx)) # read data + self._logger.debug('> read data') fname = '{0}_DIT{1:03d}_preproc_'.format(file, idx) files = list(path.preproc.glob(fname+'?????.fits')) cube = fits.getdata(files[0]) # use manual center if explicitly requested + self._logger.debug('> read centers') if manual_center is not None: centers = manual_center else:
@@ -2764,18 +2886,22 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a cube[cube == 0] = np.nan # neutral density + self._logger.debug('> read neutral density information') ND = frames_info.loc[(file, idx), 'INS4 FILT2 NAME'] w, attenuation = transmission.transmission_nd(ND, wave=wave) # DIT, angles, etc + self._logger.debug('> read angles') DIT = frames_info.loc[(file, idx), 'DET SEQ1 DIT'] cen_parang[file_idx] = frames_info.loc[(file, idx), 'PARANG'] cen_derot[file_idx] = frames_info.loc[(file, idx), 'DEROT ANGLE'] # center frames for wave_idx, img in enumerate(cube): + self._logger.debug('> wave {}'.format(wave_idx)) cx, cy = centers[wave_idx, :] + self._logger.debug('> shift and normalize') img = img[:-1, :-1].astype(np.float) nimg = imutils.shift(img, (cc-cx, cc-cy), method=shift_method) nimg = nimg / DIT / attenuation[wave_idx]
@@ -2784,24 +2910,29 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # correct anamorphism if correct_anamorphism: + self._logger.debug('> correct anamorphism') nimg = cen_cube[wave_idx, file_idx] nimg = imutils.scale(nimg, (1.0059, 1.0011), method='interp') cen_cube[wave_idx, file_idx] = nimg # wavelength-scaled version if save_scaled: + self._logger.debug('> spatial scaling') nimg = cen_cube[wave_idx, file_idx] cen_cube_scaled[wave_idx, file_idx] = imutils.scale(nimg, wave[0]/wave[wave_idx], method=shift_method) # save final cubes + self._logger.debug('> save final cubes and metadata') starcen_files.to_csv(path.products / 'starcenter_frames.csv') fits.writeto(path.products / 'starcenter_cube.fits', cen_cube, overwrite=True) fits.writeto(path.products / 'starcenter_parang.fits', cen_parang, overwrite=True) fits.writeto(path.products / 'starcenter_derot.fits', cen_derot, overwrite=True) if save_scaled: + self._logger.debug('> save scaled cubes') fits.writeto(path.products / 'starcenter_cube_scaled.fits', cen_cube_scaled, overwrite=True) # delete big cubes + self._logger.debug('> free memory') del cen_cube if save_scaled: del cen_cube_scaled
@@ -2815,6 +2946,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a self._logger.info(' * OBJECT data') # use manual center if explicitly requested + self._logger.debug('> read centers') if manual_center is not None: centers = manual_center else:
@@ -2859,6 +2991,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a self._logger.info(' ==> file {0}/{1}: {2}, DIT
#{3}'.format(file_idx+1, len(object_files), file, idx)) # read data + self._logger.debug('> read data') fname = '{0}_DIT{1:03d}_preproc_'.format(file, idx) files = list(path.preproc.glob(fname+'*.fits')) cube = fits.getdata(files[0])
@@ -2867,18 +3000,22 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a cube[cube == 0] = np.nan # neutral density + self._logger.debug('> read neutral density information') ND = frames_info.loc[(file, idx), 'INS4 FILT2 NAME'] w, attenuation = transmission.transmission_nd(ND, wave=wave) # DIT, angles, etc + self._logger.debug('> read angles') DIT = frames_info.loc[(file, idx), 'DET SEQ1 DIT'] sci_parang[file_idx] = frames_info.loc[(file, idx), 'PARANG'] sci_derot[file_idx] = frames_info.loc[(file, idx), 'DEROT ANGLE'] # center frames for wave_idx, img in enumerate(cube): + self._logger.debug('> wave {}'.format(wave_idx)) cx, cy = centers[wave_idx, :] + self._logger.debug('> shift and normalize') img = img[:-1, :-1].astype(np.float) nimg = imutils.shift(img, (cc-cx, cc-cy), method=shift_method) nimg = nimg / DIT / attenuation[wave_idx]
@@ -2887,29 +3024,35 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # correct anamorphism if correct_anamorphism: + self._logger.debug('> correct anamorphism') nimg = sci_cube[wave_idx, file_idx] nimg = imutils.scale(nimg, (1.0059, 1.0011), method='interp') sci_cube[wave_idx, file_idx] = nimg # wavelength-scaled version if save_scaled: + self._logger.debug('> spatial scaling') nimg = sci_cube[wave_idx, file_idx] sci_cube_scaled[wave_idx, file_idx] = imutils.scale(nimg, wave[0]/wave[wave_idx], method=shift_method) # save final cubes + self._logger.debug('> save final cubes and metadata') object_files.to_csv(path.products / 'science_frames.csv') fits.writeto(path.products / 'science_cube.fits', sci_cube, overwrite=True) fits.writeto(path.products / 'science_parang.fits', sci_parang, overwrite=True) fits.writeto(path.products / 'science_derot.fits', sci_derot, overwrite=True) if save_scaled: + self._logger.debug('> save scaled cubes') fits.writeto(path.products / 'science_cube_scaled.fits', sci_cube_scaled, overwrite=True) # delete big cubes + self._logger.debug('> free memory') del sci_cube if save_scaled: del sci_cube_scaled # update recipe execution + self._logger.debug('> update recipe execution') self._recipe_execution['sph_ifs_combine_data'] = True
@@ -2933,28 +3076,38 @@ def sph_ifs_clean(self, delete_raw=False, delete_products=False): # tmp if path.tmp.exists(): + self._logger.debug('> remove {}'.format(path.tmp)) shutil.rmtree(path.tmp, ignore_errors=True) # sof if path.sof.exists(): + self._logger.debug('> remove {}'.format(path.sof)) shutil.rmtree(path.sof, ignore_errors=True) # calib if path.calib.exists(): + self._logger.debug('> remove {}'.format(path.calib)) shutil.rmtree(path.calib, ignore_errors=True) # preproc if path.preproc.exists(): + self._logger.debug('> remove {}'.format(path.preproc)) shutil.rmtree(path.preproc, ignore_errors=True) # raw if delete_raw: if path.raw.exists(): + self._logger.debug('> remove {}'.format(path.raw)) self._logger.warning(' ==> delete raw files') shutil.rmtree(path.raw, ignore_errors=True) # products if delete_products: if path.products.exists(): + self._logger.debug('> remove {}'.format(path.products)) self._logger.warning(' ==> delete products') shutil.rmtree(path.products, ignore_errors=True) + + # update recipe execution + self._logger.debug('> update recipe execution') + self._recipe_execution['sph_ifs_clean'] = True
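
[Editor's note: the next patch generalises the logging to module-level functions by giving each a fallback logger, as introduced for vltpf.IFS below. The pattern, reduced to its core with a hypothetical function name:]

    import logging
    import numpy as np

    # module-level fallback logger, as in _log = logging.getLogger(__name__)
    _log = logging.getLogger(__name__)

    def compute_something(data, logger=_log):
        # standalone calls fall back to the module logger; a reduction
        # passes its own logger explicitly with logger=self._logger
        logger.debug('> start computation')
        return np.nanmean(data)

    logging.basicConfig(level=logging.DEBUG)
    print(compute_something(np.arange(10.0)))
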
From 32efbf437a32f7a57d1992c5971b234bf841ec76 Mon Sep 17 00:00:00 2001
From: Arthur Vigan
Date: Tue, 10 Sep 2019 15:53:37 +0200
Subject: [PATCH 075/101] Add more debug logging

Ticket #63
---
 vltpf/IFS.py | 65 +++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 47 insertions(+), 18 deletions(-)

diff --git a/vltpf/IFS.py b/vltpf/IFS.py
index 8cc4844..0d9427b 100644
--- a/vltpf/IFS.py
+++ b/vltpf/IFS.py
@@ -24,8 +24,10 @@ import vltpf.transmission as transmission import vltpf.toolbox as toolbox +_log = logging.getLogger(__name__) -def compute_detector_flat(raw_flat_files, bpm_files=[], mask_vignetting=True): + +def compute_detector_flat(raw_flat_files, bpm_files=[], mask_vignetting=True, logger=_log): ''' Compute a master detector flat and associated bad pixel map
@@ -42,6 +44,9 @@ def compute_detector_flat(raw_flat_files, bpm_files=[], mask_vignetting=True): vignetting. The areas of the detector that are vignetted are replaced by a value of 1 in the flats. Default is True + logger : logHandler object + Log handler for the reduction. Default is root logger + Returns ------- flat : array
@@ -54,9 +59,10 @@ def compute_detector_flat(raw_flat_files, bpm_files=[], mask_vignetting=True): ''' # read bad pixel maps - bpm_in = toolbox.compute_bad_pixel_map(bpm_files, dtype=np.uint8) + bpm_in = toolbox.compute_bad_pixel_map(bpm_files, dtype=np.uint8, logger=logger) # read data + logger.debug('> read data') ff0, hdr0 = fits.getdata(raw_flat_files[0], header=True) ff1, hdr1 = fits.getdata(raw_flat_files[1], header=True)
@@ -68,6 +74,7 @@ def compute_detector_flat(raw_flat_files, bpm_files=[], mask_vignetting=True): ff1 = np.median(ff1, axis=0) # create master flat + logger.debug('> create master flat') DIT0 = hdr0['HIERARCH ESO DET SEQ1 DIT'] DIT1 = hdr1['HIERARCH ESO DET SEQ1 DIT']
@@ -77,6 +84,7 @@ def compute_detector_flat(raw_flat_files, bpm_files=[], mask_vignetting=True): flat = ff1 - ff0 # bad pixels correction + logger.debug('> bad pixels correction (1/2)') flat = imutils.fix_badpix(flat, bpm_in, npix=12, weight=True) # flat = imutils.fix_badpix_vip(flat, bpm_in, box=5)
@@ -84,15 +92,18 @@ def compute_detector_flat(raw_flat_files, bpm_files=[], mask_vignetting=True): flat = imutils.sigma_filter(flat, box=7, nsigma=3, iterate=True) # normalized flat + logger.debug('> normalize') flat = flat / np.median(flat) - # additional rounad of bad pixels correction + # additional round of bad pixels correction + logger.debug('> bad pixels correction (2/2)') bpm = (flat <= 0.9) | (flat >= 1.1) bpm = bpm.astype(np.uint8) flat = imutils.fix_badpix(flat, bpm, npix=12, weight=True) # flat = imutils.fix_badpix_vip(flat, bpm_in, box=5) # final products + logger.debug('> compute final flat') flat = flat / np.median(flat) bpm = (flat <= 0.9) | (flat >= 1.1)
@@ -101,13 +112,14 @@ def compute_detector_flat(raw_flat_files, bpm_files=[], mask_vignetting=True): # apply IFU mask to avoid "edge effects" in the final images, # where the lenslets are vignetted if mask_vignetting: + logger.debug('> apply mask vignetting') ifu_mask = fits.getdata(Path(vltpf.__file__).parent / 'data' / 'ifu_mask.fits') flat[ifu_mask == 0] = 1 return flat, bpm -def sph_ifs_correct_spectral_xtalk(img): +def sph_ifs_correct_spectral_xtalk(img, logger=_log): ''' Corrects an IFS frame from the spectral crosstalk
@@ -135,6 +147,9 @@ def sph_ifs_correct_spectral_xtalk(img): img : array_like Input IFS science frame + logger : logHandler object Log handler for the reduction.
Default is root logger + Returns ------- img_corr : array_like @@ -142,6 +157,8 @@ def sph_ifs_correct_spectral_xtalk(img): ''' + logger.debug('> subtract IFS crosstalk') + # definition of the dimension of the matrix sepmax = 20 dim = sepmax*2+1 @@ -156,13 +173,15 @@ def sph_ifs_correct_spectral_xtalk(img): kernel[(np.abs(x) <= 1) & (np.abs(y) <= 1)] = 0 # convolution and subtraction + logger.debug('> compute convolution') conv = ndimage.convolve(img, kernel, mode='reflect') + logger.debug('> subtract convolution') img_corr = img - conv return img_corr -def sph_ifs_fix_badpix(img, bpm): +def sph_ifs_fix_badpix(img, bpm, logger=_log): ''' Clean the bad pixels in an IFU image @@ -181,6 +200,9 @@ def sph_ifs_fix_badpix(img, bpm): bpm : array_like Bad pixel map + logger : logHandler object + Log handler for the reduction. Default is root logger + Returns ------- img_clean : array_like @@ -188,6 +210,7 @@ def sph_ifs_fix_badpix(img, bpm): ''' # copy the original image + logger.debug('> copy input image') img_clean = img.copy() # extension over which the good pixels will be looked for along @@ -208,6 +231,7 @@ def sph_ifs_fix_badpix(img, bpm): idx_lh = np.arange(ext)+1 # loop over bad pixels + logger.debug('> loop over bad pixels') badpix = np.where(bpm == 1) for y, x in zip(badpix[0], badpix[1]): # extract sub-region along the spectral direction @@ -288,7 +312,7 @@ def wavelength_optimisation(wave_ref, wave_scale, wave_lasers, peak_position_las return np.max(np.abs(diff)) -def fit_peak(x, y, display=False): +def fit_peak(x, y, display=False, logger=_log): ''' Fit a Gaussian (with linear trend) @@ -303,6 +327,9 @@ def fit_peak(x, y, display=False): display : bool Display the result of the fit + logger : logHandler object + Log handler for the reduction. Default is root logger + Returns ------- par @@ -310,6 +337,8 @@ def fit_peak(x, y, display=False): stddev, line slope, line intercept ''' + logger.debug('> fit Gaussian peak') + # fit: Gaussian + constant g_init = models.Gaussian1D(amplitude=y.max(), mean=x[np.argmax(y)]) + models.Linear1D(slope=0, intercept=0) fitter = fitting.LevMarLSQFitter() @@ -1414,7 +1443,7 @@ def sph_ifs_cal_detector_flat(self, silent=True): raise ValueError('There should be exactly 2 raw flat files. 
Found {0}.'.format(len(files))) # create the flat and bpm - flat, bpm = compute_detector_flat(files, bpm_files=bpm_files, mask_vignetting=True) + flat, bpm = compute_detector_flat(files, bpm_files=bpm_files, mask_vignetting=True, logger=self._logger) # products if wave == 0: @@ -2003,7 +2032,7 @@ def sph_ifs_preprocess_science(self, # very aggressive sigma-filtering frame = imutils.sigma_filter(frame, box=5, nsigma=5, iterate=True) frame = imutils.sigma_filter(frame, box=7, nsigma=5, iterate=True) - frame = sph_ifs_fix_badpix(frame, bpm) + frame = sph_ifs_fix_badpix(frame, bpm, logger=self._logger) img[f] = frame # spectral crosstalk correction @@ -2011,7 +2040,7 @@ def sph_ifs_preprocess_science(self, self._logger.info(' ==> correct spectral crosstalk') for f in range(len(img)): frame = img[f] - frame = sph_ifs_correct_spectral_xtalk(frame) + frame = sph_ifs_correct_spectral_xtalk(frame, logger=self._logger) img[f] = frame # check prensence of coordinates @@ -2092,11 +2121,11 @@ def sph_ifs_preprocess_wave(self): # bad pixels correction self._logger.info(' ==> correct bad pixels') - img = sph_ifs_fix_badpix(img, bpm) + img = sph_ifs_fix_badpix(img, bpm, logger=self._logger) # spectral crosstalk correction self._logger.info(' ==> correct spectral crosstalk') - img = sph_ifs_correct_spectral_xtalk(img) + img = sph_ifs_correct_spectral_xtalk(img, logger=self._logger) # add fake coordinates self._logger.debug('> add fake coordinates') @@ -2380,19 +2409,19 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot= # peak 1 sub_idx = wave_idx[0:11] sub_flux = wave_flux[0:11] - par = fit_peak(sub_idx, sub_flux) + par = fit_peak(sub_idx, sub_flux, logger=self._logger) peak_position_lasers.append(par[1]) # peak 2 sub_idx = wave_idx[10:27] sub_flux = wave_flux[10:27] - par = fit_peak(sub_idx, sub_flux) + par = fit_peak(sub_idx, sub_flux, logger=self._logger) peak_position_lasers.append(par[1]) # peak 3 sub_idx = wave_idx[26:] sub_flux = wave_flux[26:] - par = fit_peak(sub_idx, sub_flux) + par = fit_peak(sub_idx, sub_flux, logger=self._logger) peak_position_lasers.append(par[1]) # wavelengths @@ -2401,25 +2430,25 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot= # peak 1 sub_idx = wave_idx[0:8] sub_flux = wave_flux[0:8] - par = fit_peak(sub_idx, sub_flux) + par = fit_peak(sub_idx, sub_flux, logger=self._logger) peak_position_lasers.append(par[1]) # peak 2 sub_idx = wave_idx[5:17] sub_flux = wave_flux[5:17] - par = fit_peak(sub_idx, sub_flux) + par = fit_peak(sub_idx, sub_flux, logger=self._logger) peak_position_lasers.append(par[1]) # peak 3 sub_idx = wave_idx[14:26] sub_flux = wave_flux[14:26] - par = fit_peak(sub_idx, sub_flux) + par = fit_peak(sub_idx, sub_flux, logger=self._logger) peak_position_lasers.append(par[1]) # peak 4 sub_idx = wave_idx[25:] sub_flux = wave_flux[25:] - par = fit_peak(sub_idx, sub_flux) + par = fit_peak(sub_idx, sub_flux, logger=self._logger) peak_position_lasers.append(par[1]) # wavelengths From f540c8d2cb26a657ffdea1a6e625eea3517fe627 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Tue, 10 Sep 2019 16:31:39 +0200 Subject: [PATCH 076/101] Minor updates to logging messages Ticket #63 --- examples/ifs_reduction.py | 6 +++--- vltpf/toolbox.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/examples/ifs_reduction.py b/examples/ifs_reduction.py index 493be0c..d24c09f 100644 --- a/examples/ifs_reduction.py +++ b/examples/ifs_reduction.py @@ -5,7 +5,7 @@ # #%% init reduction -reduction = 
IFS.Reduction('/Users/avigan/data/VLTPF-test-target/IFS/') +reduction = IFS.Reduction('/Users/avigan/data/VLTPF-test-target/IFS/', log_level='debug') #%% configuration reduction.config['preproc_collapse_science'] = True @@ -21,7 +21,7 @@ # #%% init reduction -reduction = IFS.Reduction('/Users/avigan/data/VLTPF-test-target/IFS/') +reduction = IFS.Reduction('/Users/avigan/data/VLTPF-test-target/IFS/', log_level='debug') #%% sorting reduction.sort_files() @@ -50,4 +50,4 @@ save_scaled=False) #%% cleaning -reduction.sph_ifs_clean(delete_raw=False, delete_products=True) +reduction.sph_ifs_clean(delete_raw=False, delete_products=False) diff --git a/vltpf/toolbox.py b/vltpf/toolbox.py index 74d3728..94b8925 100644 --- a/vltpf/toolbox.py +++ b/vltpf/toolbox.py @@ -474,7 +474,7 @@ def star_centers_from_PSF_img_cube(cube, wave, pixel, save_path=None, logger=_lo # loop over images img_centers = np.zeros((nwave, 2)) for idx, (wave, img) in enumerate(zip(wave, cube)): - logger.info(' ==> wave {0:2d}/{1:2d} ({2:.0f} nm)'.format(idx+1, nwave, wave)) + logger.info(' ==> wave {0:2d}/{1:2d} ({2:4.0f} nm)'.format(idx+1, nwave, wave)) # remove any NaN img = np.nan_to_num(img) @@ -718,7 +718,7 @@ def star_centers_from_waffle_img_cube(cube_cen, wave, waffle_orientation, center spot_dist = np.zeros((nwave, 6)) img_centers = np.zeros((nwave, 2)) for idx, (wave, img) in enumerate(zip(wave, cube_cen)): - logger.info(' ==> wave {0:2d}/{1:2d} ({2:.0f} nm)'.format(idx+1, nwave, wave)) + logger.info(' ==> wave {0:2d}/{1:2d} ({2:4.0f} nm)'.format(idx+1, nwave, wave)) # remove any NaN img = np.nan_to_num(img) From adc809e59ea6cec51937e68ed587332e5f6cd0ec Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Tue, 10 Sep 2019 18:58:29 +0200 Subject: [PATCH 077/101] Improve logging in SPHERE dataset object Ticket #63 - log messages saved in log file - use Path() object for most paths - replaced silent option with logger.debug() calls --- examples/sphere_dataset.py | 2 +- vltpf/SPHERE.py | 234 +++++++++++++++++++------------------ 2 files changed, 122 insertions(+), 114 deletions(-) diff --git a/examples/sphere_dataset.py b/examples/sphere_dataset.py index c92ff03..ebf954a 100644 --- a/examples/sphere_dataset.py +++ b/examples/sphere_dataset.py @@ -1,6 +1,6 @@ import vltpf.SPHERE as SPHERE -ds = SPHERE.Dataset('/Users/avigan/data/VLTPF-test-target/') +ds = SPHERE.Dataset('/Users/avigan/data/VLTPF-test-target/test/', log_level='debug') print('IRDIS reductions:') for red in ds.IRDIS_reductions: diff --git a/vltpf/SPHERE.py b/vltpf/SPHERE.py index 0fd6b64..33076eb 100644 --- a/vltpf/SPHERE.py +++ b/vltpf/SPHERE.py @@ -10,13 +10,14 @@ import vltpf.IRDIS as IRDIS import vltpf.IFS as IFS +from pathlib import Path from astropy.io import fits from astropy.time import Time _log = logging.getLogger(__name__) -def process_mainFiles(mainFiles, files, silent=True): +def process_mainFiles(mainFiles, files, logger=_log): ''' Process top-level file association XML from the ESO archive @@ -28,19 +29,19 @@ def process_mainFiles(mainFiles, files, silent=True): files : list List where the files will be appended - silent : bool - Display the activity. Default is True + logger : logHandler object + Log handler for the reduction. 
Default is root logger + ''' # append file names to the list for file in mainFiles: fname = file.attrib['name'] files.append(fname) - if not silent: - _log.info(' ==> {0}'.format(fname)) - + logger.debug(' ==> {0}'.format(fname)) + -def process_association(tree, files, silent=True): +def process_association(tree, files, logger=_log): ''' Process file association XML from the ESO archive @@ -52,36 +53,33 @@ def process_association(tree, files, silent=True): files : list List where the files will be appended - silent : bool - Display the activity. Default is True + logger : logHandler object + Log handler for the reduction. Default is root logger + ''' catg = tree.attrib['category'] - if not silent: - _log.info(catg) + logger.debug(catg) # skip unused calibrations if (catg == 'IFS_STD_ASTROM') or (catg == 'IFS_STD_PHOT') or \ (catg == 'IFS_DIST') or (catg == 'IRD_CLI_PHOT') or \ (catg == 'IRD_DIST'): - if not silent: - _log.info(' ==> skipping') + logger.debug(' ==> skipping') return # process differently mainFiles from associatedFiles for elt in tree: if elt.tag == 'mainFiles': - if not silent: - _log.info('mainFiles') + logger.debug('mainFiles') process_mainFiles(elt, files) elif elt.tag == 'associatedFiles': - if not silent: - _log.info('associatedFiles') + logger.debug('associatedFiles') for nelt in elt: - process_association(nelt, files, silent=silent) + process_association(nelt, files, logger=logger) -def sort_files_from_xml(path, silent=True): +def sort_files_from_xml(path, logger=_log): '''Sort files downloaded from the ESO archive with associated raw calibrations @@ -106,22 +104,25 @@ def sort_files_from_xml(path, silent=True): Parameters ---------- - silent : bool - Display some status of the execution. Default is to be silent + path : str + Path where to look for XML files + + logger : logHandler object + Log handler for the reduction. Default is root logger ''' - xml_files = glob.glob(path+'*.xml') + xml_files = list(path.glob('*.xml')) - _log.info('Sort data based on XML files (ESO automated calibration selection)') - _log.info(' ==> {0} XML files\n'.format(len(xml_files))) + logger.info('Sort data based on XML files (ESO automated calibration selection)') + logger.info(' * {0} XML files\n'.format(len(xml_files))) # sort files for file in xml_files: tree = etree.parse(file) root = tree.getroot() - _log.info(os.path.basename(file)) + logger.info(' * {}'.format(file.name)) # process only IFS and IRDIS science data catg = root.attrib['category'] @@ -133,14 +134,14 @@ def sort_files_from_xml(path, silent=True): filename = scifiles[0].attrib['name'] # Mac OS X replaces : by _ in file names... - if not os.path.exists(path+filename+'.fits'): + if not (path / '{}.fits'.format(filename)).exists(): filename = filename.replace(':', '_') - if not os.path.exists(path+filename+'.fits'): - _log.info(' ==> file {} does not exsist. Skipping'.format(filename)) + if not (path / '{}.fits'.format(filename)).exists(): + logger.info(' ==> file {} does not exist. 
Skipping'.format(filename)) continue - hdr = fits.getheader(path+filename+'.fits') + hdr = fits.getheader(path / '{}.fits'.format(filename)) # target and arm target = hdr['HIERARCH ESO OBS NAME'] @@ -163,56 +164,53 @@ def sort_files_from_xml(path, silent=True): # get files files = [] - process_association(root, files, silent=True) + process_association(root, files, logger=logger) # target path directory = '{0}_id={1}'.format(target, obs_id) directory = '_'.join(directory.split()) - target_path = os.path.join(path, directory, night, instrument, 'raw') - if not os.path.exists(target_path): - os.makedirs(target_path) + target_path = path / directory / night / instrument / 'raw' + target_path.mkdir(parents=True, exist_ok=True) # copy files for filename in files: - fpath = os.path.join(path, filename+'.fits') + fpath = path / '{}.fits'.format(filename) # Mac OS X replaces : by _ in file names... - if not os.path.exists(fpath): + if not fpath.exists(): filename = filename.replace(':', '_') - fpath = os.path.join(path, filename+'.fits') + fpath = path / '{}.fits'.format(filename) # check if file actually exists - if not os.path.exists(fpath): - _log.info(' ==> file {} does not exist. Skipping.'.format(fpath)) + if not fpath.exists(): + logger.info(' ==> file {} does not exist. Skipping.'.format(fpath)) continue # copy if needed - nfpath = os.path.join(target_path, filename+'.fits') - if not os.path.exists(nfpath): + nfpath = target_path / '{}.fits'.format(filename) + if not nfpath.exists(): shutil.copy(fpath, nfpath) # print status - if not silent: - _log.info('{0} - id={1}'.format(target, obs_id)) - _log.info(' ==> found {0} files'.format(len(files))) - _log.info(' ==> copied to {0}'.format(target_path)) + logger.debug('{0} - id={1}'.format(target, obs_id)) + logger.debug(' ==> found {0} files'.format(len(files))) + logger.debug(' ==> copied to {0}'.format(target_path)) # move all files - path_new = os.path.join(path, 'all_files') - if not os.path.exists(path_new): - os.makedirs(path_new) + path_new = path / 'all_files' + path_new.mkdir(parents=True, exist_ok=True) files = [] - files.extend(glob.glob(os.path.join(path+'*.fits'))) - files.extend(glob.glob(os.path.join(path+'*.xml'))) - files.extend(glob.glob(os.path.join(path+'*.txt'))) + files.extend(list(path.glob('*.fits'))) + files.extend(list(path.glob('*.xml'))) + files.extend(list(path.glob('*.txt'))) if len(files) != 0: for file in files: - shutil.move(file, path_new) + file.rename(path_new / file.name) -def sort_files_from_fits(path, silent=True): +def sort_files_from_fits(path, logger=_log): '''Sort FITS files based only based on their headers Contrary to sort_files_from_xml(), this method is dumb in the @@ -228,18 +226,23 @@ def sort_files_from_fits(path, silent=True): Parameters ---------- - silent : bool - Display some status of the execution. Default is to be silent + path : str + Path where to look for FITS files + + logger : logHandler object + Log handler for the reduction. 
Default is root logger ''' - fits_files = glob.glob(path+'*.fits') + fits_files = list(path.glob('*.fits')) - _log.info('Sort data based on FITS files') - _log.info(' ==> {0} FITS files\n'.format(len(fits_files))) + logger.info('Sort data based on FITS files') + logger.info(' * {0} FITS files\n'.format(len(fits_files))) # sort files for file in fits_files: + logger.info(' * {}'.format(file.name)) + # target and arm hdr = fits.getheader(file) @@ -265,51 +268,49 @@ def sort_files_from_fits(path, silent=True): continue # target path - target_path = os.path.join(path, instrument, 'raw') - if not os.path.exists(target_path): - os.makedirs(target_path) + target_path = path / instrument / 'raw' + target_path.mkdir(parents=True, exist_ok=True) # move file - nfile = os.path.join(target_path, os.path.basename(file)) - shutil.move(file, nfile) + file.rename(target_path / file.name) # print status - if not silent: - _log.info('{0} - id={1}'.format(target, obs_id)) - _log.info(' ==> copied to {0}'.format(target_path)) + logger.debug('{0} - id={1}'.format(target, obs_id)) + logger.debug(' ==> copied to {0}'.format(target_path)) # move all files - path_new = os.path.join(path, 'unsorted_files') - if not os.path.exists(path_new): - os.makedirs(path_new) + path_new = path / 'unsorted_files' + path_new.mkdir(parents=True, exist_ok=True) files = [] - files.extend(glob.glob(os.path.join(path+'*.fits'))) - files.extend(glob.glob(os.path.join(path+'*.txt'))) + files.extend(list(path.glob('*.fits'))) + files.extend(list(path.glob('*.txt'))) if len(files) != 0: for file in files: - shutil.move(file, path_new) + file.rename(path_new / file.name) -def classify_irdis_dataset(path): +def classify_irdis_dataset(path, logger=_log): '''Classify an IRDIS dataset based on the science files + Parameters + ---------- path : str Path to the directory containing the dataset - ''' - - # expand path - path = os.path.expanduser(os.path.join(path, '')) + logger : logHandler object + Log handler for the reduction. Default is root logger + ''' + # zeroth-order reduction validation - raw = os.path.join(path, 'raw') - if not os.path.exists(raw): + raw = path / 'raw' + if not raw.exists(): raise ValueError('No raw/ subdirectory. 
{0} is not a valid reduction path!'.format(path)) # list all fits files - files = glob.glob(os.path.join(raw, '*.fits')) + files = list(raw.glob('*.fits')) if len(files) == 0: return None @@ -347,7 +348,7 @@ class Dataset: ###################### # Constructor ###################### - def __init__(self, path): + def __init__(self, path, log_level='info'): ''' Initialization code for a SPHERE dataset @@ -361,22 +362,40 @@ def __init__(self, path): raise ValueError('path must be a string') # path - path = os.path.expanduser(os.path.join(path, '')) + path = Path(path).expanduser().resolve() self._path = path + # configure logging + logger = logging.getLogger(str(path)) + logger.setLevel(log_level.upper()) + if logger.hasHandlers(): + for hdlr in logger.handlers: + logger.removeHandler(hdlr) + + handler = logging.FileHandler(self._path / 'dataset.log', mode='w', encoding='utf-8') + formatter = logging.Formatter('%(asctime)s\t%(levelname)8s\t%(message)s') + formatter.default_msec_format = '%s.%03d' + handler.setFormatter(formatter) + logger.addHandler(handler) + + self._log_level = log_level + self._logger = logger + + self._logger.info('Looking for SPHERE data sets at path {}'.format(path)) + # list of reductions self._IFS_reductions = [] self._IRDIS_reductions = [] # search for data with calibrations downloaded from ESO archive - xml_files = glob.glob(os.path.join(path, '*.xml')) + xml_files = list(path.glob('*.xml')) if len(xml_files) != 0: - sort_files_from_xml(path) + sort_files_from_xml(path, logger=self._logger) # directly search for data - fits_files = glob.glob(os.path.join(path, '*.fits')) + fits_files = list(path.glob('*.fits')) if len(fits_files) != 0: - sort_files_from_fits(path) + sort_files_from_fits(path, logger=self._logger) # recursively look for valid reduction self._create_reductions() @@ -414,9 +433,7 @@ def init_reduction(self): ''' for r in self._reductions: - _log.info('*') - _log.info('* Initialization of {0} reduction at path {1}'.format(r.instrument, r.path)) - _log.info('*') + self._logger.info('Init: {}'.format(r)) r.init_reduction() @@ -427,9 +444,7 @@ def create_static_calibrations(self): ''' for r in self._reductions: - _log.info('*') - _log.info('* Static calibrations for {0} at path {1}'.format(r.instrument, r.path)) - _log.info('*') + self._logger.info('Static calibrations: {}'.format(r)) r.create_static_calibrations() @@ -440,9 +455,7 @@ def preprocess_science(self): ''' for r in self._reductions: - _log.info('*') - _log.info('* Pre-process data for {0} at path {1}'.format(r.instrument, r.path)) - _log.info('*') + self._logger.info('Science pre-processing: {}'.format(r)) r.preprocess_science() @@ -454,9 +467,7 @@ def process_science(self): ''' for r in self._reductions: - _log.info('*') - _log.info('* Process data for {0} at path {1}'.format(r.instrument, r.path)) - _log.info('*') + self._logger.info('Science processing: {}'.format(r)) r.process_science() @@ -468,9 +479,7 @@ def clean(self): ''' for r in self._reductions: - _log.info('*') - _log.info('* Clean {0} reduction at path {1}'.format(r.instrument, r.path)) - _log.info('*') + self._logger.info('Clean-up: {}'.format(r)) r.clean() @@ -482,9 +491,7 @@ def full_reduction(self): ''' for r in self._reductions: - _log.info('*') - _log.info('* Full {0} reduction at path {1}'.format(r.instrument, r.path)) - _log.info('*') + self._logger.info('Full {0} reduction at path {1}'.format(r.instrument, r.path)) r.full_reduction() @@ -497,7 +504,7 @@ def _create_reductions(self): Detect and create valid reductions in 
path
        '''

-        _log.info('Create reductions from available data')
+        self._logger.info('Create reductions from sorted data')

        wpath = os.walk(self._path)
        for w in wpath:
@@ -513,28 +520,29 @@ def _create_reductions(self):
                try:
                    arm = hdr['HIERARCH ESO SEQ ARM']
                    if arm == 'IRDIS':
-                        mode = classify_irdis_dataset(reduction_path)
+                        mode = classify_irdis_dataset(Path(reduction_path), logger=self._logger)

-                        instrument = 'IRDIS'
                        if mode == 'imaging':
-                            reduction = IRDIS.ImagingReduction(reduction_path)
+                            self._logger.info(' * IRDIS imaging reduction at path {}'.format(reduction_path))
+                            reduction = IRDIS.ImagingReduction(reduction_path, log_level=self._log_level)
                        elif mode == 'polar':
-                            _log.info('Warning: IRDIS DPI not supported yet')
+                            self._logger.warning('IRDIS DPI not supported yet')
                        elif mode == 'spectro':
-                            reduction = IRDIS.SpectroReduction(reduction_path)
+                            self._logger.info(' * IRDIS spectro reduction at path {}'.format(reduction_path))
+                            reduction = IRDIS.SpectroReduction(reduction_path, log_level=self._log_level)

                        self._IRDIS_reductions.append(reduction)
                    elif arm == 'IFS':
-                        instrument = 'IFS'
-                        reduction = IFS.Reduction(reduction_path)
+                        self._logger.info(' * IFS reduction at path {}'.format(reduction_path))
+                        reduction = IFS.Reduction(reduction_path, log_level=self._log_level)
                        self._IFS_reductions.append(reduction)
                    else:
                        raise NameError('Unknown arm {0}'.format(arm))
                except:
                    continue

-                _log.info(reduction_path)
-                _log.info(' ==> {0}, {1} files'.format(instrument, len(fits_files)))
+                # self._logger.info(reduction_path)
+                self._logger.info(' ==> {} files'.format(len(fits_files)))

        # merge all reductions into a single list
        self._reductions = self._IFS_reductions + self._IRDIS_reductions

From e9f3a1edb8685ffa240c4fae7c0007e75c29e6be Mon Sep 17 00:00:00 2001
From: Arthur Vigan
Date: Tue, 10 Sep 2019 19:13:04 +0200
Subject: [PATCH 078/101] Small fixes to the _create_reductions() function

---
 vltpf/SPHERE.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/vltpf/SPHERE.py b/vltpf/SPHERE.py
index 33076eb..4550911 100644
--- a/vltpf/SPHERE.py
+++ b/vltpf/SPHERE.py
@@ -513,14 +513,14 @@ def _create_reductions(self):
            # if directory has a raw/ sub-directory, make sure it
            # has FITS files and that they are from a valid
            # sub-system
-            reduction_path = w[0]
-            fits_files = glob.glob(os.path.join(reduction_path, 'raw', '*.fits'))
+            reduction_path = Path(w[0])
+            fits_files = list((reduction_path / 'raw').glob('*.fits'))
            if len(fits_files) != 0:
                hdr = fits.getheader(fits_files[0])
                try:
                    arm = hdr['HIERARCH ESO SEQ ARM']
                    if arm == 'IRDIS':
-                        mode = classify_irdis_dataset(Path(reduction_path), logger=self._logger)
+                        mode = classify_irdis_dataset(reduction_path, logger=self._logger)

                        if mode == 'imaging':
                            self._logger.info(' * IRDIS imaging reduction at path {}'.format(reduction_path))

From b42d80aad996952958ae4908be2a305179a90feb Mon Sep 17 00:00:00 2001
From: Arthur Vigan
Date: Thu, 12 Sep 2019 19:15:23 +0200
Subject: [PATCH 079/101] Add custom instantiation function to handle
 non-valid reduction directories

Ticket #41

The function __new__() now returns None if the path provided is not a
valid reduction directory and an error is logged in the root logger
---
 vltpf/IFS.py                    | 23 ++++++++++++++++++++++-
 vltpf/IRDIS/ImagingReduction.py | 22 ++++++++++++++++++++++
 vltpf/IRDIS/SpectroReduction.py | 20 ++++++++++++++++++++
 3 files changed, 64 insertions(+), 1 deletion(-)

diff --git a/vltpf/IFS.py b/vltpf/IFS.py
index 0d9427b..8833364 100644
--- a/vltpf/IFS.py
+++ b/vltpf/IFS.py
@@ -391,6 +391,26 @@
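The caller-visible effect of this patch is easy to demonstrate. The snippet below is a minimal sketch and not part of the patch itself; it assumes the package is importable as vltpf and that the given path (purely hypothetical) has no raw/ subdirectory:

    import vltpf.IFS as IFS

    # __new__() now logs an error and returns None instead of raising
    # when the directory is not a valid reduction path, so the result
    # of the constructor must be checked before use
    reduction = IFS.Reduction('/path/without/raw')
    if reduction is None:
        print('Not a valid reduction directory')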
class Reduction(object):
     ##################################################
     # Constructor
     ##################################################

+    def __new__(cls, path, log_level='info'):
+        '''Custom instantiation for the class
+
+        The customized instantiation enables to check that the
+        provided path is a valid reduction path. If not None will be
+        returned for the reduction being created
+        '''
+
+        # expand path
+        path = Path(path).expanduser().resolve()
+
+        # zeroth-order reduction validation
+        raw = path / 'raw'
+        if not raw.exists():
+            _log.error('No raw/ subdirectory. {0} is not a valid reduction path'.format(path))
+            return None
+        else:
+            return super(Reduction, cls).__new__(cls)
+
+
     def __init__(self, path, log_level='info'):
         '''
         Initialization of the IFSReduction
@@ -491,7 +511,8 @@ def __init__(self, path, log_level='info'):
     ##################################################

     def __repr__(self):
-        return '<Reduction, instrument={0}, mode={1}, path={2}>'.format(self._instrument, self._mode, self._path)
+        if self is not None:
+            return '<Reduction, instrument={0}, mode={1}, path={2}>'.format(self._instrument, self._mode, self._path)

     def __format__(self):
         return self.__repr__()

diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py
index 8e7499e..4b56d42 100644
--- a/vltpf/IRDIS/ImagingReduction.py
+++ b/vltpf/IRDIS/ImagingReduction.py
@@ -19,6 +19,8 @@
 import vltpf.transmission as transmission
 import vltpf.toolbox as toolbox

+_log = logging.getLogger(__name__)
+

 class ImagingReduction(object):
     '''
@@ -48,6 +50,26 @@ class ImagingReduction(object):
     # Constructor
     ##################################################

+    def __new__(cls, path, log_level='info'):
+        '''Custom instantiation for the class
+
+        The customized instantiation enables to check that the
+        provided path is a valid reduction path. If not None will be
+        returned for the reduction being created
+        '''
+
+        # expand path
+        path = Path(path).expanduser().resolve()
+
+        # zeroth-order reduction validation
+        raw = path / 'raw'
+        if not raw.exists():
+            _log.error('No raw/ subdirectory. {0} is not a valid reduction path'.format(path))
+            return None
+        else:
+            return super(ImagingReduction, cls).__new__(cls)
+
+
     def __init__(self, path, log_level='info'):
         '''Initialization of the ImagingReduction instances

diff --git a/vltpf/IRDIS/SpectroReduction.py b/vltpf/IRDIS/SpectroReduction.py
index 3f4e29f..01b2de4 100644
--- a/vltpf/IRDIS/SpectroReduction.py
+++ b/vltpf/IRDIS/SpectroReduction.py
@@ -94,6 +94,26 @@ class SpectroReduction(object):
     # Constructor
     ##################################################

+    def __new__(cls, path, log_level='info'):
+        '''Custom instantiation for the class
+
+        The customized instantiation enables to check that the
+        provided path is a valid reduction path. If not None will be
+        returned for the reduction being created
+        '''
+
+        # expand path
+        path = Path(path).expanduser().resolve()
+
+        # zeroth-order reduction validation
+        raw = path / 'raw'
+        if not raw.exists():
+            _log.error('No raw/ subdirectory. 
{0} is not a valid reduction path'.format(path)) + return None + else: + return super(SpectroReduction, cls).__new__(cls) + + def __init__(self, path, log_level='info'): '''Initialization of the SpectroReduction instances From eac5ed3212e1b7eeaee491748ea71c3aeebccdbf Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Thu, 12 Sep 2019 19:18:03 +0200 Subject: [PATCH 080/101] Remove check of valid reduction directory from __init__() Ticket #43 The check is now done at instantiation level in __new__() --- vltpf/IFS.py | 5 ----- vltpf/IRDIS/ImagingReduction.py | 5 ----- vltpf/IRDIS/SpectroReduction.py | 5 ----- 3 files changed, 15 deletions(-) diff --git a/vltpf/IFS.py b/vltpf/IFS.py index 8833364..f23bcb5 100644 --- a/vltpf/IFS.py +++ b/vltpf/IFS.py @@ -427,11 +427,6 @@ def __init__(self, path, log_level='info'): # expand path path = Path(path).expanduser().resolve() - # zeroth-order reduction validation - raw = path / 'raw' - if not raw.exists(): - raise ValueError('No raw/ subdirectory. {0} is not a valid reduction path!'.format(path)) - # init path and name self._path = utils.ReductionPath(path) self._instrument = 'IFS' diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py index 4b56d42..eb6de32 100644 --- a/vltpf/IRDIS/ImagingReduction.py +++ b/vltpf/IRDIS/ImagingReduction.py @@ -85,11 +85,6 @@ def __init__(self, path, log_level='info'): # expand path path = Path(path).expanduser().resolve() - # zeroth-order reduction validation - raw = path / 'raw' - if not raw.exists(): - raise ValueError('No raw/ subdirectory. {0} is not a valid reduction path!'.format(path)) - # init path and name self._path = utils.ReductionPath(path) self._instrument = 'IRDIS' diff --git a/vltpf/IRDIS/SpectroReduction.py b/vltpf/IRDIS/SpectroReduction.py index 01b2de4..3a389db 100644 --- a/vltpf/IRDIS/SpectroReduction.py +++ b/vltpf/IRDIS/SpectroReduction.py @@ -129,11 +129,6 @@ def __init__(self, path, log_level='info'): # expand path path = Path(path).expanduser().resolve() - # zeroth-order reduction validation - raw = path / 'raw' - if not raw.exists(): - raise ValueError('No raw/ subdirectory. 
{0} is not a valid reduction path!'.format(path)) - # init path and name self._path = utils.ReductionPath(path) self._instrument = 'IRDIS' From e91b4948501fd3f186978010e3ed355594615698 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Fri, 13 Sep 2019 08:29:09 +0200 Subject: [PATCH 081/101] Implement reduction statuses Ticket #41 --- vltpf/__init__.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/vltpf/__init__.py b/vltpf/__init__.py index 563cd92..b1e290f 100644 --- a/vltpf/__init__.py +++ b/vltpf/__init__.py @@ -10,7 +10,14 @@ _log.setLevel(logging.DEBUG) _log.info('VLTPF init') +# reduction status +SUCCESS = 0 +ERROR = 1 +INIT = -1 +INCOMPLETE = -2 + +# log level def set_loglevel(level): ''' Set the logging level for the module From d5514de37532dc176b5e5e1b6c485c74749f700f Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Fri, 13 Sep 2019 08:29:55 +0200 Subject: [PATCH 082/101] Reorganise __init__ in preparation for error handling Ticket #41 --- examples/ifs_reduction.py | 4 +- vltpf/IFS.py | 85 +++++++++++++++++++++++++-------------- 2 files changed, 56 insertions(+), 33 deletions(-) diff --git a/examples/ifs_reduction.py b/examples/ifs_reduction.py index d24c09f..980e72a 100644 --- a/examples/ifs_reduction.py +++ b/examples/ifs_reduction.py @@ -5,7 +5,7 @@ # #%% init reduction -reduction = IFS.Reduction('/Users/avigan/data/VLTPF-test-target/IFS/', log_level='debug') +reduction = IFS.Reduction('/Users/avigan/data/VLTPF-test-target/IFS/', log_level='info') #%% configuration reduction.config['preproc_collapse_science'] = True @@ -21,7 +21,7 @@ # #%% init reduction -reduction = IFS.Reduction('/Users/avigan/data/VLTPF-test-target/IFS/', log_level='debug') +reduction = IFS.Reduction('/Users/avigan/data/VLTPF-test-target/IFS/', log_level='info') #%% sorting reduction.sort_files() diff --git a/vltpf/IFS.py b/vltpf/IFS.py index f23bcb5..b4960c6 100644 --- a/vltpf/IFS.py +++ b/vltpf/IFS.py @@ -423,18 +423,25 @@ def __init__(self, path, log_level='info'): level : {'debug', 'info', 'warning', 'error', 'critical'} The log level of the handler ''' - - # expand path - path = Path(path).expanduser().resolve() + # + # basic init + # + + # set status of reduction + self._status = vltpf.INIT + # init path and name + path = Path(path).expanduser().resolve() self._path = utils.ReductionPath(path) + + # instrument and mode self._instrument = 'IFS' - - # instrument mode self._mode = 'Unknown' - # configure logging + # + # logging + # logger = logging.getLogger(str(path)) logger.setLevel(log_level.upper()) if logger.hasHandlers(): @@ -451,35 +458,37 @@ def __init__(self, path, log_level='info'): self._logger.info('Creating IFS reduction at path {}'.format(path)) + # # configuration + # self._logger.debug('> read default configuration') configfile = Path(vltpf.__file__).parent / 'instruments' / '{}.ini'.format(self._instrument) config = configparser.ConfigParser() - try: - self._logger.debug('Read configuration') - config.read(configfile) - - # instrument - self._pixel = float(config.get('instrument', 'pixel')) - self._nwave = int(config.get('instrument', 'nwave')) - - # calibration - self._wave_cal_lasers = np.array(eval(config.get('calibration', 'wave_cal_lasers'))) - self._default_center = np.array(eval(config.get('calibration', 'default_center'))) - self._orientation_offset = eval(config.get('calibration', 'orientation_offset')) - - # reduction parameters - self._config = dict(config.items('reduction')) - for key, value in self._config.items(): - try: - val = eval(value) - except NameError: - 
val = value
-            self._config[key] = val
-        except configparser.Error as e:
-            raise ValueError('Error reading configuration file for instrument {0}: {1}'.format(self._instrument, e.message))
+        self._logger.debug('Read configuration')
+        config.read(configfile)
+
+        # instrument
+        self._pixel = float(config.get('instrument', 'pixel'))
+        self._nwave = int(config.get('instrument', 'nwave'))
+
+        # calibration
+        self._wave_cal_lasers = np.array(eval(config.get('calibration', 'wave_cal_lasers')))
+        self._default_center = np.array(eval(config.get('calibration', 'default_center')))
+        self._orientation_offset = eval(config.get('calibration', 'orientation_offset'))
+
+        # reduction parameters
+        self._config = dict(config.items('reduction'))
+        for key, value in self._config.items():
+            try:
+                val = eval(value)
+            except NameError:
+                val = value
+            self._config[key] = val
+
+        #
         # execution of recipes
+        #
         self._recipe_execution = {
             'sort_files': False,
             'sort_frames': False,
@@ -506,8 +515,18 @@ def __init__(self, path, log_level='info'):
     ##################################################

     def __repr__(self):
-        if self is not None:
-            return '<Reduction, instrument={0}, mode={1}, path={2}>'.format(self._instrument, self._mode, self._path)
+        if self._status == vltpf.INIT:
+            status = 'INIT'
+        elif self._status == vltpf.INCOMPLETE:
+            status = 'INCOMPLETE'
+        elif self._status == vltpf.ERROR:
+            status = 'ERROR'
+        elif self._status == vltpf.SUCCESS:
+            status = 'SUCCESS'
+        else:
+            status = 'UNKNOWN'
+
+        return '<Reduction, instrument={0}, mode={1}, path={2}, status={3}>'.format(self._instrument, self._mode, self._path, status)

     def __format__(self):
         return self.__repr__()
@@ -556,6 +575,10 @@ def config(self):
     def mode(self):
         return self._mode

+    @property
+    def status(self):
+        return self._status
+
     ##################################################
     # Generic class methods
     ##################################################

From 3494528b879c103d0c16e40388527353212452eb Mon Sep 17 00:00:00 2001
From: Arthur Vigan
Date: Fri, 13 Sep 2019 09:03:16 +0200
Subject: [PATCH 083/101] Add more status levels

Ticket #41
---
 vltpf/__init__.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/vltpf/__init__.py b/vltpf/__init__.py
index b1e290f..1e0f11d 100644
--- a/vltpf/__init__.py
+++ b/vltpf/__init__.py
@@ -13,8 +13,12 @@
 # reduction status
 SUCCESS = 0
 ERROR = 1
+
 INIT = -1
 INCOMPLETE = -2
+COMPLETED = -3
+
+NOTSET = -1000

 # log level

From dd6988825f3d82d7e3b07f7504dd13bcfb11898a Mon Sep 17 00:00:00 2001
From: Arthur Vigan
Date: Fri, 13 Sep 2019 09:04:29 +0200
Subject: [PATCH 084/101] Preparation for better recipe execution status

Ticket #41
---
 vltpf/IFS.py | 41 +++++++++++++++++------------------------
 1 file changed, 17 insertions(+), 24 deletions(-)

diff --git a/vltpf/IFS.py b/vltpf/IFS.py
index b4960c6..62ac84d 100644
--- a/vltpf/IFS.py
+++ b/vltpf/IFS.py
@@ -11,6 +11,7 @@
 import matplotlib.patches as patches
 import matplotlib.colors as colors
 import configparser
+import collections

 from pathlib import Path
 from astropy.io import fits
@@ -427,10 +428,7 @@ def __init__(self, path, log_level='info'):
         #
         # basic init
         #
-
-        # set status of reduction
-        self._status = vltpf.INIT
-
+
         # init path and name
         path = Path(path).expanduser().resolve()
         self._path = utils.ReductionPath(path)
@@ -487,7 +485,7 @@ def __init__(self, path, log_level='info'):
             self._config[key] = val

         #
-        # execution of recipes
+        # reduction status
         #
         self._recipe_execution = {
             'sort_files': False,
@@ -506,27 +504,19 @@ def __init__(self, path, log_level='info'):
             'sph_ifs_combine_data': False,
             'sph_ifs_clean': False
         }
+
+        self._reduction_status = vltpf.INIT
+        self._recipe_status = vltpf.NOTSET

         # reload any existing data frames
-        self.read_info()
+        self._read_info()

     ##################################################
     # Representation
     ##################################################

     def __repr__(self):
-        if self._status == vltpf.INIT:
-            status = 'INIT'
-        elif self._status == vltpf.INCOMPLETE:
-            status = 'INCOMPLETE'
-        elif self._status == vltpf.ERROR:
-            status = 'ERROR'
-        elif self._status == vltpf.SUCCESS:
-            status = 'SUCCESS'
-        else:
-            status = 'UNKNOWN'
-
-        return '<Reduction, instrument={0}, mode={1}, path={2}, status={3}>'.format(self._instrument, self._mode, self._path, status)
+        return '<Reduction, instrument={0}, mode={1}, path={2}>'.format(self._instrument, self._mode, self._path)

     def __format__(self):
         return self.__repr__()
@@ -575,10 +565,6 @@ def config(self):
     def mode(self):
         return self._mode

-    @property
-    def status(self):
-        return self._status
-
     ##################################################
     # Generic class methods
     ##################################################
@@ -734,10 +720,10 @@ def full_reduction(self):
         self.clean()

     ##################################################
-    # SPHERE/IFS methods
+    # Private methods
     ##################################################

-    def read_info(self):
+    def _read_info(self):
         '''
         Read the files, calibs and frames information from disk
@@ -749,6 +735,9 @@
         frames_info_preproc : dataframe
             The data frame with all the information on science frames after pre-processing
+
+        This function is not supposed to be called directly by the user.
+
         '''

         self._logger.info('Read existing reduction information')
@@ -870,6 +859,10 @@
         self._recipe_execution['sph_ifs_star_center'] = done
         self._logger.debug('> sph_ifs_star_center status = {}'.format(done))

+
+    ##################################################
+    # SPHERE/IFS methods
+    ##################################################

     def sort_files(self):
         '''

From 39ff51ec566312c725839dc5dabe72ace981fc56 Mon Sep 17 00:00:00 2001
From: Arthur Vigan
Date: Fri, 13 Sep 2019 09:39:41 +0200
Subject: [PATCH 085/101] Change of the recipe_execution dict into an ordered
 dict

Ticket #41
---
 vltpf/IFS.py                    |  43 ++++++-----
 vltpf/IRDIS/ImagingReduction.py | 114 ++++++++++++++------------
 vltpf/IRDIS/SpectroReduction.py | 124 ++++++++++++++++--------------
 3 files changed, 153 insertions(+), 128 deletions(-)

diff --git a/vltpf/IFS.py b/vltpf/IFS.py
index 62ac84d..58f39eb 100644
--- a/vltpf/IFS.py
+++ b/vltpf/IFS.py
@@ -429,7 +429,7 @@ def __init__(self, path, log_level='info'):
         # basic init
         #

-        # init path and name
+        # init path
         path = Path(path).expanduser().resolve()
         self._path = utils.ReductionPath(path)
@@ -487,27 +487,24 @@ def __init__(self, path, log_level='info'):
         #
         # reduction status
         #
-        self._recipe_execution = {
-            'sort_files': False,
-            'sort_frames': False,
-            'check_files_association': False,
-            'sph_ifs_cal_dark': False,
-            'sph_ifs_cal_detector_flat': False,
-            'sph_ifs_cal_specpos': False,
-            'sph_ifs_cal_wave': False,
-            'sph_ifs_cal_ifu_flat': False,
-            'sph_ifs_preprocess_science': False,
-            'sph_ifs_preprocess_wave': False,
-            'sph_ifs_science_cubes': False,
-            'sph_ifs_wavelength_recalibration': False,
-            'sph_ifs_star_center': False,
-            'sph_ifs_combine_data': False,
-            'sph_ifs_clean': False
-        }
+        self._recipe_execution = collections.OrderedDict(
+            [('sort_files', False),
+             ('sort_frames', False),
+             ('check_files_association', False),
+             ('sph_ifs_cal_dark', False),
+             ('sph_ifs_cal_detector_flat', False),
+             ('sph_ifs_cal_specpos', False),
+             ('sph_ifs_cal_wave', False),
+             ('sph_ifs_cal_ifu_flat', False),
+
('sph_ifs_preprocess_science', False), + ('sph_ifs_preprocess_wave', False), + ('sph_ifs_science_cubes', False), + ('sph_ifs_wavelength_recalibration', False), + ('sph_ifs_star_center', False), + ('sph_ifs_combine_data', False), + ('sph_ifs_clean', False)] + ) - self._reduction_status = vltpf.INIT - self._recipe_status = vltpf.NOTSET - # reload any existing data frames self._read_info() @@ -823,8 +820,8 @@ def _read_info(self): self._logger.debug('> sph_ifs_preprocess_wave status = {}'.format(done)) done = (path.preproc / 'wavelength_default.fits').exists() - self._recipe_execution['sph_ifs_wave_calib'] = done - self._logger.debug('> sph_ifs_wave_calib status = {}'.format(done)) + self._recipe_execution['sph_ifs_cal_wave'] = done + self._logger.debug('> sph_ifs_cal_wave status = {}'.format(done)) done = (path.preproc / 'wavelength_recalibrated.fits').exists() self._recipe_execution['sph_ifs_wavelength_recalibration'] = done diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py index eb6de32..1f71af8 100644 --- a/vltpf/IRDIS/ImagingReduction.py +++ b/vltpf/IRDIS/ImagingReduction.py @@ -7,6 +7,7 @@ import scipy.optimize as optim import shutil import configparser +import collections from pathlib import Path from astropy.io import fits @@ -82,17 +83,21 @@ def __init__(self, path, log_level='info'): The log level of the handler ''' - # expand path - path = Path(path).expanduser().resolve() + # + # basic init + # - # init path and name + # init path + path = Path(path).expanduser().resolve() self._path = utils.ReductionPath(path) - self._instrument = 'IRDIS' - # instrument mode + # instrument and mode + self._instrument = 'IRDIS' self._mode = 'Unknown' - # configure logging + # + # logging + # logger = logging.getLogger(str(path)) logger.setLevel(log_level.upper()) if logger.hasHandlers(): @@ -109,53 +114,55 @@ def __init__(self, path, log_level='info'): self._logger.info('Creating IRDIS imaging reduction at path {}'.format(path)) + # # configuration + # configfile = Path(vltpf.__file__).parent / 'instruments' / '{}.ini'.format(self._instrument) config = configparser.ConfigParser() - try: - self._logger.debug('> read default configuration') - config.read(configfile) - - # instrument - self._pixel = float(config.get('instrument', 'pixel')) - self._nwave = 2 - - # calibration - self._wave_cal_lasers = np.array(eval(config.get('calibration', 'wave_cal_lasers'))) - - # imaging calibration - self._default_center = np.array(eval(config.get('calibration-imaging', 'default_center'))) - self._orientation_offset = eval(config.get('calibration-imaging', 'orientation_offset')) - - # reduction parameters - self._config = {} - for group in ['reduction', 'reduction-imaging']: - items = dict(config.items(group)) - self._config.update(items) - for key, value in items.items(): - try: - val = eval(value) - except NameError: - val = value - self._config[key] = val - except configparser.Error as e: - raise ValueError('Error reading configuration file for instrument {0}: {1}'.format(self._instrument, e.message)) - - # execution of recipes - self._recipe_execution = { - 'sort_files': False, - 'sort_frames': False, - 'check_files_association': False, - 'sph_ird_cal_dark': False, - 'sph_ird_cal_detector_flat': False, - 'sph_ird_preprocess_science': False, - 'sph_ird_star_center': False, - 'sph_ird_combine_data': False, - 'sph_ird_clean': False - } + + self._logger.debug('> read default configuration') + config.read(configfile) + + # instrument + self._pixel = float(config.get('instrument', 
'pixel')) + self._nwave = 2 + + # calibration + self._wave_cal_lasers = np.array(eval(config.get('calibration', 'wave_cal_lasers'))) + + # imaging calibration + self._default_center = np.array(eval(config.get('calibration-imaging', 'default_center'))) + self._orientation_offset = eval(config.get('calibration-imaging', 'orientation_offset')) + + # reduction parameters + self._config = {} + for group in ['reduction', 'reduction-imaging']: + items = dict(config.items(group)) + self._config.update(items) + for key, value in items.items(): + try: + val = eval(value) + except NameError: + val = value + self._config[key] = val + + # + # reduction status + # + self._recipe_execution = collections.OrderedDict( + [('sort_files', False), + ('sort_frames', False), + ('check_files_association', False), + ('sph_ird_cal_dark', False), + ('sph_ird_cal_detector_flat', False), + ('sph_ird_preprocess_science', False), + ('sph_ird_star_center', False), + ('sph_ird_combine_data', False), + ('sph_ird_clean', False)] + ) # reload any existing data frames - self.read_info() + self._read_info() ################################################## # Representation @@ -358,10 +365,10 @@ def full_reduction(self): self.clean() ################################################## - # SPHERE/IRDIS methods + # Private methods ################################################## - def read_info(self): + def _read_info(self): ''' Read the files, calibs and frames information from disk @@ -373,6 +380,9 @@ def read_info(self): frames_info_preproc : dataframe The data frame with all the information on science frames after pre-processing + + This function is not supposed to be called directly by the user. + ''' self._logger.info('Read existing reduction information') @@ -466,6 +476,10 @@ def read_info(self): self._logger.debug('> sph_ird_star_center status = {}'.format(done)) + ################################################## + # SPHERE/IRDIS methods + ################################################## + def sort_files(self): ''' Sort all raw files and save result in a data frame diff --git a/vltpf/IRDIS/SpectroReduction.py b/vltpf/IRDIS/SpectroReduction.py index 3a389db..b094413 100644 --- a/vltpf/IRDIS/SpectroReduction.py +++ b/vltpf/IRDIS/SpectroReduction.py @@ -11,6 +11,7 @@ import matplotlib.patches as patches import matplotlib.colors as colors import configparser +import collections from pathlib import Path from astropy.io import fits @@ -126,17 +127,21 @@ def __init__(self, path, log_level='info'): The log level of the handler ''' - # expand path + # + # basic init + # + + # init path path = Path(path).expanduser().resolve() - - # init path and name self._path = utils.ReductionPath(path) - self._instrument = 'IRDIS' - # instrument mode + # instrument and mode + self._instrument = 'IRDIS' self._mode = 'Unknown' - # configure logging + # + # logging + # logger = logging.getLogger(str(path)) logger.setLevel(log_level.upper()) if logger.hasHandlers(): @@ -153,60 +158,62 @@ def __init__(self, path, log_level='info'): self._logger.info('Creating IRDIS spectroscopy reduction at path {}'.format(path)) + # # configuration + # configfile = Path(vltpf.__file__).parent / 'instruments' / '{}.ini'.format(self._instrument) config = configparser.ConfigParser() - try: - self._logger.debug('> read configuration') - config.read(configfile) - - # instrument - self._pixel = float(config.get('instrument', 'pixel')) - self._nwave = -1 + + self._logger.debug('> read configuration') + config.read(configfile) + + # instrument + self._pixel = 
float(config.get('instrument', 'pixel')) + self._nwave = -1 + + # calibration + self._wave_cal_lasers = np.array(eval(config.get('calibration', 'wave_cal_lasers'))) + + # spectro calibration + self._default_center_lrs = np.array(eval(config.get('calibration-spectro', 'default_center_lrs'))) + self._wave_min_lrs = eval(config.get('calibration-spectro', 'wave_min_lrs')) + self._wave_max_lrs = eval(config.get('calibration-spectro', 'wave_max_lrs')) + + self._default_center_mrs = np.array(eval(config.get('calibration-spectro', 'default_center_mrs'))) + self._wave_min_mrs = eval(config.get('calibration-spectro', 'wave_min_mrs')) + self._wave_max_mrs = eval(config.get('calibration-spectro', 'wave_max_mrs')) + + # reduction parameters + self._config = {} + for group in ['reduction', 'reduction-spectro']: + items = dict(config.items(group)) + self._config.update(items) + for key, value in items.items(): + try: + val = eval(value) + except NameError: + val = value + self._config[key] = val - # calibration - self._wave_cal_lasers = np.array(eval(config.get('calibration', 'wave_cal_lasers'))) - - # spectro calibration - self._default_center_lrs = np.array(eval(config.get('calibration-spectro', 'default_center_lrs'))) - self._wave_min_lrs = eval(config.get('calibration-spectro', 'wave_min_lrs')) - self._wave_max_lrs = eval(config.get('calibration-spectro', 'wave_max_lrs')) - - self._default_center_mrs = np.array(eval(config.get('calibration-spectro', 'default_center_mrs'))) - self._wave_min_mrs = eval(config.get('calibration-spectro', 'wave_min_mrs')) - self._wave_max_mrs = eval(config.get('calibration-spectro', 'wave_max_mrs')) - - # reduction parameters - self._config = {} - for group in ['reduction', 'reduction-spectro']: - items = dict(config.items(group)) - self._config.update(items) - for key, value in items.items(): - try: - val = eval(value) - except NameError: - val = value - self._config[key] = val - except configparser.Error as e: - raise ValueError('Error reading configuration file for instrument {0}: {1}'.format(self._instrument, e.message)) - - # execution of recipes - self._recipe_execution = { - 'sort_files': False, - 'sort_frames': False, - 'check_files_association': False, - 'sph_ifs_cal_dark': False, - 'sph_ifs_cal_detector_flat': False, - 'sph_ird_wave_calib': False, - 'sph_ird_preprocess_science': False, - 'sph_ird_star_center': False, - 'sph_ird_wavelength_recalibration': False, - 'sph_ird_combine_data': False, - 'sph_ird_clean': False - } + # + # reduction status + # + self._recipe_execution = collections.OrderedDict( + [('sort_files', False), + ('sort_frames', False), + ('check_files_association', False), + ('sph_ifs_cal_dark', False), + ('sph_ifs_cal_detector_flat', False), + ('sph_ird_wave_calib', False), + ('sph_ird_preprocess_science', False), + ('sph_ird_star_center', False), + ('sph_ird_wavelength_recalibration', False), + ('sph_ird_combine_data', False), + ('sph_ird_clean', False)] + ) # reload any existing data frames - self.read_info() + self._read_info() ################################################## # Representation @@ -414,7 +421,7 @@ def full_reduction(self): self.clean() ################################################## - # SPHERE/IRDIS methods + # Private methods ################################################## def read_info(self): @@ -429,6 +436,9 @@ def read_info(self): frames_info_preproc : dataframe The data frame with all the information on science frames after pre-processing + + This function is not supposed to be called directly by the user. 
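Throughout these read_info()/_read_info() methods, the existence tests all follow one pattern: glob for an expected product and reduce the result to a boolean. The following standalone sketch shows that pattern in isolation; the directory and file names are hypothetical:

    from pathlib import Path

    preproc = Path('/path/to/reduction/preproc')
    expected = ['file1_DIT000_preproc', 'file2_DIT000_preproc']

    # a recipe counts as done only if every expected product exists exactly once
    done = all(len(list(preproc.glob('{}.fits'.format(name)))) == 1
               for name in expected)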
+
         '''

         self._logger.info('Read existing reduction information')
@@ -532,6 +542,10 @@
             self._recipe_execution['sph_ird_star_center'] = done
             self._logger.debug('> sph_ird_star_center status = {}'.format(done))

+
+    ##################################################
+    # SPHERE/IRDIS methods
+    ##################################################

     def sort_files(self):
         '''
         Sort all raw files and save result in a data frame

From 2123880f7742c4df3d25647a0e294131ed85d1ea Mon Sep 17 00:00:00 2001
From: Arthur Vigan
Date: Fri, 13 Sep 2019 18:31:35 +0200
Subject: [PATCH 086/101] Implement proper error handling in reduction
 execution for IRDIS imaging

Ticket #41

- creation of class instances is handled with __new__
- __init__ has been removed
- recipe execution now includes the status (success, error) instead of
  just whether or not it was executed
- all calls to raise have been removed
- changed some status values
- toolbox.check_recipe_execution() modified to take error status into
  account
---
 examples/irdis_imaging_reduction.py |   4 +-
 vltpf/IRDIS/ImagingReduction.py     | 293 +++++++++++++++++-----------
 vltpf/__init__.py                   |  11 +-
 vltpf/toolbox.py                    |  13 +-
 4 files changed, 193 insertions(+), 128 deletions(-)

diff --git a/examples/irdis_imaging_reduction.py b/examples/irdis_imaging_reduction.py
index b05c94e..d9609ac 100644
--- a/examples/irdis_imaging_reduction.py
+++ b/examples/irdis_imaging_reduction.py
@@ -5,7 +5,7 @@
 #

 #%% init reduction
-reduction = IRDIS.ImagingReduction('/Users/avigan/data/VLTPF-test-target/IRD/DBI/', log_level='debug')
+reduction = IRDIS.ImagingReduction('/Users/avigan/data/VLTPF-test-target/IRD/DBI/', log_level='info')

 #%% configuration
 reduction.config['combine_psf_dim'] = 80
@@ -23,7 +23,7 @@
 #

 #%% init reduction
-reduction = IRDIS.ImagingReduction('/Users/avigan/data/VLTPF-test-target/IRD/DBI/', log_level='debug')
+reduction = IRDIS.ImagingReduction('/Users/avigan/data/VLTPF-test-target/IRD/DBI/', log_level='info')

 #%% sorting
 reduction.sort_files()

diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py
index 1f71af8..c00d9a7 100644
--- a/vltpf/IRDIS/ImagingReduction.py
+++ b/vltpf/IRDIS/ImagingReduction.py
@@ -44,7 +44,8 @@ class ImagingReduction(object):
         'sph_ird_preprocess_science': ['sort_files', 'sort_frames', 'sph_ird_cal_dark',
                                        'sph_ird_cal_detector_flat'],
         'sph_ird_star_center': ['sort_files', 'sort_frames', 'sph_ird_preprocess_science'],
-        'sph_ird_combine_data': ['sort_files', 'sort_frames', 'sph_ird_preprocess_science']
+        'sph_ird_combine_data': ['sort_files', 'sort_frames', 'sph_ird_preprocess_science'],
+        'sph_ird_clean': []
     }

     ##################################################
     # Constructor
     ##################################################

     def __new__(cls, path, log_level='info'):
-        '''Custom instantiation for the class
+        '''Custom instantiation for the class and initialization for the
+        instances

         The customized instantiation enables to check that the
-        provided path is a valid reduction path. If not None will be
-        returned for the reduction being created
+        provided path is a valid reduction path. If not, None will be
+        returned for the reduction being created. Otherwise, an
+        instance is created and returned at the end. 
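Before the diff continues, it is worth sketching how the recipe_requirements mapping shown above is meant to be consumed. This is a simplified illustration of the dependency check, not the actual code of toolbox.check_recipe_execution(), whose exact implementation is not reproduced in this patch excerpt:

    def can_execute(recipe, requirements, execution):
        # a recipe may run only if all of its prerequisites succeeded
        missing = [req for req in requirements.get(recipe, []) if not execution.get(req)]
        return len(missing) == 0

    requirements = {'sph_ird_star_center': ['sort_files', 'sort_frames', 'sph_ird_preprocess_science']}
    execution = {'sort_files': True, 'sort_frames': True, 'sph_ird_preprocess_science': False}
    print(can_execute('sph_ird_star_center', requirements, execution))   # False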
+ + Parameters + ---------- + path : str + Path to the directory containing the dataset + + level : {'debug', 'info', 'warning', 'error', 'critical'} + The log level of the handler + ''' - # expand path + # + # make sure we are dealing with a proper reduction directory + # + + # init path path = Path(path).expanduser().resolve() # zeroth-order reduction validation @@ -68,32 +84,19 @@ def __new__(cls, path, log_level='info'): _log.error('No raw/ subdirectory. {0} is not a valid reduction path'.format(path)) return None else: - return super(ImagingReduction, cls).__new__(cls) - - - def __init__(self, path, log_level='info'): - '''Initialization of the ImagingReduction instances - - Parameters - ---------- - path : str - Path to the directory containing the dataset - - level : {'debug', 'info', 'warning', 'error', 'critical'} - The log level of the handler - ''' + # it's all good: create instance! + reduction = super(ImagingReduction, cls).__new__(cls) # # basic init # # init path - path = Path(path).expanduser().resolve() - self._path = utils.ReductionPath(path) + reduction._path = utils.ReductionPath(path) # instrument and mode - self._instrument = 'IRDIS' - self._mode = 'Unknown' + reduction._instrument = 'IRDIS' + reduction._mode = 'Unknown' # # logging @@ -104,65 +107,62 @@ def __init__(self, path, log_level='info'): for hdlr in logger.handlers: logger.removeHandler(hdlr) - handler = logging.FileHandler(self._path.products / 'reduction.log', mode='w', encoding='utf-8') + handler = logging.FileHandler(reduction._path.products / 'reduction.log', mode='w', encoding='utf-8') formatter = logging.Formatter('%(asctime)s\t%(levelname)8s\t%(message)s') formatter.default_msec_format = '%s.%03d' handler.setFormatter(formatter) logger.addHandler(handler) - self._logger = logger + reduction._logger = logger - self._logger.info('Creating IRDIS imaging reduction at path {}'.format(path)) + reduction._logger.info('Creating IRDIS imaging reduction at path {}'.format(path)) # # configuration # - configfile = Path(vltpf.__file__).parent / 'instruments' / '{}.ini'.format(self._instrument) + configfile = Path(vltpf.__file__).parent / 'instruments' / '{}.ini'.format(reduction._instrument) config = configparser.ConfigParser() - self._logger.debug('> read default configuration') + reduction._logger.debug('> read default configuration') config.read(configfile) # instrument - self._pixel = float(config.get('instrument', 'pixel')) - self._nwave = 2 + reduction._pixel = float(config.get('instrument', 'pixel')) + reduction._nwave = 2 # calibration - self._wave_cal_lasers = np.array(eval(config.get('calibration', 'wave_cal_lasers'))) + reduction._wave_cal_lasers = np.array(eval(config.get('calibration', 'wave_cal_lasers'))) # imaging calibration - self._default_center = np.array(eval(config.get('calibration-imaging', 'default_center'))) - self._orientation_offset = eval(config.get('calibration-imaging', 'orientation_offset')) + reduction._default_center = np.array(eval(config.get('calibration-imaging', 'default_center'))) + reduction._orientation_offset = eval(config.get('calibration-imaging', 'orientation_offset')) # reduction parameters - self._config = {} + reduction._config = {} for group in ['reduction', 'reduction-imaging']: items = dict(config.items(group)) - self._config.update(items) + reduction._config.update(items) for key, value in items.items(): try: val = eval(value) except NameError: val = value - self._config[key] = val + reduction._config[key] = val # # reduction status # - self._recipe_execution = 
collections.OrderedDict( - [('sort_files', False), - ('sort_frames', False), - ('check_files_association', False), - ('sph_ird_cal_dark', False), - ('sph_ird_cal_detector_flat', False), - ('sph_ird_preprocess_science', False), - ('sph_ird_star_center', False), - ('sph_ird_combine_data', False), - ('sph_ird_clean', False)] - ) + reduction._recipe_execution = collections.OrderedDict() + reduction._recipe_status = vltpf.NOTSET + # reduction._reduction_status = vltpf.INIT # reload any existing data frames - self._read_info() + reduction._read_info() + + # + # return instance + # + return reduction ################################################## # Representation @@ -403,11 +403,11 @@ def _read_info(self): files_info['DET FRAM UTC'] = pd.to_datetime(files_info['DET FRAM UTC'], utc=False) # update recipe execution - self._recipe_execution['sort_files'] = True + self._update_execution('sort_files', vltpf.SUCCESS) if np.any(files_info['PRO CATG'] == 'IRD_MASTER_DARK'): - self._recipe_execution['sph_ird_cal_dark'] = True + self._update_execution('sph_ird_cal_dark', vltpf.SUCCESS) if np.any(files_info['PRO CATG'] == 'IRD_FLAT_FIELD'): - self._recipe_execution['sph_ird_cal_detector_flat'] = True + self._update_execution('sph_ird_cal_detector_flat', vltpf.SUCCESS) # update instrument mode self._mode = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS1 MODE'][0] @@ -429,7 +429,7 @@ def _read_info(self): frames_info['TIME END'] = pd.to_datetime(frames_info['TIME END'], utc=False) # update recipe execution - self._recipe_execution['sort_frames'] = True + self._update_execution('sort_frames', vltpf.SUCCESS) else: frames_info = None @@ -462,7 +462,8 @@ def _read_info(self): fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) file = list(path.preproc.glob('{}.fits'.format(fname))) done = done and (len(file) == 1) - self._recipe_execution['sph_ird_preprocess_science'] = done + if done: + self._update_execution('sph_ird_preprocess_science', vltpf.SUCCESS) self._logger.debug('> sph_ird_preprocess_science status = {}'.format(done)) done = True @@ -472,9 +473,28 @@ def _read_info(self): fname = '{0}_DIT{1:03d}_preproc_centers'.format(file, idx) file = list(path.preproc.glob('{}.fits'.format(fname))) done = done and (len(file) == 1) - self._recipe_execution['sph_ird_star_center'] = done + if done: + self._update_execution('sph_ird_star_center', vltpf.SUCCESS) self._logger.debug('> sph_ird_star_center status = {}'.format(done)) + + def _update_execution(self, recipe, recipe_status): + '''Update execution status for reduction and recipe + + Parameters + ---------- + recipe : str + Recipe name + + recipe_status : vltpf status (int) + Status of the recipe. 
Can be either one of vltpf.NOTSET, + vltpf.SUCCESS or vltpf.ERROR + ''' + + self._logger.debug('> update recipe execution') + + self._recipe_execution[recipe] = recipe_status + self._recipe_execution.move_to_end(recipe) ################################################## # SPHERE/IRDIS methods @@ -490,6 +510,9 @@ def sort_files(self): self._logger.info('Sort raw files') + # update recipe execution + self._update_execution('sort_files', vltpf.NOTSET) + # parameters path = self._path @@ -498,7 +521,9 @@ def sort_files(self): files = [f.stem for f in files] if len(files) == 0: - raise ValueError('No raw FITS files in reduction path') + self._logger.error('No raw FITS files in reduction path') + self._update_execution('sort_files', vltpf.ERROR) + return self._logger.info(' * found {0} raw FITS files'.format(len(files))) @@ -543,7 +568,9 @@ def sort_files(self): # check instruments instru = files_info['SEQ ARM'].unique() if len(instru) != 1: - raise ValueError('Sequence is mixing different instruments: {0}'.format(instru)) + self._logger.error('Sequence is mixing different instruments: {0}'.format(instru)) + self._update_execution('sort_files', vltpf.ERROR) + return # processed column files_info.insert(len(files_info.columns), 'PROCESSED', False) @@ -567,8 +594,7 @@ def sort_files(self): self._files_info = files_info # update recipe execution - self._logger.debug('> update recipe execution') - self._recipe_execution['sort_files'] = True + self._update_execution('sort_files', vltpf.SUCCESS) def sort_frames(self): @@ -583,8 +609,9 @@ def sort_frames(self): self._logger.info('Extract frames information') # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sort_frames', self.recipe_requirements, - logger=self._logger) + if not toolbox.check_recipe_execution(self._recipe_execution, 'sort_frames', + self.recipe_requirements, logger=self._logger): + return # parameters path = self._path @@ -595,7 +622,9 @@ def sort_frames(self): # raise error when no science frames are present if len(sci_files) == 0: - raise ValueError('This dataset contains no science frame. There should be at least one!') + self._logger.error('This dataset contains no science frame. There should be at least one!') + self._update_execution('sort_frames', vltpf.ERROR) + return # build indices files = [] @@ -624,10 +653,6 @@ def sort_frames(self): frames_info.to_csv(path.preproc / 'frames.csv') self._frames_info = frames_info - # update recipe execution - self._logger.debug('> update recipe execution') - self._recipe_execution['sort_frames'] = True - # # print some info # @@ -671,6 +696,9 @@ def sort_frames(self): self._logger.info(' * Texp: {0:.2f} min'.format(cinfo['DET SEQ1 DIT'].sum()/60)) self._logger.info(' * PA: {0:.2f}° ==> {1:.2f}° = {2:.2f}°'.format(pa_start, pa_end, np.abs(pa_end-pa_start))) self._logger.info(' * POSANG: {0}'.format(', '.join(['{:.2f}°'.format(p) for p in posang]))) + + # update recipe execution + self._update_execution('sort_frames', vltpf.SUCCESS) def check_files_association(self): @@ -681,28 +709,35 @@ def check_files_association(self): interupted in case of error. 
''' - # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'check_files_association', self.recipe_requirements, - logger=self._logger) - self._logger.info('File association for calibrations') + # check if recipe can be executed + if not toolbox.check_recipe_execution(self._recipe_execution, 'check_files_association', + self.recipe_requirements, logger=self._logger): + return + # parameters files_info = self._files_info # instrument arm arm = files_info['SEQ ARM'].unique() if len(arm) != 1: - raise ValueError('Sequence is mixing different instruments: {0}'.format(arm)) + self._logger.error('Sequence is mixing different instruments: {0}'.format(arm)) + self._update_execution('check_files_association', vltpf.ERROR) + return # IRDIS obs mode and filter combination modes = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS1 MODE'].unique() if len(modes) != 1: - raise ValueError('Sequence is mixing different types of observations: {0}'.format(modes)) + self._logger.error('Sequence is mixing different types of observations: {0}'.format(modes)) + self._update_execution('check_files_association', vltpf.ERROR) + return filter_combs = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS COMB IFLT'].unique() if len(filter_combs) != 1: - raise ValueError('Sequence is mixing different types of filters combinations: {0}'.format(filter_combs)) + self._logger.error('Sequence is mixing different types of filters combinations: {0}'.format(filter_combs)) + self._update_execution('check_files_association', vltpf.ERROR) + return filter_comb = filter_combs[0] # specific data frame for calibrations @@ -752,13 +787,13 @@ def check_files_association(self): self._logger.debug('> report status') if error_flag: self._logger.error('There are {0} warning(s) and {1} error(s) in the classification of files'.format(warning_flag, error_flag)) - raise ValueError('There is {0} errors that should be solved before proceeding'.format(error_flag)) + self._update_execution('check_files_association', vltpf.ERROR) + return else: self._logger.warning('There are {0} warning(s) and {1} error(s) in the classification of files'.format(warning_flag, error_flag)) # update recipe execution - self._logger.debug('> update recipe execution') - self._recipe_execution['check_files_association'] = True + self._update_execution('sort_frames', vltpf.SUCCESS) def sph_ird_cal_dark(self, silent=True): @@ -771,12 +806,13 @@ def sph_ird_cal_dark(self, silent=True): Suppress esorex output. Default is True ''' - # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_cal_dark', self.recipe_requirements, - logger=self._logger) - self._logger.info('Darks and backgrounds') + # check if recipe can be executed + if not toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_cal_dark', + self.recipe_requirements, logger=self._logger): + return + # parameters path = self._path files_info = self._files_info @@ -840,7 +876,9 @@ def sph_ird_cal_dark(self, silent=True): # check esorex if shutil.which('esorex') is None: - raise NameError('esorex does not appear to be in your PATH. Please make sure that the ESO pipeline is properly installed before running VLTPF.') + self._logger.error('esorex does not appear to be in your PATH. 
Please make sure that the ESO pipeline is properly installed before running VLTPF.') + self._update_execution('sph_ird_cal_dark', vltpf.ERROR) + return # execute esorex self._logger.debug('> execute esorex') @@ -850,7 +888,9 @@ def sph_ird_cal_dark(self, silent=True): proc = subprocess.run(args, cwd=path.tmp) if proc.returncode != 0: - raise ValueError('esorex process was not successful') + self._logger.error('esorex process was not successful') + self._update_execution('sph_ird_cal_dark', vltpf.ERROR) + return # store products self._logger.debug('> update files_info data frame') @@ -880,8 +920,7 @@ def sph_ird_cal_dark(self, silent=True): files_info.to_csv(path.preproc / 'files.csv') # update recipe execution - self._logger.debug('> update recipe execution') - self._recipe_execution['sph_ird_cal_dark'] = True + self._update_execution('sph_ird_cal_dark', vltpf.SUCCESS) def sph_ird_cal_detector_flat(self, silent=True): @@ -894,12 +933,13 @@ def sph_ird_cal_detector_flat(self, silent=True): Suppress esorex output. Default is True ''' - # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_cal_detector_flat', self.recipe_requirements, - logger=self._logger) - self._logger.info('Instrument flats') + # check if recipe can be executed + if not toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_cal_detector_flat', + self.recipe_requirements, logger=self._logger): + return + # parameters path = self._path files_info = self._files_info @@ -940,7 +980,9 @@ def sph_ird_cal_detector_flat(self, silent=True): # check esorex if shutil.which('esorex') is None: - raise NameError('esorex does not appear to be in your PATH. Please make sure that the ESO pipeline is properly installed before running VLTPF.') + self._logger.error('esorex does not appear to be in your PATH. 
Please make sure that the ESO pipeline is properly installed before running VLTPF.') + self._update_execution('sph_ird_cal_detector_flat', vltpf.ERROR) + return # execute esorex self._logger.debug('> execute esorex') @@ -950,7 +992,9 @@ def sph_ird_cal_detector_flat(self, silent=True): proc = subprocess.run(args, cwd=path.tmp) if proc.returncode != 0: - raise ValueError('esorex process was not successful') + self._logger.error('esorex process was not successful') + self._update_execution('sph_ird_cal_detector_flat', vltpf.ERROR) + return # store products self._logger.debug('> update files_info data frame') @@ -980,8 +1024,7 @@ def sph_ird_cal_detector_flat(self, silent=True): files_info.to_csv(path.preproc / 'files.csv') # update recipe execution - self._logger.debug('> update recipe execution') - self._recipe_execution['sph_ird_cal_detector_flat'] = True + self._update_execution('sph_ird_cal_detector_flat', vltpf.SUCCESS) def sph_ird_preprocess_science(self, @@ -1037,12 +1080,13 @@ def sph_ird_preprocess_science(self, ''' - # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_preprocess_science', self.recipe_requirements, - logger=self._logger) - self._logger.info('Pre-process science files') + # check if recipe can be executed + if not toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_preprocess_science', + self.recipe_requirements, logger=self._logger): + return + # parameters path = self._path files_info = self._files_info @@ -1076,7 +1120,9 @@ def sph_ird_preprocess_science(self, flat_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IRD_FLAT_FIELD') & (files_info['INS COMB IFLT'] == filter_comb)] if len(flat_file) != 1: - raise ValueError('There should be exactly 1 flat file. Found {0}.'.format(len(flat_file))) + self._logger.error('There should be exactly 1 flat file. Found {0}.'.format(len(flat_file))) + self._update_execution('sph_ird_preprocess_science', vltpf.ERROR) + return flat = fits.getdata(path.calib / '{}.fits'.format(flat_file.index[0])) # final dataframe @@ -1119,7 +1165,9 @@ def sph_ird_preprocess_science(self, bkg = fits.getdata(path.calib / '{}.fits'.format(dfiles.index[0])) elif len(dfiles) > 1: # FIXME: handle cases when multiple backgrounds are found? 
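# ----------------------------------------------------------------------
# Illustrative sketch (editor's annotation, not part of the patch): the
# transformation applied throughout this commit, reduced to a standalone
# example. Every raise inside a recipe becomes the same triplet visible
# above and below: log the error, flag the recipe as failed, return.
# Status values mirror vltpf.SUCCESS/vltpf.ERROR; the class is an
# example only.
import logging

SUCCESS, ERROR = 0, 1

class RecipeSketch(object):
    def __init__(self):
        self._logger = logging.getLogger('sketch')
        self._recipe_execution = {}

    def _update_execution(self, recipe, status):
        self._recipe_execution[recipe] = status

    def recipe(self, dfiles):
        if len(dfiles) > 1:
            # old behaviour: raise ValueError(...), aborting the session
            # new behaviour: report, record the failure, hand back control
            self._logger.error('Unexpected number of background files ({0})'.format(len(dfiles)))
            self._update_execution('sph_ird_preprocess_science', ERROR)
            return
        self._update_execution('sph_ird_preprocess_science', SUCCESS)

r = RecipeSketch()
r.recipe(dfiles=[1, 2])        # logs the error instead of raising
print(r._recipe_execution)     # {'sph_ird_preprocess_science': 1}
# ----------------------------------------------------------------------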
- raise ValueError('Unexpected number of background files ({0})'.format(len(dfiles))) + self._logger.error('Unexpected number of background files ({0})'.format(len(dfiles))) + self._update_execution('sph_ird_preprocess_science', vltpf.ERROR) + return # process files for idx, (fname, finfo) in enumerate(sfiles.iterrows()): @@ -1167,7 +1215,9 @@ def sph_ird_preprocess_science(self, frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'mean', logger=self._logger) elif collapse_type == 'coadd': if (not isinstance(coadd_value, int)) or (coadd_value <= 1): - raise TypeError('coadd_value must be an integer >1') + self._logger.error('coadd_value must be an integer >1') + self._update_execution('sph_ird_preprocess_science', vltpf.ERROR) + return coadd_value = int(coadd_value) NDIT = len(img) @@ -1175,7 +1225,9 @@ def sph_ird_preprocess_science(self, dropped = NDIT % coadd_value if coadd_value > NDIT: - raise ValueError('coadd_value ({0}) must be < NDIT ({1})'.format(coadd_value, NDIT)) + self._logger.error('coadd_value ({0}) must be < NDIT ({1})'.format(coadd_value, NDIT)) + self._update_execution('sph_ird_preprocess_science', vltpf.ERROR) + return self._logger.info(' ==> collapse: coadd by {0} ({1} -> {2} frames, {3} dropped)'.format(coadd_value, NDIT, NDIT_new, dropped)) @@ -1187,7 +1239,9 @@ def sph_ird_preprocess_science(self, frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'coadd', coadd_value=coadd_value, logger=self._logger) else: - raise ValueError('Unknown collapse type {0}'.format(collapse_type)) + self._logger.error('Unknown collapse type {0}'.format(collapse_type)) + self._update_execution('sph_ird_preprocess_science', vltpf.ERROR) + return else: frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'none', logger=self._logger) @@ -1244,8 +1298,7 @@ def sph_ird_preprocess_science(self, self._frames_info_preproc = frames_info_preproc # update recipe execution - self._logger.debug('> update recipe execution') - self._recipe_execution['sph_ird_preprocess_science'] = True + self._update_execution('sph_ird_preprocess_science', vltpf.SUCCESS) def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): @@ -1268,12 +1321,13 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): ''' - # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_star_center', self.recipe_requirements, - logger=self._logger) - self._logger.info('Star centers determination') + # check if recipe can be executed + if not toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_star_center', + self.recipe_requirements, logger=self._logger): + return + # parameters path = self._path pixel = self._pixel @@ -1345,8 +1399,7 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): fits.writeto(path.preproc / '{}_centers.fits'.format(fname), img_center, overwrite=True) # update recipe execution - self._logger.debug('> update recipe execution') - self._recipe_execution['sph_ird_star_center'] = True + self._update_execution('sph_ird_star_center', vltpf.SUCCESS) def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_anamorphism=True, @@ -1447,12 +1500,13 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a ''' - # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_combine_data', self.recipe_requirements, - logger=self._logger) - self._logger.info('Combine science data') + # check if recipe can be 
executed
+        if not toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_combine_data',
+                                              self.recipe_requirements, logger=self._logger):
+            return
+
         # parameters
         path = self._path
         nwave = self._nwave
@@ -1487,7 +1541,9 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a
             manual_center = np.array(manual_center)

             if (manual_center.shape != (2,)) and (manual_center.shape != (nwave, 2)):
-                raise ValueError('manual_center does not have the right number of dimensions.')
+                self._logger.error('manual_center does not have the right number of dimensions.')
+                self._update_execution('sph_ird_combine_data', vltpf.ERROR)
+                return

             if manual_center.shape == (2,):
                 manual_center = np.full((nwave, 2), manual_center, dtype=np.float)
@@ -1819,8 +1875,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a
             del sci_cube_scaled

         # update recipe execution
-        self._logger.debug('> update recipe execution')
-        self._recipe_execution['sph_ird_combine_data'] = True
+        self._update_execution('sph_ird_combine_data', vltpf.SUCCESS)


     def sph_ird_clean(self, delete_raw=False, delete_products=False):
@@ -1838,6 +1893,11 @@ def sph_ird_clean(self, delete_raw=False, delete_products=False):

         self._logger.info('Clean reduction data')

+        # check if recipe can be executed
+        if not toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_clean',
+                                              self.recipe_requirements, logger=self._logger):
+            return
+
         # parameters
         path = self._path

@@ -1876,5 +1936,4 @@ def sph_ird_clean(self, delete_raw=False, delete_products=False):
             shutil.rmtree(path.products, ignore_errors=True)

         # update recipe execution
-        self._logger.debug('> update recipe execution')
-        self._recipe_execution['sph_ird_clean'] = True
+        self._update_execution('sph_ird_clean', vltpf.SUCCESS)
diff --git a/vltpf/__init__.py b/vltpf/__init__.py
index 1e0f11d..e29daaa 100644
--- a/vltpf/__init__.py
+++ b/vltpf/__init__.py
@@ -10,15 +10,14 @@
 _log.setLevel(logging.DEBUG)
 _log.info('VLTPF init')

-# reduction status
+# recipe execution status
+NOTSET = -1
 SUCCESS = 0
 ERROR = 1

-INIT = -1
-INCOMPLETE = -2
-COMPLETED = -3
-
-NOTSET = -1000
+# INIT = -1
+# INCOMPLETE = -2
+# COMPLETED = -3


 # log level
diff --git a/vltpf/toolbox.py b/vltpf/toolbox.py
index 94b8925..979e0be 100644
--- a/vltpf/toolbox.py
+++ b/vltpf/toolbox.py
@@ -9,6 +9,7 @@
 import matplotlib.colors as colors
 import logging

+import vltpf
 import vltpf.utils.aperture as aperture

 from astropy.io import fits
@@ -44,17 +45,23 @@ def check_recipe_execution(recipe_execution, recipe_name, recipe_requirements, l
     execute_recipe : bool
         Current recipe can be executed safely
     '''
+
+    recipes = recipe_execution.keys()

     requirements = recipe_requirements[recipe_name]
-
+
     execute_recipe = True
     missing = []
     for r in requirements:
-        if not recipe_execution[r]:
+        if r not in recipes:
+            execute_recipe = False
+            missing.append(r)
+        elif recipe_execution[r] != vltpf.SUCCESS:
             execute_recipe = False
             missing.append(r)

     if not execute_recipe:
-        raise ValueError('{0} cannot executed because some files have been removed from the reduction directory or the following recipes have not been executed: {0}. '.format(recipe_name, missing))
+        logger.error('{} cannot be executed because the following recipes have not been executed or have resulted in unrecoverable errors: {}. 
'.format(recipe_name, missing))
+        recipe_execution[recipe_name] = vltpf.ERROR

     logger.debug('> execution requirements check for {}: {}'.format(recipe_name, execute_recipe))

From ecc0639c4b43022621b9ddae9025baf033e0d897 Mon Sep 17 00:00:00 2001
From: Arthur Vigan
Date: Fri, 13 Sep 2019 18:47:57 +0200
Subject: [PATCH 087/101] More updates for error handling during execution

Ticket #41
- renamed toolbox.check_recipe_execution() to toolbox.recipe_executable()
- renamed ImagingReduction._recipe_execution to ImagingReduction._recipes_status
---
 vltpf/IRDIS/ImagingReduction.py | 111 ++++++++++++++++----------------
 vltpf/toolbox.py                |  12 ++--
 2 files changed, 61 insertions(+), 62 deletions(-)

diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py
index c00d9a7..10a5875 100644
--- a/vltpf/IRDIS/ImagingReduction.py
+++ b/vltpf/IRDIS/ImagingReduction.py
@@ -152,8 +152,7 @@ def __new__(cls, path, log_level='info'):
         #
         # reduction status
         #
-        reduction._recipe_execution = collections.OrderedDict()
-        reduction._recipe_status = vltpf.NOTSET
+        reduction._recipes_status = collections.OrderedDict()
         # reduction._reduction_status = vltpf.INIT

         # reload any existing data frames
         reduction._read_info()
@@ -207,8 +206,8 @@ def frames_info_preproc(self):
         return self._frames_info_preproc

     @property
-    def recipe_execution(self):
-        return self._recipe_execution
+    def recipes_status(self):
+        return self._recipes_status

     @property
     def config(self):
@@ -403,11 +402,11 @@ def _read_info(self):
             files_info['DET FRAM UTC'] = pd.to_datetime(files_info['DET FRAM UTC'], utc=False)

             # update recipe execution
-            self._update_execution('sort_files', vltpf.SUCCESS)
+            self._update_recipe_status('sort_files', vltpf.SUCCESS)
             if np.any(files_info['PRO CATG'] == 'IRD_MASTER_DARK'):
-                self._update_execution('sph_ird_cal_dark', vltpf.SUCCESS)
+                self._update_recipe_status('sph_ird_cal_dark', vltpf.SUCCESS)
             if np.any(files_info['PRO CATG'] == 'IRD_FLAT_FIELD'):
-                self._update_execution('sph_ird_cal_detector_flat', vltpf.SUCCESS)
+                self._update_recipe_status('sph_ird_cal_detector_flat', vltpf.SUCCESS)

             # update instrument mode
             self._mode = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS1 MODE'][0]
@@ -429,7 +428,7 @@ def _read_info(self):
             frames_info['TIME END'] = pd.to_datetime(frames_info['TIME END'], utc=False)

             # update recipe execution
-            self._update_execution('sort_frames', vltpf.SUCCESS)
+            self._update_recipe_status('sort_frames', vltpf.SUCCESS)
         else:
             frames_info = None

@@ -463,7 +462,7 @@ def _read_info(self):
                 file = list(path.preproc.glob('{}.fits'.format(fname)))
                 done = done and (len(file) == 1)
             if done:
-                self._update_execution('sph_ird_preprocess_science', vltpf.SUCCESS)
+                self._update_recipe_status('sph_ird_preprocess_science', vltpf.SUCCESS)
             self._logger.debug('> sph_ird_preprocess_science status = {}'.format(done))

             done = True
@@ -474,11 +473,11 @@ def _read_info(self):
                 file = list(path.preproc.glob('{}.fits'.format(fname)))
                 done = done and (len(file) == 1)
             if done:
-                self._update_execution('sph_ird_star_center', vltpf.SUCCESS)
+                self._update_recipe_status('sph_ird_star_center', vltpf.SUCCESS)
             self._logger.debug('> sph_ird_star_center status = {}'.format(done))


-    def _update_execution(self, recipe, recipe_status):
+    def _update_recipe_status(self, recipe, recipe_status):
         '''Update execution status for reduction and recipe

         Parameters
@@ -493,8 +492,8 @@ def _update_recipe_status(self, recipe, recipe_status):

         self._logger.debug('> update recipe execution')

-        self._recipe_execution[recipe] = recipe_status
-
self._recipe_execution.move_to_end(recipe) + self._recipes_status[recipe] = recipe_status + self._recipes_status.move_to_end(recipe) ################################################## # SPHERE/IRDIS methods @@ -511,7 +510,7 @@ def sort_files(self): self._logger.info('Sort raw files') # update recipe execution - self._update_execution('sort_files', vltpf.NOTSET) + self._update_recipe_status('sort_files', vltpf.NOTSET) # parameters path = self._path @@ -522,7 +521,7 @@ def sort_files(self): if len(files) == 0: self._logger.error('No raw FITS files in reduction path') - self._update_execution('sort_files', vltpf.ERROR) + self._update_recipe_status('sort_files', vltpf.ERROR) return self._logger.info(' * found {0} raw FITS files'.format(len(files))) @@ -569,7 +568,7 @@ def sort_files(self): instru = files_info['SEQ ARM'].unique() if len(instru) != 1: self._logger.error('Sequence is mixing different instruments: {0}'.format(instru)) - self._update_execution('sort_files', vltpf.ERROR) + self._update_recipe_status('sort_files', vltpf.ERROR) return # processed column @@ -594,7 +593,7 @@ def sort_files(self): self._files_info = files_info # update recipe execution - self._update_execution('sort_files', vltpf.SUCCESS) + self._update_recipe_status('sort_files', vltpf.SUCCESS) def sort_frames(self): @@ -609,8 +608,8 @@ def sort_frames(self): self._logger.info('Extract frames information') # check if recipe can be executed - if not toolbox.check_recipe_execution(self._recipe_execution, 'sort_frames', - self.recipe_requirements, logger=self._logger): + if not toolbox.recipe_executable(self._recipes_status, 'sort_frames', + self.recipe_requirements, logger=self._logger): return # parameters @@ -623,7 +622,7 @@ def sort_frames(self): # raise error when no science frames are present if len(sci_files) == 0: self._logger.error('This dataset contains no science frame. 
There should be at least one!') - self._update_execution('sort_frames', vltpf.ERROR) + self._update_recipe_status('sort_frames', vltpf.ERROR) return # build indices @@ -698,7 +697,7 @@ def sort_frames(self): self._logger.info(' * POSANG: {0}'.format(', '.join(['{:.2f}°'.format(p) for p in posang]))) # update recipe execution - self._update_execution('sort_frames', vltpf.SUCCESS) + self._update_recipe_status('sort_frames', vltpf.SUCCESS) def check_files_association(self): @@ -712,8 +711,8 @@ def check_files_association(self): self._logger.info('File association for calibrations') # check if recipe can be executed - if not toolbox.check_recipe_execution(self._recipe_execution, 'check_files_association', - self.recipe_requirements, logger=self._logger): + if not toolbox.recipe_executable(self._recipes_status, 'check_files_association', + self.recipe_requirements, logger=self._logger): return # parameters @@ -723,20 +722,20 @@ def check_files_association(self): arm = files_info['SEQ ARM'].unique() if len(arm) != 1: self._logger.error('Sequence is mixing different instruments: {0}'.format(arm)) - self._update_execution('check_files_association', vltpf.ERROR) + self._update_recipe_status('check_files_association', vltpf.ERROR) return # IRDIS obs mode and filter combination modes = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS1 MODE'].unique() if len(modes) != 1: self._logger.error('Sequence is mixing different types of observations: {0}'.format(modes)) - self._update_execution('check_files_association', vltpf.ERROR) + self._update_recipe_status('check_files_association', vltpf.ERROR) return filter_combs = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS COMB IFLT'].unique() if len(filter_combs) != 1: self._logger.error('Sequence is mixing different types of filters combinations: {0}'.format(filter_combs)) - self._update_execution('check_files_association', vltpf.ERROR) + self._update_recipe_status('check_files_association', vltpf.ERROR) return filter_comb = filter_combs[0] @@ -787,13 +786,13 @@ def check_files_association(self): self._logger.debug('> report status') if error_flag: self._logger.error('There are {0} warning(s) and {1} error(s) in the classification of files'.format(warning_flag, error_flag)) - self._update_execution('check_files_association', vltpf.ERROR) + self._update_recipe_status('check_files_association', vltpf.ERROR) return else: self._logger.warning('There are {0} warning(s) and {1} error(s) in the classification of files'.format(warning_flag, error_flag)) # update recipe execution - self._update_execution('sort_frames', vltpf.SUCCESS) + self._update_recipe_status('sort_frames', vltpf.SUCCESS) def sph_ird_cal_dark(self, silent=True): @@ -809,8 +808,8 @@ def sph_ird_cal_dark(self, silent=True): self._logger.info('Darks and backgrounds') # check if recipe can be executed - if not toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_cal_dark', - self.recipe_requirements, logger=self._logger): + if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_cal_dark', + self.recipe_requirements, logger=self._logger): return # parameters @@ -877,7 +876,7 @@ def sph_ird_cal_dark(self, silent=True): # check esorex if shutil.which('esorex') is None: self._logger.error('esorex does not appear to be in your PATH. 
Please make sure that the ESO pipeline is properly installed before running VLTPF.') - self._update_execution('sph_ird_cal_dark', vltpf.ERROR) + self._update_recipe_status('sph_ird_cal_dark', vltpf.ERROR) return # execute esorex @@ -889,7 +888,7 @@ def sph_ird_cal_dark(self, silent=True): if proc.returncode != 0: self._logger.error('esorex process was not successful') - self._update_execution('sph_ird_cal_dark', vltpf.ERROR) + self._update_recipe_status('sph_ird_cal_dark', vltpf.ERROR) return # store products @@ -920,7 +919,7 @@ def sph_ird_cal_dark(self, silent=True): files_info.to_csv(path.preproc / 'files.csv') # update recipe execution - self._update_execution('sph_ird_cal_dark', vltpf.SUCCESS) + self._update_recipe_status('sph_ird_cal_dark', vltpf.SUCCESS) def sph_ird_cal_detector_flat(self, silent=True): @@ -936,8 +935,8 @@ def sph_ird_cal_detector_flat(self, silent=True): self._logger.info('Instrument flats') # check if recipe can be executed - if not toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_cal_detector_flat', - self.recipe_requirements, logger=self._logger): + if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_cal_detector_flat', + self.recipe_requirements, logger=self._logger): return # parameters @@ -981,7 +980,7 @@ def sph_ird_cal_detector_flat(self, silent=True): # check esorex if shutil.which('esorex') is None: self._logger.error('esorex does not appear to be in your PATH. Please make sure that the ESO pipeline is properly installed before running VLTPF.') - self._update_execution('sph_ird_cal_detector_flat', vltpf.ERROR) + self._update_recipe_status('sph_ird_cal_detector_flat', vltpf.ERROR) return # execute esorex @@ -993,7 +992,7 @@ def sph_ird_cal_detector_flat(self, silent=True): if proc.returncode != 0: self._logger.error('esorex process was not successful') - self._update_execution('sph_ird_cal_detector_flat', vltpf.ERROR) + self._update_recipe_status('sph_ird_cal_detector_flat', vltpf.ERROR) return # store products @@ -1024,7 +1023,7 @@ def sph_ird_cal_detector_flat(self, silent=True): files_info.to_csv(path.preproc / 'files.csv') # update recipe execution - self._update_execution('sph_ird_cal_detector_flat', vltpf.SUCCESS) + self._update_recipe_status('sph_ird_cal_detector_flat', vltpf.SUCCESS) def sph_ird_preprocess_science(self, @@ -1083,8 +1082,8 @@ def sph_ird_preprocess_science(self, self._logger.info('Pre-process science files') # check if recipe can be executed - if not toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_preprocess_science', - self.recipe_requirements, logger=self._logger): + if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_preprocess_science', + self.recipe_requirements, logger=self._logger): return # parameters @@ -1121,7 +1120,7 @@ def sph_ird_preprocess_science(self, (files_info['INS COMB IFLT'] == filter_comb)] if len(flat_file) != 1: self._logger.error('There should be exactly 1 flat file. Found {0}.'.format(len(flat_file))) - self._update_execution('sph_ird_preprocess_science', vltpf.ERROR) + self._update_recipe_status('sph_ird_preprocess_science', vltpf.ERROR) return flat = fits.getdata(path.calib / '{}.fits'.format(flat_file.index[0])) @@ -1166,7 +1165,7 @@ def sph_ird_preprocess_science(self, elif len(dfiles) > 1: # FIXME: handle cases when multiple backgrounds are found? 
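# ----------------------------------------------------------------------
# Illustrative sketch (editor's annotation, not part of the patch):
# after this renaming, every recipe opens with the same guard, calling
# toolbox.recipe_executable() and returning immediately when a required
# recipe has not finished successfully. A condensed sketch of that
# check; the two branches of the real loop are collapsed here into one
# dict.get() test, and the names and SUCCESS value mirror the code
# above.
SUCCESS = 0

def recipe_executable(recipes_status, recipe_name, recipe_requirements):
    missing = [r for r in recipe_requirements[recipe_name]
               if recipes_status.get(r) != SUCCESS]
    return len(missing) == 0

recipe_requirements = {'sort_frames': ['sort_files']}
print(recipe_executable({'sort_files': SUCCESS}, 'sort_frames', recipe_requirements))  # True
print(recipe_executable({}, 'sort_frames', recipe_requirements))                       # False
# ----------------------------------------------------------------------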
self._logger.error('Unexpected number of background files ({0})'.format(len(dfiles))) - self._update_execution('sph_ird_preprocess_science', vltpf.ERROR) + self._update_recipe_status('sph_ird_preprocess_science', vltpf.ERROR) return # process files @@ -1216,7 +1215,7 @@ def sph_ird_preprocess_science(self, elif collapse_type == 'coadd': if (not isinstance(coadd_value, int)) or (coadd_value <= 1): self._logger.error('coadd_value must be an integer >1') - self._update_execution('sph_ird_preprocess_science', vltpf.ERROR) + self._update_recipe_status('sph_ird_preprocess_science', vltpf.ERROR) return coadd_value = int(coadd_value) @@ -1226,7 +1225,7 @@ def sph_ird_preprocess_science(self, if coadd_value > NDIT: self._logger.error('coadd_value ({0}) must be < NDIT ({1})'.format(coadd_value, NDIT)) - self._update_execution('sph_ird_preprocess_science', vltpf.ERROR) + self._update_recipe_status('sph_ird_preprocess_science', vltpf.ERROR) return self._logger.info(' ==> collapse: coadd by {0} ({1} -> {2} frames, {3} dropped)'.format(coadd_value, NDIT, NDIT_new, dropped)) @@ -1240,7 +1239,7 @@ def sph_ird_preprocess_science(self, frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'coadd', coadd_value=coadd_value, logger=self._logger) else: self._logger.error('Unknown collapse type {0}'.format(collapse_type)) - self._update_execution('sph_ird_preprocess_science', vltpf.ERROR) + self._update_recipe_status('sph_ird_preprocess_science', vltpf.ERROR) return else: frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'none', logger=self._logger) @@ -1298,7 +1297,7 @@ def sph_ird_preprocess_science(self, self._frames_info_preproc = frames_info_preproc # update recipe execution - self._update_execution('sph_ird_preprocess_science', vltpf.SUCCESS) + self._update_recipe_status('sph_ird_preprocess_science', vltpf.SUCCESS) def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): @@ -1324,8 +1323,8 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): self._logger.info('Star centers determination') # check if recipe can be executed - if not toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_star_center', - self.recipe_requirements, logger=self._logger): + if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_star_center', + self.recipe_requirements, logger=self._logger): return # parameters @@ -1399,7 +1398,7 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): fits.writeto(path.preproc / '{}_centers.fits'.format(fname), img_center, overwrite=True) # update recipe execution - self._update_execution('sph_ird_star_center', vltpf.SUCCESS) + self._update_recipe_status('sph_ird_star_center', vltpf.SUCCESS) def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_anamorphism=True, @@ -1503,8 +1502,8 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a self._logger.info('Combine science data') # check if recipe can be executed - if not toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_combine_data', - self.recipe_requirements, logger=self._logger): + if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_combine_data', + self.recipe_requirements, logger=self._logger): return # parameters @@ -1542,7 +1541,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a if (manual_center.shape != (2,)) and (manual_center.shape != (nwave, 2)): self._logger.error('manual_center does not have the right number of dimensions.') - 
self._update_execution('sph_ird_combine_data', vltpf.ERROR)
+            self._update_recipe_status('sph_ird_combine_data', vltpf.ERROR)
             return

         if manual_center.shape == (2,):
@@ -1875,7 +1874,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a
             del sci_cube_scaled

         # update recipe execution
-        self._update_execution('sph_ird_combine_data', vltpf.SUCCESS)
+        self._update_recipe_status('sph_ird_combine_data', vltpf.SUCCESS)


     def sph_ird_clean(self, delete_raw=False, delete_products=False):
@@ -1894,8 +1893,8 @@ def sph_ird_clean(self, delete_raw=False, delete_products=False):
         self._logger.info('Clean reduction data')

         # check if recipe can be executed
-        if not toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_clean',
-                                              self.recipe_requirements, logger=self._logger):
+        if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_clean',
+                                         self.recipe_requirements, logger=self._logger):
             return

         # parameters
         path = self._path

@@ -1936,4 +1935,4 @@ def sph_ird_clean(self, delete_raw=False, delete_products=False):
             shutil.rmtree(path.products, ignore_errors=True)

         # update recipe execution
-        self._update_execution('sph_ird_clean', vltpf.SUCCESS)
+        self._update_recipe_status('sph_ird_clean', vltpf.SUCCESS)
diff --git a/vltpf/toolbox.py b/vltpf/toolbox.py
index 979e0be..fbb3710 100644
--- a/vltpf/toolbox.py
+++ b/vltpf/toolbox.py
@@ -22,13 +22,13 @@
 _log = logging.getLogger(__name__)


-def check_recipe_execution(recipe_execution, recipe_name, recipe_requirements, logger=_log):
+def recipe_executable(recipes_status, recipe_name, recipe_requirements, logger=_log):
     '''
-    Check execution of previous recipes for a given recipe.
+    Check if a recipe is executable given the status of other recipes

     Parameters
     ----------
-    recipe_execution : dict
+    recipes_status : dict
         Status of executed recipes

     recipe_name : str
@@ -46,7 +46,7 @@ def recipe_executable(recipes_status, recipe_name, recipe_requirements, logger=_
     execute_recipe : bool
         Current recipe can be executed safely
     '''

-    recipes = recipe_execution.keys()
+    recipes = recipes_status.keys()
     requirements = recipe_requirements[recipe_name]

     execute_recipe = True
@@ -55,13 +55,13 @@ def recipe_executable(recipes_status, recipe_name, recipe_requirements, logger=_
         if r not in recipes:
            execute_recipe = False
            missing.append(r)
-        elif recipe_execution[r] != vltpf.SUCCESS:
+        elif recipes_status[r] != vltpf.SUCCESS:
             execute_recipe = False
             missing.append(r)

     if not execute_recipe:
         logger.error('{} cannot be executed because the following recipes have not been executed or have resulted in unrecoverable errors: {}. 
'.format(recipe_name, missing)) - recipe_execution[recipe_name] = vltpf.ERROR + recipes_status[recipe_name] = vltpf.ERROR logger.debug('> execution requirements check for {}: {}'.format(recipe_name, execute_recipe)) From 1fb85332e629994c3a17cb5af89526cc777a1007 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Fri, 13 Sep 2019 19:27:34 +0200 Subject: [PATCH 088/101] Fix wrong recipe name Ticket #41 --- vltpf/IRDIS/ImagingReduction.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py index 10a5875..355ea9a 100644 --- a/vltpf/IRDIS/ImagingReduction.py +++ b/vltpf/IRDIS/ImagingReduction.py @@ -153,7 +153,6 @@ def __new__(cls, path, log_level='info'): # reduction status # reduction._recipes_status = collections.OrderedDict() - # reduction._reduction_status = vltpf.INIT # reload any existing data frames reduction._read_info() @@ -792,7 +791,7 @@ def check_files_association(self): self._logger.warning('There are {0} warning(s) and {1} error(s) in the classification of files'.format(warning_flag, error_flag)) # update recipe execution - self._update_recipe_status('sort_frames', vltpf.SUCCESS) + self._update_recipe_status('check_files_association', vltpf.SUCCESS) def sph_ird_cal_dark(self, silent=True): From dbbc61111cf037285510d2b570bff35bc6b01e9a Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Fri, 13 Sep 2019 20:13:16 +0200 Subject: [PATCH 089/101] Implement proper error handling in reduction execution for IRDIS spectro Ticket #41 Implementation of error handling in IRDIS.SpectroReduction --- examples/irdis_spectro_reduction.py | 4 +- vltpf/IRDIS/SpectroReduction.py | 372 +++++++++++++++++----------- 2 files changed, 226 insertions(+), 150 deletions(-) diff --git a/examples/irdis_spectro_reduction.py b/examples/irdis_spectro_reduction.py index 959715b..e067e91 100644 --- a/examples/irdis_spectro_reduction.py +++ b/examples/irdis_spectro_reduction.py @@ -5,7 +5,7 @@ # #%% init reduction -reduction = IRDIS.SpectroReduction('/Users/avigan/data/VLTPF-test-target/IRD/LSS/', log_level='debug') +reduction = IRDIS.SpectroReduction('/Users/avigan/data/VLTPF-test-target/IRD/LSS/', log_level='info') #%% configuration reduction.config['combine_science_dim'] = 300 @@ -20,7 +20,7 @@ # #%% init reduction -reduction = IRDIS.SpectroReduction('/Users/avigan/data/VLTPF-test-target/IRD/LSS/', log_level='debug') +reduction = IRDIS.SpectroReduction('/Users/avigan/data/VLTPF-test-target/IRD/LSS/', log_level='info') #%% sorting reduction.sort_files() diff --git a/vltpf/IRDIS/SpectroReduction.py b/vltpf/IRDIS/SpectroReduction.py index b094413..e4e9e5e 100644 --- a/vltpf/IRDIS/SpectroReduction.py +++ b/vltpf/IRDIS/SpectroReduction.py @@ -88,7 +88,8 @@ class SpectroReduction(object): 'sph_ird_cal_detector_flat'], 'sph_ird_star_center': ['sort_files', 'sort_frames', 'sph_ird_wave_calib'], 'sph_ird_wavelength_recalibration': ['sort_files', 'sort_frames', 'sph_ird_wave_calib'], - 'sph_ird_combine_data': ['sort_files', 'sort_frames', 'sph_ird_preprocess_science'] + 'sph_ird_combine_data': ['sort_files', 'sort_frames', 'sph_ird_preprocess_science'], + 'sph_ird_clean': [] } ################################################## @@ -96,14 +97,29 @@ class SpectroReduction(object): ################################################## def __new__(cls, path, log_level='info'): - '''Custom instantiation for the class + '''Custom instantiation for the class and initialization for the + instances The customized instantiation enables to check that the - 
provided path is a valid reduction path. If not None will be
-        returned for the reduction being created
+        provided path is a valid reduction path. If not, None will be
+        returned for the reduction being created. Otherwise, an
+        instance is created and returned at the end.
+
+        Parameters
+        ----------
+        path : str
+            Path to the directory containing the dataset
+
+        log_level : {'debug', 'info', 'warning', 'error', 'critical'}
+            The log level of the handler
+
         '''

-        # expand path
+        #
+        # make sure we are dealing with a proper reduction directory
+        #
+
+        # init path
         path = Path(path).expanduser().resolve()

         # zeroth-order reduction validation
@@ -112,32 +128,19 @@ def __new__(cls, path, log_level='info'):
             _log.error('No raw/ subdirectory. {0} is not a valid reduction path'.format(path))
             return None
         else:
-            return super(SpectroReduction, cls).__new__(cls)
-
-
-    def __init__(self, path, log_level='info'):
-        '''Initialization of the SpectroReduction instances
-
-        Parameters
-        ----------
-        path : str
-            Path to the directory containing the raw data
-
-        level : {'debug', 'info', 'warning', 'error', 'critical'}
-            The log level of the handler
-        '''
+            # it's all good: create instance!
+            reduction = super(SpectroReduction, cls).__new__(cls)

         #
         # basic init
         #

         # init path
-        path = Path(path).expanduser().resolve()
-        self._path = utils.ReductionPath(path)
+        reduction._path = utils.ReductionPath(path)

         # instrument and mode
-        self._instrument = 'IRDIS'
-        self._mode = 'Unknown'
+        reduction._instrument = 'IRDIS'
+        reduction._mode = 'Unknown'

         #
         # logging
         #
@@ -148,72 +151,65 @@ def __init__(self, path, log_level='info'):
         for hdlr in logger.handlers:
             logger.removeHandler(hdlr)

-        handler = logging.FileHandler(self._path.products / 'reduction.log', mode='w', encoding='utf-8')
+        handler = logging.FileHandler(reduction._path.products / 'reduction.log', mode='w', encoding='utf-8')
         formatter = logging.Formatter('%(asctime)s\t%(levelname)8s\t%(message)s')
         formatter.default_msec_format = '%s.%03d'
         handler.setFormatter(formatter)
         logger.addHandler(handler)

-        self._logger = logger
+        reduction._logger = logger

-        self._logger.info('Creating IRDIS spectroscopy reduction at path {}'.format(path))
+        reduction._logger.info('Creating IRDIS spectroscopy reduction at path {}'.format(path))

         #
         # configuration
         #
-        configfile = Path(vltpf.__file__).parent / 'instruments' / '{}.ini'.format(self._instrument)
+        configfile = Path(vltpf.__file__).parent / 'instruments' / '{}.ini'.format(reduction._instrument)
         config = configparser.ConfigParser()

-        self._logger.debug('> read configuration')
+        reduction._logger.debug('> read configuration')
         config.read(configfile)

         # instrument
-        self._pixel = float(config.get('instrument', 'pixel'))
-        self._nwave = -1
+        reduction._pixel = float(config.get('instrument', 'pixel'))
+        reduction._nwave = -1

         # calibration
-        self._wave_cal_lasers = np.array(eval(config.get('calibration', 'wave_cal_lasers')))
+        reduction._wave_cal_lasers = np.array(eval(config.get('calibration', 'wave_cal_lasers')))

         # spectro calibration
-        self._default_center_lrs = np.array(eval(config.get('calibration-spectro', 'default_center_lrs')))
-        self._wave_min_lrs = eval(config.get('calibration-spectro', 'wave_min_lrs'))
-        self._wave_max_lrs = eval(config.get('calibration-spectro', 'wave_max_lrs'))
+        reduction._default_center_lrs = np.array(eval(config.get('calibration-spectro', 'default_center_lrs')))
+        reduction._wave_min_lrs = eval(config.get('calibration-spectro', 'wave_min_lrs'))
+        reduction._wave_max_lrs =
eval(config.get('calibration-spectro', 'wave_max_lrs')) - self._default_center_mrs = np.array(eval(config.get('calibration-spectro', 'default_center_mrs'))) - self._wave_min_mrs = eval(config.get('calibration-spectro', 'wave_min_mrs')) - self._wave_max_mrs = eval(config.get('calibration-spectro', 'wave_max_mrs')) + reduction._default_center_mrs = np.array(eval(config.get('calibration-spectro', 'default_center_mrs'))) + reduction._wave_min_mrs = eval(config.get('calibration-spectro', 'wave_min_mrs')) + reduction._wave_max_mrs = eval(config.get('calibration-spectro', 'wave_max_mrs')) # reduction parameters - self._config = {} + reduction._config = {} for group in ['reduction', 'reduction-spectro']: items = dict(config.items(group)) - self._config.update(items) + reduction._config.update(items) for key, value in items.items(): try: val = eval(value) except NameError: val = value - self._config[key] = val + reduction._config[key] = val # # reduction status # - self._recipe_execution = collections.OrderedDict( - [('sort_files', False), - ('sort_frames', False), - ('check_files_association', False), - ('sph_ifs_cal_dark', False), - ('sph_ifs_cal_detector_flat', False), - ('sph_ird_wave_calib', False), - ('sph_ird_preprocess_science', False), - ('sph_ird_star_center', False), - ('sph_ird_wavelength_recalibration', False), - ('sph_ird_combine_data', False), - ('sph_ird_clean', False)] - ) + reduction._recipes_status = collections.OrderedDict() # reload any existing data frames - self._read_info() + reduction._read_info() + + # + # return instance + # + return reduction ################################################## # Representation @@ -258,8 +254,8 @@ def frames_info_preproc(self): return self._frames_info_preproc @property - def recipe_execution(self): - return self._recipe_execution + def recipes_status(self): + return self._recipes_status @property def config(self): @@ -424,7 +420,7 @@ def full_reduction(self): # Private methods ################################################## - def read_info(self): + def _read_info(self): ''' Read the files, calibs and frames information from disk @@ -459,13 +455,14 @@ def read_info(self): files_info['DET FRAM UTC'] = pd.to_datetime(files_info['DET FRAM UTC'], utc=False) # update recipe execution - self._recipe_execution['sort_files'] = True + self._update_recipe_status('sort_files', vltpf.SUCCESS) if np.any(files_info['PRO CATG'] == 'IRD_MASTER_DARK'): - self._recipe_execution['sph_ird_cal_dark'] = True + self._update_recipe_status('sph_ird_cal_dark', vltpf.SUCCESS) if np.any(files_info['PRO CATG'] == 'IRD_FLAT_FIELD'): - self._recipe_execution['sph_ird_cal_detector_flat'] = True + self._update_recipe_status('sph_ird_cal_detector_flat', vltpf.SUCCESS) if np.any(files_info['PRO CATG'] == 'IRD_WAVECALIB'): - self._recipe_execution['sph_ird_wave_calib'] = True + # FIXME: change wave_calib into cal_wave + self._update_recipe_status('sph_ird_wave_calib', vltpf.SUCCESS) # update instrument mode self._mode = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS1 MODE'][0] @@ -487,7 +484,7 @@ def read_info(self): frames_info['TIME END'] = pd.to_datetime(frames_info['TIME END'], utc=False) # update recipe execution - self._recipe_execution['sort_frames'] = True + self._update_recipe_status('sort_frames', vltpf.SUCCESS) else: frames_info = None @@ -515,11 +512,13 @@ def read_info(self): # additional checks to update recipe execution if frames_info_preproc is not None: done = (path.preproc / 'wavelength_default.fits').exists() - 
self._recipe_execution['sph_ird_wave_calib'] = done + if done: + self._update_recipe_status('sph_ird_wave_calib', vltpf.SUCCESS) self._logger.debug('> sph_ird_wave_calib status = {}'.format(done)) done = (path.preproc / 'wavelength_recalibrated.fits').exists() - self._recipe_execution['sph_ird_wavelength_recalibration'] = done + if done: + self._update_recipe_status('sph_ird_wavelength_recalibration', vltpf.SUCCESS) self._logger.debug('> sph_ird_wavelength_recalibration status = {}'.format(done)) done = True @@ -528,7 +527,8 @@ def read_info(self): fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) file = list(path.preproc.glob('{}.fits'.format(fname))) done = done and (len(file) == 1) - self._recipe_execution['sph_ird_preprocess_science'] = done + if done: + self._update_recipe_status('sph_ird_preprocess_science', vltpf.SUCCESS) self._logger.debug('> sph_ird_preprocess_science status = {}'.format(done)) done = True @@ -538,10 +538,29 @@ def read_info(self): fname = '{0}_DIT{1:03d}_preproc_centers'.format(file, idx) file = list(path.preproc.glob('{}.fits'.format(fname))) done = done and (len(file) == 1) - self._recipe_execution['sph_ird_star_center'] = done + if done: + self._update_recipe_status('sph_ird_star_center', vltpf.SUCCESS) self._logger.debug('> sph_ird_star_center status = {}'.format(done)) + def _update_recipe_status(self, recipe, recipe_status): + '''Update execution status for reduction and recipe + + Parameters + ---------- + recipe : str + Recipe name + + recipe_status : vltpf status (int) + Status of the recipe. Can be either one of vltpf.NOTSET, + vltpf.SUCCESS or vltpf.ERROR + ''' + + self._logger.debug('> update recipe execution') + + self._recipes_status[recipe] = recipe_status + self._recipes_status.move_to_end(recipe) + ################################################## # SPHERE/IRDIS methods ################################################## @@ -556,6 +575,9 @@ def sort_files(self): self._logger.info('Sort raw files') + # update recipe execution + self._update_recipe_status('sort_files', vltpf.NOTSET) + # parameters path = self._path @@ -564,7 +586,9 @@ def sort_files(self): files = [f.stem for f in files] if len(files) == 0: - raise ValueError('No raw FITS files in reduction path') + self._logger.error('No raw FITS files in reduction path') + self._update_recipe_status('sort_files', vltpf.ERROR) + return self._logger.info(' * found {0} raw FITS files'.format(len(files))) @@ -609,7 +633,9 @@ def sort_files(self): # check instruments instru = files_info['SEQ ARM'].unique() if len(instru) != 1: - raise ValueError('Sequence is mixing different instruments: {0}'.format(instru)) + self._logger.error('Sequence is mixing different instruments: {0}'.format(instru)) + self._update_recipe_status('sort_files', vltpf.ERROR) + return # processed column files_info.insert(len(files_info.columns), 'PROCESSED', False) @@ -633,8 +659,7 @@ def sort_files(self): self._files_info = files_info # update recipe execution - self._logger.debug('> update recipe execution') - self._recipe_execution['sort_files'] = True + self._update_recipe_status('sort_files', vltpf.SUCCESS) def sort_frames(self): @@ -649,8 +674,9 @@ def sort_frames(self): self._logger.info('Extract frames information') # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sort_frames', self.recipe_requirements, - logger=self._logger) + if not toolbox.recipe_executable(self._recipes_status, 'sort_frames', + self.recipe_requirements, logger=self._logger): + return # parameters path = 
self._path
         files_info = self._files_info

         # science files
         sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE') & (files_info['DPR TYPE'] != 'SKY')]

-        # raise error when no science frames are present
+        # report error when no science frames are present
         if len(sci_files) == 0:
-            raise ValueError('This dataset contains no science frame. There should be at least one!')
+            self._logger.error('This dataset contains no science frame. There should be at least one!')
+            self._update_recipe_status('sort_frames', vltpf.ERROR)
+            return

         # build indices
         files = []
@@ -690,10 +718,6 @@ def sort_frames(self):
         frames_info.to_csv(path.preproc / 'frames.csv')
         self._frames_info = frames_info

-        # update recipe execution
-        self._logger.debug('> update recipe execution')
-        self._recipe_execution['sort_frames'] = True
-
         #
         # print some info
         #
@@ -738,6 +762,9 @@ def sort_frames(self):
         self._logger.info(' * PA: {0:.2f}° ==> {1:.2f}° = {2:.2f}°'.format(pa_start, pa_end, np.abs(pa_end-pa_start)))
         self._logger.info(' * POSANG: {0}'.format(', '.join(['{:.2f}°'.format(p) for p in posang])))

+        # update recipe execution
+        self._update_recipe_status('sort_frames', vltpf.SUCCESS)
+

     def check_files_association(self):
         '''
@@ -747,12 +774,13 @@ def check_files_association(self):
         interupted in case of error.
         '''

-        # check if recipe can be executed
-        toolbox.check_recipe_execution(self._recipe_execution, 'check_files_association', self.recipe_requirements,
-                                       logger=self._logger)
-
         self._logger.info('File association for calibrations')

+        # check if recipe can be executed
+        if not toolbox.recipe_executable(self._recipes_status, 'check_files_association',
+                                         self.recipe_requirements, logger=self._logger):
+            return
+
         # parameters
         path = self._path
         files_info = self._files_info

         # instrument arm
         arm = files_info['SEQ ARM'].unique()
         if len(arm) != 1:
-            raise ValueError('Sequence is mixing different instruments: {0}'.format(arm))
+            self._logger.error('Sequence is mixing different instruments: {0}'.format(arm))
+            self._update_recipe_status('check_files_association', vltpf.ERROR)
+            return

         # IRDIS obs mode and filter combination
         modes = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS1 MODE'].unique()
         if len(modes) != 1:
-            raise ValueError('Sequence is mixing different types of observations: {0}'.format(modes))
+            self._logger.error('Sequence is mixing different types of observations: {0}'.format(modes))
+            self._update_recipe_status('check_files_association', vltpf.ERROR)
+            return

         filter_combs = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS COMB IFLT'].unique()
         if len(filter_combs) != 1:
-            raise ValueError('Sequence is mixing different types of filters combinations: {0}'.format(filter_combs))
+            self._logger.error('Sequence is mixing different types of filters combinations: {0}'.format(filter_combs))
+            self._update_recipe_status('check_files_association', vltpf.ERROR)
+            return
+
         filter_comb = filter_combs[0]
         if (filter_comb != 'S_LR') and (filter_comb != 'S_MR'):
-            raise ValueError('Unknown IRDIS-LSS filter combination/mode {0}'.format(filter_comb))
+            self._logger.error('Unknown IRDIS-LSS filter combination/mode {0}'.format(filter_comb))
+            self._update_recipe_status('check_files_association', vltpf.ERROR)
+            return

         # specific data frame for calibrations
         # keep static calibrations and sky backgrounds
@@ -840,7 +877,8 @@ def check_files_association(self):
         self._logger.debug('> report status')
         if error_flag:
             self._logger.error('There are {0} warning(s) 
and {1} error(s) in the classification of files'.format(warning_flag, error_flag)) - raise ValueError('There is {0} errors that should be solved before proceeding'.format(error_flag)) + self._update_recipe_status('check_files_association', vltpf.ERROR) + return else: self._logger.warning('There are {0} warning(s) and {1} error(s) in the classification of files'.format(warning_flag, error_flag)) @@ -849,8 +887,7 @@ def check_files_association(self): self._files_info = files_info # update recipe execution - self._logger.debug('> update recipe execution') - self._recipe_execution['check_files_association'] = True + self._update_recipe_status('check_files_association', vltpf.SUCCESS) def sph_ird_cal_dark(self, silent=True): @@ -863,12 +900,13 @@ def sph_ird_cal_dark(self, silent=True): Suppress esorex output. Default is True ''' - # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_cal_dark', self.recipe_requirements, - logger=self._logger) - self._logger.info('Darks and backgrounds') + # check if recipe can be executed + if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_cal_dark', + self.recipe_requirements, logger=self._logger): + return + # parameters path = self._path files_info = self._files_info @@ -932,7 +970,9 @@ def sph_ird_cal_dark(self, silent=True): # check esorex if shutil.which('esorex') is None: - raise NameError('esorex does not appear to be in your PATH. Please make sure that the ESO pipeline is properly installed before running VLTPF.') + self._logger.error('esorex does not appear to be in your PATH. Please make sure that the ESO pipeline is properly installed before running VLTPF.') + self._update_recipe_status('sph_ird_cal_dark', vltpf.ERROR) + return # execute esorex self._logger.debug('> execute esorex') @@ -942,7 +982,9 @@ def sph_ird_cal_dark(self, silent=True): proc = subprocess.run(args, cwd=path.tmp) if proc.returncode != 0: - raise ValueError('esorex process was not successful') + self._logger.error('esorex process was not successful') + self._update_recipe_status('sph_ird_cal_dark', vltpf.ERROR) + return # store products self._logger.debug('> update files_info data frame') @@ -972,8 +1014,7 @@ def sph_ird_cal_dark(self, silent=True): files_info.to_csv(path.preproc / 'files.csv') # update recipe execution - self._logger.debug('> update recipe execution') - self._recipe_execution['sph_ird_cal_dark'] = True + self._update_recipe_status('sph_ird_cal_dark', vltpf.SUCCESS) def sph_ird_cal_detector_flat(self, silent=True): @@ -986,12 +1027,13 @@ def sph_ird_cal_detector_flat(self, silent=True): Suppress esorex output. Default is True ''' - # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_cal_detector_flat', self.recipe_requirements, - logger=self._logger) - self._logger.info('Instrument flats') + # check if recipe can be executed + if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_cal_detector_flat', + self.recipe_requirements, logger=self._logger): + return + # parameters path = self._path files_info = self._files_info @@ -1031,7 +1073,9 @@ def sph_ird_cal_detector_flat(self, silent=True): # check esorex if shutil.which('esorex') is None: - raise NameError('esorex does not appear to be in your PATH. Please make sure that the ESO pipeline is properly installed before running VLTPF.') + self._logger.error('esorex does not appear to be in your PATH. 
Please make sure that the ESO pipeline is properly installed before running VLTPF.')
+            self._update_recipe_status('sph_ird_cal_detector_flat', vltpf.ERROR)
+            return
 
         # execute esorex
         self._logger.debug('> execute esorex')
@@ -1041,7 +1085,9 @@ def sph_ird_cal_detector_flat(self, silent=True):
             proc = subprocess.run(args, cwd=path.tmp)
 
             if proc.returncode != 0:
-                raise ValueError('esorex process was not successful')
+                self._logger.error('esorex process was not successful')
+                self._update_recipe_status('sph_ird_cal_detector_flat', vltpf.ERROR)
+                return
 
         # store products
         self._logger.debug('> update files_info data frame')
@@ -1071,10 +1117,10 @@ def sph_ird_cal_detector_flat(self, silent=True):
         files_info.to_csv(path.preproc / 'files.csv')
 
         # update recipe execution
-        self._logger.debug('> update recipe execution')
-        self._recipe_execution['sph_ird_cal_detector_flat'] = True
+        self._update_recipe_status('sph_ird_cal_detector_flat', vltpf.SUCCESS)
 
+    # FIXME: change wave_calib into cal_wave
     def sph_ird_wave_calib(self, silent=True):
         '''
         Create the wavelength calibration
 
@@ -1085,12 +1131,13 @@ def sph_ird_wave_calib(self, silent=True):
             Suppress esorex output. Default is True
         '''
 
-        # check if recipe can be executed
-        toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_wave_calib', self.recipe_requirements,
-                                       logger=self._logger)
-
         self._logger.info('Wavelength calibration')
 
+        # check if recipe can be executed
+        if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_wave_calib',
+                                         self.recipe_requirements, logger=self._logger):
+            return
+
         # parameters
         path = self._path
         files_info = self._files_info
@@ -1098,22 +1145,30 @@ def sph_ird_wave_calib(self, silent=True):
         # get list of files
         wave_file = files_info[np.logical_not(files_info['PROCESSED']) & (files_info['DPR TYPE'] == 'LAMP,WAVE')]
         if len(wave_file) != 1:
-            raise ValueError('There should be exactly 1 raw wavelength calibration file. Found {0}.'.format(len(wave_file)))
+            self._logger.error('There should be exactly 1 raw wavelength calibration file. Found {0}.'.format(len(wave_file)))
+            self._update_recipe_status('sph_ird_wave_calib', vltpf.ERROR)
+            return
 
         DIT = wave_file['DET SEQ1 DIT'][0]
         dark_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IRD_MASTER_DARK') &
                                (files_info['DPR CATG'] == 'CALIB') & (files_info['DET SEQ1 DIT'].round(2) == DIT)]
         if len(dark_file) == 0:
-            raise ValueError('There should at least 1 dark file for wavelength calibration. Found none.')
+            self._logger.error('There should be at least 1 dark file for wavelength calibration. Found none.')
+            self._update_recipe_status('sph_ird_wave_calib', vltpf.ERROR)
+            return
 
         filter_comb = wave_file['INS COMB IFLT'][0]
         flat_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IRD_FLAT_FIELD')]
         if len(flat_file) == 0:
-            raise ValueError('There should at least 1 flat file for wavelength calibration. Found none.')
+            self._logger.error('There should be at least 1 flat file for wavelength calibration. Found none.')
+            self._update_recipe_status('sph_ird_wave_calib', vltpf.ERROR)
+            return
 
         bpm_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IRD_NON_LINEAR_BADPIXELMAP')]
         if len(flat_file) == 0:
-            raise ValueError('There should at least 1 bad pixel map file for wavelength calibration. Found none.')
+            self._logger.error('There should be at least 1 bad pixel map file for wavelength calibration. 
Found none.') + self._update_recipe_status('sph_ird_wave_calib', vltpf.ERROR) + return # products wav_file = 'wave_calib' @@ -1188,7 +1243,9 @@ def sph_ird_wave_calib(self, silent=True): # check esorex if shutil.which('esorex') is None: - raise NameError('esorex does not appear to be in your PATH. Please make sure that the ESO pipeline is properly installed before running VLTPF.') + self._logger.error('esorex does not appear to be in your PATH. Please make sure that the ESO pipeline is properly installed before running VLTPF.') + self._update_recipe_status('sph_ird_wave_calib', vltpf.ERROR) + return # execute esorex self._logger.debug('> execute esorex') @@ -1198,7 +1255,9 @@ def sph_ird_wave_calib(self, silent=True): proc = subprocess.run(args, cwd=path.tmp) if proc.returncode != 0: - raise ValueError('esorex process was not successful') + self._logger.error('esorex process was not successful') + self._update_recipe_status('sph_ird_wave_calib', vltpf.ERROR) + return # store products self._logger.debug('> update files_info data frame') @@ -1235,8 +1294,7 @@ def sph_ird_wave_calib(self, silent=True): fits.writeto(path.preproc / 'wavelength_default.fits', wave_lin.T, overwrite=True) # update recipe execution - self._logger.debug('> update recipe execution') - self._recipe_execution['sph_ird_wave_calib'] = True + self._update_recipe_status('sph_ird_wave_calib', vltpf.SUCCESS) def sph_ird_preprocess_science(self, @@ -1277,12 +1335,13 @@ def sph_ird_preprocess_science(self, ''' - # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_preprocess_science', self.recipe_requirements, - logger=self._logger) - self._logger.info('Pre-process science files') + # check if recipe can be executed + if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_preprocess_science', + self.recipe_requirements, logger=self._logger): + return + # parameters path = self._path files_info = self._files_info @@ -1316,7 +1375,9 @@ def sph_ird_preprocess_science(self, flat_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IRD_FLAT_FIELD') & (files_info['INS COMB IFLT'] == filter_comb)] if len(flat_file) != 1: - raise ValueError('There should be exactly 1 flat file. Found {0}.'.format(len(flat_file))) + self._logger.error('There should be exactly 1 flat file. Found {0}.'.format(len(flat_file))) + self._update_recipe_status('sph_ird_preprocess_science', vltpf.ERROR) + return flat = fits.getdata(path.calib / '{}.fits'.format(flat_file.index[0])) # final dataframe @@ -1359,7 +1420,9 @@ def sph_ird_preprocess_science(self, bkg = fits.getdata(path.calib / '{}.fits'.format(dfiles.index[0])) elif len(dfiles) > 1: # FIXME: handle cases when multiple backgrounds are found? 
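# A minimal illustrative sketch (not part of this patch) of one way the
# FIXME above could be handled: when several processed background files
# match, median-combine them instead of aborting. `dfiles` is the DataFrame
# of matching background files from the surrounding code; the combination
# strategy itself is an assumption, not the pipeline's documented behaviour.
#
#     import numpy as np
#     # stack all matching backgrounds and take the pixel-wise median
#     stack = np.array([fits.getdata(path.calib / '{}.fits'.format(f))
#                       for f in dfiles.index])
#     bkg = np.median(stack, axis=0)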
- raise ValueError('Unexpected number of background files ({0})'.format(len(dfiles))) + self._logger.error('Unexpected number of background files ({0})'.format(len(dfiles))) + self._update_recipe_status('sph_ird_preprocess_science', vltpf.ERROR) + return # process files for idx, (fname, finfo) in enumerate(sfiles.iterrows()): @@ -1460,8 +1523,7 @@ def sph_ird_preprocess_science(self, self._frames_info_preproc = frames_info_preproc # update recipe execution - self._logger.debug('> update recipe execution') - self._recipe_execution['sph_ird_preprocess_science'] = True + self._update_recipe_status('sph_ird_preprocess_science', vltpf.SUCCESS) def sph_ird_star_center(self, high_pass=False, plot=True): @@ -1479,12 +1541,13 @@ def sph_ird_star_center(self, high_pass=False, plot=True): ''' - # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_star_center', self.recipe_requirements, - logger=self._logger) - self._logger.info('Star centers determination') + # check if recipe can be executed + if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_star_center', + self.recipe_requirements, logger=self._logger): + return + # parameters path = self._path pixel = self._pixel @@ -1570,8 +1633,7 @@ def sph_ird_star_center(self, high_pass=False, plot=True): fits.writeto(path.preproc / '{}_spot_distance.fits'.format(fname), spot_dist, overwrite=True) # update recipe execution - self._logger.debug('> update recipe execution') - self._recipe_execution['sph_ird_star_center'] = True + self._update_recipe_status('sph_ird_star_center', vltpf.SUCCESS) def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): @@ -1596,11 +1658,12 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): ''' - # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_wavelength_recalibration', self.recipe_requirements, - logger=self._logger) - self._logger.info('Wavelength recalibration') + + # check if recipe can be executed + if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_wavelength_recalibration', + self.recipe_requirements, logger=self._logger): + return # parameters path = self._path @@ -1666,7 +1729,9 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): if filter_comb == 'S_LR': # FIXME: implement smoothing of the scaling factor for # LRS mode - raise ValueError('Wavelength recalibration is not yet implemented for IRDIS-LRS mode') + self._logger.error('Wavelength recalibration is not yet implemented for IRDIS-LRS mode') + self._update_recipe_status('sph_ird_wavelength_recalibration', vltpf.ERROR) + return elif filter_comb == 'S_MR': # linear fit with a 5-degree polynomial good = np.where(np.isfinite(wave)) @@ -1731,8 +1796,7 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): fits.writeto(path.preproc / 'wavelength_recalibrated.fits', wave_final, overwrite=True) # update recipe execution - self._logger.debug('> update recipe execution') - self._recipe_execution['sph_ird_wavelength_recalibration'] = True + self._update_recipe_status('sph_ird_wavelength_recalibration', vltpf.SUCCESS) def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_mrs_chromatism=True, @@ -1829,12 +1893,13 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m ''' - # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ird_combine_data', self.recipe_requirements, - 
logger=self._logger)
-
         self._logger.info('Combine science data')
 
+        # check if recipe can be executed
+        if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_combine_data',
+                                         self.recipe_requirements, logger=self._logger):
+            return
+
         # parameters
         path = self._path
         nwave = self._nwave
@@ -1862,7 +1927,9 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m
             manual_center = np.array(manual_center)
 
             if manual_center.shape != (2,):
-                raise ValueError('manual_center does not have the right number of dimensions.')
+                self._logger.error('manual_center does not have the right number of dimensions.')
+                self._update_recipe_status('sph_ird_combine_data', vltpf.ERROR)
+                return
 
             self._logger.warning('Images will be centered using the user-provided center ({},{})'.format(*manual_center))
 
@@ -2226,8 +2295,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m
         del sci_cube
 
         # update recipe execution
-        self._logger.debug('> update recipe execution')
-        self._recipe_execution['sph_ird_combine_data'] = True
+        self._update_recipe_status('sph_ird_combine_data', vltpf.SUCCESS)
 
 
     def sph_ird_clean(self, delete_raw=False, delete_products=False):
@@ -2245,6 +2313,11 @@ def sph_ird_clean(self, delete_raw=False, delete_products=False):
 
         self._logger.info('Clean reduction data')
 
+        # check if recipe can be executed
+        if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_clean',
+                                         self.recipe_requirements, logger=self._logger):
+            return
+
         # parameters
         path = self._path
 
@@ -2284,4 +2357,3 @@ def sph_ird_clean(self, delete_raw=False, delete_products=False):
 
         # update recipe execution
-        self._logger.debug('> update recipe execution')
-        self._recipe_execution['sph_ird_clean'] = True
+        self._update_recipe_status('sph_ird_clean', vltpf.SUCCESS)

From 7ab5290cf5a68ae1f2d952976d5120691c2396ca Mon Sep 17 00:00:00 2001
From: Arthur Vigan
Date: Fri, 13 Sep 2019 20:15:17 +0200
Subject: [PATCH 090/101] Rename sph_ird_wave_calib() into sph_ird_cal_wave()

Change to be consistent with IFS.Reduction
---
 vltpf/IRDIS/SpectroReduction.py | 36 ++++++++++++++++------------------
 1 file changed, 17 insertions(+), 19 deletions(-)

diff --git a/vltpf/IRDIS/SpectroReduction.py b/vltpf/IRDIS/SpectroReduction.py
index e4e9e5e..2ad0fd3 100644
--- a/vltpf/IRDIS/SpectroReduction.py
+++ b/vltpf/IRDIS/SpectroReduction.py
@@ -83,11 +83,11 @@ class SpectroReduction(object):
         'check_files_association': ['sort_files'],
         'sph_ird_cal_dark': ['sort_files'],
         'sph_ird_cal_detector_flat': ['sort_files'],
-        'sph_ird_wave_calib': ['sort_files', 'sph_ird_cal_detector_flat'],
+        'sph_ird_cal_wave': ['sort_files', 'sph_ird_cal_detector_flat'],
         'sph_ird_preprocess_science': ['sort_files', 'sort_frames', 'sph_ird_cal_dark', 
'sph_ird_cal_detector_flat'],
-        'sph_ird_star_center': ['sort_files', 'sort_frames', 'sph_ird_wave_calib'],
-        'sph_ird_wavelength_recalibration': ['sort_files', 'sort_frames', 'sph_ird_wave_calib'],
+        'sph_ird_star_center': ['sort_files', 'sort_frames', 'sph_ird_cal_wave'],
+        'sph_ird_wavelength_recalibration': ['sort_files', 'sort_frames', 'sph_ird_cal_wave'],
         'sph_ird_combine_data': ['sort_files', 'sort_frames', 'sph_ird_preprocess_science'],
         'sph_ird_clean': []
     }
@@ -345,7 +345,7 @@ def create_static_calibrations(self):
 
         self.sph_ird_cal_dark(silent=config['misc_silent_esorex'])
         self.sph_ird_cal_detector_flat(silent=config['misc_silent_esorex'])
-        self.sph_ird_wave_calib(silent=config['misc_silent_esorex'])
+        self.sph_ird_cal_wave(silent=config['misc_silent_esorex'])
 
 
     def preprocess_science(self):
@@ -461,8 +461,7 @@ def _read_info(self):
             if np.any(files_info['PRO CATG'] == 'IRD_FLAT_FIELD'):
                 self._update_recipe_status('sph_ird_cal_detector_flat', vltpf.SUCCESS)
             if np.any(files_info['PRO CATG'] == 'IRD_WAVECALIB'):
-                # FIXME: change wave_calib into cal_wave
-                self._update_recipe_status('sph_ird_wave_calib', vltpf.SUCCESS)
+                self._update_recipe_status('sph_ird_cal_wave', vltpf.SUCCESS)
 
             # update instrument mode
             self._mode = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS1 MODE'][0]
@@ -513,8 +512,8 @@ def _read_info(self):
         if frames_info_preproc is not None:
             done = (path.preproc / 'wavelength_default.fits').exists()
             if done:
-                self._update_recipe_status('sph_ird_wave_calib', vltpf.SUCCESS)
-            self._logger.debug('> sph_ird_wave_calib status = {}'.format(done))
+                self._update_recipe_status('sph_ird_cal_wave', vltpf.SUCCESS)
+            self._logger.debug('> sph_ird_cal_wave status = {}'.format(done))
 
             done = (path.preproc / 'wavelength_recalibrated.fits').exists()
             if done:
@@ -1120,8 +1119,7 @@ def sph_ird_cal_detector_flat(self, silent=True):
 
         self._update_recipe_status('sph_ird_cal_detector_flat', vltpf.SUCCESS)
 
-    # FIXME: change wave_calib into cal_wave
-    def sph_ird_wave_calib(self, silent=True):
+    def sph_ird_cal_wave(self, silent=True):
         '''
         Create the wavelength calibration
 
@@ -1134,7 +1132,7 @@ def sph_ird_cal_wave(self, silent=True):
         self._logger.info('Wavelength calibration')
 
         # check if recipe can be executed
-        if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_wave_calib',
+        if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_cal_wave',
                                          self.recipe_requirements, logger=self._logger):
             return
 
@@ -1146,7 +1144,7 @@ def sph_ird_cal_wave(self, silent=True):
         wave_file = files_info[np.logical_not(files_info['PROCESSED']) & (files_info['DPR TYPE'] == 'LAMP,WAVE')]
         if len(wave_file) != 1:
             self._logger.error('There should be exactly 1 raw wavelength calibration file. Found {0}.'.format(len(wave_file)))
-            self._update_recipe_status('sph_ird_wave_calib', vltpf.ERROR)
+            self._update_recipe_status('sph_ird_cal_wave', vltpf.ERROR)
             return
 
         DIT = wave_file['DET SEQ1 DIT'][0]
         dark_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IRD_MASTER_DARK') &
                                (files_info['DPR CATG'] == 'CALIB') & (files_info['DET SEQ1 DIT'].round(2) == DIT)]
         if len(dark_file) == 0:
             self._logger.error('There should be at least 1 dark file for wavelength calibration. 
Found none.')
-            self._update_recipe_status('sph_ird_wave_calib', vltpf.ERROR)
+            self._update_recipe_status('sph_ird_cal_wave', vltpf.ERROR)
             return
 
         filter_comb = wave_file['INS COMB IFLT'][0]
         flat_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IRD_FLAT_FIELD')]
         if len(flat_file) == 0:
             self._logger.error('There should be at least 1 flat file for wavelength calibration. Found none.')
-            self._update_recipe_status('sph_ird_wave_calib', vltpf.ERROR)
+            self._update_recipe_status('sph_ird_cal_wave', vltpf.ERROR)
             return
 
         bpm_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IRD_NON_LINEAR_BADPIXELMAP')]
         if len(flat_file) == 0:
             self._logger.error('There should be at least 1 bad pixel map file for wavelength calibration. Found none.')
-            self._update_recipe_status('sph_ird_wave_calib', vltpf.ERROR)
+            self._update_recipe_status('sph_ird_cal_wave', vltpf.ERROR)
             return
 
         # products
@@ -1244,7 +1242,7 @@ def sph_ird_cal_wave(self, silent=True):
         # check esorex
         if shutil.which('esorex') is None:
             self._logger.error('esorex does not appear to be in your PATH. Please make sure that the ESO pipeline is properly installed before running VLTPF.')
-            self._update_recipe_status('sph_ird_wave_calib', vltpf.ERROR)
+            self._update_recipe_status('sph_ird_cal_wave', vltpf.ERROR)
             return
 
         # execute esorex
@@ -1256,7 +1254,7 @@ def sph_ird_cal_wave(self, silent=True):
 
         if proc.returncode != 0:
             self._logger.error('esorex process was not successful')
-            self._update_recipe_status('sph_ird_wave_calib', vltpf.ERROR)
+            self._update_recipe_status('sph_ird_cal_wave', vltpf.ERROR)
             return
 
         # store products
@@ -1294,7 +1292,7 @@ def sph_ird_cal_wave(self, silent=True):
         fits.writeto(path.preproc / 'wavelength_default.fits', wave_lin.T, overwrite=True)
 
         # update recipe execution
-        self._update_recipe_status('sph_ird_wave_calib', vltpf.SUCCESS)
+        self._update_recipe_status('sph_ird_cal_wave', vltpf.SUCCESS)
 
 
     def sph_ird_preprocess_science(self,
@@ -1927,7 +1925,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m
             self._logger.warning('Using default wavelength calibration.')
             wave = fits.getdata(wfile)
         else:
-            self._logger.error('Missing default or recalibrated wavelength calibration. You must first run either sph_ird_wave_calib or sph_ird_wavelength_recalibration().')
+            self._logger.error('Missing default or recalibrated wavelength calibration. You must first run either sph_ird_cal_wave or sph_ird_wavelength_recalibration().')
             self._update_recipe_status('sph_ird_combine_data', vltpf.ERROR)
             return

From 519153b6eeeed944e402606aa25f24bfa67cc330 Mon Sep 17 00:00:00 2001
From: Arthur Vigan
Date: Fri, 13 Sep 2019 20:15:54 +0200
Subject: [PATCH 091/101] Update to comment

---
 vltpf/IRDIS/ImagingReduction.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py
index 355ea9a..f92332e 100644
--- a/vltpf/IRDIS/ImagingReduction.py
+++ b/vltpf/IRDIS/ImagingReduction.py
@@ -618,7 +618,7 @@ def sort_frames(self):
         # science files
         sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE') & (files_info['DPR TYPE'] != 'SKY')]
 
-        # raise error when no science frames are present
+        # report error when no science frames are present
         if len(sci_files) == 0:
             self._logger.error('This dataset contains no science frame. 
There should be at least one!') self._update_recipe_status('sort_frames', vltpf.ERROR) From 7a1c6b7de19c4956667462173a99edd4b1a5cdbc Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Sat, 14 Sep 2019 19:31:20 +0200 Subject: [PATCH 092/101] Remove trailing space --- vltpf/IRDIS/ImagingReduction.py | 112 ++++++++++++++-------------- vltpf/IRDIS/SpectroReduction.py | 128 ++++++++++++++++---------------- 2 files changed, 120 insertions(+), 120 deletions(-) diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py index f92332e..6ea058b 100644 --- a/vltpf/IRDIS/ImagingReduction.py +++ b/vltpf/IRDIS/ImagingReduction.py @@ -41,7 +41,7 @@ class ImagingReduction(object): 'check_files_association': ['sort_files'], 'sph_ird_cal_dark': ['sort_files'], 'sph_ird_cal_detector_flat': ['sort_files'], - 'sph_ird_preprocess_science': ['sort_files', 'sort_frames', 'sph_ird_cal_dark', + 'sph_ird_preprocess_science': ['sort_files', 'sort_frames', 'sph_ird_cal_dark', 'sph_ird_cal_detector_flat'], 'sph_ird_star_center': ['sort_files', 'sort_frames', 'sph_ird_preprocess_science'], 'sph_ird_combine_data': ['sort_files', 'sort_frames', 'sph_ird_preprocess_science'], @@ -70,11 +70,11 @@ def __new__(cls, path, log_level='info'): The log level of the handler ''' - + # # make sure we are dealing with a proper reduction directory # - + # init path path = Path(path).expanduser().resolve() @@ -106,23 +106,23 @@ def __new__(cls, path, log_level='info'): if logger.hasHandlers(): for hdlr in logger.handlers: logger.removeHandler(hdlr) - + handler = logging.FileHandler(reduction._path.products / 'reduction.log', mode='w', encoding='utf-8') formatter = logging.Formatter('%(asctime)s\t%(levelname)8s\t%(message)s') - formatter.default_msec_format = '%s.%03d' + formatter.default_msec_format = '%s.%03d' handler.setFormatter(formatter) logger.addHandler(handler) - + reduction._logger = logger - - reduction._logger.info('Creating IRDIS imaging reduction at path {}'.format(path)) - + + reduction._logger.info('Creating IRDIS imaging reduction at path {}'.format(path)) + # # configuration # configfile = Path(vltpf.__file__).parent / 'instruments' / '{}.ini'.format(reduction._instrument) config = configparser.ConfigParser() - + reduction._logger.debug('> read default configuration') config.read(configfile) @@ -156,7 +156,7 @@ def __new__(cls, path, log_level='info'): # reload any existing data frames reduction._read_info() - + # # return instance # @@ -270,7 +270,7 @@ def init_reduction(self): ''' self._logger.info('====> Init <====') - + # make sure we have sub-directories self._path.create_subdirectories() @@ -283,7 +283,7 @@ def create_static_calibrations(self): ''' Create static calibrations with esorex ''' - + self._logger.info('====> Static calibrations <====') config = self._config @@ -298,7 +298,7 @@ def preprocess_science(self): ''' self._logger.info('====> Science pre-processing <====') - + config = self._config self.sph_ird_preprocess_science(subtract_background=config['preproc_subtract_background'], @@ -317,7 +317,7 @@ def process_science(self): ''' self._logger.info('====> Science processing <====') - + config = self._config self.sph_ird_star_center(high_pass=config['center_high_pass'], @@ -340,7 +340,7 @@ def clean(self): ''' self._logger.info('====> Clean-up <====') - + config = self._config if config['clean']: @@ -355,7 +355,7 @@ def full_reduction(self): ''' self._logger.info('====> Full reduction <====') - + self.init_reduction() self.create_static_calibrations() self.preprocess_science() @@ -384,7 
+384,7 @@ def _read_info(self): ''' self._logger.info('Read existing reduction information') - + # path path = self._path @@ -392,7 +392,7 @@ def _read_info(self): fname = path.preproc / 'files.csv' if fname.exists(): self._logger.debug('> read files.csv') - + files_info = pd.read_csv(fname, index_col=0) # convert times @@ -415,7 +415,7 @@ def _read_info(self): fname = path.preproc / 'frames.csv' if fname.exists(): self._logger.debug('> read frames.csv') - + frames_info = pd.read_csv(fname, index_col=(0, 1)) # convert times @@ -434,7 +434,7 @@ def _read_info(self): fname = path.preproc / 'frames_preproc.csv' if fname.exists(): self._logger.debug('> read frames_preproc.csv') - + frames_info_preproc = pd.read_csv(fname, index_col=(0, 1)) # convert times @@ -475,7 +475,7 @@ def _read_info(self): self._update_recipe_status('sph_ird_star_center', vltpf.SUCCESS) self._logger.debug('> sph_ird_star_center status = {}'.format(done)) - + # FIXME: move into toolbox def _update_recipe_status(self, recipe, recipe_status): '''Update execution status for reduction and recipe @@ -488,16 +488,16 @@ def _update_recipe_status(self, recipe, recipe_status): Status of the recipe. Can be either one of vltpf.NOTSET, vltpf.SUCCESS or vltpf.ERROR ''' - + self._logger.debug('> update recipe execution') - + self._recipes_status[recipe] = recipe_status self._recipes_status.move_to_end(recipe) ################################################## # SPHERE/IRDIS methods ################################################## - + def sort_files(self): ''' Sort all raw files and save result in a data frame @@ -510,7 +510,7 @@ def sort_files(self): # update recipe execution self._update_recipe_status('sort_files', vltpf.NOTSET) - + # parameters path = self._path @@ -607,7 +607,7 @@ def sort_frames(self): self._logger.info('Extract frames information') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sort_frames', + if not toolbox.recipe_executable(self._recipes_status, 'sort_frames', self.recipe_requirements, logger=self._logger): return @@ -694,9 +694,9 @@ def sort_frames(self): self._logger.info(' * Texp: {0:.2f} min'.format(cinfo['DET SEQ1 DIT'].sum()/60)) self._logger.info(' * PA: {0:.2f}° ==> {1:.2f}° = {2:.2f}°'.format(pa_start, pa_end, np.abs(pa_end-pa_start))) self._logger.info(' * POSANG: {0}'.format(', '.join(['{:.2f}°'.format(p) for p in posang]))) - + # update recipe execution - self._update_recipe_status('sort_frames', vltpf.SUCCESS) + self._update_recipe_status('sort_frames', vltpf.SUCCESS) def check_files_association(self): @@ -710,7 +710,7 @@ def check_files_association(self): self._logger.info('File association for calibrations') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'check_files_association', + if not toolbox.recipe_executable(self._recipes_status, 'check_files_association', self.recipe_requirements, logger=self._logger): return @@ -789,7 +789,7 @@ def check_files_association(self): return else: self._logger.warning('There are {0} warning(s) and {1} error(s) in the classification of files'.format(warning_flag, error_flag)) - + # update recipe execution self._update_recipe_status('check_files_association', vltpf.SUCCESS) @@ -807,7 +807,7 @@ def sph_ird_cal_dark(self, silent=True): self._logger.info('Darks and backgrounds') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_cal_dark', + if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_cal_dark', 
self.recipe_requirements, logger=self._logger): return @@ -934,7 +934,7 @@ def sph_ird_cal_detector_flat(self, silent=True): self._logger.info('Instrument flats') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_cal_detector_flat', + if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_cal_detector_flat', self.recipe_requirements, logger=self._logger): return @@ -1081,7 +1081,7 @@ def sph_ird_preprocess_science(self, self._logger.info('Pre-process science files') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_preprocess_science', + if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_preprocess_science', self.recipe_requirements, logger=self._logger): return @@ -1322,16 +1322,16 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): self._logger.info('Star centers determination') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_star_center', + if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_star_center', self.recipe_requirements, logger=self._logger): return - + # parameters path = self._path pixel = self._pixel orientation_offset = self._orientation_offset center_guess = self._default_center - frames_info = self._frames_info_preproc + frames_info = self._frames_info_preproc # wavelength filter_comb = frames_info['INS COMB IFLT'].unique()[0] @@ -1354,7 +1354,7 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): save_path = path.products / '{}_PSF_fitting.pdf'.format(fname) else: save_path = None - img_center = toolbox.star_centers_from_PSF_img_cube(cube, wave, pixel, + img_center = toolbox.star_centers_from_PSF_img_cube(cube, wave, pixel, save_path=save_path, logger=self._logger) # save @@ -1388,8 +1388,8 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): save_path = None spot_center, spot_dist, img_center \ = toolbox.star_centers_from_waffle_img_cube(cube, wave, waffle_orientation, center_guess, - pixel, orientation_offset, high_pass=high_pass, - center_offset=offset, coro=coro, save_path=save_path, + pixel, orientation_offset, high_pass=high_pass, + center_offset=offset, coro=coro, save_path=save_path, logger=self._logger) # save @@ -1443,8 +1443,8 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a - no saving of the rescaled frames (save_scaled=False) This option is useful if the user wants to perform a - posteriori centering of the frames, e.g. to fully preserve - photometry. + posteriori centering of the frames, e.g. to fully preserve + photometry. 
If there was no OBJECT,CENTER acquired in the sequence, then the centering will be performed with respect to a default, @@ -1501,10 +1501,10 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a self._logger.info('Combine science data') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_combine_data', + if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_combine_data', self.recipe_requirements, logger=self._logger): return - + # parameters path = self._path nwave = self._nwave @@ -1537,7 +1537,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a if manual_center is not None: manual_center = np.array(manual_center) - + if (manual_center.shape != (2,)) and (manual_center.shape != (nwave, 2)): self._logger.error('manual_center does not have the right number of dimensions.') self._update_recipe_status('sph_ird_combine_data', vltpf.ERROR) @@ -1577,7 +1577,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a self._logger.debug('> read data') fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) cube = fits.getdata(path.preproc / '{}.fits'.format(fname)) - + self._logger.debug('> read centers') cfile = path.preproc / '{}_centers.fits'.format(fname) if cfile.exists(): @@ -1587,7 +1587,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a centers = self._default_center # make sure we have only integers if user wants coarse centering - if coarse_centering: + if coarse_centering: centers = centers.astype(np.int) # neutral density @@ -1679,11 +1679,11 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a else: # otherwise read center data centers = fits.getdata(path.preproc / '{}_centers.fits'.format(fname)) - + # make sure we have only integers if user wants coarse centering if coarse_centering: centers = centers.astype(np.int) - + # neutral density self._logger.debug('> read neutral density information') ND = frames_info.loc[(file, idx), 'INS4 FILT2 NAME'] @@ -1747,14 +1747,14 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # null value for Dithering Motion Stage by default dms_dx_ref = 0 dms_dy_ref = 0 - + # use manual center if explicitely requested self._logger.debug('> read centers') if manual_center is not None: centers = manual_center else: # otherwise, look whether we have an OBJECT,CENTER frame - + # FIXME: ticket #12. 
Use first DIT of first OBJECT,CENTER # in the sequence, but it would be better to be able to # select which CENTER to use @@ -1767,7 +1767,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a fpath = path.preproc / fname if fpath.exists(): centers = fits.getdata(fpath) - + # Dithering Motion Stage for star center: value is in micron, # and the pixel size is 18 micron dms_dx_ref = starcen_files['INS1 PAC X'][0] / 18 @@ -1826,7 +1826,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a if coarse_centering: dms_dx = np.int(dms_dx) dms_dy = np.int(dms_dy) - + # center frames for wave_idx, img in enumerate(cube): self._logger.debug('> wave {}'.format(wave_idx)) @@ -1890,12 +1890,12 @@ def sph_ird_clean(self, delete_raw=False, delete_products=False): ''' self._logger.info('Clean reduction data') - + # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_clean', + if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_clean', self.recipe_requirements, logger=self._logger): return - + # parameters path = self._path diff --git a/vltpf/IRDIS/SpectroReduction.py b/vltpf/IRDIS/SpectroReduction.py index 2ad0fd3..ef8f794 100644 --- a/vltpf/IRDIS/SpectroReduction.py +++ b/vltpf/IRDIS/SpectroReduction.py @@ -114,11 +114,11 @@ def __new__(cls, path, log_level='info'): The log level of the handler ''' - + # # make sure we are dealing with a proper reduction directory # - + # init path path = Path(path).expanduser().resolve() @@ -150,23 +150,23 @@ def __new__(cls, path, log_level='info'): if logger.hasHandlers(): for hdlr in logger.handlers: logger.removeHandler(hdlr) - + handler = logging.FileHandler(reduction._path.products / 'reduction.log', mode='w', encoding='utf-8') formatter = logging.Formatter('%(asctime)s\t%(levelname)8s\t%(message)s') - formatter.default_msec_format = '%s.%03d' + formatter.default_msec_format = '%s.%03d' handler.setFormatter(formatter) logger.addHandler(handler) - + reduction._logger = logger - + reduction._logger.info('Creating IRDIS spectroscopy reduction at path {}'.format(path)) - + # # configuration # configfile = Path(vltpf.__file__).parent / 'instruments' / '{}.ini'.format(reduction._instrument) config = configparser.ConfigParser() - + reduction._logger.debug('> read configuration') config.read(configfile) @@ -205,7 +205,7 @@ def __new__(cls, path, log_level='info'): # reload any existing data frames reduction._read_info() - + # # return instance # @@ -325,7 +325,7 @@ def init_reduction(self): ''' self._logger.info('====> Init <====') - + # make sure we have sub-directories self._path.create_subdirectories() @@ -340,7 +340,7 @@ def create_static_calibrations(self): ''' self._logger.info('====> Static calibrations <====') - + config = self._config self.sph_ird_cal_dark(silent=config['misc_silent_esorex']) @@ -354,7 +354,7 @@ def preprocess_science(self): ''' self._logger.info('====> Science pre-processing <====') - + config = self._config self.sph_ird_preprocess_science(subtract_background=config['preproc_subtract_background'], @@ -369,7 +369,7 @@ def process_science(self): Perform star center, combine cubes into final (x,y,time,lambda) cubes, correct anamorphism and scale the images ''' - + self._logger.info('====> Science processing <====') config = self._config @@ -394,7 +394,7 @@ def clean(self): ''' self._logger.info('====> Clean-up <====') - + config = self._config if config['clean']: @@ -409,7 +409,7 @@ def full_reduction(self): ''' 
self._logger.info('====> Full reduction <====') - + self.init_reduction() self.create_static_calibrations() self.preprocess_science() @@ -438,7 +438,7 @@ def _read_info(self): ''' self._logger.info('Read existing reduction information') - + # path path = self._path @@ -446,7 +446,7 @@ def _read_info(self): fname = path.preproc / 'files.csv' if fname.exists(): self._logger.debug('> read files.csv') - + files_info = pd.read_csv(fname, index_col=0) # convert times @@ -471,7 +471,7 @@ def _read_info(self): fname = path.preproc / 'frames.csv' if fname.exists(): self._logger.debug('> read frames.csv') - + frames_info = pd.read_csv(fname, index_col=(0, 1)) # convert times @@ -490,7 +490,7 @@ def _read_info(self): fname = path.preproc / 'frames_preproc.csv' if fname.exists(): self._logger.debug('> read frames_preproc.csv') - + frames_info_preproc = pd.read_csv(fname, index_col=(0, 1)) # convert times @@ -541,7 +541,7 @@ def _read_info(self): self._update_recipe_status('sph_ird_star_center', vltpf.SUCCESS) self._logger.debug('> sph_ird_star_center status = {}'.format(done)) - + # FIXME: move into toolbox def _update_recipe_status(self, recipe, recipe_status): '''Update execution status for reduction and recipe @@ -554,16 +554,16 @@ def _update_recipe_status(self, recipe, recipe_status): Status of the recipe. Can be either one of vltpf.NOTSET, vltpf.SUCCESS or vltpf.ERROR ''' - + self._logger.debug('> update recipe execution') - + self._recipes_status[recipe] = recipe_status self._recipes_status.move_to_end(recipe) ################################################## # SPHERE/IRDIS methods ################################################## - + def sort_files(self): ''' Sort all raw files and save result in a data frame @@ -576,7 +576,7 @@ def sort_files(self): # update recipe execution self._update_recipe_status('sort_files', vltpf.NOTSET) - + # parameters path = self._path @@ -673,7 +673,7 @@ def sort_frames(self): self._logger.info('Extract frames information') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sort_frames', + if not toolbox.recipe_executable(self._recipes_status, 'sort_frames', self.recipe_requirements, logger=self._logger): return @@ -688,7 +688,7 @@ def sort_frames(self): if len(sci_files) == 0: self._logger.error('This dataset contains no science frame. 
There should be at least one!') self._update_recipe_status('sort_frames', vltpf.ERROR) - return + return # build indices files = [] @@ -762,8 +762,8 @@ def sort_frames(self): self._logger.info(' * POSANG: {0}'.format(', '.join(['{:.2f}°'.format(p) for p in posang]))) # update recipe execution - self._update_recipe_status('sort_frames', vltpf.SUCCESS) - + self._update_recipe_status('sort_frames', vltpf.SUCCESS) + def check_files_association(self): ''' @@ -776,7 +776,7 @@ def check_files_association(self): self._logger.info('File association for calibrations') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'check_files_association', + if not toolbox.recipe_executable(self._recipes_status, 'check_files_association', self.recipe_requirements, logger=self._logger): return @@ -803,7 +803,7 @@ def check_files_association(self): self._logger.error('Sequence is mixing different types of filters combinations: {0}'.format(filter_combs)) self._update_recipe_status('check_files_association', vltpf.ERROR) return - + filter_comb = filter_combs[0] if (filter_comb != 'S_LR') and (filter_comb != 'S_MR'): self._logger.error('Unknown IRDIS-LSS filter combination/mode {0}'.format(filter_comb)) @@ -902,10 +902,10 @@ def sph_ird_cal_dark(self, silent=True): self._logger.info('Darks and backgrounds') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_cal_dark', + if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_cal_dark', self.recipe_requirements, logger=self._logger): return - + # parameters path = self._path files_info = self._files_info @@ -1029,10 +1029,10 @@ def sph_ird_cal_detector_flat(self, silent=True): self._logger.info('Instrument flats') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_cal_detector_flat', + if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_cal_detector_flat', self.recipe_requirements, logger=self._logger): return - + # parameters path = self._path files_info = self._files_info @@ -1132,10 +1132,10 @@ def sph_ird_cal_wave(self, silent=True): self._logger.info('Wavelength calibration') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_cal_wave', + if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_cal_wave', self.recipe_requirements, logger=self._logger): return - + # parameters path = self._path files_info = self._files_info @@ -1173,7 +1173,7 @@ def sph_ird_cal_wave(self, silent=True): # laser wavelengths wave_lasers = self._wave_cal_lasers - + # esorex parameters self._logger.debug('> filter combination is {}'.format(filter_comb)) if filter_comb == 'S_LR': @@ -1290,7 +1290,7 @@ def sph_ird_cal_wave(self, silent=True): self._logger.debug('> save default wavelength calibration') fits.writeto(path.preproc / 'wavelength_default.fits', wave_lin.T, overwrite=True) - + # update recipe execution self._update_recipe_status('sph_ird_cal_wave', vltpf.SUCCESS) @@ -1336,10 +1336,10 @@ def sph_ird_preprocess_science(self, self._logger.info('Pre-process science files') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_preprocess_science', + if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_preprocess_science', self.recipe_requirements, logger=self._logger): return - + # parameters path = self._path files_info = self._files_info @@ -1375,7 +1375,7 @@ def sph_ird_preprocess_science(self, if len(flat_file) 
!= 1: self._logger.error('There should be exactly 1 flat file. Found {0}.'.format(len(flat_file))) self._update_recipe_status('sph_ird_preprocess_science', vltpf.ERROR) - return + return flat = fits.getdata(path.calib / '{}.fits'.format(flat_file.index[0])) # final dataframe @@ -1420,7 +1420,7 @@ def sph_ird_preprocess_science(self, # FIXME: handle cases when multiple backgrounds are found? self._logger.error('Unexpected number of background files ({0})'.format(len(dfiles))) self._update_recipe_status('sph_ird_preprocess_science', vltpf.ERROR) - return + return # process files for idx, (fname, finfo) in enumerate(sfiles.iterrows()): @@ -1542,10 +1542,10 @@ def sph_ird_star_center(self, high_pass=False, plot=True): self._logger.info('Star centers determination') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_star_center', + if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_star_center', self.recipe_requirements, logger=self._logger): return - + # parameters path = self._path pixel = self._pixel @@ -1585,7 +1585,7 @@ def sph_ird_star_center(self, high_pass=False, plot=True): save_path = path.products / '{}_PSF_fitting.pdf'.format(fname) else: save_path = None - psf_center = toolbox.star_centers_from_PSF_lss_cube(cube, wave_lin, pixel, save_path=save_path, + psf_center = toolbox.star_centers_from_PSF_lss_cube(cube, wave_lin, pixel, save_path=save_path, logger=self._logger) # save @@ -1622,7 +1622,7 @@ def sph_ird_star_center(self, high_pass=False, plot=True): save_path = None spot_centers, spot_dist, img_centers \ = toolbox.star_centers_from_waffle_lss_cube(cube_cen, cube_sci, wave_lin, centers, pixel, - high_pass=high_pass, save_path=save_path, + high_pass=high_pass, save_path=save_path, logger=self._logger) # save @@ -1657,9 +1657,9 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): ''' self._logger.info('Wavelength recalibration') - + # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_wavelength_recalibration', + if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_wavelength_recalibration', self.recipe_requirements, logger=self._logger): return @@ -1674,7 +1674,7 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): wfile = path.preproc / 'wavelength_recalibrated.fits' if wfile.exists(): wfile.unlink() - + # resolution-specific parameters filter_comb = frames_info['INS COMB IFLT'].unique()[0] if filter_comb == 'S_LR': @@ -1729,7 +1729,7 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): # LRS mode self._logger.error('Wavelength recalibration is not yet implemented for IRDIS-LRS mode') self._update_recipe_status('sph_ird_wavelength_recalibration', vltpf.ERROR) - return + return elif filter_comb == 'S_MR': # linear fit with a 5-degree polynomial good = np.where(np.isfinite(wave)) @@ -1894,7 +1894,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m self._logger.info('Combine science data') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_combine_data', + if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_combine_data', self.recipe_requirements, logger=self._logger): return @@ -1927,7 +1927,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m else: self._logger.error('Missing default or recalibrated wavelength calibration. 
You must first run either sph_ird_cal_wave or sph_ird_wavelength_recalibration().') self._update_recipe_status('sph_ird_combine_data', vltpf.ERROR) - return + return # wavelength solution: make sure we have the same number of # wave points in each field @@ -1935,7 +1935,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m iwave0 = np.where(mask[:, 0])[0] iwave1 = np.where(mask[:, 1])[0] nwave = np.min([iwave0.size, iwave1.size]) - + iwave = np.empty((nwave, 2), dtype=np.int) iwave[:, 0] = iwave0[:nwave] iwave[:, 1] = iwave1[:nwave] @@ -1943,7 +1943,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m final_wave = np.empty((nwave, 2)) final_wave[:, 0] = wave[iwave[:, 0], 0] final_wave[:, 1] = wave[iwave[:, 1], 1] - + fits.writeto(path.products / 'wavelength.fits', final_wave.squeeze().T, overwrite=True) # max images size @@ -1961,17 +1961,17 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m shift_method = 'roll' cpix = True correct_mrs_chromatism = False - + if manual_center is not None: manual_center = np.array(manual_center) - + if manual_center.shape != (2,): self._logger.error('manual_center does not have the right number of dimensions.') self._update_recipe_status('sph_ird_combine_data', vltpf.ERROR) - return + return self._logger.warning('Images will be centered using the user-provided center ({},{})'.format(*manual_center)) - + manual_center = np.full((1024, 2), manual_center, dtype=np.float) # @@ -2000,7 +2000,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m self._logger.debug('> read data') fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) cube = fits.getdata(path.preproc / '{}.fits'.format(fname)) - + self._logger.debug('> read centers') cfile = path.preproc / '{}_centers.fits'.format(fname) if cfile.exists(): @@ -2023,7 +2023,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m self._logger.debug('> field {}'.format(field_idx)) # wavelength solution for this field ciwave = iwave[:, field_idx] - + if correct_mrs_chromatism and (filter_comb == 'S_MR'): self._logger.debug('> correct MRS chromatism') img = img.astype(np.float) @@ -2032,7 +2032,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m cx = centers[widx, field_idx] line = img[widx, :] - + nimg = imutils.shift(line, cc-cx, method=shift_method) nimg = nimg / DIT @@ -2105,7 +2105,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m self._logger.debug('> read data') fname = '{0}_DIT{1:03d}_preproc'.format(file, idx) cube = fits.getdata(path.preproc / '{}.fits'.format(fname)) - + # use manual center if explicitely requested self._logger.debug('> read centers') if manual_center is not None: @@ -2310,9 +2310,9 @@ def sph_ird_clean(self, delete_raw=False, delete_products=False): ''' self._logger.info('Clean reduction data') - + # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_clean', + if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_clean', self.recipe_requirements, logger=self._logger): return From 28e637d155e4d0481d7d0669e07489d3d018abd6 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Sun, 15 Sep 2019 09:36:18 +0200 Subject: [PATCH 093/101] Implement proper error handling in reduction execution for IFS Ticket #41 --- vltpf/IFS.py | 529 +++++++++++++++++++++++++++++++-------------------- 1 file changed, 322 insertions(+), 207 
deletions(-) diff --git a/vltpf/IFS.py b/vltpf/IFS.py index 58f39eb..ad97adc 100644 --- a/vltpf/IFS.py +++ b/vltpf/IFS.py @@ -385,7 +385,8 @@ class Reduction(object): 'sph_ifs_wavelength_recalibration': ['sort_files', 'sort_frames', 'sph_ifs_preprocess_wave', 'sph_ifs_science_cubes'], 'sph_ifs_star_center': ['sort_files', 'sort_frames', 'sph_ifs_science_cubes'], - 'sph_ifs_combine_data': ['sort_files', 'sort_frames', 'sph_ifs_science_cubes'] + 'sph_ifs_combine_data': ['sort_files', 'sort_frames', 'sph_ifs_science_cubes'], + 'sph_ifs_clean': [] } ################################################## @@ -396,11 +397,25 @@ def __new__(cls, path, log_level='info'): '''Custom instantiation for the class The customized instantiation enables to check that the - provided path is a valid reduction path. If not None will be - returned for the reduction being created + provided path is a valid reduction path. If not, None will be + returned for the reduction being created. Otherwise, an + instance is created and returned at the end. + + Parameters + ---------- + path : str + Path to the directory containing the dataset + + level : {'debug', 'info', 'warning', 'error', 'critical'} + The log level of the handler + ''' - # expand path + # + # make sure we are dealing with a proper reduction directory + # + + # init path path = Path(path).expanduser().resolve() # zeroth-order reduction validation @@ -409,33 +424,18 @@ def __new__(cls, path, log_level='info'): _log.error('No raw/ subdirectory. {0} is not a valid reduction path'.format(path)) return None else: - return super(Reduction, cls).__new__(cls) + reduction = super(Reduction, cls).__new__(cls) - - def __init__(self, path, log_level='info'): - ''' - Initialization of the IFSReduction - - Parameters - ---------- - path : str - Path to the directory containing the dataset - - level : {'debug', 'info', 'warning', 'error', 'critical'} - The log level of the handler - ''' - # # basic init # # init path - path = Path(path).expanduser().resolve() - self._path = utils.ReductionPath(path) + reduction._path = utils.ReductionPath(path) # instrument and mode - self._instrument = 'IFS' - self._mode = 'Unknown' + reduction._instrument = 'IFS' + reduction._mode = 'Unknown' # # logging @@ -446,67 +446,56 @@ def __init__(self, path, log_level='info'): for hdlr in logger.handlers: logger.removeHandler(hdlr) - handler = logging.FileHandler(self._path.products / 'reduction.log', mode='w', encoding='utf-8') + handler = logging.FileHandler(reduction._path.products / 'reduction.log', mode='w', encoding='utf-8') formatter = logging.Formatter('%(asctime)s\t%(levelname)8s\t%(message)s') formatter.default_msec_format = '%s.%03d' handler.setFormatter(formatter) logger.addHandler(handler) - self._logger = logger + reduction._logger = logger - self._logger.info('Creating IFS reduction at path {}'.format(path)) + reduction._logger.info('Creating IFS reduction at path {}'.format(path)) # # configuration # - self._logger.debug('> read default configuration') - configfile = Path(vltpf.__file__).parent / 'instruments' / '{}.ini'.format(self._instrument) + reduction._logger.debug('> read default configuration') + configfile = Path(vltpf.__file__).parent / 'instruments' / '{}.ini'.format(reduction._instrument) config = configparser.ConfigParser() - self._logger.debug('Read configuration') + reduction._logger.debug('Read configuration') config.read(configfile) # instrument - self._pixel = float(config.get('instrument', 'pixel')) - self._nwave = int(config.get('instrument', 'nwave')) + 
reduction._pixel = float(config.get('instrument', 'pixel')) + reduction._nwave = int(config.get('instrument', 'nwave')) # calibration - self._wave_cal_lasers = np.array(eval(config.get('calibration', 'wave_cal_lasers'))) - self._default_center = np.array(eval(config.get('calibration', 'default_center'))) - self._orientation_offset = eval(config.get('calibration', 'orientation_offset')) + reduction._wave_cal_lasers = np.array(eval(config.get('calibration', 'wave_cal_lasers'))) + reduction._default_center = np.array(eval(config.get('calibration', 'default_center'))) + reduction._orientation_offset = eval(config.get('calibration', 'orientation_offset')) # reduction parameters - self._config = dict(config.items('reduction')) - for key, value in self._config.items(): + reduction._config = dict(config.items('reduction')) + for key, value in reduction._config.items(): try: val = eval(value) except NameError: val = value - self._config[key] = val + reduction._config[key] = val # # reduction status # - self._recipe_execution = collections.OrderedDict( - [('sort_files', False), - ('sort_frames', False), - ('check_files_association', False), - ('sph_ifs_cal_dark', False), - ('sph_ifs_cal_detector_flat', False), - ('sph_ifs_cal_specpos', False), - ('sph_ifs_cal_wave', False), - ('sph_ifs_cal_ifu_flat', False), - ('sph_ifs_preprocess_science', False), - ('sph_ifs_preprocess_wave', False), - ('sph_ifs_science_cubes', False), - ('sph_ifs_wavelength_recalibration', False), - ('sph_ifs_star_center', False), - ('sph_ifs_combine_data', False), - ('sph_ifs_clean', False)] - ) + reduction._recipes_status = collections.OrderedDict() # reload any existing data frames - self._read_info() + reduction._read_info() + + # + # return instance + # + return reduction ################################################## # Representation @@ -551,8 +540,8 @@ def frames_info_preproc(self): return self._frames_info_preproc @property - def recipe_execution(self): - return self._recipe_execution + def recipe_status(self): + return self._recipes_status @property def config(self): @@ -755,17 +744,17 @@ def _read_info(self): files_info['DET FRAM UTC'] = pd.to_datetime(files_info['DET FRAM UTC'], utc=False) # update recipe execution - self._recipe_execution['sort_files'] = True + self._update_recipe_status('sort_files', vltpf.SUCCESS) if np.any(files_info['PRO CATG'] == 'IFS_MASTER_DARK'): - self._recipe_execution['sph_ifs_cal_dark'] = True + self._update_recipe_status('sph_ifs_cal_dark', vltpf.SUCCESS) if np.any(files_info['PRO CATG'] == 'IFS_MASTER_DFF'): - self._recipe_execution['sph_ifs_cal_detector_flat'] = True + self._update_recipe_status('sph_ifs_cal_detector_flat', vltpf.SUCCESS) if np.any(files_info['PRO CATG'] == 'IFS_SPECPOS'): - self._recipe_execution['sph_ifs_cal_specpos'] = True + self._update_recipe_status('sph_ifs_cal_specpos', vltpf.SUCCESS) if np.any(files_info['PRO CATG'] == 'IFS_WAVECALIB'): - self._recipe_execution['sph_ifs_cal_wave'] = True + self._update_recipe_status('sph_ifs_cal_wave', vltpf.SUCCESS) if np.any(files_info['PRO CATG'] == 'IFS_IFU_FLAT_FIELD'): - self._recipe_execution['sph_ifs_cal_ifu_flat'] = True + self._update_recipe_status('sph_ifs_cal_ifu_flat', vltpf.SUCCESS) # update instrument mode self._mode = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS2 MODE'][0] @@ -787,7 +776,7 @@ def _read_info(self): frames_info['TIME END'] = pd.to_datetime(frames_info['TIME END'], utc=False) # update recipe execution - self._recipe_execution['sort_frames'] = True + 
self._update_recipe_status('sort_frames', vltpf.SUCCESS)
         else:
             frames_info = None
 
@@ -816,15 +805,18 @@ def _read_info(self):
         if frames_info is not None:
             wave_file = files_info[np.logical_not(files_info['PROCESSED']) & (files_info['DPR TYPE'] == 'WAVE,LAMP')]
             done = (path.preproc / '{}_preproc.fits'.format(wave_file.index[0])).exists()
-            self._recipe_execution['sph_ifs_preprocess_wave'] = done
+            if done:
+                self._update_recipe_status('sph_ifs_preprocess_wave', vltpf.SUCCESS)
             self._logger.debug('> sph_ifs_preprocess_wave status = {}'.format(done))
 
             done = (path.preproc / 'wavelength_default.fits').exists()
-            self._recipe_execution['sph_ifs_cal_wave'] = done
+            if done:
+                self._update_recipe_status('sph_ifs_cal_wave', vltpf.SUCCESS)
             self._logger.debug('> sph_ifs_cal_wave status = {}'.format(done))
 
             done = (path.preproc / 'wavelength_recalibrated.fits').exists()
-            self._recipe_execution['sph_ifs_wavelength_recalibration'] = done
+            if done:
+                self._update_recipe_status('sph_ifs_wavelength_recalibration', vltpf.SUCCESS)
             self._logger.debug('> sph_ifs_wavelength_recalibration status = {}'.format(done))
 
         if frames_info_preproc is not None:
@@ -834,7 +826,8 @@ def _read_info(self):
                 fname = '{0}_DIT{1:03d}_preproc'.format(file, idx)
                 file = list(path.preproc.glob('{}.fits'.format(fname)))
                 done = done and (len(file) == 1)
-            self._recipe_execution['sph_ifs_preprocess_science'] = done
+            if done:
+                self._update_recipe_status('sph_ifs_preprocess_science', vltpf.SUCCESS)
             self._logger.debug('> sph_ifs_preprocess_science status = {}'.format(done))
 
             done = True
@@ -843,7 +836,8 @@ def _read_info(self):
                 fname = '{0}_DIT{1:03d}_preproc_?????'.format(file, idx)
                 file = list(path.preproc.glob('{}.fits'.format(fname)))
                 done = done and (len(file) == 1)
-            self._recipe_execution['sph_ifs_science_cubes'] = done
+            if done:
+                self._update_recipe_status('sph_ifs_science_cubes', vltpf.SUCCESS)
             self._logger.debug('> sph_ifs_science_cubes status = {}'.format(done))
 
             done = True
@@ -853,9 +847,28 @@ def _read_info(self):
                 fname = '{0}_DIT{1:03d}_preproc_centers'.format(file, idx)
                 file = list(path.preproc.glob('{}.fits'.format(fname)))
                 done = done and (len(file) == 1)
-            self._recipe_execution['sph_ifs_star_center'] = done
+            if done:
+                self._update_recipe_status('sph_ifs_star_center', vltpf.SUCCESS)
             self._logger.debug('> sph_ifs_star_center status = {}'.format(done))
 
+    # FIXME: move into toolbox
+    def _update_recipe_status(self, recipe, recipe_status):
+        '''Update execution status for reduction and recipe
+
+        Parameters
+        ----------
+        recipe : str
+            Recipe name
+
+        recipe_status : vltpf status (int)
+            Status of the recipe. 
Can be either one of vltpf.NOTSET, + vltpf.SUCCESS or vltpf.ERROR + ''' + + self._logger.debug('> update recipe execution') + + self._recipes_status[recipe] = recipe_status + self._recipes_status.move_to_end(recipe) ################################################## # SPHERE/IFS methods @@ -871,6 +884,9 @@ def sort_files(self): self._logger.info('Sort raw files') + # update recipe execution + self._update_recipe_status('sort_files', vltpf.NOTSET) + # parameters path = self._path @@ -879,7 +895,9 @@ def sort_files(self): files = [f.stem for f in files] if len(files) == 0: - raise ValueError('No raw FITS files in reduction path') + self._logger.error('No raw FITS files in reduction path') + self._update_recipe_status('sort_files', vltpf.ERROR) + return self._logger.info(' * found {0} raw FITS files'.format(len(files))) @@ -924,7 +942,9 @@ def sort_files(self): # check instruments instru = files_info['SEQ ARM'].unique() if len(instru) != 1: - raise ValueError('Sequence is mixing different instruments: {0}'.format(instru)) + self._logger.error('Sequence is mixing different instruments: {0}'.format(instru)) + self._update_recipe_status('sort_files', vltpf.ERROR) + return # processed column files_info.insert(len(files_info.columns), 'PROCESSED', False) @@ -948,8 +968,7 @@ def sort_files(self): self._files_info = files_info # update recipe execution - self._logger.debug('> update recipe execution') - self._recipe_execution['sort_files'] = True + self._update_recipe_status('sort_files', vltpf.SUCCESS) def sort_frames(self): @@ -964,8 +983,9 @@ def sort_frames(self): self._logger.info('Extract frames information') # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sort_frames', self.recipe_requirements, - logger=self._logger) + if not toolbox.recipe_executable(self._recipes_status, 'sort_frames', + self.recipe_requirements, logger=self._logger): + return # parameters path = self._path @@ -974,9 +994,11 @@ def sort_frames(self): # science files sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE') & (files_info['DPR TYPE'] != 'SKY')] - # raise error when no science frames are present + # report error when no science frames are present if len(sci_files) == 0: - raise ValueError('This dataset contains no science frame. There should be at least one!') + self._logger.error('This dataset contains no science frame. There should be at least one!') + self._update_recipe_status('sort_frames', vltpf.ERROR) + return # build indices files = [] @@ -1005,10 +1027,6 @@ def sort_frames(self): frames_info.to_csv(path.preproc / 'frames.csv') self._frames_info = frames_info - # update recipe execution - self._logger.debug('> update recipe execution') - self._recipe_execution['sort_frames'] = True - # # print some info # @@ -1053,7 +1071,10 @@ def sort_frames(self): self._logger.info(' * PA: {0:.2f}° ==> {1:.2f}° = {2:.2f}°'.format(pa_start, pa_end, np.abs(pa_end-pa_start))) self._logger.info(' * POSANG: {0}'.format(', '.join(['{:.2f}°'.format(p) for p in posang]))) + # update recipe execution + self._update_recipe_status('sort_frames', vltpf.SUCCESS) + def check_files_association(self): ''' Performs the calibration files association as a sanity check @@ -1062,12 +1083,13 @@ def check_files_association(self): interupted in case of error. 
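
The raise-free error handling adopted throughout this series boils down to three moves: log the problem, mark the recipe as failed, and return. A minimal stand-alone sketch of the pattern follows; the recipe name, the inputs_ok flag and the integer constants are illustrative stand-ins for the vltpf module constants, not code from the patch:

    import collections
    import logging

    NOTSET, SUCCESS, ERROR = 0, 1, 2       # stand-ins for the vltpf constants

    logger = logging.getLogger('demo')
    recipes_status = collections.OrderedDict()

    def update_recipe_status(recipe, status):
        # keep the most recently executed recipe at the end of the dict
        recipes_status[recipe] = status
        recipes_status.move_to_end(recipe)

    def some_recipe(inputs_ok):
        # log and flag instead of raising, so that a Dataset-level loop
        # can carry on with its other reductions
        if not inputs_ok:
            logger.error('Missing inputs')
            update_recipe_status('some_recipe', ERROR)
            return
        update_recipe_status('some_recipe', SUCCESS)

    some_recipe(inputs_ok=True)
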
''' - # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'check_files_association', self.recipe_requirements, - logger=self._logger) - self._logger.info('File association for calibrations') + # check if recipe can be executed + if not toolbox.recipe_executable(self._recipes_status, 'check_files_association', + self.recipe_requirements, logger=self._logger): + return + # parameters path = self._path files_info = self._files_info @@ -1075,12 +1097,16 @@ def check_files_association(self): # instrument arm arm = files_info['SEQ ARM'].unique() if len(arm) != 1: - raise ValueError('Sequence is mixing different instruments: {0}'.format(arm)) + self._logger.error('Sequence is mixing different instruments: {0}'.format(arm)) + self._update_recipe_status('check_files_association', vltpf.ERROR) + return # IFS obs mode modes = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS2 COMB IFS'].unique() if len(modes) != 1: - raise ValueError('Sequence is mixing YJ and YJH observations.') + self._logger.error('Sequence is mixing YJ and YJH observations.') + self._update_recipe_status('check_files_association', vltpf.ERROR) + return mode = modes[0] if mode == 'OBS_YJ': @@ -1088,7 +1114,9 @@ def check_files_association(self): elif mode == 'OBS_H': mode_short = 'YJH' else: - raise ValueError('Unknown IFS mode {0}'.format(mode)) + self._logger.error('Unknown IFS mode {0}'.format(mode)) + self._update_recipe_status('check_files_association', vltpf.ERROR) + return # specific data frame for calibrations # keep static calibrations and sky backgrounds @@ -1291,7 +1319,8 @@ def check_files_association(self): self._logger.debug('> report status') if error_flag: self._logger.error('There are {0} warning(s) and {1} error(s) in the classification of files'.format(warning_flag, error_flag)) - raise ValueError('There is {0} errors that should be solved before proceeding'.format(error_flag)) + self._update_recipe_status('check_files_association', vltpf.ERROR) + return else: self._logger.warning('There are {0} warning(s) and {1} error(s) in the classification of files'.format(warning_flag, error_flag)) @@ -1301,8 +1330,7 @@ def check_files_association(self): self._files_info = files_info # update recipe execution - self._logger.debug('> update recipe execution') - self._recipe_execution['check_files_association'] = True + self._update_recipe_status('check_files_association', vltpf.SUCCESS) def sph_ifs_cal_dark(self, silent=True): @@ -1315,11 +1343,12 @@ def sph_ifs_cal_dark(self, silent=True): Suppress esorex output. Default is True ''' - # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_cal_dark', self.recipe_requirements, - logger=self._logger) - self._logger.info('Darks and backgrounds') + + # check if recipe can be executed + if not toolbox.recipe_executable(self._recipes_status, 'sph_ifs_cal_dark', + self.recipe_requirements, logger=self._logger): + return # parameters path = self._path @@ -1378,8 +1407,9 @@ def sph_ifs_cal_dark(self, silent=True): # check esorex if shutil.which('esorex') is None: - raise NameError('esorex does not appear to be in your PATH. Please make sure ' + - 'that the ESO pipeline is properly installed before running VLTPF.') + self._logger.error('esorex does not appear to be in your PATH. 
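
shutil.which() returns None when no matching executable is found on the PATH, which is what the esorex check above relies on. A compact sketch of the check-and-run sequence (run_esorex and its arguments are hypothetical, not part of the patch):

    import shutil
    import subprocess

    def run_esorex(recipe, sof, cwd='.'):
        # bail out early when the ESO pipeline is not installed
        if shutil.which('esorex') is None:
            return False
        # esorex signals failure through a non-zero exit code
        proc = subprocess.run(['esorex', recipe, str(sof)], cwd=cwd)
        return proc.returncode == 0
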
Please make sure that the ESO pipeline is properly installed before running VLTPF.') + self._update_recipe_status('sph_ifs_cal_dark', vltpf.ERROR) + return # execute esorex self._logger.debug('> execute esorex') @@ -1389,7 +1419,9 @@ def sph_ifs_cal_dark(self, silent=True): proc = subprocess.run(args, cwd=path.tmp) if proc.returncode != 0: - raise ValueError('esorex process was not successful') + self._logger.error('esorex process was not successful') + self._update_recipe_status('sph_ifs_cal_dark', vltpf.ERROR) + return # store products self._logger.debug('> update files_info data frame') @@ -1413,9 +1445,8 @@ def sph_ifs_cal_dark(self, silent=True): files_info.to_csv(path.preproc / 'files.csv') # update recipe execution - self._logger.debug('> update recipe execution') - self._recipe_execution['sph_ifs_cal_dark'] = True - + self._update_recipe_status('sph_ifs_cal_dark', vltpf.SUCCESS) + def sph_ifs_cal_detector_flat(self, silent=True): ''' @@ -1427,12 +1458,13 @@ def sph_ifs_cal_detector_flat(self, silent=True): Suppress esorex output. Default is True ''' - # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_cal_detector_flat', self.recipe_requirements, - logger=self._logger) - self._logger.info('Detector flats') + # check if recipe can be executed + if not toolbox.recipe_executable(self._recipes_status, 'sph_ifs_cal_detector_flat', + self.recipe_requirements, logger=self._logger): + return + # parameters path = self._path files_info = self._files_info @@ -1449,7 +1481,9 @@ def sph_ifs_cal_detector_flat(self, silent=True): elif mode == 'OBS_H': mode_short = 'YJH' else: - raise ValueError('Unknown IFS mode {0}'.format(mode)) + self._logger.error('Unknown IFS mode {0}'.format(mode)) + self._update_recipe_status('sph_ifs_cal_detector_flat', vltpf.ERROR) + return # bpm files cfiles = files_info[files_info['PRO CATG'] == 'IFS_STATIC_BADPIXELMAP'].index @@ -1469,7 +1503,9 @@ def sph_ifs_cal_detector_flat(self, silent=True): if len(files) == 0: continue elif len(files) != 2: - raise ValueError('There should be exactly 2 raw flat files. Found {0}.'.format(len(files))) + self._logger.error('There should be exactly 2 raw flat files. Found {0}.'.format(len(files))) + self._update_recipe_status('sph_ifs_cal_detector_flat', vltpf.ERROR) + return # create the flat and bpm flat, bpm = compute_detector_flat(files, bpm_files=bpm_files, mask_vignetting=True, logger=self._logger) @@ -1509,8 +1545,7 @@ def sph_ifs_cal_detector_flat(self, silent=True): files_info.to_csv(path.preproc / 'files.csv') # update recipe execution - self._logger.debug('> update recipe execution') - self._recipe_execution['sph_ifs_cal_detector_flat'] = True + self._update_recipe_status('sph_ifs_cal_detector_flat', vltpf.SUCCESS) def sph_ifs_cal_specpos(self, silent=True): @@ -1523,12 +1558,13 @@ def sph_ifs_cal_specpos(self, silent=True): Suppress esorex output. 
Default is True ''' - # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_cal_specpos', self.recipe_requirements, - logger=self._logger) - self._logger.info('Microspectra positions') + # check if recipe can be executed + if not toolbox.recipe_executable(self._recipes_status, 'sph_ifs_cal_specpos', + self.recipe_requirements, logger=self._logger): + return + # parameters path = self._path files_info = self._files_info @@ -1536,12 +1572,16 @@ def sph_ifs_cal_specpos(self, silent=True): # get list of files specpos_file = files_info[np.logical_not(files_info['PROCESSED']) & (files_info['DPR TYPE'] == 'SPECPOS,LAMP')] if len(specpos_file) != 1: - raise ValueError('There should be exactly 1 raw specpos files. Found {0}.'.format(len(specpos_file))) + self._logger.error('There should be exactly 1 raw specpos files. Found {0}.'.format(len(specpos_file))) + self._update_recipe_status('sph_ifs_cal_specpos', vltpf.ERROR) + return dark_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IFS_MASTER_DARK') & (files_info['DPR CATG'] == 'CALIB') & (files_info['DET SEQ1 DIT'].round(2) == 1.65)] if len(dark_file) == 0: - raise ValueError('There should at least 1 dark file for calibrations. Found none.') + self._logger.error('There should at least 1 dark file for calibrations. Found none.') + self._update_recipe_status('sph_ifs_cal_specpos', vltpf.ERROR) + return # IFS obs mode mode = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS2 COMB IFS'].unique()[0] @@ -1550,7 +1590,9 @@ def sph_ifs_cal_specpos(self, silent=True): elif mode == 'OBS_H': Hmode = 'TRUE' else: - raise ValueError('Unknown IFS mode {0}'.format(mode)) + self._logger.error('Unknown IFS mode {0}'.format(mode)) + self._update_recipe_status('sph_ifs_cal_specpos', vltpf.ERROR) + return # create sof self._logger.debug('> create sof file') @@ -1574,7 +1616,9 @@ def sph_ifs_cal_specpos(self, silent=True): # check esorex if shutil.which('esorex') is None: - raise NameError('esorex does not appear to be in your PATH. Please make sure that the ESO pipeline is properly installed before running VLTPF.') + self._logger.error('esorex does not appear to be in your PATH. Please make sure that the ESO pipeline is properly installed before running VLTPF.') + self._update_recipe_status('sph_ifs_cal_specpos', vltpf.ERROR) + return # execute esorex self._logger.debug('> execute esorex') @@ -1584,7 +1628,9 @@ def sph_ifs_cal_specpos(self, silent=True): proc = subprocess.run(args, cwd=path.tmp) if proc.returncode != 0: - raise ValueError('esorex process was not successful') + self._logger.error('esorex process was not successful') + self._update_recipe_status('sph_ifs_cal_specpos', vltpf.ERROR) + return # store products self._logger.debug('> update files_info data frame') @@ -1601,8 +1647,7 @@ def sph_ifs_cal_specpos(self, silent=True): files_info.to_csv(path.preproc / 'files.csv') # update recipe execution - self._logger.debug('> update recipe execution') - self._recipe_execution['sph_ifs_cal_specpos'] = True + self._update_recipe_status('sph_ifs_cal_specpos', vltpf.SUCCESS) def sph_ifs_cal_wave(self, silent=True): @@ -1615,12 +1660,13 @@ def sph_ifs_cal_wave(self, silent=True): Suppress esorex output. 
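
Each esorex recipe is driven by a SOF file, a plain text file with one 'path category' pair per line, as written by the sof-creation blocks in these recipes. A sketch of such a writer; the file names and frame categories here are only examples:

    from pathlib import Path

    def write_sof(sof_path, entries):
        # entries: iterable of (fits_path, category) pairs
        with open(sof_path, 'w') as sof:
            for fits_file, catg in entries:
                sof.write('{0} {1}\n'.format(fits_file, catg))

    write_sof(Path('/tmp/specpos.sof'),
              [(Path('/data/raw/specpos.fits'), 'IFS_SPECPOS_RAW'),
               (Path('/data/calib/dark.fits'), 'IFS_MASTER_DARK')])
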
Default is True ''' - # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_cal_wave', self.recipe_requirements, - logger=self._logger) - self._logger.info('Wavelength calibration') + # check if recipe can be executed + if not toolbox.recipe_executable(self._recipes_status, 'sph_ifs_cal_wave', + self.recipe_requirements, logger=self._logger): + return + # parameters path = self._path files_info = self._files_info @@ -1628,16 +1674,22 @@ def sph_ifs_cal_wave(self, silent=True): # get list of files wave_file = files_info[np.logical_not(files_info['PROCESSED']) & (files_info['DPR TYPE'] == 'WAVE,LAMP')] if len(wave_file) != 1: - raise ValueError('There should be exactly 1 raw wavelength calibration file. Found {0}.'.format(len(wave_file))) + self._logger.error('There should be exactly 1 raw wavelength calibration file. Found {0}.'.format(len(wave_file))) + self._update_recipe_status('sph_ifs_cal_wave', vltpf.ERROR) + return specpos_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IFS_SPECPOS')] if len(specpos_file) != 1: - raise ValueError('There should be exactly 1 specpos file. Found {0}.'.format(len(specpos_file))) - + self._logger.error('There should be exactly 1 specpos file. Found {0}.'.format(len(specpos_file))) + self._update_recipe_status('sph_ifs_cal_wave', vltpf.ERROR) + return + dark_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IFS_MASTER_DARK') & (files_info['DPR CATG'] == 'CALIB') & (files_info['DET SEQ1 DIT'].round(2) == 1.65)] if len(dark_file) == 0: - raise ValueError('There should at least 1 dark file for calibrations. Found none.') + self._logger.error('There should at least 1 dark file for calibrations. Found none.') + self._update_recipe_status('sph_ifs_cal_wave', vltpf.ERROR) + return # IFS obs mode mode = files_info.loc[files_info['DPR CATG'] == 'SCIENCE', 'INS2 COMB IFS'].unique()[0] @@ -1682,8 +1734,9 @@ def sph_ifs_cal_wave(self, silent=True): # check esorex if shutil.which('esorex') is None: - raise NameError('esorex does not appear to be in your PATH. Please make sure ' + - 'that the ESO pipeline is properly installed before running VLTPF.') + self._logger.error('esorex does not appear to be in your PATH. Please make sure that the ESO pipeline is properly installed before running VLTPF.') + self._update_recipe_status('sph_ifs_cal_wave', vltpf.ERROR) + return # execute esorex self._logger.debug('> execute esorex') @@ -1693,7 +1746,9 @@ def sph_ifs_cal_wave(self, silent=True): proc = subprocess.run(args, cwd=path.tmp) if proc.returncode != 0: - raise ValueError('esorex process was not successful') + self._logger.error('esorex process was not successful') + self._update_recipe_status('sph_ifs_cal_wave', vltpf.ERROR) + return # store products self._logger.debug('> update files_info data frame') @@ -1721,8 +1776,7 @@ def sph_ifs_cal_wave(self, silent=True): fits.writeto(path.preproc / 'wavelength_default.fits', wave_drh, overwrite=True) # update recipe execution - self._logger.debug('> update recipe execution') - self._recipe_execution['sph_ifs_cal_wave'] = True + self._update_recipe_status('sph_ifs_cal_wave', vltpf.SUCCESS) def sph_ifs_cal_ifu_flat(self, silent=True): @@ -1735,12 +1789,13 @@ def sph_ifs_cal_ifu_flat(self, silent=True): Suppress esorex output. 
Default is True ''' - # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_cal_ifu_flat', self.recipe_requirements, - logger=self._logger) - self._logger.info('Integral-field unit flat') + # check if recipe can be executed + if not toolbox.recipe_executable(self._recipes_status, 'sph_ifs_cal_ifu_flat', + self.recipe_requirements, logger=self._logger): + return + # parameters path = self._path files_info = self._files_info @@ -1752,48 +1807,66 @@ def sph_ifs_cal_ifu_flat(self, silent=True): elif mode == 'OBS_H': mode_short = 'YJH' else: - raise ValueError('Unknown IFS mode {0}'.format(mode)) + self._logger.error('Unknown IFS mode {0}'.format(mode)) + self._update_recipe_status('sph_ifs_cal_ifu_flat', vltpf.ERROR) + return # get list of files ifu_flat_file = files_info[np.logical_not(files_info['PROCESSED']) & (files_info['DPR TYPE'] == 'FLAT,LAMP') & (files_info['DPR TECH'] == 'IFU')] if len(ifu_flat_file) != 1: - raise ValueError('There should be exactly 1 raw IFU flat file. Found {0}.'.format(len(ifu_flat_file))) + self._logger.error('There should be exactly 1 raw IFU flat file. Found {0}.'.format(len(ifu_flat_file))) + self._update_recipe_status('sph_ifs_cal_ifu_flat', vltpf.ERROR) + return wave_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IFS_WAVECALIB')] if len(wave_file) != 1: - raise ValueError('There should be exactly 1 wavelength calibration file. Found {0}.'.format(len(wave_file))) + self._logger.error('There should be exactly 1 wavelength calibration file. Found {0}.'.format(len(wave_file))) + self._update_recipe_status('sph_ifs_cal_ifu_flat', vltpf.ERROR) + return dark_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IFS_MASTER_DARK') & (files_info['DPR CATG'] == 'CALIB') & (files_info['DET SEQ1 DIT'].round(2) == 1.65)] if len(dark_file) == 0: - raise ValueError('There should at least 1 dark file for calibrations. Found none.') + self._logger.error('There should at least 1 dark file for calibrations. Found none.') + self._update_recipe_status('sph_ifs_cal_ifu_flat', vltpf.ERROR) + return flat_white_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IFS_MASTER_DFF') & (files_info['INS2 COMB IFS'] == 'CAL_BB_2_{0}'.format(mode_short))] if len(flat_white_file) != 1: - raise ValueError('There should be exactly 1 white flat file. Found {0}.'.format(len(flat_white_file))) + self._logger.error('There should be exactly 1 white flat file. Found {0}.'.format(len(flat_white_file))) + self._update_recipe_status('sph_ifs_cal_ifu_flat', vltpf.ERROR) + return flat_1020_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IFS_MASTER_DFF') & (files_info['INS2 COMB IFS'] == 'CAL_NB1_1_{0}'.format(mode_short))] if len(flat_1020_file) != 1: - raise ValueError('There should be exactly 1 1020 nm flat file. Found {0}.'.format(len(flat_1020_file))) + self._logger.error('There should be exactly 1 1020 nm flat file. Found {0}.'.format(len(flat_1020_file))) + self._update_recipe_status('sph_ifs_cal_ifu_flat', vltpf.ERROR) + return flat_1230_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IFS_MASTER_DFF') & (files_info['INS2 COMB IFS'] == 'CAL_NB2_1_{0}'.format(mode_short))] if len(flat_1230_file) != 1: - raise ValueError('There should be exactly 1 1230 nm flat file. Found {0}.'.format(len(flat_1230_file))) + self._logger.error('There should be exactly 1 1230 nm flat file. 
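
The 'exactly one file' validation recurs for every calibration product in this recipe. A small helper along the following lines (purely a sketch, not part of the patch) would factor out the pattern:

    def unique_file(files_info, mask, description, logger):
        # return the single matching index, or None after logging an error
        sel = files_info[mask]
        if len(sel) != 1:
            logger.error('There should be exactly 1 {0}. Found {1}.'.format(description, len(sel)))
            return None
        return sel.index[0]

Callers would then test the result for None and set the recipe status to vltpf.ERROR, exactly as the explicit checks around here do.
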
Found {0}.'.format(len(flat_1230_file))) + self._update_recipe_status('sph_ifs_cal_ifu_flat', vltpf.ERROR) + return flat_1300_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IFS_MASTER_DFF') & (files_info['INS2 COMB IFS'] == 'CAL_NB3_1_{0}'.format(mode_short))] if len(flat_1300_file) != 1: - raise ValueError('There should be exactly 1 1300 nm flat file. Found {0}.'.format(len(flat_1300_file))) + self._logger.error('There should be exactly 1 1300 nm flat file. Found {0}.'.format(len(flat_1300_file))) + self._update_recipe_status('sph_ifs_cal_ifu_flat', vltpf.ERROR) + return if mode == 'OBS_H': flat_1550_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IFS_MASTER_DFF') & (files_info['INS2 COMB IFS'] == 'CAL_NB4_2_{0}'.format(mode_short))] if len(flat_1550_file) != 1: - raise ValueError('There should be exactly 1 1550 nm flat file. Found {0}.'.format(len(flat_1550_file))) + self._logger.error('There should be exactly 1 1550 nm flat file. Found {0}.'.format(len(flat_1550_file))) + self._update_recipe_status('sph_ifs_cal_ifu_flat', vltpf.ERROR) + return # create sof self._logger.debug('> create sof file') @@ -1825,7 +1898,9 @@ def sph_ifs_cal_ifu_flat(self, silent=True): # check esorex if shutil.which('esorex') is None: - raise NameError('esorex does not appear to be in your PATH. Please make sure that the ESO pipeline is properly installed before running VLTPF.') + self._logger.error('esorex does not appear to be in your PATH. Please make sure that the ESO pipeline is properly installed before running VLTPF.') + self._update_recipe_status('sph_ifs_cal_ifu_flat', vltpf.ERROR) + return # execute esorex self._logger.debug('> execute esorex') @@ -1835,7 +1910,9 @@ def sph_ifs_cal_ifu_flat(self, silent=True): proc = subprocess.run(args, cwd=path.tmp) if proc.returncode != 0: - raise ValueError('esorex process was not successful') + self._logger.error('esorex process was not successful') + self._update_recipe_status('sph_ifs_cal_ifu_flat', vltpf.ERROR) + return # store products self._logger.debug('> update files_info data frame') @@ -1852,8 +1929,7 @@ def sph_ifs_cal_ifu_flat(self, silent=True): files_info.to_csv(path.preproc / 'files.csv') # update recipe execution - self._logger.debug('> update recipe execution') - self._recipe_execution['sph_ifs_cal_ifu_flat'] = True + self._update_recipe_status('sph_ifs_cal_ifu_flat', vltpf.SUCCESS) def sph_ifs_preprocess_science(self, @@ -1913,12 +1989,13 @@ def sph_ifs_preprocess_science(self, ''' - # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_preprocess_science', self.recipe_requirements, - logger=self._logger) - self._logger.info('Pre-process science files') + # check if recipe can be executed + if not toolbox.recipe_executable(self._recipes_status, 'sph_ifs_preprocess_science', + self.recipe_requirements, logger=self._logger): + return + # parameters path = self._path files_info = self._files_info @@ -1978,7 +2055,9 @@ def sph_ifs_preprocess_science(self, bkg = fits.getdata(path.calib / '{}.fits'.format(dfiles.index[0])) elif len(dfiles) > 1: # FIXME: handle cases when multiple backgrounds are found? 
- raise ValueError('Unexpected number of background files ({0})'.format(len(dfiles))) + self._logger.error('Unexpected number of background files ({0})'.format(len(dfiles))) + self._update_recipe_status('sph_ifs_preprocess_science', vltpf.ERROR) + return # process files for idx, (fname, finfo) in enumerate(sfiles.iterrows()): @@ -2019,7 +2098,9 @@ def sph_ifs_preprocess_science(self, frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'mean', logger=self._logger) elif collapse_type == 'coadd': if (not isinstance(coadd_value, int)) or (coadd_value <= 1): - raise TypeError('coadd_value must be an integer >1') + self._logger.error('coadd_value must be an integer >1') + self._update_recipe_status('sph_ifs_preprocess_science', vltpf.ERROR) + return coadd_value = int(coadd_value) NDIT = len(img) @@ -2027,7 +2108,9 @@ def sph_ifs_preprocess_science(self, dropped = NDIT % coadd_value if coadd_value > NDIT: - raise ValueError('coadd_value ({0}) must be < NDIT ({1})'.format(coadd_value, NDIT)) + self._logger.error('coadd_value ({0}) must be < NDIT ({1})'.format(coadd_value, NDIT)) + self._update_recipe_status('sph_ifs_preprocess_science', vltpf.ERROR) + return self._logger.info(' ==> collapse: coadd by {0} ({1} -> {2} frames, {3} dropped)'.format(coadd_value, NDIT, NDIT_new, dropped)) @@ -2039,7 +2122,9 @@ def sph_ifs_preprocess_science(self, frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'coadd', coadd_value=coadd_value, logger=self._logger) else: - raise ValueError('Unknown collapse type {0}'.format(collapse_type)) + self._logger.error('Unknown collapse type {0}'.format(collapse_type)) + self._update_recipe_status('sph_ifs_preprocess_science', vltpf.ERROR) + return else: frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'none', logger=self._logger) @@ -2097,8 +2182,7 @@ def sph_ifs_preprocess_science(self, self._frames_info_preproc = frames_info_preproc # update recipe execution - self._logger.debug('> update recipe execution') - self._recipe_execution['sph_ifs_preprocess_science'] = True + self._update_recipe_status('sph_ifs_preprocess_science', vltpf.SUCCESS) def sph_ifs_preprocess_wave(self): @@ -2107,16 +2191,17 @@ def sph_ifs_preprocess_wave(self): recalibration of the wavelength ''' + self._logger.info('Pre-process wavelength calibration file') + # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_preprocess_wave', self.recipe_requirements, - logger=self._logger) + if not toolbox.recipe_executable(self._recipes_status, 'sph_ifs_preprocess_wave', + self.recipe_requirements, logger=self._logger): + return # parameters path = self._path files_info = self._files_info - self._logger.info('Pre-process wavelength calibration file') - # bpm bpm_files = files_info[files_info['PRO CATG'] == 'IFS_STATIC_BADPIXELMAP'].index bpm_files = [path.calib / '{}.fits'.format(f) for f in bpm_files] @@ -2126,13 +2211,17 @@ def sph_ifs_preprocess_wave(self): dark_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IFS_MASTER_DARK') & (files_info['DPR CATG'] == 'CALIB') & (files_info['DET SEQ1 DIT'].round(2) == 1.65)] if len(dark_file) == 0: - raise ValueError('There should at least 1 dark file for calibrations. Found none.') + self._logger.error('There should at least 1 dark file for calibrations. 
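
The master dark is looked up by rounding the 'DET SEQ1 DIT' column to two decimals and comparing it with the requested DIT. The selection can be reproduced with a toy DataFrame; the column values below are made up:

    import pandas as pd

    files_info = pd.DataFrame({
        'PROCESSED': [True, True],
        'PRO CATG': ['IFS_MASTER_DARK', 'IFS_MASTER_DARK'],
        'DPR CATG': ['CALIB', 'CALIB'],
        'DET SEQ1 DIT': [1.6503, 4.0],
    })

    DIT = 1.65
    dark = files_info[files_info['PROCESSED'] &
                      (files_info['PRO CATG'] == 'IFS_MASTER_DARK') &
                      (files_info['DPR CATG'] == 'CALIB') &
                      (files_info['DET SEQ1 DIT'].round(2) == DIT)]
    print(len(dark))   # -> 1
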
Found none.') + self._update_recipe_status('sph_ifs_preprocess_wave', vltpf.ERROR) + return bkg = fits.getdata(path.calib / '{}.fits'.format(dark_file.index[0])) # wavelength calibration wave_file = files_info[np.logical_not(files_info['PROCESSED']) & (files_info['DPR TYPE'] == 'WAVE,LAMP')] if len(wave_file) != 1: - raise ValueError('There should be exactly 1 raw wavelength calibration file. Found {0}.'.format(len(wave_file))) + self._logger.error('There should be exactly 1 raw wavelength calibration file. Found {0}.'.format(len(wave_file))) + self._update_recipe_status('sph_ifs_preprocess_wave', vltpf.ERROR) + return fname = wave_file.index[0] # read data @@ -2166,8 +2255,7 @@ def sph_ifs_preprocess_wave(self): overwrite=True, output_verify='silentfix') # update recipe execution - self._logger.debug('> update recipe execution') - self._recipe_execution['sph_ifs_preprocess_wave'] = True + self._update_recipe_status('sph_ifs_preprocess_wave', vltpf.SUCCESS) def sph_ifs_science_cubes(self, silent=True): @@ -2180,12 +2268,13 @@ def sph_ifs_science_cubes(self, silent=True): Suppress esorex output. Default is True ''' - # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_science_cubes', self.recipe_requirements, - logger=self._logger) - self._logger.info('Create science cubes') + # check if recipe can be executed + if not toolbox.recipe_executable(self._recipes_status, 'sph_ifs_science_cubes', + self.recipe_requirements, logger=self._logger): + return + # parameters path = self._path files_info = self._files_info @@ -2203,7 +2292,9 @@ def sph_ifs_science_cubes(self, silent=True): elif mode == 'OBS_H': mode_short = 'YJH' else: - raise ValueError('Unknown IFS mode {0}'.format(mode)) + self._logger.error('Unknown IFS mode {0}'.format(mode)) + self._update_recipe_status('sph_ifs_science_cubes', vltpf.ERROR) + return # get list of science files sci_files = sorted(list(path.preproc.glob('*_preproc.fits'))) @@ -2215,37 +2306,51 @@ def sph_ifs_science_cubes(self, silent=True): ifu_flat_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IFS_IFU_FLAT_FIELD')] if len(ifu_flat_file) != 1: - raise ValueError('There should be exactly 1 IFU flat file. Found {0}.'.format(len(ifu_flat_file))) + self._logger.error('There should be exactly 1 IFU flat file. Found {0}.'.format(len(ifu_flat_file))) + self._update_recipe_status('sph_ifs_science_cubes', vltpf.ERROR) + return wave_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IFS_WAVECALIB')] if len(wave_file) != 1: - raise ValueError('There should be exactly 1 wavelength calibration file. Found {0}.'.format(len(wave_file))) + self._logger.error('There should be exactly 1 wavelength calibration file. Found {0}.'.format(len(wave_file))) + self._update_recipe_status('sph_ifs_science_cubes', vltpf.ERROR) + return flat_white_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IFS_MASTER_DFF') & (files_info['INS2 COMB IFS'] == 'CAL_BB_2_{0}'.format(mode_short))] if len(flat_white_file) != 1: - raise ValueError('There should be exactly 1 white flat file. Found {0}.'.format(len(flat_white_file))) + self._logger.error('There should be exactly 1 white flat file. 
Found {0}.'.format(len(flat_white_file))) + self._update_recipe_status('sph_ifs_science_cubes', vltpf.ERROR) + return flat_1020_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IFS_MASTER_DFF') & (files_info['INS2 COMB IFS'] == 'CAL_NB1_1_{0}'.format(mode_short))] if len(flat_1020_file) != 1: - raise ValueError('There should be exactly 1 1020 nm flat file. Found {0}.'.format(len(flat_1020_file))) + self._logger.error('There should be exactly 1 1020 nm flat file. Found {0}.'.format(len(flat_1020_file))) + self._update_recipe_status('sph_ifs_science_cubes', vltpf.ERROR) + return flat_1230_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IFS_MASTER_DFF') & (files_info['INS2 COMB IFS'] == 'CAL_NB2_1_{0}'.format(mode_short))] if len(flat_1230_file) != 1: - raise ValueError('There should be exactly 1 1230 nm flat file. Found {0}.'.format(len(flat_1230_file))) + self._logger.error('There should be exactly 1 1230 nm flat file. Found {0}.'.format(len(flat_1230_file))) + self._update_recipe_status('sph_ifs_science_cubes', vltpf.ERROR) + return flat_1300_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IFS_MASTER_DFF') & (files_info['INS2 COMB IFS'] == 'CAL_NB3_1_{0}'.format(mode_short))] if len(flat_1300_file) != 1: - raise ValueError('There should be exactly 1 1300 nm flat file. Found {0}.'.format(len(flat_1300_file))) + self._logger.error('There should be exactly 1 1300 nm flat file. Found {0}.'.format(len(flat_1300_file))) + self._update_recipe_status('sph_ifs_science_cubes', vltpf.ERROR) + return if mode == 'OBS_H': flat_1550_file = files_info[files_info['PROCESSED'] & (files_info['PRO CATG'] == 'IFS_MASTER_DFF') & (files_info['INS2 COMB IFS'] == 'CAL_NB4_2_{0}'.format(mode_short))] if len(flat_1550_file) != 1: - raise ValueError('There should be exactly 1 1550 nm flat file. Found {0}.'.format(len(flat_1550_file))) + self._logger.error('There should be exactly 1 1550 nm flat file. Found {0}.'.format(len(flat_1550_file))) + self._update_recipe_status('sph_ifs_science_cubes', vltpf.ERROR) + return # create sof self._logger.debug('> create sof file') @@ -2277,7 +2382,9 @@ def sph_ifs_science_cubes(self, silent=True): # check esorex if shutil.which('esorex') is None: - raise NameError('esorex does not appear to be in your PATH. Please make sure that the ESO pipeline is properly installed before running VLTPF.') + self._logger.error('esorex does not appear to be in your PATH. Please make sure that the ESO pipeline is properly installed before running VLTPF.') + self._update_recipe_status('sph_ifs_science_cubes', vltpf.ERROR) + return # execute esorex self._logger.debug('> execute esorex') @@ -2287,8 +2394,9 @@ def sph_ifs_science_cubes(self, silent=True): proc = subprocess.run(args, cwd=path.tmp) if proc.returncode != 0: - # raise ValueError('esorex process was not successful') self._logger.error('esorex was not successful. 
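
The silent keyword accepted by these recipes maps naturally onto subprocess output redirection; the exact plumbing is not visible in this hunk, so the following is only a plausible sketch:

    import subprocess

    def run_silently(args, cwd, silent=True):
        # swallow esorex's copious stdout when silent, keep stderr visible
        if silent:
            return subprocess.run(args, cwd=cwd, stdout=subprocess.DEVNULL)
        return subprocess.run(args, cwd=cwd)
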
Trying to process some of the frames...') + self._update_recipe_status('sph_ifs_science_cubes', vltpf.ERROR) + return # post-process self._logger.info(' * post-processing files') @@ -2304,8 +2412,7 @@ def sph_ifs_science_cubes(self, silent=True): shutil.move(file, path.preproc / file.name) # update recipe execution - self._logger.debug('> update recipe execution') - self._recipe_execution['sph_ifs_science_cubes'] = True + self._update_recipe_status('sph_ifs_science_cubes', vltpf.SUCCESS) def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot=True): @@ -2333,12 +2440,13 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot= ''' - # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_wavelength_recalibration', self.recipe_requirements, - logger=self._logger) - self._logger.info('Wavelength recalibration') + # check if recipe can be executed + if not toolbox.recipe_executable(self._recipes_status, 'sph_ifs_wavelength_recalibration', + self.recipe_requirements, logger=self._logger): + return + # parameters path = self._path nwave = self._nwave @@ -2535,8 +2643,7 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot= plt.savefig(path.products / 'wavelength_recalibration.pdf') # update recipe execution - self._logger.debug('> update recipe execution') - self._recipe_execution['sph_ifs_wavelength_recalibration'] = True + self._update_recipe_status('sph_ifs_wavelength_recalibration', vltpf.SUCCESS) def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), plot=True): @@ -2558,12 +2665,13 @@ def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), plot=True): ''' - # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_star_center', self.recipe_requirements, - logger=self._logger) - self._logger.info('Star centers determination') + # check if recipe can be executed + if not toolbox.recipe_executable(self._recipes_status, 'sph_ifs_star_center', + self.recipe_requirements, logger=self._logger): + return + # parameters path = self._path nwave = self._nwave @@ -2643,8 +2751,7 @@ def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), plot=True): fits.writeto(path.preproc / '{}centers.fits'.format(fname), img_center, overwrite=True) # update recipe execution - self._logger.debug('> update recipe execution') - self._recipe_execution['sph_ifs_star_center'] = True + self._update_recipe_status('sph_ifs_star_center', vltpf.SUCCESS) def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_anamorphism=True, @@ -2745,12 +2852,13 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a ''' - # check if recipe can be executed - toolbox.check_recipe_execution(self._recipe_execution, 'sph_ifs_combine_data', self.recipe_requirements, - logger=self._logger) - self._logger.info('Combine science data') + # check if recipe can be executed + if not toolbox.recipe_executable(self._recipes_status, 'sph_ifs_combine_data', + self.recipe_requirements, logger=self._logger): + return + # parameters path = self._path nwave = self._nwave @@ -2767,7 +2875,9 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a self._logger.warning('Using default wavelength calibration.') wave = fits.getdata(wfile) else: - raise FileExistsError('Missing default or recalibrated wavelength calibration. 
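
The combine step prefers the recalibrated wavelength solution and falls back on the DRH default. The lookup order can be sketched with pathlib; the directory layout follows the preproc folder used in the patches:

    from pathlib import Path

    def wavelength_file(preproc):
        # prefer the recalibrated solution, fall back on the DRH default
        for name in ('wavelength_recalibrated.fits', 'wavelength_default.fits'):
            wfile = preproc / name
            if wfile.exists():
                return wfile
        return None

    wfile = wavelength_file(Path('/path/to/reduction/preproc'))
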
You must first run either sph_ifs_wave_calib or sph_ifs_wavelength_recalibration().') + self._logger.error('Missing default or recalibrated wavelength calibration. You must first run either sph_ifs_wave_calib or sph_ifs_wavelength_recalibration().') + self._update_recipe_status('sph_ifs_combine_data', vltpf.ERROR) + return fits.writeto(path.products / 'wavelength.fits', wave, overwrite=True) # max images size @@ -2791,7 +2901,9 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a manual_center = np.array(manual_center) if (manual_center.shape != (2,)) and (manual_center.shape != (nwave, 2)): - raise ValueError('manual_center does not have the right number of dimensions.') + self._logger.error('manual_center does not have the right number of dimensions.') + self._update_recipe_status('sph_ifs_combine_data', vltpf.ERROR) + return if manual_center.shape == (2,): manual_center = np.full((nwave, 2), manual_center, dtype=np.float) @@ -3110,8 +3222,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a del sci_cube_scaled # update recipe execution - self._logger.debug('> update recipe execution') - self._recipe_execution['sph_ifs_combine_data'] = True + self._update_recipe_status('sph_ifs_combine_data', vltpf.SUCCESS) def sph_ifs_clean(self, delete_raw=False, delete_products=False): @@ -3129,6 +3240,11 @@ def sph_ifs_clean(self, delete_raw=False, delete_products=False): self._logger.info('Clean reduction data') + # check if recipe can be executed + if not toolbox.recipe_executable(self._recipes_status, 'sph_ifs_clean', + self.recipe_requirements, logger=self._logger): + return + # parameters path = self._path @@ -3167,5 +3283,4 @@ def sph_ifs_clean(self, delete_raw=False, delete_products=False): shutil.rmtree(path.products, ignore_errors=True) # update recipe execution - self._logger.debug('> update recipe execution') - self._recipe_execution['sph_ifs_clean'] = True + self._update_recipe_status('sph_ifs_clean', vltpf.SUCCESS) From c5e5fa6e8506968a9e3e12c000b58d246105c123 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Sun, 15 Sep 2019 14:26:07 +0200 Subject: [PATCH 094/101] Improve error handling in SPHERE.Dataset Ticket #41 - improve error handling in toolbox, plus associated changes in IRDIS and IFS reduction classes - several changes in SPHERE.Dataset --- examples/sphere_dataset.py | 6 +- vltpf/IFS.py | 27 +++++++- vltpf/IRDIS/ImagingReduction.py | 18 +++++- vltpf/IRDIS/SpectroReduction.py | 16 ++++- vltpf/SPHERE.py | 110 ++++++++++++++++++++------------ vltpf/toolbox.py | 31 +++++---- 6 files changed, 149 insertions(+), 59 deletions(-) diff --git a/examples/sphere_dataset.py b/examples/sphere_dataset.py index ebf954a..d8cbcdd 100644 --- a/examples/sphere_dataset.py +++ b/examples/sphere_dataset.py @@ -1,6 +1,7 @@ import vltpf.SPHERE as SPHERE -ds = SPHERE.Dataset('/Users/avigan/data/VLTPF-test-target/test/', log_level='debug') +#%% init data set +ds = SPHERE.Dataset('/Users/avigan/data/VLTPF-test-target/test/', log_level='info') print('IRDIS reductions:') for red in ds.IRDIS_reductions: @@ -11,3 +12,6 @@ for red in ds.IFS_reductions: print(red) print() + +#%% full reduction with default parameters +ds.full_reduction() diff --git a/vltpf/IFS.py b/vltpf/IFS.py index ad97adc..0977e02 100644 --- a/vltpf/IFS.py +++ b/vltpf/IFS.py @@ -1020,7 +1020,10 @@ def sort_frames(self): toolbox.compute_times(frames_info, logger=self._logger) # compute angles (ra, dec, parang) - toolbox.compute_angles(frames_info, logger=self._logger) + ret = 
toolbox.compute_angles(frames_info, logger=self._logger)
+        if ret == vltpf.ERROR:
+            self._update_recipe_status('sort_frames', vltpf.ERROR)
+            return
 
         # save
         self._logger.debug('> save frames.csv')
@@ -1488,6 +1491,10 @@ def sph_ifs_cal_detector_flat(self, silent=True):
         # bpm files
         cfiles = files_info[files_info['PRO CATG'] == 'IFS_STATIC_BADPIXELMAP'].index
         bpm_files = [path.calib / '{}.fits'.format(f) for f in cfiles]
+        if len(bpm_files) == 0:
+            self._logger.error('Could not find any bad pixel maps')
+            self._update_recipe_status('sph_ifs_cal_detector_flat', vltpf.ERROR)
+            return
 
         # loop on wavelengths
         waves = [ 0, 1020, 1230, 1300, 1550]
@@ -2012,6 +2019,11 @@ def sph_ifs_preprocess_science(self,
         bpm_files = files_info[files_info['PRO CATG'] == 'IFS_STATIC_BADPIXELMAP'].index
         bpm_files = [path.calib / '{}.fits'.format(f) for f in bpm_files]
+        if len(bpm_files) == 0:
+            self._logger.error('Could not find any bad pixel maps')
+            self._update_recipe_status('sph_ifs_preprocess_science', vltpf.ERROR)
+            return
+
         bpm = toolbox.compute_bad_pixel_map(bpm_files, logger=self._logger)
 
         # final dataframe
@@ -2128,7 +2140,13 @@ def sph_ifs_preprocess_science(self,
             else:
                 frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'none', logger=self._logger)
 
-            # merge collapsed frames_info
+            # check for any error during collapse of frame information
+            if frames_info_new is None:
+                self._logger.error('An error occurred when collapsing frames info')
+                self._update_recipe_status('sph_ifs_preprocess_science', vltpf.ERROR)
+                return
+
+            # merge frames_info
             frames_info_preproc = pd.concat((frames_info_preproc, frames_info_new))
 
             # background subtraction
@@ -2205,6 +2223,11 @@ def sph_ifs_preprocess_wave(self):
         # bpm
         bpm_files = files_info[files_info['PRO CATG'] == 'IFS_STATIC_BADPIXELMAP'].index
         bpm_files = [path.calib / '{}.fits'.format(f) for f in bpm_files]
+        if len(bpm_files) == 0:
+            self._logger.error('Could not find any bad pixel maps')
+            self._update_recipe_status('sph_ifs_preprocess_wave', vltpf.ERROR)
+            return
+
         bpm = toolbox.compute_bad_pixel_map(bpm_files, logger=self._logger)
 
         # dark
diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py
index 6ea058b..8815d77 100644
--- a/vltpf/IRDIS/ImagingReduction.py
+++ b/vltpf/IRDIS/ImagingReduction.py
@@ -644,7 +644,10 @@ def sort_frames(self):
         toolbox.compute_times(frames_info, logger=self._logger)
 
         # compute angles (ra, dec, parang)
-        toolbox.compute_angles(frames_info, logger=self._logger)
+        ret = toolbox.compute_angles(frames_info, logger=self._logger)
+        if ret == vltpf.ERROR:
+            self._update_recipe_status('sort_frames', vltpf.ERROR)
+            return
 
         # save
         self._logger.debug('> save frames.csv')
@@ -1104,7 +1107,11 @@ def sph_ird_preprocess_science(self,
         bpm_files = files_info[(files_info['PRO CATG'] == 'IRD_STATIC_BADPIXELMAP') |
                                (files_info['PRO CATG'] == 'IRD_NON_LINEAR_BADPIXELMAP')].index
         bpm_files = [path.calib / '{}.fits'.format(f) for f in bpm_files]
-
+        if len(bpm_files) == 0:
+            self._logger.error('Could not find any bad pixel maps')
+            self._update_recipe_status('sph_ird_preprocess_science', vltpf.ERROR)
+            return
+
         bpm = toolbox.compute_bad_pixel_map(bpm_files, logger=self._logger)
 
         # mask dead regions
@@ -1243,6 +1250,13 @@ def sph_ird_preprocess_science(self,
             else:
                 frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'none', logger=self._logger)
 
+            # check for any error during collapse of frame information
+            if frames_info_new is None:
+                self._logger.error('An error occurred when collapsing frames info')
+                self._update_recipe_status('sph_ird_preprocess_science', vltpf.ERROR)
+                return
+
+            # merge frames info
             frames_info_preproc = pd.concat((frames_info_preproc, frames_info_new))
 
             # background subtraction
diff --git a/vltpf/IRDIS/SpectroReduction.py b/vltpf/IRDIS/SpectroReduction.py
index ef8f794..5916a1e 100644
--- a/vltpf/IRDIS/SpectroReduction.py
+++ b/vltpf/IRDIS/SpectroReduction.py
@@ -710,7 +710,10 @@ def sort_frames(self):
         toolbox.compute_times(frames_info, logger=self._logger)
 
         # compute angles (ra, dec, parang)
-        toolbox.compute_angles(frames_info, logger=self._logger)
+        ret = toolbox.compute_angles(frames_info, logger=self._logger)
+        if ret == vltpf.ERROR:
+            self._update_recipe_status('sort_frames', vltpf.ERROR)
+            return
 
         # save
         self._logger.debug('> save frames.csv')
@@ -1359,6 +1362,10 @@ def sph_ird_preprocess_science(self,
         bpm_files = files_info[(files_info['PRO CATG'] == 'IRD_STATIC_BADPIXELMAP') |
                                (files_info['PRO CATG'] == 'IRD_NON_LINEAR_BADPIXELMAP')].index
         bpm_files = [path.calib / '{}.fits'.format(f) for f in bpm_files]
+        if len(bpm_files) == 0:
+            self._logger.error('Could not find any bad pixel maps')
+            self._update_recipe_status('sph_ird_preprocess_science', vltpf.ERROR)
+            return
 
         bpm = toolbox.compute_bad_pixel_map(bpm_files, logger=self._logger)
 
@@ -1468,6 +1475,13 @@ def sph_ird_preprocess_science(self,
             else:
                 frames_info_new = toolbox.collapse_frames_info(finfo, fname, 'none', logger=self._logger)
 
+            # check for any error during collapse of frame information
+            if frames_info_new is None:
+                self._logger.error('An error occurred when collapsing frames info')
+                self._update_recipe_status('sph_ird_preprocess_science', vltpf.ERROR)
+                return
+
+            # merge frames info
             frames_info_preproc = pd.concat((frames_info_preproc, frames_info_new))
 
             # background subtraction
diff --git a/vltpf/SPHERE.py b/vltpf/SPHERE.py
index 4550911..0eac686 100644
--- a/vltpf/SPHERE.py
+++ b/vltpf/SPHERE.py
@@ -141,7 +141,8 @@ def sort_files_from_xml(path, logger=_log):
             logger.info(' ==> file {} does not exist.
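
The new KeyError handling around 'HIERARCH ESO' keywords follows a pattern worth isolating: read the header once, trap the missing card, and report None. A sketch using astropy; instrument_of is an illustrative helper, not a function from the package:

    from astropy.io import fits

    def instrument_of(fpath, logger):
        # returns 'IRDIS', 'IFS' or None; never raises on a missing keyword
        hdr = fits.getheader(fpath)
        try:
            arm = hdr['HIERARCH ESO SEQ ARM']
        except KeyError:
            logger.error('No \'HIERARCH ESO SEQ ARM\' keyword in {}'.format(fpath))
            return None
        if arm in ('IRDIS', 'IFS'):
            return arm
        logger.error('Unknown arm {0}'.format(arm))
        return None
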
Skipping'.format(filename)) continue - hdr = fits.getheader(path / '{}.fits'.format(filename)) + fpath = path / '{}.fits'.format(filename) + hdr = fits.getheader(fpath) # target and arm target = hdr['HIERARCH ESO OBS NAME'] @@ -153,13 +154,16 @@ def sort_files_from_xml(path, logger=_log): else: try: arm = hdr['HIERARCH ESO SEQ ARM'] - if arm == 'IRDIS': - instrument = 'IRDIS' - elif arm == 'IFS': - instrument = 'IFS' - else: - raise NameError('Unknown arm {0}'.format(arm)) - except NameError: + except KeyError: + logger.error('No \'HIERARCH ESO SEQ ARM\' keyword in {}'.format(fpath)) + continue + + if arm == 'IRDIS': + instrument = 'IRDIS' + elif arm == 'IFS': + instrument = 'IFS' + else: + logger.error('Unknown arm {0}'.format(arm)) continue # get files @@ -250,7 +254,8 @@ def sort_files_from_fits(path, logger=_log): target = hdr['HIERARCH ESO OBS NAME'] obs_id = hdr['HIERARCH ESO OBS ID'] dpr_type = hdr['HIERARCH ESO DPR TYPE'] - except: + except KeyError: + logger.error('Missing ESO HIERARCH keywords in {}'.format(file)) continue if dpr_type == 'OBJECT,AO': @@ -258,13 +263,16 @@ def sort_files_from_fits(path, logger=_log): else: try: arm = hdr['HIERARCH ESO SEQ ARM'] - if arm == 'IRDIS': - instrument = 'IRDIS' - elif arm == 'IFS': - instrument = 'IFS' - else: - raise NameError('Unknown arm {0}'.format(arm)) - except: + except KeyError: + logger.error('No \'HIERARCH ESO SEQ ARM\' keyword in {}'.format(file)) + continue + + if arm == 'IRDIS': + instrument = 'IRDIS' + elif arm == 'IFS': + instrument = 'IFS' + else: + logger.error('Unknown arm {0}'.format(arm)) continue # target path @@ -302,12 +310,19 @@ def classify_irdis_dataset(path, logger=_log): logger : logHandler object Log handler for the reduction. Default is root logger + Returns + ------- + mode : str + Generic string representing the name of the mode. None in case + of failure. + ''' # zeroth-order reduction validation raw = path / 'raw' if not raw.exists(): - raise ValueError('No raw/ subdirectory. {0} is not a valid reduction path!'.format(path)) + logger.error('No raw/ subdirectory. 
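
Because classify_irdis_dataset() now reports failure by returning None instead of raising, every call site has to test the result before using it. A sketch of such a call site; reduction_mode is hypothetical, and the import assumes the function is public in vltpf.SPHERE:

    from vltpf.SPHERE import classify_irdis_dataset

    def reduction_mode(reduction_path, logger):
        mode = classify_irdis_dataset(reduction_path, logger=logger)
        # classification failed: skip this path rather than abort the Dataset
        if mode is None:
            return None
        logger.info('IRDIS {0} reduction at path {1}'.format(mode, reduction_path))
        return mode
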
{0} is not a valid reduction path!'.format(path)) + return None # list all fits files files = list(raw.glob('*.fits')) @@ -433,7 +448,7 @@ def init_reduction(self): ''' for r in self._reductions: - self._logger.info('Init: {}'.format(r)) + self._logger.info('Init: {}'.format(str(r))) r.init_reduction() @@ -444,7 +459,7 @@ def create_static_calibrations(self): ''' for r in self._reductions: - self._logger.info('Static calibrations: {}'.format(r)) + self._logger.info('Static calibrations: {}'.format(str(r))) r.create_static_calibrations() @@ -455,7 +470,7 @@ def preprocess_science(self): ''' for r in self._reductions: - self._logger.info('Science pre-processing: {}'.format(r)) + self._logger.info('Science pre-processing: {}'.format(str(r))) r.preprocess_science() @@ -467,7 +482,7 @@ def process_science(self): ''' for r in self._reductions: - self._logger.info('Science processing: {}'.format(r)) + self._logger.info('Science processing: {}'.format(str(r))) r.process_science() @@ -479,7 +494,8 @@ def clean(self): ''' for r in self._reductions: - self._logger.info('Clean-up: {}'.format(r)) + print(r) + self._logger.info('Clean-up: {}'.format(str(r))) r.clean() @@ -491,7 +507,7 @@ def full_reduction(self): ''' for r in self._reductions: - self._logger.info('Full {0} reduction at path {1}'.format(r.instrument, r.path)) + self._logger.info('Full reduction: {}'.format(str(r))) r.full_reduction() @@ -519,26 +535,38 @@ def _create_reductions(self): hdr = fits.getheader(fits_files[0]) try: arm = hdr['HIERARCH ESO SEQ ARM'] - if arm == 'IRDIS': - mode = classify_irdis_dataset(reduction_path, logger=self._logger) - - if mode == 'imaging': - self._logger.info(' * IRDIS imaging reduction at path {}'.format(reduction_path)) - reduction = IRDIS.ImagingReduction(reduction_path, log_level=self._log_level) - elif mode == 'polar': - self._logger.warning('IRDIS DPI not supported yet') - elif mode == 'spectro': - self._logger.info(' * IRDIS spectro reduction at path {}'.format(reduction_path)) - reduction = IRDIS.SpectroReduction(reduction_path, log_level=self._log_level) - + except KeyError: + self._logger.error('No \'HIERARCH ESO SEQ ARM\' keyword in {}'.format(fits_files[0])) + continue + + if arm == 'IRDIS': + mode = classify_irdis_dataset(reduction_path, logger=self._logger) + + # an error occured in dataset classification + if mode is None: + continue + + if mode == 'imaging': + self._logger.info(' * IRDIS imaging reduction at path {}'.format(reduction_path)) + reduction = IRDIS.ImagingReduction(reduction_path, log_level=self._log_level) + elif mode == 'polar': + self._logger.warning('IRDIS DPI not supported yet') + elif mode == 'spectro': + self._logger.info(' * IRDIS spectro compute master bad pixel map from {} files'.format(len(bpm_files))) # get shape @@ -329,7 +331,7 @@ def collapse_frames_info(finfo, fname, collapse_type, coadd_value=2, logger=_log Returns ------- nfinfo : dataframe - Collapsed data frame + Collapsed data frame, or None in case of error ''' logger.info(' ==> collapse frames information') @@ -359,7 +361,9 @@ def collapse_frames_info(finfo, fname, collapse_type, coadd_value=2, logger=_log (finfo.loc[(fname, imax), 'TIME END'] - finfo.loc[(fname, imin), 'TIME START']) / 2 # recompute angles - compute_angles(nfinfo, logger=logger) + ret = compute_angles(nfinfo, logger=logger) + if ret == vltpf.ERROR: + return None elif collapse_type == 'coadd': coadd_value = int(coadd_value) NDIT = len(finfo) @@ -386,9 +390,12 @@ def collapse_frames_info(finfo, fname, collapse_type, coadd_value=2, 
logger=_log (finfo.loc[(fname, imax), 'TIME END'] - finfo.loc[(fname, imin), 'TIME START']) / 2 # recompute angles - compute_angles(nfinfo, logger=logger) + ret = compute_angles(nfinfo, logger=logger) + if ret == vltpf.ERROR: + return None else: - raise ValueError('Unknown collapse type {0}'.format(collapse_type)) + logger.error('Unknown collapse type {0}'.format(collapse_type)) + return None return nfinfo From 2f292d56f19f24e8f35ca050ec8d6d3d89305c9c Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Sun, 15 Sep 2019 14:47:05 +0200 Subject: [PATCH 095/101] Rename some parameters in toolbox.recipe_executable() --- vltpf/toolbox.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/vltpf/toolbox.py b/vltpf/toolbox.py index a654fe9..a480185 100644 --- a/vltpf/toolbox.py +++ b/vltpf/toolbox.py @@ -22,7 +22,7 @@ _log = logging.getLogger(__name__) -def recipe_executable(recipes_status, recipe_name, recipe_requirements, logger=_log): +def recipe_executable(recipes_status, recipe, requirements, logger=_log): ''' Check if a recipe is executabled given the status of other recipes @@ -31,10 +31,10 @@ def recipe_executable(recipes_status, recipe_name, recipe_requirements, logger=_ recipes_status : dict Status of executed recipes - recipe_name : str + recipe : str Name of the current recipe - recipe_requirements : dict + requirements : dict Dictionary providing the recipe requirements logger : logHandler object @@ -47,7 +47,7 @@ def recipe_executable(recipes_status, recipe_name, recipe_requirements, logger=_ ''' recipes = recipes_status.keys() - requirements = recipe_requirements[recipe_name] + requirements = requirements[recipe] execute_recipe = True missing = [] @@ -60,10 +60,10 @@ def recipe_executable(recipes_status, recipe_name, recipe_requirements, logger=_ missing.append(r) if not execute_recipe: - logger.error('{} cannot executed because the following recipes have not been executed or have result in unrecoverable errors: {}. '.format(recipe_name, missing)) - recipes_status[recipe_name] = vltpf.ERROR + logger.error('{} cannot executed because the following recipes have not been executed or have result in unrecoverable errors: {}. '.format(recipe, missing)) + recipes_status[recipe] = vltpf.ERROR - logger.debug('> execution requirements check for {}: {}'.format(recipe_name, execute_recipe)) + logger.debug('> execution requirements check for {}: {}'.format(recipe, execute_recipe)) return execute_recipe From 6357f85d698191cfefff49d061972c4cbc853a5c Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Sun, 15 Sep 2019 14:50:09 +0200 Subject: [PATCH 096/101] Rename some parameters in _update_recipe_status() --- vltpf/IFS.py | 12 ++++++------ vltpf/IRDIS/ImagingReduction.py | 7 +++---- vltpf/IRDIS/SpectroReduction.py | 8 ++++---- 3 files changed, 13 insertions(+), 14 deletions(-) diff --git a/vltpf/IFS.py b/vltpf/IFS.py index 0977e02..a1e00d3 100644 --- a/vltpf/IFS.py +++ b/vltpf/IFS.py @@ -851,8 +851,8 @@ def _read_info(self): self._update_recipe_status('sph_ifs_star_center', vltpf.SUCCESS) self._logger.debug('> sph_ifs_star_center status = {}'.format(done)) - # FIXME: move into toolbox - def _update_recipe_status(self, recipe, recipe_status): + + def _update_recipe_status(self, recipe, status): '''Update execution status for reduction and recipe Parameters @@ -860,14 +860,14 @@ def _update_recipe_status(self, recipe, recipe_status): recipe : str Recipe name - recipe_status : vltpf status (int) + status : vltpf status (int) Status of the recipe. 
Can be either one of vltpf.NOTSET, vltpf.SUCCESS or vltpf.ERROR ''' - + self._logger.debug('> update recipe execution') - - self._recipes_status[recipe] = recipe_status + + self._recipes_status[recipe] = status self._recipes_status.move_to_end(recipe) ################################################## diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py index 8815d77..e9a0452 100644 --- a/vltpf/IRDIS/ImagingReduction.py +++ b/vltpf/IRDIS/ImagingReduction.py @@ -475,8 +475,7 @@ def _read_info(self): self._update_recipe_status('sph_ird_star_center', vltpf.SUCCESS) self._logger.debug('> sph_ird_star_center status = {}'.format(done)) - # FIXME: move into toolbox - def _update_recipe_status(self, recipe, recipe_status): + def _update_recipe_status(self, recipe, status): '''Update execution status for reduction and recipe Parameters @@ -484,14 +483,14 @@ def _update_recipe_status(self, recipe, recipe_status): recipe : str Recipe name - recipe_status : vltpf status (int) + status : vltpf status (int) Status of the recipe. Can be either one of vltpf.NOTSET, vltpf.SUCCESS or vltpf.ERROR ''' self._logger.debug('> update recipe execution') - self._recipes_status[recipe] = recipe_status + self._recipes_status[recipe] = status self._recipes_status.move_to_end(recipe) ################################################## diff --git a/vltpf/IRDIS/SpectroReduction.py b/vltpf/IRDIS/SpectroReduction.py index 5916a1e..dfc227a 100644 --- a/vltpf/IRDIS/SpectroReduction.py +++ b/vltpf/IRDIS/SpectroReduction.py @@ -541,8 +541,8 @@ def _read_info(self): self._update_recipe_status('sph_ird_star_center', vltpf.SUCCESS) self._logger.debug('> sph_ird_star_center status = {}'.format(done)) - # FIXME: move into toolbox - def _update_recipe_status(self, recipe, recipe_status): + + def _update_recipe_status(self, recipe, status): '''Update execution status for reduction and recipe Parameters @@ -550,14 +550,14 @@ def _update_recipe_status(self, recipe, recipe_status): recipe : str Recipe name - recipe_status : vltpf status (int) + status : vltpf status (int) Status of the recipe. 
Can be either one of vltpf.NOTSET, vltpf.SUCCESS or vltpf.ERROR ''' self._logger.debug('> update recipe execution') - self._recipes_status[recipe] = recipe_status + self._recipes_status[recipe] = status self._recipes_status.move_to_end(recipe) ################################################## From 2a0aae08d344e479dd14e53f3c71b89c408d1ef8 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Sun, 15 Sep 2019 17:14:23 +0200 Subject: [PATCH 097/101] Implement a general reduction status Following-up on ticket #41 Enables to avoid carrying on with the execution of a reduction if some critical errors have been encountered (no FITS files, mixing instruments, no SCIENCE files) --- examples/sphere_dataset.py | 2 +- vltpf/IFS.py | 98 +++++++++++++++++++++++++-------- vltpf/IRDIS/ImagingReduction.py | 96 ++++++++++++++++++++++---------- vltpf/IRDIS/SpectroReduction.py | 78 ++++++++++++++++++++------ vltpf/__init__.py | 7 ++- vltpf/toolbox.py | 11 +++- 6 files changed, 217 insertions(+), 75 deletions(-) diff --git a/examples/sphere_dataset.py b/examples/sphere_dataset.py index d8cbcdd..4e92269 100644 --- a/examples/sphere_dataset.py +++ b/examples/sphere_dataset.py @@ -1,7 +1,7 @@ import vltpf.SPHERE as SPHERE #%% init data set -ds = SPHERE.Dataset('/Users/avigan/data/VLTPF-test-target/test/', log_level='info') +ds = SPHERE.Dataset('/Users/avigan/data/VLTPF-test-target/', log_level='info') print('IRDIS reductions:') for red in ds.IRDIS_reductions: diff --git a/vltpf/IFS.py b/vltpf/IFS.py index a1e00d3..44c775f 100644 --- a/vltpf/IFS.py +++ b/vltpf/IFS.py @@ -487,6 +487,7 @@ def __new__(cls, path, log_level='info'): # # reduction status # + reduction._status = vltpf.INIT reduction._recipes_status = collections.OrderedDict() # reload any existing data frames @@ -851,6 +852,9 @@ def _read_info(self): self._update_recipe_status('sph_ifs_star_center', vltpf.SUCCESS) self._logger.debug('> sph_ifs_star_center status = {}'.format(done)) + # reduction status + self._status = vltpf.INCOMPLETE + def _update_recipe_status(self, recipe, status): '''Update execution status for reduction and recipe @@ -895,8 +899,9 @@ def sort_files(self): files = [f.stem for f in files] if len(files) == 0: - self._logger.error('No raw FITS files in reduction path') + self._logger.critical('No raw FITS files in reduction path') self._update_recipe_status('sort_files', vltpf.ERROR) + self._status = vltpf.FATAL return self._logger.info(' * found {0} raw FITS files'.format(len(files))) @@ -942,10 +947,19 @@ def sort_files(self): # check instruments instru = files_info['SEQ ARM'].unique() if len(instru) != 1: - self._logger.error('Sequence is mixing different instruments: {0}'.format(instru)) + self._logger.critical('Sequence is mixing different instruments: {0}'.format(instru)) self._update_recipe_status('sort_files', vltpf.ERROR) + self._status = vltpf.FATAL return + # check science files + sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE') & (files_info['DPR TYPE'] != 'SKY')] + if len(sci_files) == 0: + self._logger.critical('This dataset contains no science frame. 
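
The reduction-level status introduced here lets a fatal failure in sort_files(), such as no FITS files, mixed instruments or no science frames, short-circuit everything downstream. Schematically, with integer constants standing in for the ones added to vltpf/__init__.py and a toy class in place of the real reductions:

    INIT, INCOMPLETE, FATAL = 0, 1, 2      # stand-ins for the vltpf constants

    class ToyReduction:
        def __init__(self):
            self._status = INIT

        def sort_files(self, files):
            if not files:
                # unrecoverable: no input data at all
                self._status = FATAL
                return
            self._status = INCOMPLETE

        def sort_frames(self):
            # downstream recipes bail out as soon as the reduction is fatal
            if self._status == FATAL:
                return

    r = ToyReduction()
    r.sort_files([])
    r.sort_frames()                        # returns immediately
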
There should be at least one!') + self._update_recipe_status('sort_files', vltpf.ERROR) + self._status = vltpf.FATAL + return + # processed column files_info.insert(len(files_info.columns), 'PROCESSED', False) files_info.insert(len(files_info.columns), 'PRO CATG', ' ') @@ -970,6 +984,9 @@ # update recipe execution self._update_recipe_status('sort_files', vltpf.SUCCESS) + # reduction status + self._status = vltpf.INCOMPLETE + def sort_frames(self): ''' @@ -983,7 +1000,7 @@ def sort_frames(self): self._logger.info('Extract frames information') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sort_frames', + if not toolbox.recipe_executable(self._recipes_status, self._status, 'sort_frames', self.recipe_requirements, logger=self._logger): return @@ -994,12 +1011,6 @@ def sort_frames(self): # science files sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE') & (files_info['DPR TYPE'] != 'SKY')] - # report error when no science frames are present - if len(sci_files) == 0: - self._logger.error('This dataset contains no science frame. There should be at least one!') - self._update_recipe_status('sort_frames', vltpf.ERROR) - return - # build indices files = [] img = [] @@ -1023,6 +1034,7 @@ def sort_frames(self): ret = toolbox.compute_angles(frames_info, logger=self._logger) if ret == vltpf.ERROR: self._update_recipe_status('sort_frames', vltpf.ERROR) + self._status = vltpf.FATAL return # save @@ -1077,6 +1089,9 @@ def sort_frames(self): # update recipe execution self._update_recipe_status('sort_frames', vltpf.SUCCESS) + # reduction status + self._status = vltpf.INCOMPLETE + def check_files_association(self): ''' @@ -1089,7 +1104,7 @@ def check_files_association(self): self._logger.info('File association for calibrations') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'check_files_association', + if not toolbox.recipe_executable(self._recipes_status, self._status, 'check_files_association', self.recipe_requirements, logger=self._logger): return @@ -1335,6 +1350,9 @@ def check_files_association(self): # update recipe execution self._update_recipe_status('check_files_association', vltpf.SUCCESS) + # reduction status + self._status = vltpf.INCOMPLETE + def sph_ifs_cal_dark(self, silent=True): ''' @@ -1349,7 +1367,7 @@ def sph_ifs_cal_dark(self, silent=True): self._logger.info('Darks and backgrounds') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ifs_cal_dark', + if not toolbox.recipe_executable(self._recipes_status, self._status, 'sph_ifs_cal_dark', self.recipe_requirements, logger=self._logger): return @@ -1450,6 +1468,9 @@ def sph_ifs_cal_dark(self, silent=True): # update recipe execution self._update_recipe_status('sph_ifs_cal_dark', vltpf.SUCCESS) + # reduction status + self._status = vltpf.INCOMPLETE + def sph_ifs_cal_detector_flat(self, silent=True): ''' @@ -1464,7 +1485,7 @@ def sph_ifs_cal_detector_flat(self, silent=True): self._logger.info('Detector flats') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ifs_cal_detector_flat', + if not toolbox.recipe_executable(self._recipes_status, self._status, 'sph_ifs_cal_detector_flat', self.recipe_requirements, logger=self._logger): return @@ -1554,6 +1575,9 @@ def sph_ifs_cal_detector_flat(self, silent=True): # update recipe execution self._update_recipe_status('sph_ifs_cal_detector_flat', vltpf.SUCCESS) + # reduction status + 
self._status = vltpf.INCOMPLETE + def sph_ifs_cal_specpos(self, silent=True): ''' @@ -1568,7 +1592,7 @@ def sph_ifs_cal_specpos(self, silent=True): self._logger.info('Microspectra positions') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ifs_cal_specpos', + if not toolbox.recipe_executable(self._recipes_status, self._status, 'sph_ifs_cal_specpos', self.recipe_requirements, logger=self._logger): return @@ -1656,6 +1680,9 @@ def sph_ifs_cal_specpos(self, silent=True): # update recipe execution self._update_recipe_status('sph_ifs_cal_specpos', vltpf.SUCCESS) + # reduction status + self._status = vltpf.INCOMPLETE + def sph_ifs_cal_wave(self, silent=True): ''' @@ -1670,7 +1697,7 @@ def sph_ifs_cal_wave(self, silent=True): self._logger.info('Wavelength calibration') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ifs_cal_wave', + if not toolbox.recipe_executable(self._recipes_status, self._status, 'sph_ifs_cal_wave', self.recipe_requirements, logger=self._logger): return @@ -1785,6 +1812,9 @@ def sph_ifs_cal_wave(self, silent=True): # update recipe execution self._update_recipe_status('sph_ifs_cal_wave', vltpf.SUCCESS) + # reduction status + self._status = vltpf.INCOMPLETE + def sph_ifs_cal_ifu_flat(self, silent=True): ''' @@ -1799,7 +1829,7 @@ def sph_ifs_cal_ifu_flat(self, silent=True): self._logger.info('Integral-field unit flat') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ifs_cal_ifu_flat', + if not toolbox.recipe_executable(self._recipes_status, self._status, 'sph_ifs_cal_ifu_flat', self.recipe_requirements, logger=self._logger): return @@ -1938,6 +1968,9 @@ def sph_ifs_cal_ifu_flat(self, silent=True): # update recipe execution self._update_recipe_status('sph_ifs_cal_ifu_flat', vltpf.SUCCESS) + # reduction status + self._status = vltpf.INCOMPLETE + def sph_ifs_preprocess_science(self, subtract_background=True, fix_badpix=True, correct_xtalk=True, @@ -1999,7 +2032,7 @@ def sph_ifs_preprocess_science(self, self._logger.info('Pre-process science files') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ifs_preprocess_science', + if not toolbox.recipe_executable(self._recipes_status, self._status, 'sph_ifs_preprocess_science', self.recipe_requirements, logger=self._logger): return @@ -2202,6 +2235,9 @@ def sph_ifs_preprocess_science(self, # update recipe execution self._update_recipe_status('sph_ifs_preprocess_science', vltpf.SUCCESS) + # reduction status + self._status = vltpf.INCOMPLETE + def sph_ifs_preprocess_wave(self): ''' @@ -2212,7 +2248,7 @@ def sph_ifs_preprocess_wave(self): self._logger.info('Pre-process wavelength calibration file') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ifs_preprocess_wave', + if not toolbox.recipe_executable(self._recipes_status, self._status, 'sph_ifs_preprocess_wave', self.recipe_requirements, logger=self._logger): return @@ -2280,6 +2316,9 @@ def sph_ifs_preprocess_wave(self): # update recipe execution self._update_recipe_status('sph_ifs_preprocess_wave', vltpf.SUCCESS) + # reduction status + self._status = vltpf.INCOMPLETE + def sph_ifs_science_cubes(self, silent=True): ''' @@ -2294,7 +2333,7 @@ def sph_ifs_science_cubes(self, silent=True): self._logger.info('Create science cubes') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ifs_science_cubes', + if not 
toolbox.recipe_executable(self._recipes_status, self._status, 'sph_ifs_science_cubes', self.recipe_requirements, logger=self._logger): return @@ -2437,6 +2476,9 @@ def sph_ifs_science_cubes(self, silent=True): # update recipe execution self._update_recipe_status('sph_ifs_science_cubes', vltpf.SUCCESS) + # reduction status + self._status = vltpf.INCOMPLETE + def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot=True): '''Performs a recalibration of the wavelength, if star center frames @@ -2466,7 +2508,7 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot= self._logger.info('Wavelength recalibration') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ifs_wavelength_recalibration', + if not toolbox.recipe_executable(self._recipes_status, self._status, 'sph_ifs_wavelength_recalibration', self.recipe_requirements, logger=self._logger): return @@ -2668,6 +2710,9 @@ def sph_ifs_wavelength_recalibration(self, high_pass=False, offset=(0, 0), plot= # update recipe execution self._update_recipe_status('sph_ifs_wavelength_recalibration', vltpf.SUCCESS) + # reduction status + self._status = vltpf.INCOMPLETE + def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), plot=True): '''Determines the star center for all frames where a center can be @@ -2691,7 +2736,7 @@ def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), plot=True): self._logger.info('Star centers determination') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ifs_star_center', + if not toolbox.recipe_executable(self._recipes_status, self._status, 'sph_ifs_star_center', self.recipe_requirements, logger=self._logger): return @@ -2776,6 +2821,9 @@ def sph_ifs_star_center(self, high_pass=False, offset=(0, 0), plot=True): # update recipe execution self._update_recipe_status('sph_ifs_star_center', vltpf.SUCCESS) + # reduction status + self._status = vltpf.INCOMPLETE + def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_anamorphism=True, shift_method='fft', manual_center=None, coarse_centering=False, save_scaled=False): @@ -2878,7 +2926,7 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a self._logger.info('Combine science data') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ifs_combine_data', + if not toolbox.recipe_executable(self._recipes_status, self._status, 'sph_ifs_combine_data', self.recipe_requirements, logger=self._logger): return @@ -3247,6 +3295,9 @@ def sph_ifs_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a # update recipe execution self._update_recipe_status('sph_ifs_combine_data', vltpf.SUCCESS) + # reduction status + self._status = vltpf.COMPLETE + def sph_ifs_clean(self, delete_raw=False, delete_products=False): ''' @@ -3264,7 +3315,7 @@ def sph_ifs_clean(self, delete_raw=False, delete_products=False): self._logger.info('Clean reduction data') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ifs_clean', + if not toolbox.recipe_executable(self._recipes_status, self._status, 'sph_ifs_clean', self.recipe_requirements, logger=self._logger): return @@ -3307,3 +3358,6 @@ def sph_ifs_clean(self, delete_raw=False, delete_products=False): # update recipe execution self._update_recipe_status('sph_ifs_clean', vltpf.SUCCESS) + + # reduction status + self._status = vltpf.INCOMPLETE diff --git 
a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py index e9a0452..d8682ac 100644 --- a/vltpf/IRDIS/ImagingReduction.py +++ b/vltpf/IRDIS/ImagingReduction.py @@ -152,6 +152,7 @@ def __new__(cls, path, log_level='info'): # # reduction status # + reduction._status = vltpf.INIT reduction._recipes_status = collections.OrderedDict() # reload any existing data frames @@ -400,7 +401,7 @@ def _read_info(self): files_info['DATE'] = pd.to_datetime(files_info['DATE'], utc=False) files_info['DET FRAM UTC'] = pd.to_datetime(files_info['DET FRAM UTC'], utc=False) - # update recipe execution + # recipe execution status self._update_recipe_status('sort_files', vltpf.SUCCESS) if np.any(files_info['PRO CATG'] == 'IRD_MASTER_DARK'): self._update_recipe_status('sph_ird_cal_dark', vltpf.SUCCESS) @@ -426,7 +427,7 @@ def _read_info(self): frames_info['TIME'] = pd.to_datetime(frames_info['TIME'], utc=False) frames_info['TIME END'] = pd.to_datetime(frames_info['TIME END'], utc=False) - # update recipe execution + # recipe execution status self._update_recipe_status('sort_frames', vltpf.SUCCESS) else: frames_info = None @@ -452,7 +453,7 @@ def _read_info(self): self._frames_info = frames_info self._frames_info_preproc = frames_info_preproc - # additional checks to update recipe execution + # additional checks to recipe execution status if frames_info_preproc is not None: done = True files = frames_info_preproc.index @@ -474,7 +475,11 @@ def _read_info(self): if done: self._update_recipe_status('sph_ird_star_center', vltpf.SUCCESS) self._logger.debug('> sph_ird_star_center status = {}'.format(done)) + + # reduction status + self._status = vltpf.INCOMPLETE + def _update_recipe_status(self, recipe, status): '''Update execution status for reduction and recipe @@ -507,7 +512,7 @@ def sort_files(self): self._logger.info('Sort raw files') - # update recipe execution + # recipe execution status self._update_recipe_status('sort_files', vltpf.NOTSET) # parameters @@ -518,8 +523,9 @@ def sort_files(self): files = [f.stem for f in files] if len(files) == 0: - self._logger.error('No raw FITS files in reduction path') + self._logger.critical('No raw FITS files in reduction path') self._update_recipe_status('sort_files', vltpf.ERROR) + self._status = vltpf.FATAL return self._logger.info(' * found {0} raw FITS files'.format(len(files))) @@ -565,8 +571,17 @@ def sort_files(self): # check instruments instru = files_info['SEQ ARM'].unique() if len(instru) != 1: - self._logger.error('Sequence is mixing different instruments: {0}'.format(instru)) + self._logger.critical('Sequence is mixing different instruments: {0}'.format(instru)) self._update_recipe_status('sort_files', vltpf.ERROR) + self._status = vltpf.FATAL + return + + # check science files + sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE') & (files_info['DPR TYPE'] != 'SKY')] + if len(sci_files) == 0: + self._logger.critical('This dataset contains no science frame. 
There should be at least one!') + self._update_recipe_status('sort_files', vltpf.ERROR) + self._status = vltpf.FATAL return # processed column @@ -590,10 +605,13 @@ def sort_files(self): files_info.to_csv(path.preproc / 'files.csv') self._files_info = files_info - # update recipe execution + # recipe execution status self._update_recipe_status('sort_files', vltpf.SUCCESS) + # reduction status + self._status = vltpf.INCOMPLETE + def sort_frames(self): ''' Extract the frames information from the science files and save @@ -606,7 +624,7 @@ def sort_frames(self): self._logger.info('Extract frames information') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sort_frames', + if not toolbox.recipe_executable(self._recipes_status, self._status, 'sort_frames', self.recipe_requirements, logger=self._logger): return @@ -617,12 +635,6 @@ def sort_frames(self): # science files sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE') & (files_info['DPR TYPE'] != 'SKY')] - # report error when no science frames are present - if len(sci_files) == 0: - self._logger.error('This dataset contains no science frame. There should be at least one!') - self._update_recipe_status('sort_frames', vltpf.ERROR) - return - # build indices files = [] img = [] @@ -646,6 +658,7 @@ def sort_frames(self): ret = toolbox.compute_angles(frames_info, logger=self._logger) if ret == vltpf.ERROR: self._update_recipe_status('sort_frames', vltpf.ERROR) + self._status = vltpf.FATAL return # save @@ -697,9 +710,12 @@ def sort_frames(self): self._logger.info(' * PA: {0:.2f}° ==> {1:.2f}° = {2:.2f}°'.format(pa_start, pa_end, np.abs(pa_end-pa_start))) self._logger.info(' * POSANG: {0}'.format(', '.join(['{:.2f}°'.format(p) for p in posang]))) - # update recipe execution + # recipe execution status self._update_recipe_status('sort_frames', vltpf.SUCCESS) + # reduction status + self._status = vltpf.INCOMPLETE + def check_files_association(self): ''' @@ -712,7 +728,7 @@ def check_files_association(self): self._logger.info('File association for calibrations') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'check_files_association', + if not toolbox.recipe_executable(self._recipes_status, self._status, 'check_files_association', self.recipe_requirements, logger=self._logger): return @@ -792,9 +808,12 @@ def check_files_association(self): else: self._logger.warning('There are {0} warning(s) and {1} error(s) in the classification of files'.format(warning_flag, error_flag)) - # update recipe execution + # recipe execution status self._update_recipe_status('check_files_association', vltpf.SUCCESS) + # reduction status + self._status = vltpf.INCOMPLETE + def sph_ird_cal_dark(self, silent=True): ''' @@ -809,7 +828,7 @@ def sph_ird_cal_dark(self, silent=True): self._logger.info('Darks and backgrounds') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_cal_dark', + if not toolbox.recipe_executable(self._recipes_status, self._status, 'sph_ird_cal_dark', self.recipe_requirements, logger=self._logger): return @@ -919,9 +938,12 @@ def sph_ird_cal_dark(self, silent=True): self._logger.debug('> save files.csv') files_info.to_csv(path.preproc / 'files.csv') - # update recipe execution + # recipe execution status self._update_recipe_status('sph_ird_cal_dark', vltpf.SUCCESS) + # reduction status + self._status = vltpf.INCOMPLETE + def sph_ird_cal_detector_flat(self, silent=True): ''' @@ -936,7 +958,7 @@ def 
sph_ird_cal_detector_flat(self, silent=True): self._logger.info('Instrument flats') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_cal_detector_flat', + if not toolbox.recipe_executable(self._recipes_status, self._status, 'sph_ird_cal_detector_flat', self.recipe_requirements, logger=self._logger): return @@ -1023,9 +1045,12 @@ def sph_ird_cal_detector_flat(self, silent=True): self._logger.debug('> save files.csv') files_info.to_csv(path.preproc / 'files.csv') - # update recipe execution + # recipe execution status self._update_recipe_status('sph_ird_cal_detector_flat', vltpf.SUCCESS) + # reduction status + self._status = vltpf.INCOMPLETE + def sph_ird_preprocess_science(self, subtract_background=True, fix_badpix=True, @@ -1083,7 +1108,7 @@ def sph_ird_preprocess_science(self, self._logger.info('Pre-process science files') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_preprocess_science', + if not toolbox.recipe_executable(self._recipes_status, self._status, 'sph_ird_preprocess_science', self.recipe_requirements, logger=self._logger): return @@ -1308,9 +1333,12 @@ def sph_ird_preprocess_science(self, self._frames_info_preproc = frames_info_preproc - # update recipe execution + # recipe execution status self._update_recipe_status('sph_ird_preprocess_science', vltpf.SUCCESS) + # reduction status + self._status = vltpf.INCOMPLETE + def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): '''Determines the star center for all frames where a center can be @@ -1335,7 +1363,7 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): self._logger.info('Star centers determination') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_star_center', + if not toolbox.recipe_executable(self._recipes_status, self._status, 'sph_ird_star_center', self.recipe_requirements, logger=self._logger): return @@ -1409,9 +1437,12 @@ def sph_ird_star_center(self, high_pass=False, offset=(0, 0), plot=True): self._logger.debug('> save centers') fits.writeto(path.preproc / '{}_centers.fits'.format(fname), img_center, overwrite=True) - # update recipe execution + # recipe execution status self._update_recipe_status('sph_ird_star_center', vltpf.SUCCESS) + # reduction status + self._status = vltpf.INCOMPLETE + def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_anamorphism=True, shift_method='fft', manual_center=None, coarse_centering=False, save_scaled=False): @@ -1514,7 +1545,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a self._logger.info('Combine science data') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_combine_data', + if not toolbox.recipe_executable(self._recipes_status, self._status, 'sph_ird_combine_data', self.recipe_requirements, logger=self._logger): return @@ -1885,9 +1916,12 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=290, correct_a if save_scaled: del sci_cube_scaled - # update recipe execution + # recipe execution status self._update_recipe_status('sph_ird_combine_data', vltpf.SUCCESS) + # reduction status + self._status = vltpf.COMPLETE + def sph_ird_clean(self, delete_raw=False, delete_products=False): ''' @@ -1905,7 +1939,7 @@ def sph_ird_clean(self, delete_raw=False, delete_products=False): self._logger.info('Clean reduction data') # check if recipe can be executed - if 
not toolbox.recipe_executable(self._recipes_status, 'sph_ird_clean', + if not toolbox.recipe_executable(self._recipes_status, self._status, 'sph_ird_clean', self.recipe_requirements, logger=self._logger): return @@ -1946,5 +1980,9 @@ def sph_ird_clean(self, delete_raw=False, delete_products=False): self._logger.warning(' ==> delete products') shutil.rmtree(path.products, ignore_errors=True) - # update recipe execution + # recipe execution status self._update_recipe_status('sph_ird_clean', vltpf.SUCCESS) + + # reduction status + self._status = vltpf.INCOMPLETE + diff --git a/vltpf/IRDIS/SpectroReduction.py b/vltpf/IRDIS/SpectroReduction.py index dfc227a..18953ae 100644 --- a/vltpf/IRDIS/SpectroReduction.py +++ b/vltpf/IRDIS/SpectroReduction.py @@ -201,6 +201,7 @@ def __new__(cls, path, log_level='info'): # # reduction status # + reduction._status = vltpf.INIT reduction._recipes_status = collections.OrderedDict() # reload any existing data frames @@ -541,6 +542,9 @@ def _read_info(self): self._update_recipe_status('sph_ird_star_center', vltpf.SUCCESS) self._logger.debug('> sph_ird_star_center status = {}'.format(done)) + # reduction status + self._status = vltpf.INCOMPLETE + def _update_recipe_status(self, recipe, status): '''Update execution status for reduction and recipe @@ -585,8 +589,9 @@ def sort_files(self): files = [f.stem for f in files] if len(files) == 0: - self._logger.error('No raw FITS files in reduction path') + self._logger.critical('No raw FITS files in reduction path') self._update_recipe_status('sort_files', vltpf.ERROR) + self._status = vltpf.FATAL return self._logger.info(' * found {0} raw FITS files'.format(len(files))) @@ -632,10 +637,19 @@ def sort_files(self): # check instruments instru = files_info['SEQ ARM'].unique() if len(instru) != 1: - self._logger.error('Sequence is mixing different instruments: {0}'.format(instru)) + self._logger.critical('Sequence is mixing different instruments: {0}'.format(instru)) self._update_recipe_status('sort_files', vltpf.ERROR) + self._status = vltpf.FATAL return + # check science files + sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE') & (files_info['DPR TYPE'] != 'SKY')] + if len(sci_files) == 0: + self._logger.critical('This dataset contains no science frame. There should be at least one!') + self._update_recipe_status('sort_files', vltpf.ERROR) + self._status = vltpf.FATAL + return + # processed column files_info.insert(len(files_info.columns), 'PROCESSED', False) files_info.insert(len(files_info.columns), 'PRO CATG', ' ') @@ -660,6 +674,9 @@ # update recipe execution self._update_recipe_status('sort_files', vltpf.SUCCESS) + # reduction status + self._status = vltpf.INCOMPLETE + def sort_frames(self): ''' @@ -673,7 +690,7 @@ def sort_frames(self): self._logger.info('Extract frames information') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sort_frames', + if not toolbox.recipe_executable(self._recipes_status, self._status, 'sort_frames', self.recipe_requirements, logger=self._logger): return @@ -684,12 +701,6 @@ def sort_frames(self): # science files sci_files = files_info[(files_info['DPR CATG'] == 'SCIENCE') & (files_info['DPR TYPE'] != 'SKY')] - # report error when no science frames are present - if len(sci_files) == 0: - self._logger.error('This dataset contains no science frame. 
There should be at least one!') - self._update_recipe_status('sort_frames', vltpf.ERROR) - return - # build indices files = [] img = [] @@ -713,6 +724,7 @@ def sort_frames(self): ret = toolbox.compute_angles(frames_info, logger=self._logger) if ret == vltpf.ERROR: self._update_recipe_status('sort_frames', vltpf.ERROR) + self._status = vltpf.FATAL return # save @@ -767,6 +779,9 @@ def sort_frames(self): # update recipe execution self._update_recipe_status('sort_frames', vltpf.SUCCESS) + # reduction status + self._status = vltpf.INCOMPLETE + def check_files_association(self): ''' @@ -779,7 +794,7 @@ def check_files_association(self): self._logger.info('File association for calibrations') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'check_files_association', + if not toolbox.recipe_executable(self._recipes_status, self._status, 'check_files_association', self.recipe_requirements, logger=self._logger): return @@ -891,6 +906,9 @@ def check_files_association(self): # update recipe execution self._update_recipe_status('check_files_association', vltpf.SUCCESS) + # reduction status + self._status = vltpf.INCOMPLETE + def sph_ird_cal_dark(self, silent=True): ''' @@ -905,7 +923,7 @@ def sph_ird_cal_dark(self, silent=True): self._logger.info('Darks and backgrounds') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_cal_dark', + if not toolbox.recipe_executable(self._recipes_status, self._status, 'sph_ird_cal_dark', self.recipe_requirements, logger=self._logger): return @@ -1018,6 +1036,9 @@ def sph_ird_cal_dark(self, silent=True): # update recipe execution self._update_recipe_status('sph_ird_cal_dark', vltpf.SUCCESS) + # reduction status + self._status = vltpf.INCOMPLETE + def sph_ird_cal_detector_flat(self, silent=True): ''' @@ -1032,7 +1053,7 @@ def sph_ird_cal_detector_flat(self, silent=True): self._logger.info('Instrument flats') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_cal_detector_flat', + if not toolbox.recipe_executable(self._recipes_status, self._status, 'sph_ird_cal_detector_flat', self.recipe_requirements, logger=self._logger): return @@ -1121,6 +1142,9 @@ def sph_ird_cal_detector_flat(self, silent=True): # update recipe execution self._update_recipe_status('sph_ird_cal_detector_flat', vltpf.SUCCESS) + # reduction status + self._status = vltpf.INCOMPLETE + def sph_ird_cal_wave(self, silent=True): ''' @@ -1135,7 +1159,7 @@ def sph_ird_cal_wave(self, silent=True): self._logger.info('Wavelength calibration') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_cal_wave', + if not toolbox.recipe_executable(self._recipes_status, self._status, 'sph_ird_cal_wave', self.recipe_requirements, logger=self._logger): return @@ -1297,6 +1321,9 @@ def sph_ird_cal_wave(self, silent=True): # update recipe execution self._update_recipe_status('sph_ird_cal_wave', vltpf.SUCCESS) + # reduction status + self._status = vltpf.INCOMPLETE + def sph_ird_preprocess_science(self, subtract_background=True, fix_badpix=True, @@ -1339,7 +1366,7 @@ def sph_ird_preprocess_science(self, self._logger.info('Pre-process science files') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_preprocess_science', + if not toolbox.recipe_executable(self._recipes_status, self._status, 'sph_ird_preprocess_science', self.recipe_requirements, logger=self._logger): return @@ -1537,6 +1564,9 @@ 
def sph_ird_preprocess_science(self, # update recipe execution self._update_recipe_status('sph_ird_preprocess_science', vltpf.SUCCESS) + # reduction status + self._status = vltpf.INCOMPLETE + def sph_ird_star_center(self, high_pass=False, plot=True): '''Determines the star center for all frames where a center can be @@ -1556,7 +1586,7 @@ def sph_ird_star_center(self, high_pass=False, plot=True): self._logger.info('Star centers determination') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_star_center', + if not toolbox.recipe_executable(self._recipes_status, self._status, 'sph_ird_star_center', self.recipe_requirements, logger=self._logger): return @@ -1647,7 +1677,10 @@ def sph_ird_star_center(self, high_pass=False, plot=True): # update recipe execution self._update_recipe_status('sph_ird_star_center', vltpf.SUCCESS) + # reduction status + self._status = vltpf.INCOMPLETE + def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): '''Performs a recalibration of the wavelength, if star center frames are available. Otherwise simply use the ESO pipeline-calibrated law. @@ -1673,7 +1706,7 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): self._logger.info('Wavelength recalibration') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_wavelength_recalibration', + if not toolbox.recipe_executable(self._recipes_status, self._status, 'sph_ird_wavelength_recalibration', self.recipe_requirements, logger=self._logger): return @@ -1810,6 +1843,9 @@ def sph_ird_wavelength_recalibration(self, fit_scaling=True, plot=True): # update recipe execution self._update_recipe_status('sph_ird_wavelength_recalibration', vltpf.SUCCESS) + # reduction status + self._status = vltpf.INCOMPLETE + def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_mrs_chromatism=True, split_posang=True, shift_method='fft', manual_center=None, coarse_centering=False): @@ -1908,7 +1944,7 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m self._logger.info('Combine science data') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_combine_data', + if not toolbox.recipe_executable(self._recipes_status, self._status, 'sph_ird_combine_data', self.recipe_requirements, logger=self._logger): return @@ -2309,6 +2345,9 @@ def sph_ird_combine_data(self, cpix=True, psf_dim=80, science_dim=800, correct_m # update recipe execution self._update_recipe_status('sph_ird_combine_data', vltpf.SUCCESS) + # reduction status + self._status = vltpf.COMPLETE + def sph_ird_clean(self, delete_raw=False, delete_products=False): ''' @@ -2326,7 +2365,7 @@ def sph_ird_clean(self, delete_raw=False, delete_products=False): self._logger.info('Clean reduction data') # check if recipe can be executed - if not toolbox.recipe_executable(self._recipes_status, 'sph_ird_clean', + if not toolbox.recipe_executable(self._recipes_status, self._status, 'sph_ird_clean', self.recipe_requirements, logger=self._logger): return @@ -2373,3 +2412,6 @@ def sph_ird_clean(self, delete_raw=False, delete_products=False): # update recipe execution self._update_recipe_status('sph_ird_clean', vltpf.SUCCESS) + + # reduction status + self._status = vltpf.INCOMPLETE diff --git a/vltpf/__init__.py b/vltpf/__init__.py index e29daaa..aed3f6e 100644 --- a/vltpf/__init__.py +++ b/vltpf/__init__.py @@ -14,10 +14,11 @@ NOTSET = -1 SUCCESS = 0 ERROR = 1 +FATAL = 
2 -# INIT = -1 -# INCOMPLETE = -2 -# COMPLETED = -3 +COMPLETE = 0 +INIT = -10 +INCOMPLETE = -20 # log level diff --git a/vltpf/toolbox.py b/vltpf/toolbox.py index a480185..d9094c7 100644 --- a/vltpf/toolbox.py +++ b/vltpf/toolbox.py @@ -22,7 +22,7 @@ _log = logging.getLogger(__name__) -def recipe_executable(recipes_status, recipe, requirements, logger=_log): +def recipe_executable(recipes_status, reduction_status, recipe, requirements, logger=_log): ''' Check if a recipe is executabled given the status of other recipes @@ -31,6 +31,9 @@ recipes_status : dict Status of executed recipes + reduction_status : vltpf state + Overall status of the reduction + recipe : str Name of the current recipe @@ -46,6 +49,10 @@ Current recipe can be executed safely ''' + if reduction_status == vltpf.FATAL: + logger.critical(' ==> reduction is in a FATAL state! See log file for details') + return False + recipes = recipes_status.keys() requirements = requirements[recipe] @@ -60,7 +67,7 @@ missing.append(r) if not execute_recipe: - logger.error('{} cannot executed because the following recipes have not been executed or have result in unrecoverable errors: {}. '.format(recipe, missing)) + logger.error('{} cannot be executed because the following recipes have not been executed or have resulted in unrecoverable errors: {}. '.format(recipe, missing)) recipes_status[recipe] = vltpf.ERROR logger.debug('> execution requirements check for {}: {}'.format(recipe, execute_recipe)) From c8b09c1866979b5f5231c7940ab1f61e8931bbc7 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Sun, 15 Sep 2019 21:33:04 +0200 Subject: [PATCH 098/101] Add SPHERE.Dataset() handler to reduction loggers Ticket #41 Done when reductions are created directly from the SPHERE data set --- examples/sphere_dataset.py | 2 ++ vltpf/IFS.py | 8 +++++++- vltpf/IRDIS/ImagingReduction.py | 8 +++++++- vltpf/IRDIS/SpectroReduction.py | 8 +++++++- vltpf/SPHERE.py | 14 ++++++++++---- 5 files changed, 33 insertions(+), 7 deletions(-) diff --git a/examples/sphere_dataset.py b/examples/sphere_dataset.py index 4e92269..1cc7590 100644 --- a/examples/sphere_dataset.py +++ b/examples/sphere_dataset.py @@ -6,11 +6,13 @@ print('IRDIS reductions:') for red in ds.IRDIS_reductions: print(red) + red.config['clean'] = True print() print('IFS reductions:') for red in ds.IFS_reductions: print(red) + red.config['clean'] = True print() #%% full reduction with default parameters diff --git a/vltpf/IFS.py b/vltpf/IFS.py index 44c775f..ecb9cd5 100644 --- a/vltpf/IFS.py +++ b/vltpf/IFS.py @@ -393,7 +393,7 @@ class Reduction(object): # Constructor ################################################## - def __new__(cls, path, log_level='info'): + def __new__(cls, path, log_level='info', sphere_handler=None): '''Custom instantiation for the class The customized instantiation enables to check that the @@ -409,6 +409,9 @@ def __new__(cls, path, log_level='info'): level : {'debug', 'info', 'warning', 'error', 'critical'} The log level of the handler + sphere_handler : log handler + Higher-level SPHERE.Dataset log handler + ''' # @@ -452,6 +455,9 @@ def __new__(cls, path, log_level='info'): handler.setFormatter(formatter) logger.addHandler(handler) + if sphere_handler: + logger.addHandler(sphere_handler) + reduction._logger = logger reduction._logger.info('Creating IFS 
reduction at path {}'.format(path)) diff --git a/vltpf/IRDIS/ImagingReduction.py b/vltpf/IRDIS/ImagingReduction.py index d8682ac..85b06a0 100644 --- a/vltpf/IRDIS/ImagingReduction.py +++ b/vltpf/IRDIS/ImagingReduction.py @@ -52,7 +52,7 @@ class ImagingReduction(object): # Constructor ################################################## - def __new__(cls, path, log_level='info'): + def __new__(cls, path, log_level='info', sphere_handler=None): '''Custom instantiation for the class and initialization for the instances @@ -69,6 +69,9 @@ def __new__(cls, path, log_level='info'): level : {'debug', 'info', 'warning', 'error', 'critical'} The log level of the handler + sphere_handler : log handler + Higher-level SPHERE.Dataset log handler + ''' # @@ -113,6 +116,9 @@ def __new__(cls, path, log_level='info'): handler.setFormatter(formatter) logger.addHandler(handler) + if sphere_handler: + logger.addHandler(sphere_handler) + reduction._logger = logger reduction._logger.info('Creating IRDIS imaging reduction at path {}'.format(path)) diff --git a/vltpf/IRDIS/SpectroReduction.py b/vltpf/IRDIS/SpectroReduction.py index 18953ae..66c7680 100644 --- a/vltpf/IRDIS/SpectroReduction.py +++ b/vltpf/IRDIS/SpectroReduction.py @@ -96,7 +96,7 @@ class SpectroReduction(object): # Constructor ################################################## - def __new__(cls, path, log_level='info'): + def __new__(cls, path, log_level='info', sphere_handler=None): '''Custom instantiation for the class and initialization for the instances @@ -113,6 +113,9 @@ def __new__(cls, path, log_level='info'): level : {'debug', 'info', 'warning', 'error', 'critical'} The log level of the handler + sphere_handler : log handler + Higher-level SPHERE.Dataset log handler + ''' # @@ -157,6 +160,9 @@ def __new__(cls, path, log_level='info'): handler.setFormatter(formatter) logger.addHandler(handler) + if sphere_handler: + logger.addHandler(sphere_handler) + reduction._logger = logger reduction._logger.info('Creating IRDIS spectroscopy reduction at path {}'.format(path)) diff --git a/vltpf/SPHERE.py b/vltpf/SPHERE.py index 0eac686..f422932 100644 --- a/vltpf/SPHERE.py +++ b/vltpf/SPHERE.py @@ -394,6 +394,7 @@ def __init__(self, path, log_level='info'): logger.addHandler(handler) self._log_level = log_level + self._handler = handler self._logger = logger self._logger.info('Looking for SPHERE data sets at path {}'.format(path)) @@ -507,7 +508,9 @@ def full_reduction(self): ''' for r in self._reductions: - self._logger.info('Full reduction: {}'.format(str(r))) + self._logger.info('###########################################################################') + self._logger.info('# Full reduction: {}'.format(str(r))) + self._logger.info('###########################################################################') r.full_reduction() @@ -548,19 +551,22 @@ def _create_reductions(self): if mode == 'imaging': self._logger.info(' * IRDIS imaging reduction at path {}'.format(reduction_path)) - reduction = IRDIS.ImagingReduction(reduction_path, log_level=self._log_level) + reduction = IRDIS.ImagingReduction(reduction_path, log_level=self._log_level, + sphere_handler=self._handler) elif mode == 'polar': self._logger.warning('IRDIS DPI not supported yet') elif mode == 'spectro': self._logger.info(' * IRDIS spectro Date: Sun, 15 Sep 2019 21:50:42 +0200 Subject: [PATCH 099/101] Fix missing addition of instru_offset in frames_info['DEROT ANGLE'] Bug fix from @tomasstolker in GH-76 --- vltpf/toolbox.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/vltpf/toolbox.py b/vltpf/toolbox.py index d9094c7..85f40af 100644 --- a/vltpf/toolbox.py +++ b/vltpf/toolbox.py @@ -268,7 +268,7 @@ def compute_angles(frames_info, logger=_log): frames_info['PUPIL OFFSET'] = pupoff + instru_offset # final derotation value - frames_info['DEROT ANGLE'] = frames_info['PARANG'] + pupoff + frames_info['DEROT ANGLE'] = frames_info['PARANG'] + pupoff + instru_offset return vltpf.SUCCESS From b8de3fc69cc04c33855b9cbc45d0bcdb3cf3e482 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Sun, 15 Sep 2019 21:57:51 +0200 Subject: [PATCH 100/101] Remove old IRDIFS-reduction.ipynb notebook Replaced by examples in VLTPF/examples/*.py scripts --- examples/IRDIFS-reduction.ipynb | 750 -------------------------------- 1 file changed, 750 deletions(-) delete mode 100644 examples/IRDIFS-reduction.ipynb diff --git a/examples/IRDIFS-reduction.ipynb b/examples/IRDIFS-reduction.ipynb deleted file mode 100644 index 35f4c4a..0000000 --- a/examples/IRDIFS-reduction.ipynb +++ /dev/null @@ -1,750 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "# IRDIFS data reduction" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Reducing an IRDIFS data set can be done in several ways:\n", - "* [SPHERE.Dataset](#sphere_dataset): reduction of multiple IRDIS and IFS data sets\n", - "* [IFS.Reduction](#ifs_reduction): reduction of a single IFS data set\n", - "* [IRDIS.ImagingReduction](#irdis_reduction): reduction of a single IRDIS data set\n", - "\n", - "\n", - "In addition, the automated reductions can be easily configured:\n", - "* [Reduction configuration](#reduction_configuration)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "## Reduction with SPHERE.Dataset" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The easiest is to create a SPHERE.Dataset object that will read the data at a specified location and reduce it using the default parameters.\n", - "\n", - "The first step is to create a Dataset object and provide it with a path:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import vltpf.SPHERE as SPHERE\n", - "\n", - "# data path\n", - "path = '~/data/VLTPF-test-target/'\n", - "\n", - "# create the dataset\n", - "dataset = SPHERE.Dataset(path)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The Dataset object automatically search the path for (in that order):\n", - " 1. xml files that come from datasets downloaded directly from the ESO archive. These files contain the association between the science files and the raw calibration files.\n", - " 2. FITS files\n", - " 3. existing valid reduction paths, i.e. 
directories containing a \"raw\" sub-directory with FITS files\n", - "\n", - "For case 1 and 2, the FITS files will be automatically sorted in directories per-target (case 1) or per-subsystem (case 2).\n", - "\n", - "At the end, the Dataset will contain a list of valid reductions for IFS and/or IRDIS:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print('List of valid reductions found in path:')\n", - "dataset.reductions" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print('IFS reductions:')\n", - "dataset.IFS_reductions" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print('IRDIS reductions:')\n", - "dataset.IRDIS_reductions" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The data for each reduction can then be reduced in a single command using the default parameters:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "dataset.full_reduction()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "See the full documentation to better understand how to use the Dataset object and what are the limitations." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "## Reduction with IFS.Reduction\n", - "\n", - "If you have already sorted your IFS data in a directory, you can directly create an IFS.Reduction object to reduce the data. If your reduction path is `/path/to/data/`, then the raw data must be in `/path/to/data/raw/`" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import vltpf.IFS as IFS\n", - "\n", - "# data path\n", - "path = '~/data/VLTPF-test-target/IFS/'\n", - "\n", - "# create the reduction\n", - "reduction = IFS.Reduction(path)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "There are different levels of control for the reduction.\n", - "\n", - "### Single line reduction\n", - "\n", - "In this reduction the user has no direct control on the reduction process. In case of failure (missing files, error, etc), the reduction will have to be restarted entirely.\n", - "\n", - "The user can change the main reduction parameters by accessing the `reduction.config` dictionary [(see below)](#reduction_configuration)." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "reduction.full_reduction()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Step-by-step reduction\n", - "\n", - "In this reduction, the user has still no direct control on the individual steps, but (s)he can minimise the impact of any error by restarting only the main step that failed. Note that the object has a knowledge of what steps have been executed so there is no \"risk\" of running steps in the wrong order.\n", - "\n", - "The user can change the main reduction parameters by accessing the reduction.config dictionary [(see below)](#reduction_configuration)." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# initialisation\n", - "reduction.init_reduction()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# static calibrations\n", - "reduction.create_static_calibrations()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# pre-process science data\n", - "reduction.preprocess_science()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# process science data\n", - "reduction.process_science()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# clean-up\n", - "reduction.clean()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Manual reduction\n", - "\n", - "In this reduction, the user has complete control over the reduction steps and their parameters. Again the object has a knowledge of what steps have been executed so there is no \"risk\" of running steps in the wrong order. Available options are detailed in the full documentation." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# sort files\n", - "reduction.sort_files()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# extract frames informations\n", - "reduction.sort_frames()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# check that all required calibration files are available\n", - "reduction.check_files_association()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# generate darks/backgrounds\n", - "reduction.sph_ifs_cal_dark(silent=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# generate detector flats\n", - "reduction.sph_ifs_cal_detector_flat(silent=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# generate microspectra positions\n", - "reduction.sph_ifs_cal_specpos(silent=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# generate wavelength calibration\n", - "reduction.sph_ifs_cal_wave(silent=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# generate IFU flat\n", - "reduction.sph_ifs_cal_ifu_flat(silent=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# pre-process science frames\n", - "reduction.sph_ifs_preprocess_science(subtract_background=True, fix_badpix=True, correct_xtalk=True,\n", - " collapse_science=True, collapse_type='mean', coadd_value=2,\n", - " collapse_psf=True, collapse_center=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# pre-process wavelength calibration file for recalibration\n", - "reduction.sph_ifs_preprocess_wave()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# generate individual (x,y,lambda) cubes\n", - 
"reduction.sph_ifs_science_cubes(silent=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# recalibrate wavelength\n", - "reduction.sph_ifs_wavelength_recalibration(high_pass=False, offset=(0, 0), display=False, save=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# determine star center\n", - "reduction.sph_ifs_star_center(high_pass=False, offset=(0, 0), display=False, save=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# combine data in (x,y,time,lambda) cubes\n", - "reduction.sph_ifs_combine_data(cpix=True, psf_dim=80, science_dim=290, correct_anamorphism=True, \n", - " shift_method='fft', save_scaled=False)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# clean-up\n", - "reduction.sph_ifs_clean(delete_raw=False, delete_products=False)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "## Reduction with IRDIS.ImagingReduction\n", - "\n", - "If you have already sorted your IRDIS data in a directory, you can directly create an IRDIS.ImagingReduction object to reduce the data. If your reduction path is `/path/to/data/`, then the raw data must be in `/path/to/data/raw/`" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import vltpf.IRDIS as IRDIS\n", - "\n", - "# data path\n", - "path = '~/data/VLTPF-test-target/IRD/'\n", - "\n", - "# create the reduction\n", - "reduction = IRDIS.ImagingReduction(path)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "There are different levels of control for the reduction.\n", - "\n", - "### Single line reduction\n", - "\n", - "In this reduction the user has no direct control on the reduction process. In case of failure (missing files, error, etc), the reduction will have to be restarted entirely.\n", - "\n", - "The user can change the main reduction parameters by accessing the `reduction.config` dictionary [(see below)](#reduction_configuration)." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "reduction.full_reduction()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Step-by-step reduction\n", - "\n", - "In this reduction, the user has still no direct control on the individual steps, but (s)he can minimise the impact of any error by restarting only the main step that failed. Note that the object has a knowledge of what steps have been executed so there is no \"risk\" of running steps in the wrong order.\n", - "\n", - "The user can change the main reduction parameters by accessing the reduction.config dictionary [(see below)](#reduction_configuration)." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# initialisation\n", - "reduction.init_reduction()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# static calibrations\n", - "reduction.create_static_calibrations()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# pre-process science data\n", - "reduction.preprocess_science()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# process science data\n", - "reduction.process_science()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# clean-up\n", - "reduction.clean()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Manual reduction\n", - "\n", - "In this reduction, the user has complete control over the reduction steps and their parameters. Again the object has a knowledge of what steps have been executed so there is no \"risk\" of running steps in the wrong order. Available options are detailed in the full documentation." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# sort files\n", - "reduction.sort_files()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# extract frames informations\n", - "reduction.sort_frames()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# check that all required calibration files are available\n", - "reduction.check_files_association()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# generate darks/backgrounds\n", - "reduction.sph_ird_cal_dark(silent=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# generate detector flats\n", - "reduction.sph_ird_cal_detector_flat(silent=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# pre-process science frames to generate individual (x,y,lambda) cubes\n", - "reduction.sph_ird_preprocess_science(subtract_background=True, fix_badpix=True,\n", - " collapse_science=False, collapse_type='mean', coadd_value=2,\n", - " collapse_psf=True, collapse_center=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# determine star center\n", - "reduction.sph_ird_star_center(high_pass=False, offset=(0, 0), display=False, save=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# combine data in (x,y,time,lambda) cubes\n", - "reduction.sph_ird_combine_data(cpix=True, psf_dim=100, science_dim=400, correct_anamorphism=True, \n", - " shift_method='fft', nocenter=False, save_scaled=False)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# clean-up\n", - "reduction.sph_ird_clean(delete_raw=False, delete_products=False)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "## Reduction configuration" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "When working with the 
1-line reduction or with the simplified step-by-step reduction steps, the reduction uses the default reduction parameters, which are stored in the ``config`` dictionary of each reduction object.\n", - "\n", - "The current configuration of a reduction can be accessed with the ``show_config()`` method:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "reduction.show_config()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Then each of the parameters can be independently modifed by hand. For example:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "reduction.config['preproc_collapse_science'] = True # collapse science cubes...\n", - "reduction.config['preproc_collapse_type'] = 'mean' # ... using a mean\n", - "\n", - "reduction.config['combine_science_dim'] = 200 # save only images of size 200x200\n", - "reduction.config['combine_shift_method'] = 'interp' # use interpolation instead of FFT\n", - "\n", - "reduction.config['clean'] = True # make sure we clean at the end (default is no cleanup)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Important note:\n", - "\n", - "> The ``config`` dictionary keys are identical between the ``IFS.Reduction`` and ``IRDIS.ImagingReduction`` objects, except for the ``preproc_correct_xtalk``, which is specific for the IFS because it controls the correction of the spectral crosstalk. However, the default value for this key should not be changed unless you are an expert user." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The new parameter can be checked:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "reduction.show_config()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And then start the reduction:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "reduction.full_reduction()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3" - }, - "varInspector": { - "cols": { - "lenName": 16, - "lenType": 16, - "lenVar": 40 - }, - "kernels_config": { - "python": { - "delete_cmd_postfix": "", - "delete_cmd_prefix": "del ", - "library": "var_list.py", - "varRefreshCmd": "print(var_dic_list())" - }, - "r": { - "delete_cmd_postfix": ") ", - "delete_cmd_prefix": "rm(", - "library": "var_list.r", - "varRefreshCmd": "cat(var_dic_list()) " - } - }, - "types_to_exclude": [ - "module", - "function", - "builtin_function_or_method", - "instance", - "_Feature" - ], - "window_display": false - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} From b577040e4d732325178eaeb830082cf9ad659eb0 Mon Sep 17 00:00:00 2001 From: Arthur Vigan Date: Thu, 10 Oct 2019 14:14:58 +0200 Subject: [PATCH 101/101] Minor updates for version 1.0 --- LICENSE | 2 +- README.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/LICENSE b/LICENSE index 48694d6..8bebc52 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ 
MIT License -Copyright (c) 2017 Arthur Vigan +Copyright (c) 2019 Arthur Vigan Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/README.rst b/README.rst index 64f8abd..a24eec2 100644 --- a/README.rst +++ b/README.rst @@ -14,7 +14,7 @@ Credits The development of the SPHERE instrument has demanded a tremendous effort from many scientists, who have devoted several years of their life to design, build, test and commission the instrument. To recognize this work, we kindly ask you to cite the relevant instrumental papers in your scientific work. The reference papers for the instrument and its observing mode are: SPHERE: - * general description: `Beuzit et al., 2019, submitted to A&A `_ + * general description: `Beuzit et al., 2019, A&A `_ IRDIS: * general description: `Dohlen et al., 2008, SPIE, 7014 `_
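Note on PATCH 097 above: the reduction-status gating it introduces can be exercised directly through toolbox.recipe_executable(). The following minimal sketch is illustrative only and is not part of the patch series; the requirements mapping is simplified for the example, while the function signature and the vltpf status constants are the ones introduced by the patches.

    import collections
    import logging

    import vltpf
    from vltpf import toolbox

    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger('status_example')

    # status of the recipes already executed, kept in execution order
    recipes_status = collections.OrderedDict()
    recipes_status['sort_files'] = vltpf.SUCCESS
    recipes_status['sort_frames'] = vltpf.SUCCESS

    # simplified requirements: each recipe lists the recipes that must
    # have completed successfully before it is allowed to run
    requirements = {'sph_ird_cal_dark': ['sort_files', 'sort_frames']}

    # with an INCOMPLETE reduction, the recipe is allowed to run
    ok = toolbox.recipe_executable(recipes_status, vltpf.INCOMPLETE,
                                   'sph_ird_cal_dark', requirements, logger=log)
    print(ok)   # expected: True

    # a FATAL reduction status blocks any further recipe
    ok = toolbox.recipe_executable(recipes_status, vltpf.FATAL,
                                   'sph_ird_cal_dark', requirements, logger=log)
    print(ok)   # expected: False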