diff --git a/harnessed_jobs/bias_frame_BOT/v0/bias_frame_jh_task.py b/harnessed_jobs/bias_frame_BOT/v0/bias_frame_jh_task.py
index 97990cf..c2636fc 100755
--- a/harnessed_jobs/bias_frame_BOT/v0/bias_frame_jh_task.py
+++ b/harnessed_jobs/bias_frame_BOT/v0/bias_frame_jh_task.py
@@ -30,10 +30,16 @@ def bias_frame_jh_task(det_name):
                               det_name)
         return None
 
+    bias_frame, pca_files = bias_frame_task(run, det_name, bias_files[1:])
 
     bias_stability_files = sorted(bias_stability_files)
-    bias_stability_task(run, det_name, bias_stability_files)
-    return bias_frame_task(run, det_name, bias_files[1:])
+    if not os.environ.get('LCATR_USE_PCA_BIAS_FIT', "True") == 'True':
+        pca_files = None
+    print("pca_files:", pca_files)
+    bias_stability_task(run, det_name, bias_stability_files,
+                        pca_files=pca_files)
+
+    return bias_frame
 
 
 if __name__ == '__main__':
diff --git a/harnessed_jobs/cti_BOT/v0/cte_jh_task.py b/harnessed_jobs/cti_BOT/v0/cte_jh_task.py
index 25e8ac8..d684f5f 100755
--- a/harnessed_jobs/cti_BOT/v0/cte_jh_task.py
+++ b/harnessed_jobs/cti_BOT/v0/cte_jh_task.py
@@ -35,8 +35,8 @@ def cte_jh_task(det_name):
     # Write gains to local eotest_results_file, which cte_task will update.
     namps = 16 if 'SW' not in det_name else 8
     results = sensorTest.EOTestResults(eotest_results_file, namps=namps)
-    for amp, gain in gains.items():
-        results.add_seg_result(amp, 'GAIN', gain)
+    for amp in range(1, namps+1):
+        results.add_seg_result(amp, 'GAIN', gains[amp])
     results.write()
 
     bias_frame = bias_filename(run, det_name)
diff --git a/harnessed_jobs/flat_pairs_BOT/v0/producer_flat_pairs_BOT.py b/harnessed_jobs/flat_pairs_BOT/v0/producer_flat_pairs_BOT.py
index 1cc6ac6..24d0540 100755
--- a/harnessed_jobs/flat_pairs_BOT/v0/producer_flat_pairs_BOT.py
+++ b/harnessed_jobs/flat_pairs_BOT/v0/producer_flat_pairs_BOT.py
@@ -20,7 +20,14 @@
                                              'raft_jh_signal_correlations.py')
 
     # Run raft-level signal correlations just on science rafts for now.
-    installed_rafts = camera_info.installed_science_rafts
-    run_python_task_or_cl_script(raft_jh_signal_correlations,
-                                 raft_jh_signal_correlations_script,
-                                 device_names=installed_rafts)
+    device_names = camera_info.installed_science_rafts
+
+    # Check if rafts are over-ridden in the lcatr.cfg file.
+    override_rafts = os.environ.get('LCATR_RAFTS', None)
+    if override_rafts is not None:
+        device_names = [_ for _ in device_names if _[:3] in override_rafts]
+
+    if device_names:
+        run_python_task_or_cl_script(raft_jh_signal_correlations,
+                                     raft_jh_signal_correlations_script,
+                                     device_names=device_names)
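The LCATR_RAFTS override added to the producer scripts above and below filters device names with a plain substring test against the raw environment-variable string. The following standalone sketch is illustrative only and is not part of the patch; the raft names and the override value are made-up examples.

    # Sketch of the LCATR_RAFTS filtering; values are hypothetical.
    import os

    os.environ['LCATR_RAFTS'] = 'R22_R34'   # e.g. set via lcatr.cfg
    device_names = ['R22', 'R34', 'R41']    # e.g. installed science rafts

    override_rafts = os.environ.get('LCATR_RAFTS', None)
    if override_rafts is not None:
        # String membership is a substring test: 'R22' in 'R22_R34' is True.
        device_names = [_ for _ in device_names if _[:3] in override_rafts]

    print(device_names)                     # prints ['R22', 'R34']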
diff --git a/harnessed_jobs/raft_results_summary_BOT/v0/raft_results_task.py b/harnessed_jobs/raft_results_summary_BOT/v0/raft_results_task.py
index c5f9fe8..81a728c 100755
--- a/harnessed_jobs/raft_results_summary_BOT/v0/raft_results_task.py
+++ b/harnessed_jobs/raft_results_summary_BOT/v0/raft_results_task.py
@@ -27,7 +27,7 @@ def plt_savefig(filename):
         print("results_files:", results_files)
     except FileNotFoundError:
         print("No raft-level results for", raft_name)
-        results_files = {}
+        return
 
     # Determine the total number of pixels and number of edge rolloff
     # pixels for the types of CCDs in this raft and update the results
diff --git a/harnessed_jobs/read_noise_BOT/v0/producer_read_noise_BOT.py b/harnessed_jobs/read_noise_BOT/v0/producer_read_noise_BOT.py
index ed99e98..79322ea 100755
--- a/harnessed_jobs/read_noise_BOT/v0/producer_read_noise_BOT.py
+++ b/harnessed_jobs/read_noise_BOT/v0/producer_read_noise_BOT.py
@@ -21,7 +21,14 @@
                                     'v0', 'raft_jh_noise_correlations.py')
 
     # Run raft-level noise correlations just on science rafts for now.
-    installed_rafts = camera_info.installed_science_rafts
-    run_python_task_or_cl_script(raft_jh_noise_correlations,
-                                 raft_jh_noise_correlations_script,
-                                 device_names=installed_rafts)
+    device_names = camera_info.installed_science_rafts
+
+    # Check if rafts are over-ridden in the lcatr.cfg file.
+    override_rafts = os.environ.get('LCATR_RAFTS', None)
+    if override_rafts is not None:
+        device_names = [_ for _ in device_names if _[:3] in override_rafts]
+
+    if device_names:
+        run_python_task_or_cl_script(raft_jh_noise_correlations,
+                                     raft_jh_noise_correlations_script,
+                                     device_names=device_names)
diff --git a/harnessed_jobs/tearing_BOT/v0/producer_tearing_BOT.py b/harnessed_jobs/tearing_BOT/v0/producer_tearing_BOT.py
index 92dbb65..32bae0c 100755
--- a/harnessed_jobs/tearing_BOT/v0/producer_tearing_BOT.py
+++ b/harnessed_jobs/tearing_BOT/v0/producer_tearing_BOT.py
@@ -18,7 +18,15 @@
     divisidero_jh_task_script \
         = os.path.join(os.environ['EOANALYSISJOBSDIR'], 'harnessed_jobs',
                        'tearing_BOT', 'v0', 'raft_divisidero_tearing.py')
-    installed_rafts = camera_info.get_installed_raft_names()
-    run_python_task_or_cl_script(raft_divisidero_tearing,
-                                 divisidero_jh_task_script,
-                                 device_names=installed_rafts)
+    # Run raft-level divisidero tearing analysis just on science rafts for now.
+    device_names = camera_info.installed_science_rafts
+
+    # Check if rafts are over-ridden in the lcatr.cfg file.
+    override_rafts = os.environ.get('LCATR_RAFTS', None)
+    if override_rafts is not None:
+        device_names = [_ for _ in device_names if _[:3] in override_rafts]
+
+    if device_names:
+        run_python_task_or_cl_script(raft_divisidero_tearing,
+                                     divisidero_jh_task_script,
+                                     device_names=device_names)
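The bias handling in python/bot_eo_analyses.py below (and in bias_frame_jh_task.py above) is gated on LCATR_USE_PCA_BIAS_FIT, which is parsed by exact string comparison, so the PCA path is enabled by default and any value other than the literal string 'True' disables it. A minimal sketch of that parsing; the values tried are examples and not part of the patch.

    # Sketch: how the LCATR_USE_PCA_BIAS_FIT flag evaluates for a few values.
    import os

    for value in (None, 'True', 'true', 'False', '1'):
        if value is None:
            os.environ.pop('LCATR_USE_PCA_BIAS_FIT', None)   # unset -> default
        else:
            os.environ['LCATR_USE_PCA_BIAS_FIT'] = value
        use_pca_bias = os.environ.get('LCATR_USE_PCA_BIAS_FIT', "True") == 'True'
        print(repr(value), '->', use_pca_bias)
    # None -> True, 'True' -> True; 'true', 'False', '1' -> False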
""" + use_pca_bias = os.environ.get('LCATR_USE_PCA_BIAS_FIT', "True") == 'True' bias_run = siteUtils.get_analysis_run('bias') if bias_run is None: - filename = make_bias_filename(run, det_name) - if not os.path.isfile(filename): - # Look for bias file from prerequisite job. - return siteUtils.dependency_glob(filename, - description='Bias frames:')[0] + if use_pca_bias: + file_prefix = make_file_prefix(run, det_name) + pca_bias_model = f'{file_prefix}_pca_bias.pickle' + pca_bias_file = f'{file_prefix}_pca_bias.fits' + filename = (pca_bias_model, pca_bias_file) + if (not os.path.isfile(pca_bias_model) + or not os.path.isfile(pca_bias_file)): + model_file = siteUtils.dependency_glob( + pca_bias_model, description='pca bias model') + bias_file = siteUtils.dependency_glob( + pca_bias_file, description='pca bias file') + print("bias_filename:", model_file, bias_file) + return (model_file[0], bias_file[0]) + else: + filename = make_bias_filename(run, det_name) + if not os.path.isfile(filename): + # Look for bias file from prerequisite job. + return siteUtils.dependency_glob(filename, + description='Bias frames:')[0] else: # Retrieve bias file from previous run. with open('hj_fp_server.pkl', 'rb') as fd: hj_fp_server = pickle.load(fd) - filename = hj_fp_server.get_files('bias_frame_BOT', - f'*{det_name}*median_bias.fits', - run=bias_run)[0] - filename = siteUtils.get_scratch_files([filename])[0] + if use_pca_bias: + pca_bias_model \ + = hj_fp_server.get_files('bias_frame_BOT', + f'*{det_name}*_pca_bias.pickle')[0] + pca_bias_file \ + = hj_fp_server.get_files('bias_frame_BOT', + f'*{det_name}*_pca_bias.fits')[0] + pca_bias_model = siteUtils.get_scratch_files([pca_bias_model])[0] + pca_bias_file = siteUtils.get_scratch_files([pca_bias_file])[0] + filename = pca_bias_model, pca_bias_file + else: + filename = hj_fp_server.get_files('bias_frame_BOT', + f'*{det_name}*median_bias.fits', + run=bias_run)[0] + filename = siteUtils.get_scratch_files([filename])[0] print("Bias frame:") print(filename) return filename @@ -529,7 +555,12 @@ def bias_frame_task(run, det_name, bias_files, bias_frame=None): file_prefix = make_file_prefix(run, det_name) rolloff_mask_file = f'{file_prefix}_edge_rolloff_mask.fits' sensorTest.rolloff_mask(bias_files[0], rolloff_mask_file) - return bias_frame + + # Compute PCA model of bias correction. + ccd_pcas = sensorTest.CCD_bias_PCA() + pca_files = ccd_pcas.compute_pcas(bias_files, file_prefix) + + return bias_frame, pca_files def image_stats(image, nsigma=10): """Compute clipped mean and stdev of the image.""" @@ -538,7 +569,8 @@ def image_stats(image, nsigma=10): stats = afwMath.makeStatistics(image, flags=flags, sctrl=stat_ctrl) return stats.getValue(afwMath.MEANCLIP), stats.getValue(afwMath.STDEVCLIP) -def bias_stability_task(run, det_name, bias_files, nsigma=10): +def bias_stability_task(run, det_name, bias_files, nsigma=10, + pca_files=None): """Compute amp-wise bias stability time histories and serial profiles.""" raft, slot = det_name.split('_') file_prefix = make_file_prefix(run, det_name) @@ -556,7 +588,7 @@ def bias_stability_task(run, det_name, bias_files, nsigma=10): key = f'TEMP{i}' if key in hdus['REB_COND'].header: temps[key] = hdus['REB_COND'].header[key] - ccd = sensorTest.MaskedCCD(bias_file) + ccd = sensorTest.MaskedCCD(bias_file, bias_frame=pca_files) for amp in ccd: # Retrieve the per row overscan subtracted imaging section. 
diff --git a/python/bot_eo_validators.py b/python/bot_eo_validators.py
index 9eb72c4..25c783b 100644
--- a/python/bot_eo_validators.py
+++ b/python/bot_eo_validators.py
@@ -71,15 +71,23 @@ def validate_bias_frame(results, det_names):
         file_prefix = make_file_prefix(run, det_name)
         bias_frame = f'{file_prefix}_median_bias.fits'
         rolloff_mask = f'{file_prefix}_edge_rolloff_mask.fits'
+        pca_bias_file = f'{file_prefix}_pca_bias.fits'
 
         # Add/update the metadata to the primary HDU of these files.
-        for fitsfile in (bias_frame, rolloff_mask):
+        for fitsfile in (bias_frame, rolloff_mask, pca_bias_file):
             if os.path.isfile(fitsfile):
                 eotestUtils.addHeaderData(fitsfile, TESTTYPE='BIAS',
                                           DATE=eotestUtils.utc_now_isoformat())
                 results.append(lcatr.schema.fileref.make(fitsfile))
             else:
                 missing_det_names.add(det_name)
+
+        # Persist the PCA bias model file.
+        pca_bias_model = f'{file_prefix}_pca_bias.pickle'
+        if os.path.isfile(pca_bias_model):
+            results.append(lcatr.schema.fileref.make(pca_bias_model))
+        else:
+            missing_det_names.add(det_name)
 
     report_missing_data('validate_bias_frames', missing_det_names)
     return results
diff --git a/python/ssh_dispatcher.py b/python/ssh_dispatcher.py
index 0fd4e05..20198bf 100644
--- a/python/ssh_dispatcher.py
+++ b/python/ssh_dispatcher.py
@@ -253,6 +253,10 @@ def submit_jobs(self, device_names, retry=False):
         with multiprocessing.Pool(processes=num_tasks) as pool:
             outputs = []
             for device_name, remote_host in self.host_map.items():
+                if device_name not in device_names:
+                    # This must be a retry and this device is not
+                    # in the list of devices to retry, so skip it.
+                    continue
                 if device_name not in self.log_files:
                     self.make_log_file(device_name)
                 args = remote_host, device_name
diff --git a/python/stage_bot_data.py b/python/stage_bot_data.py
index e8967d0..95e4c45 100644
--- a/python/stage_bot_data.py
+++ b/python/stage_bot_data.py
@@ -93,7 +93,11 @@ def get_isr_files(det_name, run):
     """Get bias, dark, and mask files."""
     files = set()
     try:
-        files.add(bias_filename(run, det_name))
+        bias_fn = bias_filename(run, det_name)
+        if isinstance(bias_fn, str):
+            files.add(bias_fn)
+        else:
+            files = files.union(bias_fn)
     except IndexError:
         pass
     try:
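In python/ssh_dispatcher.py the new guard simply skips host_map entries that are not in the device_names passed to submit_jobs(), which is what makes retrying a subset of devices safe. A standalone sketch of that behavior; the host map and device list below are invented for illustration:

    # Sketch of the retry filtering added to submit_jobs(); data are made up.
    host_map = {'R22_S11': 'host01', 'R22_S12': 'host02', 'R34_S00': 'host03'}
    device_names = ['R22_S12']      # e.g. the only device being retried

    for device_name, remote_host in host_map.items():
        if device_name not in device_names:
            # Not in the retry list, so skip it.
            continue
        print('submitting', device_name, 'on', remote_host)
    # Only R22_S12 is submitted.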