diff --git a/src/toast/CMakeLists.txt b/src/toast/CMakeLists.txt index b434667d6..acccd24e5 100644 --- a/src/toast/CMakeLists.txt +++ b/src/toast/CMakeLists.txt @@ -141,6 +141,7 @@ install(FILES observation_dist.py observation_data.py observation_view.py + observation_cache.py pointing_utils.py vis.py rng.py diff --git a/src/toast/data.py b/src/toast/data.py index a33771d39..9923c7910 100644 --- a/src/toast/data.py +++ b/src/toast/data.py @@ -13,6 +13,9 @@ from .utils import Logger +obs_loader_name = "loader" + + class Data(MutableMapping): """Class which represents distributed data @@ -100,7 +103,7 @@ def all_local_detectors(self, selection=None, flagmask=0): all_dets[d] = None return list(all_dets.keys()) - def detector_units(self, det_data): + def detector_units(self, det_data, ob_key="detector_data_units"): """Get the detector data units for a given field. This verifies that the specified detector data field has the same @@ -108,6 +111,8 @@ def detector_units(self, det_data): Args: det_data (str): The detector data field. + ob_key (str): If detector data is not currently loaded, this + optional dictionary specifies the units for each field. Returns: (Unit): The unit used across all observations. @@ -117,8 +122,12 @@ def detector_units(self, det_data): local_units = None for ob in self.obs: if det_data not in ob.detdata: - continue - ob_units = ob.detdata[det_data].units + if ob_key in ob: + ob_units = ob[ob_key][det_data] + else: + continue + else: + ob_units = ob.detdata[det_data].units if local_units is None: local_units = ob_units else: @@ -442,6 +451,65 @@ def select( continue return new_data + def using_loaders(self): + """Pass through Data and determine if Loaders are being used + + This is useful when an observation loader exists, but an operator wants to + perform other operations on an observation by calling load_exec() without + triggering the loading. The returned dictionary can later be passed to + `restore_loaders()`. + + Returns: + (bool): True if any observation is using a loader. + + """ + result = False + for ob in self.obs: + if hasattr(ob, obs_loader_name): + result = True + break + return result + + def save_loaders(self): + """Pass through Data and save the loader instances. + + This is useful when an observation loader exists, but an operator wants to + perform other operations on an observation by calling load_exec() without + triggering the loading. The returned dictionary can later be passed to + `restore_loaders()`. + + Returns: + (dict): The dictionary of loaders, indexed on the observation UID. + + """ + loaders = dict() + for ob in self.obs: + if hasattr(ob, obs_loader_name): + loaders[ob.uid] = ob.loader + del ob.loader + else: + loaders[ob.uid] = None + return loaders + + def restore_loaders(self, saved): + """Pass through Data and restore loader instances. + + This is useful when an observation loader exists, but an operator wants to + perform other operations on an observation by calling load_exec() without + triggering the loading. The input dictionary should be generated with + `save_loaders()`. + + Args: + saved (dict): The dictionary of saved loader instances. 
+ + Returns: + None + + """ + for ob in self.obs: + if saved[ob.uid] is not None: + setattr(ob, obs_loader_name, saved[ob.uid]) + # Accelerator use def accel_create(self, names): diff --git a/src/toast/observation_cache.py b/src/toast/observation_cache.py new file mode 100644 index 000000000..7a1a7a474 --- /dev/null +++ b/src/toast/observation_cache.py @@ -0,0 +1,98 @@ +# Copyright (c) 2025-2025 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + + +import os +import hashlib +import glob + +from .utils import Logger + + +def observation_hash(obs): + """Create a unique hash of an observation. + + The goal of this function is to ensure that if two observations are equal then + they have the same unique hash, and if they differ then they have different + hashes. + + Args: + obs (Observation): The observation to hash + + Returns: + (str): The unique hash string. + + """ + # FIXME: Currently this function uses only information available on all + # processes. Instead, we should gather statistics per-detector across + # processes and include that in the hashing. + + dhash = hashlib.md5() + + bytes = obs.name.encode("utf-8") + dhash.update(bytes) + + bytes = str(obs.uid).encode("utf-8") + dhash.update(bytes) + + bytes = obs.telescope.name.encode("utf-8") + dhash.update(bytes) + + bytes = str(obs.session).encode("utf-8") + dhash.update(bytes) + + bytes = str(obs.all_detectors).encode("utf-8") + dhash.update(bytes) + + hex = dhash.hexdigest() + + return hex + + +def observation_cache(root_dir, obs, duplicates=None): + """Get the cache directory for an observation. + + This builds the path to an observation cache directory, and also checks if the + same observation exists with a different hash in the same root directory. If + `duplicates` is "warn", then a warning will be printed if any other observation + cache directories exist for the same observation. If `duplicates` is "fail", + then the existance of duplicates with raise an exception. + + Args: + root_dir (str): The top level cache directory. + obs (Observation): The observation. + duplicates (str): The action to take for duplicates. + + Returns: + (str): The observation cache directory. + + """ + log = Logger.get() + hsh = observation_hash(obs) + + obs_dir = f"{obs.name}_{hsh}" + cache_dir = os.path.join(root_dir, obs_dir) + + if duplicates is not None: + # We care about duplicate observations. Check for those now. + check = None + if obs.comm.group_rank == 0: + check_str = os.path.join(root_dir, f"{obs.name}_*") + check = glob.glob(check_str) + if obs.comm.comm_group is not None: + check = obs.comm.comm_group.bcast(check, root=0) + if len(check) != 0: + # There are existing observation directories... 
+ check_dirs = ", ".join([f"'{os.path.basename(x)}'" for x in check]) + msg = f"{obs_dir}: found existing cache dirs {check_dirs}" + if duplicates == "warn": + log.warning_rank(msg, comm=obs.comm.comm_group) + elif duplicates == "fail": + log.error_rank(msg, comm=obs.comm.comm_group) + raise RuntimeError(msg) + else: + log.debug_rank(msg, comm=obs.comm.comm_group) + + return cache_dir + diff --git a/src/toast/ops/CMakeLists.txt b/src/toast/ops/CMakeLists.txt index ddff8d27e..9e4c8c11e 100644 --- a/src/toast/ops/CMakeLists.txt +++ b/src/toast/ops/CMakeLists.txt @@ -7,6 +7,7 @@ install(FILES pipeline.py delete.py copy.py + create.py reset.py fill_gaps.py arithmetic.py @@ -64,6 +65,7 @@ install(FILES pixels_wcs.py filterbin.py obsmat.py + accum_obs.py save_hdf5.py load_hdf5.py noise_estimation.py diff --git a/src/toast/ops/__init__.py b/src/toast/ops/__init__.py index c6b2faf24..919ec07df 100644 --- a/src/toast/ops/__init__.py +++ b/src/toast/ops/__init__.py @@ -4,6 +4,7 @@ # Import Operators into our public API +from .accum_obs import AccumulateObservation from .arithmetic import Combine from .azimuth_intervals import AzimuthIntervals, AzimuthRanges from .cadence_map import CadenceMap @@ -11,6 +12,7 @@ from .common_mode_noise import CommonModeNoise from .conviqt import SimConviqt, SimTEBConviqt, SimWeightedConviqt from .copy import Copy +from .create import Create from .crosslinking import CrossLinking from .delete import Delete from .demodulation import Demodulate, StokesWeightsDemod @@ -44,7 +46,7 @@ from .noise_weight import NoiseWeight from .obsmat import ObsMat, coadd_observation_matrix from .operator import Operator -from .pipeline import Pipeline +from .pipeline import Pipeline, PipelineLoader from .pixels_healpix import PixelsHealpix from .pixels_wcs import PixelsWCS from .pointing import BuildPixelDistribution diff --git a/src/toast/ops/accum_obs.py b/src/toast/ops/accum_obs.py new file mode 100644 index 000000000..41bddfc63 --- /dev/null +++ b/src/toast/ops/accum_obs.py @@ -0,0 +1,819 @@ +# Copyright (c) 2025-2025 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. 
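A brief sketch of how the cache-path helpers defined above in observation_cache.py are used (illustrative only; the cache root path and the existing `obs` Observation instance are assumptions):

    from toast.observation_cache import observation_cache, observation_hash

    # Stable hash string; equal observations map to the same value.
    hsh = observation_hash(obs)

    # Build (but do not create) the per-observation cache path. With
    # duplicates="warn", any existing cache directories for the same observation
    # name are reported; duplicates="fail" raises RuntimeError instead.
    ob_dir = observation_cache("/scratch/toast_cache", obs, duplicates="warn")
    # ob_dir == "/scratch/toast_cache/<obs.name>_<hsh>"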
+ +import os +import numpy as np +import traitlets +from astropy import units as u + +from ..covariance import covariance_invert +from ..io.hdf_utils import hdf5_open, hdf5_config +from ..io import save_hdf5_detdata, load_hdf5_detdata +from ..mpi import MPI +from ..observation import default_values as defaults +from ..observation_cache import observation_cache +from ..pixels import PixelData, PixelDistribution +from ..timing import function_timer +from ..traits import Bool, Float, Instance, Int, Unicode, Unit, trait_docs +from ..utils import Logger +from .operator import Operator +from .pipeline import Pipeline +from .mapmaker_utils import BuildHitMap, BuildInverseCovariance, BuildNoiseWeighted + + +class CacheLoader(object): + """Loader class to restore signal and flags from an observation cache.""" + + def __init__(self, cache_dir, det_data, det_flags): + self._cache_dir = cache_dir + self._det_data = det_data + self._det_flags = det_flags + + @function_timer + def load(self, obs, **kwargs): + ob_dir = observation_cache(self._cache_dir, obs, duplicates=None) + file = os.path.join(ob_dir, f"{self._det_data}.h5") + if self._det_data in obs.detdata: + msg = f"Detector data {self._det_data} already exists in {obs.name}," + msg += " cannot load" + raise RuntimeError(msg) + if self._det_flags is not None and self._det_flags in obs.detdata: + msg = f"Detector flags {self._det_flags} already exists in {obs.name}," + msg += " cannot load" + raise RuntimeError(msg) + if obs.comm.group_size == 1: + # Force serial usage in this case, to avoid any MPI overhead + force_serial = True + else: + force_serial = False + parallel, _, _ = hdf5_config( + comm=obs.comm.comm_group, force_serial=force_serial + ) + print(f"ACCUM CacheLoader load {file}", flush=True) + hf = hdf5_open(file, "r", comm=obs.comm.comm_group, force_serial=force_serial) + fields = [self._det_data] + if self._det_flags is not None: + fields.append(self._det_flags) + load_hdf5_detdata(obs, hf, fields, "Cache {obs.name} detdata:", parallel) + + if obs.comm.comm_group is not None: + obs.comm.comm_group.barrier() + if hf is not None: + hf.close() + del hf + + def unload(self, obs, **kwargs): + print(f"ACCUM CacheLoader unload {self._det_data}", flush=True) + del obs.detdata[self._det_flags] + del obs.detdata[self._det_data] + + +@trait_docs +class AccumulateObservation(Operator): + """Operator which accumulates pixel-domain observation quantities. + + This operator computes or loads the hits, inverse pixel covariance, and noise + weighted map for each observation. The total quantities can also be accumulated. + The pixel distribution should have already been computed (for example from a + sky footprint file). + + This operator is designed to have its exec() method called on a single observation, + for example as part of a call to Operator.load_exec(). An exception will be raised + if applied to a Data object with multiple observations. + + If caching is enabled, the per-observation quantities are written to disk for + later use. + + If `cache_detdata` is True, then ONLY the signal and flags will be written to disk + and a new Loader will be added to each observation that will load this in later. 
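For example, a minimal configuration sketch (the data keys, cache path, and the pointing / weights operators shown here are assumptions, not required names):

    import toast.ops

    pixels = toast.ops.PixelsHealpix(nside=256, detector_pointing=det_pointing)
    weights = toast.ops.StokesWeights(mode="IQU", detector_pointing=det_pointing)

    accum = toast.ops.AccumulateObservation(
        pixel_dist="pixel_dist",
        hits="hits",
        zmap="zmap",
        inverse_covariance="invcov",
        covariance="cov",
        rcond="rcond",
        pixel_pointing=pixels,
        stokes_weights=weights,
        cache_dir="/scratch/toast_cache",
        cache_detdata=True,
    )
    # Process observations one at a time, triggering per-observation loaders
    # as needed and unloading afterwards.
    accum.load_exec(data)

With `cache_detdata=True` the detector signal is written to the cache, removed from memory, and replaced by a `CacheLoader`; `Data.save_loaders()` / `Data.restore_loaders()` can be used to temporarily bypass that loader for later passes that should not trigger I/O.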
+ + """ + + # Class traits + + API = Int(0, help="Internal interface version for this operator") + + cache_dir = Unicode( + None, + allow_none=True, + help="Directory of per-observation cache directories for reading / writing", + ) + + overwrite_cache = Bool( + False, + help="If True and using a cache, overwrite any inputs found there", + ) + + cache_only = Bool( + False, + help="If True, do not accumulate the total products. Useful for pre-caching", + ) + + cache_detdata = Bool( + False, + help="If True, also cache the detector data", + ) + + pixel_dist = Unicode( + "pixel_dist", + help="The Data key containing the PixelDistribution object", + ) + + inverse_covariance = Unicode( + None, + allow_none=True, + help="The Data key where the inverse covariance should be stored", + ) + + hits = Unicode( + None, + allow_none=True, + help="The Data key where the hit map should be stored", + ) + + rcond = Unicode( + None, + allow_none=True, + help="The Data key where the reciprocal condition number should be stored", + ) + + zmap = Unicode( + None, + allow_none=True, + help="The Data key for the accumulated noise weighted map", + ) + + covariance = Unicode( + None, + allow_none=True, + help="If not None, compute the covariance and store it here", + ) + + det_data = Unicode( + defaults.det_data, + allow_none=True, + help="Observation detdata key for the timestream data", + ) + + det_mask = Int( + defaults.det_mask_nonscience, + help="Bit mask value for per-detector flagging", + ) + + det_flags = Unicode( + defaults.det_flags, + allow_none=True, + help="Observation detdata key for flags to use", + ) + + det_flag_mask = Int( + defaults.det_mask_nonscience, + help="Bit mask value for detector sample flagging", + ) + + det_data_units = Unit(defaults.det_data_units, help="Desired timestream units") + + shared_flags = Unicode( + defaults.shared_flags, + allow_none=True, + help="Observation shared key for telescope flags to use", + ) + + shared_flag_mask = Int( + defaults.shared_mask_nonscience, + help="Bit mask value for optional flagging", + ) + + pixel_pointing = Instance( + klass=Operator, + allow_none=True, + help="This must be an instance of a pointing operator", + ) + + stokes_weights = Instance( + klass=Operator, + allow_none=True, + help="This must be an instance of a Stokes weights operator", + ) + + obs_pointing = Bool( + False, + help="If True, expand pointing for all detectors at once in an observation", + ) + + save_pointing = Bool( + False, + help="If True, leave expanded pointing after processing the observation", + ) + + noise_model = Unicode( + "noise_model", help="Observation key containing the noise model" + ) + + rcond_threshold = Float( + 1.0e-8, help="Minimum value for inverse condition number cut." 
+ ) + + sync_type = Unicode( + "alltoallv", help="Communication algorithm: 'allreduce' or 'alltoallv'" + ) + + @traitlets.validate("det_mask") + def _check_det_mask(self, proposal): + check = proposal["value"] + if check < 0: + raise traitlets.TraitError("Det mask should be a positive integer") + return check + + @traitlets.validate("det_flag_mask") + def _check_flag_mask(self, proposal): + check = proposal["value"] + if check < 0: + raise traitlets.TraitError("Flag mask should be a positive integer") + return check + + @traitlets.validate("shared_flag_mask") + def _check_shared_mask(self, proposal): + check = proposal["value"] + if check < 0: + raise traitlets.TraitError("Shared flag mask should be a positive integer") + return check + + @traitlets.validate("sync_type") + def _check_sync_type(self, proposal): + check = proposal["value"] + if check != "allreduce" and check != "alltoallv": + raise traitlets.TraitError("Invalid communication algorithm") + return check + + @traitlets.validate("pixel_pointing") + def _check_pixel_pointing(self, proposal): + pixels = proposal["value"] + if pixels is not None: + if not isinstance(pixels, Operator): + raise traitlets.TraitError( + "pixel_pointing should be an Operator instance" + ) + # Check that this operator has the traits we expect + for trt in ["pixels", "create_dist", "view"]: + if not pixels.has_trait(trt): + msg = f"pixel_pointing operator should have a '{trt}' trait" + raise traitlets.TraitError(msg) + return pixels + + @traitlets.validate("stokes_weights") + def _check_stokes_weights(self, proposal): + weights = proposal["value"] + if weights is not None: + if not isinstance(weights, Operator): + raise traitlets.TraitError( + "stokes_weights should be an Operator instance" + ) + # Check that this operator has the traits we expect + for trt in ["weights", "view"]: + if not weights.has_trait(trt): + msg = f"stokes_weights operator should have a '{trt}' trait" + raise traitlets.TraitError(msg) + return weights + + # File root name for objects on disk. + obs_file_hits = "hits" + obs_file_invcov = "invcov" + obs_file_zmap = "zmap" + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + @function_timer + def _exec(self, data, detectors=None, **kwargs): + log = Logger.get() + + print(f"ACCUM exec cache: {self.cache_dir} {self.cache_detdata}", flush=True) + + if len(data.obs) > 1: + msg = "Each call to exec() should be used on a Data object with no " + msg += "more than one observation per group." + log.error_rank(msg, comm=data.comm.comm_group) + raise RuntimeError(msg) + elif len(data.obs) == 0: + # Nothing to do + return + + # The observation we are working with + the_obs = data.obs[0] + msg = f"Accumulating observation {the_obs.name}" + log.debug_rank(msg, comm=data.comm.comm_group) + + # Check inputs for consistency and create any map domain objects that do not + # yet exist. + self._check_inputs(data) + + # Compute the cache location for this observation and + # check if we have all the products we need. + use_cache, ob_dir = self._check_cache(the_obs) + all_cached = self._load_obs_products(data, ob_dir) + + if not all_cached: + # We need to compute the products. + + # The observation pixel distribution + obs_pixel_dist = f"{self.name}_{self.pixel_dist}" + + # Pipeline to accumulate a single observation + accum_pipe = self._create_pipeline(obs_pixel_dist) + + # Run the pipeline to accumulate the products for this observation, + # but do not call "finalize" on the pipeline, which syncs data products. 
+ self._compute_obs_products(data, accum_pipe) + + if not self.cache_only: + # Accumulate the per-observation data to the global products. This sums the + # per process submap data (which has not synced yet). + self._accumulate(data) + + if use_cache and not all_cached: + # We are using the cache and we had to compute the per-observation + # products. Write these out. This will first call finalize() on the + # pipeline to sync the per-observation products before writing. + self._write_obs_products(data, ob_dir, accum_pipe) + + def _n_stokes(self): + # Number of map values per pixel + if self.stokes_weights.mode == "IQU": + nvalue = 3 + elif self.stokes_weights.mode == "I": + nvalue = 1 + else: + raise RuntimeError( + f"Invalid Stokes weights mode: {self.stokes_weights.mode}" + ) + return nvalue + + def _check_obs_pixel_dist(self, data): + pix_dist = data[self.pixel_dist] + obs_pixel_dist = f"{self.name}_{self.pixel_dist}" + if obs_pixel_dist in data: + # Verify that it is equal to the global pixel dist with the exception of the + # communicator. This mimics the __eq__ method of the PixelDistribution but + # with the different communicator check. + obs_pix_dist = data[obs_pixel_dist] + dist_equal = True + if obs_pix_dist.n_pix != pix_dist.n_pix: + dist_equal = False + if obs_pix_dist.n_submap != pix_dist.n_submap: + dist_equal = False + if obs_pix_dist.n_pix_submap != pix_dist.n_pix_submap: + dist_equal = False + if not np.array_equal(obs_pix_dist.local_submaps, pix_dist.local_submaps): + dist_equal = False + if obs_pix_dist.comm is None and data.comm.comm_group is not None: + dist_equal = False + if obs_pix_dist.comm is not None and data.comm.comm_group is None: + dist_equal = False + if obs_pix_dist.comm is not None: + comp = MPI.Comm.Compare(obs_pix_dist.comm, data.comm.comm_group) + if comp not in (MPI.IDENT, MPI.CONGRUENT): + dist_equal = False + if not dist_equal: + # Delete it + del data[obs_pixel_dist] + if obs_pixel_dist not in data: + # Create it + data[obs_pixel_dist] = PixelDistribution( + n_pix=pix_dist.n_pix, + n_submap=pix_dist.n_submap, + local_submaps=pix_dist.local_submaps, + comm=data.comm.comm_group, + ) + if hasattr(pix_dist, "nest"): + data[obs_pixel_dist].nest = pix_dist.nest + if hasattr(pix_dist, "wcs"): + data[obs_pixel_dist].wcs = pix_dist.wcs + return obs_pixel_dist + + def _check_inputs(self, data): + for trait in "pixel_pointing", "stokes_weights": + if getattr(self, trait) is None: + msg = f"You must set the '{trait}' trait before calling exec()" + raise RuntimeError(msg) + + if self.covariance is not None and self.inverse_covariance is None: + msg = "You cannot build the covariance without also specifying" + msg += " the inverse covariance to accumulate." + raise RuntimeError(msg) + if self.covariance is None and self.rcond is not None: + msg = "The covariance and rcond traits should be either both set" + msg += " to None, or both set to the data key to use." + raise RuntimeError(msg) + if self.covariance is not None and self.rcond is None: + msg = "The covariance and rcond traits should be either both set" + msg += " to None, or both set to the data key to use." 
+ raise RuntimeError(msg) + + # Set pointing flags + self.pixel_pointing.detector_pointing.det_mask = self.det_mask + self.pixel_pointing.detector_pointing.det_flag_mask = self.det_flag_mask + if hasattr(self.stokes_weights, "detector_pointing"): + self.stokes_weights.detector_pointing.det_mask = self.det_mask + self.stokes_weights.detector_pointing.det_flag_mask = self.det_flag_mask + + # Check that the global pixel distribution exists + if self.pixel_dist not in data: + msg = f"The pixel distribution '{self.pixel_dist}' does not exist in data" + raise RuntimeError(msg) + + # Ensure that the single observation pixel distribution exists + obs_pixel_dist = self._check_obs_pixel_dist(data) + + n_value = self._n_stokes() + + for map_object, map_dt, nval, map_units in [ + (self.hits, np.int64, 1, u.dimensionless_unscaled), + (self.zmap, np.float64, n_value, 1.0 / self.det_data_units), + ( + self.inverse_covariance, + np.float64, + n_value * (n_value + 1) // 2, + 1.0 / (self.det_data_units**2), + ), + ]: + if map_object is None: + continue + if map_object in data: + if data[map_object].distribution != data[self.pixel_dist]: + # Inconsistent. Delete and re-create + print(f"data[{map_object}] has inconsistent dist, delete.", flush=True) + del data[map_object] + if map_object not in data: + print(f"data[{map_object}] does not exist, creating", flush=True) + data[map_object] = PixelData( + data[self.pixel_dist], + map_dt, + n_value=nval, + units=map_units, + ) + obs_object = f"{self.name}_{map_object}" + if obs_object in data: + if data[obs_object].distribution != data[obs_pixel_dist]: + # Inconsistent. Delete and re-create + print(f"data[{obs_object}] has inconsistent dist, delete.", flush=True) + del data[obs_object] + if obs_object not in data: + print(f"data[{obs_object}] does not exist, creating", flush=True) + data[obs_object] = PixelData( + data[obs_pixel_dist], + map_dt, + n_value=nval, + units=map_units, + ) + # Zero for use with the current observation. + print(f"data[{obs_object}]: reset to zero", flush=True) + data[obs_object].reset() + + def _check_cache(self, obs): + if self.cache_dir is None: + ob_dir = None + use_cache = False + else: + ob_dir = observation_cache(self.cache_dir, obs, duplicates=None) + use_cache = True + if obs.comm.group_rank == 0: + if not os.path.exists(ob_dir): + os.makedirs(ob_dir) + # If we are overwriting the observation products, + # delete them from disk now. 
+ if self.overwrite_cache: + for map_object, prefix in [ + (self.hits, self.obs_file_hits), + (self.zmap, self.obs_file_zmap), + (self.inverse_covariance, self.obs_file_invcov), + ]: + if map_object is None: + continue + obs_object = os.path.join(ob_dir, f"{self.name}_{map_object}") + if os.path.exists(obs_object): + os.remove(obs_object) + if self.cache_detdata: + det_object = os.path.join( + ob_dir, f"{self.name}_{self.det_data}.h5" + ) + if os.path.exists(det_object): + os.remove(det_object) + + if obs.comm.comm_group is not None: + obs.comm.comm_group.barrier() + return use_cache, ob_dir + + def _load_obs_products(self, data, ob_dir): + """Load the per-observation objects from disk if they exist.""" + have_all = True + + for map_object, prefix in [ + (self.hits, self.obs_file_hits), + (self.zmap, self.obs_file_zmap), + (self.inverse_covariance, self.obs_file_invcov), + ]: + if map_object is None: + continue + obs_object = f"{self.name}_{map_object}" + + if ob_dir is None: + # Not using cache + have_all = False + continue + + obs_object_path = os.path.join(ob_dir, f"{prefix}.h5") + + obs_have_obj = True + if data.comm.group_rank == 0: + if not os.path.exists(obs_object_path): + obs_have_obj = False + if data.comm.comm_group is not None: + obs_have_obj = data.comm.comm_group.bcast(obs_have_obj, root=0) + if not obs_have_obj: + have_all = False + continue + + # Load the object into the data + print(f"ACCUM load from cache: {obs_object_path}", flush=True) + data[obs_object].read(obs_object_path) + + if self.cache_detdata: + det_object = os.path.join(ob_dir, f"{self.det_data}.h5") + obs_have_obj = True + if data.comm.group_rank == 0: + if not os.path.exists(det_object): + obs_have_obj = False + if data.comm.comm_group is not None: + obs_have_obj = data.comm.comm_group.bcast(obs_have_obj, root=0) + if not obs_have_obj: + have_all = False + else: + print(f"ACCUM load from cache: {det_object} exists", flush=True) + return have_all + + def _create_pipeline(self, ob_dist): + if self.obs_pointing: + # Process all detectors at once + accum = Pipeline(detector_sets=["ALL"]) + else: + # Process one detector at a time. + accum = Pipeline(detector_sets=["SINGLE"]) + accum.operators = [ + self.pixel_pointing, + self.stokes_weights, + ] + + # Hit map operator + if self.hits is not None: + accum.operators.append( + BuildHitMap( + pixel_dist=ob_dist, + hits=f"{self.name}_{self.hits}", + view=self.pixel_pointing.view, + pixels=self.pixel_pointing.pixels, + det_mask=self.det_mask, + det_flags=self.det_flags, + det_flag_mask=self.det_flag_mask, + shared_flags=self.shared_flags, + shared_flag_mask=self.shared_flag_mask, + sync_type=self.sync_type, + ) + ) + + # Inverse covariance. 
+ if self.inverse_covariance is not None: + accum.operators.append( + BuildInverseCovariance( + pixel_dist=ob_dist, + inverse_covariance=f"{self.name}_{self.inverse_covariance}", + view=self.pixel_pointing.view, + pixels=self.pixel_pointing.pixels, + weights=self.stokes_weights.weights, + noise_model=self.noise_model, + det_data_units=self.det_data_units, + det_mask=self.det_mask, + det_flags=self.det_flags, + det_flag_mask=self.det_flag_mask, + shared_flags=self.shared_flags, + shared_flag_mask=self.shared_flag_mask, + sync_type=self.sync_type, + ) + ) + + # Noise weighted map + if self.zmap is not None: + accum.operators.append( + BuildNoiseWeighted( + pixel_dist=ob_dist, + zmap=f"{self.name}_{self.zmap}", + view=self.pixel_pointing.view, + pixels=self.pixel_pointing.pixels, + weights=self.stokes_weights.weights, + noise_model=self.noise_model, + det_data=self.det_data, + det_data_units=self.det_data_units, + det_mask=self.det_mask, + det_flags=self.det_flags, + det_flag_mask=self.det_flag_mask, + shared_flags=self.shared_flags, + shared_flag_mask=self.shared_flag_mask, + sync_type=self.sync_type, + ) + ) + return accum + + def _compute_obs_products(self, data, accum_pipe): + # The observation we are working with + the_obs = data.obs[0] + + # We intentionally DO NOT call finalize() / apply() here. We want to + # have the raw, per-process hits / noise weighted map / inverse covariance. + # We can then accumulate these to the global objects. + accum_pipe.exec(data) + + if self.zmap is not None: + nz = data[f'{self.name}_{self.zmap}'].data[:, :] != 0 + print(f"ACCUM obs zmap = {data[f'{self.name}_{self.zmap}'].data[nz]}") + + if self.obs_pointing and not self.save_pointing: + # We created full detector pointing for this observation, but we are + # not saving it. Clean it up now. + del the_obs.detdata[self.pixel_pointing.pixels] + del the_obs.detdata[self.stokes_weights.weights] + del the_obs.detdata[self.pixel_pointing.detector_pointing.quats] + + def _write_obs_products(self, data, ob_dir, accum_pipe): + if ob_dir is None: + # Not caching anything + return + + # Before writing out the data, we sync it. 
+ accum_pipe.finalize(data) + + for map_object, prefix in [ + (self.hits, self.obs_file_hits), + (self.zmap, self.obs_file_zmap), + (self.inverse_covariance, self.obs_file_invcov), + ]: + if map_object is None: + continue + obs_object = f"{self.name}_{map_object}" + obs_object_path = os.path.join(ob_dir, f"{prefix}.h5") + print(f"ACCUM write {obs_object_path}", flush=True) + data[obs_object].write(obs_object_path) + + if self.cache_detdata: + obs = data.obs[0] + + det_object = os.path.join(ob_dir, f"{self.det_data}.h5") + print(f"ACCUM write {det_object}", flush=True) + temp_path = f"{det_object}.tmp" + if obs.comm.group_size == 1: + # Force serial usage in this case, to avoid any MPI overhead + force_serial = True + else: + force_serial = False + hf = hdf5_open( + temp_path, "w", comm=data.comm.comm_group, force_serial=force_serial + ) + fields = [(self.det_data, None)] + if self.det_flags is not None: + fields.append((self.det_flags, {"type": "gzip"})) + save_hdf5_detdata( + obs, + hf, + fields, + "Cache {obs.name} detdata:", + ) + if hf is not None: + hf.close() + del hf + + if data.comm.comm_group is not None: + data.comm.comm_group.barrier() + + # Move file into place + if data.comm.group_rank == 0: + os.rename(temp_path, det_object) + + # Add a Loader for the future + obs.loader = CacheLoader(self.cache_dir, self.det_data, None) + + # Store units before deleting + obs["detector_data_units"] = { + self.det_data: obs.detdata[self.det_data].units + } + del obs.detdata[self.det_data] + + def _accumulate(self, data): + # The per-observation objects have not yet been synchronized. So each process + # can just accumulate their locally hit submaps. + + n_value = self._n_stokes() + + for map_object, nval in [ + (self.hits, 1), + (self.zmap, n_value), + (self.inverse_covariance, n_value * (n_value + 1) / 2), + ]: + if map_object is None: + continue + obs_object = f"{self.name}_{map_object}" + if map_object == self.zmap: + nz = data[self.zmap].data != 0 + print(f"ACCUM zmap BEFORE = {data[self.zmap].data[nz]}") + data[map_object].data[:] += data[obs_object].data + if map_object == self.zmap: + nz = data[self.zmap].data != 0 + print(f"ACCUM zmap AFTER = {data[self.zmap].data[nz]}") + if self.zmap is not None: + nz = data[self.zmap].data != 0 + print(f"ACCUM zmap OUT = {data[self.zmap].data[nz]}") + + def _finalize(self, data, **kwargs): + log = Logger.get() + + if self.cache_only: + # Nothing to do + return + + # Sync accumulated products + for map_object in [ + self.hits, + self.zmap, + self.inverse_covariance, + ]: + if map_object is None: + continue + if self.sync_type == "alltoallv": + data[map_object].sync_alltoallv() + else: + data[map_object].sync_allreduce() + # Cleanup per-observation objects + obs_object = f"{self.name}_{map_object}" + if obs_object in data: + del data[obs_object] + + # Invert the total covariance + if self.covariance is not None: + # Copy the inverse + data[self.covariance] = data[self.inverse_covariance].duplicate() + # Update units + data[self.covariance].update_units(1.0 / (self.det_data_units**2)) + # Create inverse condition number + data[self.rcond] = PixelData( + data[self.covariance].distribution, np.float64, n_value=1 + ) + # Invert in place + covariance_invert( + data[self.covariance], + self.rcond_threshold, + rcond=data[self.rcond], + use_alltoallv=(self.sync_type == "alltoallv"), + ) + rcond = data[self.rcond] + rcond_good = rcond.data[:, :, 0] > 0.0 + rcond_min = 0.0 + rcond_max = 0.0 + if np.count_nonzero(rcond_good) > 0: + rcond_min = 
np.amin(rcond.data[rcond_good, 0])
+                rcond_max = np.amax(rcond.data[rcond_good, 0])
+            if data.comm.comm_world is not None:
+                rcond_min = data.comm.comm_world.reduce(rcond_min, root=0, op=MPI.MIN)
+                rcond_max = data.comm.comm_world.reduce(rcond_max, root=0, op=MPI.MAX)
+            if data.comm.world_rank == 0:
+                msg = " Pixel covariance condition number range = "
+                msg += f"{rcond_min:1.3e} ... {rcond_max:1.3e}"
+                log.debug(msg)
+
+    def _requires(self):
+        req = self.pixel_pointing.requires()
+        req.update(self.stokes_weights.requires())
+        req["global"].append(self.pixel_dist)
+        req["meta"].append(self.noise_model)
+        if self.det_flags is not None:
+            req["detdata"].append(self.det_flags)
+        if self.shared_flags is not None:
+            req["shared"].append(self.shared_flags)
+        return req
+
+    def _provides(self):
+        prov = {
+            "global": list(),
+            "shared": list(),
+            "detdata": list(),
+        }
+        for map_obj in [
+            self.hits,
+            self.zmap,
+            self.inverse_covariance,
+            self.covariance,
+            self.rcond,
+        ]:
+            if map_obj is not None:
+                prov["global"].append(map_obj)
+        if self.save_pointing:
+            prov["detdata"].extend(
+                [self.pixel_pointing.pixels, self.stokes_weights.weights]
+            )
+        return prov
diff --git a/src/toast/ops/create.py b/src/toast/ops/create.py
new file mode 100644
index 000000000..69e081605
--- /dev/null
+++ b/src/toast/ops/create.py
@@ -0,0 +1,171 @@
+# Copyright (c) 2025-2025 by the parties listed in the AUTHORS file.
+# All rights reserved. Use of this source code is governed by
+# a BSD-style license that can be found in the LICENSE file.
+
+import traitlets
+import numpy as np
+from astropy import units as u
+
+from ..mpi import MPI
+from ..timing import function_timer
+from ..traits import Int, List, trait_docs
+from ..utils import Logger
+from .operator import Operator
+
+
+@trait_docs
+class Create(Operator):
+    """Class to pre-create detector and shared data.
+
+    Many operators already create data objects if they do not exist, but sometimes
+    it is necessary to pre-create data objects as part of a Pipeline.
+
+    Each list contains tuples of properties of the field to create.
+
+    Each shared tuple should contain (name, commtype, shape, dtype), where commtype
+    may be "group", "row", or "column" and dtype is the string name of the type
+    ("float64", "int32", "uint8", etc).
+
+    Each detector data tuple should contain (name, sample_shape, dtype, units),
+    where dtype and units are given as strings.
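For example (an illustrative sketch; the field names, shapes, and units are assumptions):

    import toast.ops

    create = toast.ops.Create(
        detdata=[
            # (name, sample_shape, dtype, units); the sample shape is given as a
            # string that evaluates to a tuple.
            ("signal", "(1,)", "float64", "K"),
        ],
        shared=[
            # (name, commtype, shape, dtype)
            ("scan_flags", "column", "(1000,)", "uint8"),
        ],
    )
    create.apply(data)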
+ + """ + + # Class traits + + API = Int(0, help="Internal interface version for this operator") + + detdata = List([], help="List of tuples of Observation detdata keys to create") + + shared = List([], help="List of tuples of Observation shared keys to create") + + @traitlets.validate("detdata") + def _check_detdata(self, proposal): + val = proposal["value"] + for v in val: + if not isinstance(v, (tuple, list)): + raise traitlets.TraitError("trait should be a list of tuples") + if len(v) != 4: + raise traitlets.TraitError("key tuples should have 4 values") + if not isinstance(v[0], str) or not isinstance(v[1], str): + raise traitlets.TraitError("key tuples should have string values") + return val + + @traitlets.validate("shared") + def _check_shared(self, proposal): + val = proposal["value"] + for v in val: + if not isinstance(v, (tuple, list)): + raise traitlets.TraitError("trait should be a list of tuples") + if len(v) != 4: + raise traitlets.TraitError("key tuples should have 4 values") + if not isinstance(v[0], str) or not isinstance(v[1], str): + raise traitlets.TraitError("key tuples should have string values") + return val + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def _dtype_from_str(self, dstr): + if dstr == "int8": + return np.dtype(np.int8) + elif dstr == "uint8": + return np.dtype(np.uint8) + elif dstr == "int16": + return np.dtype(np.int16) + elif dstr == "uint16": + return np.dtype(np.uint16) + elif dstr == "int32": + return np.dtype(np.int32) + elif dstr == "uint32": + return np.dtype(np.uint32) + elif dstr == "int64": + return np.dtype(np.int64) + elif dstr == "uint64": + return np.dtype(np.uint64) + elif dstr == "float32": + return np.dtype(np.float32) + elif dstr == "float64": + return np.dtype(np.float64) + else: + msg = f"Unsupported dtype '{dstr}'" + raise RuntimeError(msg) + + @function_timer + def _exec(self, data, detectors=None, **kwargs): + log = Logger.get() + + det_props = list() + for dp in self.detdata: + if len(dp) != 4: + msg = f"Expected 4 elements in detdata field spec, got '{dp}'" + raise ValueError(msg) + dname = str(dp[0]) + if isinstance(dp[1], tuple): + dshp = dp[1] + else: + dshp = eval(dp[1]) + dt = self._dtype_from_str(dp[2]) + if isinstance(dp[3], u.Unit): + dunit = dp[3] + else: + dunit = u.Unit(dp[3]) + det_props.append((dname, dshp, dt, dunit)) + shared_props = list() + for sp in self.shared: + sname = str(sp[0]) + scomm = str(sp[1]) + if isinstance(sp[2], tuple): + sshp = sp[2] + else: + sshp = eval(sp[2]) + dt = self._dtype_from_str(sp[3]) + shared_props.append((sname, scomm, sshp, dt)) + + for ob in data.obs: + for sprops in shared_props: + if sprops[0] in ob.shared: + # Shared field exists, check for consistency + cur_type = ob.shared.comm_type(sprops[0]) + if cur_type != sprops[1]: + msg = f"Existing shared field '{sprops[0]}' has different " + msg += f"communicator ({cur_type}) than requested ({sprops[1]})" + raise RuntimeError(msg) + cur_shp = ob.shared[sprops[0]].shdata.shape + if cur_shp != sprops[2]: + msg = f"Existing shared field '{sprops[0]}' has different " + msg += f"shape ({cur_shp}) than requested ({sprops[2]})" + raise RuntimeError(msg) + cur_dt = ob.shared[sprops[0]].shdata.dtype + if cur_dt != sprops[3]: + msg = f"Existing shared field '{sprops[0]}' has different " + msg += f"dtype ({cur_dt}) than requested ({sprops[3]})" + raise RuntimeError(msg) + else: + # Create it + ob.shared.create_type( + sprops[1], sprops[0], sprops[2], dtype=sprops[3] + ) + + for dprops in det_props: + exists = 
ob.detdata.ensure( + dprops[0], + sample_shape=dprops[1], + dtype=dprops[2], + create_units=dprops[3], + ) + + return + + def _finalize(self, data, **kwargs): + return None + + def _requires(self): + req = dict() + return req + + def _provides(self): + prov = dict() + prov["detdata"] = [x[0] for x in self.detdata] + prov["shared"] = [x[0] for x in self.shared] + return prov diff --git a/src/toast/ops/mapmaker.py b/src/toast/ops/mapmaker.py index d4562804e..b1e6cb680 100644 --- a/src/toast/ops/mapmaker.py +++ b/src/toast/ops/mapmaker.py @@ -8,19 +8,19 @@ import traitlets from ..mpi import MPI +from ..footprint import footprint_distribution from ..observation import default_values as defaults from ..timing import Timer, function_timer from ..traits import Bool, Float, Instance, Int, Unicode, trait_docs from ..utils import Logger +from .arithmetic import Combine from .copy import Copy from .delete import Delete -from .mapmaker_templates import ApplyAmplitudes, SolveAmplitudes -from .mapmaker_utils import CovarianceAndHits +from .mapmaker_templates import SolveAmplitudes from .memory_counter import MemoryCounter from .operator import Operator from .pipeline import Pipeline from .pointing import BuildPixelDistribution -from .scan_map import ScanMap, ScanMask @trait_docs @@ -31,7 +31,7 @@ class MapMaker(Operator): that model the timestream contributions from noise, systematics, etc: .. math:: - \left[ M^T N^{-1} Z M + M_p \right] a = M^T N^{-1} Z d + [ M^T N^{-1} Z M + M_p ] a = M^T N^{-1} Z d Where `a` are the solved amplitudes and `d` is the input data. `N` is the diagonal time domain noise covariance. `M` is a matrix of templates that @@ -63,6 +63,16 @@ class MapMaker(Operator): `det_data` key of each observation, or (if overwrite == False) in an obs.detdata key based on the name of this class instance. + A note on the PixelDistribution used for mapmaking: If defaults are used, the + pixel_dist specified by the binning operator will be used if it exists. This + PixelDistribution will be deleted if `reset_pix_dist` is specified and treated + as if it did not exist. If this pixel_dist does not exist, and the footprint + options are specified, then a fixed sky footprint is used on all processes. If + the footprint options are not specified, then the BuildPixelDistribution operator + will be used to pass through the data and expand all detector pointing to compute + the distribution. If loading data from disk, this additional pass through the + data may be expensive! + """ # Class traits @@ -119,7 +129,9 @@ class MapMaker(Operator): True, help="If True, write the projected map *before* template subtraction" ) - write_map = Bool(True, help="If True, write the projected map") + write_map = Bool(True, help="If True, write the template-subtracted final map") + + write_template_map = Bool(True, help="If True, write the template map") write_hdf5 = Bool( False, help="If True, outputs are in HDF5 rather than FITS format." @@ -149,6 +161,10 @@ class MapMaker(Operator): False, help="If True, write out equivalent solver products." ) + write_solver_amplitudes = Bool( + False, help="If True, write out final solved template amplitudes." 
+ ) + keep_solver_products = Bool( False, help="If True, keep the map domain solver products in data" ) @@ -169,6 +185,66 @@ class MapMaker(Operator): False, help="If True and save_cleaned is True, overwrite the input data" ) + footprint_healpix_file = Unicode( + None, + allow_none=True, + help="The healpix coverage file used to determine solver pixel distribution", + ) + + footprint_wcs_file = Unicode( + None, + allow_none=True, + help="The WCS coverage file used to determine solver pixel distribution", + ) + + footprint_healpix_submap_file = Unicode( + None, + allow_none=True, + help="The healpix coverage file used for the *submap* solver distribution", + ) + + footprint_healpix_nside = Int( + None, + allow_none=True, + help="The healpix NSIDE for a full-sky solver pixel distribution", + ) + + footprint_healpix_submap_nside = Int( + None, + allow_none=True, + help="The healpix submap NSIDE for a full-sky solver pixel distribution", + ) + + map_footprint_healpix_file = Unicode( + None, + allow_none=True, + help="The healpix coverage file used to determine final pixel distribution", + ) + + map_footprint_wcs_file = Unicode( + None, + allow_none=True, + help="The WCS coverage file used to determine final pixel distribution", + ) + + map_footprint_healpix_submap_file = Unicode( + None, + allow_none=True, + help="The healpix coverage file used for the final *submap* distribution", + ) + + map_footprint_healpix_nside = Int( + None, + allow_none=True, + help="The healpix NSIDE for a full-sky final pixel distribution", + ) + + map_footprint_healpix_submap_nside = Int( + None, + allow_none=True, + help="The healpix submap NSIDE for a full-sky final pixel distribution", + ) + reset_pix_dist = Bool( False, help="Clear any existing pixel distribution. Useful when applying " @@ -204,6 +280,7 @@ def _check_map_binning(self, proposal): "noise_model", "full_pointing", "sync_type", + "write_binned_path", ]: if not bin.has_trait(trt): msg = "map_binning operator should have a '{}' trait".format(trt) @@ -214,23 +291,12 @@ def __init__(self, **kwargs): super().__init__(**kwargs) @function_timer - def _write_del(self, prod_key, prod_write, force, rootname, extra_header=None): - """Write data object to file and delete it from cache""" + def _write_del( + self, prod_key, prod_write, force, rootname, purge=True, extra_header=None + ): + """Write data object to file and optionally delete it from data""" log = Logger.get() - # FIXME: This I/O technique assumes "known" types of pixel representations. - # Instead, we should associate read / write functions to a particular pixel - # class. - - if self.map_binning is not None and self.map_binning.enabled: - map_binning = self.map_binning - else: - map_binning = self.binning - - is_hpix_nest = True - if not hasattr(map_binning.pixel_pointing, "wcs"): - is_hpix_nest = map_binning.pixel_pointing.nest - wtimer = Timer() wtimer.start() product = prod_key.replace(f"{self.name}_", "") @@ -251,7 +317,7 @@ def _write_del(self, prod_key, prod_write, force, rootname, extra_header=None): ) log.info_rank(f"Wrote {fname} in", comm=self._comm, timer=wtimer) - if not self.keep_final_products and not self.mc_mode: + if not self.keep_final_products and not self.mc_mode and purge: if prod_key in self._data: self._data[prod_key].clear() del self._data[prod_key] @@ -288,27 +354,156 @@ def _setup(self, data, detectors, use_accel): self._comm = data.comm.comm_world self._rank = data.comm.world_rank + # Which binner are we using for the final step? 
+ + if self.map_binning is not None and self.map_binning.enabled: + self.final_binning = self.map_binning + self._final_is_solver = False + else: + self.final_binning = self.binning + self._final_is_solver = True + + # Are we actually solving for templates? + + if ( + self.template_matrix is not None + and self.template_matrix.enabled + and self.template_matrix.n_enabled_templates > 0 + ): + self._using_templates = True + else: + self._using_templates = False + # Data names of outputs + self.pixel_dist = self.binning.pixel_dist + self.final_pixel_dist = self.final_binning.pixel_dist + + # Time domain + self.det_flag_name = f"{self.name}_flags" + self.clean_name = f"{self.name}_cleaned" + + # Map domain self.hits_name = f"{self.name}_hits" self.cov_name = f"{self.name}_cov" self.invcov_name = f"{self.name}_invcov" self.rcond_name = f"{self.name}_rcond" - self.det_flag_name = f"{self.name}_flags" - - self.clean_name = f"{self.name}_cleaned" self.binmap_name = f"{self.name}_binmap" + self.template_map_name = f"{self.name}_template_map" self.map_name = f"{self.name}_map" self.noiseweighted_map_name = f"{self.name}_noiseweighted_map" + if self.reset_pix_dist or self.final_binning.pixel_dist not in data: + # Purge any stale products from previous runs + for name in [ + self.hits_name, + self.cov_name, + self.invcov_name, + self.rcond_name, + self.binmap_name, + self.map_name, + self.template_map_name, + self.noiseweighted_map_name, + self.binning.pixel_dist, + self.final_binning.pixel_dist, + ]: + if name in self._data: + del self._data[name] + + # Check the options that require persistent detector data. If the det_data + # does not exist and a loader is being used, raise an error. + + self._using_loader = False + for ob in data.obs: + if self.det_data not in ob.detdata: + if hasattr(ob, "loader"): + self._using_loader = True + if self.save_cleaned or self.overwrite_cleaned: + msg = f"Observation {ob.name} is loading detector data" + msg += " on demand. Cannot use the `save_cleaned` or" + msg += " `overwrite_cleaned` options." + raise RuntimeError(msg) + else: + msg = f"Detector data {self.det_data} does not exist in " + msg += f"observation {ob.name}, and no loader is present." + raise RuntimeError(msg) + self._timer.start() return + @function_timer + def _create_pixel_dist( + self, + binner, + fp_healpix_file, + fp_healpix_submap_file, + fp_healpix_nside, + fp_healpix_submap_nside, + fp_wcs_file, + ): + """Create a PixelDistribution for use by a binner.""" + + if binner.pixel_dist not in self._data: + self._log.info_rank( + f"{self._log_prefix} Creating pixel distribution for {binner.name}", + comm=self._comm, + ) + # We need to create it. See if we are using a footprint. + if fp_healpix_file is not None: + if fp_healpix_submap_nside is None: + msg = "If using a healpix footprint file, you must specify" + msg += " the submap NSIDE." + raise RuntimeError(msg) + self._data[binner.pixel_dist] = footprint_distribution( + healpix_coverage_file=fp_healpix_file, + healpix_nside_submap=fp_healpix_submap_nside, + comm=self._comm, + ) + elif fp_healpix_submap_file is not None: + if fp_healpix_nside is None: + msg = "If using a healpix submap footprint file, you must specify" + msg += " the map NSIDE." 
+ raise RuntimeError(msg) + self._data[binner.pixel_dist] = footprint_distribution( + healpix_submap_file=fp_healpix_submap_file, + healpix_nside=fp_healpix_nside, + comm=self._comm, + ) + elif fp_wcs_file is not None: + self._data[binner.pixel_dist] = footprint_distribution( + wcs_coverage_file=fp_wcs_file, + comm=self._comm, + ) + else: + # We have to pass through the pointing... + BuildPixelDistribution( + pixel_dist=binner.pixel_dist, + pixel_pointing=binner.pixel_pointing, + save_pointing=binner.full_pointing, + ).apply(self._data) + self._log.info_rank( + f"{self._log_prefix} finished pixel distribution for {binner.name} in", + comm=self._comm, + timer=self._timer, + ) + + self._memreport.prefix = f"After pixel distribution for {binner.name}" + self._memreport.apply(self._data, use_accel=self._use_accel) + else: + msg = f"{self._log_prefix} Using existing pixel distribution " + msg += f"for {binner.name}" + self._log.info_rank(msg, comm=self._comm) + @function_timer def _fit_templates(self): """Solve for template amplitudes""" + print( + f"DEBUG mapmaker {self.name} call SolveAmplitudes, cache_detdata = {self.binning.cache_detdata}", + flush=True, + ) + amplitudes_solve = SolveAmplitudes( name=self.name, det_data=self.det_data, @@ -326,7 +521,6 @@ def _fit_templates(self): output_dir=self.output_dir, mc_mode=self.mc_mode, mc_index=self.mc_index, - reset_pix_dist=self.reset_pix_dist, report_memory=self.report_memory, ) amplitudes_solve.apply( @@ -340,279 +534,193 @@ def _fit_templates(self): timer=self._timer, ) - self._memreport.prefix = "After solving amplitudes" - self._memreport.apply(self._data, use_accel=self._use_accel) - - return template_amplitudes - - @function_timer - def _prepare_binning(self): - """Set up the final map binning""" - - # Map binning operator - if self.map_binning is not None and self.map_binning.enabled: - map_binning = self.map_binning - else: - # Use the same binning used in the solver. 
- map_binning = self.binning - map_binning.pre_process = None - map_binning.covariance = self.cov_name - - # Pixel distribution - if self.reset_pix_dist: - # Purge any stale products from previous runs - for name in [ - self.hits_name, - self.cov_name, - self.invcov_name, - self.rcond_name, - self.clean_name, - self.binmap_name, - self.map_name, - self.noiseweighted_map_name, - map_binning.pixel_dist, - map_binning.covariance, - ]: - if name in self._data: - del self._data[name] - - if map_binning.pixel_dist not in self._data: - self._log.info_rank( - f"{self._log_prefix} Caching pixel distribution", - comm=self._comm, - ) - pix_dist = BuildPixelDistribution( - pixel_dist=map_binning.pixel_dist, - pixel_pointing=map_binning.pixel_pointing, - save_pointing=map_binning.full_pointing, - ) - pix_dist.apply(self._data, use_accel=self._use_accel) + if self.write_solver_amplitudes: + for tmpl in self.template_matrix.templates: + tmpl_amps = self._data[template_amplitudes][tmpl.name] + out_root = os.path.join(self.output_dir, f"{self._mc_root}_{tmpl.name}") + tmpl.write(tmpl_amps, out_root) self._log.info_rank( - f"{self._log_prefix} finished build of pixel distribution in", + f"{self._log_prefix} finished template amplitude write in", comm=self._comm, timer=self._timer, ) - self._memreport.prefix = "After pixel distribution" - self._memreport.apply(self._data, use_accel=self._use_accel) + self._memreport.prefix = "After solving amplitudes" + self._memreport.apply(self._data, use_accel=self._use_accel) - return map_binning + return template_amplitudes @function_timer - def _build_pixel_covariance(self, map_binning): - """Accumulate hits and pixel covariance""" - - if map_binning.covariance in self._data and self.mc_mode: - # Covariance is already cached - return - - # Construct the noise covariance, hits, and condition number - # mask for the final binned map. - - self._log.info_rank( - f"{self._log_prefix} begin build of final binning covariance", - comm=self._comm, - ) - - final_cov = CovarianceAndHits( - pixel_dist=map_binning.pixel_dist, - covariance=map_binning.covariance, - inverse_covariance=self.invcov_name, - hits=self.hits_name, - rcond=self.rcond_name, - det_mask=map_binning.det_mask, - det_flags=map_binning.det_flags, - det_flag_mask=map_binning.det_flag_mask, - det_data_units=map_binning.det_data_units, - shared_flags=map_binning.shared_flags, - shared_flag_mask=map_binning.shared_flag_mask, - pixel_pointing=map_binning.pixel_pointing, - stokes_weights=map_binning.stokes_weights, - noise_model=map_binning.noise_model, - rcond_threshold=self.map_rcond_threshold, - sync_type=map_binning.sync_type, - save_pointing=map_binning.full_pointing, - ) - - final_cov.apply( - self._data, detectors=self._detectors, use_accel=self._use_accel - ) - - self._log.info_rank( - f"{self._log_prefix} finished build of final covariance in", - comm=self._comm, - timer=self._timer, - ) + def _bin_raw_map(self, extra_header): + # This bins the original input timestream with the original flagging + # (which may differ from the solver flags due to masks). The original + # data may not be persistent in memory and running this operation may + # trigger data loading from disk for each observation. + print("----------- Begin Binned Raw -----------", flush=True) + + # Get the units used across the distributed data for our desired + # input detector data + det_data_units = self._data.detector_units(self.det_data) + + # For final binning, we do not need to do any caching. 
+ self.final_binning.cache_dir = None + self.final_binning.cache_detdata = False + + # Reset any pre / post processing + self.final_binning.pre_process = None + self.final_binning.post_process = None + + self.final_binning.det_data = self.det_data + self.final_binning.det_data_units = det_data_units + self.final_binning.binned = self.binmap_name + self.final_binning.covariance = self.cov_name + + if self.write_hits: + self.final_binning.hits = self.hits_name + else: + self.final_binning.hits = None - self._memreport.prefix = "After constructing final covariance and hits" - self._memreport.apply(self._data, use_accel=self._use_accel) + if self.write_invcov: + self.final_binning.inverse_covariance = self.invcov_name + else: + self.final_binning.inverse_covariance = None - # These data products are not needed later so they can be - # written out and purged + if self.write_noiseweighted_map: + self.final_binning.noiseweighted = self.noiseweighted_map_name + else: + self.final_binning.noiseweighted = None - self._write_del(self.hits_name, self.write_hits, False, self.name) - self._write_del(self.rcond_name, self.write_rcond, False, self.name) - self._write_del(self.invcov_name, self.write_invcov, False, self.name) + if self.write_rcond: + self.final_binning.rcond = self.rcond_name + else: + self.final_binning.rcond = None - return + self.final_binning.apply(self._data) - @function_timer - def _bin_and_write_raw_signal(self, map_binning, extra_header=None): - """Optionally bin and save an undestriped map""" + cdata = self._data[self.cov_name] + nonz = cdata.data != 0 + print(f"Raw covariance = {cdata.data[nonz]}", flush=True) - if not self.write_binmap: - return + # Write outputs - map_binning.det_data = self.det_data - map_binning.binned = self.binmap_name - map_binning.noiseweighted = None - self._log.info_rank( - f"{self._log_prefix} begin map binning", - comm=self._comm, + self._write_del( + self.hits_name, self.write_hits, False, self.name, extra_header=extra_header ) - map_binning.apply( - self._data, detectors=self._detectors, use_accel=self._use_accel + self._write_del( + self.rcond_name, + self.write_rcond, + False, + self.name, + extra_header=extra_header, ) - self._log.info_rank( - f"{self._log_prefix} finished binning in", - comm=self._comm, - timer=self._timer, + self._write_del( + self.invcov_name, + self.write_invcov, + False, + self.name, + extra_header=extra_header, ) self._write_del( - self.binmap_name, - self.write_binmap, + self.noiseweighted_map_name, + self.write_noiseweighted_map, True, self._mc_root, extra_header=extra_header, ) - - self._memreport.prefix = "After binning final map" - self._memreport.apply(self._data, use_accel=self._use_accel) - - return - - @function_timer - def _clean_signal(self, template_amplitudes): - if ( - self.template_matrix is None - or self.template_matrix.n_enabled_templates == 0 - ): - # No templates to subtract, bin the input signal - out_cleaned = self.det_data - else: - # Apply (subtract) solved amplitudes. 
- - self._log.info_rank( - f"{self._log_prefix} begin apply template amplitudes", - comm=self._comm, - ) - - out_cleaned = self.clean_name - if self.save_cleaned and self.overwrite_cleaned: - # Modify data in place - out_cleaned = None - - amplitudes_apply = ApplyAmplitudes( - op="subtract", - det_data=self.det_data, - amplitudes=template_amplitudes, - template_matrix=self.template_matrix, - output=out_cleaned, - ) - amplitudes_apply.apply( - self._data, detectors=self._detectors, use_accel=self._use_accel - ) - - if not self.keep_solver_products: - del self._data[template_amplitudes] - - self._log.info_rank( - f"{self._log_prefix} finished apply template amplitudes in", - comm=self._comm, - timer=self._timer, - ) - - self._memreport.prefix = "After subtracting templates" - self._memreport.apply(self._data, use_accel=self._use_accel) - - return out_cleaned + print("----------- End Binned Raw -----------", flush=True) @function_timer - def _bin_cleaned_signal(self, map_binning, out_cleaned): - """Bin and save a map of the destriped signal""" - - self._log.info_rank( - f"{self._log_prefix} begin final map binning", - comm=self._comm, - ) + def _bin_template_map(self, template_amplitudes, extra_header): + # This bins the projected, solved templates, and produces a "template + # map" which can be subtracted from the raw binned map to produce + # the destriped map. Depending on other options, the "cleaned" timestreams + # (original - projected templates) are saved. + + print("----------- Begin Binned Templates -----------", flush=True) + + # Get the units used across the distributed data for our desired + # input detector data + det_data_units = self._data.detector_units(self.det_data) + + # Template projection timestreams + projected = f"{self.name}_projected_templates" + + # Re-initialize the template matrix. The original detector flags (which + # may be different than the solver flags), have been restored inside the + # SolveAmplitudes operator. + self.template_matrix.reset() + self.template_matrix.det_data = projected + self.template_matrix.transpose = False + self.template_matrix.det_data_units = det_data_units + self.template_matrix.amplitudes = template_amplitudes + self.template_matrix.view = self.final_binning.pixel_pointing.view + self.template_matrix.initialize(self._data) + + pre_ops = list() + pre_ops.append(self.template_matrix) + do_restore = False - if out_cleaned is None: - map_binning.det_data = self.det_data + if self.save_cleaned: + # We are working with persistent detector data. + if self.overwrite_cleaned: + # We are replacing the original data with the template-subtracted + # timestreams. + pre_ops.append( + Combine( + op="subtract", + first=self.det_data, + second=projected, + result=self.det_data, + ) + ) + else: + # We are creating a separate data object for the cleaned timestream. + pre_ops.append( + Combine( + op="subtract", + first=self.det_data, + second=projected, + result=self.clean_name, + ) + ) else: - map_binning.det_data = out_cleaned - if self.write_noiseweighted_map or self.keep_final_products: - map_binning.noiseweighted = self.noiseweighted_map_name - map_binning.binned = self.map_name + # We are not saving the cleaned time streams, so just project the templates + # and then delete them after binning. Save any loaders in use and restore + # afterwards. 
+ saved = self._data.save_loaders() + do_restore = True - # Do the final binning - map_binning.apply( - self._data, detectors=self._detectors, use_accel=self._use_accel - ) + post_ops = list() + post_ops.append(Delete(detdata=[projected])) - self._log.info_rank( - f"{self._log_prefix} finished final binning in", - comm=self._comm, - timer=self._timer, - ) + pre_pipe = Pipeline(operators=pre_ops) + post_pipe = Pipeline(operators=post_ops) - self._memreport.prefix = "After binning final map" - self._memreport.apply(self._data, use_accel=self._use_accel) + self.final_binning.det_data = projected + self.final_binning.det_data_units = det_data_units + self.final_binning.binned = self.template_map_name + self.final_binning.covariance = self.cov_name - return + self.final_binning.hits = None + self.final_binning.inverse_covariance = None + self.final_binning.noiseweighted = None + self.final_binning.rcond = None - @function_timer - def _purge_cleaned_tod(self): - """If the cleaned TOD is not being returned, purge it""" + self.final_binning.pre_process = pre_pipe + self.final_binning.post_process = post_pipe - if self.save_cleaned: - return + cdata = self._data[self.cov_name] + nonz = cdata.data != 0 + print(f"Template covariance = {cdata.data[nonz]}", flush=True) - del_tod = Delete(detdata=[self.clean_name]) - del_tod.apply(self._data, use_accel=self._use_accel) + self.final_binning.apply(self._data) - self._memreport.prefix = "After purging cleaned TOD" - self._memreport.apply(self._data, use_accel=self._use_accel) + self.final_binning.pre_process = None + self.final_binning.post_process = None - return - - @function_timer - def _write_maps(self, extra_header=None): - """Write and delete the outputs""" - - self._write_del( - self.noiseweighted_map_name, - self.write_noiseweighted_map, - True, - self._mc_root, - ) - self._write_del( - self.map_name, - self.write_map, - True, - self._mc_root, - extra_header=extra_header, - ) - self._write_del( - self.cov_name, self.write_cov, False, self.name, extra_header=extra_header - ) - - self._log.info_rank( - f"{self._log_prefix} finished output write in", - comm=self._comm, - timer=self._timer, - ) - - return + if do_restore: + self._data.restore_loaders(saved) + print("----------- End Binned Templates -----------", flush=True) @function_timer def _closeout(self): @@ -674,15 +782,12 @@ def _get_extra_header(self, data, detectors): @function_timer def _exec(self, data, detectors=None, use_accel=None, **kwargs): - # First confirm that there is at least one valid detector + print(f"DEBUG mapmaker {self.name} call _setup", flush=True) + self._setup(data, detectors, use_accel) - if self.map_binning is not None and self.map_binning.enabled: - map_binning = self.map_binning - else: - # Use the same binning used in the solver. 
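Schematically, writing B for the full binning operation (noise weighting plus covariance application), d for the detector timestreams and M a for the projected, solved template amplitudes, the split between _bin_raw_map and _bin_template_map relies on linearity of the binning: map_final = B(d) - B(M a) = B(d - M a), provided the same pointing expansion, flags and pixel covariance are used for both binnings. Subtracting the binned template map from the raw binned map in _exec is therefore equivalent to binning the template-cleaned timestreams directly.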
- map_binning = self.binning + # Confirm that there is at least one valid detector all_local_dets = data.all_local_detectors( - selection=detectors, flagmask=map_binning.det_mask + selection=detectors, flagmask=self.final_binning.det_mask ) ndet = len(all_local_dets) if data.comm.comm_world is not None: @@ -693,29 +798,94 @@ def _exec(self, data, detectors=None, use_accel=None, **kwargs): # Destripe data and make maps - self._setup(data, detectors, use_accel) - extra_header = self._get_extra_header(data, detectors) self._memreport.prefix = "Start of mapmaking" self._memreport.apply(self._data, use_accel=self._use_accel) - template_amplitudes = self._fit_templates() - - map_binning = self._prepare_binning() + if self._using_templates or self._final_is_solver: + # We need to create the pixel distribution for the solver binner + self._create_pixel_dist( + self.binning, + self.footprint_healpix_file, + self.footprint_healpix_submap_file, + self.footprint_healpix_nside, + self.footprint_healpix_submap_nside, + self.footprint_wcs_file, + ) - self._build_pixel_covariance(map_binning) + if self._using_templates: + print(f"DEBUG mapmaker {self.name} call _fit_templates", flush=True) + template_amplitudes = self._fit_templates() + + if not self._final_is_solver: + # We have a separate binning for the final map. Compute + # the pixel distribution for this. + self._create_pixel_dist( + self.map_binning, + self.map_footprint_healpix_file, + self.map_footprint_healpix_submap_file, + self.map_footprint_healpix_nside, + self.map_footprint_healpix_submap_nside, + self.map_footprint_wcs_file, + ) - self._bin_and_write_raw_signal(map_binning, extra_header=extra_header) + self._bin_raw_map(extra_header) - out_cleaned = self._clean_signal(template_amplitudes) + if self._using_templates: + self._bin_template_map(template_amplitudes, extra_header) - if self.write_noiseweighted_map or self.write_map or self.keep_final_products: - self._bin_cleaned_signal(map_binning, out_cleaned) + if self.keep_final_products or self.write_map: + # We need to compute the template-cleaned map + self._data[self.map_name] = self._data[self.binmap_name].duplicate() + self._data[self.map_name].data[:] -= self._data[ + self.template_map_name + ].data - self._purge_cleaned_tod() # Potentially frees memory for writing maps + self._write_del( + self.template_map_name, + self.write_template_map, + False, + self.name, + extra_header=extra_header, + ) + self._write_del( + self.map_name, + self.write_map, + True, + self._mc_root, + extra_header=extra_header, + ) + self._write_del( + self.binmap_name, + self.write_binmap, + False, + self.name, + extra_header=extra_header, + ) + else: + # The raw binned map is the final map. + if self.keep_final_products or self.write_map: + self._data[self.map_name] = self._data[self.binmap_name] + self._write_del( + self.map_name, + self.write_map, + True, + self._mc_root, + purge=False, + extra_header=extra_header, + ) + self._write_del( + self.binmap_name, + self.write_binmap, + False, + self.name, + extra_header=extra_header, + ) - self._write_maps(extra_header=extra_header) + self._write_del( + self.cov_name, self.write_cov, False, self.name, extra_header=extra_header + ) self._memreport.prefix = "End of mapmaking" self._memreport.apply(self._data, use_accel=self._use_accel) @@ -754,7 +924,7 @@ class Calibrate(Operator): that model the timestream contributions from noise, systematics, etc: .. 
math:: - \left[ M^T N^{-1} Z M + M_p \right] a = M^T N^{-1} Z d + [ M^T N^{-1} Z M + M_p ] a = M^T N^{-1} Z d Where `a` are the solved amplitudes and `d` is the input data. `N` is the diagonal time domain noise covariance. `M` is a matrix of templates that diff --git a/src/toast/ops/mapmaker_binning.py b/src/toast/ops/mapmaker_binning.py index f23fe51b1..bc4047d96 100644 --- a/src/toast/ops/mapmaker_binning.py +++ b/src/toast/ops/mapmaker_binning.py @@ -9,10 +9,10 @@ from ..covariance import covariance_apply from ..observation import default_values as defaults from ..timing import Timer, function_timer -from ..traits import Bool, Instance, Int, Unicode, Unit, trait_docs +from ..traits import Bool, Float, Instance, Int, Unicode, Unit, trait_docs from ..utils import Logger from .delete import Delete -from .mapmaker_utils import BuildHitMap, BuildInverseCovariance, BuildNoiseWeighted +from .accum_obs import AccumulateObservation from .operator import Operator from .pipeline import Pipeline @@ -21,9 +21,16 @@ class BinMap(Operator): """Operator which bins a map. - Given a noise model and a pointing operator, build the noise weighted map and + Given a noise model and a pointing model, build the noise weighted map and apply the noise covariance to get resulting binned map. + If the covariance does not yet exist, it is accumulated, optionally with caching. + + By default, detector pointing is expanded for a whole observation at a time and + then deleted. If `full_pointing` is True, all the pointing is expanded and saved. + If `single_det_pointing` is True, the pointing is expanded one detector at a time + for each observation. + """ # Class traits @@ -32,7 +39,7 @@ class BinMap(Operator): pixel_dist = Unicode( "pixel_dist", - help="The Data key where the PixelDist object should be stored", + help="The Data key where the PixelDist object is stored", ) covariance = Unicode( @@ -51,6 +58,28 @@ class BinMap(Operator): help="The Data key where the noiseweighted map should be stored", ) + hits = Unicode( + None, + allow_none=True, + help="Also accumulate the hit map and store in this Data key", + ) + + inverse_covariance = Unicode( + None, + allow_none=True, + help="The Data key where the inverse covariance should be stored", + ) + + rcond = Unicode( + None, + allow_none=True, + help="The Data key for the reciprocal condition number, if computed", + ) + + rcond_threshold = Float( + 1.0e-8, help="Minimum value for inverse condition number cut." + ) + det_data = Unicode( defaults.det_data, help="Observation detdata key for the timestream data" ) @@ -102,6 +131,12 @@ class BinMap(Operator): help="Optional extra operator to run prior to binning", ) + post_process = Instance( + klass=Operator, + allow_none=True, + help="Optional extra operator to run after accumulation", + ) + noise_model = Unicode( defaults.noise_model, help="Observation key containing the noise model" ) @@ -111,7 +146,32 @@ class BinMap(Operator): ) full_pointing = Bool( - False, help="If True, expand pointing for all detectors and save" + False, help="If True, save pointing for all observations and detectors" + ) + + single_det_pointing = Bool( + False, help="If True, expand pointing one detector at a time" + ) + + cache_dir = Unicode( + None, + allow_none=True, + help="Directory of per-observation cache directories for reading / writing", + ) + + overwrite_cache = Bool( + False, + help="If True and using a cache, overwrite any inputs found there", + ) + + cache_only = Bool( + False, + help="If True, do not accumulate the total products. 
Useful for pre-caching", + ) + + cache_detdata = Bool( + False, + help="If True, also cache the detector data", ) @traitlets.validate("det_mask") @@ -189,30 +249,29 @@ def _exec(self, data, detectors=None, **kwargs): log.verbose_rank(" BinMap building pipeline", comm=data.comm.comm_world) if self.covariance not in data: - msg = f"Data does not contain noise covariance '{self.covariance}'" - log.error(msg) - raise RuntimeError(msg) - - cov = data[self.covariance] - - # Check that covariance has consistent units - if cov.units != (self.det_data_units**2).decompose(): - msg = f"Covariance '{self.covariance}' units {cov.units} do not" - msg += f" equal det_data units ({self.det_data_units}) squared." - log.error(msg) - raise RuntimeError(msg) - - # Sanity check that the covariance pixel distribution agrees - if cov.distribution != data[self.pixel_dist]: - msg = ( - f"Pixel distribution '{self.pixel_dist}' does not match the one " - f"used by covariance '{self.covariance}'" - ) - log.error(msg) - raise RuntimeError(msg) + cov = None + print(f"BinMap covariance {self.covariance} does not exist, will build", flush=True) + else: + cov = data[self.covariance] + print(f"BinMap covariance {self.covariance} already exists", flush=True) - # Set outputs of the pointing operator + # Check that covariance has consistent units + if cov.units != (self.det_data_units**2).decompose(): + msg = f"Covariance '{self.covariance}' units {cov.units} do not" + msg += f" equal det_data units ({self.det_data_units}) squared." + log.error(msg) + raise RuntimeError(msg) + # Sanity check that the covariance pixel distribution agrees + if cov.distribution != data[self.pixel_dist]: + msg = ( + f"Pixel distribution '{self.pixel_dist}' does not match the one " + f"used by covariance '{self.covariance}'" + ) + log.error(msg) + raise RuntimeError(msg) + + # Set outputs of the pointing operator self.pixel_pointing.create_dist = None # If the binned map already exists in the data, verify the distribution and @@ -229,6 +288,11 @@ def _exec(self, data, detectors=None, **kwargs): data[self.binned].reset() data[self.binned].update_units(1.0 / self.det_data_units) + if self.binned in data: + indata = data[self.binned] + nonz = indata.data != 0 + print(f"BinMap input zmap = {indata.data[nonz]}", flush=True) + # Use the same detector mask in the pointing self.pixel_pointing.detector_pointing.det_mask = self.det_mask self.pixel_pointing.detector_pointing.det_flag_mask = self.det_flag_mask @@ -236,25 +300,46 @@ def _exec(self, data, detectors=None, **kwargs): self.stokes_weights.detector_pointing.det_mask = self.det_mask self.stokes_weights.detector_pointing.det_flag_mask = self.det_flag_mask - # Noise weighted map. We output this to the final binned map location, - # since we will multiply by the covariance in-place. 
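A minimal configuration sketch for the extended BinMap. The trait names match those added above; the data key values, the cache path, and the pixel_pointing / stokes_weights instances are illustrative assumptions, not part of this patch:

    binner = BinMap(
        pixel_dist="pixel_dist",
        covariance="covariance",
        binned="binned",
        hits="hits",
        rcond="rcond",
        det_data="signal",
        cache_dir="obs_cache",        # per-observation accumulation cache
        single_det_pointing=False,    # expand pointing one observation at a time
        pixel_pointing=pixel_pointing,
        stokes_weights=stokes_weights,
    )
    binner.apply(data)

If the "covariance" key is not already present in the Data object, the operator accumulates it (together with the hits, inverse covariance and rcond products it needs) before applying it to the noise-weighted map.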
- - build_zmap = BuildNoiseWeighted( + # Set up the Accumulation operator + if cov is None: + accum_cov = self.covariance + accum_invcov = self.inverse_covariance + if accum_invcov is None: + accum_invcov = f"{self.name}_invcov" + accum_rcond = self.rcond + if accum_rcond is None: + accum_rcond = f"{self.name}_rcond" + else: + accum_cov = None + accum_invcov = None + accum_rcond = None + obs_accum = AccumulateObservation( + cache_dir=self.cache_dir, + overwrite_cache=self.overwrite_cache, + cache_only=self.cache_only, + cache_detdata=self.cache_detdata, pixel_dist=self.pixel_dist, + inverse_covariance=accum_invcov, + hits=self.hits, + rcond=accum_rcond, zmap=self.binned, - view=self.pixel_pointing.view, - pixels=self.pixel_pointing.pixels, - weights=self.stokes_weights.weights, - noise_model=self.noise_model, + covariance=accum_cov, det_data=self.det_data, - det_data_units=self.det_data_units, det_mask=self.det_mask, det_flags=self.det_flags, det_flag_mask=self.det_flag_mask, + det_data_units=self.det_data_units, shared_flags=self.shared_flags, shared_flag_mask=self.shared_flag_mask, + pixel_pointing=self.pixel_pointing, + stokes_weights=self.stokes_weights, + obs_pointing=(not self.single_det_pointing), + save_pointing=self.full_pointing, + noise_model=self.noise_model, + rcond_threshold=self.rcond_threshold, sync_type=self.sync_type, ) + print(f"BIN accum cache_detdata: {self.cache_detdata} -> {obs_accum.cache_detdata}", flush=True) # Build a pipeline to expand pointing and accumulate @@ -262,26 +347,45 @@ def _exec(self, data, detectors=None, **kwargs): accum_ops = list() if self.pre_process is not None: accum_ops.append(self.pre_process) - if self.full_pointing: - # Process all detectors at once - accum = Pipeline(detector_sets=["ALL"]) - else: + accum_ops.append(obs_accum) + if self.post_process is not None: + accum_ops.append(self.post_process) + + if self.single_det_pointing: # Process one detector at a time. accum = Pipeline(detector_sets=["SINGLE"]) - accum_ops.extend([self.pixel_pointing, self.stokes_weights, build_zmap]) - + else: + # Process all detectors in an observation at once, optionally + # saving the pointing through options to AccumulateObservation + # above. + accum = Pipeline(detector_sets=["ALL"]) accum.operators = accum_ops if data.comm.world_rank == 0: log.verbose(" BinMap running pipeline") - pipe_out = accum.apply(data, detectors=detectors) - - # print("Binned zmap = ", data[self.binned].data) - # Optionally, store the noise-weighted map + # Use `load_apply()` to run the pipeline on one observation at a time, + # optionally loading data on the fly if a loader is defined in each + # observation. + pipe_out = accum.load_apply(data, detectors=detectors) + + # If we built the covariance, and are not keeping the intermediate products, + # delete them now. + if cov is None: + if self.inverse_covariance is None: + del data[accum_invcov] + if self.rcond is None: + del data[accum_rcond] + + # Optionally, store the noise-weighted map before applying the covariance + # in place. 
if self.noiseweighted is not None: data[self.noiseweighted] = data[self.binned].duplicate() + if cov is None: + # We computed it in our pipeline + cov = data[self.covariance] + # Extract the results binned_map = data[self.binned] @@ -289,8 +393,6 @@ def _exec(self, data, detectors=None, **kwargs): if data.comm.world_rank == 0: log.verbose(" BinMap applying covariance") covariance_apply(cov, binned_map, use_alltoallv=(self.sync_type == "alltoallv")) - # print("Binned final = ", data[self.binned].data) - return def _finalize(self, data, **kwargs): diff --git a/src/toast/ops/mapmaker_solve.py b/src/toast/ops/mapmaker_solve.py index f75dfcc1f..cbf9d29ac 100644 --- a/src/toast/ops/mapmaker_solve.py +++ b/src/toast/ops/mapmaker_solve.py @@ -77,6 +77,8 @@ def _check_binning(self, proposal): "det_data", "binned", "full_pointing", + "cache_dir", + "cache_detdata", ]: if not bin.has_trait(trt): msg = f"binning operator should have a '{trt}' trait" @@ -118,35 +120,20 @@ def _exec(self, data, detectors=None, **kwargs): msg = f"You must set the '{trait}' trait before calling exec()" raise RuntimeError(msg) - # Make a binned map - - timer.start() - log.debug_rank("MapMaker RHS begin binned map", comm=comm) - - self.binning.det_data = self.det_data - self.binning.det_data_units = self.det_data_units - self.binning.apply(data, detectors=detectors) - - log.debug_rank("MapMaker RHS binned map finished in", comm=comm, timer=timer) - - # Build a pipeline for the projection and template matrix application. - # First create the operators that we will use. - - log.debug_rank("MapMaker RHS begin create projection pipeline", comm=comm) - - # Name of the temporary detdata created if we are not overwriting inputs - det_temp = "temp_RHS" - - # Use the same pointing operator as the binning + # Use the same pointing operators as the binning pixels = self.binning.pixel_pointing weights = self.binning.stokes_weights - # Optionally Copy data to a temporary location to avoid overwriting the input. - copy_det = Copy( - detdata=[ - (self.det_data, det_temp), - ] - ) + # Name of the temporary detdata, used if we are not caching detector data. + det_temp = "temp_RHS" + + # Detector data to clean up after each observation + det_cleanup = list() + if not self.binning.single_det_pointing: + det_cleanup.extend([ + pixels.pixels, + weights.weights + ]) # Set up map-scanning operator to project the binned map. scan_map = ScanMap( @@ -154,7 +141,6 @@ def _exec(self, data, detectors=None, **kwargs): weights=weights.weights, view=pixels.view, map_key=self.binning.binned, - det_data=det_temp, det_data_units=self.det_data_units, det_mask=self.binning.det_mask, det_flag_mask=self.binning.det_flag_mask, @@ -164,7 +150,6 @@ def _exec(self, data, detectors=None, **kwargs): # Set up noise weighting operator noise_weight = NoiseWeight( noise_model=self.binning.noise_model, - det_data=det_temp, det_mask=self.binning.det_mask, det_flag_mask=self.binning.det_flag_mask, view=pixels.view, @@ -172,60 +157,89 @@ def _exec(self, data, detectors=None, **kwargs): # Set up template matrix operator. self.template_matrix.transpose = True - self.template_matrix.det_data = det_temp self.template_matrix.det_data_units = self.det_data_units - # Create a pipeline that projects the binned map and applies noise - # weights and templates. 
- - proj_pipe = None - if self.binning.full_pointing: - # Process all detectors at once, since we have the pointing already - proj_pipe = Pipeline(detector_sets=["ALL"]) - oplist = [ - copy_det, + # The pipeline of operators to subtract the scanned map from the + # original timestreams. + pipe_ops = list() + + if data.using_loaders(): + # Our detector data is being loaded on demand. We need to pass through + # the timestreams twice, and so we enable fast native caching of the + # signal while keeping the rest of the data in memory. + if self.binning.cache_dir is None: + # This is an error- at the workflow level, if lazy-loading of data + # is enabled, the binning operator should specify a cache location. + msg = "Data is using loaders, but no cache location specified " + msg += "for the binner" + raise RuntimeError(msg) + self.binning.cache_detdata = True + scan_map.det_data = self.det_data + noise_weight.det_data = self.det_data + self.template_matrix.det_data = self.det_data + det_cleanup.append(self.det_data) + pipe_ops = [ + pixels, + weights, scan_map, noise_weight, self.template_matrix, + Delete(detdata=det_cleanup), ] - proj_pipe.operators = oplist else: - # Process one detector at a time. - proj_pipe = Pipeline(detector_sets=["SINGLE"]) - oplist = [ - copy_det, + # We have persistent detector timestreams which we do not want to + # overwrite. Copy this to a temp location each observation. + #self.binning.cache_detdata = False + scan_map.det_data = det_temp + noise_weight.det_data = det_temp + self.template_matrix.det_data = det_temp + det_cleanup.append(det_temp) + pipe_ops = [ + Copy(detdata=[(self.det_data, det_temp)]), pixels, weights, scan_map, noise_weight, self.template_matrix, + Delete(detdata=det_cleanup), ] - proj_pipe.operators = oplist - log.debug_rank( - "MapMaker RHS projection pipeline created in", comm=comm, timer=timer - ) + # Make a binned map. + + timer.start() + log.debug_rank("MapMaker RHS begin binned map", comm=comm) + + print(f"Binner rcond set to {self.binning.rcond}", flush=True) + print(f"Binner det_flags set to {self.binning.det_flags}", flush=True) + for ob in data.obs: + print(f"Binner {ob.name} det_flags nz = {np.count_nonzero(ob.detdata[self.binning.det_flags][:])} / {len(ob.local_detectors) * ob.n_local_samples}", flush=True) + + self.binning.det_data = self.det_data + self.binning.det_data_units = self.det_data_units + self.binning.apply(data, detectors=detectors) + + log.debug_rank("MapMaker RHS binned map finished in", comm=comm, timer=timer) + + # Create a pipeline that projects the binned map and applies noise + # weights and templates. + + if self.binning.single_det_pointing: + # Process one detector at a time. + proj_pipe = Pipeline(detector_sets=["SINGLE"]) + else: + # Process all detectors at once in each observation. + proj_pipe = Pipeline(detector_sets=["ALL"]) + proj_pipe.operators = pipe_ops # Run this projection pipeline. 
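The loader bookkeeping added to Data is used in the same way throughout this patch. A minimal sketch of running a pass over the data without triggering on-demand loading; housekeeping_pipe is a hypothetical pipeline that only touches metadata:

    if data.using_loaders():
        # Stash the loaders so exec() does not pull detector data in,
        # then hand them back afterwards.
        saved = data.save_loaders()
        try:
            housekeeping_pipe.apply(data)
        finally:
            data.restore_loaders(saved)
    else:
        housekeeping_pipe.apply(data)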
log.debug_rank("MapMaker RHS begin run projection pipeline", comm=comm) - - proj_pipe.apply(data, detectors=detectors) + proj_pipe.load_apply(data, detectors=detectors) log.debug_rank( "MapMaker RHS projection pipeline finished in", comm=comm, timer=timer ) - log.debug_rank( - "MapMaker RHS begin cleanup temporary detector data", comm=comm - ) - - # Clean up our temp buffer - delete_temp = Delete(detdata=[det_temp]) - delete_temp.apply(data) - - log.debug_rank("MapMaker RHS cleanup finished in", comm=comm, timer=timer) - return def _finalize(self, data, **kwargs): @@ -353,15 +367,11 @@ def _exec(self, data, detectors=None, **kwargs): msg = f"You must set the '{trait}' trait before calling exec()" raise RuntimeError(msg) - # Clear temp detector data if it exists - for ob in data.obs: - if self.det_temp in ob.detdata: - ob.detdata[self.det_temp][:] = 0 - ob.detdata[self.det_temp].update_units(self.det_data_units) - if ob.detdata[self.det_temp].accel_exists(): - ob.detdata[self.det_temp].accel_reset() + # Delete temp detector data if it exists + delete_temp = Delete(detdata=[self.det_temp]) + delete_temp.apply(data) - # Pointing operator used in the binning + # Pointing operators used in the binning pixels = self.binning.pixel_pointing weights = self.binning.stokes_weights @@ -378,9 +388,12 @@ def _exec(self, data, detectors=None, **kwargs): self.binning.det_data_units = self.det_data_units self.binning.pre_process = self.template_matrix + self.binning.post_process = delete_temp self.binning.apply(data, detectors=detectors) + self.binning.pre_process = None + self.binning.post_process = None # nz = data[self.binning.binned].data != 0 # print( @@ -427,6 +440,16 @@ def _exec(self, data, detectors=None, **kwargs): "MapMaker LHS begin scan map and accumulate amplitudes", comm=comm ) + # At the end of the pipeline, delete the expanded pointing and temporary + # detector data. + det_cleanup = [self.det_temp] + if not self.binning.single_det_pointing: + det_cleanup.extend([ + pixels.pixels, + weights.weights + ]) + delete_det_temp = Delete(detdata=det_cleanup) + # Set up map-scanning operator to project the binned map. scan_map = ScanMap( pixels=pixels.pixels, @@ -456,46 +479,28 @@ def _exec(self, data, detectors=None, **kwargs): template_transpose.amplitudes = self.out template_transpose.transpose = True - # Clear temp detector data - for ob in data.obs: - if self.det_temp in ob.detdata: - ob.detdata[self.det_temp][:] = 0 - ob.detdata[self.det_temp].update_units(self.det_data_units) - if ob.detdata[self.det_temp].accel_exists(): - ob.detdata[self.det_temp].accel_reset() - # Create a pipeline that projects the binned map and applies noise # weights and templates. - proj_pipe = None - if self.binning.full_pointing: - # Process all detectors at once - proj_pipe = Pipeline( - detector_sets=["ALL"], - operators=[ - self.template_matrix, - scan_map, - noise_weight, - template_transpose, - ], - ) - else: + if self.binning.single_det_pointing: # Process one detector at a time. - proj_pipe = Pipeline( - detector_sets=["SINGLE"], - operators=[ - self.template_matrix, - pixels, - weights, - scan_map, - noise_weight, - template_transpose, - ], - ) + proj_pipe = Pipeline(detector_sets=["SINGLE"]) + else: + # Process all detectors at once in each observation. + proj_pipe = Pipeline(detector_sets=["ALL"]) + proj_pipe.operators = [ + self.template_matrix, + pixels, + weights, + scan_map, + noise_weight, + template_transpose, + delete_det_temp, + ] # Run the projection pipeline. 
- proj_pipe.apply(data, detectors=detectors) + proj_pipe.load_apply(data, detectors=detectors) log.debug_rank( "MapMaker LHS map scan and amplitude accumulate finished in", @@ -589,6 +594,20 @@ def solve( msg = f"starting guess['{k}'] has different n_global than rhs['{k}']" raise RuntimeError(msg) + # Saving any Loaders. The amplitude solve starts and ends with template amplitudes + # and creates temporary detector data as an intermediate product. However, if a + # data loader class exists in the observations, that would be called every + # iteration, even if the original timestreams are not used. To prevent that, we + # move any loaders out of the way and then restore them at the end. + saved_loaders = data.save_loaders() + + # Similarly, if the LHS binning operator is configured to cache data, we want + # to disable that during the solve. + save_cache_dir = lhs_op.binning.cache_dir + lhs_op.binning.cache_dir = None + save_cache_detdata = lhs_op.binning.cache_detdata + lhs_op.binning.cache_detdata = False + # Solving A * x = b ... # Temporary variables. We give things more descriptive names here, but to align @@ -752,4 +771,11 @@ def solve( del lhs_out del data[lhs_out_key] + # Restore loaders + data.restore_loaders(saved_loaders) + + # Restore cache options + lhs_op.binning.cache_dir = save_cache_dir + lhs_op.binning.cache_detdata = save_cache_detdata + return diff --git a/src/toast/ops/mapmaker_templates.py b/src/toast/ops/mapmaker_templates.py index 4094512f5..61ce63e06 100644 --- a/src/toast/ops/mapmaker_templates.py +++ b/src/toast/ops/mapmaker_templates.py @@ -251,10 +251,6 @@ def _exec(self, data, detectors=None, use_accel=None, **kwargs): if tmpl.enabled: tmpl.det_data = self.det_data - # We loop over detectors. Internally, each template loops over observations - # and ignores observations where the detector does not exist. - all_dets = data.all_local_detectors(selection=detectors, flagmask=self.det_mask) - if self.transpose: # Check that the incoming detector data in all observations has the correct # units. @@ -265,7 +261,7 @@ def _exec(self, data, detectors=None, use_accel=None, **kwargs): if ob.detdata[self.det_data].units != input_units: msg = f"obs {ob.name} detdata {self.det_data}" msg += f" does not have units of {input_units}" - msg += f" before template matrix projection" + msg += " before template matrix projection" log.error(msg) raise RuntimeError(msg) @@ -287,14 +283,19 @@ def _exec(self, data, detectors=None, use_accel=None, **kwargs): data[self.amplitudes].accel_create(self.name) data[self.amplitudes].accel_update_device() - for d in all_dets: + for ob in data.obs: + dets = ob.select_local_detectors( + selection=detectors, + flagmask=self.det_mask, + ) for tmpl in self.templates: if tmpl.enabled: log.verbose( - f"TemplateMatrix {d} project_signal {tmpl.name} (use_accel={use_accel})" + f"TemplateMatrix project_signal {tmpl.name} (use_accel={use_accel})" ) tmpl.project_signal( - d, + ob, + dets, data[self.amplitudes][tmpl.name], use_accel=use_accel, **kwargs, @@ -321,6 +322,7 @@ def _exec(self, data, detectors=None, use_accel=None, **kwargs): accel=use_accel, create_units=self.det_data_units, ) + print(f"TemplateMatrix: ensured {self.det_data} in {ob.name}", flush=True) if exists: # We need to clear our detector TOD before projecting amplitudes # into timestreams. 
Note: in the accelerator case, the reset call @@ -333,14 +335,14 @@ def _exec(self, data, detectors=None, use_accel=None, **kwargs): ob.detdata[self.det_data].update_units(self.det_data_units) - for d in all_dets: for tmpl in self.templates: if tmpl.enabled: log.verbose( - f"TemplateMatrix {d} add to signal {tmpl.name} (use_accel={use_accel})" + f"TemplateMatrix add to signal {tmpl.name} (use_accel={use_accel})" ) tmpl.add_to_signal( - d, + ob, + dets, data[self.amplitudes][tmpl.name], use_accel=use_accel, **kwargs, @@ -418,7 +420,7 @@ class SolveAmplitudes(Operator): that model the timestream contributions from noise, systematics, etc: .. math:: - \left[ M^T N^{-1} Z M + M_p \right] a = M^T N^{-1} Z d + [ M^T N^{-1} Z M + M_p ] a = M^T N^{-1} Z d Where `a` are the solved amplitudes and `d` is the input data. `N` is the diagonal time domain noise covariance. `M` is a matrix of templates that @@ -437,6 +439,15 @@ class SolveAmplitudes(Operator): template matrix `M` and one operator for the binning, `B`. It then uses a conjugate gradient solver to solve for the amplitudes. + The PixelDistribution of the data (specified in the binner pixel_dist trait) + should already be computed before calling this function. This can be produced + from a fixed sky footprint or by passing through the pointing once with the + BuildPixelDistribution operator. + + A note on masking: A map-domain mask can be specified for masking bright + regions or objects during the template solve. This must use the same + PixelDistribution as the other map-domain products in the solver. + """ # Class traits @@ -460,11 +471,6 @@ class SolveAmplitudes(Operator): help="When solving, minimum value for inverse pixel condition number cut.", ) - map_rcond_threshold = Float( - 1.0e-8, - help="For final map, minimum value for inverse pixel condition number cut.", - ) - mask = Unicode( None, allow_none=True, @@ -507,12 +513,6 @@ class SolveAmplitudes(Operator): mc_index = Int(None, allow_none=True, help="The Monte-Carlo index") - reset_pix_dist = Bool( - False, - help="Clear any existing pixel distribution. Useful when applying " - "repeatedly to different data objects.", - ) - report_memory = Bool(False, help="Report memory throughout the execution") @traitlets.validate("binning") @@ -641,13 +641,6 @@ def _prepare_pixels(self): repeatedly with different data objects) """ - if self.reset_pix_dist: - if self.binning.pixel_dist in self._data: - del self._data[self.binning.pixel_dist] - - self._memreport.prefix = "After resetting pixel distribution" - self._memreport.apply(self._data) - # The pointing matrix used for the solve. 
The per-detector flags # are normally reset when the binner is run, but here we set them # explicitly since we will use these pointing matrix operators for @@ -668,8 +661,8 @@ def _prepare_pixels(self): pixels=solve_pixels.pixels, view=self._solve_view, ) - if self.binning.full_pointing: - # We are caching the pointing anyway- run with all detectors + if not self.binning.single_det_pointing: + # Run with all detectors scan_pipe = Pipeline( detector_sets=["ALL"], operators=[solve_pixels, self._scanner] ) @@ -690,8 +683,10 @@ def _prepare_flagging_ob(self, ob): # Get the detectors we are using for this observation dets = ob.select_local_detectors(self._detectors, flagmask=self._save_det_mask) + print(f"_prepare_flag_ob dets = {dets}", flush=True) if len(dets) == 0: # Nothing to do for this observation + print(f"DEBUG _prepare_flagging_ob {ob.name} has no dets", flush=True) return if self.mc_mode: @@ -710,43 +705,42 @@ def _prepare_flagging_ob(self, ob): return # Create the new solver flags - exists = ob.detdata.ensure(self.solver_flags, dtype=np.uint8, detectors=dets) - - # The data views - views = ob.view[self._solve_view] - # For each view... - for vw in range(len(views)): - view_samples = None - if views[vw].start is None: - # There is one view of the whole obs - view_samples = ob.n_local_samples - else: - view_samples = views[vw].stop - views[vw].start - starting_flags = np.zeros(view_samples, dtype=np.uint8) - if self._save_shared_flags is not None: - starting_flags[:] = np.where( - ( - views.shared[self._save_shared_flags][vw] - & self._save_shared_flag_mask - ) - > 0, - 1, - 0, - ) - for d in dets: - views.detdata[self.solver_flags][vw][d, :] = starting_flags + _ = ob.detdata.ensure( + self.solver_flags, dtype=np.uint8, detectors=self._detectors + ) + ob.detdata[self.solver_flags][:] = defaults.det_mask_processing + print( + f"DEBUG _prepare_flagging_ob {ob.name} called ensure {self.solver_flags}", + flush=True, + ) + + starting_flags = np.zeros(ob.n_local_samples, dtype=np.uint8) + if self._save_shared_flags is not None: + starting_flags[:] = np.where( + (ob.shared[self._save_shared_flags].data & self._save_shared_flag_mask) + > 0, + 1, + 0, + ) + print(f"DEBUG {ob.name} starting flags = {np.count_nonzero(starting_flags)}") + for d in dets: + for vw in ob.intervals[self._solve_view]: + vw_slc = slice(vw.first, vw.last, 1) + detflgs = ob.detdata[self.solver_flags][d] + detflgs[vw_slc] = starting_flags[vw_slc] if self._save_det_flags is not None: - views.detdata[self.solver_flags][vw][d, :] |= np.where( + print(f"DEBUG {ob.name}:{d}:{vw} pre flags = {np.count_nonzero(detflgs[vw_slc])}") + detflgs[vw_slc] |= np.where( ( - views.detdata[self._save_det_flags][vw][d] + detflgs[vw_slc] & self._save_det_flag_mask ) > 0, 1, 0, - ).astype(views.detdata[self.solver_flags][vw].dtype) - - return + ).astype(ob.detdata[self.solver_flags].dtype) + print(f"DEBUG {ob.name}:{d}:{vw} post flags = {np.count_nonzero(detflgs[vw_slc])}") + print(f"_prepare_flag_ob: {ob.name} det_flags nz = {np.count_nonzero(ob.detdata[self.solver_flags][:])} / {len(ob.local_detectors) * ob.n_local_samples}", flush=True) @function_timer def _prepare_flagging(self, scan_pipe): @@ -768,7 +762,7 @@ def _prepare_flagging(self, scan_pipe): if self.mc_mode: # Shortcut, just verified that our flags exist self._log.info_rank( - f"{self._log_prefix} MC mode, reusing flags for solver", comm=comm + f"{self._log_prefix} MC mode, reusing flags for solver", comm=self._comm ) return @@ -809,10 +803,12 @@ def _count_cut_data(self): if 
len(dets) == 0: # Nothing to do for this observation continue - for vw in ob.view[self._solve_view].detdata[self.solver_flags]: - for d in dets: - local_total += len(vw[d]) - local_cut += np.count_nonzero(vw[d]) + for d in dets: + for vw in ob.intervals[self._solve_view]: + slc = slice(vw.first, vw.last, 1) + dflg = ob.detdata[self.solver_flags][d, slc] + local_total += len(dflg) + local_cut += np.count_nonzero(dflg) if self._comm is None: total = local_total @@ -827,63 +823,9 @@ def _count_cut_data(self): return - @function_timer - def _get_pixel_covariance(self, solve_pixels, solve_weights): - """Construct the noise covariance, hits, and condition number map for - the solver. - """ - - if self.mc_mode: - # Shortcut, verify that our covariance and other products exist. - if self.binning.pixel_dist not in self._data: - msg = f"MC mode, pixel distribution " - msg += f"'{self.binning.pixel_dist}' does not exist" - self._log.error(msg) - raise RuntimeError(msg) - if self.solver_cov_name not in self._data: - msg = f"MC mode, covariance '{self.solver_cov_name}' does not exist" - self._log.error(msg) - raise RuntimeError(msg) - self._log.info_rank( - f"{self._log_prefix} MC mode, reusing covariance for solver", - comm=self._comm, - ) - return - - self._log.info_rank( - f"{self._log_prefix} begin build of solver covariance", - comm=self._comm, - ) - - solver_cov = CovarianceAndHits( - pixel_dist=self.binning.pixel_dist, - covariance=self.solver_cov_name, - hits=self.solver_hits_name, - rcond=self.solver_rcond_name, - det_data_units=self._det_data_units, - det_mask=self._save_det_mask, - det_flags=self.solver_flags, - det_flag_mask=255, - pixel_pointing=solve_pixels, - stokes_weights=solve_weights, - noise_model=self.binning.noise_model, - rcond_threshold=self.solve_rcond_threshold, - sync_type=self.binning.sync_type, - save_pointing=self.binning.full_pointing, - ) - - solver_cov.apply(self._data, detectors=self._detectors) - - self._memreport.prefix = "After constructing covariance and hits" - self._memreport.apply(self._data) - - return - @function_timer def _get_rcond_mask(self, scan_pipe): - """Construct the noise covariance, hits, and condition number mask for - the solver. - """ + """Construct the condition number mask for the solver.""" if self.mc_mode: # The flags are already cached @@ -902,6 +844,7 @@ def _get_rcond_mask(self, scan_pipe): rcond_mask = self._data[self.solver_rcond_mask_name].data bad = rcond < self.solve_rcond_threshold n_bad = np.count_nonzero(bad) + print(f"RCOND MASK {n_bad} bad pixels") n_good = rcond.size - n_bad rcond_mask[bad] = 1 @@ -935,6 +878,7 @@ def _get_rhs(self): ) # Initialize the template matrix + self.template_matrix.reset() self.template_matrix.det_data = self.det_data self.template_matrix.det_data_units = self._det_data_units self.template_matrix.det_flags = self.solver_flags @@ -949,13 +893,26 @@ def _get_rhs(self): self.binning.det_flag_mask = 255 self.binning.det_data_units = self._det_data_units - # Set the binning operator to output to temporary map. This will be - # overwritten on each iteration of the solver. + # The binning operator (calling lower-level operators) will create the map + # domain quantities on the fly if they do not exist. 
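In other words, on the first RHS evaluation the binner also accumulates the solver covariance, hits and rcond maps named below, while later passes (for example Monte Carlo re-runs with mc_mode) find those products already in the Data object and reuse them.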
+ self.binning.binned = self.solver_bin self.binning.covariance = self.solver_cov_name + self.binning.rcond = self.solver_rcond_name + self.binning.hits = self.solver_hits_name + + write_hits = False + if self.solver_cov_name not in self._data: + # First time, we are going to create the covariance and hits + write_hits = True self.template_matrix.amplitudes = self.solver_rhs + print( + f"SolveAmplitudes _get_rhs cache_detdata: {self.binning.cache_detdata}", + flush=True, + ) + rhs_calc = SolverRHS( name=f"{self.name}_rhs", det_data=self.det_data, @@ -965,6 +922,17 @@ def _get_rhs(self): ) rhs_calc.apply(self._data, detectors=self._detectors) + print("RHS Writing...", flush=True) + for tmpl in self.template_matrix.templates: + tmpl_amps = self._data[self.solver_rhs][tmpl.name] + out_root = os.path.join(self.output_dir, f"RHS_{tmpl.name}") + tmpl.write(tmpl_amps, out_root) + + print(f"RHS rcond = {self._data[self.solver_rcond_name].data}") + + if write_hits: + self._write_del(self.solver_hits_name) + self._log.info_rank( f"{self._log_prefix} finished RHS calculation in", comm=self._comm, @@ -1039,9 +1007,6 @@ def _cleanup(self): self.template_matrix.det_flags = self._save_tmpl_flags self.template_matrix.det_flag_mask = self._save_tmpl_det_mask self.template_matrix.det_mask = self._save_tmpl_mask - # FIXME: this reset does not seem needed - # if not self.mc_mode: - # self.template_matrix.reset_templates() del self._solve_view @@ -1075,6 +1040,8 @@ def _exec(self, data, detectors=None, use_accel=None, **kwargs): ): return + print(f"DEBUG SolveAmplitudes {self.name} call _setup", flush=True) + self._setup(data, detectors, use_accel) self._memreport.prefix = "Start of amplitude solve" @@ -1084,15 +1051,15 @@ def _exec(self, data, detectors=None, use_accel=None, **kwargs): self._timer.start() + print(f"DEBUG SolveAmplitudes {self.name} call _prepare_flagging", flush=True) self._prepare_flagging(scan_pipe) - self._get_pixel_covariance(solve_pixels, solve_weights) - self._write_del(self.solver_hits_name) + print(f"DEBUG SolveAmplitudes {self.name} call _get_rhs", flush=True) + self._get_rhs() self._get_rcond_mask(scan_pipe) self._write_del(self.solver_rcond_mask_name) - self._get_rhs() self._solve_amplitudes() self._write_del(self.solver_cov_name) @@ -1139,127 +1106,3 @@ def _provides(self): ) prov["detdata"] = [self.solver_flags] return prov - - -@trait_docs -class ApplyAmplitudes(Operator): - """Project template amplitudes and do timestream arithmetic. - - This projects amplitudes to the time domain and then adds, subtracts, multiplies, - or divides the original timestream by this result. - - """ - - API = Int(0, help="Internal interface version for this operator") - - op = Unicode( - "subtract", - help="Operation on the timestreams: 'subtract', 'add', 'multiply', or 'divide'", - ) - - det_data = Unicode( - defaults.det_data, help="Observation detdata key for the timestream data" - ) - - amplitudes = Unicode(None, allow_none=True, help="Data key for input amplitudes") - - template_matrix = Instance( - klass=Operator, - allow_none=True, - help="This must be an instance of a template matrix operator", - ) - - output = Unicode( - None, - allow_none=True, - help="Name of output detdata. 
If None, overwrite input.", - ) - - report_memory = Bool(False, help="Report memory throughout the execution") - - @traitlets.validate("op") - def _check_op(self, proposal): - val = proposal["value"] - if val is not None: - if val not in ["add", "subtract", "multiply", "divide"]: - raise traitlets.TraitError("op must be one of the 4 allowed strings") - return val - - def __init__(self, **kwargs): - super().__init__(**kwargs) - self._initialized = False - - @function_timer - def _exec(self, data, detectors=None, use_accel=None, **kwargs): - log = Logger.get() - - # Check if we have any templates - if self.template_matrix is None: - return - - n_enabled_templates = 0 - for template in self.template_matrix.templates: - if template.enabled: - n_enabled_templates += 1 - - if n_enabled_templates == 0: - # Nothing to do! - return - - # Get the units used across the distributed data for our desired - # input detector data - det_data_units = data.detector_units(self.det_data) - - # Temporary location for single-detector projected template - # timestreams. - projected = f"{self.name}_temp" - - # Are we saving the resulting timestream to a new location? If so, - # create that now for all detectors. - - if self.output is not None: - # We just copy the input here, since it will be overwritten - Copy(detdata=[(self.det_data, self.output)]).apply( - data, use_accel=use_accel - ) - - # Projecting amplitudes to timestreams - self.template_matrix.transpose = False - self.template_matrix.det_data = projected - self.template_matrix.det_data_units = det_data_units - self.template_matrix.amplitudes = self.amplitudes - - # Arithmetic operator - combine = Combine(op=self.op, first=self.det_data, second=projected) - if self.output is None: - combine.result = self.det_data - else: - combine.result = self.output - - # Project and operate, one detector at a time - pipe = Pipeline( - detector_sets=["SINGLE"], - operators=[ - self.template_matrix, - combine, - ], - ) - pipe.apply(data, use_accel=use_accel) - - def _finalize(self, data, **kwargs): - return - - def _requires(self): - # This operator requires everything that its sub-operators need. - req = dict() - req["global"] = [self.amplitudes] - req["detdata"] = list() - if self.template_matrix is not None: - req.update(self.template_matrix.requires()) - req["detdata"].append(self.det_data) - return req - - def _provides(self): - prov = dict() - prov["detdata"] = [self.output] - return prov diff --git a/src/toast/ops/mapmaker_utils/mapmaker_utils.py b/src/toast/ops/mapmaker_utils/mapmaker_utils.py index 2dd4db71d..e1e48e097 100644 --- a/src/toast/ops/mapmaker_utils/mapmaker_utils.py +++ b/src/toast/ops/mapmaker_utils/mapmaker_utils.py @@ -706,6 +706,7 @@ def _exec(self, data, detectors=None, use_accel=None, **kwargs): if self.zmap in data: # We have an existing map from a previous call. 
Verify # the distribution and units + print(f"BuildNoiseWeighted: existing zmap: {data[self.zmap]}", flush=True) if data[self.zmap].distribution != dist: msg = "Existing zmap '{}' has different data distribution".format( self.zmap @@ -728,6 +729,7 @@ def _exec(self, data, detectors=None, use_accel=None, **kwargs): ) if len(dets) == 0: # Nothing to do for this observation + print(f"BuildNoiseWeighted: ob {ob.name} has no dets", flush=True) continue if self.weights in ob.detdata: if len(ob.detdata[self.weights].detector_shape) == 1: @@ -743,6 +745,7 @@ def _exec(self, data, detectors=None, use_accel=None, **kwargs): data[self.zmap] = PixelData( dist, np.float64, n_value=weight_nnz, units=zmap_units ) + print(f"BuildNoiseWeighted: created zmap: {data[self.zmap]}", flush=True) zmap = data[self.zmap] if use_accel: @@ -799,6 +802,7 @@ def _exec(self, data, detectors=None, use_accel=None, **kwargs): ) if len(dets) == 0: # Nothing to do for this observation + print(f"BuildNoiseWeighted: ob {ob.name} has no dets", flush=True) continue # Check that the noise model exists @@ -844,6 +848,12 @@ def _exec(self, data, detectors=None, use_accel=None, **kwargs): else: shared_flag_data = np.zeros(1, dtype=np.uint8) + # print(f"BuildNoiseWeighted: ob {ob.name} kernel:", flush=True) + # print(f"BuildNoiseWeighted: {ob.detdata[self.det_data]} {flag_data}", flush=True) + # print(f"BuildNoiseWeighted: {ob.detdata[self.pixels].data} {ob.detdata[self.weights].data}", flush=True) + + print(f"BuildNoiseWeighted: IN {np.count_nonzero(zmap.data)}", flush=True) + build_noise_weighted( zmap.distribution.global_submap_to_local, zmap.data, @@ -863,6 +873,9 @@ def _exec(self, data, detectors=None, use_accel=None, **kwargs): impl=implementation, use_accel=use_accel, ) + print(f"BuildNoiseWeighted: OUT {np.count_nonzero(zmap.data)}", flush=True) + print(f"BuildNoiseWeighted: zmap = {zmap.data[:, :]}", flush=True) + print(f"BuildNoiseWeighted: zmap nz = {np.count_nonzero(zmap.data[:, :])}", flush=True) # # DEBUGGING # restore_dev = False diff --git a/src/toast/ops/operator.py b/src/toast/ops/operator.py index 5faceb493..0d8ab873c 100644 --- a/src/toast/ops/operator.py +++ b/src/toast/ops/operator.py @@ -2,6 +2,7 @@ # All rights reserved. Use of this source code is governed by # a BSD-style license that can be found in the LICENSE file. 
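An observation loader, as consumed by load_exec() and load_apply(), is any object attached as ob.loader that provides load(obs, data=...) and unload(obs, data=...). A minimal sketch of wiring up the PipelineLoader introduced in this patch; SimNoise is used only as an illustrative generator and my_mapmaker stands for any operator instance:

    from toast.ops import SimNoise
    from toast.ops.pipeline import Pipeline, PipelineLoader

    sim_pipe = Pipeline(operators=[SimNoise()], detector_sets=["ALL"])
    for ob in data.obs:
        ob.loader = PipelineLoader(pipeline=sim_pipe)

    # Operators invoked through load_apply() now generate, process and purge
    # the detector data one observation at a time.
    my_mapmaker.load_apply(data)

The SimGround and SimSatellite changes below do the same attachment automatically when their load_pipe trait is set.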
+from ..data import obs_loader_name from ..timing import function_timer_stackskip from ..traits import TraitConfig from ..utils import Logger @@ -138,18 +139,24 @@ def load_exec(self, data, detectors=None, **kwargs): if self.enabled: for iobs, obs in enumerate(data.obs): unload = False - if hasattr(obs, "loader"): - obs.loader.load(obs) + if hasattr(obs, obs_loader_name): + msg = f"Loading detector data for {obs.name}" + log.debug_rank(msg, comm=obs.comm.comm_group) + obs.loader.load(obs, data=data) unload = True + else: + msg = f"No detector data loader used for {obs.name}" + log.debug_rank(msg, comm=obs.comm.comm_group) temp_data = data.select(obs_index=iobs) self.exec(temp_data, detectors=detectors, **kwargs) del temp_data if unload: - obs.loader.unload(obs) + msg = f"Unloading detector data for {obs.name}" + log.debug_rank(msg, comm=obs.comm.comm_group) + obs.loader.unload(obs, data=data) else: - if data.comm.world_rank == 0: - msg = f"Operator {self.name} is disabled, skipping call to load_exec()" - log.debug(msg) + msg = f"Operator {self.name} is disabled, skipping call to load_exec()" + log.debug_rank(msg, comm=data.comm.comm_world) @function_timer_stackskip def load_apply(self, data, detectors=None, **kwargs): diff --git a/src/toast/ops/pipeline.py b/src/toast/ops/pipeline.py index a4f942c35..c3d119d75 100644 --- a/src/toast/ops/pipeline.py +++ b/src/toast/ops/pipeline.py @@ -386,3 +386,37 @@ def __str__(self): Converts the pipeline into a human-readable string. """ return f"Pipeline{[op.__class__.__qualname__ for op in self.operators]}" + + +class PipelineLoader(object): + """Loader class that runs a Pipeline to generate data. + + This class can be used as an observation loader. It will apply the pipeline + to each specified observation when the `load()` method is called. When the + `unload()` method is called, all detector data created by the pipeline will + be purged. Shared data and metadata created by the pipeline will be left in + place. + + Args: + pipeline (Pipeline): The Pipeline instance to run on demand. + + """ + + def __init__(self, pipeline=None): + self._pipe = pipeline + self._det_purge = pipeline.provides()["detdata"] + + def load(self, obs, data=None): + if data is None: + msg = "PipelineLoader should be called with data argument" + raise RuntimeError(msg) + temp_data = data.select(obs_uid=obs.uid) + self._pipe.apply(temp_data) + del temp_data + + def unload(self, obs, data=None): + if data is None: + msg = "PipelineLoader should be called with data argument" + raise RuntimeError(msg) + for dd in self._det_purge: + del obs.detdata[dd] diff --git a/src/toast/ops/sim_ground.py b/src/toast/ops/sim_ground.py index dab9add61..556ab2398 100644 --- a/src/toast/ops/sim_ground.py +++ b/src/toast/ops/sim_ground.py @@ -46,7 +46,7 @@ from .azimuth_intervals import AzimuthRanges from .flag_intervals import FlagIntervals from .operator import Operator -from .pipeline import Pipeline +from .pipeline import Pipeline, PipelineLoader from .sim_ground_utils import ( add_solar_intervals, oscillate_el, @@ -84,7 +84,9 @@ class SimGround(Operator): ) telescope_file = Unicode( - None, allow_none=True, help="Path to HDF5 file containing an instrument group." + None, + allow_none=True, + help="Path to HDF5 file containing an instrument group."
) session_split_key = Unicode( @@ -338,6 +340,12 @@ class SimGround(Operator): help="Bit mask to raise elevation nod flags with", ) + load_pipe = Instance( + klass=Pipeline, + allow_none=True, + help="A Pipeline to add as a Loader to each observation" + ) + @traitlets.validate("telescope") def _check_telescope(self, proposal): tele = proposal["value"] @@ -457,10 +465,14 @@ def _exec(self, data, detectors=None, **kwargs): self.telescope = comm.comm_world.bcast(self.telescope, root=0) if self.schedule is None: - schedule = GroundSchedule() - schedule.read(self.schedule_file, comm=comm.comm_world) - if self.sort_schedule_file: - schedule.sort_by_RA() + schedule = None + if comm.world_rank == 0: + schedule = GroundSchedule() + schedule.read(self.schedule_file, comm=comm.comm_world) + if self.sort_schedule_file: + schedule.sort_by_RA() + if comm.comm_world is not None: + schedule = comm.comm_world.bcast(schedule, root=0) else: schedule = self.schedule @@ -474,6 +486,13 @@ def _exec(self, data, detectors=None, **kwargs): if len(schedule.scans) == 0: raise RuntimeError("Schedule has no scans!") + if self.load_pipe is not None and ( + self.det_data is not None or self.det_flags is not None + ): + msg = "If load_pipe is specified, the detector data and flags should be " + msg += "specified by that operator, not with det_data and det_flags." + raise RuntimeError(msg) + # Get per-observation telescopes obs_tele = self._obs_telescopes(data, det_ranks, detectors) @@ -703,6 +722,9 @@ def _exec(self, data, detectors=None, **kwargs): dtype=np.uint8, ) + if self.load_pipe is not None: + ob.loader = PipelineLoader(pipeline=self.load_pipe) + # Only the first rank of the process grid columns sets / computes these. if sname != last_session: diff --git a/src/toast/ops/sim_satellite.py b/src/toast/ops/sim_satellite.py index ab02879d0..d4471c175 100644 --- a/src/toast/ops/sim_satellite.py +++ b/src/toast/ops/sim_satellite.py @@ -22,7 +22,7 @@ from ..traits import Bool, Float, Instance, Int, Quantity, Unicode, Unit, trait_docs from ..utils import Environment, Logger, name_UID, rate_from_times from .operator import Operator -from .pipeline import Pipeline +from .pipeline import Pipeline, PipelineLoader from .sim_hwp import simulate_hwp_response @@ -210,7 +210,9 @@ class SimSatellite(Operator): ) telescope_file = Unicode( - None, allow_none=True, help="Path to HDF5 file containing an instrument group." + None, + allow_none=True, + help="Path to HDF5 file containing an instrument group." 
) schedule = Instance( @@ -220,7 +222,7 @@ class SimSatellite(Operator): schedule_file = Unicode( None, allow_none=True, - help="satellite-based observing schedule file", + help="Satellite-based observing schedule file", ) spin_angle = Quantity( @@ -294,6 +296,12 @@ class SimSatellite(Operator): help="Observation detdata key for flags to initialize", ) + load_pipe = Instance( + klass=Pipeline, + allow_none=True, + help="A Pipeline to add as a Loader to each observation", + ) + @traitlets.validate("coord") def _check_coord(self, proposal): check = proposal["value"] @@ -362,11 +370,22 @@ def _exec(self, data, detectors=None, **kwargs): self.telescope = data.comm.comm_world.bcast(self.telescope, root=0) if self.schedule is None: - schedule = SatelliteSchedule() - schedule.read(self.schedule_file, comm=data.comm.comm_world) + schedule = None + if data.comm.world_rank == 0: + schedule = SatelliteSchedule() + schedule.read(self.schedule_file, comm=data.comm.comm_world) + if data.comm.comm_world is not None: + schedule = data.comm.comm_world.bcast(schedule, root=0) else: schedule = self.schedule + if self.load_pipe is not None and ( + self.det_data is not None or self.det_flags is not None + ): + msg = "If load_pipe is specified, the detector data and flags should be " + msg += "specified by that operator, not with det_data and det_flags." + raise RuntimeError(msg) + focalplane = self.telescope.focalplane rate = focalplane.sample_rate.to_value(u.Hz) site = self.telescope.site @@ -589,6 +608,9 @@ def _exec(self, data, detectors=None, **kwargs): self.det_flags, dtype=np.uint8, detectors=dets ) + if self.load_pipe is not None: + ob.loader = PipelineLoader(pipeline=self.load_pipe) + data.obs.append(ob) return diff --git a/src/toast/pixels.py b/src/toast/pixels.py index 6346f3cae..7dbb1d751 100644 --- a/src/toast/pixels.py +++ b/src/toast/pixels.py @@ -332,7 +332,6 @@ def alltoallv_info(self): return self._alltoallv_info owners = self.submap_owners - our_submaps = self.owned_submaps send_counts = None send_displ = None diff --git a/src/toast/templates/fourier2d.py b/src/toast/templates/fourier2d.py index ce0f9b6a8..5a6326189 100644 --- a/src/toast/templates/fourier2d.py +++ b/src/toast/templates/fourier2d.py @@ -137,8 +137,8 @@ def _initialize(self, new_data): # Build up detector list self._obs_dets[iob] = set() for d in ob.select_local_detectors(flagmask=self.det_mask): - if d not in ob.detdata[self.det_data].detectors: - continue + # if d not in ob.detdata[self.det_data].detectors: + # continue self._obs_dets[iob].add(d) if d not in all_dets: all_dets[d] = None diff --git a/src/toast/templates/gaintemplate.py b/src/toast/templates/gaintemplate.py index 19e558f3b..845140233 100644 --- a/src/toast/templates/gaintemplate.py +++ b/src/toast/templates/gaintemplate.py @@ -73,8 +73,8 @@ def _initialize(self, new_data): for iob, ob in enumerate(new_data.obs): self._obs_dets[iob] = set() for d in ob.select_local_detectors(flagmask=self.det_mask): - if d not in ob.detdata[self.det_data].detectors: - continue + # if d not in ob.detdata[self.det_data].detectors: + # continue self._obs_dets[iob].add(d) if d not in all_dets: all_dets[d] = None diff --git a/src/toast/templates/offset/kernels_numpy.py b/src/toast/templates/offset/kernels_numpy.py index 1809e8b62..5605aceff 100644 --- a/src/toast/templates/offset/kernels_numpy.py +++ b/src/toast/templates/offset/kernels_numpy.py @@ -17,7 +17,7 @@ def offset_add_to_signal_numpy( data_index, det_data, intervals, - use_accel, + use_accel=False, ): """ Accumulate offset 
amplitudes to timestream data. @@ -61,7 +61,7 @@ def offset_project_signal_numpy( amplitudes, amplitude_flags, intervals, - use_accel, + use_accel=False, ): """ Accumulate timestream data into offset amplitudes. @@ -109,7 +109,7 @@ def offset_project_signal_numpy( @kernel(impl=ImplementationType.NUMPY, name="offset_apply_diag_precond") def offset_apply_diag_precond_numpy( - offset_var, amplitudes_in, amplitude_flags, amplitudes_out, use_accel + offset_var, amplitudes_in, amplitude_flags, amplitudes_out, use_accel=False ): """ Args: diff --git a/src/toast/templates/offset/offset.py b/src/toast/templates/offset/offset.py index 12a92538b..a93ac257c 100644 --- a/src/toast/templates/offset/offset.py +++ b/src/toast/templates/offset/offset.py @@ -124,10 +124,6 @@ def _initialize(self, new_data): # Units for inverse variance weighting detnoise_units = 1.0 / self.det_data_units**2 - # Use this as an "Ordered Set". We want the unique detectors on this process, - # but sorted in order of occurrence. - all_dets = OrderedDict() - # Amplitude lengths of all views for each obs self._obs_views = dict() @@ -140,30 +136,25 @@ def _initialize(self, new_data): # Good detectors to use for each observation self._obs_dets = dict() - for iob, ob in enumerate(new_data.obs): + for ob in new_data.obs: # Compute sample rate from timestamps - (rate, dt, dt_min, dt_max, dt_std) = rate_from_times(ob.shared[self.times]) - self._obs_rate[iob] = rate + rate, _, _, _, _ = rate_from_times(ob.shared[self.times]) + self._obs_rate[ob.uid] = rate # The step length for this observation step_length = self._step_length( - self.step_time.to_value(u.second), self._obs_rate[iob] + self.step_time.to_value(u.second), self._obs_rate[ob.uid] ) - # Track number of offset amplitudes per view, per det. + # Track number of offset amplitudes per interval. ob_views = list() - for view_slice in ob.view[self.view]: - view_len = None - if view_slice.start < 0: - # This is a view of the whole obs - view_len = ob.n_local_samples - else: - view_len = view_slice.stop - view_slice.start + for vw in ob.intervals[self.view]: + view_len = vw.last - vw.first view_n_amp = view_len // step_length if view_n_amp * step_length < view_len: view_n_amp += 1 ob_views.append(view_n_amp) - self._obs_views[iob] = np.array(ob_views, dtype=np.int64) + self._obs_views[ob.uid] = np.array(ob_views, dtype=np.int64) # The noise model. if self.noise_model is not None: @@ -180,33 +171,25 @@ def _initialize(self, new_data): tbase = self.step_time.to_value(u.second) powmin = np.floor(np.log10(1 / obstime)) - 1 powmax = min( - np.ceil(np.log10(1 / tbase)) + 2, np.log10(self._obs_rate[iob]) + np.ceil(np.log10(1 / tbase)) + 2, np.log10(self._obs_rate[ob.uid]) ) - self._freq[iob] = np.logspace(powmin, powmax, 1000) + self._freq[ob.uid] = np.logspace(powmin, powmax, 1000) # Build up detector list - self._obs_dets[iob] = set() + self._obs_dets[ob.uid] = list() for d in ob.select_local_detectors(flagmask=self.det_mask): - if d not in ob.detdata[self.det_data].detectors: - continue - self._obs_dets[iob].add(d) - if d not in all_dets: - all_dets[d] = None - - self._all_dets = list(all_dets.keys()) + self._obs_dets[ob.uid].append(d) - # Go through the data one local detector at a time and compute the offsets - # into the amplitudes. + # Go through the data and compute the offsets into the amplitudes for each + # observation and detector. 
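# Layout sketch for the refactored amplitude indexing (hypothetical numbers):
# with two observations, each holding detectors d00 and d01 and 10 offset
# amplitudes per detector, the flat amplitude vector is ordered as
#     [ obs0:d00 | obs0:d01 | obs1:d00 | obs1:d01 ]
# and self._det_start[ob.uid][det] records the start of each block
# (0, 10, 20 and 30 here), replacing the old detector-major
# self._det_start[det] bookkeeping.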
self._det_start = dict() - offset = 0 - for det in self._all_dets: - self._det_start[det] = offset - for iob, ob in enumerate(new_data.obs): - if det not in self._obs_dets[iob]: - continue - offset += np.sum(self._obs_views[iob]) + for ob in new_data.obs: + self._det_start[ob.uid] = dict() + for det in self._obs_dets[ob.uid]: + self._det_start[ob.uid][det] = offset + offset += np.sum(self._obs_views[ob.uid]) # Now we know the total number of amplitudes. @@ -237,11 +220,8 @@ def _initialize(self, new_data): self._offsetvar = self._offsetvar_raw.array() offset = 0 - for det in self._all_dets: - for iob, ob in enumerate(new_data.obs): - if det not in self._obs_dets[iob]: - continue - + for ob in new_data.obs: + for det in self._obs_dets[ob.uid]: # "Noise weight" (time-domain inverse variance) detnoise = 1.0 if self.noise_model is not None: @@ -253,19 +233,13 @@ def _initialize(self, new_data): # The step length for this observation step_length = self._step_length( - self.step_time.to_value(u.second), self._obs_rate[iob] + self.step_time.to_value(u.second), self._obs_rate[ob.uid] ) # Loop over views - views = ob.view[self.view] - for ivw, vw in enumerate(views): - view_samples = None - if vw.start < 0: - # This is a view of the whole obs - view_samples = ob.n_local_samples - else: - view_samples = vw.stop - vw.start - n_amp_view = self._obs_views[iob][ivw] + for ivw, vw in enumerate(ob.intervals[self.view]): + view_samples = vw.last - vw.first + n_amp_view = self._obs_views[ob.uid][ivw] # Move this loop to compiled code if it is slow... # Note: we are building the offset amplitude *variance*, which is @@ -287,14 +261,14 @@ def _initialize(self, new_data): ) voff += step_length else: - flags = views.detdata[self.det_flags][ivw] - voff = 0 + flags = ob.detdata[self.det_flags][det] + voff = vw.first for amp in range(n_amp_view): amplen = step_length if amp == n_amp_view - 1: amplen = view_samples - voff n_good = amplen - np.count_nonzero( - flags[det][voff : voff + amplen] + flags[voff : voff + amplen] & self.det_flag_mask ) if (n_good / amplen) <= self.good_fraction: @@ -306,7 +280,7 @@ def _initialize(self, new_data): self._offsetvar[offset + amp] = 1.0 / ( detnoise * n_good ) - voff += step_length + voff += amplen offset += n_amp_view # Compute the amplitude noise filter and preconditioner for each detector @@ -321,20 +295,21 @@ def _initialize(self, new_data): if self.use_noise_prior: offset = 0 - for det in self._all_dets: - for iob, ob in enumerate(new_data.obs): - if det not in self._obs_dets[iob]: - continue - if iob not in self._filters: - self._filters[iob] = dict() - self._precond[iob] = dict() + for ob in new_data.obs: + for det in self._obs_dets[ob.uid]: + if ob.uid not in self._filters: + self._filters[ob.uid] = dict() + self._precond[ob.uid] = dict() offset_psd = self._get_offset_psd( ob[self.noise_model], - self._freq[iob], + self._freq[ob.uid], self.step_time.to_value(u.second), det, ) + print(f"OFFSET PSD {ob.name}:{det} = ") + for of, op in zip(self._freq[ob.uid], offset_psd): + print(f" {of} {op}", flush=True) if self.debug_plots is not None: set_matplotlib_backend() @@ -371,9 +346,9 @@ def _initialize(self, new_data): ax = fig.add_subplot(2, 1, 2) ax.loglog( - self._freq[iob], + self._freq[ob.uid], offset_psd, - label=f"Offset PSD", + label="Offset PSD", ) ax.set_xlabel("Frequency [Hz]") ax.set_ylabel("PSD [K$^2$ / Hz]") @@ -389,15 +364,15 @@ def _initialize(self, new_data): ) # Log version of offset PSD and its inverse for interpolation - logfreq = np.log(self._freq[iob]) + 
logfreq = np.log(self._freq[ob.uid]) logpsd = np.log(offset_psd) logfilter = np.log(1.0 / offset_psd) # Compute the list of filters and preconditioners (one per view) # For this detector. - self._filters[iob][det] = list() - self._precond[iob][det] = list() + self._filters[ob.uid][det] = list() + self._precond[ob.uid][det] = list() if self.debug_plots is not None: ffilter = os.path.join( @@ -412,15 +387,9 @@ def _initialize(self, new_data): axprec = figprec.add_subplot(1, 1, 1) # Loop over views - views = ob.view[self.view] - for ivw, vw in enumerate(views): - view_samples = None - if vw.start < 0: - # This is a view of the whole obs - view_samples = ob.n_local_samples - else: - view_samples = vw.stop - vw.start - n_amp_view = self._obs_views[iob][ivw] + for ivw, vw in enumerate(ob.intervals[self.view]): + view_samples = vw.last - vw.first + n_amp_view = self._obs_views[ob.uid][ivw] offsetvar_slice = self._offsetvar[offset : offset + n_amp_view] filterlen = 2 @@ -444,7 +413,7 @@ def _initialize(self, new_data): ) ) - self._filters[iob][det].append(noisefilter) + self._filters[ob.uid][det].append(noisefilter) if self.debug_plots is not None: axfilter.plot( @@ -470,6 +439,7 @@ def _initialize(self, new_data): ) icenter = preconditioner.size // 2 if detnoise != 0: + print(f"OFFSET PREC {ob.name}:{det} center: {preconditioner[icenter]} += {1/detnoise}", flush=True) preconditioner[icenter] += 1.0 / detnoise if self.debug_plots is not None: axprec.plot( @@ -514,7 +484,7 @@ def _initialize(self, new_data): preconditioner, label=f"Banded preconditioner {ivw}", ) - self._precond[iob][det].append((preconditioner, lower)) + self._precond[ob.uid][det].append((preconditioner, lower)) offset += n_amp_view if self.debug_plots is not None: @@ -597,7 +567,6 @@ def _get_offset_psd(self, noise, freq, step_time, det): """Compute the PSD of the baseline offsets.""" psdfreq = noise.freq(det).to_value(u.Hz) psd = noise.psd(det).to_value(self.det_data_units**2 * u.second) - rate = noise.rate(det).to_value(u.Hz) # Remove the white noise component from the PSD psd = self._remove_white_noise(psdfreq, psd) @@ -645,9 +614,6 @@ def g(f, m): offset_psd *= fbase return offset_psd - def _detectors(self): - return self._all_dets - def _zeros(self): z = Amplitudes(self.data.comm, self._n_global, self._n_local) if z.local_flags is not None: @@ -658,60 +624,24 @@ def _step_length(self, stime, rate): return int(stime * rate + 0.5) @function_timer - def _add_to_signal(self, detector, amplitudes, use_accel=None, **kwargs): - log = Logger.get() - - if detector not in self._all_dets: - # This must have been cut by per-detector flags during initialization - return - + def _add_to_signal(self, obs, detectors, amplitudes, use_accel=None, **kwargs): # Kernel selection implementation, use_accel = self.select_kernels(use_accel=use_accel) - amp_offset = self._det_start[detector] - for iob, ob in enumerate(self.data.obs): - if detector not in self._obs_dets[iob]: + for det in detectors: + if det not in self._det_start[obs.uid]: continue - det_indx = ob.detdata[self.det_data].indices([detector]) + amp_offset = self._det_start[obs.uid][det] + det_indx = obs.detdata[self.det_data].indices([det]) # The step length for this observation step_length = self._step_length( - self.step_time.to_value(u.second), self._obs_rate[iob] + self.step_time.to_value(u.second), self._obs_rate[obs.uid] ) # The number of amplitudes in each view - n_amp_views = self._obs_views[iob] - - # # DEBUGGING - # restore_dev = False - # prefix = "HOST" - # if 
amplitudes.accel_in_use(): - # amplitudes.accel_update_host() - # restore_dev = True - # prefix = "DEVICE" - # print( - # f"{prefix} Add to signal input: {amp_offset}, {n_amp_views}, {amplitudes.local}", - # flush=True, - # ) - # if restore_dev: - # amplitudes.accel_update_device() - - # # DEBUGGING - # restore_dev = False - # prefix = "HOST" - # if ob.detdata[self.det_data].accel_in_use(): - # ob.detdata[self.det_data].accel_update_host() - # restore_dev = True - # prefix = "DEVICE" - # tod_min = np.amin(ob.detdata[self.det_data]) - # tod_max = np.amax(ob.detdata[self.det_data]) - # print( - # f"{prefix} Add to signal starting TOD output: {ob.detdata[self.det_data]}, min={tod_min}, max={tod_max}", - # flush=True, - # ) - # if (np.absolute(tod_min) < 1.0e-15) and (np.absolute(tod_max) < 1.0e-15): - # ob.detdata[self.det_data][:] = 0 - # if restore_dev: - # ob.detdata[self.det_data].accel_update_device() + n_amp_views = self._obs_views[obs.uid] + + # FIXME: come back to this and make this work with ALL dets. offset_add_to_signal( step_length, @@ -720,72 +650,44 @@ def _add_to_signal(self, detector, amplitudes, use_accel=None, **kwargs): amplitudes.local, amplitudes.local_flags, det_indx[0], - ob.detdata[self.det_data].data, - ob.intervals[self.view].data, + obs.detdata[self.det_data].data, + obs.intervals[self.view].data, impl=implementation, use_accel=use_accel, ) - # # DEBUGGING - # restore_dev = False - # prefix = "HOST" - # if ob.detdata[self.det_data].accel_in_use(): - # ob.detdata[self.det_data].accel_update_host() - # restore_dev = True - # prefix = "DEVICE" - # print( - # f"{prefix} Add to signal output: {ob.detdata[self.det_data]}", - # flush=True, - # ) - # if restore_dev: - # ob.detdata[self.det_data].accel_update_device() - amp_offset += np.sum(n_amp_views) @function_timer - def _project_signal(self, detector, amplitudes, use_accel=None, **kwargs): - log = Logger.get() - - if detector not in self._all_dets: - # This must have been cut by per-detector flags during initialization - return - + def _project_signal(self, obs, detectors, amplitudes, use_accel=None, **kwargs): # Kernel selection implementation, use_accel = self.select_kernels(use_accel=use_accel) - amp_offset = self._det_start[detector] - for iob, ob in enumerate(self.data.obs): - if detector not in self._obs_dets[iob]: + for det in detectors: + if det not in self._det_start[obs.uid]: continue - det_indx = ob.detdata[self.det_data].indices([detector]) + amp_offset = self._det_start[obs.uid][det] + det_indx = obs.detdata[self.det_data].indices([det]) + if self.det_flags is not None: - flag_indx = ob.detdata[self.det_flags].indices([detector]) - flag_data = ob.detdata[self.det_flags].data + flag_indx = obs.detdata[self.det_flags].indices([det]) + flag_data = obs.detdata[self.det_flags].data else: flag_indx = np.array([-1], dtype=np.int32) flag_data = np.zeros(1, dtype=np.uint8) # The step length for this observation step_length = self._step_length( - self.step_time.to_value(u.second), self._obs_rate[iob] + self.step_time.to_value(u.second), self._obs_rate[obs.uid] ) # The number of amplitudes in each view - n_amp_views = self._obs_views[iob] - - # # DEBUGGING - # restore_dev = False - # prefix="HOST" - # if ob.detdata[self.det_data].accel_in_use(): - # ob.detdata[self.det_data].accel_update_host() - # restore_dev = True - # prefix="DEVICE" - # print(f"{prefix} Project signal input: {ob.detdata[self.det_data]}", flush=True) - # if restore_dev: - # ob.detdata[self.det_data].accel_update_device() + n_amp_views = 
self._obs_views[obs.uid] + + # FIXME: come back to this and make this work with ALL dets. offset_project_signal( det_indx[0], - ob.detdata[self.det_data].data, + obs.detdata[self.det_data].data, flag_indx[0], flag_data, self.det_flag_mask, @@ -794,21 +696,11 @@ def _project_signal(self, detector, amplitudes, use_accel=None, **kwargs): n_amp_views, amplitudes.local, amplitudes.local_flags, - ob.intervals[self.view].data, + obs.intervals[self.view].data, impl=implementation, use_accel=use_accel, ) - # restore_dev = False - # prefix="HOST" - # if amplitudes.accel_in_use(): - # amplitudes.accel_update_host() - # restore_dev = True - # prefix="DEVICE" - # print(f"{prefix} Project signal output: {amp_offset}, {n_amp_views}, {amplitudes.local}", flush=True) - # if restore_dev: - # amplitudes.accel_update_device() - amp_offset += np.sum(n_amp_views) @function_timer @@ -824,29 +716,33 @@ def _add_prior(self, amplitudes_in, amplitudes_out, use_accel=None, **kwargs): set_matplotlib_backend() import matplotlib.pyplot as plt - for det in self._all_dets: - offset = self._det_start[det] - for iob, ob in enumerate(self.data.obs): - if det not in self._obs_dets[iob]: - continue - for ivw, vw in enumerate(ob.view[self.view].detdata[self.det_data]): - n_amp_view = self._obs_views[iob][ivw] + for ob in self.data.obs: + for det in self._obs_dets[ob.uid]: + offset = self._det_start[ob.uid][det] + + for ivw, vw in enumerate(ob.intervals[self.view]): + n_amp_view = self._obs_views[ob.uid][ivw] amp_slice = slice(offset, offset + n_amp_view, 1) amps_in = amplitudes_in.local[amp_slice] amp_flags_in = amplitudes_in.local_flags[amp_slice] amps_out = amplitudes_out.local[amp_slice] - if det in self._filters[iob]: + if det in self._filters[ob.uid]: # There is some contribution from this detector - amps_out[:] += scipy.signal.convolve( + print(f"OFFSET PRIOR {ob.name}:{det}:{ivw} IN = {amps_in}", flush=True) + contrib = scipy.signal.convolve( amps_in, - self._filters[iob][det][ivw], + self._filters[ob.uid][det][ivw], mode="same", method="direct", ) + print(f"OFFSET PRIOR {ob.name}:{det}:{ivw} OUT = {contrib}", flush=True) + + amps_out[:] += contrib if self.debug_plots is not None: # Find the first unused file name in the sequence iter = -1 + fname = None while iter < 0 or os.path.isfile(fname): iter += 1 fname = os.path.join( @@ -896,28 +792,20 @@ def _apply_precond(self, amplitudes_in, amplitudes_out, use_accel=None, **kwargs ) # Our design matrix includes a term with the inverse offset covariance. # This means that our preconditioner should include this term as well. 
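For readers following the preconditioner change, here is a minimal scipy sketch of the banded path (precond_width greater than 1): a symmetric positive-definite band matrix is factored once with cholesky_banded and then used to solve M x = b for each amplitude vector. The matrix and vector below are synthetic stand-ins, not the template's stored factors.

import numpy as np
import scipy.linalg

n = 8
# Upper banded storage for a tridiagonal stand-in: row 0 is the first
# super-diagonal, row 1 is the main diagonal (e.g. offset variance plus the
# 1/detnoise term).
ab = np.zeros((2, n))
ab[0, 1:] = -0.25
ab[1, :] = 1.0

cb = scipy.linalg.cholesky_banded(ab, lower=False)
b = np.random.default_rng(0).normal(size=n)
x = scipy.linalg.cho_solve_banded((cb, False), b)

# Verify against a dense solve of the same matrix.
dense = np.diag(ab[1]) + np.diag(ab[0, 1:], 1) + np.diag(ab[0, 1:], -1)
assert np.allclose(dense @ x, b)
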
- for det in self._all_dets: - offset = self._det_start[det] - for iob, ob in enumerate(self.data.obs): - if det not in self._obs_dets[iob]: - continue + for ob in self.data.obs: + for det in self._obs_dets[ob.uid]: + offset = self._det_start[ob.uid][det] + # Loop over views - views = ob.view[self.view] - for ivw, vw in enumerate(views): - view_samples = None - if vw.start < 0: - # This is a view of the whole obs - view_samples = ob.n_local_samples - else: - view_samples = vw.stop - vw.start + for ivw, vw in enumerate(ob.intervals[self.view]): + n_amp_view = self._obs_views[ob.uid][ivw] - n_amp_view = self._obs_views[iob][ivw] amp_slice = slice(offset, offset + n_amp_view, 1) amps_in = amplitudes_in.local[amp_slice] amp_flags_in = amplitudes_in.local_flags[amp_slice] amps_out = None - if det in self._precond[iob]: + if det in self._precond[ob.uid]: # We have a contribution from this detector if self.precond_width <= 1: # We are using a Toeplitz preconditioner. @@ -925,15 +813,17 @@ def _apply_precond(self, amplitudes_in, amplitudes_out, use_accel=None, **kwargs # `fftconvolve` depending on the size of the inputs amps_out = scipy.signal.convolve( amps_in, - self._precond[iob][det][ivw][0], + self._precond[ob.uid][det][ivw][0], mode="same", + method="direct", ) + print(f"OFFSET APPLY_PREC {ob.name}:{det}:{ivw}: {amps_in} X {self._precond[ob.uid][det][ivw][0]} -> {amps_out}", flush=True) else: # Use pre-computed Cholesky decomposition. Note that this # is the decomposition of the actual preconditioner (not # its inverse), since we are solving Mx=b. amps_out = scipy.linalg.cho_solve_banded( - self._precond[iob][det][ivw], + self._precond[ob.uid][det][ivw], amps_in, overwrite_b=False, check_finite=True, @@ -996,18 +886,17 @@ def write(self, amplitudes, out): # Copy of the amplitudes, organized by observation and detector obs_det_amps = dict() - for det in self._all_dets: - amp_offset = self._det_start[det] - for iob, ob in enumerate(self.data.obs): - if det not in self._obs_dets[iob]: - continue + for ob in self.data.obs: + for det in self._obs_dets[ob.uid]: + amp_offset = self._det_start[ob.uid][det] + if not ob.is_distributed_by_detector: raise NotImplementedError( "Only observations distributed by detector are supported" ) # The step length for this observation step_length = self._step_length( - self.step_time.to_value(u.second), self._obs_rate[iob] + self.step_time.to_value(u.second), self._obs_rate[ob.uid] ) if ob.name not in obs_det_amps: @@ -1021,7 +910,7 @@ def write(self, amplitudes, out): amp_start = list() amp_stop = list() for ivw, vw in enumerate(ob.intervals[self.view]): - n_amp_view = self._obs_views[iob][ivw] + n_amp_view = self._obs_views[ob.uid][ivw] for istep in range(n_amp_view): istart = vw.first + istep * step_length amp_first.append(istart) @@ -1043,7 +932,7 @@ def write(self, amplitudes, out): det_flags = list() views = ob.view[self.view] for ivw, vw in enumerate(views): - n_amp_view = self._obs_views[iob][ivw] + n_amp_view = self._obs_views[ob.uid][ivw] amp_slice = slice(amp_offset, amp_offset + n_amp_view, 1) det_amps.append(amplitudes.local[amp_slice]) det_flags.append(amplitudes.local_flags[amp_slice]) @@ -1062,7 +951,7 @@ def write(self, amplitudes, out): # them in time rather than just extracting detector data and writing # to the datasets. 
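The gather step described in the comment above can be sketched with mpi4py alone. The detector names and amplitude values are placeholders; the point is simply that each process contributes its own per-detector dictionary and rank 0 merges them before writing.

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD

# Each process owns amplitudes for a disjoint set of detectors.
local_amps = {f"det_{comm.rank}": np.arange(5, dtype=np.float64) + comm.rank}

gathered = comm.gather(local_amps, root=0)
if comm.rank == 0:
    merged = dict()
    for proc_amps in gathered:
        merged.update(proc_amps)
    # merged now maps every detector to its full amplitude array and can be
    # written row by row into a single HDF5 dataset.
    print(sorted(merged.keys()))
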
- for iob, ob in enumerate(self.data.obs): + for ob in self.data.obs: obs_local_amps = obs_det_amps[ob.name] if self.data.comm.group_size == 1: all_obs_amps = [obs_local_amps] @@ -1106,6 +995,7 @@ def write(self, amplitudes, out): if k == "bounds": continue row = det_to_row[k] + print(f"OFFSET {ob.name}:{k} = {v['amps']}", flush=True) hslice = (slice(row, row + 1, 1), slice(0, n_amp, 1)) dslice = (slice(0, n_amp, 1),) hamps.write_direct(v["amps"], dslice, hslice) diff --git a/src/toast/templates/periodic.py b/src/toast/templates/periodic.py index 09a3749b2..f42cb3fe3 100644 --- a/src/toast/templates/periodic.py +++ b/src/toast/templates/periodic.py @@ -162,8 +162,8 @@ def _initialize(self, new_data): # Build up detector list self._obs_dets[iob] = set() for d in ob.select_local_detectors(flagmask=self.det_mask): - if d not in ob.detdata[self.det_data].detectors: - continue + # if d not in ob.detdata[self.det_data].detectors: + # continue self._obs_dets[iob].add(d) if d not in all_dets: all_dets[d] = None @@ -172,7 +172,7 @@ def _initialize(self, new_data): if total_bins == 0: msg = f"Template {self.name} process group {new_data.comm.group}" - msg += f" has zero amplitude bins- change the binning size." + msg += " has zero amplitude bins- change the binning size." raise RuntimeError(msg) # During application of the template, we will be looping over detectors @@ -241,7 +241,6 @@ def _initialize(self, new_data): if self.key not in ob.shared: continue nbins = self._obs_nbins[iob] - det_indx = ob.detdata[self.det_data].indices([det])[0] amp_hits = self._amp_hits[amp_offset : amp_offset + nbins] amp_flags = self._amp_flags[amp_offset : amp_offset + nbins] if self.det_flags is not None: @@ -255,7 +254,6 @@ def _initialize(self, new_data): else: vw_data = ob.shared[self.key].data[vw_slc] good, amp_indx = self._view_flags_and_index( - det_indx, iob, ob, vw, @@ -282,7 +280,7 @@ def _zeros(self): return z def _view_flags_and_index( - self, det_indx, ob_indx, ob, view, flag_indx=None, det_flags=False + self, ob_indx, ob, view, flag_indx=None, det_flags=False ): """Get the flags and amplitude indices for one detector and view.""" vw_slc = slice(view.first, view.last, 1) @@ -339,7 +337,6 @@ def _add_to_signal(self, detector, amplitudes, **kwargs): for vw in ob.intervals[self.view].data: vw_slc = slice(vw.first, vw.last, 1) good, amp_indx = self._view_flags_and_index( - det_indx, iob, ob, vw, @@ -374,7 +371,6 @@ def _project_signal(self, detector, amplitudes, **kwargs): for vw in ob.intervals[self.view].data: vw_slc = slice(vw.first, vw.last, 1) good, amp_indx = self._view_flags_and_index( - det_indx, iob, ob, vw, diff --git a/src/toast/templates/subharmonic.py b/src/toast/templates/subharmonic.py index 84bdf918b..caa5d3702 100644 --- a/src/toast/templates/subharmonic.py +++ b/src/toast/templates/subharmonic.py @@ -66,8 +66,8 @@ def _initialize(self, new_data): for iob, ob in enumerate(new_data.obs): self._obs_dets[iob] = set() for d in ob.select_local_detectors(flagmask=self.det_mask): - if d not in ob.detdata[self.det_data].detectors: - continue + # if d not in ob.detdata[self.det_data].detectors: + # continue self._obs_dets[iob].add(d) if d not in all_dets: all_dets[d] = None diff --git a/src/toast/templates/template.py b/src/toast/templates/template.py index 4f5a7d989..1681909f7 100644 --- a/src/toast/templates/template.py +++ b/src/toast/templates/template.py @@ -121,22 +121,22 @@ def _check_enabled(self, use_accel=None): log.debug(msg) return False - def _detectors(self): - # Derived classes should 
return the list of detectors they support. - raise NotImplementedError("Derived class must implement _detectors()") + # def _detectors(self): + # # Derived classes should return the list of detectors they support. + # raise NotImplementedError("Derived class must implement _detectors()") - def detectors(self): - """Return a list of detectors supported by the template. + # def detectors(self): + # """Return a list of detectors supported by the template. - This list will change whenever the `data` trait is set, which initializes - the template. + # This list will change whenever the `data` trait is set, which initializes + # the template. - Returns: - (list): The detectors with local amplitudes across all observations. + # Returns: + # (list): The detectors with local amplitudes across all observations. - """ - if self._check_enabled(): - return self._detectors() + # """ + # if self._check_enabled(): + # return self._detectors() def _zeros(self): raise NotImplementedError("Derived class must implement _zeros()") @@ -155,11 +155,11 @@ def zeros(self): if self._check_enabled(): return self._zeros() - def _add_to_signal(self, detector, amplitudes, **kwargs): + def _add_to_signal(self, obs, detectors, amplitudes, **kwargs): raise NotImplementedError("Derived class must implement _add_to_signal()") @function_timer_stackskip - def add_to_signal(self, detector, amplitudes, use_accel=None, **kwargs): + def add_to_signal(self, obs, detectors, amplitudes, use_accel=None, **kwargs): """Accumulate the projected amplitudes to a timestream. This performs the operation: @@ -170,7 +170,8 @@ def add_to_signal(self, detector, amplitudes, use_accel=None, **kwargs): Where `s` is the det_data signal, `F` is the template and `a` is the amplitudes. Args: - detector (str): The detector name. + obs (Observation): The observation to process. + detectors (list): The list of detectors to process. amplitudes (Amplitudes): The Amplitude values for this template. Returns: @@ -178,13 +179,15 @@ def add_to_signal(self, detector, amplitudes, use_accel=None, **kwargs): """ if self._check_enabled(use_accel=use_accel): - self._add_to_signal(detector, amplitudes, use_accel=use_accel, **kwargs) + self._add_to_signal( + obs, detectors, amplitudes, use_accel=use_accel, **kwargs + ) - def _project_signal(self, detector, amplitudes, **kwargs): + def _project_signal(self, obs, detectors, amplitudes, **kwargs): raise NotImplementedError("Derived class must implement _project_signal()") @function_timer_stackskip - def project_signal(self, detector, amplitudes, use_accel=None, **kwargs): + def project_signal(self, obs, detectors, amplitudes, use_accel=None, **kwargs): """Project a timestream into template amplitudes. This performs: @@ -195,7 +198,8 @@ def project_signal(self, detector, amplitudes, use_accel=None, **kwargs): Where `s` is the det_data signal, `F` is the template and `a` is the amplitudes. Args: - detector (str): The detector name. + obs (Observation): The observation to process. + detectors (list): The list of detectors to process. amplitudes (Amplitudes): The Amplitude values for this template. 
Returns: @@ -203,7 +207,9 @@ def project_signal(self, detector, amplitudes, use_accel=None, **kwargs): """ if self._check_enabled(use_accel=use_accel): - self._project_signal(detector, amplitudes, use_accel=use_accel, **kwargs) + self._project_signal( + obs, detectors, amplitudes, use_accel=use_accel, **kwargs + ) def _add_prior(self, amplitudes_in, amplitudes_out, **kwargs): # Not all Templates implement the prior diff --git a/src/toast/tests/CMakeLists.txt b/src/toast/tests/CMakeLists.txt index 747b87758..d8c2a1a79 100644 --- a/src/toast/tests/CMakeLists.txt +++ b/src/toast/tests/CMakeLists.txt @@ -23,6 +23,7 @@ install(FILES pixels_wcs.py weather.py noise.py + ops_accum_obs.py ops_sim_satellite.py ops_sim_ground.py ops_sim_tod_noise.py diff --git a/src/toast/tests/helpers/ground.py b/src/toast/tests/helpers/ground.py index 6e42e06ed..d812b9a32 100644 --- a/src/toast/tests/helpers/ground.py +++ b/src/toast/tests/helpers/ground.py @@ -94,6 +94,8 @@ def create_ground_data( single_group=False, flagged_pixels=True, schedule_hours=2, + no_det_data=False, + no_det_flags=False, ): """Create a data object with a simple ground sim. @@ -192,6 +194,10 @@ def create_ground_data( sim_ground.turnaround_mask = 1 + 2 else: sim_ground.turnaround_mask = 2 + if no_det_data: + sim_ground.det_data = None + if no_det_flags: + sim_ground.det_flags = None sim_ground.apply(data) if flagged_pixels: @@ -221,6 +227,8 @@ def create_overdistributed_data( turnarounds_invalid=False, single_group=False, schedule_hours=2, + no_det_data=False, + no_det_flags=False, ): """Create a data object with more detectors than processes. @@ -315,6 +323,10 @@ def create_overdistributed_data( sim_ground.turnaround_mask = 1 + 2 else: sim_ground.turnaround_mask = 2 + if no_det_data: + sim_ground.det_data = None + if no_det_flags: + sim_ground.det_flags = None sim_ground.apply(data) return data diff --git a/src/toast/tests/helpers/space.py b/src/toast/tests/helpers/space.py index a6097eb70..5a4395a77 100644 --- a/src/toast/tests/helpers/space.py +++ b/src/toast/tests/helpers/space.py @@ -98,6 +98,8 @@ def create_satellite_data( width=5.0 * u.degree, single_group=False, flagged_pixels=True, + no_det_data=False, + no_det_flags=False, ): """Create a data object with a simple satellite sim. @@ -158,6 +160,10 @@ def create_satellite_data( prec_angle=7.0 * u.degree, detset_key="pixel", ) + if no_det_data: + sim_sat.det_data = None + if no_det_flags: + sim_sat.det_flags = None sim_sat.apply(data) if flagged_pixels: diff --git a/src/toast/tests/ops_accum_obs.py b/src/toast/tests/ops_accum_obs.py new file mode 100644 index 000000000..621d0cb75 --- /dev/null +++ b/src/toast/tests/ops_accum_obs.py @@ -0,0 +1,381 @@ +# Copyright (c) 2024-2024 by the parties listed in the AUTHORS file. +# All rights reserved. Use of this source code is governed by +# a BSD-style license that can be found in the LICENSE file. + +import os + +import numpy as np +from astropy import units as u +import healpy as hp + +from .. 
import ops as ops +from ..footprint import footprint_distribution +from ..mpi import MPI +from ..observation import default_values as defaults +from .helpers import ( + close_data, + create_ground_data, + create_outdir, + create_fake_healpix_map, +) +from .mpi import MPITestCase + + +class AccumObsTest(MPITestCase): + def setUp(self): + fixture_name = os.path.splitext(os.path.basename(__file__))[0] + self.outdir = create_outdir(self.comm, subdir=fixture_name) + np.random.seed(123456) + + def create_observations(self): + """Generate observations with boresight pointing and no det data.""" + # Slightly slower than 1 Hz + hwp_rpm = 59.0 + + sample_rate = 60 * u.Hz + + # Create a fake ground observation set for testing. + # Detector data is not created. + data = create_ground_data( + self.comm, + sample_rate=sample_rate, + hwp_rpm=hwp_rpm, + no_det_data=True, + ) + return data + + def create_input_sky(self, data, pixel_dist, map_key, test_dir): + """Create the input sky map.""" + if map_key in data: + msg = f"Generated map '{map_key}' already exists in data" + raise RuntimeError(msg) + npix = data[pixel_dist].n_pix + nside = hp.npix2nside(npix) + lmax = 3 * nside + fwhm = 10 * u.arcmin + data[map_key] = create_fake_healpix_map( + os.path.join(test_dir, "input_sky.fits"), + data[pixel_dist], + fwhm=fwhm, + lmax=lmax, + ) + + def create_sim_pipeline(self, data, map_key): + # Create an uncorrelated noise model from focalplane detector properties. + # We do this outside of the pipeline since it does not generate any + # timestreams. + default_model = ops.DefaultNoiseModel(noise_model="noise_model") + default_model.apply(data) + + npix = data[map_key].distribution.n_pix + nside = hp.npix2nside(npix) + + # Now build up the operators to use in the simulation pipeline + operators = list() + + # Generic pointing matrix + detpointing = ops.PointingDetectorSimple( + shared_flag_mask=0, + quats="sim_quats", + ) + pixels = ops.PixelsHealpix( + nside=nside, + detector_pointing=detpointing, + pixels="sim_pixels", + ) + operators.append(pixels) + weights = ops.StokesWeights( + mode="IQU", + hwp_angle=defaults.hwp_angle, + detector_pointing=detpointing, + weights="sim_weights", + ) + operators.append(weights) + + # Scan map into timestreams + scanner = ops.ScanMap( + det_data=defaults.det_data, + pixels=pixels.pixels, + weights=weights.weights, + map_key=map_key, + ) + operators.append(scanner) + + # Simulate noise from the default model + sim_noise = ops.SimNoise( + noise_model=default_model.noise_model, out=defaults.det_data + ) + operators.append(sim_noise) + + # The pipeline to run on each obs + pipe = ops.Pipeline( + detector_sets=["ALL"], + operators=operators, + ) + return pipe + + def add_sim_loader(self, data, pipe, pixel_dist, test_dir): + """Go through all observations and add a loader.""" + + # Add to the observations + for ob in data.obs: + ob.loader = ops.PipelineLoader(pipeline=pipe) + + def test_accum_sim(self): + testdir = os.path.join(self.outdir, "accum_sim") + if self.comm is None or self.comm.rank == 0: + os.makedirs(testdir) + + data = self.create_observations() + + # Generate a full-sky footprint for testing + nside = 256 + pixel_dist = "pixel_dist" + data[pixel_dist] = footprint_distribution( + healpix_nside=nside, + healpix_nside_submap=16, + ) + + # Create a + map_key = "input_sky" + + # First simulate the sky map and store it in the data + self.create_input_sky(data, pixel_dist, map_key, testdir) + + sim_pipe = self.create_sim_pipeline(data, map_key) + + self.add_sim_loader(data, 
sim_pipe, pixel_dist, testdir) + + # Operator to accumulate our map-domain products with the loader + detpointing = ops.PointingDetectorSimple( + shared_flag_mask=0, + quats="quats", + ) + pixels = ops.PixelsHealpix( + nside=nside, + detector_pointing=detpointing, + pixels="pixels", + ) + weights = ops.StokesWeights( + mode="IQU", + hwp_angle=defaults.hwp_angle, + detector_pointing=detpointing, + weights="weights", + ) + accum_obs = ops.AccumulateObservation( + cache_dir=os.path.join(testdir, "cache"), + pixel_dist=pixel_dist, + inverse_covariance="invcov", + hits="hits", + zmap="zmap", + rcond="rcond", + covariance="cov", + pixel_pointing=pixels, + stokes_weights=weights, + noise_model="noise_model", + obs_pointing=True, + ) + accum_obs.load_apply(data) + + # Run the steps manually + + # Clear the loaders + _ = data.save_loaders() + + # Delete and re-simulate the data + ops.Delete(detdata=[defaults.det_data]).apply(data) + sim_pipe.apply(data) + + # Run all the operators with full pointing + pixels.apply(data) + weights.apply(data) + + # Hit map operator + build_hits = ops.BuildHitMap( + pixel_dist=pixel_dist, + hits="check_hits", + view=pixels.view, + pixels=pixels.pixels, + ) + build_hits.apply(data) + + # Inverse covariance. + build_invcov = ops.BuildInverseCovariance( + pixel_dist=pixel_dist, + inverse_covariance="check_invcov", + view=pixels.view, + pixels=pixels.pixels, + weights=weights.weights, + noise_model="noise_model", + ) + build_invcov.apply(data) + + # Noise weighted map + build_zmap = ops.BuildNoiseWeighted( + pixel_dist=pixel_dist, + zmap="check_zmap", + view=pixels.view, + pixels=pixels.pixels, + weights=weights.weights, + noise_model="noise_model", + ) + build_zmap.apply(data) + + # Verify that the accumulated products agree + for prod in ["hits", "invcov", "zmap"]: + original = data[prod] + check = data[f"check_{prod}"] + failed = False + + if original.distribution != check.distribution: + failed = True + + if not failed: + dist = check.distribution + for sm in range(dist.n_local_submap): + for px in range(dist.n_pix_submap): + if check.data[sm, px, 0] != 0: + if not np.allclose( + check.data[sm, px], + original.data[sm, px], + ): + failed = True + if data.comm.comm_world is not None: + failed = data.comm.comm_world.allreduce(failed, op=MPI.LOR) + self.assertFalse(failed) + + close_data(data) + + def test_accum_cache(self): + testdir = os.path.join(self.outdir, "accum_cache") + if self.comm is None or self.comm.rank == 0: + os.makedirs(testdir) + + data = self.create_observations() + + # Generate a full-sky footprint for testing + nside = 256 + pixel_dist = "pixel_dist" + data[pixel_dist] = footprint_distribution( + healpix_nside=nside, + healpix_nside_submap=16, + ) + + # Create a + map_key = "input_sky" + + # First simulate the sky map and store it in the data + self.create_input_sky(data, pixel_dist, map_key, testdir) + + sim_pipe = self.create_sim_pipeline(data, map_key) + + self.add_sim_loader(data, sim_pipe, pixel_dist, testdir) + + # Operator to accumulate our map-domain products with the loader + detpointing = ops.PointingDetectorSimple( + shared_flag_mask=0, + quats="quats", + ) + pixels = ops.PixelsHealpix( + nside=nside, + detector_pointing=detpointing, + pixels="pixels", + ) + weights = ops.StokesWeights( + mode="IQU", + hwp_angle=defaults.hwp_angle, + detector_pointing=detpointing, + weights="weights", + ) + accum_obs = ops.AccumulateObservation( + cache_dir=os.path.join(testdir, "cache"), + cache_detdata=True, + pixel_dist=pixel_dist, + 
inverse_covariance="invcov", + hits="hits", + zmap="zmap", + rcond="rcond", + covariance="cov", + pixel_pointing=pixels, + stokes_weights=weights, + noise_model="noise_model", + obs_pointing=True, + ) + + # This will run the sim pipeline to produce data each observation, + # then cache the TOD and insert a loader for future calls. + accum_obs.load_apply(data) + + # Run the steps manually. This will trigger the loading of detector + # data from the cache. + + # Run all the operators with full pointing. We pack these into a pipeline + # so that we can use load_apply and trigger the loading once. + check_pipe_ops = list() + check_pipe_ops.append(pixels) + check_pipe_ops.append(weights) + + # Hit map operator + build_hits = ops.BuildHitMap( + pixel_dist=pixel_dist, + hits="check_hits", + view=pixels.view, + pixels=pixels.pixels, + ) + check_pipe_ops.append(build_hits) + + # Inverse covariance. + build_invcov = ops.BuildInverseCovariance( + pixel_dist=pixel_dist, + inverse_covariance="check_invcov", + view=pixels.view, + pixels=pixels.pixels, + weights=weights.weights, + noise_model="noise_model", + ) + check_pipe_ops.append(build_invcov) + + # Noise weighted map + build_zmap = ops.BuildNoiseWeighted( + pixel_dist=pixel_dist, + zmap="check_zmap", + view=pixels.view, + pixels=pixels.pixels, + weights=weights.weights, + noise_model="noise_model", + ) + check_pipe_ops.append(build_zmap) + + check_pipe = ops.Pipeline(operators=check_pipe_ops) + check_pipe.load_apply(data) + + # Verify that the accumulated products agree + for prod in ["hits", "invcov", "zmap"]: + original = data[prod] + check = data[f"check_{prod}"] + failed = False + + if original.distribution != check.distribution: + failed = True + + if not failed: + dist = check.distribution + for sm in range(dist.n_local_submap): + for px in range(dist.n_pix_submap): + if check.data[sm, px, 0] != 0: + if not np.allclose( + check.data[sm, px], + original.data[sm, px], + rtol=1e-5, + atol=1e-5, + ): + msg = f"{prod} FAIL [{sm},{px}]: " + msg += f"{original.data[sm,px]} " + msg += f"!= {check.data[sm,px]}" + print(msg, flush=True) + failed = True + if data.comm.comm_world is not None: + failed = data.comm.comm_world.allreduce(failed, op=MPI.LOR) + self.assertFalse(failed) + + close_data(data) \ No newline at end of file diff --git a/src/toast/tests/ops_loader.py b/src/toast/tests/ops_loader.py index cb13293ad..7ab4beef5 100644 --- a/src/toast/tests/ops_loader.py +++ b/src/toast/tests/ops_loader.py @@ -25,7 +25,7 @@ class RandomLoader(object): def __init__(self, rms=1.0): self.rms = rms - def load(self, obs): + def load(self, obs, **kwargs): exists_data = obs.detdata.ensure( defaults.det_data, dtype=np.float64, @@ -40,7 +40,7 @@ def load(self, obs): defaults.det_flags, dtype=np.uint8, detectors=obs.local_detectors ) - def unload(self, obs): + def unload(self, obs, **kwargs): del obs.detdata[defaults.det_data] del obs.detdata[defaults.det_flags] diff --git a/src/toast/tests/ops_mapmaker.py b/src/toast/tests/ops_mapmaker.py index e313c6f0a..08bfade5f 100644 --- a/src/toast/tests/ops_mapmaker.py +++ b/src/toast/tests/ops_mapmaker.py @@ -7,17 +7,17 @@ import healpy as hp import numpy as np -import scipy from astropy import units as u from .. import ops as ops from .. 
import templates from ..accelerator import accel_enabled +from ..mpi import MPI from ..observation import default_values as defaults from ..timing import GlobalTimers from ..timing import dump as dump_timers from ..timing import gather_timers -from ..vis import set_matplotlib_backend +from ..vis import set_matplotlib_backend, plot_healpix_maps from .helpers import ( close_data, create_fake_healpix_scanned_tod, @@ -39,17 +39,8 @@ def setUp(self): if self.comm is not None and self.comm.size >= 2: self.obs_per_group = self.total_obs // 2 - def test_offset(self): - if sys.platform.lower() == "darwin": - print(f"WARNING: Skipping test_offset on MacOS") - return - - testdir = os.path.join(self.outdir, "offset") - if self.comm is None or self.comm.rank == 0: - os.makedirs(testdir) - + def create_fake_data(self, testdir): # Create a fake satellite data set for testing - data = create_satellite_data( self.comm, obs_per_group=self.obs_per_group, obs_time=10.0 * u.minute ) @@ -58,7 +49,7 @@ def test_offset(self): detpointing = ops.PointingDetectorSimple() pixels = ops.PixelsHealpix( nside=64, - create_dist="pixel_dist", + create_dist="sim_pixel_dist", detector_pointing=detpointing, ) pixels.apply(data) @@ -77,7 +68,7 @@ def test_offset(self): pixels, weights, skyfile, - "pixel_dist", + "sim_pixel_dist", map_key=map_key, fwhm=30.0 * u.arcmin, lmax=3 * pixels.nside, @@ -88,6 +79,7 @@ def test_offset(self): ) # Now clear the pointing and reset things for use with the mapmaking test later + del data["sim_pixel_dist"] delete_pointing = ops.Delete( detdata=[detpointing.quats, pixels.pixels, weights.weights] ) @@ -103,285 +95,52 @@ def test_offset(self): noise_model=default_model.noise_model, det_data=defaults.det_data ) sim_noise.apply(data) - - # Set up binning operator for solving - binner = ops.BinMap( - pixel_dist="pixel_dist", - pixel_pointing=pixels, - stokes_weights=weights, - noise_model=default_model.noise_model, - ) - - # Set up template matrix with just an offset template. - - # Use 1/10 of an observation as the baseline length. Make it not evenly - # divisible in order to test handling of the final amplitude. 
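A small worked example (made-up numbers) of the "not evenly divisible" baseline choice noted above: when the baseline length does not divide the interval, the final baseline is shorter than the rest, which is the edge case the test is meant to exercise.

rate = 10.0          # Hz, assumed sample rate
n_samples = 6000     # one 600 s interval
step_seconds = 59.0  # baseline length that does not divide 600 s evenly
step_length = int(step_seconds * rate + 0.5)   # samples per baseline

n_amp = n_samples // step_length
if n_amp * step_length < n_samples:
    n_amp += 1
last_len = n_samples - (n_amp - 1) * step_length
print(n_amp, "baselines; the final baseline covers", last_len, "samples")
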
- ob_time = ( - data.obs[0].shared[defaults.times][-1] - - data.obs[0].shared[defaults.times][0] - ) - step_seconds = float(int(ob_time / 10.0)) - tmpl = templates.Offset( - times=defaults.times, - noise_model=default_model.noise_model, - step_time=step_seconds * u.second, - ) - - tmatrix = ops.TemplateMatrix(templates=[tmpl]) - - # Map maker - mapper = ops.MapMaker( - name="test1", - det_data=defaults.det_data, - binning=binner, - template_matrix=tmatrix, - solve_rcond_threshold=1.0e-1, - map_rcond_threshold=1.0e-1, - write_hits=False, - write_map=True, - write_cov=False, - write_rcond=False, - keep_solver_products=False, - keep_final_products=False, - output_dir=testdir, - ) - - # Make the map - mapper.apply(data) - - # Check that we can also run in full-memory mode - tmatrix.reset() - mapper.reset_pix_dist = True - pixels.apply(data) - weights.apply(data) - - use_accel = None - if accel_enabled() and ( - pixels.supports_accel() - and weights.supports_accel() - and mapper.supports_accel() - ): - use_accel = True - data.accel_create(pixels.requires()) - data.accel_create(weights.requires()) - data.accel_create(mapper.requires()) - data.accel_update_device(pixels.requires()) - data.accel_update_device(weights.requires()) - data.accel_update_device(mapper.requires()) - - binner.full_pointing = True - mapper.name = "test2" - mapper.apply(data, use_accel=use_accel) - - close_data(data) - - def test_compare_madam_noprior(self): - if not ops.madam.available(): - print("libmadam not available, skipping destriping comparison") - return - - testdir = os.path.join(self.outdir, "compare_madam_noprior") - if self.comm is None or self.comm.rank == 0: - os.makedirs(testdir) - - # Create a fake satellite data set for testing - data = create_satellite_data( - self.comm, obs_per_group=self.obs_per_group, obs_time=10.0 * u.minute - ) - - # Create some sky signal timestreams. - detpointing = ops.PointingDetectorSimple() - pixels = ops.PixelsHealpix( - nside=16, - nest=True, - create_dist="pixel_dist", - detector_pointing=detpointing, - ) - pixels.apply(data) - weights = ops.StokesWeights( - mode="IQU", - hwp_angle=defaults.hwp_angle, - detector_pointing=detpointing, - ) - weights.apply(data) - - # Create fake polarized sky signal - skyfile = os.path.join(testdir, "input_map.fits") - map_key = "fake_map" - create_fake_healpix_scanned_tod( - data, - pixels, - weights, - skyfile, - "pixel_dist", - map_key=map_key, - fwhm=30.0 * u.arcmin, - lmax=3 * pixels.nside, - I_scale=0.001, - Q_scale=0.0001, - U_scale=0.0001, - det_data=defaults.det_data, - ) - - # Now clear the pointing and reset things for use with the mapmaking test later - delete_pointing = ops.Delete( - detdata=[pixels.pixels, weights.weights, detpointing.quats] - ) - delete_pointing.apply(data) - pixels.create_dist = None - - # Create an uncorrelated noise model from focalplane detector properties - default_model = ops.DefaultNoiseModel(noise_model="noise_model") - default_model.apply(data) - - # Simulate noise and accumulate to signal - sim_noise = ops.SimNoise( - noise_model=default_model.noise_model, det_data=defaults.det_data - ) - sim_noise.apply(data) - - # Set up binning operator for solving - binner = ops.BinMap( - pixel_dist="pixel_dist", - pixel_pointing=pixels, - stokes_weights=weights, - noise_model=default_model.noise_model, - ) - - # Set up template matrix with just an offset template. - - # Use 1/10 of an observation as the baseline length. Make it not evenly - # divisible in order to test handling of the final amplitude. 
- ob_time = ( - data.obs[0].shared[defaults.times][-1] - - data.obs[0].shared[defaults.times][0] - ) - # step_seconds = float(int(ob_time / 10.0)) - step_seconds = 5.0 - tmpl = templates.Offset( - times=defaults.times, - noise_model=default_model.noise_model, - step_time=step_seconds * u.second, - ) - - tmatrix = ops.TemplateMatrix(templates=[tmpl]) - - ops.Copy(detdata=[(defaults.det_data, "input_signal")]).apply(data) - - gt = GlobalTimers.get() - gt.stop_all() - gt.clear_all() - - # Map maker - mapper = ops.MapMaker( - name="toastmap", - det_data=defaults.det_data, - binning=binner, - template_matrix=tmatrix, - solve_rcond_threshold=1.0e-1, - map_rcond_threshold=1.0e-1, - iter_min=30, - iter_max=100, - write_hits=True, - write_map=True, - write_binmap=False, - write_noiseweighted_map=False, - write_cov=False, - write_rcond=False, - write_solver_products=True, - keep_final_products=False, - output_dir=testdir, - save_cleaned=True, - overwrite_cleaned=False, - copy_groups=2, - purge_det_data=True, - restore_det_data=True, - ) - - # Make the map - mapper.apply(data) - - alltimers = gather_timers(comm=data.comm.comm_world) - if data.comm.world_rank == 0: - out = os.path.join(testdir, "timing") - dump_timers(alltimers, out) - - toast_hit_path = os.path.join(testdir, f"{mapper.name}_hits.fits") - toast_map_path = os.path.join(testdir, f"{mapper.name}_map.fits") - toast_mask_path = os.path.join(testdir, f"{mapper.name}_solve_rcond_mask.fits") - - # Now run Madam on the same data and compare - - sample_rate = data.obs[0]["noise_model"].rate(data.obs[0].local_detectors[0]) - - pars = {} - pars["kfirst"] = "T" - pars["iter_min"] = 30 - pars["iter_max"] = 100 - pars["base_first"] = step_seconds - pars["fsample"] = sample_rate - pars["nside_map"] = pixels.nside - pars["nside_cross"] = pixels.nside - pars["nside_submap"] = min(8, pixels.nside) - pars["good_baseline_fraction"] = tmpl.good_fraction - pars["pixlim_cross"] = 1.0e-1 - pars["pixmode_cross"] = 2 # Use rcond threshold - pars["pixlim_map"] = 1.0e-1 - pars["pixmode_map"] = 2 # Use rcond threshold - pars["write_map"] = "T" - pars["write_binmap"] = "T" - pars["write_matrix"] = "F" - pars["write_wcov"] = "F" - pars["write_hits"] = "T" - pars["write_base"] = "F" - pars["write_mask"] = "T" - pars["kfilter"] = "F" - pars["precond_width_min"] = 1 - pars["precond_width_max"] = 1 - pars["use_cgprecond"] = "F" - pars["use_fprecond"] = "F" - pars["info"] = 3 - pars["path_output"] = testdir - - madam = ops.Madam( - params=pars, - det_data=defaults.det_data, - pixel_pointing=pixels, - stokes_weights=weights, - noise_model="noise_model", - det_out="madam_cleaned", - ) - - # Generate persistent pointing - pixels.apply(data) - weights.apply(data) - - # Run Madam - madam.apply(data) - - madam_hit_path = os.path.join(testdir, "madam_hmap.fits") - madam_map_path = os.path.join(testdir, "madam_map.fits") - madam_mask_path = os.path.join(testdir, "madam_mask.fits") - - fail = False + return data + + def compare_madam_outputs( + self, + testdir, + data, + input="input_signal", + madam_name="madam", + toast_name="toastmap", + ): + madam_root = os.path.join(testdir, madam_name) + toast_root = os.path.join(testdir, toast_name) + + madam_cleaned = f"{madam_name}_cleaned" + toast_cleaned = f"{toast_name}_cleaned" + + madam_hit_path = f"{madam_root}_hmap.fits" + madam_binned_path = f"{madam_root}_bmap.fits" + madam_map_path = f"{madam_root}_map.fits" + madam_mask_path = f"{madam_root}_mask.fits" + + toast_hit_path = f"{toast_root}_hits.fits" + toast_binned_path = 
f"{toast_root}_binmap.fits" + toast_map_path = f"{toast_root}_map.fits" + toast_mask_path = f"{toast_root}_solve_rcond_mask.fits" + toast_template_map_path = f"{toast_root}_template_map.fits" + + fail = 0 # Compare local destriped TOD on every process for ob in data.obs: for det in ob.select_local_detectors(flagmask=defaults.det_mask_invalid): - input_signal = ob.detdata["input_signal"][det] - madam_signal = ob.detdata["madam_cleaned"][det] - toast_signal = ob.detdata["toastmap_cleaned"][det] + input_signal = ob.detdata[input][det] + madam_signal = ob.detdata[madam_cleaned][det] + toast_signal = ob.detdata[toast_cleaned][det] + cleaned_diff = np.absolute(madam_signal - toast_signal) madam_base = input_signal - madam_signal toast_base = input_signal - toast_signal diff_base = madam_base - toast_base - if not np.allclose(toast_base, madam_base, rtol=0.01, atol=1.0e-6): print( - f"FAIL: {det} diff : PtP = {np.ptp(diff_base)}, " + f"FAIL: {ob.name}:{det} diff : PtP = {np.ptp(diff_base)}, " f"mean = {np.mean(diff_base)}" ) - fail = True + fail = 1 set_matplotlib_backend() import matplotlib.pyplot as plt @@ -426,85 +185,454 @@ def test_compare_madam_noprior(self): set_matplotlib_backend() import matplotlib.pyplot as plt - # Compare hit maps + # First plot everything for visual inspection later + + for hpath, mpaths in [ + (madam_hit_path, [madam_binned_path, madam_map_path]), + ( + toast_hit_path, + [toast_binned_path, toast_map_path, toast_template_map_path], + ), + ]: + for mpath in mpaths: + plot_healpix_maps( + hitfile=hpath, + mapfile=mpath, + range_I=None, + range_Q=None, + range_U=None, + max_hits=None, + truth=None, + gnomview=True, + gnomres=1.5, + cmap="viridis", + format="pdf", + ) + + # Now compare in memory toast_hits = hp.read_map(toast_hit_path, field=None, nest=True) madam_hits = hp.read_map(madam_hit_path, field=None, nest=True) - diff_hits = toast_hits - madam_hits - - outfile = os.path.join(testdir, "madam_hits.png") - hp.mollview(madam_hits, xsize=1600, nest=True) - plt.savefig(outfile) - plt.close() - outfile = os.path.join(testdir, "toast_hits.png") - hp.mollview(toast_hits, xsize=1600, nest=True) - plt.savefig(outfile) - plt.close() - outfile = os.path.join(testdir, "diff_hits.png") - hp.mollview(diff_hits, xsize=1600, nest=True) - plt.savefig(outfile) - plt.close() - - # Compare masks toast_mask = hp.read_map(toast_mask_path, field=None, nest=True) madam_mask = hp.read_map(madam_mask_path, field=None, nest=True) + toast_binned = hp.read_map(toast_binned_path, field=None, nest=True) + madam_binned = hp.read_map(madam_binned_path, field=None, nest=True) + + toast_map = hp.read_map(toast_map_path, field=None, nest=True) + madam_map = hp.read_map(madam_map_path, field=None, nest=True) + + toast_template_map = hp.read_map( + toast_template_map_path, field=None, nest=True + ) + madam_template_map = madam_binned - madam_map + + good_pix = madam_hits > 0 + bad_pix = np.logical_not(good_pix) + + diff_hits = toast_hits - madam_hits + diff_hits[bad_pix] = 0 + + # Compare masks + # Madam uses 1=good, 0=bad, Toast uses 0=good, non-zero=bad: tgood = toast_mask == 0 toast_mask[:] = 0 toast_mask[tgood] = 1 diff_mask = toast_mask - madam_mask[0] - outfile = os.path.join(testdir, "madam_mask.png") - hp.mollview(madam_mask[0], xsize=1600, nest=True) - plt.savefig(outfile) - plt.close() - outfile = os.path.join(testdir, "toast_mask.png") - hp.mollview(toast_mask, xsize=1600, nest=True) - plt.savefig(outfile) - plt.close() - outfile = os.path.join(testdir, "diff_mask.png") - 
hp.mollview(diff_mask, xsize=1600, nest=True) - plt.savefig(outfile) - plt.close() + n_bad_mask = np.count_nonzero(diff_mask) + if n_bad_mask > 0: + print(f"FAIL: mask has {n_bad_mask} pixels that disagree") + fail = 1 # Compare maps - toast_map = hp.read_map(toast_map_path, field=None, nest=True) - madam_map = hp.read_map(madam_map_path, field=None, nest=True) - # Set madam unhit pixels to zero - for stokes, ststr in zip(range(3), ["I", "Q", "U"]): - good = madam_map[stokes] != hp.UNSEEN - diff_map = toast_map[stokes] - madam_map[stokes] - - outfile = os.path.join(testdir, "madam_map_{}.png".format(ststr)) - hp.mollview(madam_map[stokes], xsize=1600, nest=True) - plt.savefig(outfile) - plt.close() - outfile = os.path.join(testdir, "toast_map_{}.png".format(ststr)) - hp.mollview(toast_map[stokes], xsize=1600, nest=True) - plt.savefig(outfile) - plt.close() - outfile = os.path.join(testdir, "diff_map_{}.png".format(ststr)) - hp.mollview(diff_map, xsize=1600, nest=True) - plt.savefig(outfile) - plt.close() - - if not np.allclose( - toast_map[stokes][good], madam_map[stokes][good], rtol=0.01 - ): - print( - f"FAIL: max {ststr} diff = {np.max(np.absolute(diff_map[good]))}" - ) - fail = True - - if data.comm.comm_world is not None: - fail = data.comm.comm_world.bcast(fail, root=0) + for pix in np.arange(len(madam_hits), dtype=np.int64)[good_pix]: + line = f"{pix}: bin: {madam_binned[0][pix]:0.3e} {toast_binned[0][pix]:0.3e} " + line += f" tmpl: {madam_template_map[0][pix]:0.3e} {toast_template_map[0][pix]:0.3e} " + line += f" destrp: {madam_map[0][pix]:0.3e} {toast_map[0][pix]:0.3e}" + print(line, flush=True) + + for mname, tmap, mmap in [ + ("destriped", toast_map, madam_map), + ("binned", toast_binned, madam_binned), + ("template", toast_template_map, madam_template_map), + ]: + # Set madam unhit pixels to zero + for stokes, ststr in zip(range(3), ["I", "Q", "U"]): + diff_map = tmap[stokes] - mmap[stokes] + diff_map[bad_pix] = hp.UNSEEN + + outfile = os.path.join(testdir, f"diff_{mname}_{ststr}.png") + hp.mollview(diff_map, xsize=1600, nest=True) + plt.savefig(outfile) + plt.close() - self.assertFalse(fail) + if not np.allclose( + tmap[stokes][good_pix], mmap[stokes][good_pix], rtol=0.01 + ): + msg = f"FAIL: {mname} max {ststr} diff = " + msg += f"{np.max(np.absolute(diff_map[good_pix]))}" + print(msg) + fail = 1 - close_data(data) + if data.comm.comm_world is not None: + fail = data.comm.comm_world.allreduce(fail, MPI.SUM) + + self.assertTrue(fail == 0) + + # def test_offset(self): + # if sys.platform.lower() == "darwin": + # print(f"WARNING: Skipping test_offset on MacOS") + # return + + # testdir = os.path.join(self.outdir, "offset") + # if self.comm is None or self.comm.rank == 0: + # os.makedirs(testdir) + + # # Create a fake satellite data set for testing + # data = self.create_fake_data(testdir) + + # detpointing = ops.PointingDetectorSimple() + # pixels = ops.PixelsHealpix( + # nside=64, + # detector_pointing=detpointing, + # ) + + # weights = ops.StokesWeights( + # mode="IQU", + # hwp_angle=defaults.hwp_angle, + # detector_pointing=detpointing, + # ) + + # # Set up binning operator for solving + # binner = ops.BinMap( + # pixel_dist="pixel_dist", + # pixel_pointing=pixels, + # stokes_weights=weights, + # noise_model="noise_model", + # ) + + # # Set up template matrix with just an offset template. + + # # Use 1/10 of an observation as the baseline length. Make it not evenly + # # divisible in order to test handling of the final amplitude. 
+ # ob_time = ( + # data.obs[0].shared[defaults.times][-1] + # - data.obs[0].shared[defaults.times][0] + # ) + # step_seconds = float(int(ob_time / 10.0)) + # tmpl = templates.Offset( + # times=defaults.times, + # noise_model="noise_model", + # step_time=step_seconds * u.second, + # ) + + # tmatrix = ops.TemplateMatrix(templates=[tmpl]) + + # # Map maker + # mapper = ops.MapMaker( + # name="test1", + # det_data=defaults.det_data, + # binning=binner, + # template_matrix=tmatrix, + # solve_rcond_threshold=1.0e-1, + # map_rcond_threshold=1.0e-1, + # write_hits=True, + # write_map=True, + # write_binmap=True, + # write_template_map=True, + # write_cov=False, + # write_rcond=False, + # keep_solver_products=False, + # keep_final_products=False, + # output_dir=testdir, + # ) + + # # Make the map + # mapper.apply(data) + + # # Check that we can also run in full-memory mode + # tmatrix.reset() + # mapper.reset_pix_dist = True + # pixels.apply(data) + # weights.apply(data) + + # use_accel = None + # if accel_enabled() and ( + # pixels.supports_accel() + # and weights.supports_accel() + # and mapper.supports_accel() + # ): + # use_accel = True + # data.accel_create(pixels.requires()) + # data.accel_create(weights.requires()) + # data.accel_create(mapper.requires()) + # data.accel_update_device(pixels.requires()) + # data.accel_update_device(weights.requires()) + # data.accel_update_device(mapper.requires()) + + # binner.full_pointing = True + # mapper.name = "test2" + # mapper.apply(data, use_accel=use_accel) + + # close_data(data) + + # def test_offset_cache(self): + # if sys.platform.lower() == "darwin": + # print(f"WARNING: Skipping test_offset on MacOS") + # return + + # testdir = os.path.join(self.outdir, "offset_cache") + # if self.comm is None or self.comm.rank == 0: + # os.makedirs(testdir) + + # # Create a fake satellite data set for testing + # data = self.create_fake_data(testdir) + + # detpointing = ops.PointingDetectorSimple() + # pixels = ops.PixelsHealpix( + # nside=64, + # detector_pointing=detpointing, + # ) + # weights = ops.StokesWeights( + # mode="IQU", + # hwp_angle=defaults.hwp_angle, + # detector_pointing=detpointing, + # ) + + # # Set up binning operator for solving + # binner = ops.BinMap( + # pixel_dist="pixel_dist", + # pixel_pointing=pixels, + # stokes_weights=weights, + # noise_model="noise_model", + # ) + + # # Set up template matrix with just an offset template. + + # # Use 1/10 of an observation as the baseline length. Make it not evenly + # # divisible in order to test handling of the final amplitude. + # ob_time = ( + # data.obs[0].shared[defaults.times][-1] + # - data.obs[0].shared[defaults.times][0] + # ) + # step_seconds = float(int(ob_time / 10.0)) + # tmpl = templates.Offset( + # times=defaults.times, + # noise_model="noise_model", + # step_time=step_seconds * u.second, + # ) + + # tmatrix = ops.TemplateMatrix(templates=[tmpl]) + + # # Map maker + # mapper = ops.MapMaker( + # name="check", + # det_data=defaults.det_data, + # binning=binner, + # template_matrix=tmatrix, + # solve_rcond_threshold=1.0e-1, + # map_rcond_threshold=1.0e-1, + # write_hits=True, + # write_map=True, + # write_binmap=True, + # write_template_map=True, + # write_cov=False, + # write_rcond=False, + # keep_solver_products=False, + # keep_final_products=True, + # output_dir=testdir, + # ) + + # # Make the map + # mapper.apply(data) + + # # Now enable caching of map-domain products and the signal TOD by + # # the binning operator. 
+ + # binner.cache_dir = os.path.join(testdir, "cache") + # binner.cache_detdata = True + # mapper.name = "test" + + # mapper.apply(data) + + # # Verify that the mapmaker products agree + # for prod in ["hits", "map", "binmap"]: + # original = data[f"test_{prod}"] + # check = data[f"check_{prod}"] + # failed = False + + # if original.distribution != check.distribution: + # failed = True + + # if not failed: + # dist = check.distribution + # for sm in range(dist.n_local_submap): + # for px in range(dist.n_pix_submap): + # if check.data[sm, px, 0] != 0: + # if not np.allclose( + # check.data[sm, px], + # original.data[sm, px], + # rtol=1e-5, + # atol=1e-5, + # ): + # msg = f"{prod} FAIL [{sm},{px}]: " + # msg += f"{original.data[sm,px]} " + # msg += f"!= {check.data[sm,px]}" + # print(msg, flush=True) + # failed = True + # if data.comm.comm_world is not None: + # failed = data.comm.comm_world.allreduce(failed, op=MPI.LOR) + # self.assertFalse(failed) + + # close_data(data) + + # def test_compare_madam_noprior(self): + # if not ops.madam.available(): + # print("libmadam not available, skipping destriping comparison") + # return + + # testdir = os.path.join(self.outdir, "compare_madam_noprior") + # if self.comm is None or self.comm.rank == 0: + # os.makedirs(testdir) + + # # Create a fake satellite data set for testing + # data = self.create_fake_data(testdir) + # print("SIGNAL = ", data.obs[0].detdata["signal"][0]) + + # # Create some sky signal timestreams. + # detpointing = ops.PointingDetectorSimple() + # pixels = ops.PixelsHealpix( + # nside=16, + # nest=True, + # create_dist="pixel_dist", + # detector_pointing=detpointing, + # ) + # weights = ops.StokesWeights( + # mode="IQU", + # hwp_angle=defaults.hwp_angle, + # detector_pointing=detpointing, + # ) + + # # Set up binning operator for solving + # binner = ops.BinMap( + # pixel_dist="pixel_dist", + # pixel_pointing=pixels, + # stokes_weights=weights, + # noise_model="noise_model", + # ) + + # # Set up template matrix with just an offset template. + + # # Use 1/10 of an observation as the baseline length. Make it not evenly + # # divisible in order to test handling of the final amplitude. 
+ # ob_time = ( + # data.obs[0].shared[defaults.times][-1] + # - data.obs[0].shared[defaults.times][0] + # ) + # # step_seconds = float(int(ob_time / 10.0)) + # step_seconds = 5.0 + # tmpl = templates.Offset( + # times=defaults.times, + # noise_model="noise_model", + # step_time=step_seconds * u.second, + # ) + + # tmatrix = ops.TemplateMatrix(templates=[tmpl]) + + # ops.Copy(detdata=[(defaults.det_data, "input_signal")]).apply(data) + + # gt = GlobalTimers.get() + # gt.stop_all() + # gt.clear_all() + + # # Map maker + # mapper = ops.MapMaker( + # name="toastmap", + # det_data=defaults.det_data, + # binning=binner, + # template_matrix=tmatrix, + # solve_rcond_threshold=1.0e-1, + # map_rcond_threshold=1.0e-1, + # iter_min=30, + # iter_max=100, + # write_hits=True, + # write_map=True, + # write_binmap=True, + # write_noiseweighted_map=False, + # write_cov=False, + # write_rcond=False, + # write_solver_products=True, + # keep_final_products=False, + # output_dir=testdir, + # save_cleaned=True, + # overwrite_cleaned=False, + # ) + + # # Make the map + # mapper.apply(data) + + # alltimers = gather_timers(comm=data.comm.comm_world) + # if data.comm.world_rank == 0: + # out = os.path.join(testdir, "timing") + # dump_timers(alltimers, out) + + # # Now run Madam on the same data and compare + + # sample_rate = data.obs[0]["noise_model"].rate(data.obs[0].local_detectors[0]) + + # pars = {} + # pars["kfirst"] = "T" + # pars["iter_min"] = 30 + # pars["iter_max"] = 100 + # pars["base_first"] = step_seconds + # pars["fsample"] = sample_rate + # pars["nside_map"] = pixels.nside + # pars["nside_cross"] = pixels.nside + # pars["nside_submap"] = min(8, pixels.nside) + # pars["good_baseline_fraction"] = tmpl.good_fraction + # pars["pixlim_cross"] = 1.0e-1 + # pars["pixmode_cross"] = 2 # Use rcond threshold + # pars["pixlim_map"] = 1.0e-1 + # pars["pixmode_map"] = 2 # Use rcond threshold + # pars["write_map"] = "T" + # pars["write_binmap"] = "T" + # pars["write_matrix"] = "F" + # pars["write_wcov"] = "F" + # pars["write_hits"] = "T" + # pars["write_base"] = "F" + # pars["write_mask"] = "T" + # pars["kfilter"] = "F" + # pars["precond_width_min"] = 1 + # pars["precond_width_max"] = 1 + # pars["use_cgprecond"] = "F" + # pars["use_fprecond"] = "F" + # pars["info"] = 3 + # pars["path_output"] = testdir + + # madam = ops.Madam( + # params=pars, + # det_data=defaults.det_data, + # pixel_pointing=pixels, + # stokes_weights=weights, + # noise_model="noise_model", + # det_out="madam_cleaned", + # ) + + # # Generate persistent pointing + # pixels.apply(data) + # weights.apply(data) + + # # Run Madam + # madam.apply(data) + + # # Compare outputs + # self.compare_madam_outputs(testdir, data) + + # close_data(data) def test_compare_madam_diagpre(self): if not ops.madam.available(): @@ -516,9 +644,13 @@ def test_compare_madam_diagpre(self): os.makedirs(testdir) # Create a fake satellite data set for testing - data = create_satellite_data( + raw_data = create_satellite_data( self.comm, obs_per_group=self.obs_per_group, obs_time=10.0 * u.minute ) + data = raw_data.select(obs_index=0) + # Cut all but one detector + alldets = data.obs[0].local_detectors + data.obs[0].update_local_detector_flags({x: 1 for x in alldets[1:]}) # Create some sky signal timestreams. 
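For context on the hunk above: the diagonal-preconditioner comparison now runs on a reduced data set, keeping a single observation and a single unflagged detector. A minimal sketch of that reduction pattern, using only calls that appear in this diff and assuming the test-class context (self.comm, helpers) from the surrounding file; flag value 1 and the cleanup order are copied from the hunks above and further below:

    # Sketch only -- mirrors the pattern added in test_compare_madam_diagpre.
    raw_data = create_satellite_data(
        self.comm, obs_per_group=self.obs_per_group, obs_time=10.0 * u.minute
    )

    # Keep only the first observation; select() returns a view into raw_data.
    data = raw_data.select(obs_index=0)

    # Cut every detector except the first by flagging it.
    alldets = data.obs[0].local_detectors
    data.obs[0].update_local_detector_flags({x: 1 for x in alldets[1:]})

    # ... run the TOAST mapmaker and Madam on `data` and compare outputs ...

    # Drop the view before closing the full data set.
    del data
    close_data(raw_data)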
detpointing = ops.PointingDetectorSimple() @@ -593,11 +725,13 @@ def test_compare_madam_diagpre(self): step_time=step_seconds * u.second, use_noise_prior=True, precond_width=1, - # debug_plots=testdir, + debug_plots=testdir, ) tmatrix = ops.TemplateMatrix(templates=[tmpl]) + ops.Copy(detdata=[(defaults.det_data, "input_signal")]).apply(data) + gt = GlobalTimers.get() gt.stop_all() gt.clear_all() @@ -611,11 +745,18 @@ def test_compare_madam_diagpre(self): solve_rcond_threshold=1.0e-4, map_rcond_threshold=1.0e-4, iter_max=50, - write_hits=False, - write_map=False, + write_hits=True, + write_map=True, + write_binmap=True, + write_noiseweighted_map=False, write_cov=False, write_rcond=False, - keep_final_products=True, + write_solver_products=True, + write_solver_amplitudes=True, + keep_final_products=False, + output_dir=testdir, + save_cleaned=True, + overwrite_cleaned=False, ) # Make the map @@ -626,17 +767,6 @@ def test_compare_madam_diagpre(self): out = os.path.join(testdir, "timing") dump_timers(alltimers, out) - # Outputs - toast_hits = "toastmap_hits" - toast_map = "toastmap_map" - - # Write map to disk so we can load the whole thing on one process. - - toast_hit_path = os.path.join(testdir, "toast_hits.fits") - toast_map_path = os.path.join(testdir, "toast_map.fits") - data[toast_map].write(toast_map_path) - data[toast_hits].write(toast_hit_path) - # Now run Madam on the same data and compare sample_rate = data.obs[0]["noise_model"].rate(data.obs[0].local_detectors[0]) @@ -656,11 +786,12 @@ def test_compare_madam_diagpre(self): pars["pixlim_map"] = 1.0e-4 pars["pixmode_map"] = 2 # Use rcond threshold pars["write_map"] = "T" - pars["write_binmap"] = "F" + pars["write_binmap"] = "T" pars["write_matrix"] = "F" pars["write_wcov"] = "F" pars["write_hits"] = "T" - pars["write_base"] = "T" + pars["write_base"] = "F" + pars["write_mask"] = "T" pars["kfilter"] = "T" pars["precond_width_min"] = 1 pars["precond_width_max"] = 1 @@ -675,6 +806,7 @@ def test_compare_madam_diagpre(self): pixel_pointing=pixels, stokes_weights=weights, noise_model="noise_model", + det_out="madam_cleaned", ) # Generate persistent pointing @@ -684,352 +816,268 @@ def test_compare_madam_diagpre(self): # Run Madam madam.apply(data) - madam_hit_path = os.path.join(testdir, "madam_hmap.fits") - madam_map_path = os.path.join(testdir, "madam_map.fits") - - fail = False - - if data.comm.world_rank == 0: - set_matplotlib_backend() - import matplotlib.pyplot as plt - - # Compare hit maps - - toast_hits = hp.read_map(toast_hit_path, field=None, nest=True) - madam_hits = hp.read_map(madam_hit_path, field=None, nest=True) - diff_hits = toast_hits - madam_hits - - outfile = os.path.join(testdir, "madam_hits.png") - hp.mollview(madam_hits, xsize=1600, nest=True) - plt.savefig(outfile) - plt.close() - outfile = os.path.join(testdir, "toast_hits.png") - hp.mollview(toast_hits, xsize=1600, nest=True) - plt.savefig(outfile) - plt.close() - outfile = os.path.join(testdir, "diff_hits.png") - hp.mollview(diff_hits, xsize=1600, nest=True) - plt.savefig(outfile) - plt.close() - - # Compare maps - - input_map = hp.read_map( - os.path.join(testdir, "input_map.fits"), field=None, nest=True - ) - toast_map = hp.read_map(toast_map_path, field=None, nest=True) - madam_map = hp.read_map(madam_map_path, field=None, nest=True) - - # Set madam unhit pixels to zero - for stokes, ststr in zip(range(3), ["I", "Q", "U"]): - mask = hp.mask_bad(madam_map[stokes]) - madam_map[stokes][mask] = 0.0 - input_map[stokes][mask] = 0.0 - diff_map = toast_map[stokes] - 
madam_map[stokes] - madam_diff_truth = madam_map[stokes] - input_map[stokes] - toast_diff_truth = toast_map[stokes] - input_map[stokes] - - print("diff map {} has rms {}".format(ststr, np.std(diff_map))) - print( - "toast-truth map {} has rms {}".format( - ststr, np.std(toast_diff_truth) - ) - ) - print( - "madam-truth map {} has rms {}".format( - ststr, np.std(madam_diff_truth) - ) - ) - - outfile = os.path.join(testdir, "madam_map_{}.png".format(ststr)) - hp.mollview(madam_map[stokes], xsize=1600, nest=True) - plt.savefig(outfile) - plt.close() - outfile = os.path.join(testdir, "toast_map_{}.png".format(ststr)) - hp.mollview(toast_map[stokes], xsize=1600, nest=True) - plt.savefig(outfile) - plt.close() - outfile = os.path.join(testdir, "diff_map_{}.png".format(ststr)) - hp.mollview(diff_map, xsize=1600, nest=True) - plt.savefig(outfile) - plt.close() - outfile = os.path.join(testdir, "madam_diff_truth_{}.png".format(ststr)) - hp.mollview(madam_diff_truth, xsize=1600, nest=True) - plt.savefig(outfile) - plt.close() - outfile = os.path.join(testdir, "toast_diff_truth_{}.png".format(ststr)) - hp.mollview(toast_diff_truth, xsize=1600, nest=True) - plt.savefig(outfile) - plt.close() - - if not np.allclose( - toast_map[stokes], madam_map[stokes], atol=0.1, rtol=0.05 - ): - fail = True - - if data.comm.comm_world is not None: - fail = data.comm.comm_world.bcast(fail, root=0) - - self.assertFalse(fail) - - close_data(data) - - def test_compare_madam_bandpre(self): - if not ops.madam.available(): - print( - "libmadam not available, skipping comparison with banded preconditioner" - ) - return - - testdir = os.path.join(self.outdir, "compare_madam_bandpre") - if self.comm is None or self.comm.rank == 0: - os.makedirs(testdir) - - # Create a fake satellite data set for testing - data = create_satellite_data( - self.comm, obs_per_group=self.obs_per_group, obs_time=10.0 * u.minute - ) - - # Create some sky signal timestreams. - detpointing = ops.PointingDetectorSimple() - pixels = ops.PixelsHealpix( - nside=16, - nest=True, - create_dist="pixel_dist", - detector_pointing=detpointing, - ) - pixels.apply(data) - weights = ops.StokesWeights( - mode="IQU", - hwp_angle=defaults.hwp_angle, - detector_pointing=detpointing, - ) - weights.apply(data) - - # Create fake polarized sky signal - skyfile = os.path.join(testdir, "input_map.fits") - map_key = "fake_map" - create_fake_healpix_scanned_tod( - data, - pixels, - weights, - skyfile, - "pixel_dist", - map_key=map_key, - fwhm=30.0 * u.arcmin, - lmax=3 * pixels.nside, - I_scale=0.001, - Q_scale=0.0001, - U_scale=0.0001, - det_data=defaults.det_data, - ) - - # Now clear the pointing and reset things for use with the mapmaking test later - delete_pointing = ops.Delete( - detdata=[pixels.pixels, weights.weights, detpointing.quats] - ) - delete_pointing.apply(data) - pixels.create_dist = None - - # Create an uncorrelated noise model from focalplane detector properties - default_model = ops.DefaultNoiseModel(noise_model="noise_model") - default_model.apply(data) - - # Simulate noise and accumulate to signal - sim_noise = ops.SimNoise( - noise_model=default_model.noise_model, det_data=defaults.det_data - ) - sim_noise.apply(data) - - # Set up binning operator for solving - binner = ops.BinMap( - pixel_dist="pixel_dist", - pixel_pointing=pixels, - stokes_weights=weights, - noise_model=default_model.noise_model, - full_pointing=True, - ) - - # Set up template matrix with just an offset template. - - # Use 1/10 of an observation as the baseline length. 
Make it not evenly - # divisible in order to test handling of the final amplitude. - ob_time = ( - data.obs[0].shared[defaults.times][-1] - - data.obs[0].shared[defaults.times][0] - ) - # step_seconds = float(int(ob_time / 10.0)) - step_seconds = 5.0 - tmpl = templates.Offset( - times=defaults.times, - noise_model=default_model.noise_model, - step_time=step_seconds * u.second, - use_noise_prior=True, - precond_width=10, - # debug_plots=testdir, - ) - - tmatrix = ops.TemplateMatrix(templates=[tmpl]) - - gt = GlobalTimers.get() - gt.stop_all() - gt.clear_all() - - # Map maker - mapper = ops.MapMaker( - name="toastmap", - det_data=defaults.det_data, - binning=binner, - template_matrix=tmatrix, - solve_rcond_threshold=1.0e-4, - map_rcond_threshold=1.0e-4, - iter_max=50, - write_hits=False, - write_map=False, - write_cov=False, - write_rcond=False, - keep_final_products=True, - ) - - # Make the map - mapper.apply(data) - - alltimers = gather_timers(comm=data.comm.comm_world) - if data.comm.world_rank == 0: - out = os.path.join(testdir, "timing") - dump_timers(alltimers, out) - - # Outputs - toast_hits = "toastmap_hits" - toast_map = "toastmap_map" - - # Write map to disk so we can load the whole thing on one process. - - toast_hit_path = os.path.join(testdir, "toast_hits.fits") - toast_map_path = os.path.join(testdir, "toast_map.fits") - data[toast_map].write(toast_map_path) - data[toast_hits].write(toast_hit_path) - - # Now run Madam on the same data and compare - - sample_rate = data.obs[0]["noise_model"].rate(data.obs[0].local_detectors[0]) - - pars = {} - pars["kfirst"] = "T" - pars["basis_order"] = 0 - pars["iter_max"] = 50 - pars["base_first"] = step_seconds - pars["fsample"] = sample_rate - pars["nside_map"] = pixels.nside - pars["nside_cross"] = pixels.nside - pars["nside_submap"] = min(8, pixels.nside) - pars["good_baseline_fraction"] = tmpl.good_fraction - pars["pixlim_cross"] = 1.0e-4 - pars["pixmode_cross"] = 2 # Use rcond threshold - pars["pixlim_map"] = 1.0e-4 - pars["pixmode_map"] = 2 # Use rcond threshold - pars["write_map"] = "T" - pars["write_binmap"] = "F" - pars["write_matrix"] = "F" - pars["write_wcov"] = "F" - pars["write_hits"] = "T" - pars["write_base"] = "T" - pars["kfilter"] = "T" - pars["precond_width_min"] = 10 - pars["precond_width_max"] = 10 - pars["use_cgprecond"] = "F" - pars["use_fprecond"] = "F" - pars["info"] = 3 - pars["path_output"] = testdir - - madam = ops.Madam( - params=pars, - det_data=defaults.det_data, - pixel_pointing=pixels, - stokes_weights=weights, - noise_model="noise_model", - ) - - # Generate persistent pointing - pixels.apply(data) - weights.apply(data) - - # Run Madam - madam.apply(data) - - madam_hit_path = os.path.join(testdir, "madam_hmap.fits") - madam_map_path = os.path.join(testdir, "madam_map.fits") - - fail = False - - if data.comm.world_rank == 0: - set_matplotlib_backend() - import matplotlib.pyplot as plt - - # Compare hit maps - - toast_hits = hp.read_map(toast_hit_path, field=None, nest=True) - madam_hits = hp.read_map(madam_hit_path, field=None, nest=True) - diff_hits = toast_hits - madam_hits - - outfile = os.path.join(testdir, "madam_hits.png") - hp.mollview(madam_hits, xsize=1600, nest=True) - plt.savefig(outfile) - plt.close() - outfile = os.path.join(testdir, "toast_hits.png") - hp.mollview(toast_hits, xsize=1600, nest=True) - plt.savefig(outfile) - plt.close() - outfile = os.path.join(testdir, "diff_hits.png") - hp.mollview(diff_hits, xsize=1600, nest=True) - plt.savefig(outfile) - plt.close() - - # Compare maps - - 
input_map = hp.read_map( - os.path.join(testdir, "input_map.fits"), field=None, nest=True - ) - toast_map = hp.read_map(toast_map_path, field=None, nest=True) - madam_map = hp.read_map(madam_map_path, field=None, nest=True) - # Set madam unhit pixels to zero - for stokes, ststr in zip(range(3), ["I", "Q", "U"]): - mask = hp.mask_bad(madam_map[stokes]) - madam_map[stokes][mask] = 0.0 - input_map[stokes][mask] = 0.0 - diff_map = toast_map[stokes] - madam_map[stokes] - madam_diff_truth = madam_map[stokes] - input_map[stokes] - toast_diff_truth = toast_map[stokes] - input_map[stokes] - # print("diff map {} has rms {}".format(ststr, np.std(diff_map))) - outfile = os.path.join(testdir, "madam_map_{}.png".format(ststr)) - hp.mollview(madam_map[stokes], xsize=1600, nest=True) - plt.savefig(outfile) - plt.close() - outfile = os.path.join(testdir, "toast_map_{}.png".format(ststr)) - hp.mollview(toast_map[stokes], xsize=1600, nest=True) - plt.savefig(outfile) - plt.close() - outfile = os.path.join(testdir, "diff_map_{}.png".format(ststr)) - hp.mollview(diff_map, xsize=1600, nest=True) - plt.savefig(outfile) - plt.close() - outfile = os.path.join(testdir, "madam_diff_truth_{}.png".format(ststr)) - hp.mollview(madam_diff_truth, xsize=1600, nest=True) - plt.savefig(outfile) - plt.close() - outfile = os.path.join(testdir, "toast_diff_truth_{}.png".format(ststr)) - hp.mollview(toast_diff_truth, xsize=1600, nest=True) - plt.savefig(outfile) - plt.close() - - if not np.allclose( - toast_map[stokes], madam_map[stokes], atol=0.1, rtol=0.05 - ): - fail = True - - if data.comm.comm_world is not None: - fail = data.comm.comm_world.bcast(fail, root=0) - - self.assertFalse(fail) - - close_data(data) + # Compare outputs + self.compare_madam_outputs(testdir, data) + + del data + close_data(raw_data) + + # def test_compare_madam_bandpre(self): + # if not ops.madam.available(): + # print( + # "libmadam not available, skipping comparison with banded preconditioner" + # ) + # return + + # testdir = os.path.join(self.outdir, "compare_madam_bandpre") + # if self.comm is None or self.comm.rank == 0: + # os.makedirs(testdir) + + # # Create a fake satellite data set for testing + # data = create_satellite_data( + # self.comm, obs_per_group=self.obs_per_group, obs_time=10.0 * u.minute + # ) + + # # Create some sky signal timestreams. 
+ # detpointing = ops.PointingDetectorSimple() + # pixels = ops.PixelsHealpix( + # nside=16, + # nest=True, + # create_dist="pixel_dist", + # detector_pointing=detpointing, + # ) + # pixels.apply(data) + # weights = ops.StokesWeights( + # mode="IQU", + # hwp_angle=defaults.hwp_angle, + # detector_pointing=detpointing, + # ) + # weights.apply(data) + + # # Create fake polarized sky signal + # skyfile = os.path.join(testdir, "input_map.fits") + # map_key = "fake_map" + # create_fake_healpix_scanned_tod( + # data, + # pixels, + # weights, + # skyfile, + # "pixel_dist", + # map_key=map_key, + # fwhm=30.0 * u.arcmin, + # lmax=3 * pixels.nside, + # I_scale=0.001, + # Q_scale=0.0001, + # U_scale=0.0001, + # det_data=defaults.det_data, + # ) + + # # Now clear the pointing and reset things for use with the mapmaking test later + # delete_pointing = ops.Delete( + # detdata=[pixels.pixels, weights.weights, detpointing.quats] + # ) + # delete_pointing.apply(data) + # pixels.create_dist = None + + # # Create an uncorrelated noise model from focalplane detector properties + # default_model = ops.DefaultNoiseModel(noise_model="noise_model") + # default_model.apply(data) + + # # Simulate noise and accumulate to signal + # sim_noise = ops.SimNoise( + # noise_model=default_model.noise_model, det_data=defaults.det_data + # ) + # sim_noise.apply(data) + + # # Set up binning operator for solving + # binner = ops.BinMap( + # pixel_dist="pixel_dist", + # pixel_pointing=pixels, + # stokes_weights=weights, + # noise_model=default_model.noise_model, + # full_pointing=True, + # ) + + # # Set up template matrix with just an offset template. + + # # Use 1/10 of an observation as the baseline length. Make it not evenly + # # divisible in order to test handling of the final amplitude. + # ob_time = ( + # data.obs[0].shared[defaults.times][-1] + # - data.obs[0].shared[defaults.times][0] + # ) + # # step_seconds = float(int(ob_time / 10.0)) + # step_seconds = 5.0 + # tmpl = templates.Offset( + # times=defaults.times, + # noise_model=default_model.noise_model, + # step_time=step_seconds * u.second, + # use_noise_prior=True, + # precond_width=10, + # # debug_plots=testdir, + # ) + + # tmatrix = ops.TemplateMatrix(templates=[tmpl]) + + # gt = GlobalTimers.get() + # gt.stop_all() + # gt.clear_all() + + # # Map maker + # mapper = ops.MapMaker( + # name="toastmap", + # det_data=defaults.det_data, + # binning=binner, + # template_matrix=tmatrix, + # solve_rcond_threshold=1.0e-4, + # map_rcond_threshold=1.0e-4, + # iter_max=50, + # write_hits=True, + # write_map=True, + # write_cov=False, + # write_rcond=False, + # keep_final_products=True, + # ) + + # # Make the map + # mapper.apply(data) + + # alltimers = gather_timers(comm=data.comm.comm_world) + # if data.comm.world_rank == 0: + # out = os.path.join(testdir, "timing") + # dump_timers(alltimers, out) + + # # Outputs + # toast_hits = "toastmap_hits" + # toast_map = "toastmap_map" + + # # Write map to disk so we can load the whole thing on one process. 
+ + # toast_hit_path = os.path.join(testdir, "toast_hits.fits") + # toast_map_path = os.path.join(testdir, "toast_map.fits") + # data[toast_map].write(toast_map_path) + # data[toast_hits].write(toast_hit_path) + + # # Now run Madam on the same data and compare + + # sample_rate = data.obs[0]["noise_model"].rate(data.obs[0].local_detectors[0]) + + # pars = {} + # pars["kfirst"] = "T" + # pars["basis_order"] = 0 + # pars["iter_max"] = 50 + # pars["base_first"] = step_seconds + # pars["fsample"] = sample_rate + # pars["nside_map"] = pixels.nside + # pars["nside_cross"] = pixels.nside + # pars["nside_submap"] = min(8, pixels.nside) + # pars["good_baseline_fraction"] = tmpl.good_fraction + # pars["pixlim_cross"] = 1.0e-4 + # pars["pixmode_cross"] = 2 # Use rcond threshold + # pars["pixlim_map"] = 1.0e-4 + # pars["pixmode_map"] = 2 # Use rcond threshold + # pars["write_map"] = "T" + # pars["write_binmap"] = "F" + # pars["write_matrix"] = "F" + # pars["write_wcov"] = "F" + # pars["write_hits"] = "T" + # pars["write_base"] = "T" + # pars["kfilter"] = "T" + # pars["precond_width_min"] = 10 + # pars["precond_width_max"] = 10 + # pars["use_cgprecond"] = "F" + # pars["use_fprecond"] = "F" + # pars["info"] = 3 + # pars["path_output"] = testdir + + # madam = ops.Madam( + # params=pars, + # det_data=defaults.det_data, + # pixel_pointing=pixels, + # stokes_weights=weights, + # noise_model="noise_model", + # ) + + # # Generate persistent pointing + # pixels.apply(data) + # weights.apply(data) + + # # Run Madam + # madam.apply(data) + + # madam_hit_path = os.path.join(testdir, "madam_hmap.fits") + # madam_map_path = os.path.join(testdir, "madam_map.fits") + + # fail = False + + # if data.comm.world_rank == 0: + # set_matplotlib_backend() + # import matplotlib.pyplot as plt + + # # Compare hit maps + + # toast_hits = hp.read_map(toast_hit_path, field=None, nest=True) + # madam_hits = hp.read_map(madam_hit_path, field=None, nest=True) + # diff_hits = toast_hits - madam_hits + + # outfile = os.path.join(testdir, "madam_hits.png") + # hp.mollview(madam_hits, xsize=1600, nest=True) + # plt.savefig(outfile) + # plt.close() + # outfile = os.path.join(testdir, "toast_hits.png") + # hp.mollview(toast_hits, xsize=1600, nest=True) + # plt.savefig(outfile) + # plt.close() + # outfile = os.path.join(testdir, "diff_hits.png") + # hp.mollview(diff_hits, xsize=1600, nest=True) + # plt.savefig(outfile) + # plt.close() + + # # Compare maps + + # input_map = hp.read_map( + # os.path.join(testdir, "input_map.fits"), field=None, nest=True + # ) + # toast_map = hp.read_map(toast_map_path, field=None, nest=True) + # madam_map = hp.read_map(madam_map_path, field=None, nest=True) + # # Set madam unhit pixels to zero + # for stokes, ststr in zip(range(3), ["I", "Q", "U"]): + # mask = hp.mask_bad(madam_map[stokes]) + # madam_map[stokes][mask] = 0.0 + # input_map[stokes][mask] = 0.0 + # diff_map = toast_map[stokes] - madam_map[stokes] + # madam_diff_truth = madam_map[stokes] - input_map[stokes] + # toast_diff_truth = toast_map[stokes] - input_map[stokes] + # # print("diff map {} has rms {}".format(ststr, np.std(diff_map))) + # outfile = os.path.join(testdir, "madam_map_{}.png".format(ststr)) + # hp.mollview(madam_map[stokes], xsize=1600, nest=True) + # plt.savefig(outfile) + # plt.close() + # outfile = os.path.join(testdir, "toast_map_{}.png".format(ststr)) + # hp.mollview(toast_map[stokes], xsize=1600, nest=True) + # plt.savefig(outfile) + # plt.close() + # outfile = os.path.join(testdir, "diff_map_{}.png".format(ststr)) + # 
hp.mollview(diff_map, xsize=1600, nest=True) + # plt.savefig(outfile) + # plt.close() + # outfile = os.path.join(testdir, "madam_diff_truth_{}.png".format(ststr)) + # hp.mollview(madam_diff_truth, xsize=1600, nest=True) + # plt.savefig(outfile) + # plt.close() + # outfile = os.path.join(testdir, "toast_diff_truth_{}.png".format(ststr)) + # hp.mollview(toast_diff_truth, xsize=1600, nest=True) + # plt.savefig(outfile) + # plt.close() + + # if not np.allclose( + # toast_map[stokes], madam_map[stokes], atol=0.1, rtol=0.05 + # ): + # fail = True + + # if data.comm.comm_world is not None: + # fail = data.comm.comm_world.bcast(fail, root=0) + + # self.assertFalse(fail) + + # close_data(data) diff --git a/src/toast/tests/ops_mapmaker_binning.py b/src/toast/tests/ops_mapmaker_binning.py index 099472308..673b3a551 100644 --- a/src/toast/tests/ops_mapmaker_binning.py +++ b/src/toast/tests/ops_mapmaker_binning.py @@ -6,13 +6,11 @@ import healpy as hp import numpy as np -import numpy.testing as nt -from astropy import units as u from .. import ops as ops from ..covariance import covariance_apply +from ..footprint import footprint_distribution from ..mpi import MPI -from ..noise import Noise from ..observation import default_values as defaults from ..vis import set_matplotlib_backend from .helpers import close_data, create_outdir, create_satellite_data @@ -128,6 +126,126 @@ def test_binned(self): close_data(data) + def test_binned_load_and_cache(self): + np.random.seed(123456) + testdir = os.path.join(self.outdir, "bin_cache") + cache_dir = os.path.join(testdir, "cache") + + # Create a fake satellite data set for testing + data = create_satellite_data(self.comm, no_det_data=True) + + sim_pipe_ops = list() + + # Create an uncorrelated noise model from focalplane detector properties + default_model = ops.DefaultNoiseModel(noise_model="noise_model") + sim_pipe_ops.append(default_model) + + # Simulate noise + sim_noise = ops.SimNoise(noise_model="noise_model") + sim_pipe_ops.append(sim_noise) + + # Pointing operator + detpointing = ops.PointingDetectorSimple() + pixels = ops.PixelsHealpix( + nside=64, + detector_pointing=detpointing, + ) + weights = ops.StokesWeights( + mode="IQU", + hwp_angle=defaults.hwp_angle, + detector_pointing=detpointing, + ) + sim_pipe_ops.append(pixels) + sim_pipe_ops.append(weights) + + sim_pipe = ops.Pipeline(operators=sim_pipe_ops) + + # Add this simulation pipeline as a loader to each observation + for ob in data.obs: + ob.loader = ops.PipelineLoader(pipeline=sim_pipe) + + # Create a pixel distribution based on a footprint + data["pixel_dist"] = footprint_distribution( + healpix_nside=64, + healpix_nside_submap=16, + comm=data.comm.comm_world, + ) + + # Set up binned map + binner = ops.BinMap( + pixel_dist="pixel_dist", + covariance="covariance", + binned="binned", + hits="hits", + pixel_pointing=pixels, + stokes_weights=weights, + noise_model=default_model.noise_model, + cache_dir=cache_dir, + ) + binner.apply(data) + + binmap = data[binner.binned] + + toast_hit_path = os.path.join( + testdir, "toast_hits.fits" + ) + toast_bin_path = os.path.join( + testdir, "toast_bin.fits" + ) + toast_cov_path = os.path.join( + testdir, "toast_cov.fits" + ) + data[binner.binned].write(toast_bin_path) + data[binner.hits].write(toast_hit_path) + data[binner.covariance].write(toast_cov_path) + + # Manual check + + for ob in data.obs: + ob.detdata.create("signal", units=defaults.det_data_units) + sim_pipe.apply(data) + + pixels.apply(data) + weights.apply(data) + + noise_weight = 
ops.BuildNoiseWeighted( + pixel_dist="pixel_dist", + noise_model=default_model.noise_model, + pixels=pixels.pixels, + weights=weights.weights, + det_data=sim_noise.det_data, + zmap="zmap", + ) + noise_weight.apply(data) + + cov_and_hits = ops.CovarianceAndHits( + pixel_dist="pixel_dist", + pixel_pointing=pixels, + stokes_weights=weights, + noise_model="noise_model", + covariance="cov_check", + hits="hits_check", + rcond="rcond_check", + ) + cov_and_hits.apply(data) + + covariance_apply( + data[cov_and_hits.covariance], data["zmap"], use_alltoallv=False + ) + + comm = binmap.distribution.comm + failed = False + for sm in range(binmap.distribution.n_local_submap): + for px in range(binmap.distribution.n_pix_submap): + if not np.allclose(binmap.data[sm, px], data["zmap"].data[sm, px]): + print(f"FAIL {binmap.data[sm, px]} != {data['zmap'].data[sm, px]}", flush=True) + failed = True + if comm is not None: + failed = comm.allreduce(failed, op=MPI.LOR) + self.assertFalse(failed) + + close_data(data) + def test_compare_madam(self): if not ops.madam.available(): print("libmadam not available, skipping binned map comparison") diff --git a/src/toast/tests/ops_mapmaker_solve.py b/src/toast/tests/ops_mapmaker_solve.py index 6c858f00b..08016edd0 100644 --- a/src/toast/tests/ops_mapmaker_solve.py +++ b/src/toast/tests/ops_mapmaker_solve.py @@ -38,6 +38,9 @@ def test_rhs(self): ) sim_noise.apply(data) + # Make a copy to restore later + ops.Copy(detdata=[(defaults.det_data, "original")]).apply(data) + # Pointing operator detpointing = ops.PointingDetectorSimple() pixels = ops.PixelsHealpix( @@ -105,6 +108,9 @@ def test_rhs(self): # checks things along the way. And these lower-level operators are unit # tested elsewhere as well... + # Restore original TOD + ops.Copy(detdata=[("original", defaults.det_data)]).apply(data) + # Make the binned map in a different location binner.binned = "check" binner.det_data = sim_noise.det_data diff --git a/src/toast/tests/ops_sim_ground.py b/src/toast/tests/ops_sim_ground.py index 9ae84a1c1..651f95b58 100644 --- a/src/toast/tests/ops_sim_ground.py +++ b/src/toast/tests/ops_sim_ground.py @@ -323,3 +323,117 @@ def test_phase(self): del data2 close_data(data1) + + def test_load_pipe(self): + # Slow sampling + fp = fake_hexagon_focalplane( + n_pix=self.npix, + sample_rate=10.0 * u.Hz, + ) + + site = GroundSite("Atacama", "-22:57:30", "-67:47:10", 5200.0 * u.meter) + + tele = Telescope("telescope", focalplane=fp, site=site) + + sch_file = os.path.join(self.outdir, "exec_schedule.txt") + schedule = None + + if self.comm is None or self.comm.rank == 0: + run_scheduler( + opts=[ + "--site-name", + site.name, + "--telescope", + tele.name, + "--site-lon", + "{}".format(site.earthloc.lon.to_value(u.degree)), + "--site-lat", + "{}".format(site.earthloc.lat.to_value(u.degree)), + "--site-alt", + "{}".format(site.earthloc.height.to_value(u.meter)), + "--patch", + "small_patch,1,40,-40,44,-44", + "--start", + "2020-01-01 00:00:00", + "--stop", + "2020-01-01 12:00:00", + "--out", + sch_file, + "--field-separator", + "|", + ] + ) + schedule = GroundSchedule() + schedule.read(sch_file) + if self.comm is not None: + schedule = self.comm.bcast(schedule, root=0) + + # Sort the schedule to exercise that method + schedule.sort_by_RA() + + data = Data(self.toastcomm) + + # Build the loading pipe + detpointing = ops.PointingDetectorSimple() + + load_pipe = ops.Pipeline( + operators = [ + ops.Create(detdata=[("flags", "(1)", "uint8", None)]), + ops.DefaultNoiseModel(noise_model="noise_model"), + 
ops.ElevationNoise( + noise_model="noise_model", + out_model="el_weighted", + detector_pointing=detpointing, + noise_a=0.5, + noise_c=0.5, + ), + ops.SimNoise( + noise_model="el_weighted", det_data=defaults.det_data + ), + ] + ) + + sim_ground = ops.SimGround( + name="sim_ground", + telescope=tele, + schedule=schedule, + hwp_angle=defaults.hwp_angle, + hwp_rpm=1.0, + max_pwv=5 * u.mm, + load_pipe=load_pipe, + det_data=None, + det_flags=None, + ) + sim_ground.apply(data) + + # Expand pointing and make a hit map. + + pixels = ops.PixelsHealpix( + nest=True, + create_dist="pixel_dist", + detector_pointing=detpointing, + ) + + pixels.nside_submap = 8 + pixels.nside = 512 + pixels.apply(data) + + build_hits = ops.BuildHitMap(pixel_dist="pixel_dist", pixels=pixels.pixels) + build_hits.load_apply(data) + + # Plot the hits + + hit_path = os.path.join(self.outdir, "hits.fits") + data[build_hits.hits].write(hit_path) + + if data.comm.world_rank == 0: + set_matplotlib_backend() + import matplotlib.pyplot as plt + + hits = hp.read_map(hit_path, field=None, nest=pixels.nest) + outfile = os.path.join(self.outdir, "hits.png") + hp.mollview(hits, xsize=1600, max=50, nest=pixels.nest) + plt.savefig(outfile) + plt.close() + + close_data(data) diff --git a/src/toast/tests/ops_sim_satellite.py b/src/toast/tests/ops_sim_satellite.py index adc3fb4a9..6f2c9aa31 100644 --- a/src/toast/tests/ops_sim_satellite.py +++ b/src/toast/tests/ops_sim_satellite.py @@ -145,3 +145,82 @@ def test_precession(self): check = qa.rotate(q_prec, zaxis) np.testing.assert_almost_equal(np.dot(check[0], check[-1]), 1.0, decimal=5) + + def test_load_pipe(self): + # Slow sampling + fp = fake_hexagon_focalplane( + n_pix=self.npix, + sample_rate=(5.0 / 60.0) * u.Hz, + ) + site = SpaceSite("L2") + + sch = create_satellite_schedule( + prefix="test_", + mission_start=datetime(2023, 2, 23), + observation_time=24 * u.hour, + gap_time=0 * u.second, + num_observations=30, + prec_period=90 * u.minute, + spin_period=10 * u.minute, + ) + + tele = Telescope("test", focalplane=fp, site=site) + + data = Data(self.toastcomm) + + # Build the loading pipe + load_pipe = ops.Pipeline( + operators = [ + ops.Create(detdata=[("flags", "(1)", "uint8", None)]), + ops.DefaultNoiseModel(noise_model="noise_model"), + ops.SimNoise( + noise_model="noise_model", det_data=defaults.det_data + ), + ] + ) + + # Scan fast enough to cover some sky in a short amount of time. Reduce the + # angles to achieve a more compact hit map. + sim_sat = ops.SimSatellite( + name="sim_sat", + telescope=tele, + schedule=sch, + hwp_angle=defaults.hwp_angle, + hwp_rpm=1.0, + spin_angle=30.0 * u.degree, + prec_angle=65.0 * u.degree, + load_pipe=load_pipe, + det_data=None, + det_flags=None, + ) + sim_sat.apply(data) + + # Expand pointing and make a hit map. 
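The new tests in this diff (test_binned_load_and_cache in ops_mapmaker_binning.py and test_load_pipe in the ground and satellite simulation tests) exercise on-demand loading of detector data instead of simulating it up front. A condensed sketch of that workflow, restricted to calls that appear in these hunks (PipelineLoader, the load_pipe / det_data=None arguments to the simulation operators, and BuildHitMap.load_apply) and assuming the telescope, schedule, pixel, and data objects set up in the tests above; the exact loader semantics are whatever the new operators implement, so treat this as an illustration rather than a reference implementation:

    # Sketch only -- condensed from the new load-pipeline tests in this diff.
    # A pipeline that can materialize detector data when it is needed.
    load_pipe = ops.Pipeline(
        operators=[
            ops.DefaultNoiseModel(noise_model="noise_model"),
            ops.SimNoise(noise_model="noise_model", det_data=defaults.det_data),
        ]
    )

    # The simulation operator creates observations without allocating signal
    # and carries the loading pipeline instead.
    sim_sat = ops.SimSatellite(
        telescope=tele,
        schedule=sch,
        load_pipe=load_pipe,
        det_data=None,
        det_flags=None,
    )
    sim_sat.apply(data)

    # A per-observation loader can also be attached directly.
    for ob in data.obs:
        ob.loader = ops.PipelineLoader(pipeline=sim_pipe)

    # Downstream operators trigger loading via load_apply() instead of apply().
    build_hits = ops.BuildHitMap(pixel_dist="pixel_dist", pixels=pixels.pixels)
    build_hits.load_apply(data)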
+ detpointing = ops.PointingDetectorSimple() + pixels = ops.PixelsHealpix( + nest=True, + create_dist="pixel_dist", + detector_pointing=detpointing, + ) + pixels.nside_submap = 2 + pixels.nside = 8 + pixels.apply(data) + + build_hits = ops.BuildHitMap(pixel_dist="pixel_dist", pixels=pixels.pixels) + build_hits.load_apply(data) + + # Plot the hits + + hit_path = os.path.join(self.outdir, "hits.fits") + data[build_hits.hits].write(hit_path) + + if data.comm.world_rank == 0: + set_matplotlib_backend() + import matplotlib.pyplot as plt + + hits = hp.read_map(hit_path, field=None, nest=pixels.nest) + outfile = os.path.join(self.outdir, "hits.png") + hp.mollview(hits, xsize=1600, nest=True) + plt.savefig(outfile) + plt.close() + close_data(data) diff --git a/src/toast/tests/runner.py b/src/toast/tests/runner.py index 5701ca52a..d3b25d235 100644 --- a/src/toast/tests/runner.py +++ b/src/toast/tests/runner.py @@ -28,6 +28,7 @@ from . import math_misc as test_math_misc from . import noise as test_noise from . import observation as test_observation +from . import ops_accum_obs as test_ops_accum_obs from . import ops_azimuth_intervals as test_ops_azimuth_intervals from . import ops_cadence_map as test_ops_cadence_map from . import ops_common_mode_noise as test_ops_common_mode_noise diff --git a/src/toast/tests/template_offset.py b/src/toast/tests/template_offset.py index dec68e0d6..41ff58d25 100644 --- a/src/toast/tests/template_offset.py +++ b/src/toast/tests/template_offset.py @@ -54,9 +54,9 @@ def test_projection(self): amps.local[:] = 1.0 # Project. - for det in tmpl.detectors(): - for ob in data.obs: - tmpl.add_to_signal(det, amps) + for ob in data.obs: + dets = ob.select_local_detectors() + tmpl.add_to_signal(ob, dets, amps) # Verify for ob in data.obs: @@ -64,9 +64,9 @@ def test_projection(self): np.testing.assert_equal(ob.detdata[defaults.det_data][det], 1.0) # Accumulate amplitudes - for det in tmpl.detectors(): - for ob in data.obs: - tmpl.project_signal(det, amps) + for ob in data.obs: + dets = ob.select_local_detectors() + tmpl.project_signal(ob, dets, amps) # Verify for ob in data.obs: @@ -140,14 +140,14 @@ def test_accel(self): amps.accel_update_device() # Project. 
- for det in tmpl.detectors(): - for ob in data.obs: - tmpl.add_to_signal(det, amps, use_accel=True) + for ob in data.obs: + dets = ob.select_local_detectors() + tmpl.add_to_signal(ob, dets, amps, use_accel=True) # Accumulate amplitudes - for det in tmpl.detectors(): - for ob in data.obs: - tmpl.project_signal(det, amps, use_accel=True) + for ob in data.obs: + dets = ob.select_local_detectors() + tmpl.project_signal(ob, dets, amps, use_accel=True) data.accel_update_host(data_names) amps.accel_update_host() @@ -270,19 +270,19 @@ def test_accel_pipeline(self): data.obs[0].select_local_detectors(flagmask=defaults.det_mask_invalid) ) expected = list() - for det in all_dets: - for iob, ob in enumerate(data.obs): - # Get the step boundaries - (rate, dt, dt_min, dt_max, dt_std) = rate_from_times( - ob.shared[defaults.times] - ) - step_samples = int(step_seconds * rate + 0.5) - - n_step = ob.n_local_samples // step_samples - if n_step * step_samples < ob.n_local_samples: - n_step += 1 - sizes = [step_samples for x in range(n_step - 1)] - sizes.append(ob.n_local_samples - (n_step - 1) * step_samples) + for iob, ob in enumerate(data.obs): + # Get the step boundaries + (rate, dt, dt_min, dt_max, dt_std) = rate_from_times( + ob.shared[defaults.times] + ) + step_samples = int(step_seconds * rate + 0.5) + + n_step = ob.n_local_samples // step_samples + if n_step * step_samples < ob.n_local_samples: + n_step += 1 + sizes = [step_samples for x in range(n_step - 1)] + sizes.append(ob.n_local_samples - (n_step - 1) * step_samples) + for det in ob.select_local_detectors(): expected.extend(sizes) offset += len(sizes) @@ -338,12 +338,12 @@ def test_compare(self): pyamps.local[:] = 1.0 # Project. - for det in tmpl.detectors(): - for ob in data.obs: - tmpl.add_to_signal(det, amps) - for det in pytmpl.detectors(): - for ob in data.obs: - pytmpl.add_to_signal(det, pyamps) + for ob in data.obs: + dets = ob.select_local_detectors() + tmpl.add_to_signal(ob, dets, amps) + for ob in data.obs: + dets = ob.select_local_detectors() + pytmpl.add_to_signal(ob, dets, pyamps) for ob in data.obs: np.testing.assert_allclose( @@ -351,12 +351,12 @@ def test_compare(self): ) # Accumulate amplitudes - for det in tmpl.detectors(): - for ob in data.obs: - tmpl.project_signal(det, amps) - for det in pytmpl.detectors(): - for ob in data.obs: - pytmpl.project_signal(det, pyamps) + for ob in data.obs: + dets = ob.select_local_detectors() + tmpl.project_signal(ob, dets, amps) + for ob in data.obs: + dets = ob.select_local_detectors() + pytmpl.project_signal(ob, dets, pyamps) # Verify np.testing.assert_allclose(amps.local, pyamps.local)
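The template_offset.py hunks above reflect an API change in the Template interface: the per-detector add_to_signal / project_signal calls are replaced by per-observation calls that take an explicit detector list. A minimal before/after sketch of the calling convention, assuming the signatures shown in this diff and an existing data, tmpl, and amps from the surrounding test:

    # Old convention (removed in this diff): outer loop over detectors.
    # for det in tmpl.detectors():
    #     for ob in data.obs:
    #         tmpl.add_to_signal(det, amps)

    # New convention: one call per observation with the selected detectors.
    for ob in data.obs:
        dets = ob.select_local_detectors()
        tmpl.add_to_signal(ob, dets, amps)    # amplitudes accumulated into detector data
        tmpl.project_signal(ob, dets, amps)   # detector data projected onto amplitudes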