diff --git a/CHANGELOG.md b/CHANGELOG.md
index 111f92510..0e0ece52c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,7 +1,7 @@
 ## 3.x
 
 ### Cosmology
 
-- Added DESI 1yr BAO data, Pantheon Plus and DESY5 SN (thanks DESI team, @WillMatt4 and @rodri981)
+- Added DESI 1yr BAO data and SN from Pantheon Plus, DESY5 and Union3 (thanks DESI team, @adematti, @rubind, @WillMatt4 and @rodri981)
 
 ## 3.5 – 2024-02-16
diff --git a/cobaya/cosmo_input/input_database.py b/cobaya/cosmo_input/input_database.py
index 50f050c36..6414d74d3 100644
--- a/cobaya/cosmo_input/input_database.py
+++ b/cobaya/cosmo_input/input_database.py
@@ -543,6 +543,14 @@
         "desc": "Supernovae data from the Pantheon+ sample",
         "theory": theory,
         "likelihood": {"sn.pantheonplus": None}},
+    "Union3": {
+        "desc": "Supernovae data from Union3",
+        "theory": theory,
+        "likelihood": {"sn.union3": None}},
+    "DESY5": {
+        "desc": "Supernovae data from the DES Y5 sample",
+        "theory": theory,
+        "likelihood": {"sn.desy5": None}},
     "Pantheon": {
         "desc": "Supernovae data from the Pantheon sample",
         "theory": theory,
diff --git a/cobaya/install.py b/cobaya/install.py
index 102700288..af6a6e8a5 100644
--- a/cobaya/install.py
+++ b/cobaya/install.py
@@ -275,11 +275,11 @@ def _imported_class():
                 continue
             elif is_old_version_msg:
                 logger.info(f"Version check failed: {is_old_version_msg}")
-                obsolete_components += [name_w_kind]
                 if kwargs.get("test"):
                     continue
                 if not kwargs.get("upgrade") and not kwargs.get("force"):
                     logger.info("Skipping because '--upgrade' not requested.")
+                    obsolete_components += [name_w_kind]
                     continue
             else:
                 logger.info("Check found no existing installation")
@@ -396,7 +396,7 @@ def download_file(url, path, *, size=None, no_progress_bars=False, logger=None):
         file (e.g. when running on a cluster).
     :param logger: logger to use for reporting information; a new logger is created if not
         specified.
-    :return: ``True`` if the download (and decompression, if requested) was successfull,
+    :return: ``True`` if the download (and decompression, if requested) was successful,
         and ``False`` otherwise.
     """
     logger = logger or get_logger("install")
@@ -471,7 +471,7 @@ def download_github_release(base_directory, repo_name, release_name=None, *, ass
         file (e.g. when running on a cluster).
     :param logger: logger to use for reporting information; a new logger is created if not
         specified.
-    :return: ``True`` if the download was successfull, and ``False`` otherwise.
+    :return: ``True`` if the download was successful, and ``False`` otherwise.
     """
     logger = logger or get_logger("install")
     if "/" in repo_name:
diff --git a/cobaya/likelihoods/base_classes/sn.py b/cobaya/likelihoods/base_classes/sn.py
index 49179e3a3..9b5d070b7 100644
--- a/cobaya/likelihoods/base_classes/sn.py
+++ b/cobaya/likelihoods/base_classes/sn.py
@@ -119,7 +119,7 @@ class SN(DataSetLikelihood):
     type = "SN"
 
     install_options = {"github_repository": "CobayaSampler/sn_data",
-                       "github_release": "v1.5"}
+                       "github_release": "v1.6"}
 
     def init_params(self, ini):
@@ -303,7 +303,7 @@ def alpha_beta_logp(self, lumdists, alpha=0, beta=0, Mb=0, invcovmat=None):
         if self.alphabeta_covmat:
             if self.use_abs_mag:
                 self.log.warning("You seem to be using JLA with the absolute magnitude "
-                                 "module. JLA uses a different callibration, the Mb "
+                                 "module. JLA uses a different calibration, the Mb "
                                  "module only works with Pantheon SNe!")
                 estimated_scriptm = Mb + 25
             else:
@@ -388,3 +388,7 @@ def logp(self, **params_values):
                                        params_values[self.beta_name], Mb)
         else:
             return self.alpha_beta_logp(lumdists, Mb=Mb)
+
+    @classmethod
+    def get_file_base_name(cls) -> str:
+        return cls.__dict__.get('file_base_name') or cls.__name__.lower()
diff --git a/cobaya/likelihoods/sn/desy5.py b/cobaya/likelihoods/sn/desy5.py
index 768e2a8ff..4702dd81d 100644
--- a/cobaya/likelihoods/sn/desy5.py
+++ b/cobaya/likelihoods/sn/desy5.py
@@ -1,9 +1,7 @@
-import os
-import numpy as np
-from .pantheonplus import pantheonplus
+from .pantheonplus import PantheonPlus
 
 
-class desy5(pantheonplus):
+class DESy5(PantheonPlus):
     """
     Likelihood for DES-Y5 type Ia supernovae sample.
 
@@ -12,46 +10,10 @@
     https://arxiv.org/abs/2401.02929
     """
 
-    def init_params(self, ini):
-        self.twoscriptmfit = False
-        data_file = os.path.normpath(os.path.join(self.path, ini.string("data_file")))
-        self._read_data_file(data_file)
-        self.covs = {}
-        for name in ['mag']:
-            self.log.debug('Reading covmat for: %s ' % name)
-            self.covs[name] = self._read_covmat(
-                os.path.join(self.path, ini.string('%s_covmat_file' % name)))
-        self.alphabeta_covmat = False
-        self.pre_vars = self.mag_err ** 2  # diagonal component
-        self.inverse_covariance_matrix()
-        if not self.use_abs_mag:
-            self._marginalize_abs_mag()
-        self.marginalize = False
+    def configure(self):
+        self.pre_vars = self.mag_err ** 2
 
     def _read_data_file(self, data_file):
-        self.log.debug('Reading %s' % data_file)
-        sep = ','
-        self.names = []
-        oldcols = ['zhd', 'zhel', 'mu', 'muerr_final']
+        file_cols = ['zhd', 'zhel', 'mu', 'muerr_final']
         self.cols = ['zcmb', 'zhel', 'mag', 'mag_err']
-        with open(data_file, 'r') as f:
-            lines = f.readlines()
-        for iline, line in enumerate(lines):
-            if not line.startswith('#'):
-                break
-        lines = lines[iline:]
-        line = lines[0]
-        cols = [col.strip().lower() for col in line.split(sep)]
-        indices = [cols.index(col) for col in oldcols]
-        zeros = np.zeros(len(lines) - 1)
-        dtypes = {}
-        for col in self.cols:
-            setattr(self, col, zeros.astype(dtype=dtypes.get(col, 'f8'), copy=True))
-        for ix, line in enumerate(lines[1:]):
-            vals = [val.strip() for val in line.split(sep)]
-            vals = [vals[i] for i in indices]
-            for i, (col, val) in enumerate(zip(self.cols, vals)):
-                tmp = getattr(self, col)
-                tmp[ix] = np.asarray(val, dtype=tmp.dtype)
-        self.nsn = ix + 1
-        self.log.debug('Number of SN read: %s ' % self.nsn)
+        self._read_cols(data_file, file_cols, sep=',')
diff --git a/cobaya/likelihoods/sn/desy5.yaml b/cobaya/likelihoods/sn/desy5.yaml
index bec3bf335..f7ac4417f 100644
--- a/cobaya/likelihoods/sn/desy5.yaml
+++ b/cobaya/likelihoods/sn/desy5.yaml
@@ -9,7 +9,7 @@ dataset_params:
 # field: value
 # Aliases for automatic covariance matrix
 aliases: [DESY5]
-# Use absolute magnitude 
+# Use absolute magnitude
 use_abs_mag: False
 # Speed in evaluations/second
 speed: 100
diff --git a/cobaya/likelihoods/sn/jla.py b/cobaya/likelihoods/sn/jla.py
index f38cfcc34..1058d017b 100644
--- a/cobaya/likelihoods/sn/jla.py
+++ b/cobaya/likelihoods/sn/jla.py
@@ -1,7 +1,7 @@
 from cobaya.likelihoods.base_classes import SN
 
 
-class jla(SN):
+class JLA(SN):
     r"""
     Likelihood of the JLA type Ia supernova sample \cite{Betoule:2014frx}, based on
     observations obtained by the SDSS-II and SNLS collaborations.
diff --git a/cobaya/likelihoods/sn/jla_lite.py b/cobaya/likelihoods/sn/jla_lite.py
index 0689b92fa..079e0becf 100644
--- a/cobaya/likelihoods/sn/jla_lite.py
+++ b/cobaya/likelihoods/sn/jla_lite.py
@@ -1,7 +1,7 @@
 from cobaya.likelihoods.base_classes import SN
 
 
-class jla_lite(SN):
+class JLA_lite(SN):
     r"""
     Likelihood (marginalized over nuisance parameters) of the JLA type Ia supernova
     sample \cite{Betoule:2014frx}, based on observations obtained by the SDSS-II and SNLS
diff --git a/cobaya/likelihoods/sn/jla_lite.yaml b/cobaya/likelihoods/sn/jla_lite.yaml
index dee25b424..2bd40202f 100644
--- a/cobaya/likelihoods/sn/jla_lite.yaml
+++ b/cobaya/likelihoods/sn/jla_lite.yaml
@@ -15,7 +15,7 @@ marginalize: True
 # If marginalizing, pre-compute covariance inverses.
 # Faster, at expense of memory (~600MB).
 precompute_covmats: True
-# Use absolute magnitude 
+# Use absolute magnitude
 use_abs_mag: False
 # Options for the grid marginalization
 marginalize_params:
diff --git a/cobaya/likelihoods/sn/pantheon.py b/cobaya/likelihoods/sn/pantheon.py
index 62bb1cc8b..35c557004 100644
--- a/cobaya/likelihoods/sn/pantheon.py
+++ b/cobaya/likelihoods/sn/pantheon.py
@@ -1,7 +1,7 @@
 from cobaya.likelihoods.base_classes import SN
 
 
-class pantheon(SN):
+class Pantheon(SN):
     r"""
     Likelihood of the Pantheon type Ia supernova sample \cite{Scolnic:2017caz},
     including data from the Pan-STARRS1 (PS1) Medium Deep Survey.
diff --git a/cobaya/likelihoods/sn/pantheon.yaml b/cobaya/likelihoods/sn/pantheon.yaml
index d46e6e7cc..f0e3eacd1 100644
--- a/cobaya/likelihoods/sn/pantheon.yaml
+++ b/cobaya/likelihoods/sn/pantheon.yaml
@@ -6,10 +6,10 @@ path: null
 dataset_file: Pantheon/full_long.dataset
 # Overriding of .dataset parameters
 dataset_params:
-  # field: value
+# field: value
 # Aliases for automatic covariance matrix
-aliases: [Pantheon, Pantheon18]
-# Use absolute magnitude 
+aliases: [ Pantheon, Pantheon18 ]
+# Use absolute magnitude
 use_abs_mag: False
 # Speed in evaluations/second
 speed: 100
\ No newline at end of file
diff --git a/cobaya/likelihoods/sn/pantheonplus.py b/cobaya/likelihoods/sn/pantheonplus.py
index 3a29baf82..1a52f5145 100644
--- a/cobaya/likelihoods/sn/pantheonplus.py
+++ b/cobaya/likelihoods/sn/pantheonplus.py
@@ -3,7 +3,7 @@
 from cobaya.likelihoods.base_classes import SN
 
 
-class pantheonplus(SN):
+class PantheonPlus(SN):
     """
     Likelihood for Pantheon+ (without SH0ES) type Ia supernovae sample.
 
@@ -14,8 +14,6 @@
 
     def init_params(self, ini):
         self.twoscriptmfit = False
-        # self.pecz = 0.
-        # self.has_third_var = False
         data_file = os.path.normpath(os.path.join(self.path, ini.string("data_file")))
         self._read_data_file(data_file)
         self.covs = {}
@@ -24,32 +22,37 @@
             self.log.debug('Reading covmat for: %s ' % name)
             self.covs[name] = self._read_covmat(
                 os.path.join(self.path, ini.string('%s_covmat_file' % name)))
         self.alphabeta_covmat = False
-        zmask = self.zcmb > 0.01
+        self.configure()
+        self.inverse_covariance_matrix()
+        if not self.use_abs_mag:
+            self._marginalize_abs_mag()
+        self.marginalize = False
+
+    def _apply_mask(self, zmask):
         for col in self.cols:
             setattr(self, col, getattr(self, col)[zmask])
         for name, cov in self.covs.items():
             self.covs[name] = cov[np.ix_(zmask, zmask)]
+
+    def configure(self):
+        self._apply_mask(zmask=self.zcmb > 0.01)
         self.pre_vars = 0.  # diagonal component
-        self.inverse_covariance_matrix()
-        if not self.use_abs_mag:
-            self._marginalize_abs_mag()
-        self.marginalize = False
 
-    def _read_data_file(self, data_file):
+    def _read_cols(self, data_file, file_cols, sep=None):
         self.log.debug('Reading %s' % data_file)
-        oldcols = ['m_b_corr', 'zhd', 'zhel', 'is_calibrator']
-        self.cols = ['mag', 'zcmb', 'zhel', 'is_calibrator']
         with open(data_file, 'r') as f:
             lines = f.readlines()
         line = lines[0]
-        cols = [col.strip().lower() for col in line.split()]
-        indices = [cols.index(col) for col in oldcols]
+        if line.startswith('#'):
+            line = line[1:]
+        cols = [col.strip().lower() for col in line.split(sep)]
+        assert cols[0].isalpha()
+        indices = [cols.index(col) for col in file_cols]
         zeros = np.zeros(len(lines) - 1)
-        dtypes = {'is_calibrator': '?'}
         for col in self.cols:
-            setattr(self, col, zeros.astype(dtype=dtypes.get(col, 'f8'), copy=True))
+            setattr(self, col, zeros.astype(dtype='f8', copy=True))
         for ix, line in enumerate(lines[1:]):
-            vals = [val.strip() for val in line.split()]
+            vals = [val.strip() for val in line.split(sep)]
             vals = [vals[i] for i in indices]
             for i, (col, val) in enumerate(zip(self.cols, vals)):
                 tmp = getattr(self, col)
@@ -57,13 +60,18 @@
         self.nsn = ix + 1
         self.log.debug('Number of SN read: %s ' % self.nsn)
 
+    def _read_data_file(self, data_file):
+        file_cols = ['m_b_corr', 'zhd', 'zhel']
+        self.cols = ['mag', 'zcmb', 'zhel']
+        self._read_cols(data_file, file_cols)
+
     def _marginalize_abs_mag(self):
         deriv = np.ones_like(self.mag)[:, None]
         derivp = self.invcov.dot(deriv)
         fisher = deriv.T.dot(derivp)
         self.invcov = self.invcov - derivp.dot(np.linalg.solve(fisher, derivp.T))
 
-    def alpha_beta_logp(self, lumdists, Mb=0):
+    def alpha_beta_logp(self, lumdists, Mb=0., **kwargs):
         if self.use_abs_mag:
             estimated_scriptm = Mb + 25
         else:
diff --git a/cobaya/likelihoods/sn/pantheonplusshoes.py b/cobaya/likelihoods/sn/pantheonplusshoes.py
index 3dc6579c5..6e1da8693 100644
--- a/cobaya/likelihoods/sn/pantheonplusshoes.py
+++ b/cobaya/likelihoods/sn/pantheonplusshoes.py
@@ -1,9 +1,7 @@
-import os
-import numpy as np
-from .pantheonplus import pantheonplus
+from .pantheonplus import PantheonPlus
 
 
-class pantheonplusshoes(pantheonplus):
+class PantheonPlusShoes(PantheonPlus):
     """
     Likelihood for Pantheon+ (with SH0ES) type Ia supernovae sample.
 
@@ -12,37 +10,23 @@
     https://arxiv.org/abs/2202.04077
     """
 
-    def init_params(self, ini):
-        self.twoscriptmfit = False
-        # self.pecz = 0.
-        # self.has_third_var = False
-        data_file = os.path.normpath(os.path.join(self.path, ini.string("data_file")))
-        self._read_data_file(data_file)
-        self.covs = {}
-        for name in ['mag']:
-            self.log.debug('Reading covmat for: %s ' % name)
-            self.covs[name] = self._read_covmat(
-                os.path.join(self.path, ini.string('%s_covmat_file' % name)))
-        self.alphabeta_covmat = False
-        zmask = (self.zcmb > 0.01) | self.is_calibrator
-        for col in self.cols:
-            setattr(self, col, getattr(self, col)[zmask])
-        for name, cov in self.covs.items():
-            self.covs[name] = cov[np.ix_(zmask, zmask)]
+    def configure(self):
+        self._apply_mask((self.zcmb > 0.01) | self.is_calibrator)
         self.pre_vars = 0.  # diagonal component
-        self.inverse_covariance_matrix()
-        if not self.use_abs_mag:
-            self._marginalize_abs_mag()
-        self.marginalize = False
-        print(self.zcmb)
 
-    def alpha_beta_logp(self, lumdists, Mb=0):
+    def _read_data_file(self, data_file):
+        file_cols = ['m_b_corr', 'zhd', 'zhel', 'is_calibrator', 'ceph_dist']
+        self.cols = ['mag', 'zcmb', 'zhel', 'is_calibrator', 'ceph_dist']
+        self._read_cols(data_file, file_cols)
+        self.is_calibrator = self.is_calibrator.astype(dtype='?')
+
+    def alpha_beta_logp(self, lumdists, Mb=0, **kwargs):
         if self.use_abs_mag:
             estimated_scriptm = Mb + 25
         else:
             estimated_scriptm = 0.
 
         # Use Cepheids host distances as theory
-        lumdists[self.is_calibrator] = self.ceph_dist[self.is_calibrator]
+        lumdists[self.is_calibrator] = self.ceph_dist[self.is_calibrator] - 25.
         diffmag = self.mag - lumdists - estimated_scriptm
         return - diffmag.dot(self.invcov).dot(diffmag) / 2.
diff --git a/cobaya/likelihoods/sn/union3.bibtex b/cobaya/likelihoods/sn/union3.bibtex
new file mode 100644
index 000000000..77d112af8
--- /dev/null
+++ b/cobaya/likelihoods/sn/union3.bibtex
@@ -0,0 +1,9 @@
+@article{Rubin:2023ovl,
+    author = "Rubin, David and others",
+    title = "{Union Through UNITY: Cosmology with 2,000 SNe Using a Unified Bayesian Framework}",
+    eprint = "2311.12098",
+    archivePrefix = "arXiv",
+    primaryClass = "astro-ph.CO",
+    month = "11",
+    year = "2023"
+}
\ No newline at end of file
diff --git a/cobaya/likelihoods/sn/union3.py b/cobaya/likelihoods/sn/union3.py
new file mode 100644
index 000000000..e5bb7ab9a
--- /dev/null
+++ b/cobaya/likelihoods/sn/union3.py
@@ -0,0 +1,20 @@
+from .pantheonplus import PantheonPlus
+
+
+class Union3(PantheonPlus):
+    """
+    Likelihood for the Union3 & UNITY1.5 type Ia supernovae sample.
+
+    Reference
+    ---------
+    https://arxiv.org/pdf/2311.12098.pdf
+    """
+
+    def configure(self):
+        self.pre_vars = 0.  # diagonal component
+
+    def _read_data_file(self, data_file):
+        file_cols = ['zcmb', 'mb']
+        self.cols = ['zcmb', 'mag']
+        self._read_cols(data_file, file_cols)
+        self.zhel = self.zcmb
diff --git a/cobaya/likelihoods/sn/union3.yaml b/cobaya/likelihoods/sn/union3.yaml
new file mode 100644
index 000000000..87792d6cd
--- /dev/null
+++ b/cobaya/likelihoods/sn/union3.yaml
@@ -0,0 +1,13 @@
+# Settings for the Union3 SN Ia analysis.
+
+# .dataset file with settings
+dataset_file: Union3/full_long.dataset
+# Overriding of .dataset parameters
+dataset_params:
+# field: value
+# Aliases for automatic covariance matrix
+aliases: [ Union3 ]
+# Use absolute magnitude
+use_abs_mag: False
+# Speed in evaluations/second
+speed: 100
\ No newline at end of file
diff --git a/tests/test_cosmo_sn.py b/tests/test_cosmo_sn.py
index b04856585..d297471ea 100644
--- a/tests/test_cosmo_sn.py
+++ b/tests/test_cosmo_sn.py
@@ -3,21 +3,21 @@
 from .common_cosmo import body_of_test
 
 
+def _test_sn(packages_path, skip_not_installed, lik, theory='camb', lik_params={}):
+    info_likelihood = {lik: lik_params}
+    info_theory = {theory: None}
+    ref_chi2 = {"tolerance": 0.1, lik: chi2_sn[lik]}
+    body_of_test(packages_path, best_fit, info_likelihood, info_theory, ref_chi2,
+                 skip_not_installed=skip_not_installed)
+
+
 # Pantheon (alpha and beta not used - no nuisance parameters), fast
 def test_sn_pantheon_camb(packages_path, skip_not_installed):
-    lik = "sn.pantheon"
-    info_likelihood = {lik: {}}
-    info_theory = {"camb": None}
-    body_of_test(packages_path, best_fit, info_likelihood, info_theory, chi2_sn_pantheon,
-                 skip_not_installed=skip_not_installed)
+    _test_sn(packages_path, skip_not_installed, "sn.pantheon")
 
 
 def test_sn_pantheon_classy(packages_path, skip_not_installed):
-    lik = "sn.pantheon"
-    info_likelihood = {lik: {}}
-    info_theory = {"classy": None}
-    body_of_test(packages_path, best_fit, info_likelihood, info_theory, chi2_sn_pantheon,
-                 skip_not_installed=skip_not_installed)
+    _test_sn(packages_path, skip_not_installed, "sn.pantheon", "classy")
 
 
 # JLA
@@ -27,7 +27,8 @@ def test_sn_jla_camb(packages_path, skip_not_installed):
     lik = "sn.jla"
     info_likelihood = {lik: {}}
     info_theory = {"camb": None}
-    body_of_test(packages_path, best_fit_test, info_likelihood, info_theory, chi2_sn_jla,
+    ref_chi2 = {"tolerance": 0.1, lik: chi2_sn[lik]}
+    body_of_test(packages_path, best_fit_test, info_likelihood, info_theory, ref_chi2,
                  skip_not_installed=skip_not_installed)
 
 
@@ -37,26 +38,21 @@ def test_sn_jla_classy(packages_path, skip_not_installed):
     lik = "sn.jla"
     info_likelihood = {lik: {}}
     info_theory = {"classy": None}
-    body_of_test(packages_path, best_fit_test, info_likelihood, info_theory, chi2_sn_jla,
+    ref_chi2 = {"tolerance": 0.1, lik: chi2_sn[lik]}
+    body_of_test(packages_path, best_fit_test, info_likelihood, info_theory, ref_chi2,
                  skip_not_installed=skip_not_installed)
 
 
 # JLA marginalized over alpha, beta
 def test_sn_jla_lite_camb(packages_path, skip_not_installed):
-    lik = "sn.jla_lite"
-    info_likelihood = {lik: {"marginalize": True}}
-    info_theory = {"camb": None}
-    body_of_test(packages_path, best_fit, info_likelihood, info_theory, chi2_sn_jla_lite,
-                 skip_not_installed=skip_not_installed)
+    _test_sn(packages_path, skip_not_installed, "sn.jla_lite", "camb",
+             {"marginalize": True})
 
 
 # JLA marginalized over alpha, beta (slow version!)
 def test_sn_jla_lite_slow_camb(packages_path, skip_not_installed):
-    lik = "sn.jla_lite"
-    info_likelihood = {lik: {"marginalize": True, "precompute_covmats": False}}
-    info_theory = {"camb": None}
-    body_of_test(packages_path, best_fit, info_likelihood, info_theory, chi2_sn_jla_lite,
-                 skip_not_installed=skip_not_installed)
+    _test_sn(packages_path, skip_not_installed, "sn.jla_lite", "camb",
+             {"marginalize": True, "precompute_covmats": False})
 
 
 def test_sn_pantheon_Mb(packages_path, skip_not_installed):
@@ -68,13 +64,29 @@
                  chi2_sn_pantheon_Mb, skip_not_installed=skip_not_installed)
 
 
+def test_sn_pantheonplus_camb(packages_path, skip_not_installed):
+    _test_sn(packages_path, skip_not_installed, "sn.pantheonplus")
+
+
+def test_sn_pantheonplusshoes_camb(packages_path, skip_not_installed):
+    _test_sn(packages_path, skip_not_installed, "sn.pantheonplusshoes")
+
+
+def test_sn_union3_camb(packages_path, skip_not_installed):
+    _test_sn(packages_path, skip_not_installed, "sn.union3")
+
+
+def test_sn_desy5_camb(packages_path, skip_not_installed):
+    _test_sn(packages_path, skip_not_installed, "sn.desy5")
+
+
 # BEST FIT AND REFERENCE VALUES ##########################################################
 best_fit = deepcopy(params_lowTEB_highTTTEEE)
 best_fit_sn = {"alpha_jla": 0.1325237, "beta_jla": 2.959805}
 best_fit_Mb = {"Mb": -19.2}
-chi2_sn_pantheon = {"sn.pantheon": 1035.30, "tolerance": 0.1}
 chi2_sn_pantheon_Mb = {"sn.pantheon": 4025.30, "H0.riess2020Mb": 1.65, "tolerance": 0.1}
-chi2_sn_jla = {"sn.jla": 700.582, "tolerance": 0.1}
-chi2_sn_jla_lite = {"sn.jla_lite": 706.882, "tolerance": 0.1}
+chi2_sn = {"sn.pantheon": 1035.30, "sn.jla": 700.582, "sn.jla_lite": 706.882,
+           "sn.pantheonplus": 1403.69, "sn.pantheonplusshoes": 1496.97,
+           "sn.union3": 26.31, "sn.desy5": 1644.94}