From 73a3af68095e58f4c3842b0a75732f14bf9583d5 Mon Sep 17 00:00:00 2001 From: Tomas Stolker Date: Tue, 24 Sep 2024 13:25:08 +0200 Subject: [PATCH 1/9] Added the MultiNest class to the sampler module --- docs/faq.rst | 6 +- orbitize/driver.py | 4 +- orbitize/results.py | 19 +++-- orbitize/sampler.py | 194 ++++++++++++++++++++++++++++++++++++++++---- requirements.txt | 1 + 5 files changed, 196 insertions(+), 28 deletions(-) diff --git a/docs/faq.rst b/docs/faq.rst index 3badf038..d62eb1b1 100644 --- a/docs/faq.rst +++ b/docs/faq.rst @@ -27,10 +27,10 @@ imaged planets. A detailed description of how τ is related to other quantities Our goal with the default prior is to have an isotropic distribution of the orbital plane. To obtain this, we use inclination and position angle of the ascending node (PAN) to -define the orbital plane. They actually coorespond to the two angles in a spherical coordinate system: -inclinaion is the polar angle and PAN is the azimuthal angle. Becuase of this choice of coordinates, +define the orbital plane. They actually correspond to the two angles in a spherical coordinate system: +inclination is the polar angle and PAN is the azimuthal angle. Because of this choice of coordinates, there are less orbital configurations near the poles (when inclination is near 0 or 180), so we use -a sine prior to downweigh those areas to give us an isotropic distribution. +a sine prior to downweight those areas to give us an isotropic distribution. A more detailed discussion of this is here: .. toctree:: diff --git a/orbitize/driver.py b/orbitize/driver.py index f92b2cdf..ebcbdd01 100644 --- a/orbitize/driver.py +++ b/orbitize/driver.py @@ -16,7 +16,9 @@ class Driver(object): input_data: Either a relative path to data file or astropy.table.Table object in the orbitize format. See ``orbitize.read_input`` sampler_str (str): algorithm to use for orbit computation. "MCMC" for - Markov Chain Monte Carlo, "OFTI" for Orbits for the Impatient + Markov Chain Monte Carlo, "OFTI" for Orbits for the Impatient, + "NestedSampler" for nested sampling with Dynesty, or "MultiNest" + for nested sampling with (Py)MultiNest. num_secondary_bodies (int): number of secondary bodies in the system. Should be at least 1. 
stellar_or_system_mass (float): mass of the primary star (if fitting for diff --git a/orbitize/results.py b/orbitize/results.py index cecacaff..12500e24 100644 --- a/orbitize/results.py +++ b/orbitize/results.py @@ -45,6 +45,8 @@ def __init__( self.lnlike = lnlike self.curr_pos = curr_pos self.version_number = version_number + self.ln_evidence = None + self.ln_evidence_err = None if self.system is not None: self.tau_ref_epoch = self.system.tau_ref_epoch @@ -114,6 +116,12 @@ def save_results(self, filename): hf.attrs['sampler_name'] = self.sampler_name hf.attrs['version_number'] = self.version_number + if self.ln_evidence is not None: + hf.attrs['ln_evidence'] = self.ln_evidence + + if self.ln_evidence_err is not None: + hf.attrs['ln_evidence_err'] = self.ln_evidence_err + # Now add post and lnlike from the results object as datasets hf.create_dataset('post', data=self.post) # hf.create_dataset('data', data=self.data) @@ -147,19 +155,18 @@ def load_results(self, filename, append=False): hf = h5py.File(filename, 'r') # Opens file for reading # Load up each dataset from hdf5 file sampler_name = str(hf.attrs['sampler_name']) - try: + if 'version_number' in hf.attrs: version_number = str(hf.attrs['version_number']) - except KeyError: + else: version_number = "<= 1.13" post = np.array(hf.get('post')) lnlike = np.array(hf.get('lnlike')) - - try: + if 'num_secondary_bodies' in hf.attrs: num_secondary_bodies = int(hf.attrs['num_secondary_bodies']) - except KeyError: + else: # old, has to be single planet fit - num_secondary_bodies = 1 + num_secondary_bodies = 1 try: data_table = table.Table(np.array(hf.get('data'))) diff --git a/orbitize/sampler.py b/orbitize/sampler.py index 5ee50f21..1e7fb82a 100644 --- a/orbitize/sampler.py +++ b/orbitize/sampler.py @@ -1319,8 +1319,18 @@ class NestedSampler(Sampler): Thea McKenna, Sarah Blunt, & Lea Hirsch 2024 """ - def __init__(self, system): - super(NestedSampler, self).__init__(system) + def __init__(self, + system, + chi2_type="standard", + like="chi2_lnlike", + custom_lnlike=None, + ): + super(NestedSampler, self).__init__( + system, + like=like, + chi2_type=chi2_type, + custom_lnlike=custom_lnlike, + ) # create an empty results object self.results = orbitize.results.Results( @@ -1346,14 +1356,16 @@ def ptform(self, u): """ utform = np.zeros(len(u)) for i in range(len(u)): - try: + if hasattr(self.system.sys_priors[i], 'transform_samples'): utform[i] = self.system.sys_priors[i].transform_samples(u[i]) - except AttributeError: # prior is a fixed number + else: + # prior is a fixed number utform[i] = self.system.sys_priors[i] return utform def run_sampler( self, + nlive=500, static=False, bound="multi", pfrac=1.0, @@ -1364,6 +1376,11 @@ def run_sampler( """Runs the nested sampler from the Dynesty package. Args: + nlive (int): Number of live points. A larger numbers results + in a more finely sampled posterior and a more accurate + evidence, but also a larger number of iterations is + required to converge (default: 500). The value is only + used by the static sampler (i.e. with static=True). static (bool): True if using static nested sampling, False if using dynamic. 
bound (str): Method used to approximately bound the prior @@ -1391,10 +1408,9 @@ def run_sampler( """ mp.set_start_method(start_method, force=True) - if static and pfrac != 1.0: - raise ValueError( - """The static nested sampler does not take alternate values for pfrac.""" - ) + + if not static: + run_nested_kwargs['wt_kwargs'] = {"pfrac": pfrac} if num_threads > 1: with dynesty.pool.Pool(num_threads, self._logl, self.ptform) as pool: @@ -1406,8 +1422,8 @@ def run_sampler( pool=pool, bound=bound, bootstrap=False, + nlive=nlive, ) - self.dynesty_sampler.run_nested(**run_nested_kwargs) else: self.dynesty_sampler = dynesty.DynamicNestedSampler( pool.loglike, @@ -1416,10 +1432,10 @@ def run_sampler( pool=pool, bound=bound, bootstrap=False, + nlive=None, ) - self.dynesty_sampler.run_nested( - wt_kwargs={"pfrac": pfrac}, **run_nested_kwargs - ) + self.dynesty_sampler.run_nested(**run_nested_kwargs) + else: if static: self.dynesty_sampler = dynesty.NestedSampler( @@ -1427,23 +1443,165 @@ def run_sampler( self.ptform, len(self.system.sys_priors), bound=bound, + nlive=nlive, ) - self.dynesty_sampler.run_nested(**run_nested_kwargs) else: self.dynesty_sampler = dynesty.DynamicNestedSampler( self._logl, self.ptform, len(self.system.sys_priors), bound=bound, + nlive=None, ) - self.dynesty_sampler.run_nested( - wt_kwargs={"pfrac": pfrac}, **run_nested_kwargs - ) + self.dynesty_sampler.run_nested(**run_nested_kwargs) self.results.add_samples( self.dynesty_sampler.results["samples"], self.dynesty_sampler.results["logl"], ) - num_iter = self.dynesty_sampler.results["niter"] - return self.dynesty_sampler.results["samples"], num_iter + self.results.ln_evidence = self.dynesty_sampler.results["logz"][-1] + self.results.ln_evidence_err = self.dynesty_sampler.results["logzerr"][-1] + + return self.dynesty_sampler.results["samples"], self.dynesty_sampler.results["niter"] + + +class MultiNest(Sampler): + """ + Implements nested sampling using MultiNest package. + + Thea McKenna, Sarah Blunt, & Lea Hirsch 2024 + """ + + def __init__(self, + system, + chi2_type="standard", + like="chi2_lnlike", + custom_lnlike=None, + ): + super(MultiNest, self).__init__( + system, + like=like, + chi2_type=chi2_type, + custom_lnlike=custom_lnlike, + ) + + # create an empty results object + self.results = orbitize.results.Results( + self.system, + sampler_name=self.__class__.__name__, + post=None, + lnlike=None, + version_number=orbitize.__version__, + ) + + def run_sampler( + self, + n_live_points=500, + outputfiles_basename='./multinest', + ): + """Runs the nested sampler from the (Py)MultiNest package. + + Args: + n_live_points (int): Number of live points. A larger numbers results + in a more finely sampled posterior and a more accurate + evidence, but also a larger number of iterations is + required to converge (default: 500). + outputfiles_basename (str): Basename for theMultiNest output. Can be + a folder and/or prefix for the filenames. Any (sub)folder should + first be manually created. 
+ + Returns: + numpy.array of float: posterior samples + """ + + # Import here because it will otherwise give a warning + # if the compiled MultiNest library is not found + # when importing orbitize + import pymultinest + + # Get the MPI rank of the process + # try: + # from mpi4py import MPI + # mpi_rank = MPI.COMM_WORLD.Get_rank() + # except ModuleNotFoundError: + # mpi_rank = 0 + + # Create the output folder if not existing + # if mpi_rank == 0 and not os.path.exists(output_folder): + # os.mkdir(output_folder) + + # Number of parameters to fit + n_parameters = len(self.system.labels) + + def _logprior_multinest(param_cube, n_dim, n_param): + """ + Parameters + ---------- + param_cube : LP_c_double + Unit cube. + n_dim : int + Number of dimensions. This parameter is mandatory. + n_param : int + Number of parameters. This parameter is mandatory. + + Returns + ------- + LP_c_double + Parameter cube. + """ + + for i in range(n_param): + if hasattr(self.system.sys_priors[i], 'transform_samples'): + param_cube[i] = self.system.sys_priors[i].transform_samples(param_cube[i]) + else: + # The prior is a fixed number + param_cube[i] = self.system.sys_priors[i] + + return param_cube + + def _loglike_multinest(param_cube, n_dim, n_param): + """ + Parameters + ---------- + param_cube : LP_c_double + Parameter cube. + n_dim : int + Number of dimensions. This parameter is mandatory. + n_param : int + Number of parameters. This parameter is mandatory. + + Returns + ------- + float + Log-likelihood. + """ + + # Convert LP_c_double to np.float64 + param_array = np.zeros(n_param) + for i in range(n_param): + param_array[i] = param_cube[i] + + return self._logl(param_array) + + pymultinest.run( + _loglike_multinest, + _logprior_multinest, + n_parameters, + outputfiles_basename=outputfiles_basename, + resume=False, + n_live_points=n_live_points, + ) + + analyzer = pymultinest.analyse.Analyzer( + n_parameters, outputfiles_basename=outputfiles_basename) + + sampling_stats = analyzer.get_stats() + post_samples = analyzer.get_equal_weighted_posterior() + + # The log-likelihood is stored in the last column + self.results.add_samples(post_samples[:, :-1], post_samples[:, -1]) + self.results.ln_evidence = sampling_stats["nested sampling global log-evidence"] + self.results.ln_evidence_err = sampling_stats["nested sampling global log-evidence error"] + + return post_samples[:, :-1] diff --git a/requirements.txt b/requirements.txt index ddc47100..3af429eb 100644 --- a/requirements.txt +++ b/requirements.txt @@ -16,3 +16,4 @@ sphinx docutils<0.17 rebound dynesty +pymultinest From 72fef02266d08cbf0f5e6b8ed379048be436203d Mon Sep 17 00:00:00 2001 From: Tomas Stolker Date: Tue, 24 Sep 2024 14:40:45 +0200 Subject: [PATCH 2/9] Added hdf5_file parameter, updated docstrings --- orbitize/sampler.py | 75 +++++++++++++++++++++++++++++++++------------ 1 file changed, 55 insertions(+), 20 deletions(-) diff --git a/orbitize/sampler.py b/orbitize/sampler.py index 1e7fb82a..deffb137 100644 --- a/orbitize/sampler.py +++ b/orbitize/sampler.py @@ -1314,7 +1314,19 @@ def check_prior_support(self): class NestedSampler(Sampler): """ - Implements nested sampling using Dynesty package. + Implements nested sampling using the Dynesty package. + + Args: + system (system.System): system.System object + chi2_type (str, optional): either "standard", or "log" + like (str): name of likelihood function in ``lnlike.py`` + custom_lnlike (func): ability to include an addition custom likelihood + function in the fit. 
The function looks like + ``clnlikes = custon_lnlike(params)`` where ``params`` is a RxM array + of fitting parameters, where R is the number of orbital paramters + (can be passed in system.compute_model()), and M is the number of + orbits we need model predictions for. It returns ``clnlikes`` + which is an array of length M, or it can be a single float if M = 1. Thea McKenna, Sarah Blunt, & Lea Hirsch 2024 """ @@ -1468,9 +1480,25 @@ def run_sampler( class MultiNest(Sampler): """ - Implements nested sampling using MultiNest package. + Implements nested sampling using the (Py)MultiNest package. In order + to use this sampler, MultiNest should be `manually compiled + `_. + The sampler supports multiprocessing with MPI, which requires the + installation of mpi4py (e.g. as "pip install mpi4py"). - Thea McKenna, Sarah Blunt, & Lea Hirsch 2024 + Args: + system (system.System): system.System object + chi2_type (str, optional): either "standard", or "log" + like (str): name of likelihood function in ``lnlike.py`` + custom_lnlike (func): ability to include an addition custom likelihood + function in the fit. The function looks like + ``clnlikes = custon_lnlike(params)`` where ``params`` is a RxM array + of fitting parameters, where R is the number of orbital paramters + (can be passed in system.compute_model()), and M is the number of + orbits we need model predictions for. It returns ``clnlikes`` + which is an array of length M, or it can be a single float if M = 1. + + Tomas Stolker 2024 """ def __init__(self, @@ -1498,7 +1526,8 @@ def __init__(self, def run_sampler( self, n_live_points=500, - outputfiles_basename='./multinest', + output_basename='./multinest', + hdf5_file=None, ): """Runs the nested sampler from the (Py)MultiNest package. @@ -1507,9 +1536,16 @@ def run_sampler( in a more finely sampled posterior and a more accurate evidence, but also a larger number of iterations is required to converge (default: 500). - outputfiles_basename (str): Basename for theMultiNest output. Can be - a folder and/or prefix for the filenames. Any (sub)folder should - first be manually created. + output_basename (str): Basename for the MultiNest output. + Can be a folder and/or prefix for the filenames. Any + (sub)folder should first be manually created. + hdf5_file (str): HDF5 filename in which the results are stored. + Setting the argument will store the results by calling the + save_results method of the Results objects. This parameter + was implemented because calling save_results separately + after the sampling has finished, may cause an error when + using MPI because only one process should write the results. + The results are not stored if the argument is set to None. 
Returns: numpy.array of float: posterior samples @@ -1520,17 +1556,6 @@ def run_sampler( # when importing orbitize import pymultinest - # Get the MPI rank of the process - # try: - # from mpi4py import MPI - # mpi_rank = MPI.COMM_WORLD.Get_rank() - # except ModuleNotFoundError: - # mpi_rank = 0 - - # Create the output folder if not existing - # if mpi_rank == 0 and not os.path.exists(output_folder): - # os.mkdir(output_folder) - # Number of parameters to fit n_parameters = len(self.system.labels) @@ -1588,13 +1613,13 @@ def _loglike_multinest(param_cube, n_dim, n_param): _loglike_multinest, _logprior_multinest, n_parameters, - outputfiles_basename=outputfiles_basename, + outputfiles_basename=output_basename, resume=False, n_live_points=n_live_points, ) analyzer = pymultinest.analyse.Analyzer( - n_parameters, outputfiles_basename=outputfiles_basename) + n_parameters, outputfiles_basename=output_basename) sampling_stats = analyzer.get_stats() post_samples = analyzer.get_equal_weighted_posterior() @@ -1604,4 +1629,14 @@ def _loglike_multinest(param_cube, n_dim, n_param): self.results.ln_evidence = sampling_stats["nested sampling global log-evidence"] self.results.ln_evidence_err = sampling_stats["nested sampling global log-evidence error"] + # Get the MPI rank of the process + try: + from mpi4py import MPI + mpi_rank = MPI.COMM_WORLD.Get_rank() + except ModuleNotFoundError: + mpi_rank = 0 + + if save_results is not None and mpi_rank == 0: + self.results.save_results(output_file=hdf5_file) + return post_samples[:, :-1] From 7c13cf8b478f51394230a2611449300c069d7329 Mon Sep 17 00:00:00 2001 From: Tomas Stolker Date: Tue, 24 Sep 2024 14:46:45 +0200 Subject: [PATCH 3/9] Minor fix --- orbitize/sampler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/orbitize/sampler.py b/orbitize/sampler.py index deffb137..87b6efab 100644 --- a/orbitize/sampler.py +++ b/orbitize/sampler.py @@ -1636,7 +1636,7 @@ def _loglike_multinest(param_cube, n_dim, n_param): except ModuleNotFoundError: mpi_rank = 0 - if save_results is not None and mpi_rank == 0: + if hdf5_file is not None and mpi_rank == 0: self.results.save_results(output_file=hdf5_file) return post_samples[:, :-1] From 827c44f3d929e381ff58bc279d65a8579987f561 Mon Sep 17 00:00:00 2001 From: Tomas Stolker Date: Tue, 24 Sep 2024 15:02:22 +0200 Subject: [PATCH 4/9] Fixed parameter name --- orbitize/sampler.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/orbitize/sampler.py b/orbitize/sampler.py index 87b6efab..dfe19d9b 100644 --- a/orbitize/sampler.py +++ b/orbitize/sampler.py @@ -1637,6 +1637,7 @@ def _loglike_multinest(param_cube, n_dim, n_param): mpi_rank = 0 if hdf5_file is not None and mpi_rank == 0: - self.results.save_results(output_file=hdf5_file) + # Only a single process should write to the HDF5 file + self.results.save_results(filename=hdf5_file) return post_samples[:, :-1] From 804377f59337f3c31840d6cb852d4d8aca23afad Mon Sep 17 00:00:00 2001 From: Tomas Stolker Date: Tue, 24 Sep 2024 15:35:17 +0200 Subject: [PATCH 5/9] Updated unit tests --- tests/test_nested_sampler.py | 49 +++++++++++++++++++++++++++++------- 1 file changed, 40 insertions(+), 9 deletions(-) diff --git a/tests/test_nested_sampler.py b/tests/test_nested_sampler.py index a6130c35..a00fb8ca 100644 --- a/tests/test_nested_sampler.py +++ b/tests/test_nested_sampler.py @@ -1,5 +1,5 @@ """ -Tests the NestedSampler class by fixing all parameters except for eccentricity. 
+Tests the NestedSampler and MultiNest classes by fixing all parameters except for eccentricity. """ from orbitize import system, sampler @@ -40,25 +40,56 @@ def test_nested_sampler(): # run both static & dynamic nested samplers mysampler = sampler.NestedSampler(sys) - _ = mysampler.run_sampler(bound="multi", pfrac=0.95, static=False, num_threads=8) + _ = mysampler.run_sampler(bound="multi", pfrac=0.95, static=False, num_threads=8, run_nested_kwargs={}) print("Finished first run!") dynamic_eccentricities = mysampler.results.post[:, lab["ecc1"]] assert np.median(dynamic_eccentricities) == pytest.approx(ecc, abs=0.1) - _ = mysampler.run_sampler(bound="multi", static=True, num_threads=8) + _ = mysampler.run_sampler(bound="multi", static=True, num_threads=8, run_nested_kwargs={}) print("Finished second run!") static_eccentricities = mysampler.results.post[:, lab["ecc1"]] assert np.median(static_eccentricities) == pytest.approx(ecc, abs=0.1) - # check that the static sampler raises an error when user tries to set pfrac - # for static sampler - try: - mysampler.run_sampler(pfrac=0.1, static=True) - except ValueError: - pass + +def test_multinest(): + # generate data + mtot = 1.2 # total system mass [M_sol] + plx = 60.0 # parallax [mas] + orbit_frac = 95 + data_table, sma = generate_synthetic_data( + orbit_frac, + mtot, + plx, + num_obs=30, + ) + + # assumed ecc value + ecc = 0.5 + + # initialize orbitize `System` object + sys = system.System(1, data_table, mtot, plx) + lab = sys.param_idx + + ecc = 0.5 # eccentricity + + # set all parameters except eccentricity to fixed values (same as used to generate data) + sys.sys_priors[lab["inc1"]] = np.pi / 4 + sys.sys_priors[lab["sma1"]] = sma + sys.sys_priors[lab["aop1"]] = np.pi / 4 + sys.sys_priors[lab["pan1"]] = np.pi / 4 + sys.sys_priors[lab["tau1"]] = 0.8 + sys.sys_priors[lab["plx"]] = plx + sys.sys_priors[lab["mtot"]] = mtot + + # running the actual sampler is not possible without compiling MultiNest + mysampler = sampler.MultiNest(sys) + assert hasattr(mysampler, "run_sampler") + assert hasattr(mysampler, "results") + assert hasattr(mysampler, "system") if __name__ == "__main__": test_nested_sampler() + test_multinest() From ed80f58b0416cc374d4c4bc4749c01aa64174153 Mon Sep 17 00:00:00 2001 From: Tomas Stolker Date: Tue, 24 Sep 2024 18:09:09 +0200 Subject: [PATCH 6/9] Remove labels of fixed parameters in plot_corner --- orbitize/plot.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/orbitize/plot.py b/orbitize/plot.py index a02cfc1e..7832236a 100644 --- a/orbitize/plot.py +++ b/orbitize/plot.py @@ -107,21 +107,23 @@ def plot_corner(results, param_list=None, **corner_kwargs): param_indices = [] angle_indices = [] secondary_mass_indices = [] - for i, param in enumerate(param_list): - index_num = results.param_idx[param] + fixed_indices = [] + for i, label_key in enumerate(param_list): + index_num = results.param_idx[label_key] # only plot non-fixed parameters if np.std(results.post[:, index_num]) > 0: param_indices.append(index_num) - label_key = param if ( label_key.startswith("aop") or label_key.startswith("pan") or label_key.startswith("inc") ): - angle_indices.append(i) + angle_indices.append(i-len(fixed_indices)) if label_key.startswith("m") and label_key != "m0" and label_key != "mtot": - secondary_mass_indices.append(i) + secondary_mass_indices.append(i-len(fixed_indices)) + else: + fixed_indices.append(i) samples = np.copy( results.post[:, param_indices] @@ -137,7 +139,7 @@ def plot_corner(results, 
param_list=None, **corner_kwargs): "labels" not in corner_kwargs ): # use default labels if user didn't already supply them reduced_labels_list = [] - for i in np.arange(len(param_indices)): + for i in param_indices: label_key = param_list[i] if label_key.startswith("m") and label_key != "m0" and label_key != "mtot": body_num = label_key[1] From c409f492d9114e4c9e7b62b2cb1ce4f147f7ecc1 Mon Sep 17 00:00:00 2001 From: Tomas Stolker Date: Tue, 24 Sep 2024 20:02:43 +0200 Subject: [PATCH 7/9] Check if standard deviation is zero with np.isclose --- orbitize/plot.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/orbitize/plot.py b/orbitize/plot.py index 7832236a..52e1f30e 100644 --- a/orbitize/plot.py +++ b/orbitize/plot.py @@ -112,7 +112,7 @@ def plot_corner(results, param_list=None, **corner_kwargs): index_num = results.param_idx[label_key] # only plot non-fixed parameters - if np.std(results.post[:, index_num]) > 0: + if not np.isclose(0.0, np.std(results.post[:, index_num])): param_indices.append(index_num) if ( label_key.startswith("aop") From 03b994c3a484057c11569a6e65c5d675f0434e99 Mon Sep 17 00:00:00 2001 From: Tomas Stolker Date: Wed, 25 Sep 2024 18:16:44 +0200 Subject: [PATCH 8/9] Added multinest_kwargs parameter --- orbitize/sampler.py | 37 ++++++++++++++++++++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/orbitize/sampler.py b/orbitize/sampler.py index dfe19d9b..5fdfba0c 100644 --- a/orbitize/sampler.py +++ b/orbitize/sampler.py @@ -3,6 +3,7 @@ import astropy.constants as consts import abc import time +import warnings from astropy.time import Time import dynesty @@ -1528,6 +1529,7 @@ def run_sampler( n_live_points=500, output_basename='./multinest', hdf5_file=None, + multinest_kwargs=None, ): """Runs the nested sampler from the (Py)MultiNest package. @@ -1546,6 +1548,8 @@ def run_sampler( after the sampling has finished, may cause an error when using MPI because only one process should write the results. The results are not stored if the argument is set to None. + multinest_kwargs (dict): dictionary of keywords that will be + passed to pymultinest.run(). Returns: numpy.array of float: posterior samples @@ -1559,6 +1563,37 @@ def run_sampler( # Number of parameters to fit n_parameters = len(self.system.labels) + # Create empty dictionary if needed + + if multinest_kwargs is None: + multinest_kwargs = {} + + # Add the resume parameter + + if "resume" not in multinest_kwargs: + multinest_kwargs["resume"] = False + + # Check multinest_kwargs keywords + + if "n_live_points" in multinest_kwargs: + warnings.warn( + "Please specify the number of live points " + "as argument of 'n_live_points' instead " + "of using 'multinest_kwargs'." + ) + + del multinest_kwargs["n_live_points"] + + if "outputfiles_basename" in multinest_kwargs: + warnings.warn( + "Please use the 'output_basename' " + "parameter instead of setting the " + "value of 'outputfiles_basename' " + "in 'multinest_kwargs'." 
+ ) + + del multinest_kwargs["outputfiles_basename"] + def _logprior_multinest(param_cube, n_dim, n_param): """ Parameters @@ -1614,8 +1649,8 @@ def _loglike_multinest(param_cube, n_dim, n_param): _logprior_multinest, n_parameters, outputfiles_basename=output_basename, - resume=False, n_live_points=n_live_points, + **multinest_kwargs, ) analyzer = pymultinest.analyse.Analyzer( From d6ca0f5f5a25a913a6256cf991a89c30cbbbf9ae Mon Sep 17 00:00:00 2001 From: Tomas Stolker Date: Wed, 25 Sep 2024 20:21:40 +0200 Subject: [PATCH 9/9] Revert "Merge branch 'plot_fix_param' into multinest" This reverts commit 9b141741c32fcbd51480b870de2b69c3ff2c29e0, reversing changes made to 804377f59337f3c31840d6cb852d4d8aca23afad. --- orbitize/plot.py | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/orbitize/plot.py b/orbitize/plot.py index 52e1f30e..a02cfc1e 100644 --- a/orbitize/plot.py +++ b/orbitize/plot.py @@ -107,23 +107,21 @@ def plot_corner(results, param_list=None, **corner_kwargs): param_indices = [] angle_indices = [] secondary_mass_indices = [] - fixed_indices = [] - for i, label_key in enumerate(param_list): - index_num = results.param_idx[label_key] + for i, param in enumerate(param_list): + index_num = results.param_idx[param] # only plot non-fixed parameters - if not np.isclose(0.0, np.std(results.post[:, index_num])): + if np.std(results.post[:, index_num]) > 0: param_indices.append(index_num) + label_key = param if ( label_key.startswith("aop") or label_key.startswith("pan") or label_key.startswith("inc") ): - angle_indices.append(i-len(fixed_indices)) + angle_indices.append(i) if label_key.startswith("m") and label_key != "m0" and label_key != "mtot": - secondary_mass_indices.append(i-len(fixed_indices)) - else: - fixed_indices.append(i) + secondary_mass_indices.append(i) samples = np.copy( results.post[:, param_indices] @@ -139,7 +137,7 @@ def plot_corner(results, param_list=None, **corner_kwargs): "labels" not in corner_kwargs ): # use default labels if user didn't already supply them reduced_labels_list = [] - for i in param_indices: + for i in np.arange(len(param_indices)): label_key = param_list[i] if label_key.startswith("m") and label_key != "m0" and label_key != "mtot": body_num = label_key[1]
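
Usage note (editor's addition, not part of the patches above): a minimal sketch of how the samplers introduced in this series are meant to be called, following the signatures added in PATCH 1/9 and PATCH 8/9 and the updated unit test. The data file "my_data.csv", the mass/parallax values, the output paths, and the evidence_tolerance keyword (a standard pymultinest.run argument) are illustrative assumptions; read_input.read_file is the existing orbitize reader.

    import os

    from orbitize import read_input, sampler, system

    # Read astrometry in the standard orbitize format
    # ("my_data.csv" is a placeholder filename)
    data_table = read_input.read_file("my_data.csv")

    # One secondary body; the total mass (Msol) and parallax (mas)
    # are illustrative values, as in the unit test
    sys = system.System(1, data_table, 1.2, 60.0)

    # Nested sampling with Dynesty (static sampler); run_nested_kwargs is
    # passed explicitly here, following the updated unit test
    dynesty_sampler = sampler.NestedSampler(sys)
    samples, n_iter = dynesty_sampler.run_sampler(
        static=True, bound="multi", nlive=500, num_threads=1,
        run_nested_kwargs={},
    )

    # Nested sampling with (Py)MultiNest; requires a compiled MultiNest
    # library. The output folder for the basename has to exist beforehand,
    # and hdf5_file makes the sampler store the Results object itself
    # (only from MPI rank 0 when running with mpi4py).
    os.makedirs("multinest_out", exist_ok=True)
    multinest_sampler = sampler.MultiNest(sys)
    post_samples = multinest_sampler.run_sampler(
        n_live_points=500,
        output_basename="multinest_out/run_",
        hdf5_file="multinest_results.hdf5",
        multinest_kwargs={"evidence_tolerance": 0.5},
    )

    # Both samplers attach the evidence estimate to the Results object
    print(multinest_sampler.results.ln_evidence,
          multinest_sampler.results.ln_evidence_err)

The hdf5_file argument exists so that, under MPI, only the rank-0 process writes the Results file, which is why the sketch does not call save_results separately after the MultiNest run.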