diff --git a/docs/tutorials/fitting_model_spectra.ipynb b/docs/tutorials/fitting_model_spectra.ipynb index dbac68e..80e5c1a 100644 --- a/docs/tutorials/fitting_model_spectra.ipynb +++ b/docs/tutorials/fitting_model_spectra.ipynb @@ -89,7 +89,6 @@ "Configuration settings:\n", " - Database: /Users/tomasstolker/applications/species/docs/tutorials/species_database.hdf5\n", " - Data folder: /Users/tomasstolker/applications/species/docs/tutorials/data\n", - " - Interpolation method: linear\n", " - Magnitude of Vega: 0.03\n", "Creating species_database.hdf5... [DONE]\n", "Creating data folder... [DONE]\n", @@ -101,7 +100,7 @@ { "data": { "text/plain": [ - "" + "" ] }, "execution_count": 3, @@ -128,7 +127,7 @@ { "data": { "text/plain": [ - "('betapicb_gpi_h.dat', )" + "('betapicb_gpi_h.dat', )" ] }, "execution_count": 4, @@ -205,7 +204,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "100%|████████████████████████████████████████| 240M/240M [00:00<00:00, 142GB/s]\n", + "100%|████████████████████████████████████████| 240M/240M [00:00<00:00, 442GB/s]\n", "SHA256 hash of downloaded file: ba71a5e4d3d399a6f8ae249590c2e174e90ec2b55e712d350dad8ca1ae83a907\n", "Use this value as the 'known_hash' argument of 'pooch.retrieve' to ensure that the file hasn't changed if it is downloaded again in the future.\n" ] @@ -311,7 +310,7 @@ "output_type": "stream", "text": [ "Downloading data from 'https://archive.stsci.edu/hlsps/reference-atlases/cdbs/current_calspec/alpha_lyr_stis_011.fits' to file '/Users/tomasstolker/applications/species/docs/tutorials/data/alpha_lyr_stis_011.fits'.\n", - "100%|███████████████████████████████████████| 288k/288k [00:00<00:00, 92.5MB/s]" + "100%|████████████████████████████████████████| 288k/288k [00:00<00:00, 177MB/s]" ] }, { @@ -325,7 +324,9 @@ "name": "stderr", "output_type": "stream", "text": [ - "\n" + "\n", + "/Users/tomasstolker/applications/species/species/data/database.py:1355: UserWarning: Found 33 fluxes with NaN in the data of GPI_YJHK. Removing the spectral fluxes that contain a NaN.\n", + " warnings.warn(\n" ] }, { @@ -396,18 +397,10 @@ " - Database tag: GRAVITY\n", " - Filename: ./data/companion_data/BetaPictorisb_2018-09-22.fits\n", " - Data shape: (237, 237)\n", - " - Spectral resolution:\n", + " - Resolution:\n", " - GPI_YJHK: 40.0\n", " - GRAVITY: 500.0\n" ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/tomasstolker/applications/species/species/data/database.py:1355: UserWarning: Found 33 fluxes with NaN in the data of GPI_YJHK. Removing the spectral fluxes that contain a NaN.\n", - " warnings.warn(\n" - ] } ], "source": [ @@ -488,7 +481,7 @@ " - Database tag: GRAVITY\n", " - Filename: BetaPictorisb_2018-09-22.fits\n", " - Data shape: (237, 237)\n", - " - Spectral resolution:\n", + " - Resolution:\n", " - GRAVITY: 500.0\n", " - GPI_Y: 40.0\n", " - GPI_J: 40.0\n", @@ -633,7 +626,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We are now ready to sample the posterior distribution by either using [MultiNest](https://johannesbuchner.github.io/PyMultiNest/index.html) or [UltraNest](https://johannesbuchner.github.io/UltraNest/index.html) with the [run_multinest](https://species.readthedocs.io/en/latest/species.fit.html#species.fit.fit_model.FitModel.run_multinest) and [run_ultranest](https://species.readthedocs.io/en/latest/species.fit.html#species.fit.fit_model.FitModel.run_ultranest) methods of `FitModel`. 
Both are nested sampling algorithms which are powerful in sampling multi-modal distributions and will estimate the marginalized likelihood (i.e. *model evidence*), which enables pair-wise model comparison through the Bayes factor."
+    "We are now ready to sample the posterior distribution by either using [MultiNest](https://johannesbuchner.github.io/PyMultiNest/index.html), [UltraNest](https://johannesbuchner.github.io/UltraNest/index.html), or [Dynesty](https://dynesty.readthedocs.io/en/latest/index.html) with the [run_multinest](https://species.readthedocs.io/en/latest/species.fit.html#species.fit.fit_model.FitModel.run_multinest), [run_ultranest](https://species.readthedocs.io/en/latest/species.fit.html#species.fit.fit_model.FitModel.run_ultranest), and [run_dynesty](https://species.readthedocs.io/en/latest/species.fit.html#species.fit.fit_model.FitModel.run_dynesty) methods of `FitModel`. All three are nested sampling algorithms which are powerful in sampling multi-modal distributions and will estimate the marginalized likelihood (i.e. *model evidence*), which enables pair-wise model comparison through the Bayes factor."
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "To speed up the computation, it is possible to run the nested sampling in parallel (e.g. with `mpirun`) to benefit from the multiprocessing support by `UltraNest` and `MultiNest`. In that case it is important that any functions of `species` that will write to the [Database](https://species.readthedocs.io/en/latest/species.data.html#species.data.database.Database) will be commented out since simultaneous writing to the HDF5 database by different processes is not possible. It is therefore recommended to first add all the required data to the database and then only run [SpeciesInit](https://species.readthedocs.io/en/latest/species.core.html#species.core.species_init.SpeciesInit), [FitModel](https://species.readthedocs.io/en/latest/species.fit.html#species.fit.fit_model.FitModel), and the sampler ([run_multinest](https://species.readthedocs.io/en/latest/species.fit.html#species.fit.fit_model.FitModel.run_multinest) or [run_ultranest](https://species.readthedocs.io/en/latest/species.fit.html#species.fit.fit_model.FitModel.run_ultranest)) in parallel with MPI."
+    "To speed up the computation, it is possible to run the nested sampling in parallel (e.g. with `mpirun`) to benefit from the multiprocessing support by `MultiNest`, `UltraNest`, and `Dynesty`. In that case it is important that any functions of `species` that write to the [Database](https://species.readthedocs.io/en/latest/species.data.html#species.data.database.Database) are commented out, since simultaneous writing to the HDF5 database by different processes is not possible. It is therefore recommended to first add all the required data to the database and then only run [SpeciesInit](https://species.readthedocs.io/en/latest/species.core.html#species.core.species_init.SpeciesInit), [FitModel](https://species.readthedocs.io/en/latest/species.fit.html#species.fit.fit_model.FitModel), and the sampler ([run_multinest](https://species.readthedocs.io/en/latest/species.fit.html#species.fit.fit_model.FitModel.run_multinest), [run_ultranest](https://species.readthedocs.io/en/latest/species.fit.html#species.fit.fit_model.FitModel.run_ultranest), or [run_dynesty](https://species.readthedocs.io/en/latest/species.fit.html#species.fit.fit_model.FitModel.run_dynesty)) in parallel with MPI."
    ]
   },
   {
@@ -664,6 +657,10 @@
    "Nested sampling with MultiNest\n",
    "------------------------------\n",
    "\n",
+    "Database tag: betapic\n",
+    "Number of live points: 500\n",
+    "Resume previous fit: False\n",
+    "Output folder: multinest/\n",
    " *****************************************************\n",
    " MultiNest v3.10\n",
    " Copyright Farhan Feroz & Mike Hobson\n",
@@ -672,21 +669,21 @@
    " no. of live points = 500\n",
    " dimensionality = 6\n",
    " *****************************************************\n",
-    " ln(ev)= 16478.369649839929 +/- 0.19072622217532367 \n",
-    " analysing data from multinest/.txt\n",
-    " Total Likelihood Evaluations: 49040\n",
+    " analysing data from multinest/.txt ln(ev)= 16478.154973650067 +/- 0.19161049564628616 \n",
+    " Total Likelihood Evaluations: 46185\n",
    " Sampling finished. Exiting MultiNest\n",
    "\n",
-    "Nested sampling global log-evidence: 16478.37 +/- 0.19\n",
-    "Nested importance sampling global log-evidence: 16476.34 +/- 0.01\n",
+    "\n",
+    "Nested sampling global log-evidence: 16478.15 +/- 0.19\n",
+    "Nested importance sampling global log-evidence: 16476.48 +/- 0.09\n",
    "\n",
    "Sample with the highest probability:\n",
    " - Log-likelihood = 16499.09\n",
-    " - teff = 1710.52\n",
-    " - logg = 3.85\n",
+    " - teff = 1709.63\n",
+    " - logg = 3.84\n",
    " - feh = 0.12\n",
-    " - radius = 1.48\n",
-    " - parallax = 50.97\n",
+    " - radius = 1.49\n",
+    " - parallax = 50.88\n",
    " - scaling_GPI_H = 1.11\n",
    "\n",
    "---------------------\n",
@@ -695,15 +692,15 @@
    "\n",
    "Database tag: betapic\n",
    "Sampler: multinest\n",
-    "Array shape: (2757, 6)\n",
+    "Samples shape: (2755, 6)\n",
    "\n",
    "Integrated autocorrelation time:\n",
-    " - teff: 1.05\n",
-    " - logg: 1.30\n",
-    " - feh: 1.24\n",
-    " - radius: 1.13\n",
-    " - parallax: 1.34\n",
-    " - scaling_GPI_H: 1.20\n"
+    " - teff: 1.27\n",
+    " - logg: 1.28\n",
+    " - feh: 1.21\n",
+    " - radius: 1.24\n",
+    " - parallax: 1.30\n",
+    " - scaling_GPI_H: 1.14\n"
   ]
  }
 ],
diff --git a/species/data/database.py b/species/data/database.py
index 5ef75ab..3cc136e 100644
--- a/species/data/database.py
+++ b/species/data/database.py
@@ -1826,7 +1826,7 @@ def add_samples(
 
         print(f"Database tag: {tag}")
         print(f"Sampler: {sampler}")
-        print(f"Array shape: {samples.shape}")
+        print(f"Samples shape: {samples.shape}")
 
         if spec_labels is None:
             spec_labels = []
@@ -1905,7 +1905,7 @@ def add_samples(
         for i, item in enumerate(modelpar):
             auto_corr = integrated_time(samples[:, i], quiet=True)[0]
 
-            if np.allclose(samples[:, i], np.mean(samples[:, i])):
+            if np.allclose(samples[:, i], np.mean(samples[:, i]), atol=0.0):
                 print(f" - {item}: fixed")
             else:
                 print(f" - {item}: {auto_corr:.2f}")
@@ -2792,7 +2792,7 @@ def get_samples(
 
         print(f"Database tag: {tag}")
         print(f"Random samples: {random}")
-        print(f"Array shape: {samples.shape}")
+        print(f"Samples shape: {samples.shape}")
 
         attributes = {}
         for item in dset.attrs:
diff --git a/species/fit/fit_model.py b/species/fit/fit_model.py
index e94d69a..a378897 100644
--- a/species/fit/fit_model.py
+++ b/species/fit/fit_model.py
@@ -3,10 +3,12 @@
 """
 
 import os
+import sys
 import warnings
 
 from typing import Optional, Union, List, Tuple, Dict
 
+import dynesty
 import numpy as np
 import spectres
 
@@ -29,6 +31,7 @@
 )
 
 from PyAstronomy.pyasl import fastRotBroad
+from schwimmbad import MPIPool
 from scipy import interpolate, stats
 from typeguard import typechecked
 
@@ -137,7 +140,7 @@ def __init__(
           - Radial velocity can be included with the ``rad_vel``
            parameter (km/s). 
This parameter will only be relevant if
            the radial velocity shift can be spectrally
-            resolved given the instrument resolution. 
+            resolved given the instrument resolution.
 
           - Rotational broadening can be fitted by including the
            ``vsini`` parameter (km/s). This parameter will only
@@ -1924,6 +1927,12 @@ def run_multinest(
 
         print_section("Nested sampling with MultiNest")
 
+        print(f"Database tag: {tag}")
+        print(f"Number of live points: {n_live_points}")
+        print(f"Resume previous fit: {resume}")
+        print(f"Output folder: {output}")
+        print()
+
         # Set attributes
 
         if "prior" in kwargs:
@@ -2074,6 +2083,7 @@ def _lnlike_multinest(
         )
 
         # Get the best-fit (highest likelihood) point
+        print("\nSample with the highest probability:")
 
         best_params = analyzer.get_best_fit()
 
@@ -2081,7 +2091,10 @@
         print(f" - Log-likelihood = {max_lnlike:.2f}")
 
         for i, item in enumerate(best_params["parameters"]):
-            print(f" - {self.modelpar[i]} = {item:.2f}")
+            if -0.1 < item < 0.1:
+                print(f" - {self.modelpar[i]} = {item:.2e}")
+            else:
+                print(f" - {self.modelpar[i]} = {item:.2f}")
 
         # Get the posterior samples
         samples = analyzer.get_equal_weighted_posterior()
@@ -2104,16 +2117,6 @@
 
         samples = np.append(samples, app_param, axis=1)
 
-        # Get the MPI rank of the process
-
-        try:
-            from mpi4py import MPI
-
-            mpi_rank = MPI.COMM_WORLD.Get_rank()
-
-        except ModuleNotFoundError:
-            mpi_rank = 0
-
         # Dictionary with attributes that will be stored
 
         attr_dict = {
@@ -2126,6 +2129,16 @@
         if self.ext_filter is not None:
             attr_dict["ext_filter"] = self.ext_filter
 
+        # Get the MPI rank of the process
+
+        try:
+            from mpi4py import MPI
+
+            mpi_rank = MPI.COMM_WORLD.Get_rank()
+
+        except ModuleNotFoundError:
+            mpi_rank = 0
+
         # Add samples to the database
 
         if mpi_rank == 0:
@@ -2201,6 +2214,12 @@ def run_ultranest(
 
         print_section("Nested sampling with UltraNest")
 
+        print(f"Database tag: {tag}")
+        print(f"Minimum number of live points: {min_num_live_points}")
+        print(f"Resume previous fit: {resume}")
+        print(f"Output folder: {output}")
+        print()
+
         # Check if resume is set to a non-UltraNest value
 
         if isinstance(resume, bool) and not resume:
@@ -2328,15 +2347,18 @@ def _lnlike_ultranest(params: np.ndarray) -> Union[float, np.float64]:
 
             print(f" - {item} = {mean:.2e} +/- {sigma:.2e}")
 
-        # Maximum likelihood sample
-
-        print("\nMaximum likelihood sample:")
+        # Get the best-fit (highest likelihood) point
 
         max_lnlike = result["maximum_likelihood"]["logl"]
+
+        print("\nSample with the highest probability:")
         print(f" - Log-likelihood = {max_lnlike:.2f}")
 
         for i, item in enumerate(result["maximum_likelihood"]["point"]):
-            print(f" - {self.modelpar[i]} = {item:.2f}")
+            if -0.1 < item < 0.1:
+                print(f" - {self.modelpar[i]} = {item:.2e}")
+            else:
+                print(f" - {self.modelpar[i]} = {item:.2f}")
 
         # Create a list with scaling labels
 
@@ -2361,6 +2383,18 @@
 
         samples = np.append(samples, app_param, axis=1)
 
+        # Dictionary with attributes that will be stored
+
+        attr_dict = {
+            "spec_type": "model",
+            "spec_name": self.model,
+            "ln_evidence": (ln_z, ln_z_error),
+            "parallax": self.parallax[0],
+        }
+
+        if self.ext_filter is not None:
+            attr_dict["ext_filter"] = self.ext_filter
+
         # Get the MPI rank of the process
 
         try:
@@ -2371,6 +2405,353 @@ def _lnlike_ultranest(params: np.ndarray) -> Union[float, np.float64]:
 
         except ModuleNotFoundError:
             mpi_rank = 0
 
+        # Add samples to the database
+
+        if mpi_rank == 0:
+            # 
Writing the samples to the database is only
+            # possible when using a single process
+            from species.data.database import Database
+
+            species_db = Database()
+
+            species_db.add_samples(
+                sampler="ultranest",
+                samples=samples,
+                ln_prob=ln_prob,
+                tag=tag,
+                modelpar=self.modelpar,
+                bounds=self.bounds,
+                normal_prior=self.normal_prior,
+                spec_labels=spec_labels,
+                attr_dict=attr_dict,
+            )
+
+    @typechecked
+    def run_dynesty(
+        self,
+        tag: str,
+        n_live_points: int = 2000,
+        resume: bool = False,
+        output: str = "dynesty/",
+        evidence_tolerance: float = 0.5,
+        dynamic: bool = False,
+        sample_method: str = "auto",
+        bound: str = "multi",
+        n_pool: Optional[int] = None,
+        mpi_pool: bool = False,
+    ) -> None:
+        """
+        Function for sampling the posterior distribution of the
+        model parameters. The parameter estimation and the
+        computation of the marginalized likelihood (i.e. model
+        evidence) are done with ``Dynesty``.
+
+        When using MPI, it is also required to install ``mpi4py`` (e.g.
+        ``pip install mpi4py``), otherwise an error may occur when the
+        ``output`` folder is created by multiple processes.
+
+        Parameters
+        ----------
+        tag : str
+            Database tag where the samples will be stored.
+        n_live_points : int
+            Number of live points used by the nested sampling
+            with ``Dynesty``.
+        resume : bool
+            Resume the posterior sampling from a previous run.
+        output : str
+            Path that is used for the output files from ``Dynesty``.
+        evidence_tolerance : float
+            The dlogZ value used to terminate a nested sampling run,
+            or the initial dlogZ value passed to a dynamic nested
+            sampling run.
+        dynamic : bool
+            Whether to use static or dynamic nested sampling (see
+            `Dynesty documentation `_).
+        sample_method : str
+            The sampling method that should be used ('auto', 'unif',
+            'rwalk', 'slice', 'rslice'; see the `sampling documentation
+            `_).
+        bound : str
+            Method used to approximately bound the prior using the
+            current set of live points ('none', 'single', 'multi',
+            'balls', 'cubes'). This `conditions the sampling methods
+            `_ that are used
+            to propose new live points.
+        n_pool : int, None
+            The number of processes for the local multiprocessing. The
+            parameter is not used when the argument is set to ``None``.
+        mpi_pool : bool
+            Distribute the workers to an ``MPIPool`` on a cluster,
+            using ``schwimmbad``.
+
+        Returns
+        -------
+        NoneType
+            None
+        """
+
+        print_section("Nested sampling with Dynesty")
+
+        print(f"Database tag: {tag}")
+        print(f"Number of live points: {n_live_points}")
+        print(f"Resume previous fit: {resume}")
+
+        # Get the MPI rank of the process
+
+        try:
+            from mpi4py import MPI
+
+            mpi_rank = MPI.COMM_WORLD.Get_rank()
+
+        except ModuleNotFoundError:
+            mpi_rank = 0
+
+        # Create the output folder if required, but only with the
+        # main process to avoid a race between the MPI processes
+
+        if mpi_rank == 0 and not os.path.exists(output):
+            print(f"Creating output folder: {output}")
+            os.mkdir(output)
+
+        else:
+            print(f"Output folder: {output}")
+
+        print()
+
+        out_basename = os.path.join(output, "retrieval_")
+
+        if not mpi_pool:
+            if n_pool is not None:
+                with dynesty.pool.Pool(
+                    n_pool,
+                    self._lnlike_func,
+                    self._prior_transform,
+                    ptform_args=[self.bounds, self.cube_index],
+                ) as pool:
+                    print(f"Initialized a dynesty pool with {n_pool} workers")
+
+                    if dynamic:
+                        if resume:
+                            dsampler = dynesty.DynamicNestedSampler.restore(
+                                fname=out_basename + "dynesty.save",
+                                pool=pool,
+                            )
+
+                            print(
+                                "Resumed a Dynesty run from "
+                                f"{out_basename}dynesty.save"
+                            )
+
+                        else:
+                            dsampler = dynesty.DynamicNestedSampler(
+                                loglikelihood=pool.loglike,
+                                prior_transform=pool.prior_transform,
+                                ndim=len(self.modelpar),
+                                pool=pool,
+                                sample=sample_method,
+                                bound=bound,
+                            )
+
+                        dsampler.run_nested(
+                            dlogz_init=evidence_tolerance,
+                            nlive_init=n_live_points,
+                            checkpoint_file=out_basename + "dynesty.save",
+                            resume=resume,
+                        )
+
+                    else:
+                        if resume:
+                            dsampler = dynesty.NestedSampler.restore(
+                                fname=out_basename + "dynesty.save",
+                                pool=pool,
+                            )
+
+                            print(
+                                "Resumed a Dynesty run from "
+                                f"{out_basename}dynesty.save"
+                            )
+
+                        else:
+                            dsampler = dynesty.NestedSampler(
+                                loglikelihood=pool.loglike,
+                                prior_transform=pool.prior_transform,
+                                ndim=len(self.modelpar),
+                                pool=pool,
+                                nlive=n_live_points,
+                                sample=sample_method,
+                                bound=bound,
+                            )
+
+                        dsampler.run_nested(
+                            dlogz=evidence_tolerance,
+                            checkpoint_file=out_basename + "dynesty.save",
+                            resume=resume,
+                        )
+            else:
+                if dynamic:
+                    if resume:
+                        dsampler = dynesty.DynamicNestedSampler.restore(
+                            fname=out_basename + "dynesty.save"
+                        )
+
+                        print(
+                            "Resumed a Dynesty run from "
+                            f"{out_basename}dynesty.save"
+                        )
+
+                    else:
+                        dsampler = dynesty.DynamicNestedSampler(
+                            loglikelihood=self._lnlike_func,
+                            prior_transform=self._prior_transform,
+                            ndim=len(self.modelpar),
+                            ptform_args=[self.bounds, self.cube_index],
+                            sample=sample_method,
+                            bound=bound,
+                        )
+
+                    dsampler.run_nested(
+                        dlogz_init=evidence_tolerance,
+                        nlive_init=n_live_points,
+                        checkpoint_file=out_basename + "dynesty.save",
+                        resume=resume,
+                    )
+
+                else:
+                    if resume:
+                        dsampler = dynesty.NestedSampler.restore(
+                            fname=out_basename + "dynesty.save"
+                        )
+
+                        print(
+                            "Resumed a Dynesty run from "
+                            f"{out_basename}dynesty.save"
+                        )
+
+                    else:
+                        dsampler = dynesty.NestedSampler(
+                            loglikelihood=self._lnlike_func,
+                            prior_transform=self._prior_transform,
+                            ndim=len(self.modelpar),
+                            ptform_args=[self.bounds, self.cube_index],
+                            nlive=n_live_points,
+                            sample=sample_method,
+                            bound=bound,
+                        )
+
+                    dsampler.run_nested(
+                        dlogz=evidence_tolerance,
+                        checkpoint_file=out_basename + "dynesty.save",
+                        resume=resume,
+                    )
+
+        else:
+            pool = MPIPool()
+
+            if not pool.is_master():
+                pool.wait()
+                sys.exit(0)
+
+            print("Created an MPIPool object.")
+
+            if dynamic:
+                if resume:
+                    dsampler = dynesty.DynamicNestedSampler.restore(
+                        fname=out_basename + "dynesty.save",
+                        pool=pool,
+                    )
+
+                else:
+                    dsampler = dynesty.DynamicNestedSampler(
+                        loglikelihood=self._lnlike_func,
+                        
prior_transform=self._prior_transform,
+                        ndim=len(self.modelpar),
+                        ptform_args=[self.bounds, self.cube_index],
+                        pool=pool,
+                        sample=sample_method,
+                        bound=bound,
+                    )
+
+                dsampler.run_nested(
+                    dlogz_init=evidence_tolerance,
+                    nlive_init=n_live_points,
+                    checkpoint_file=out_basename + "dynesty.save",
+                    resume=resume,
+                )
+
+            else:
+                if resume:
+                    dsampler = dynesty.NestedSampler.restore(
+                        fname=out_basename + "dynesty.save",
+                        pool=pool,
+                    )
+
+                else:
+                    dsampler = dynesty.NestedSampler(
+                        loglikelihood=self._lnlike_func,
+                        prior_transform=self._prior_transform,
+                        ndim=len(self.modelpar),
+                        ptform_args=[self.bounds, self.cube_index],
+                        pool=pool,
+                        nlive=n_live_points,
+                        sample=sample_method,
+                        bound=bound,
+                    )
+
+                dsampler.run_nested(
+                    dlogz=evidence_tolerance,
+                    checkpoint_file=out_basename + "dynesty.save",
+                    resume=resume,
+                )
+
+        results = dsampler.results
+
+        # The equal-weighted samples from samples_equal() are shuffled,
+        # which would break the pairing with the logl values, so the
+        # posterior is resampled here with the importance weights while
+        # keeping each sample paired with its log-likelihood
+        weights = results.importance_weights()
+        indices = np.random.choice(
+            weights.size, size=weights.size, p=weights / np.sum(weights)
+        )
+        samples = results.samples[indices]
+        ln_prob = results.logl[indices]
+
+        print(f"\nSamples shape: {samples.shape}")
+        print(f"Number of iterations: {results.niter}")
+
+        out_file = out_basename + "post_equal_weights.dat"
+        print(f"Storing samples: {out_file}")
+        np.savetxt(out_file, np.c_[samples, ln_prob])
+
+        # Nested sampling global log-evidence: the logz array stores
+        # the cumulative evidence, so the last element contains the
+        # value for the complete run
+
+        ln_z = results.logz[-1]
+        ln_z_error = results.logzerr[-1]
+        print(f"\nNested sampling log-evidence: {ln_z:.2f} +/- {ln_z_error:.2f}")
+
+        # Get the best-fit (highest likelihood) point
+
+        max_idx = np.argmax(ln_prob)
+        max_lnlike = ln_prob[max_idx]
+        best_params = samples[max_idx]
+
+        print("\nSample with the highest probability:")
+        print(f" - Log-likelihood = {max_lnlike:.2f}")
+
+        for i, item in enumerate(self.modelpar):
+            if -0.1 < best_params[i] < 0.1:
+                print(f" - {self.modelpar[i]} = {best_params[i]:.2e}")
+            else:
+                print(f" - {self.modelpar[i]} = {best_params[i]:.2f}")
+
+        spec_labels = []
+        for item in self.spectrum:
+            if f"scaling_{item}" in self.bounds:
+                spec_labels.append(f"scaling_{item}")
+
+        # Adding the fixed parameters to the samples
+
+        for key, value in self.fix_param.items():
+            self.modelpar.append(key)
+
+            app_param = np.full(samples.shape[0], value)
+            app_param = app_param[..., np.newaxis]
+
+            samples = np.append(samples, app_param, axis=1)
+
         # Dictionary with attributes that will be stored
 
         attr_dict = {
@@ -2393,7 +2774,7 @@ def _lnlike_ultranest(params: np.ndarray) -> Union[float, np.float64]:
 
             species_db = Database()
 
             species_db.add_samples(
-                sampler="ultranest",
+                sampler="dynesty",
                 samples=samples,
                 ln_prob=ln_prob,
                 tag=tag,
diff --git a/species/fit/retrieval.py b/species/fit/retrieval.py
index 6a4ef12..c367b3d 100644
--- a/species/fit/retrieval.py
+++ b/species/fit/retrieval.py
@@ -287,7 +287,9 @@ def __init__(
 
         species_db = Database()
 
-        objectbox = species_db.get_object(object_name, inc_phot=True, inc_spec=True, verbose=False)
+        objectbox = species_db.get_object(
+            object_name, inc_phot=True, inc_spec=True, verbose=False
+        )
 
         # Copy the cloud species into a new list because the values will be adjusted by Radtrans
 
@@ -2805,12 +2807,14 @@ def _lnlike(
 
         # Evaluate the spectra
 
         for i, spec_item in enumerate(self.spectrum.keys()):
-            # Select  data wavelength range from the model spectrum
+            # Select data wavelength range from the model spectrum
 
             wlen_min = self.spectrum[spec_item][0][0, 0]
             wlen_max = self.spectrum[spec_item][0][-1, 0]
 
-            wlen_select = (wlen_micron > wlen_min-0.1*wlen_min) & (wlen_micron < wlen_max+0.1*wlen_max)
+            wlen_select = (wlen_micron > wlen_min - 0.1 * wlen_min) & (
+                wlen_micron 
< wlen_max + 0.1 * wlen_max + ) if spec_item in self.cross_corr: model_wavel = ccf_wavel[spec_item] @@ -2920,7 +2924,9 @@ def _lnlike( else: # Ratio of the inflated and original uncertainties - sigma_ratio = np.sqrt(data_var) / self.spectrum[spec_item][0][:, 2] + sigma_ratio = ( + np.sqrt(data_var) / self.spectrum[spec_item][0][:, 2] + ) sigma_j, sigma_i = np.meshgrid(sigma_ratio, sigma_ratio) # Calculate the inversion of the infalted covariances @@ -3036,7 +3042,6 @@ def _lnlike( alpha=0.2, ) - if self.plotting and len(self.spectrum) > 0: plt.xlabel(r"Wavelength ($\mu$m)") plt.ylabel(r"Flux (W m$^{-2}$ $\mu$m$^{-1}$)") @@ -3368,7 +3373,7 @@ def setup_retrieval( which the broadening will not matter), the computation will be a bit faster. This parameter is only used when the ``vsini`` model parameter has been include in - ``bounds``. The :math:`v \sin(i)` is applied to all + ``bounds``. The :math:`v \\sin(i)` is applied to all spectra by setting the argument of ``apply_vsini`` to ``None``. @@ -4130,7 +4135,7 @@ def run_dynesty( ) print( - "Resumed a dynesty run from " + "Resumed a Dynesty run from " f"{self.out_basename}dynesty.save" ) @@ -4159,7 +4164,7 @@ def run_dynesty( ) print( - "Resumed a dynesty run from " + "Resumed a Dynesty run from " f"{self.out_basename}dynesty.save" ) @@ -4187,7 +4192,7 @@ def run_dynesty( ) print( - "Resumed a dynesty run from " + "Resumed a Dynesty run from " f"{self.out_basename}dynesty.save" ) @@ -4221,7 +4226,7 @@ def run_dynesty( ) print( - "Resumed a dynesty run from " + "Resumed a Dynesty run from " f"{self.out_basename}dynesty.save" )
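
Usage note: with a `FitModel` instance configured as in the tutorial notebook above (here called `fit`, storing its results under the `betapic` tag), the new backend added by this patch would be invoked along the following lines. This is a minimal sketch; `fit` is assumed to exist already and the argument values are illustrative, not the tutorial's:

```python
# Sketch only: assumes `fit` is a configured FitModel instance,
# set up as in the tutorial cells above, with all required data
# already added to the database.

# Static nested sampling with Dynesty, using a local
# multiprocessing pool of 4 workers (dynesty.pool.Pool)
fit.run_dynesty(
    tag="betapic",
    n_live_points=500,
    resume=False,
    output="dynesty/",
    evidence_tolerance=0.5,
    dynamic=False,
    sample_method="auto",
    bound="multi",
    n_pool=4,
    mpi_pool=False,
)
```

Setting `dynamic=True` switches to dynamic nested sampling, and `resume=True` restores the sampler from the checkpoint file that `run_nested` writes to the output folder.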
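The MPI note in the tutorial (all database writes done beforehand, sampler run under `mpirun`) can be condensed into a script pattern. A sketch under those assumptions; the script name, object name, and model grid are placeholders, not prescribed by this patch:

```python
# run_fit_mpi.py -- hypothetical script; launch with e.g.:
#     mpirun -np 8 python run_fit_mpi.py
# All Database writes (add_object, add_model, ...) must already
# have been done in a serial session, since concurrent writes to
# the HDF5 database are not possible.

from species import SpeciesInit
from species.fit.fit_model import FitModel

SpeciesInit()

# Placeholder object and model grid; use the tutorial's setup
fit = FitModel(
    object_name="beta Pic b",
    model="drift-phoenix",
    inc_phot=True,
    inc_spec=True,
)

# mpi_pool=True distributes the likelihood evaluations over a
# schwimmbad MPIPool; worker ranks exit inside run_dynesty, and
# only MPI rank 0 stores the samples in the database afterwards
fit.run_dynesty(tag="betapic", n_live_points=500, mpi_pool=True)
```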