diff --git a/README.rst b/README.rst
index dd975ad9..5b1cc307 100644
--- a/README.rst
+++ b/README.rst
@@ -1,7 +1,7 @@
*species*
=========
-**s**\ pectral and **p**\ hotometric **e**\ xamination **c**\ ode for **i**\ nvestigating **e**\ xoplanet and **s**\ ubstellar atmospheres
+**spe**\ ctral **c**\ haracterization and **i**\ nference for **e**\ xoplanet **s**\ cience
.. image:: https://badge.fury.io/py/species.svg
:target: https://badge.fury.io/py/species
@@ -27,7 +27,7 @@
.. image:: https://img.shields.io/badge/arXiv-1912.13316-orange.svg?style=flat
:target: https://arxiv.org/abs/1912.13316
-*species* is a toolkit for atmospheric characterization of exoplanets and brown dwarfs. It provides a coherent framework for spectral and photometric analysis which builds on publicly-available data from various resources such as spectral and photometric libraries, atmospheric models, and evolutionary tracks. There are tools available for grid retrievals with Bayesian inference, color-magnitude diagrams, spectral and photometric calibration, and synthetic photometry. The package has been released on `PyPI `_ and is actively developed and maintained on Github.
+*species* is a toolkit for atmospheric characterization of directly imaged exoplanets and brown dwarfs. It provides a coherent framework for spectral and photometric analysis which builds on publicly available data and models from various resources. There are tools available for grid and free atmospheric retrievals with Bayesian inference, color-magnitude and color-color diagrams, spectral and photometric calibration, and synthetic photometry. The package has been released on `PyPI `_ and is actively developed and maintained on Github.
Documentation
-------------
@@ -42,11 +42,11 @@ Please cite `Stolker et al. (2020) `_ on the Github page.
+Contributions are welcome, so please consider forking the repository and creating a pull request. Bug reports can be provided by creating an `issue `_ on the Github page.
License
-------
-Copyright 2018-2020 Tomas Stolker
+Copyright 2018-2021 Tomas Stolker
*species* is distributed under the MIT License. See the LICENSE file for the terms and conditions.
diff --git a/docs/conf.py b/docs/conf.py
index e2bbaaae..d56432b1 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -20,7 +20,7 @@
# -- Project information -----------------------------------------------------
project = 'species'
-copyright = '2018-2020, Tomas Stolker'
+copyright = '2018-2021, Tomas Stolker'
author = 'Tomas Stolker'
# The short X.Y version
diff --git a/docs/configuration.rst b/docs/configuration.rst
index f648c610..dc498ef1 100644
--- a/docs/configuration.rst
+++ b/docs/configuration.rst
@@ -21,14 +21,14 @@ In this case the database is stored in the working folder and an absolute path p
>>> import os
>>> os.getcwd()
-The workflow with *species* is now initiated with :class:`~species.core.setup.SpeciesInit`:
+The workflow with *species* can now be initiated with the :class:`~species.core.setup.SpeciesInit` class:
.. code-block:: python
>>> import species
>>> species.SpeciesInit()
-A configuration file with default values is automatically created when `species` is initiated and the configuration file is not present in the working folder.
+Alternatively, a configuration file with default values is automatically created when `species` is initiated, in case the file is not already present in the working folder.
.. tip::
The same `data_folder` can be used in multiple configuration files. In this way, the data is only downloaded once and easily reused by a new instance of :class:`~species.core.setup.SpeciesInit`. Also the HDF5 database can be reused by simply including the same `database` in the configuration file.
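For reference, the resulting ``species_config.ini`` is a small INI file. A minimal sketch of its contents (the key names follow the ``database`` and ``data_folder`` fields mentioned above; the default values shown here are an assumption):

.. code-block:: ini

   [species]
   ; HDF5 file in which all imported data are stored
   database = species_database.hdf5
   ; folder into which external data are downloaded (reusable between workflows)
   data_folder = ./data/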
diff --git a/species/analysis/fit_model.py b/species/analysis/fit_model.py
index 6cf8a235..d0f3117a 100644
--- a/species/analysis/fit_model.py
+++ b/species/analysis/fit_model.py
@@ -141,7 +141,7 @@ def lnlike(param: np.ndarray,
param_dict = {}
spec_scaling = {}
- err_offset = {}
+ err_scaling = {}
corr_len = {}
corr_amp = {}
@@ -150,7 +150,7 @@ def lnlike(param: np.ndarray,
spec_scaling[item[8:]] = param[param_index[item]]
elif item[:6] == 'error_' and item[6:] in spectrum:
- err_offset[item[6:]] = param[param_index[item]]
+ err_scaling[item[6:]] = param[param_index[item]]
elif item[:9] == 'corr_len_' and item[9:] in spectrum:
corr_len[item[9:]] = 10.**param[param_index[item]] # (um)
@@ -175,8 +175,8 @@ def lnlike(param: np.ndarray,
if item not in spec_scaling:
spec_scaling[item] = 1.
- if item not in err_offset:
- err_offset[item] = None
+ if item not in err_scaling:
+ err_scaling[item] = None
ln_like = 0.
@@ -205,15 +205,42 @@ def lnlike(param: np.ndarray,
ln_like += -0.5 * (obj_item[0, j] - phot_flux)**2 / obj_item[1, j]**2
for i, item in enumerate(spectrum.keys()):
+ # Calculate or interpolate the model spectrum
+ if model == 'planck':
+ # Calculate a blackbody spectrum
+ readplanck = read_planck.ReadPlanck((0.9*spectrum[item][0][0, 0],
+ 1.1*spectrum[item][0][-1, 0]))
+
+ model_box = readplanck.get_spectrum(param_dict, 1000., smooth=True)
+
+ # Resample the spectrum to the observed wavelengths
+ model_flux = spectres.spectres(spectrum[item][0][:, 0],
+ model_box.wavelength,
+ model_box.flux)
+
+ else:
+ # Interpolate the model spectrum
+ model_flux = modelspec[i].spectrum_interp(list(param_dict.values()))[0, :]
+
+ # Scale the spectrum by (radius/distance)^2
+ model_flux *= flux_scaling
+
+ # Scale the spectrum data
data_flux = spec_scaling[item]*spectrum[item][0][:, 1]
- if err_offset[item] is None:
+ if err_scaling[item] is None:
+ # Variance without error inflation
data_var = spectrum[item][0][:, 2]**2
+
else:
- data_var = (spectrum[item][0][:, 2] + 10.**err_offset[item])**2
+ # Variance with error inflation (see Piette & Madhusudhan 2020)
+ data_var = spectrum[item][0][:, 2]**2 + (err_scaling[item]*model_flux)**2
if spectrum[item][2] is not None:
- if err_offset[item] is None:
+ # The inverted covariance matrix is available
+
+ if err_scaling[item] is None:
+ # Use the inverted covariance matrix directly
data_cov_inv = spectrum[item][2]
else:
@@ -221,30 +248,17 @@ def lnlike(param: np.ndarray,
sigma_ratio = np.sqrt(data_var) / spectrum[item][0][:, 2]
sigma_j, sigma_i = np.meshgrid(sigma_ratio, sigma_ratio)
- # Calculate the inversion of the infalted covariances
+ # Invert the covariance matrix with the inflated uncertainties
data_cov_inv = np.linalg.inv(spectrum[item][1]*sigma_i*sigma_j)
- if model == 'planck':
- readplanck = read_planck.ReadPlanck((0.9*spectrum[item][0][0, 0],
- 1.1*spectrum[item][0][-1, 0]))
-
- model_box = readplanck.get_spectrum(param_dict, 1000., smooth=True)
-
- model_flux = spectres.spectres(spectrum[item][0][:, 0],
- model_box.wavelength,
- model_box.flux)
-
- else:
- model_flux = modelspec[i].spectrum_interp(list(param_dict.values()))[0, :]
- model_flux *= flux_scaling
-
if spectrum[item][2] is not None:
+ # Calculate the log-likelihood with the covariance matrix
dot_tmp = np.dot(data_flux-model_flux, np.dot(data_cov_inv, data_flux-model_flux))
ln_like += -0.5*dot_tmp - 0.5*np.nansum(np.log(2.*np.pi*data_var))
else:
if item in fit_corr:
- # Covariance model (Wang et al. 2020)
+ # Calculate the log-likelihood with the covariance model (see Wang et al. 2020)
wavel = spectrum[item][0][:, 0] # (um)
wavel_j, wavel_i = np.meshgrid(wavel, wavel)
@@ -261,6 +275,8 @@ def lnlike(param: np.ndarray,
ln_like += -0.5*dot_tmp - 0.5*np.nansum(np.log(2.*np.pi*data_var))
else:
+ # Calculate the log-likelihood without the covariance matrix but with the
+ # normalization term in case the errors are inflated
ln_like += np.nansum(-0.5 * (data_flux-model_flux)**2 / data_var -
0.5 * np.log(2.*np.pi*data_var))
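To make the inflation scheme above concrete: the variance of each spectral point is increased by a term proportional to the model flux, with the proportionality factor sampled as a free parameter (cf. Piette & Madhusudhan 2020). A minimal, self-contained numpy sketch of this log-likelihood, using hypothetical arrays rather than the internal ``FitModel`` data structures:

.. code-block:: python

   import numpy as np

   def lnlike_inflated(data_flux, data_sigma, model_flux, err_scaling):
       # Inflate the variance with a term that scales with the model flux
       data_var = data_sigma**2 + (err_scaling*model_flux)**2

       # Gaussian log-likelihood; the normalization term matters here
       # because the variance depends on a sampled parameter
       return np.nansum(-0.5*(data_flux - model_flux)**2/data_var
                        - 0.5*np.log(2.*np.pi*data_var))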
@@ -511,7 +527,7 @@ def __init__(self,
pre-tabulated grid.
- The prior boundaries of ``powerlaw_max``, ``powerlaw_exp``, and ``powerlaw_ext``
- should be provided in the ``bounds`` dictionary, for example ``'powerlaw_max':
+ should be provided in the ``bounds`` dictionary, for example ``{'powerlaw_max':
(0.01, 100.), 'powerlaw_exp': (-10., 10.), 'powerlaw_ext': (0., 5.)}``.
- A uniform prior is used for ``powerlaw_exp`` and ``powerlaw_ext``, and a
@@ -789,6 +805,26 @@ def __init__(self,
self.modelpar.append(f'error_{item}')
self.bounds[f'error_{item}'] = (bounds[item][1][0], bounds[item][1][1])
+ if self.bounds[f'error_{item}'][0] < 0.:
+ warnings.warn(f'The lower bound of \'error_{item}\' is smaller than 0. The '
+ f'error inflation should be given relative to the model fluxes '
+ f'so the boundaries are typically between 0 and 1.')
+
+ if self.bounds[f'error_{item}'][1] < 0.:
+ warnings.warn(f'The upper bound of \'error_{item}\' is smaller than 0. The '
+ f'error inflation should be given relative to the model fluxes '
+ f'so the boundaries are typically between 0 and 1.')
+
+ if self.bounds[f'error_{item}'][0] > 1.:
+ warnings.warn(f'The lower bound of \'error_{item}\' is larger than 1. The '
+ f'error inflation should be given relative to the model fluxes '
+ f'so the boundaries are typically between 0 and 1.')
+
+ if self.bounds[f'error_{item}'][1] > 1.:
+ warnings.warn(f'The upper bound of \'error_{item}\' is larger than 1. The '
+ f'error inflation should be given relative to the model fluxes '
+ f'so the boundaries are typically between 0 and 1.')
+
if item in self.bounds:
del self.bounds[item]
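As a usage illustration of these bounds: for a spectrum stored under, say, a (hypothetical) 'SPHERE' tag, the flux scaling and error inflation priors are provided as a tuple of two tuples, from which the 'scaling_SPHERE' and 'error_SPHERE' parameters are created. A sketch of such a ``bounds`` dictionary (the grid parameters are placeholders):

.. code-block:: python

   bounds = {'teff': (1000., 2000.),  # atmospheric grid parameters
             'radius': (0.5, 2.),  # (Rjup)
             # (flux scaling bounds, error inflation bounds); the inflation
             # is relative to the model fluxes, so typically between 0 and 1
             'SPHERE': ((0.8, 1.2), (0., 1.))}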
@@ -939,8 +975,8 @@ def run_mcmc(self,
guess[f'scaling_{item}'] = guess[item][0]
if f'error_{item}' in self.bounds:
- sigma[f'error_{item}'] = 0.1 # (dex)
- guess[f'error_{item}'] = guess[item][1] # (dex)
+ sigma[f'error_{item}'] = 0.1
+ guess[f'error_{item}'] = guess[item][1]
if item in guess:
del guess[item]
@@ -1087,37 +1123,41 @@ def lnlike_multinest(cube,
n_dim: int,
n_param: int) -> np.float64:
"""
- Function for the logarithm of the likelihood, computed from the parameter cube.
+ Function for calculating the log-likelihood for the sampled parameter cube.
Parameters
----------
cube : pymultinest.run.LP_c_double
Unit cube.
n_dim : int
- Number of dimensions.
+ Number of dimensions. This parameter is mandatory but not used by the function.
n_param : int
- Number of parameters.
+ Number of parameters. This parameter is mandatory but not used by the function.
Returns
-------
float
- Log likelihood.
+ Log-likelihood.
"""
- param_dict = {}
+ # Initialize dictionaries for the different parameter types
+
spec_scaling = {}
- err_offset = {}
+ err_scaling = {}
corr_len = {}
corr_amp = {}
dust_param = {}
disk_param = {}
+ param_dict = {}
for item in self.bounds:
+ # Add the parameters from the cube to their dictionaries
+
if item[:8] == 'scaling_' and item[8:] in self.spectrum:
spec_scaling[item[8:]] = cube[cube_index[item]]
elif item[:6] == 'error_' and item[6:] in self.spectrum:
- err_offset[item[6:]] = cube[cube_index[item]] # log10(um)
+ err_scaling[item[6:]] = cube[cube_index[item]]
elif item[:9] == 'corr_len_' and item[9:] in self.spectrum:
corr_len[item[9:]] = 10.**cube[cube_index[item]] # (um)
@@ -1144,7 +1184,37 @@ def lnlike_multinest(cube,
param_dict[item] = cube[cube_index[item]]
for item in self.fix_param:
- param_dict[item] = self.fix_param[item]
+ # Add the fixed parameters to their dictionaries
+
+ if item[:8] == 'scaling_' and item[8:] in self.spectrum:
+ spec_scaling[item[8:]] = self.fix_param[item]
+
+ elif item[:6] == 'error_' and item[6:] in self.spectrum:
+ err_scaling[item[6:]] = self.fix_param[item]
+
+ elif item[:9] == 'corr_len_' and item[9:] in self.spectrum:
+ corr_len[item[9:]] = self.fix_param[item] # (um)
+
+ elif item[:9] == 'corr_amp_' and item[9:] in self.spectrum:
+ corr_amp[item[9:]] = self.fix_param[item]
+
+ elif item[:8] == 'lognorm_':
+ dust_param[item] = self.fix_param[item]
+
+ elif item[:9] == 'powerlaw_':
+ dust_param[item] = self.fix_param[item]
+
+ elif item[:4] == 'ism_':
+ dust_param[item] = self.fix_param[item]
+
+ elif item == 'disk_teff':
+ disk_param['teff'] = self.fix_param[item]
+
+ elif item == 'disk_radius':
+ disk_param['radius'] = self.fix_param[item]
+
+ else:
+ param_dict[item] = self.fix_param[item]
if self.model == 'planck' and self.n_planck > 1:
for i in range(self.n_planck-1):
@@ -1175,8 +1245,8 @@ def lnlike_multinest(cube,
if item not in spec_scaling:
spec_scaling[item] = 1.
- if item not in err_offset:
- err_offset[item] = None
+ if item not in err_scaling:
+ err_scaling[item] = None
if self.param_interp is not None:
# Sort the parameters in the correct order for spectrum_interp because
@@ -1287,45 +1357,61 @@ def lnlike_multinest(cube,
ln_like += -0.5 * np.log(2.*np.pi*phot_var)
for i, item in enumerate(self.spectrum.keys()):
- data_flux = spec_scaling[item]*self.spectrum[item][0][:, 1]
-
- if err_offset[item] is None:
- data_var = self.spectrum[item][0][:, 2]**2
- else:
- data_var = (self.spectrum[item][0][:, 2] + 10.**err_offset[item])**2
-
- if self.spectrum[item][2] is not None:
- if err_offset[item] is None:
- data_cov_inv = self.spectrum[item][2]
-
- else:
- # Ratio of the inflated and original uncertainties
- sigma_ratio = np.sqrt(data_var) / self.spectrum[item][0][:, 2]
- sigma_j, sigma_i = np.meshgrid(sigma_ratio, sigma_ratio)
-
- # Calculate the inversion of the infalted covariances
- data_cov_inv = np.linalg.inv(self.spectrum[item][1]*sigma_i*sigma_j)
+ # Calculate or interpolate the model spectrum
if self.model == 'planck':
+ # Calculate a blackbody spectrum
readplanck = read_planck.ReadPlanck((0.9*self.spectrum[item][0][0, 0],
1.1*self.spectrum[item][0][-1, 0]))
model_box = readplanck.get_spectrum(param_dict, 1000., smooth=True)
+ # Resample the spectrum to the observed wavelengths
model_flux = spectres.spectres(self.spectrum[item][0][:, 0],
model_box.wavelength,
model_box.flux)
else:
+ # Interpolate the model spectrum from the grid
model_flux = self.modelspec[i].spectrum_interp(list(param_dict.values()))[0, :]
+
+ # Scale the spectrum by (radius/distance)^2
model_flux *= flux_scaling
+ # Scale the spectrum data
+ data_flux = spec_scaling[item]*self.spectrum[item][0][:, 1]
+
+ if err_scaling[item] is None:
+ # Variance without error inflation
+ data_var = self.spectrum[item][0][:, 2]**2
+
+ else:
+ # Variance with error inflation (see Piette & Madhusudhan 2020)
+ data_var = self.spectrum[item][0][:, 2]**2 + (err_scaling[item]*model_flux)**2
+
+ if self.spectrum[item][2] is not None:
+ # The inverted covariance matrix is available
+
+ if err_scaling[item] is None:
+ # Use the inverted covariance matrix directly
+ data_cov_inv = self.spectrum[item][2]
+
+ else:
+ # Ratio of the inflated and original uncertainties
+ sigma_ratio = np.sqrt(data_var) / self.spectrum[item][0][:, 2]
+ sigma_j, sigma_i = np.meshgrid(sigma_ratio, sigma_ratio)
+
+ # Invert the covariance matrix with the inflated uncertainties
+ data_cov_inv = np.linalg.inv(self.spectrum[item][1]*sigma_i*sigma_j)
+
if disk_param:
model_tmp = self.diskspec[i].spectrum_interp([disk_param['teff']])[0, :]
- model_flux += model_tmp * (disk_param['radius']*constants.R_JUP)**2 / \
+ model_tmp *= (disk_param['radius']*constants.R_JUP)**2 / \
(self.distance[0]*constants.PARSEC)**2
+ model_flux += model_tmp
+
if 'lognorm_ext' in dust_param:
for j, cross_item in enumerate(self.cross_sections[item]):
cross_tmp = cross_item(dust_param['lognorm_sigma'],
@@ -1335,6 +1421,7 @@ def lnlike_multinest(cube,
elif 'powerlaw_ext' in dust_param:
for j, cross_item in enumerate(self.cross_sections[item]):
+ # Loop over the wavelengths of the spectrum
cross_tmp = cross_item(dust_param['powerlaw_exp'],
10.**dust_param['powerlaw_max'])[0]
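The covariance handling above can be summarized as follows: when the errors are inflated, the stored covariance matrix is rescaled element-wise by the ratio of the inflated and original uncertainties, C'_ij = C_ij s_i s_j, before inversion. A standalone numpy sketch with hypothetical inputs:

.. code-block:: python

   import numpy as np

   def invert_inflated_cov(data_cov, data_sigma, model_flux, err_scaling):
       # Inflated variance, as in the likelihood function
       data_var = data_sigma**2 + (err_scaling*model_flux)**2

       # Ratio of the inflated and original uncertainties
       sigma_ratio = np.sqrt(data_var)/data_sigma
       sigma_j, sigma_i = np.meshgrid(sigma_ratio, sigma_ratio)

       # Rescale the covariances and calculate the inverted matrix
       return np.linalg.inv(data_cov*sigma_i*sigma_j)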
diff --git a/species/data/companions.py b/species/data/companions.py
index 9cabcb9c..155e6394 100644
--- a/species/data/companions.py
+++ b/species/data/companions.py
@@ -150,21 +150,21 @@ def get_data() -> Dict[str, Dict[str, Union[Tuple[float, float],
'Paranal/SPHERE.IRDIS_B_Ks': (13.51, 0.20)}}, # Kennedy et al. 2020
'GQ Lup B': {'distance': (151.82, 1.10),
- 'app_mag': {'HST/WFPC2-PC.F606W': (19.19, 0.07), # Marois et al. 2006
- 'HST/WFPC2-PC.F814W': (17.67, 0.05), # Marois et al. 2006
- 'HST/NICMOS2.F171M': (13.84, 0.13), # Marois et al. 2006
- 'HST/NICMOS2.F190N': (14.08, 0.20), # Marois et al. 2006
- 'HST/NICMOS2.F215N': (13.40, 0.15), # Marois et al. 2006
- 'Magellan/VisAO.ip': (18.89, 0.24), # Wu et al. 20017
- 'Magellan/VisAO.zp': (16.40, 0.10), # Wu et al. 20017
- 'Magellan/VisAO.Ys': (15.88, 0.10), # Wu et al. 20017
+ 'app_mag': {'HST/WFPC2-PC.F606W': (19.19, 0.07), # Marois et al. 2007
+ 'HST/WFPC2-PC.F814W': (17.67, 0.05), # Marois et al. 2007
+ 'HST/NICMOS2.F171M': (13.84, 0.13), # Marois et al. 2007
+ 'HST/NICMOS2.F190N': (14.08, 0.20), # Marois et al. 2007
+ 'HST/NICMOS2.F215N': (13.40, 0.15), # Marois et al. 2007
+ 'Magellan/VisAO.ip': (18.89, 0.24), # Wu et al. 2017
+ 'Magellan/VisAO.zp': (16.40, 0.10), # Wu et al. 2017
+ 'Magellan/VisAO.Ys': (15.88, 0.10), # Wu et al. 2017
'Paranal/NACO.Ks': [(13.474, 0.031), # Ginski et al. 2014
(13.386, 0.032), # Ginski et al. 2014
(13.496, 0.050), # Ginski et al. 2014
- (13.501, 0.028), ], # Ginski et al. 2014
- 'Subaru/CIAO.CH4s': (13.76, 0.26), # Marois et al. 2006
- 'Subaru/CIAO.K': (13.37, 0.12), # Marois et al. 2006
- 'Subaru/CIAO.Lp': (12.44, 0.22)}}, # Marois et al. 2006
+ (13.501, 0.028)], # Ginski et al. 2014
+ 'Subaru/CIAO.CH4s': (13.76, 0.26), # Marois et al. 2007
+ 'Subaru/CIAO.K': (13.37, 0.12), # Marois et al. 2007
+ 'Subaru/CIAO.Lp': (12.44, 0.22)}}, # Marois et al. 2007
'PZ Tel B': {'distance': (47.13, 0.13),
'app_mag': {'Paranal/SPHERE.ZIMPOL_R_PRIM': (17.84, 0.31), # Maire et al. 2015
diff --git a/species/data/database.py b/species/data/database.py
index 87811a69..e088a7b8 100644
--- a/species/data/database.py
+++ b/species/data/database.py
@@ -539,7 +539,7 @@ def add_object(self,
Tuple[str,
Optional[str],
Optional[float]]]] = None,
- deredden: Dict[str, float] = None) -> None:
+ deredden: Union[Dict[str, float], float] = None) -> None:
"""
Function for adding the photometric and/or spectroscopic data of an object to the database.
@@ -573,7 +573,7 @@ def add_object(self,
50.)}``. No covariance data is stored if set to None, for example, ``{'SPHERE':
('spectrum.dat', None, 50.)}``. The ``spectrum`` parameter is ignored if set to None.
For GRAVITY data, the same FITS file can be provided as spectrum and covariance matrix.
- deredden : dict, None
+ deredden : dict, float, None
Dictionary with ``spectrum`` and ``app_mag`` names that will be dereddened with the
provided A_V. For example, ``deredden={'SPHERE': 1.5, 'Keck/NIRC2.J': 1.5}`` will
deredden the provided spectrum named 'SPHERE' and the Keck/NIRC2 J-band photometry with
@@ -630,12 +630,16 @@ def add_object(self,
if app_mag is not None:
for mag_item in app_mag:
- if mag_item in deredden:
+ if isinstance(deredden, float) or mag_item in deredden:
read_filt = read_filter.ReadFilter(mag_item)
filter_profile = read_filt.get_filter()
- ext_mag = dust_util.ism_extinction(deredden[mag_item], 3.1,
- filter_profile[:, 0])
+ if isinstance(deredden, float):
+ ext_mag = dust_util.ism_extinction(deredden, 3.1, filter_profile[:, 0])
+
+ else:
+ ext_mag = dust_util.ism_extinction(deredden[mag_item], 3.1,
+ filter_profile[:, 0])
synphot = photometry.SyntheticPhotometry(mag_item)
@@ -719,7 +723,10 @@ def add_object(self,
print(f' - Flux (W m-2 um-1) = {flux[mag_item]:.2e} +/- '
f'{error[mag_item]:.2e}')
- if mag_item in deredden:
+ if isinstance(deredden, float):
+ print(f' - Dereddening A_V: {deredden}')
+
+ elif mag_item in deredden:
print(f' - Dereddening A_V: {deredden[mag_item]}')
data = np.asarray([app_mag[mag_item][0],
@@ -747,7 +754,10 @@ def add_object(self,
mag_list.append(app_mag_item[0])
mag_err_list.append(app_mag_item[1])
- if mag_item in deredden:
+ if isinstance(deredden, float):
+ print(f' - Dereddening A_V: {deredden}')
+
+ elif mag_item in deredden:
print(f' - Dereddening A_V: {deredden[mag_item]}')
data = np.asarray([mag_list,
@@ -763,7 +773,7 @@ def add_object(self,
if flux_density is not None:
for flux_item in flux_density:
- if flux_item in deredden:
+ if isinstance(deredden, float) or flux_item in deredden:
warnings.warn(f'The deredden parameter is not supported by flux_density. '
f'Please use app_mag instead or contact Tomas Stolker and ask '
f'if the flux_density support can be added. Ignoring the '
@@ -845,7 +855,11 @@ def add_object(self,
print(' - Spectrum:')
read_spec[key] = data
- if key in deredden:
+ if isinstance(deredden, float):
+ ext_mag = dust_util.ism_extinction(deredden, 3.1, read_spec[key][:, 0])
+ read_spec[key][:, 1] *= 10.**(0.4*ext_mag)
+
+ elif key in deredden:
ext_mag = dust_util.ism_extinction(deredden[key], 3.1, read_spec[key][:, 0])
read_spec[key][:, 1] *= 10.**(0.4*ext_mag)
@@ -860,7 +874,10 @@ def add_object(self,
print(f' - Mean flux (W m-2 um-1): {np.mean(flux):.2e}')
print(f' - Mean error (W m-2 um-1): {np.mean(error):.2e}')
- if key in deredden:
+ if isinstance(deredden, float):
+ print(f' - Dereddening A_V: {deredden}')
+
+ elif key in deredden:
print(f' - Dereddening A_V: {deredden[key]}')
# Read covariance matrix
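With this change, a single extinction value can be applied to all data at once by passing a float instead of a dictionary. A hedged usage sketch (the magnitude is taken from the companion data above; other ``add_object`` arguments are omitted):

.. code-block:: python

   >>> import species
   >>> species.SpeciesInit()
   >>> database = species.Database()

   >>> # Deredden all photometry and spectra with A_V = 1.5
   >>> database.add_object('GQ Lup B',
   ...                     app_mag={'Paranal/NACO.Ks': (13.474, 0.031)},
   ...                     deredden=1.5)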
diff --git a/species/plot/plot_color.py b/species/plot/plot_color.py
index cd1a6be7..aa9f2403 100644
--- a/species/plot/plot_color.py
+++ b/species/plot/plot_color.py
@@ -52,7 +52,7 @@ def plot_color_magnitude(boxes: list,
objects : list(tuple(str, str, str, str), ),
list(tuple(str, str, str, str, dict, dict), ), None
Tuple with individual objects. The objects require a tuple with their database tag, the two
- filter names for the color, and the filter names for the absolute magnitude. Optionally, a
+ filter names for the color, and the filter name for the absolute magnitude. Optionally, a
dictionary with keyword arguments can be provided for the object's marker and label,
respectively. For example, ``{'marker': 'o', 'ms': 10}`` for the marker and
``{'ha': 'left', 'va': 'bottom', 'xytext': (5, 5)})`` for the label. Not used if set to
diff --git a/species/plot/plot_mcmc.py b/species/plot/plot_mcmc.py
index 60852da1..c33ae19a 100644
--- a/species/plot/plot_mcmc.py
+++ b/species/plot/plot_mcmc.py
@@ -65,7 +65,7 @@ def plot_walkers(tag: str,
f'for plotting the walkers. The plot_walkers function can only be '
f'used after running the MCMC with run_mcmc and not after running '
f'MultiNest with run_multinest.')
-
+
ndim = samples.shape[-1]
plt.figure(1, figsize=(6, ndim*1.5))
@@ -202,6 +202,13 @@ def plot_posterior(tag: str,
luminosity = 4. * np.pi * (samples[..., radius_index]*constants.R_JUP)**2 * \
constants.SIGMA_SB * samples[..., teff_index]**4. / constants.L_SUN
+ if 'disk_teff' in box.parameters and 'disk_radius' in box.parameters:
+ teff_index = np.argwhere(np.array(box.parameters) == 'disk_teff')[0]
+ radius_index = np.argwhere(np.array(box.parameters) == 'disk_radius')[0]
+
+ luminosity += 4. * np.pi * (samples[..., radius_index]*constants.R_JUP)**2 * \
+ constants.SIGMA_SB * samples[..., teff_index]**4. / constants.L_SUN
+
samples = np.append(samples, np.log10(luminosity), axis=-1)
box.parameters.append('luminosity')
ndim += 1
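The added block sums the bolometric luminosities of the atmosphere and the disk component, both from L = 4π R² σ_SB T_eff⁴. A standalone sketch of the same calculation, with the constants written out (values assumed, not taken from ``species.core.constants``):

.. code-block:: python

   import numpy as np

   SIGMA_SB = 5.670374e-8  # (W m-2 K-4)
   L_SUN = 3.828e26  # (W)
   R_JUP = 7.1492e7  # (m)

   def luminosity(teff, radius, disk_teff=None, disk_radius=None):
       # L = 4 pi R^2 sigma_sb Teff^4, in units of L_sun
       lum = 4.*np.pi*(radius*R_JUP)**2*SIGMA_SB*teff**4/L_SUN

       if disk_teff is not None and disk_radius is not None:
           # Add the blackbody luminosity of the disk component
           lum += 4.*np.pi*(disk_radius*R_JUP)**2*SIGMA_SB*disk_teff**4/L_SUN

       # Apply np.log10 for log(L/Lsun), as appended to the samples
       return lum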
@@ -283,10 +290,6 @@ def plot_posterior(tag: str,
else:
warnings.warn('Samples with the log(g) and radius are required for \'inc_mass=True\'.')
- if isinstance(title_fmt, list) and len(title_fmt) != ndim:
- raise ValueError(f'The number of items in the list of \'title_fmt\' ({len(title_fmt)}) is '
- f'not equal to the number of dimensions of the samples ({ndim}).')
-
labels = plot_util.update_labels(box.parameters)
# Check if parameter values were fixed
@@ -308,6 +311,10 @@ def plot_posterior(tag: str,
ndim -= len(index_del)
+ if isinstance(title_fmt, list) and len(title_fmt) != ndim:
+ raise ValueError(f'The number of items in the list of \'title_fmt\' ({len(title_fmt)}) is '
+ f'not equal to the number of dimensions of the samples ({ndim}).')
+
samples = samples.reshape((-1, ndim))
hist_titles = []
@@ -486,7 +493,7 @@ def plot_size_distributions(tag: str,
offset: Optional[Tuple[float, float]] = None,
output: str = 'size_distributions.pdf') -> None:
"""
- Function to plot random samples of the log-normal or power-law size distribution.
+ Function to plot random samples of the log-normal or power-law size distributions.
Parameters
----------
@@ -590,6 +597,9 @@ def plot_size_distributions(tag: str,
if 'lognorm_radius' in box.parameters:
dn_dr, _, radii = dust_util.log_normal_distribution(10.**log_r_g[i], sigma_g[i], 1000)
+ # Set the number density to zero for grain radii smaller than 1 nm
+ dn_dr[radii < 1e-3] = 0.
+
elif 'powerlaw_max' in box.parameters:
dn_dr, _, radii = dust_util.power_law_distribution(
exponent[i], 1e-3, 10.**r_max[i], 1000)
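For context, the log-normal size distribution that is sampled here has the form dn/dr ∝ (1/r) exp(-ln²(r/r_g) / (2 ln² σ_g)). A numpy sketch of such a distribution, including the 1 nm cut added above (an illustration with arbitrary normalization, not the ``dust_util`` implementation):

.. code-block:: python

   import numpy as np

   def log_normal_dn_dr(radii, r_g, sigma_g):
       # dn/dr of a log-normal size distribution (radii and r_g in um)
       dn_dr = 1./(radii*np.log(sigma_g)*np.sqrt(2.*np.pi)) * \
           np.exp(-np.log(radii/r_g)**2/(2.*np.log(sigma_g)**2))

       # Set the number density to zero for grain radii smaller than 1 nm
       dn_dr[radii < 1e-3] = 0.

       return dn_dr

   radii = np.logspace(-4., 2., 1000)  # (um)
   dn_dr = log_normal_dn_dr(radii, r_g=0.1, sigma_g=2.)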
diff --git a/species/plot/plot_spectrum.py b/species/plot/plot_spectrum.py
index 77688c0e..c0e41d30 100644
--- a/species/plot/plot_spectrum.py
+++ b/species/plot/plot_spectrum.py
@@ -230,7 +230,7 @@ def plot_spectrum(boxes: list,
ax1.set_xlabel('Wavelength (µm)', fontsize=13)
if filters is not None:
- ax2.set_ylabel('Transmission', fontsize=13)
+ ax2.set_ylabel(r'T$_\lambda$', fontsize=13)
if residuals is not None:
if quantity == 'flux density':
diff --git a/species/util/plot_util.py b/species/util/plot_util.py
index e5e752b8..e651aa65 100644
--- a/species/util/plot_util.py
+++ b/species/util/plot_util.py
@@ -266,15 +266,15 @@ def update_labels(param: List[str]) -> List[str]:
if 'log_powerlaw_a' in param:
index = param.index('log_powerlaw_a')
- param[index] = r'$\mathregular{log}\,a$'
+ param[index] = r'$a_\mathregular{powerlaw}$'
if 'log_powerlaw_b' in param:
index = param.index('log_powerlaw_b')
- param[index] = r'$\mathregular{log}\,b$'
+ param[index] = r'$b_\mathregular{powerlaw}$'
if 'log_powerlaw_c' in param:
index = param.index('log_powerlaw_c')
- param[index] = r'$\mathregular{log}\,c$'
+ param[index] = r'$c_\mathregular{powerlaw}$'
return param
diff --git a/species/util/read_util.py b/species/util/read_util.py
index cdf7a1a9..4e6231fd 100644
--- a/species/util/read_util.py
+++ b/species/util/read_util.py
@@ -5,13 +5,13 @@
import math
import warnings
-from typing import Dict, Tuple, Union
+from typing import Dict, Optional, Tuple, Union
import numpy as np
-from typeguard import typechecked
from scipy.integrate import simps
from scipy.ndimage.filters import gaussian_filter
+from typeguard import typechecked
from species.core import box, constants
from species.read import read_model, read_planck
@@ -96,21 +96,27 @@ def add_luminosity(modelbox):
@typechecked
def update_spectra(objectbox: box.ObjectBox,
- model_param: Dict[str, float]) -> box.ObjectBox:
+ model_param: Dict[str, float],
+ model: Optional[str] = None) -> box.ObjectBox:
"""
- Function for applying a best-fit scaling and/or error inflation to the spectra of an object.
+ Function for applying a flux scaling and/or error inflation to the spectra of an
+ :class:`~species.core.box.ObjectBox`.
Parameters
----------
objectbox : species.core.box.ObjectBox
Box with the object's data, including the spectra.
model_param : dict
- Model parameter values. Should contain the scaling and/or error inflation values.
+ Dictionary with the model parameters. Should contain the value(s) of the flux scaling
+ and/or the error inflation.
+ model : str, None
+ Name of the atmospheric model. Only required for inflating the errors. Otherwise, the
+ argument can be set to ``None``.
Returns
-------
species.core.box.ObjectBox
- The input box with the scaled and/or error inflated spectra.
+ The input box which includes the spectra with the scaled fluxes and/or inflated errors.
"""
if objectbox.flux is not None:
@@ -129,26 +135,41 @@ def update_spectra(objectbox: box.ObjectBox,
objectbox.flux[key] = value
if objectbox.spectrum is not None:
+ # Check if there are any spectra
for key, value in objectbox.spectrum.items():
+ # Get the spectrum (3 columns)
spec_tmp = value[0]
if f'scaling_{key}' in model_param:
+ # Scale the flux of the spectrum
scaling = model_param[f'scaling_{key}']
print(f'Scaling the flux of {key}: {scaling:.2f}...', end='', flush=True)
spec_tmp[:, 1] *= model_param[f'scaling_{key}']
- # spec_tmp[:, 2] *= model_param[f'scaling_{key}']
print(' [DONE]')
if f'error_{key}' in model_param:
- error = 10.**model_param[f'error_{key}']
- log_msg = f'Inflating the error of {key} (W m-2 um-1): {error:.2e}...'
+ # Calculate the model spectrum
+ wavel_range = (0.9*spec_tmp[0, 0], 1.1*spec_tmp[-1, 0])
+ readmodel = read_model.ReadModel(model, wavel_range=wavel_range)
+
+ model_box = readmodel.get_model(model_param,
+ spec_res=value[3],
+ wavel_resample=spec_tmp[:, 0],
+ smooth=True)
+
+ # Scale the errors relative to the model spectrum
+ err_scaling = model_param[f'error_{key}']
+ log_msg = f'Inflating the error of {key}: {err_scaling:.2e}...'
print(log_msg, end='', flush=True)
- spec_tmp[:, 2] += error
+ spec_tmp[:, 2] = np.sqrt(spec_tmp[:, 2]**2 + (err_scaling*model_box.flux)**2)
print(' [DONE]')
+ # Store the spectra with the scaled fluxes and/or errors
+ # The other three elements (i.e. the covariance matrix, the inverted covariance matrix,
+ # and the spectral resolution) remain unaffected
objectbox.spectrum[key] = (spec_tmp, value[1], value[2], value[3])
return objectbox
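A hedged usage sketch of the updated function, assuming an ``ObjectBox`` and a dictionary with (for example) median posterior parameters have already been retrieved, for instance with ``Database.get_object`` and ``Database.get_median_sample``:

.. code-block:: python

   >>> from species.util import read_util

   >>> # Apply the best-fit scaling and error inflation; the model name is
   >>> # required so the model fluxes can be computed for the inflation
   >>> objectbox = read_util.update_spectra(objectbox,
   ...                                      model_param=median_param,
   ...                                      model='drift-phoenix')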