Transition to Ultranest #205

Draft pull request: wants to merge 5 commits into `master`.
7 changes: 7 additions & 0 deletions ompy/ensembleNormalizer.py
@@ -83,7 +83,14 @@ def __init__(self, *, extractor: Extractor,
self.normalizer_nld = copy.deepcopy(normalizer_nld)
self.normalizer_gsf = copy.deepcopy(normalizer_gsf)

if self.normalizer_nld is not None:
self.normalizer_nld.ultranest_kwargs['viz_callback'] = False
self.normalizer_nld.ultranest_kwargs['show_status'] = False

self.normalizer_simultan = copy.deepcopy(normalizer_simultan)
if self.normalizer_simultan is not None:
self.normalizer_simultan.ultranest_kwargs['viz_callback'] = False
self.normalizer_simultan.ultranest_kwargs['show_status'] = False

self.nprocesses: int = cpu_count()-1 if cpu_count() > 1 else 1

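For context on the two flags above: `ultranest_kwargs` is forwarded verbatim to `sampler.run()` inside the normalizers, and per the ultranest documentation `viz_callback` and `show_status` are `run()` keywords controlling the live point visualisation and the status line. Disabling both keeps the parallel worker processes from flooding stdout. A minimal standalone sketch (toy problem, hypothetical parameter names):

```python
import ultranest

def loglike(params):
    return -0.5 * sum(p**2 for p in params)  # toy Gaussian likelihood

def transform(cube):
    return 20 * cube - 10  # map the unit cube to [-10, 10]

sampler = ultranest.ReactiveNestedSampler(['a', 'b'], loglike,
                                          transform=transform)
result = sampler.run(viz_callback=False, show_status=False)  # quiet run
```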
110 changes: 64 additions & 46 deletions ompy/normalizer_nld.py
@@ -3,7 +3,7 @@
import logging
import termtables as tt
import json
import pymultinest
import ultranest
import matplotlib.pyplot as plt
import warnings
from contextlib import redirect_stdout
@@ -12,7 +12,7 @@
from typing import Optional, Tuple, Any, Union, Callable, Dict
from pathlib import Path

from .stats import truncnorm_ppf
from .stats import truncnorm_ppf, truncnorm_ppf_vec
from .vector import Vector
from .library import self_if_none
from .spinfunctions import SpinFunctions
@@ -118,8 +118,9 @@ def __init__(self, *,
self.model: Optional[Callable[..., ndarray]] = self.const_temperature
# self.curried_model = lambda *arg: None
self.de_kwargs = {"seed": 65424}
self.multinest_path = Path('multinest')
self.multinest_kwargs: dict = {"seed": 65498, "resume": False}
self.ultranest_path: Optional[Path] = Path('ultranest')
self.ultranest_kwargs: dict = \
{'region_class': ultranest.mlfriends.MLFriends}

# Handle the method parameters
self.smooth_levels_fwhm = 0.1
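A note on the `ultranest_kwargs` default set above: as far as the ultranest documentation goes, `region_class` is a `ReactiveNestedSampler.run()` keyword that already defaults to `MLFriends`, so the explicit entry mainly documents the choice and makes it easy to override. A hedged sketch of such an override (`normalizer` is a hypothetical instance; `RobustEllipsoidRegion` is the alternative region constructor ultranest suggests for harder geometries):

```python
import ultranest.mlfriends

# Hypothetical override on an already-constructed normalizer:
normalizer.ultranest_kwargs['region_class'] = \
    ultranest.mlfriends.RobustEllipsoidRegion
```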
@@ -287,7 +288,8 @@ def neglnlike(*args, **kwargs):
return args, p0

def optimize(self, num: int, args,
guess: Dict[str, float]) -> Tuple[Dict[str, float], Dict[str, float]]:
guess: Dict[str, float]) -> Tuple[Dict[str, float],
Dict[str, float]]:
"""Find parameters given model constraints and an initial guess

Employs Ultranest
@@ -336,55 +338,59 @@ def optimize(self, num: int, args,
a_Eshift = (lower_Eshift - mu_Eshift) / sigma_Eshift
b_Eshift = (upper_Eshift - mu_Eshift) / sigma_Eshift

def prior(cube, ndim, nparams):
def prior(cube):
cube = cube.copy().T
# NOTE: You may want to adjust this for your case!
# truncated normal prior
cube[0] = truncnorm_ppf(cube[0], a_A, b_A)*sigma_A+mu_A
cube[0] = truncnorm_ppf_vec(cube[0], a_A, b_A)*sigma_A+mu_A
# log-uniform prior
# if alpha = 1e2, it's between 1e1 and 1e3
cube[1] = 10**(cube[1]*2 + (alpha_exponent-1))
# log-uniform prior
# if T = 1e2, it's between 1e1 and 1e3
cube[2] = 10**(cube[2]*2 + (T_exponent-1))
# truncated normal prior
cube[3] = truncnorm_ppf(cube[3], a_Eshift,
b_Eshift)*sigma_Eshift + mu_Eshift
cube[3] = truncnorm_ppf_vec(cube[3], a_Eshift,
b_Eshift)*sigma_Eshift + mu_Eshift

if np.isinf(cube[3]):
if np.any(np.isinf(cube[3])):
self.LOG.debug("Encountered inf in cube[3]:\n%s", cube[3])
return cube.T

def loglike(cube, ndim, nparams):
def loglike(cube):
return self.lnlike(cube, *args)

self.multinest_path.mkdir(exist_ok=True)
path = self.multinest_path / f"nld_norm_{num}_"
self.ultranest_path.mkdir(exist_ok=True)
path = self.ultranest_path / f"nld_norm_{num}"
assert len(str(path)) < 60, "Total path length too long"

self.LOG.info("Starting multinest")
self.LOG.debug("with following keywords %s:", self.multinest_kwargs)
# Hack where stdout from Multinest is redirected as info messages
self.LOG.write = lambda msg: (self.LOG.info(msg) if msg != '\n'
else None)
with redirect_stdout(self.LOG):
pymultinest.run(loglike, prior, len(guess),
outputfiles_basename=str(path),
**self.multinest_kwargs)

# Save parameters for analyzer
names = list(guess.keys())
json.dump(names, open(str(path) + 'params.json', 'w'))
analyzer = pymultinest.Analyzer(len(guess),
outputfiles_basename=str(path))

stats = analyzer.get_stats()

samples = analyzer.get_equal_weighted_posterior()[:, :-1]
self.LOG.info("Starting Ultranest")
self.LOG.debug("with following keywords %s:", self.ultranest_kwargs)
sampler = ultranest.ReactiveNestedSampler(names, loglike,
transform=prior,
log_dir=path,
vectorized=True,
resume="overwrite",
storage_backend="csv")

result = sampler.run(**self.ultranest_kwargs)

stats = []
for i in range(len(result['posterior']['mean'])):
stats.append({'mean': result['posterior']['mean'][i],
'median': result['posterior']['median'][i],
'1sigma': (result['posterior']['errlo'][i],
result['posterior']['errup'][i])})

samples = result['samples']
samples = dict(zip(names, samples.T))

# Format the output
popt = dict()
vals = []
for name, m in zip(names, stats['marginals']):
for name, m in zip(names, stats):
lo, hi = m['1sigma']
med = m['median']
sigma = (hi - lo) / 2
@@ -394,7 +400,7 @@ def loglike(cube, ndim, nparams):
fmts = '\t'.join([fmt + " ± " + fmt])
vals.append(fmts % (med, sigma))

self.LOG.info("Multinest results:\n%s", tt.to_string([vals],
self.LOG.info("Ultranest results:\n%s", tt.to_string([vals],
header=['A', 'α [MeV⁻¹]', 'T [MeV]', 'Eshift [MeV]']))

return popt, samples
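Two ultranest conventions used in `optimize` are worth spelling out. With `vectorized=True`, the transform and likelihood receive arrays of shape `(n_points, n_params)` rather than single points, which is why `prior` transposes the cube and switches to `truncnorm_ppf_vec`. And `run()` returns a result dict whose `'posterior'` entry holds per-parameter summary lists (`'mean'`, `'median'`, `'errlo'`, `'errup'`), which the loop above repacks for the table. A sketch under those conventions (`prior` is the transform defined above; `summarize` is a hypothetical helper mirroring the repacking loop):

```python
import numpy as np

u = np.random.rand(100, 4)     # 100 unit-cube points, 4 parameters
theta = prior(u)               # each column mapped to physical units
assert theta.shape == u.shape  # ultranest expects the shape preserved

def summarize(result: dict) -> list:
    """Repack ultranest's posterior summary into one dict per parameter."""
    post = result['posterior']
    return [{'mean': mean, 'median': med, '1sigma': (lo, hi)}
            for mean, med, lo, hi in zip(post['mean'], post['median'],
                                         post['errlo'], post['errup'])]
```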
@@ -482,10 +488,10 @@ def plot(self, *, ax: Any = None,
return fig, ax

@staticmethod
def lnlike(x: Tuple[float, float, float, float], nld_low: Vector,
def lnlike(x: ndarray, nld_low: Vector,
nld_high: Vector, discrete: Vector,
model: Callable[..., ndarray],
Sn, nldSn) -> float:
model: Callable[..., ndarray], Sn: float,
nldSn: float) -> Union[float, ndarray]:
""" Compute log likelihood of the normalization fitting

This is the result up to a constant, which is irrelevant for the maximization
@@ -503,23 +509,35 @@ def lnlike(x: Tuple[float, float, float, float], nld_low: Vector,

Returns:
lnlike: log likelihood

TODO: This version is significantly quicker than the previous one
and can take arbitrarily sized parameter arrays, provided that the
second axis of x has size 4. However, the code is more complex than
the original and not as easy to read. One might consider improving
the readability; the question is whether that would affect the
performance.
"""
A, alpha, T, Eshift = x[:4] # slicing needed for multinest?
transformed_low = nld_low.transform(A, alpha, inplace=False)
transformed_high = nld_high.transform(A, alpha, inplace=False)

err_low = transformed_low.error(discrete)
expected = Vector(E=transformed_high.E,
values=model(E=transformed_high.E,
T=T, Eshift=Eshift))
err_high = transformed_high.error(expected)
A, alpha, T, Eshift = x.T

transformed_low = A*nld_low.values[np.newaxis].T * \
np.exp(alpha*nld_low.E[np.newaxis].T)
std_low = A*nld_low.std[np.newaxis].T * \
np.exp(alpha*nld_low.E[np.newaxis].T)
err_low = np.sum(((transformed_low.T - discrete.values)/std_low.T)**2,
axis=1)

transformed_high = A*nld_high.values[np.newaxis].T * \
np.exp(alpha*nld_high.E[np.newaxis].T)
std_high = A*nld_high.std[np.newaxis].T * \
np.exp(alpha*nld_high.E[np.newaxis].T)
expected = model(E=nld_high.E[np.newaxis].T, T=T, Eshift=Eshift)
err_high = np.sum(((transformed_high - expected)/std_high)**2, axis=0)

nldSn_model = model(E=Sn, T=T, Eshift=Eshift)
err_nldSn = ((nldSn[0] - nldSn_model)/nldSn[1])**2

ln_stds = (np.log(transformed_low.std).sum()
+ np.log(transformed_high.std).sum())

ln_stds = np.log(std_low).sum(axis=0) + np.log(std_high).sum(axis=0)
return -0.5*(err_low + err_high + err_nldSn + ln_stds)
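The rewritten `lnlike` leans entirely on NumPy broadcasting: each parameter from `x.T` is a length-`n_points` row, the energy and value arrays become `(n_E, 1)` columns via `np.newaxis`, and their products broadcast to `(n_E, n_points)`; summing over the energy axis then returns one log-likelihood per sampled point. A shape walk-through with hypothetical sizes:

```python
import numpy as np

n_points, n_E = 100, 20
A, alpha = np.random.rand(2, n_points)       # one value per sampled point
E = np.random.rand(n_E)[np.newaxis].T        # (n_E, 1) column of energies
values = np.random.rand(n_E)[np.newaxis].T   # (n_E, 1) column of densities

transformed = A * values * np.exp(alpha * E)  # broadcasts to (n_E, n_points)
assert transformed.shape == (n_E, n_points)

err = np.sum(transformed**2, axis=0)  # collapse the energy axis ...
assert err.shape == (n_points,)       # ... one entry per sampled point
```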

76 changes: 41 additions & 35 deletions ompy/normalizer_simultan.py
@@ -7,19 +7,20 @@
from pathlib import Path
from typing import Optional, Union, Tuple, Any, Callable, Dict, Iterable, List
import pymultinest
import ultranest
import matplotlib.pyplot as plt
from contextlib import redirect_stdout

from .abstract_normalizer import AbstractNormalizer
from .extractor import Extractor
from .library import log_interp1d, self_if_none
from .models import Model, ResultsNormalized, ExtrapolationModelLow,\
ExtrapolationModelHigh, NormalizationParameters
from .models import (Model, ResultsNormalized, ExtrapolationModelLow,
ExtrapolationModelHigh, NormalizationParameters)
from .normalizer_nld import NormalizerNLD
from .normalizer_gsf import NormalizerGSF
from .spinfunctions import SpinFunctions
from .vector import Vector
from .stats import truncnorm_ppf
from .stats import truncnorm_ppf, truncnorm_ppf_vec


class NormalizerSimultan(AbstractNormalizer):
@@ -88,8 +89,9 @@ def __init__(self, *,

self.res: Optional[ResultsNormalized] = None

self.multinest_path: Optional[Path] = Path('multinest')
self.multinest_kwargs: dict = {"seed": 65498, "resume": False}
self.ultranest_path: Optional[Path] = Path('ultranest')
self.ultranest_kwargs: dict = \
{'region_class': ultranest.mlfriends.MLFriends}

if path is None:
self.path = None
@@ -270,10 +272,11 @@ def optimize(self, num: int,
a_B = (lower_B - mu_B) / sigma_B
b_B = (upper_B - mu_B) / sigma_B

def prior(cube, ndim, nparams):
def prior(cube):
cube = cube.copy().T
# NOTE: You may want to adjust this for your case!
# truncated normal prior
cube[0] = truncnorm_ppf(cube[0], a_A, b_A)*sigma_A + mu_A
cube[0] = truncnorm_ppf_vec(cube[0], a_A, b_A)*sigma_A + mu_A

# log-uniform prior
# if alpha = 1e2, it's between 1e1 and 1e3
@@ -282,50 +285,53 @@ def prior(cube, ndim, nparams):
# if T = 1e2, it's between 1e1 and 1e3
cube[2] = 10**(cube[2]*2 + (T_exponent-1))
# truncated normal prior
cube[3] = truncnorm_ppf(cube[3], a_Eshift,
b_Eshift)*sigma_Eshift + mu_Eshift
cube[3] = truncnorm_ppf_vec(cube[3], a_Eshift,
b_Eshift)*sigma_Eshift + mu_Eshift
# truncated normal prior
cube[4] = truncnorm_ppf(cube[4], a_B, b_B)*sigma_B + mu_B
cube[4] = truncnorm_ppf_vec(cube[4], a_B, b_B)*sigma_B + mu_B

if np.isinf(cube[3]):
if np.any(np.isinf(cube[3])):
self.LOG.debug("Encountered inf in cube[3]:\n%s", cube[3])
return cube.T

def loglike(cube, ndim, nparams):
return self.lnlike(cube, args_nld=args_nld)
def loglike(cube):
# lnlike is vectorized; for a single point it returns a
# length-1 array, so cast it to a plain float for ultranest.
lnl = self.lnlike(cube, args_nld=args_nld)
return float(lnl)

# parameters are changed in the lnlike
norm_pars_org = copy.deepcopy(self.normalizer_gsf.norm_pars)

self.multinest_path.mkdir(exist_ok=True)
path = self.multinest_path / f"sim_norm_{num}_"
self.ultranest_path.mkdir(exist_ok=True)
path = self.ultranest_path / f"sim_norm_{num}"
assert len(str(path)) < 60, "Total path length too long"

self.LOG.info("Starting multinest: ")
self.LOG.debug("with following keywords %s:", self.multinest_kwargs)
# Hack where stdout from Multinest is redirected as info messages
self.LOG.write = lambda msg: (self.LOG.info(msg) if msg != '\n'
else None)

with redirect_stdout(self.LOG):
pymultinest.run(loglike, prior, len(guess),
outputfiles_basename=str(path),
**self.multinest_kwargs)

# Save parameters for analyzer
names = list(guess.keys())
json.dump(names, open(str(path) + 'params.json', 'w'))
analyzer = pymultinest.Analyzer(len(guess),
outputfiles_basename=str(path))

stats = analyzer.get_stats()

samples = analyzer.get_equal_weighted_posterior()[:, :-1]
self.LOG.info("Starting Ultranest")
self.LOG.debug("with following keywords %s:", self.ultranest_kwargs)
sampler = ultranest.ReactiveNestedSampler(names, loglike,
transform=prior,
log_dir=path,
vectorized=False,
resume="overwrite",
storage_backend="csv")

result = sampler.run(**self.ultranest_kwargs)

stats = []
for i in range(len(result['posterior']['mean'])):
stats.append({'mean': result['posterior']['mean'][i],
'median': result['posterior']['median'][i],
'1sigma': (result['posterior']['errlo'][i],
result['posterior']['errup'][i])})

samples = result['samples']
samples = dict(zip(names, samples.T))

# Format the output
popt = dict()
vals = []
for name, m in zip(names, stats['marginals']):
for name, m in zip(names, stats):
lo, hi = m['1sigma']
med = m['median']
sigma = (hi - lo) / 2
@@ -335,7 +341,7 @@ def loglike(cube, ndim, nparams):
fmts = '\t'.join([fmt + " ± " + fmt])
vals.append(fmts % (med, sigma))

self.LOG.info("Multinest results:\n%s", tt.to_string([vals],
self.LOG.info("Ultranest results:\n%s", tt.to_string([vals],
header=['A', 'α [MeV⁻¹]', 'T [MeV]',
'Eshift [MeV]', 'B']))

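Unlike the NLD-only normalizer, the sampler here is constructed with `vectorized=False`, presumably because the gsf part of the likelihood cannot be evaluated on a batch of points; each call then gets a single 1-D parameter vector. The shared prior code still works because transposing a 1-D array is a no-op, as this small check illustrates:

```python
import numpy as np

u = np.random.rand(5)        # one 5-parameter point, as with vectorized=False
assert u.T.shape == u.shape  # .T does nothing to 1-D arrays, so the same
                             # transform serves both code paths
```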
2 changes: 1 addition & 1 deletion ompy/spinfunctions.py
@@ -180,7 +180,7 @@ def gDisc_and_EB05(self, mass: int, NLDa: float, Eshift: float, Sn: float,
sigma2_Sn = self.gEB05(mass, NLDa, Eshift, Ex=Sn)
sigma2_EB05 = lambda Ex: self.gEB05(mass, NLDa, Eshift, Ex=Ex) # noqa
x = [sigma2_disc[0], Sn]
y = [sigma2_disc[1], sigma2_EB05(Sn)]
y = [sigma2_disc[1], float(sigma2_EB05(Sn))]
sigma2 = interp1d(x, y,
bounds_error=False,
fill_value=(sigma2_disc[1], sigma2_Sn))
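On the added `float(...)` cast: a vectorized formula like `gEB05` can return a 0-d or length-1 ndarray even for a scalar `Ex`, and the cast keeps both interpolation knots plain floats before they are handed to `interp1d`. A minimal illustration (hypothetical value):

```python
import numpy as np

val = np.asarray(3.14)  # 0-d array, as a vectorized formula may return
y = [1.0, float(val)]   # both knots are now plain Python floats
```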