bugfix for loguniform transform_samples; pep8 cleanup
sblunt committed Jul 7, 2023
1 parent fa8b4a1 commit 4d2e0da
Showing 1 changed file with 82 additions and 61 deletions.
143 changes: 82 additions & 61 deletions orbitize/priors.py
@@ -1,8 +1,6 @@
import numpy as np
import abc
import scipy.special

"""
This module defines priors with methods to draw samples and compute log(probability)
@@ -18,7 +16,7 @@ class Prior(abc.ABC):
"""

is_correlated = False

@abc.abstractmethod
def draw_samples(self, num_samples):
pass
@@ -27,25 +25,27 @@ def draw_samples(self, num_samples):
def compute_lnprob(self, element_array):
pass


class NearestNDInterpPrior(Prior):
"""
Nearest Neighbor interp. This class is
a wrapper for scipy.interpolate.NearestNDInterpolator.
Args:
interp_fct (scipy.interpolate.NearestNDInterpolator): scipy Interpolator
object containing the NDInterpolator defined by the user
total_params (float): number of parameters
Written: Jorge LLop-Sayson (2021)
"""

is_correlated = True

def __init__(self, interp_fct, total_params):
self.interp_fct = interp_fct
self.total_params = total_params
self.param_num = 0
self.correlated_drawn_samples = None
self.correlated_input_samples = None
self.num_priorsFromArr = interp_fct.values.size
self.ind_draw = None
@@ -74,27 +74,27 @@ def draw_samples(self, num_samples):
num_samples (float): the number of samples to generate.
Returns:
numpy array of float: samples drawn from the ND interpolator
distribution. Array has length `num_samples`.
"""
if self.param_num == 0:
ind_draw = np.random.randint(self.num_priorsFromArr, size=num_samples)
self.ind_draw = ind_draw
return_me = self.interp_fct.points[self.ind_draw, self.param_num]
self.increment_param_num()
return return_me
else:
return_me = self.interp_fct.points[self.ind_draw, self.param_num]
self.increment_param_num()
return return_me

def compute_lnprob(self, element_array):
"""
Compute log(probability) of an array of numbers wrt the defined ND
interpolator. Negative numbers return a probability of -inf.
Args:
element_array (float or np.array of float): array of numbers. We want
the probability of drawing each of these from the ND interpolator.
Returns:
@@ -105,8 +105,10 @@ def compute_lnprob(self, element_array):
if self.param_num == 0:
self.correlated_input_samples = element_array
else:
self.correlated_input_samples = np.append(
self.correlated_input_samples, element_array
)
if self.param_num == self.total_params - 1:
lnlike = self.interp_fct(self.correlated_input_samples)
self.increment_param_num()
self.logparam_corr = 1
@@ -115,38 +117,40 @@ def compute_lnprob(self, element_array):
self.increment_param_num()
return 0
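
For reference, a minimal usage sketch (not part of the diff) of how this prior might be built; the grid points and log-probability values below are invented for illustration, and only the two-argument constructor shown above is assumed.

import numpy as np
from scipy.interpolate import NearestNDInterpolator
from orbitize.priors import NearestNDInterpPrior

# hypothetical 2-parameter grid with precomputed log-probability values
points = np.random.uniform(0, 1, size=(500, 2))    # shape (n_gridpoints, n_params)
values = -0.5 * np.sum(points**2, axis=1)          # stand-in log-probability surface

interp_fct = NearestNDInterpolator(points, values)
nd_prior = NearestNDInterpPrior(interp_fct, total_params=2)

# draws are made one parameter at a time; the first call fixes the grid rows
first_param = nd_prior.draw_samples(1000)
second_param = nd_prior.draw_samples(1000)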


class KDEPrior(Prior):
"""
Gaussian kernel density estimation (KDE) prior. This class is
a wrapper for scipy.stats.gaussian_kde.
Args:
gaussian_kde (scipy.stats.gaussian_kde): scipy KDE object containing the
KDE defined by the user
total_params (float): number of parameters in the KDE
bounds (array_like, optional): bounds for the KDE out of which the prob
returned is -Inf
log_scale_arr (array_like of bool, optional): if True for a parameter, the
parameter is fit to the KDE in log-scale
Written: Jorge LLop-Sayson, Sarah Blunt (2021)
"""

is_correlated = True

def __init__(self, gaussian_kde, total_params, bounds=[], log_scale_arr=[]):
self.gaussian_kde = gaussian_kde
self.total_params = total_params
self.param_num = 0
self.logparam_corr = 1
if not bounds:
self.bounds = [[-np.inf, np.inf] for i in range(total_params)]
else:
self.bounds = bounds
if not log_scale_arr:
self.log_scale_arr = [False for i in range(total_params)]
else:
self.log_scale_arr = log_scale_arr
self.correlated_drawn_samples = None
self.correlated_input_samples = None

def __repr__(self):
@@ -176,7 +180,7 @@ def draw_samples(self, num_samples):
num_samples (float): the number of samples to generate.
Returns:
numpy array of float: samples drawn from the KDE
distribution. Array has length `num_samples`.
"""
if self.param_num == 0:
@@ -187,7 +191,7 @@ def draw_samples(self, num_samples):
return_me = self.correlated_drawn_samples[self.param_num]
self.increment_param_num()
return return_me

def compute_lnprob(self, element_array):
"""
Compute log(probability) of an array of numbers wrt the defined KDE.
@@ -202,28 +206,35 @@ def compute_lnprob(self, element_array):
corresponding to the probability of drawing each of the numbers
in the input `element_array`.
"""
if (
element_array < self.bounds[self.param_num][0]
or element_array > self.bounds[self.param_num][1]
):
if self.log_scale_arr[self.param_num]:
element_array_lin = element_array
element_array = np.log10(element_array)
if np.isnan(element_array):
element_array = 0 # set to zero bc doesn't matter what it is since we're already returning a small prob
if self.param_num == 0:
self.correlated_input_samples = element_array
else:
self.correlated_input_samples = np.append(
self.correlated_input_samples, element_array
)
self.increment_param_num()
self.logparam_corr = 1
return -1e10
if self.log_scale_arr[self.param_num]:
element_array_lin = element_array
element_array = np.log10(element_array)
self.logparam_corr = self.logparam_corr * (element_array_lin)
if self.param_num == 0:
self.correlated_input_samples = element_array
else:
self.correlated_input_samples = np.append(
self.correlated_input_samples, element_array
)
if self.param_num == self.total_params - 1:
lnlike = self.gaussian_kde.logpdf(self.correlated_input_samples)
self.increment_param_num()
self.logparam_corr = 1
@@ -232,6 +243,7 @@ def compute_lnprob(self, element_array):
self.increment_param_num()
return 0
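
Similarly, a minimal construction sketch (not from the repository); the correlated chain feeding the KDE is made up, and the two-argument call matches the __init__ above with its optional bounds and log_scale_arr left at their defaults.

import numpy as np
from scipy.stats import gaussian_kde
from orbitize.priors import KDEPrior

# hypothetical correlated posterior chain used to define the prior
chain = np.random.multivariate_normal(
    [1.0, 10.0], [[0.01, 0.005], [0.005, 0.04]], size=5000
)

kde = gaussian_kde(chain.T)  # gaussian_kde expects shape (n_params, n_samples)
kde_prior = KDEPrior(kde, total_params=2)

# as with the ND-interpolator prior, draws are made one parameter at a time
first_param = kde_prior.draw_samples(1000)
second_param = kde_prior.draw_samples(1000)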


class GaussianPrior(Prior):
"""Gaussian prior.
@@ -243,7 +255,7 @@ class GaussianPrior(Prior):
mu (float): mean of the distribution
sigma (float): standard deviation of the distribution
no_negatives (bool): if True, only positive values will be drawn from
this prior, and the probability of negative values will be 0
(default:True).
(written) Sarah Blunt, 2018
@@ -271,7 +283,7 @@ def transform_samples(self, u):
"""
# generate samples following a gaussian distribution
z = scipy.special.ndtri(u)
samples = z * self.sigma + self.mu
return samples
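
transform_samples here is the inverse-CDF (percentile-point) transform: scipy.special.ndtri maps unit-uniform draws to standard-normal draws, which are then scaled by sigma and shifted by mu. A quick standalone check one might run (the mu and sigma values are taken from the __main__ block at the end of the file):

import numpy as np
from orbitize.priors import GaussianPrior

gauss_prior = GaussianPrior(1.3, 0.2)        # mu=1.3, sigma=0.2
u = np.random.uniform(0, 1, int(1e5))
samples = gauss_prior.transform_samples(u)

# the transformed samples should recover mu and sigma to within sampling noise
print(np.mean(samples), np.std(samples))     # ~1.3, ~0.2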

def draw_samples(self, num_samples):
@@ -306,10 +318,9 @@ def compute_lnprob(self, element_array):
corresponding to the probability of drawing each of the numbers
in the input `element_array`.
"""
lnprob = -0.5 * ((element_array - self.mu) / self.sigma) ** 2

if self.no_negatives:

bad_samples = np.where(element_array < 0)[0]
lnprob[bad_samples] = -np.inf

@@ -353,8 +364,10 @@ def transform_samples(self, u):
numpy array of floats: 1D u samples transformed to a Log Uniform
distribution.
"""
samples = (self.logmax - self.logmin) * u + self.logmin

# generate samples following a log uniform distribution
samples = np.exp(samples)

return samples
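
This is the bugfix named in the commit message: the previous implementation exponentiated the unit-uniform draws directly (samples = np.exp(u)), so the output never spanned the prior bounds. The fix first rescales u onto [logmin, logmax] and then exponentiates, i.e. the inverse-CDF transform of a log-uniform distribution. A standalone check, mirroring the one added to the __main__ block at the end of the file:

import numpy as np
from orbitize.priors import LogUniformPrior

logu_prior = LogUniformPrior(0.01, 1e4)      # minval=0.01, maxval=1e4
u = np.random.uniform(0, 1, int(1e5))
samples = logu_prior.transform_samples(u)

# samples should span [minval, maxval], and their logs should be roughly uniform
print(samples.min(), samples.max())                 # within [0.01, 1e4]
print(np.histogram(np.log(samples), bins=10)[0])    # roughly equal counts per bin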

@@ -388,14 +401,16 @@ def compute_lnprob(self, element_array):
"""
normalizer = self.logmax - self.logmin

lnprob = -np.log((element_array * normalizer))

# account for scalar inputs
if np.shape(lnprob) == ():
if (element_array > self.maxval) or (element_array < self.minval):
lnprob = -np.inf
else:
lnprob[
(element_array > self.maxval) | (element_array < self.minval)
] = -np.inf

return lnprob

@@ -460,14 +475,16 @@ def compute_lnprob(self, element_array):
Returns:
np.array: array of prior probabilities
"""
lnprob = np.log(np.ones(np.size(element_array)) / (self.maxval - self.minval))

# account for scalar inputs
if np.shape(lnprob) == ():
if (element_array > self.maxval) or (element_array < self.minval):
lnprob = -np.inf
else:
lnprob[
(element_array > self.maxval) | (element_array < self.minval)
] = -np.inf

return lnprob

@@ -498,7 +515,7 @@ def transform_samples(self, u):
distribution.
"""
# generate samples following a sin distribution
samples = np.arccos(1 - 2 * u)

return samples
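
The arccos(1 - 2u) expression is the inverse CDF of p(x) = sin(x)/2 on [0, pi]: the CDF is (1 - cos x)/2, and solving u = (1 - cos x)/2 for x gives exactly this form. A small check sketch; the no-argument SinPrior() constructor is assumed, since its __init__ is not shown in this excerpt:

import numpy as np
from orbitize.priors import SinPrior

sin_prior = SinPrior()                       # assumed no-argument constructor
u = np.random.uniform(0, 1, int(1e5))
samples = sin_prior.transform_samples(u)

# samples should fill [0, pi], peaking near pi/2 where sin(x) is largest
print(samples.min(), samples.max())          # ~0, ~pi
print(np.mean(samples))                      # ~pi/2 by symmetry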

@@ -530,9 +547,9 @@ def compute_lnprob(self, element_array):
Returns:
np.array: array of prior probabilities
"""
normalization = 2.0

lnprob = np.log(np.sin(element_array) / normalization)

# account for scalar inputs
if np.shape(lnprob) == ():
@@ -580,11 +597,12 @@ def transform_samples(self, u):
numpy array of floats: 1D u samples transformed to a Linear
distribution.
"""
norm = -0.5 * self.b**2 / self.m

# generate samples following a linear distribution
linear_samples = -np.sqrt(2.0 * norm * u / self.m + (self.b / self.m) ** 2) - (
self.b / self.m
)

return linear_samples
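
This expression inverts the CDF of p(x) = (m*x + b)/norm with negative slope m, supported on [0, -b/m], where norm = -b**2/(2m) makes the density integrate to one. A small check sketch; the (m, b) argument order follows the commented-out LinearPrior(-1.0, 1.0) call in the __main__ block:

import numpy as np
from orbitize.priors import LinearPrior

lin_prior = LinearPrior(-1.0, 1.0)           # m=-1, b=1, so the support is [0, 1]
u = np.random.uniform(0, 1, int(1e5))
samples = lin_prior.transform_samples(u)

# the density decreases linearly from x=0 to x=1, so samples pile up near 0
print(samples.min(), samples.max())          # within [0, 1]
print(np.mean(samples))                      # ~1/3 for this triangular density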

@@ -608,11 +626,10 @@ def draw_samples(self, num_samples):
return linear_samples

def compute_lnprob(self, element_array):
x_intercept = -self.b / self.m
normalizer = -0.5 * self.b**2 / self.m

lnprob = np.log((self.m * element_array + self.b) / normalizer)

# account for scalar inputs
if np.shape(lnprob) == ():
@@ -635,27 +652,31 @@ def all_lnpriors(params, priors):
Returns:
float: prior probability of this set of parameters
"""
logp = 0.0

for param, prior in zip(params, priors):
param = np.array([param])

logp += prior.compute_lnprob(param) # return a float

return logp
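
A short usage sketch for all_lnpriors (not from the repository); GaussianPrior(1.3, 0.2) appears in the __main__ block below, while the two-argument UniformPrior(minval, maxval) call is assumed, since that __init__ is not shown in this excerpt:

import numpy as np
from orbitize.priors import all_lnpriors, GaussianPrior, UniformPrior

param_priors = [UniformPrior(0.0, 1.0), GaussianPrior(1.3, 0.2)]  # one prior per parameter
params = np.array([0.5, 1.25])

print(all_lnpriors(params, param_priors))    # sum of the individual log-priors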


if __name__ == "__main__":
# myPrior = LinearPrior(-1.0, 1.0)
# mySamples = myPrior.draw_samples(1000)
# print(mySamples)
# myProbs = myPrior.compute_lnprob(mySamples)
# print(myProbs)

# myPrior = GaussianPrior(1.3, 0.2)
# mySamples = myPrior.draw_samples(1)
# print(mySamples)

# myProbs = myPrior.compute_lnprob(mySamples)
# print(myProbs)

myPrior = LogUniformPrior(0.01, 1e4)
u = np.random.uniform(0, 1, int(1e4))
samps = myPrior.transform_samples(u)
print(samps.min(), samps.max())
