Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Utils, linting #11

Merged
merged 10 commits into from
Jan 25, 2024
6 changes: 3 additions & 3 deletions .github/workflows/python-package.yml
Original file line number Diff line number Diff line change
Expand Up @@ -16,12 +16,12 @@ jobs:
strategy:
fail-fast: false
matrix:
python-version: ["3.8", "3.9", "3.10"]
python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]

steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies (Linux)
Expand Down
8 changes: 7 additions & 1 deletion flexknot/__init__.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
"""
# Flex-Knot
# Flex-Knot.

This repo contains flex-knots and associated likelihoods used in my
toy_sine project, and likelihoods associated with them.
Expand All @@ -10,3 +10,9 @@
from flexknot.core import AdaptiveKnot, FlexKnot
from flexknot.likelihoods import FlexKnotLikelihood
from flexknot.priors import AdaptiveKnotPrior, FlexKnotPrior

__all__ = [
"AdaptiveKnot", "FlexKnot",
"FlexKnotLikelihood",
"AdaptiveKnotPrior", "FlexKnotPrior",
]
87 changes: 61 additions & 26 deletions flexknot/core.py
Original file line number Diff line number Diff line change
@@ -1,16 +1,14 @@
"""
Linear INterpolation Functions.
Flex-knot.

theta refers to the full set of parameters for an adaptive linear interpolation model,
[n, y0, x1, y1, x2, y2, ..., x_n, y_n, y_n+1],
where n is the greatest allowed value of ceil(n).

The reason for the interleaving of x and y is it avoids the need to know n.
x and y are interleaved so that N does not need to be provided for
a non-adaptive flex-knot.
"""

import numpy as np
from scipy.integrate import quad

from flexknot.helper_functions import (
from flexknot.utils import (
get_theta_n,
get_x_nodes_from_theta,
get_y_nodes_from_theta,
Expand All @@ -24,10 +22,14 @@ class FlexKnot:
x_min: float
x_max: float > x_min

Returns:
flexknot(x, theta)
Returns
-------
flexknot(x, theta): callable

theta has format
[y0, x1, y1, x2, y2, ..., x_(N-2), y_(N-2), y_(N-1)]
for N nodes.

theta in format [y0, x1, y1, x2, y2, ..., x_(N-2), y_(N-2), y_(N-1)] for N nodes.
"""

def __init__(self, x_min, x_max):
Expand All @@ -36,14 +38,26 @@ def __init__(self, x_min, x_max):

def __call__(self, x, theta):
"""
Flex-knot with end nodes at x_min and x_max
Flex-knot with end nodes at x_min and x_max.

For N nodes:
theta = [y0, x1, y1, x2, y2, ..., x_(N-2), y_(N-2), y_(N-1)].

y0 and y_(N-1) are the y values at x_min and x_max respectively.

If theta only contains a single element, the flex-knot is constant.
If theta is empty, the flex-knot is constant at -1 (cosmology!)

Parameters
----------
x : float or array-like

theta = [y0, x1, y1, x2, y2, ..., x_(N-2), y_(N-2), y_(N-1)] for N nodes.
theta : array-like

y0 and y_(N-1) are the y values corresponding to x_min and x_max respectively.
Returns
-------
float or array-like

If theta only contains a single element, the flex-knot is constant at that value.
If theta is empty, the flex-knot is constant at -1 (cosmology!)
"""
if 0 == len(theta):
return np.full_like(x, -1)
Expand All @@ -63,25 +77,31 @@ def __call__(self, x, theta):

def area(self, theta0, theta1):
"""
Calculate the area between the flex-knot with parameters
theta_0 and theta_1.
Area between two flex-knots.

Parameters
----------
theta0 : array-like

theta1 : array-like
"""
return quad(lambda x: np.abs(self(x, theta0)-self(x, theta1)),
self.x_min, self.x_max)[0] / (self.x_max - self.x_min)
self.x_min, self.x_max)[0] / (self.x_max - self.x_min)


class AdaptiveKnot(FlexKnot):
"""
Adaptive flex-knot which allows the number of parameters being used to vary.
Adaptive flex-knot which allows the number of knots to vary.

x_min: float
x_max: float > x_min

Returns:
adaptive_flexknot(x, theta)
Returns
-------
adaptive_flexknot(x, theta): callable

The first element of theta is N; floor(N)-2 is number of interior nodes used in
the linear interpolation model.
The first element of theta is N; floor(N)-2 is the number of interior
nodes used by the flex-knot.

theta = [N, y0, x1, y1, x2, y2, ..., x_(Nmax-2), y_(Nmax-2), y_(Nmax-1)],
where Nmax is the greatest allowed value of floor(N).
Expand All @@ -92,14 +112,29 @@ class AdaptiveKnot(FlexKnot):

def __call__(self, x, theta):
"""
The first element of theta is N; floor(N)-2 is number of interior nodes used in
the linear interpolation model. This is then used to select the
Adaptive flex-knot with end nodes at x_min and x_max.

The first element of theta is N; floor(N)-2 is the number of interior
nodes used in the flex-knot. This is then used to select the
appropriate other elements of theta to pass to flexknot().

theta = [N, y0, x1, y1, x2, y2, ..., x_(Nmax-2), y_(Nmax-2), y_(Nmax-1)],
For a maximum of Nmax nodes:
theta = [N, y0, x1, y1, x2, y2, ...,
x_(Nmax-2), y_(Nmax-2), y_(Nmax-1)],
where Nmax is the greatest allowed value of floor(N).

if floor(N) = 1, the flex-knot is constant at theta[-1] = y_(Nmax-1).
if floor(N) = 0, the flex-knot is constant at -1 (cosmology!)

Parameters
----------
x : float or array-like

theta : array-like

Returns
-------
float or array-like

"""
return super().__call__(x, get_theta_n(theta))
93 changes: 0 additions & 93 deletions flexknot/helper_functions.py

This file was deleted.

71 changes: 50 additions & 21 deletions flexknot/likelihoods.py
Original file line number Diff line number Diff line change
@@ -1,26 +1,24 @@
"""
Likelihoods using flex-knots.
"""
"""Likelihoods using flex-knots."""

import numpy as np
from scipy.special import erf, logsumexp
from flexknot.helper_functions import (
get_x_nodes_from_theta,
get_theta_n,
)
from flexknot.utils import get_x_nodes_from_theta

from flexknot.core import AdaptiveKnot, FlexKnot


class FlexKnotLikelihood:
"""
Likelihood for a flex-knot, relative to data described by xs, ys, and sigma.
Likelihood for a flex-knot, with data described by xs, ys, and sigma.

sigma is either sigma_y, [sigma_x, sigma_y], [sigma_ys] or [[sigma_xs], [sigma_ys]].
sigma is either sigma_y, [sigma_x, sigma_y], [sigma_ys],
or [[sigma_xs], [sigma_ys]].

(obviously the middle two are degenerate when len(sigma) = 2, in which case
(Obviously the middle two are degenerate when len(sigma) = 2, in which case
[sigma_x, sigma_y] is assumed.)

Returns likelihood(theta) -> log(L), [] where [] is the (lack of) derived parameters.
Returns likelihood(theta) -> log(L), [] where [] is the (lack of)
derived parameters.
"""

def __init__(self, x_min, x_max, xs, ys, sigma, adaptive):
Expand All @@ -30,28 +28,56 @@ def __init__(self, x_min, x_max, xs, ys, sigma, adaptive):

def __call__(self, theta):
"""
Likelihood relative to a flex-knot with parameters theta.
Likelihood of the data being described by flex-knot(theta).

If self.adaptive = True, the first element of theta is N; floor(N) is the number of
nodes used to calculate the likelihood.
If self.adaptive = True, the first element of theta is N; floor(N) is
the number of nodes in the flex-knot.

theta = [N, y0, x1, y1, x2, y2, ..., x_(N-2), y_(N-2), y_(N-1)] for N nodes.
For N nodes,
theta = [N, y0, x1, y1, x2, y2, ..., x_(N-2), y_(N-2), y_(N-1)].

Otherwise, theta is the same but without N.

theta = [y0, x1, y1, x2, y2, ..., x_(N-2), y_(N-2), y_(N-1)].

Parameters
----------
theta : array-like

Returns
-------
tuple(float, [])

"""
return self._likelihood_function(theta)


def create_likelihood_function(x_min, x_max, xs, ys, sigma, adaptive):
"""
Creates a likelihood function for a flex-knot, relative to data described by xs, ys, and sigma.
Create a likelihood function for a flex-knot, for data xs, ys, sigma.

sigma is either sigma_y, [sigma_x, sigma_y], [sigma_ys] or [[sigma_xs], [sigma_ys]].
sigma is either sigma_y, [sigma_x, sigma_y], [sigma_ys],
or [[sigma_xs], [sigma_ys]].

(obviously the middle two are degenerate when len(sigma) = 2, in which case
(Obviously the middle two are degenerate when len(sigma) = 2, in which case
[sigma_x, sigma_y] is assumed.)

Returns likelihood(theta) -> log(L), [] where [] is the (lack of)
derived parameters.

Parameters
----------
x_min : float
x_max : float > x_min
xs : array-like
ys : array-like
sigma : float or array-like
adaptive : bool

Returns
-------
likelihood(theta) -> log(L), []

"""
LOG_2_SQRT_2πλ = np.log(2) + 0.5 * np.log(2 * np.pi * (x_max - x_min))

Expand Down Expand Up @@ -79,7 +105,9 @@ def xy_errors_likelihood(theta):
x_nodes = np.concatenate(
([x_min], get_x_nodes_from_theta(theta, adaptive), [x_max])
)
# use flex-knots to get y nodes, as this is simplest way of dealing with N=0 or 1
# use flex-knots to get y nodes, as this is
# the simplest way to deal with N=0 or 1

y_nodes = flexknot(x_nodes, theta)

ms = (y_nodes[1:] - y_nodes[:-1]) / (x_nodes[1:] - x_nodes[:-1])
Expand All @@ -92,8 +120,9 @@ def xy_errors_likelihood(theta):
beta = (xs * var_y + (delta * ms).T * var_x).T / q
gamma = (np.outer(xs, ms) - delta) ** 2 / 2 / q

t_minus = (np.sqrt(q / 2).T / (sigma_x * sigma_y)).T * (x_nodes[:-1] - beta)
t_plus = (np.sqrt(q / 2).T / (sigma_x * sigma_y)).T * (x_nodes[1:] - beta)
t = (np.sqrt(q / 2).T / (sigma_x * sigma_y)).T
t_minus = t * (x_nodes[:-1] - beta)
t_plus = t * (x_nodes[1:] - beta)

logL = -len(xs) * LOG_2_SQRT_2πλ
logL += np.sum(
Expand Down
Loading