ran more test training.
PatReis committed Dec 14, 2023
1 parent 637aba2 commit 71e9bba
Showing 11 changed files with 1,002 additions and 54 deletions.
35 changes: 22 additions & 13 deletions kgcnn/layers/geom.py
@@ -5,8 +5,9 @@
from keras import ops, Layer
from keras.layers import Layer, Subtract, Multiply, Add, Subtract
from kgcnn.layers.gather import GatherNodes, GatherState, GatherNodesOutgoing
from kgcnn.layers.polynom import spherical_bessel_jn_zeros, spherical_bessel_jn_normalization_prefactor, \
tf_spherical_bessel_jn, tf_spherical_harmonics_yl
from kgcnn.layers.polynom import spherical_bessel_jn_zeros, spherical_bessel_jn_normalization_prefactor
from kgcnn.layers.polynom import tf_spherical_bessel_jn, tf_spherical_harmonics_yl
from kgcnn.layers.polynom import SphericalBesselJnExplicit, SphericalHarmonicsYl
from kgcnn.ops.axis import get_positive_axis
from kgcnn.ops.core import cross as kgcnn_cross

@@ -1107,23 +1108,26 @@ def get_config(self):
class SphericalBasisLayer(Layer):
r"""Expand a distance into a Bessel Basis with :math:`l=m=0`, according to
`Klicpera et al. 2020 <https://arxiv.org/abs/2011.14115>`__ .
Args:
num_spherical (int): Number of spherical basis functions
num_radial (int): Number of radial basis functions
cutoff (float): Cutoff distance c
envelope_exponent (int): Degree of the envelope to smoothen at cutoff. Default is 5.
"""

def __init__(self, num_spherical,
num_radial,
cutoff,
envelope_exponent=5,
fused: bool = True,
**kwargs):
super(SphericalBasisLayer, self).__init__(**kwargs)
"""Initialize layer.
Args:
num_spherical (int): Number of spherical basis functions
num_radial (int): Number of radial basis functions
cutoff (float): Cutoff distance c
envelope_exponent (int): Degree of the envelope to smoothen at cutoff. Default is 5.
fused (bool): Whether to use fused implementation. Default is True.
"""
super(SphericalBasisLayer, self).__init__(**kwargs)
assert num_radial <= 64
self.fused = fused
self.num_radial = int(num_radial)
self.num_spherical = num_spherical
self.cutoff = cutoff
@@ -1135,6 +1139,9 @@ def __init__(self, num_spherical,
self.bessel_norm = spherical_bessel_jn_normalization_prefactor(num_spherical, num_radial)

self.layer_gather_out = GatherNodesOutgoing()
# non-explicit spherical bessel function seems faster.
# self.layers_spherical_jn = [SphericalBesselJnExplicit(n=n, fused=fused) for n in range(self.num_spherical)]
self.layers_spherical_yl = [SphericalHarmonicsYl(l=l, fused=fused) for l in range(self.num_spherical)]

def envelope(self, inputs):
p = self.envelope_exponent + 1
@@ -1164,6 +1171,7 @@ def call(self, inputs, **kwargs):
rbf = []
for n in range(self.num_spherical):
for k in range(self.num_radial):
# rbf += [self.bessel_norm[n, k] * self.layers_spherical_jn[n](d_scaled * self.bessel_n_zeros[n][k])]
rbf += [self.bessel_norm[n, k] * tf_spherical_bessel_jn(d_scaled * self.bessel_n_zeros[n][k], n)]
rbf = ops.stack(rbf, axis=1)

@@ -1172,7 +1180,8 @@ def call(self, inputs, **kwargs):
rbf_env = self.layer_gather_out([rbf_env, angle_index], **kwargs)
# rbf_env = tf.gather(rbf_env, id_expand_kj[:, 1])

cbf = [tf_spherical_harmonics_yl(angles[:, 0], n) for n in range(self.num_spherical)]
# cbf = [tf_spherical_harmonics_yl(angles[:, 0], n) for n in range(self.num_spherical)]
cbf = [self.layers_spherical_yl[n](angles[:, 0]) for n in range(self.num_spherical)]
cbf = ops.stack(cbf, axis=1)
cbf = ops.repeat(cbf, self.num_radial, axis=1)
out = rbf_env * cbf
@@ -1182,6 +1191,6 @@ def get_config(self):
def get_config(self):
"""Update config."""
config = super(SphericalBasisLayer, self).get_config()
config.update({"num_radial": self.num_radial, "cutoff": self.cutoff,
config.update({"num_radial": self.num_radial, "cutoff": self.cutoff, "fused": self.fused,
"envelope_exponent": self.envelope_exponent, "num_spherical": self.num_spherical})
return config
return config
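
The new `fused` flag is forwarded from `SphericalBasisLayer` to its `SphericalHarmonicsYl` sub-layers and is now serialized with the layer config. A minimal usage sketch, not part of the commit, assuming only the constructor signature and `get_config` keys shown in the diff above:

from kgcnn.layers.geom import SphericalBasisLayer

# Instantiate with the new `fused` option; note the assert requires num_radial <= 64.
basis = SphericalBasisLayer(num_spherical=7, num_radial=6, cutoff=5.0,
                            envelope_exponent=5, fused=True)

# `fused` now round-trips through the layer config.
config = basis.get_config()
assert config["fused"] is True

# Rebuilding from the serialized arguments should give an equivalent layer.
rebuilt = SphericalBasisLayer(**{k: config[k] for k in
                                 ("num_spherical", "num_radial", "cutoff",
                                  "envelope_exponent", "fused")})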
86 changes: 64 additions & 22 deletions kgcnn/layers/polynom.py
@@ -127,25 +127,29 @@ class SphericalBesselJnExplicit(Layer):
\sum_{k=0}^{\left\lfloor(n-1)/2\right\rfloor}(-1)^{k}\frac{a_{2k+1}(n+\tfrac{1}{2})}{z^{2k+2}}.`
"""

def __init__(self, n=0, **kwargs):
def __init__(self, n=0, fused: bool = False, **kwargs):
r"""Initialize layer with constant n.
Args:
n (int): Positive integer for the bessel order :math:`n`.
fused (bool): Whether to compute polynomial in a fused tensor representation.
"""
super(SphericalBesselJnExplicit, self).__init__(**kwargs)
self.n = n
self.fused = fused
self._pre_factor_sin = []
self._pre_factor_cos = []
self._powers_sin = []
self._powers_cos = []

for k in range(int(np.floor(n / 2)) + 1):
if 2 * k < n + 1:
fac_sin = float(sp.special.factorial(n + 2 * k) / np.power(2, 2 * k) / sp.special.factorial(
2 * k) / sp.special.factorial(n - 2 * k) * np.power(-1, k))
pow_sin = - (2 * k + 1)
self._pre_factor_sin.append(fac_sin)
self._powers_sin.append(pow_sin)

for k in range(int(np.floor((n - 1) / 2)) + 1):
if 2 * k + 1 < n + 1:
fac_cos = float(sp.special.factorial(n + 2 * k + 1) / np.power(2, 2 * k + 1) / sp.special.factorial(
@@ -154,6 +158,12 @@ def __init__(self, n=0, **kwargs):
self._pre_factor_cos.append(fac_cos)
self._powers_cos.append(pow_cos)

if self.fused:
self._pre_factor_sin = ops.convert_to_tensor(self._pre_factor_sin, dtype=self.dtype)
self._pre_factor_cos = ops.convert_to_tensor(self._pre_factor_cos, dtype=self.dtype)
self._powers_sin = ops.convert_to_tensor(self._powers_sin, dtype=self.dtype)
self._powers_cos = ops.convert_to_tensor(self._powers_cos, dtype=self.dtype)

def build(self, input_shape):
"""Build layer."""
super(SphericalBesselJnExplicit, self).build(input_shape)
@@ -170,12 +180,16 @@ def call(self, x, **kwargs):
n = self.n
sin_x = ops.sin(x - n * np.pi / 2)
cos_x = ops.cos(x - n * np.pi / 2)
sum_sin = ops.zeros_like(x)
sum_cos = ops.zeros_like(x)
for a, r in zip(self._pre_factor_sin, self._powers_sin):
sum_sin += a * ops.power(x, r)
for b, s in zip(self._pre_factor_cos, self._powers_cos):
sum_cos += b * ops.power(x, s)
if not self.fused:
sum_sin = ops.zeros_like(x)
sum_cos = ops.zeros_like(x)
for a, r in zip(self._pre_factor_sin, self._powers_sin):
sum_sin += a * ops.power(x, r)
for b, s in zip(self._pre_factor_cos, self._powers_cos):
sum_cos += b * ops.power(x, s)
else:
sum_sin = ops.sum(self._pre_factor_sin * ops.power(ops.expand_dims(x, axis=-1), self._powers_sin), axis=-1)
sum_cos = ops.sum(self._pre_factor_cos * ops.power(ops.expand_dims(x, axis=-1), self._powers_cos), axis=-1)
return sum_sin * sin_x + sum_cos * cos_x

def get_config(self):
@@ -261,19 +275,25 @@ class LegendrePolynomialPn(Layer):
"""

def __init__(self, n=0, **kwargs):
def __init__(self, n=0, fused: bool = False, **kwargs):
r"""Initialize layer with constant n.
Args:
n (int): Positive integer for :math:`n` in :math:`P_n(x)`.
fused (bool): Whether to compute polynomial in a fused tensor representation.
"""
super(LegendrePolynomialPn, self).__init__(**kwargs)
self.fused = fused
self.n = n
self._pre_factors = [
float((-1) ** k * sp.special.factorial(2 * n - 2 * k) / sp.special.factorial(n - k) / sp.special.factorial(
n - 2 * k) / sp.special.factorial(k) / 2 ** n) for k in range(0, int(np.floor(n / 2)) + 1)
]
self._powers = [float(n - 2 * k) for k in range(0, int(np.floor(n / 2)) + 1)]
if self.fused:
# Or maybe also as weight.
self._powers = ops.convert_to_tensor(self._powers, dtype=self.dtype)
self._pre_factors = ops.convert_to_tensor(self._pre_factors, dtype=self.dtype)

def build(self, input_shape):
"""Build layer."""
@@ -288,15 +308,18 @@ def call(self, x, **kwargs):
Returns:
Tensor: Legendre Polynomial of order :math:`n`.
"""
out_sum = ops.zeros_like(x)
for a, r in zip(self._pre_factors, self._powers):
out_sum = out_sum + a * ops.power(x, r)
if not self.fused:
out_sum = ops.zeros_like(x)
for a, r in zip(self._pre_factors, self._powers):
out_sum = out_sum + a * ops.power(x, r)
else:
out_sum = ops.sum(self._pre_factors * ops.power(ops.expand_dims(x, axis=-1), self._powers), axis=-1)
return out_sum

def get_config(self):
"""Update layer config."""
config = super(LegendrePolynomialPn, self).get_config()
config.update({"n": self.n})
config.update({"n": self.n, "fused": self.fused})
return config


@@ -347,19 +370,25 @@ class SphericalHarmonicsYl(Layer):
"""

def __init__(self, l=0, **kwargs):
def __init__(self, l=0, fused: bool = False, **kwargs):
r"""Initialize layer with constant l.
Args:
l (int): Positive integer for :math:`l` in :math:`Y_l(\cos\theta)`.
fused (bool): Whether to compute polynomial in a fused tensor representation.
"""
super(SphericalHarmonicsYl, self).__init__(**kwargs)
self.l = l
self.fused = fused
self._pre_factors = [
float((-1) ** k * sp.special.factorial(2 * l - 2 * k) / sp.special.factorial(l - k) / sp.special.factorial(
l - 2 * k) / sp.special.factorial(k) / 2 ** l) for k in range(0, int(np.floor(l / 2)) + 1)]
self._powers = [float(l - 2 * k) for k in range(0, int(np.floor(l / 2)) + 1)]
self._scale = float(np.sqrt((2 * l + 1) / 4 / np.pi))
if self.fused:
# Or maybe also as weight.
self._powers = ops.convert_to_tensor(self._powers, dtype=self.dtype)
self._pre_factors = ops.convert_to_tensor(self._pre_factors, dtype=self.dtype)

def build(self, input_shape):
"""Build layer."""
@@ -375,16 +404,19 @@ def call(self, theta, **kwargs):
Tensor: Spherical harmonics for :math:`m=0` and constant non-integer :math:`l`.
"""
x = ops.cos(theta)
out_sum = ops.zeros_like(x)
for a, r in zip(self._pre_factors, self._powers):
out_sum = out_sum + a * ops.power(x, r)
if not self.fused:
out_sum = ops.zeros_like(x)
for a, r in zip(self._pre_factors, self._powers):
out_sum = out_sum + a * ops.power(x, r)
else:
out_sum = ops.sum(self._pre_factors * ops.power(ops.expand_dims(x, axis=-1), self._powers), axis=-1)
out_sum = out_sum * self._scale
return out_sum

def get_config(self):
"""Update layer config."""
config = super(SphericalHarmonicsYl, self).get_config()
config.update({"l": self.l})
config.update({"l": self.l, "fused": self.fused})
return config


@@ -433,15 +465,17 @@ class AssociatedLegendrePolynomialPlm(Layer):
\cdot \binom{l}{k}\binom{\frac{l+k-1}{2}}{l}`.
"""

def __init__(self, l: int = 0, m: int = 0, **kwargs):
def __init__(self, l: int = 0, m: int = 0, fused: bool = False, **kwargs):
r"""Initialize layer with constant m, l.
Args:
l (int): Positive integer for :math:`l` in :math:`P_{l}^{m}(x)`.
m (int): Positive/Negative integer for :math:`m` in :math:`P_{l}^{m}(x)`.
fused (bool): Whether to compute polynomial in a fused tensor representation.
"""
super(AssociatedLegendrePolynomialPlm, self).__init__(**kwargs)
self.m = m
self.fused = fused
self.l = l
if np.abs(m) > l:
raise ValueError("Error: Legendre polynomial must have -l<= m <= l")
@@ -464,6 +498,11 @@ def __init__(self, l: int = 0, m: int = 0, **kwargs):
sp.special.binom((l + k - 1) / 2, l))
self._pre_factors.append(fac)

if self.fused:
# Or maybe also as weight.
self._powers = ops.convert_to_tensor(self._powers, dtype=self.dtype)
self._pre_factors = ops.convert_to_tensor(self._pre_factors, dtype=self.dtype)

def build(self, input_shape):
"""Build layer."""
super(AssociatedLegendrePolynomialPlm, self).build(input_shape)
@@ -480,13 +519,16 @@ def call(self, x, **kwargs):
neg_m = self._neg_m
m = self._m
x_pre_factor = ops.power(1 - ops.square(x), m / 2) * self._x_pre_factor
sum_out = ops.zeros_like(x)
for a, r in zip(self._pre_factors, self._powers):
sum_out += ops.power(x, r) * a
if not self.fused:
sum_out = ops.zeros_like(x)
for a, r in zip(self._pre_factors, self._powers):
sum_out += ops.power(x, r) * a
else:
sum_out = ops.sum(self._pre_factors * ops.power(ops.expand_dims(x, axis=-1), self._powers), axis=-1)
return sum_out * x_pre_factor * neg_m

def get_config(self):
"""Update layer config."""
config = super(AssociatedLegendrePolynomialPlm, self).get_config()
config.update({"l": self.l, "m": self.m})
config.update({"l": self.l, "m": self.m, "fused": self.fused})
return config
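
All four polynomial layers gain the same `fused` path: the constant pre-factors and powers are converted to tensors once in `__init__`, and the polynomial is then evaluated in a single broadcasted reduction instead of a Python loop over terms. A small self-contained sketch, not part of the commit, showing that equivalence for a Legendre polynomial with the same coefficient formula as `LegendrePolynomialPn` and the Keras `ops` API:

import numpy as np
from scipy import special
from keras import ops

n = 3  # Legendre order; coefficients a_k and powers r_k as in LegendrePolynomialPn above
pre_factors = [
    float((-1) ** k * special.factorial(2 * n - 2 * k) / special.factorial(n - k)
          / special.factorial(n - 2 * k) / special.factorial(k) / 2 ** n)
    for k in range(int(np.floor(n / 2)) + 1)
]
powers = [float(n - 2 * k) for k in range(int(np.floor(n / 2)) + 1)]

x = ops.convert_to_tensor(np.linspace(-1.0, 1.0, 5), dtype="float32")

# Looped evaluation (fused=False): accumulate a_k * x**r_k term by term.
out_loop = ops.zeros_like(x)
for a, r in zip(pre_factors, powers):
    out_loop = out_loop + a * ops.power(x, r)

# Fused evaluation (fused=True): broadcast x against all coefficients at once and reduce.
a_t = ops.convert_to_tensor(pre_factors, dtype="float32")
r_t = ops.convert_to_tensor(powers, dtype="float32")
out_fused = ops.sum(a_t * ops.power(ops.expand_dims(x, axis=-1), r_t), axis=-1)

print(np.allclose(ops.convert_to_numpy(out_loop), ops.convert_to_numpy(out_fused)))  # True for P_3(x)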
2 changes: 1 addition & 1 deletion kgcnn/layers/relational.py
@@ -259,6 +259,6 @@ def get_config(self):
})
config_act = self._layer_activation.get_config()
for x in ["activation", "activity_regularizer"]:
if x in config_act:
if x in config_act.keys():
config.update({x: config_act[x]})
return config