Skip to content

Commit

Permalink
Format model docs
Browse files Browse the repository at this point in the history
  • Loading branch information
nyLiao committed Jul 14, 2024
1 parent 0fc31aa commit 73f33e6
Show file tree
Hide file tree
Showing 10 changed files with 197 additions and 337 deletions.
6 changes: 2 additions & 4 deletions pyg_spectral/nn/conv/base_mp.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,8 +56,7 @@ def get_propagate_mat(self,
:meth:`forward()` with the same input.
Args:
x: from :class:`torch_geometric.data.Data`
edge_index: from :class:`torch_geometric.data.Data`
x, edge_index: from :class:`torch_geometric.data.Data`
Attributes:
propagate_mat (str): propagation schemes, separated by ``,``.
Each scheme starts with ``A`` or ``L`` for adjacency or Laplacian,
Expand Down Expand Up @@ -153,8 +152,7 @@ def get_forward_mat(self,
r"""Get matrices for :meth:`forward()`. Called during :meth:`forward()`.
Args:
x: from :class:`torch_geometric.data.Data`
edge_index: from :class:`torch_geometric.data.Data`
x, edge_index: from :class:`torch_geometric.data.Data`
Returns:
out (Tensor): output tensor (shape: :math:`(|\mathcal{V}|, F)`)
prop (Adj): propagation matrix
Expand Down
56 changes: 20 additions & 36 deletions pyg_spectral/nn/models/acm_gnn.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,30 +11,22 @@

class ACMGNN(BaseNN):
r"""Iterative structure for ACM conv.
paper: Revisiting Heterophily For Graph Neural Networks
paper: Complete the Missing Half: Augmenting Aggregation Filtering with Diversification for Graph Convolutional Networks
ref: https://github.com/SitaoLuan/ACM-GNN
:paper: Revisiting Heterophily For Graph Neural Networks
:paper: Complete the Missing Half: Augmenting Aggregation Filtering with Diversification for Graph Convolutional Networks
:ref: https://github.com/SitaoLuan/ACM-GNN
Args:
theta_scheme (str): Channel list. "FBGNN"="low-high", "ACMGNN"="low-high-id",
("ACMGNN+"="low-high-id-struct", not implemented).
weight_initializer (str, optional): The initializer for the weight.
--- BaseNN Args ---
conv (str): Name of :class:`pyg_spectral.nn.conv` module.
num_hops (int): Total number of conv hops.
in_channels (int): Size of each input sample.
hidden_channels (int): Size of each hidden sample.
out_channels (int): Size of each output sample.
in_layers (int): Number of MLP layers before conv.
out_layers (int): Number of MLP layers after conv.
dropout_lin (float, optional): Dropout probability for both MLPs.
dropout_conv (float, optional): Dropout probability before conv.
conv, num_hops, in_channels, hidden_channels, out_channels:
args for :class:`BaseNN`
in_layers, out_layers, dropout_lin, dropout_conv, lib_conv:
args for :class:`BaseNN`
act, act_first, act_kwargs, norm, norm_kwargs, plain_last, bias:
args for :class:`pyg.nn.models.MLP`.
lib_conv (str, optional): Parent module library other than
:class:`pyg_spectral.nn.conv`.
**kwargs (optional): Additional arguments of the
:class:`pyg_spectral.nn.conv` module.
args for :class:`torch_geometric.nn.models.MLP`.
**kwargs: Additional arguments of :class:`pyg_spectral.nn.conv`.
"""

def init_conv(self,
Expand Down Expand Up @@ -68,30 +60,22 @@ def init_conv(self,

class ACMGNNDec(BaseNN):
r"""Decoupled structure for ACM conv.
paper: Revisiting Heterophily For Graph Neural Networks
paper: Complete the Missing Half: Augmenting Aggregation Filtering with Diversification for Graph Convolutional Networks
ref: https://github.com/SitaoLuan/ACM-GNN
:paper: Revisiting Heterophily For Graph Neural Networks
:paper: Complete the Missing Half: Augmenting Aggregation Filtering with Diversification for Graph Convolutional Networks
:ref: https://github.com/SitaoLuan/ACM-GNN
Args:
theta_scheme (str): Channel list. "FBGNN"="low-high", "ACMGNN"="low-high-id",
("ACMGNN+"="low-high-id-struct", not implemented).
weight_initializer (str, optional): The initializer for the weight.
--- BaseNN Args ---
conv (str): Name of :class:`pyg_spectral.nn.conv` module.
num_hops (int): Total number of conv hops.
in_channels (int): Size of each input sample.
hidden_channels (int): Size of each hidden sample.
out_channels (int): Size of each output sample.
in_layers (int): Number of MLP layers before conv.
out_layers (int): Number of MLP layers after conv.
dropout_lin (float, optional): Dropout probability for both MLPs.
dropout_conv (float, optional): Dropout probability before conv.
conv, num_hops, in_channels, hidden_channels, out_channels:
args for :class:`BaseNN`
in_layers, out_layers, dropout_lin, dropout_conv, lib_conv:
args for :class:`BaseNN`
act, act_first, act_kwargs, norm, norm_kwargs, plain_last, bias:
args for :class:`pyg.nn.models.MLP`.
lib_conv (str, optional): Parent module library other than
:class:`pyg_spectral.nn.conv`.
**kwargs (optional): Additional arguments of the
:class:`pyg_spectral.nn.conv` module.
args for :class:`torch_geometric.nn.models.MLP`.
**kwargs: Additional arguments of :class:`pyg_spectral.nn.conv`.
"""

def init_conv(self,
Expand Down
26 changes: 9 additions & 17 deletions pyg_spectral/nn/models/ada_gnn.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,26 +10,18 @@

class AdaGNN(BaseNN):
r"""Decoupled structure with diag transformation each hop of propagation.
paper: AdaGNN: Graph Neural Networks with Adaptive Frequency Response Filter
ref: https://github.com/yushundong/AdaGNN
:paper: AdaGNN: Graph Neural Networks with Adaptive Frequency Response Filter
:ref: https://github.com/yushundong/AdaGNN
Args:
--- BaseNN Args ---
conv (str): Name of :class:`pyg_spectral.nn.conv` module.
num_hops (int): Total number of conv hops.
in_channels (int): Size of each input sample.
hidden_channels (int): Size of each hidden sample.
out_channels (int): Size of each output sample.
in_layers (int): Number of MLP layers before conv.
out_layers (int): Number of MLP layers after conv.
dropout_lin (float, optional): Dropout probability for both MLPs.
dropout_conv (float, optional): Dropout probability before conv.
conv, num_hops, in_channels, hidden_channels, out_channels:
args for :class:`BaseNN`
in_layers, out_layers, dropout_lin, dropout_conv, lib_conv:
args for :class:`BaseNN`
act, act_first, act_kwargs, norm, norm_kwargs, plain_last, bias:
args for :class:`pyg.nn.models.MLP`.
lib_conv (str, optional): Parent module library other than
:class:`pyg_spectral.nn.conv`.
**kwargs (optional): Additional arguments of the
:class:`pyg_spectral.nn.conv` module.
args for :class:`torch_geometric.nn.models.MLP`.
**kwargs: Additional arguments of :class:`pyg_spectral.nn.conv`.
"""

def init_conv(self,
Expand Down
69 changes: 32 additions & 37 deletions pyg_spectral/nn/models/base_nn.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,19 +21,18 @@ class BaseNN(nn.Module):
r"""Base NN structure with MLP before and after convolution layers.
Args:
conv (str): Name of :class:`pyg_spectral.nn.conv` module.
num_hops (int): Total number of conv hops.
in_channels (int): Size of each input sample.
hidden_channels (int): Size of each hidden sample.
out_channels (int): Size of each output sample.
in_layers (int): Number of MLP layers before conv.
out_layers (int): Number of MLP layers after conv.
dropout_lin (float, optional): Dropout probability for both MLPs.
dropout_conv (float, optional): Dropout probability before conv.
conv: Name of :class:`pyg_spectral.nn.conv` module.
num_hops: Total number of conv hops.
in_channels: Size of each input sample.
hidden_channels: Size of each hidden sample.
out_channels: Size of each output sample.
in_layers: Number of MLP layers before conv.
out_layers: Number of MLP layers after conv.
dropout_lin: Dropout probability for both MLPs.
dropout_conv: Dropout probability before conv.
act, act_first, act_kwargs, norm, norm_kwargs, plain_last, bias:
args for :class:`pyg.nn.models.MLP`.
lib_conv (str, optional): Parent module library other than
:class:`pyg_spectral.nn.conv`.
args for :class:`torch_geometric.nn.models.MLP`.
lib_conv: Parent module library other than :class:`pyg_spectral.nn.conv`.
**kwargs: Additional arguments of :class:`pyg_spectral.nn.conv`.
"""
supports_edge_weight: Final[bool] = False
Expand Down Expand Up @@ -168,7 +167,7 @@ def preprocess(self,
x: Tensor,
edge_index: Adj
) -> Any:
r"""Preprocessing step that not counted in forward() overhead.
r"""Preprocessing step that is not counted in :meth:`forward()` overhead.
Here mainly transforming graph adjacency to actual propagation matrix.
"""
return self.get_propagate_mat(x, edge_index)
Expand Down Expand Up @@ -199,18 +198,16 @@ def forward(self,
) -> Tensor:
r"""
Args:
x (Tensor), edge_index (Adj): from pyg.data.Data
batch (Tensor, optional): The batch vector
x, edge_index: from :class:`torch_geometric.data.Data`
batch: The batch vector
:math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns
each element to a specific example.
Only needs to be passed in case the underlying normalization
layers require the :obj:`batch` information.
(default: :obj:`None`)
batch_size (int, optional): The number of examples :math:`B`.
batch_size: The number of examples :math:`B`.
Automatically calculated if not given.
Only needs to be passed in case the underlying normalization
layers require the :obj:`batch` information.
(default: :obj:`None`)
"""
if self.in_layers > 0:
x = self.in_mlp(x, batch=batch, batch_size=batch_size)
Expand All @@ -224,29 +221,27 @@ class BaseNNCompose(BaseNN):
r"""Base NN structure with multiple conv channels.
Args:
combine (str): How to combine different channels of convs. (one of
"sum", "sum_weighted", "cat").
--- BaseNN Args ---
conv (str): Name of :class:`pyg_spectral.nn.conv` module.
num_hops (int): Total number of conv hops.
in_channels (int): Size of each input sample.
hidden_channels (int): Size of each hidden sample.
out_channels (int): Size of each output sample.
in_layers (int): Number of MLP layers before conv.
out_layers (int): Number of MLP layers after conv.
dropout_lin (float, optional): Dropout probability for both MLPs.
dropout_conv (float, optional): Dropout probability before conv.
combine (str): How to combine different channels of convs. (:obj:`sum`,
:obj:`sum_weighted`, or :obj:`cat`).
conv: Name of :class:`pyg_spectral.nn.conv` module.
num_hops: Total number of conv hops.
in_channels: Size of each input sample.
hidden_channels: Size of each hidden sample.
out_channels: Size of each output sample.
in_layers: Number of MLP layers before conv.
out_layers: Number of MLP layers after conv.
dropout_lin: Dropout probability for both MLPs.
dropout_conv: Dropout probability before conv.
act, act_first, act_kwargs, norm, norm_kwargs, plain_last, bias:
args for :class:`pyg.nn.models.MLP`.
lib_conv (str, optional): Parent module library other than
:class:`pyg_spectral.nn.conv`.
**kwargs (optional): Additional arguments of the
:class:`pyg_spectral.nn.conv` module.
args for :class:`torch_geometric.nn.models.MLP`.
lib_conv: Parent module library other than :class:`pyg_spectral.nn.conv`.
**kwargs: Additional arguments of :class:`pyg_spectral.nn.conv`.
"""

def init_channel_list(self, conv: str, in_channels: int, hidden_channels: int, out_channels: int, **kwargs) -> List[int]:
"""
self.channel_list: width for each conv channel
Attributes:
channel_list: width for each conv channel
"""
self.combine = kwargs.pop('combine', 'sum')
n_conv = len(conv.split(','))
Expand Down Expand Up @@ -317,7 +312,7 @@ def preprocess(self,
x: Tensor,
edge_index: Adj
) -> Any:
r"""Preprocessing step that not counted in forward() overhead.
r"""Preprocessing step that is not counted in :meth:`forward()` overhead.
Here mainly transforming graph adjacency to actual propagation matrix.
"""
return [f(x, edge_index) for f in self.get_propagate_mat()]
Expand Down
4 changes: 4 additions & 0 deletions pyg_spectral/nn/models/cpp_comp.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,10 @@


class CppCompFixed(PrecomputedFixed):
r"""Decoupled structure with C++ propagation precomputation.
Fixed scalar propagation parameters and accumulating precompute results.
"""

def preprocess(self,
x: Tensor,
edge_index: Adj
Expand Down
Loading

0 comments on commit 73f33e6

Please sign in to comment.