bump to 0.1.42: try another formula
mountain committed Aug 14, 2021
1 parent 6e869f4 commit c547f6b
Showing 3 changed files with 28 additions and 31 deletions.
47 changes: 23 additions & 24 deletions leibniz/nn/layer/hyperbolic.py
@@ -1,6 +1,5 @@
 # -*- coding: utf-8 -*-
 
-import numpy as np
 import torch as th
 import torch.nn as nn
 
@@ -15,7 +14,7 @@ def __init__(self, in_channel, out_channel, step, relu, conv, reduction=16):
 
         self.conv1 = conv(in_channel, in_channel, kernel_size=3, stride=1, padding=1)
         self.conv2 = conv(in_channel, out_channel, kernel_size=3, stride=1, padding=1)
-        self.simam = SimAM(out_channel, reduction=reduction, conv=conv)
+        self.simam = SimAM(out_channel, reduction)
 
     def forward(self, x):
         y = self.conv1(x)
@@ -30,12 +29,12 @@ def __init__(self, in_channel, out_channel, step, relu, conv, reduction=16):
         super(Bottleneck, self).__init__()
         self.step = step
         self.relu = relu
-        hidden = max(in_channel, out_channel) // 4
+        hidden = min(in_channel, out_channel) // 4 + 1
 
         self.conv1 = conv(in_channel, hidden, kernel_size=1, bias=False)
         self.conv2 = conv(hidden, hidden, kernel_size=3, bias=False, padding=1)
         self.conv3 = conv(hidden, out_channel, kernel_size=1, bias=False)
-        self.simam = SimAM(out_channel, reduction=reduction, conv=conv)
+        self.simam = SimAM(out_channel, reduction)
 
     def forward(self, x):
         y = self.conv1(x)
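
Note on the width change above: the new formula guarantees at least one hidden channel, whereas the old max(in_channel, out_channel) // 4 collapses to zero when both channel counts are below 4, which would make the 1x1 convolutions invalid. A quick standalone comparison (plain Python, no repository code):

# Old vs. new bottleneck width; (2, 2) shows why the old formula breaks.
for cin, cout in [(2, 2), (4, 8), (64, 32)]:
    old = max(cin, cout) // 4        # 0 for (2, 2): a zero-channel conv
    new = min(cin, cout) // 4 + 1    # always >= 1
    print((cin, cout), old, new)
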
@@ -57,20 +56,20 @@ def __init__(self, dim, step, relu, conv, reduction=16):
         self.step = step
 
         self.input = BasicBlock(dim, 2 * dim, step, relu, conv, reduction=reduction)
-        self.output = BasicBlock(5 * dim, dim, step, relu, conv, reduction=reduction)
+        self.output = BasicBlock(2 * dim, 2 * dim, step, relu, conv, reduction=reduction)
 
     def forward(self, x):
-        input = self.input(x) * self.step
-        u = input[:, :self.dim]
-        v = input[:, self.dim:]
+        r = self.input(x) * self.step
+        u = r[:, :self.dim]
+        v = r[:, self.dim:]
 
-        y1 = x * v + u
-        y2 = x * u - v
-        y3 = - x * v - u
-        y4 = - x * u + v
-        ys = th.cat((y1, y2, y3, y4, x), dim=1)
+        y1 = x * (1 + v) + u
+        ys = th.cat([y1, x], dim=1)
+        r = self.output(ys) * self.step
+        u = r[:, :self.dim]
+        v = r[:, self.dim:]
 
-        return x + self.output(ys)
+        return y1 * (1 + v) + u
 
 
 class HyperBottleneck(nn.Module):
@@ -83,17 +82,17 @@ def __init__(self, dim, step, relu, conv, reduction=16):
         self.step = step
 
         self.input = Bottleneck(dim, 2 * dim, step, relu, conv, reduction=reduction)
-        self.output = Bottleneck(5 * dim, dim, step, relu, conv, reduction=reduction)
+        self.output = Bottleneck(2 * dim, 2 * dim, step, relu, conv, reduction=reduction)
 
     def forward(self, x):
-        input = self.input(x) * self.step
-        u = input[:, :self.dim]
-        v = input[:, self.dim:]
+        r = self.input(x) * self.step
+        u = r[:, :self.dim]
+        v = r[:, self.dim:]
 
-        y1 = x * v + u
-        y2 = x * u - v
-        y3 = - x * v - u
-        y4 = - x * u + v
-        ys = th.cat((y1, y2, y3, y4, x), dim=1)
+        y1 = x * (1 + v) + u
+        ys = th.cat([y1, x], dim=1)
+        r = self.output(ys) * self.step
+        u = r[:, :self.dim]
+        v = r[:, self.dim:]
 
-        return x + self.output(ys)
+        return y1 * (1 + v) + u
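
The "another formula" of the commit title replaces the four-branch update (y1 = x·v + u, y2 = x·u − v, y3 = −x·v − u, y4 = −x·u + v, concatenated with x into a 5·dim tensor) with a two-stage multiplicative-additive update: the input block predicts (u, v) and forms y1 = x·(1 + v) + u, then the output block predicts a second (u, v) from [y1, x] and the layer returns y1·(1 + v) + u. Below is a minimal sketch of the new rule with plain tensors; input_block and output_block are illustrative stand-ins for the BasicBlock/Bottleneck pairs, not repository classes:

import torch as th

def hyper_step(x, input_block, output_block, step, dim):
    # First stage: predict (u, v) from x and apply the affine update.
    r = input_block(x) * step            # shape: (N, 2 * dim, ...)
    u, v = r[:, :dim], r[:, dim:]
    y1 = x * (1 + v) + u

    # Second stage: predict a fresh (u, v) from [y1, x] and update y1.
    r = output_block(th.cat([y1, x], dim=1)) * step
    u, v = r[:, :dim], r[:, dim:]
    return y1 * (1 + v) + u

# Toy check with 1x1 convolutions in place of the real blocks.
dim = 4
inp = th.nn.Conv2d(dim, 2 * dim, kernel_size=1)
out = th.nn.Conv2d(2 * dim, 2 * dim, kernel_size=1)
x = th.randn(2, dim, 8, 8)
print(hyper_step(x, inp, out, step=0.1, dim=dim).shape)  # torch.Size([2, 4, 8, 8])

Note how the output block's 2·dim → 2·dim shape matches the new BasicBlock(2 * dim, 2 * dim, ...) signature, which is why the old 5 * dim input width is gone.
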
10 changes: 4 additions & 6 deletions leibniz/nn/net/unet.py
@@ -7,7 +7,7 @@
 import torch.nn as nn
 
 from leibniz.nn.conv import DepthwiseSeparableConv1d, DepthwiseSeparableConv2d, DepthwiseSeparableConv3d
-from leibniz.nn.layer.cbam import CBAM
+from leibniz.nn.layer.senet import SELayer
 from leibniz.nn.net.hyptube import HypTube
 
 logger = logging.getLogger()
@@ -115,7 +115,7 @@ def forward(self, x):
 
 
 class Block(nn.Module):
-    def __init__(self, transform, activation=True, dropout=-1, relu=None, attn=CBAM, dim=2, normalizor='batch', conv=None):
+    def __init__(self, transform, activation=True, dropout=-1, relu=None, attn=SELayer, dim=2, normalizor='batch', conv=None):
 
         super(Block, self).__init__()
         self.activation = activation
@@ -240,7 +240,7 @@ def __init__(self, in_channels, out_channels, block=None, attn=None, relu=None,
             relu = nn.ReLU(inplace=True)
 
         if attn is None:
-            attn = CBAM
+            attn = SELayer
 
         ex = extension
         c0 = int(ex * num_filters)
@@ -256,7 +256,7 @@ def __init__(self, in_channels, out_channels, block=None, attn=None, relu=None,
             )
         else:
             self.conv_padding = 1
-        self.iconv = Conv(in_channels, c0, kernel_size=5, padding=2, groups=1)
+        self.iconv = Conv(in_channels, c0, kernel_size=ksize_in, padding=(ksize_in - 1) // 2, groups=1)
         self.oconv = Conv(c0, out_channels, kernel_size=3, padding=self.conv_padding, bias=False, groups=1)
 
         if final_normalized:
@@ -301,8 +301,6 @@ def __init__(self, in_channels, out_channels, block=None, attn=None, relu=None,
             raise ValueError('scales exceeded!')
 
         if self.dim == 2 and enhencer is not None:
-            self.enhencer_in = enhencer(c0, (c0 + 1) // 2, c0)
-            self.enhencer_out = enhencer(c0, (c0 + 1) // 2, c0)
             self.enhencer_mid = enhencer(co, (c0 + 1) // 2, co)
 
     def get_conv_for_prepare(self):
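
Among the unet.py changes, the input convolution now derives its padding from the kernel size: for stride 1 and odd kernel size k, padding (k − 1) // 2 preserves spatial dimensions, so kernel_size=ksize_in with padding=(ksize_in - 1) // 2 generalizes the previous hard-coded kernel_size=5, padding=2 to any odd ksize_in. A quick check with plain nn.Conv2d (standing in for the Conv factory the repository uses):

import torch as th
import torch.nn as nn

# "Same" padding for stride 1 and odd kernel size: output spatial
# dims equal input spatial dims for every odd k.
x = th.randn(1, 3, 32, 32)
for k in (3, 5, 7):
    conv = nn.Conv2d(3, 8, kernel_size=k, padding=(k - 1) // 2)
    assert conv(x).shape[-2:] == x.shape[-2:]  # spatial size unchanged
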
2 changes: 1 addition & 1 deletion setup.py
@@ -5,7 +5,7 @@
 
 setuptools.setup(
     name="leibniz",
-    version="0.1.40",
+    version="0.1.42",
     author="Mingli Yuan",
     author_email="[email protected]",
     description="Leibniz is a package providing facilities to express learnable differential equations based on PyTorch",
