Module phiml.backend.torch.nets
PyTorch implementation of the unified machine learning API. Equivalent functions also exist for the other frameworks.
For API documentation, see phiml.nn.
Functions
def adagrad(net: torch.nn.modules.module.Module, learning_rate: float = 0.001, lr_decay=0.0, weight_decay=0.0, initial_accumulator_value=0.0, eps=1e-10)
def adam(net: torch.nn.modules.module.Module, learning_rate: float = 0.001, betas=(0.9, 0.999), epsilon=1e-07)
def conv_classifier(in_features: int, in_spatial: Union[tuple, list], num_classes: int, blocks=(64, 128, 256, 256, 512, 512), block_sizes=(2, 2, 3, 3, 3), dense_layers=(4096, 4096, 100), batch_norm=True, activation='ReLU', softmax=True, periodic=False)
def conv_net(in_channels: int, out_channels: int, layers: Sequence[int], batch_norm: bool = False, activation: Union[str, type] = 'ReLU', in_spatial: Union[int, tuple] = 2, periodic=False) ‑> torch.nn.modules.module.Module
def coupling_layer(in_channels: int, activation: Union[str, type] = 'ReLU', batch_norm=False, reverse_mask=False, in_spatial: Union[int, tuple] = 2)
def get_mask(inputs, reverse_mask, data_format='NHWC')
-
Compute the mask used to slice the input feature map for invertible nets.
def get_parameters(net: torch.nn.modules.module.Module, wrap=True) ‑> dict
def invertible_net(num_blocks: int, construct_net: Union[str, Callable], **construct_kwargs)
def load_state(obj: Union[torch.nn.modules.module.Module, torch.optim.optimizer.Optimizer], path: str)
def mlp(in_channels: int, out_channels: int, layers: Sequence[int], batch_norm=False, activation: Union[str, Callable] = 'ReLU', softmax=False) ‑> torch.nn.modules.module.Module
def res_net(in_channels: int, out_channels: int, layers: Sequence[int], batch_norm: bool = False, activation: Union[str, type] = 'ReLU', in_spatial: Union[int, tuple] = 2, periodic=False) ‑> torch.nn.modules.module.Module
def rmsprop(net: torch.nn.modules.module.Module, learning_rate: float = 0.001, alpha=0.99, eps=1e-08, weight_decay=0.0, momentum=0.0, centered=False)
def save_state(obj: Union[torch.nn.modules.module.Module, torch.optim.optimizer.Optimizer], path: str)
def sgd(net: torch.nn.modules.module.Module, learning_rate: float = 0.001, momentum=0.0, dampening=0.0, weight_decay=0.0, nesterov=False)
def u_net(in_channels: int, out_channels: int, levels: int = 4, filters: Union[int, Sequence[int]] = 16, batch_norm: bool = True, activation: Union[str, type] = 'ReLU', in_spatial: Union[int, tuple] = 2, periodic=False, use_res_blocks: bool = False, down_kernel_size=3, up_kernel_size=3) ‑> torch.nn.modules.module.Module
def update_weights(net: torch.nn.modules.module.Module, optimizer: torch.optim.optimizer.Optimizer, loss_function: Callable, *loss_args, check_nan=False, **loss_kwargs)
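As a quick orientation, the sketch below wires several of the functions above together: build a network, attach an optimizer, train with update_weights, then checkpoint. This is a minimal sketch, not authoritative usage (see phiml.nn for that); the synthetic data, the loss closure, and the file path are illustrative assumptions.

    # Minimal training sketch. Assumptions: synthetic data, phiml.math-based
    # loss closure, illustrative checkpoint path.
    from phiml import math
    from phiml.backend.torch.nets import mlp, adam, update_weights, save_state, load_state

    net = mlp(in_channels=2, out_channels=1, layers=[64, 64])  # 2 -> 64 -> 64 -> 1
    optimizer = adam(net, learning_rate=1e-3)

    x = math.random_uniform(math.batch(batch=128), math.channel(vector=2))  # synthetic inputs
    y = math.random_uniform(math.batch(batch=128), math.channel(vector=1))  # synthetic targets

    def loss_fn():
        # update_weights calls this closure, backpropagates the returned loss
        # and steps the optimizer.
        prediction = math.native_call(net, x)
        return math.l2_loss(prediction - y)

    for step in range(100):
        loss = update_weights(net, optimizer, loss_fn)

    save_state(net, 'net.pth')  # illustrative path
    load_state(net, 'net.pth')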
Classes
class ConvClassifier (in_features, in_spatial: list, num_classes: int, batch_norm: bool, use_softmax: bool, blocks: tuple, block_sizes: tuple, dense_layers: tuple, periodic: bool, activation)
-
Base class for all neural network modules.
Your models should also subclass this class.
Modules can also contain other Modules, allowing them to be nested in a tree structure. You can assign the submodules as regular attributes:

    import torch.nn as nn
    import torch.nn.functional as F

    class Model(nn.Module):
        def __init__(self):
            super().__init__()
            self.conv1 = nn.Conv2d(1, 20, 5)
            self.conv2 = nn.Conv2d(20, 20, 5)

        def forward(self, x):
            x = F.relu(self.conv1(x))
            return F.relu(self.conv2(x))

Submodules assigned in this way will be registered, and their parameters will be converted too when you call to(), etc.
Note: As per the example above, an __init__() call to the parent class must be made before assignment on the child.
training (bool): Whether this module is in training or evaluation mode.
Initializes internal Module state, shared by both nn.Module and ScriptModule.
Expand source code
class ConvClassifier(nn.Module):

    def __init__(self, in_features, in_spatial: list, num_classes: int, batch_norm: bool, use_softmax: bool, blocks: tuple, block_sizes: tuple, dense_layers: tuple, periodic: bool, activation):
        super(ConvClassifier, self).__init__()
        d = len(in_spatial)
        self.in_spatial = in_spatial
        self._blocks = blocks
        self.add_module('maxpool', MAX_POOL[d](2))
        for i, (prev, next) in enumerate(zip((in_features,) + tuple(blocks[:-1]), blocks)):
            block_size = block_sizes[i]
            layers = []
            for j in range(block_size):
                layers.append(CONV[d](prev if j == 0 else next, next, kernel_size=3, padding=1, padding_mode='circular' if periodic else 'zeros'))
                layers.append(NORM[d](next) if batch_norm else nn.Identity())
                layers.append(activation())
            self.add_module(f'conv{i+1}', nn.Sequential(*layers))
        flat_size = int(np.prod(in_spatial) * blocks[-1] / (2**d) ** len(blocks))
        self.mlp = mlp(flat_size, num_classes, dense_layers, batch_norm, activation, use_softmax)
        self.flatten = nn.Flatten()

    def forward(self, x):
        for i in range(len(self._blocks)):
            x = getattr(self, f'conv{i+1}')(x)
            x = self.maxpool(x)
        xf = self.flatten(x)
        y = self.mlp(xf)
        return y
Ancestors
- torch.nn.modules.module.Module
Class variables
var call_super_init : bool
var dump_patches : bool
var training : bool
Methods
def forward(self, x) ‑> Callable[..., Any]
-
Defines the computation performed at every call.
Should be overridden by all subclasses.
Note: Although the recipe for the forward pass needs to be defined within this function, one should call the Module instance afterwards instead of this, since the former takes care of running the registered hooks while the latter silently ignores them.
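A small configuration can be instantiated through the conv_classifier function listed above. The block sizes, input resolution, and class count below are illustrative assumptions, chosen so that each 2x max-pool keeps the spatial size integral (32 -> 16 -> 8); the VGG-like defaults require correspondingly larger inputs.

    # Hypothetical small configuration of conv_classifier (device placement aside).
    import torch
    from phiml.backend.torch.nets import conv_classifier

    net = conv_classifier(in_features=1, in_spatial=(32, 32), num_classes=10,
                          blocks=(16, 32), block_sizes=(2, 2), dense_layers=(64,))
    probs = net(torch.rand(4, 1, 32, 32))  # softmax=True by default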
class ConvNet (in_spatial, in_channels, out_channels, layers, batch_norm, activation, periodic: bool)
-
Base class for all neural network modules (docstring inherited verbatim from torch.nn.Module; see ConvClassifier above for the full text).
Initializes internal Module state, shared by both nn.Module and ScriptModule.
Expand source code
class ConvNet(nn.Module):

    def __init__(self, in_spatial, in_channels, out_channels, layers, batch_norm, activation, periodic: bool):
        super(ConvNet, self).__init__()
        activation = ACTIVATIONS[activation]
        if len(layers) < 1:
            layers.append(out_channels)
        self.layers = layers
        self.add_module(f'Conv_in', nn.Sequential(
            CONV[in_spatial](in_channels, layers[0], kernel_size=3, padding=1, padding_mode='circular' if periodic else 'zeros'),
            NORM[in_spatial](layers[0]) if batch_norm else nn.Identity(),
            activation()))
        for i in range(1, len(layers)):
            self.add_module(f'Conv{i}', nn.Sequential(
                CONV[in_spatial](layers[i - 1], layers[i], kernel_size=3, padding=1, padding_mode='circular' if periodic else 'zeros'),
                NORM[in_spatial](layers[i]) if batch_norm else nn.Identity(),
                activation()))
        self.add_module(f'Conv_out', CONV[in_spatial](layers[len(layers) - 1], out_channels, kernel_size=1))

    def forward(self, x):
        x = getattr(self, f'Conv_in')(x)
        for i in range(1, len(self.layers)):
            x = getattr(self, f'Conv{i}')(x)
        x = getattr(self, f'Conv_out')(x)
        return x
Ancestors
- torch.nn.modules.module.Module
Class variables
var call_super_init : bool
var dump_patches : bool
var training : bool
Methods
def forward(self, x) ‑> Callable[..., Any]
-
Defines the computation performed at every call. Should be overridden by all subclasses. See the note under ConvClassifier.forward above regarding calling the Module instance rather than forward() directly.
class CouplingLayer (construct_net: Callable, construction_kwargs: dict, reverse_mask)
-
Base class for all neural network modules (docstring inherited verbatim from torch.nn.Module; see ConvClassifier above for the full text).
Initializes internal Module state, shared by both nn.Module and ScriptModule.
Expand source code
class CouplingLayer(nn.Module):

    def __init__(self, construct_net: Callable, construction_kwargs: dict, reverse_mask):
        super(CouplingLayer, self).__init__()
        self.reverse_mask = reverse_mask
        self.s1 = construct_net(**construction_kwargs)
        self.t1 = construct_net(**construction_kwargs)
        self.s2 = construct_net(**construction_kwargs)
        self.t2 = construct_net(**construction_kwargs)

    def forward(self, x, invert=False):
        x = TORCH.as_tensor(x)
        mask = get_mask(x, self.reverse_mask, 'NCHW')
        if invert:
            v1 = x * mask
            v2 = x * (1 - mask)
            u2 = (1 - mask) * (v2 - self.t1(v1)) * torch.exp(-self.s1(v1))
            u1 = mask * (v1 - self.t2(u2)) * torch.exp(-self.s2(u2))
            return u1 + u2
        else:
            u1 = x * mask
            u2 = x * (1 - mask)
            v1 = mask * (u1 * torch.exp(self.s2(u2)) + self.t2(u2))
            v2 = (1 - mask) * (u2 * torch.exp(self.s1(v1)) + self.t1(v1))
            return v1 + v2
Ancestors
- torch.nn.modules.module.Module
Class variables
var call_super_init : bool
var dump_patches : bool
var training : bool
Methods
def forward(self, x, invert=False) ‑> Callable[..., Any]
-
Defines the computation performed at every call. Should be overridden by all subclasses. See the note under ConvClassifier.forward above regarding calling the Module instance rather than forward() directly.
class DenseNet (layers: list, activation: type, batch_norm: bool, use_softmax: bool)
-
Base class for all neural network modules (docstring inherited verbatim from torch.nn.Module; see ConvClassifier above for the full text).
Initializes internal Module state, shared by both nn.Module and ScriptModule.
Expand source code
class DenseNet(nn.Module):

    def __init__(self, layers: list, activation: type, batch_norm: bool, use_softmax: bool):
        super(DenseNet, self).__init__()
        self._layers = layers
        self._activation = activation
        self._batch_norm = batch_norm
        for i, (s1, s2) in enumerate(zip(layers[:-2], layers[1:-1])):
            self.add_module(f'linear{i}', _bias0(nn.Linear)(s1, s2, bias=True))
            if batch_norm:
                self.add_module(f'norm{i}', nn.BatchNorm1d(s2))
        self.add_module(f'linear_out', _bias0(nn.Linear)(layers[-2], layers[-1], bias=True))
        self.softmax = nn.Softmax() if use_softmax else None

    def forward(self, x):
        register_module_call(self)
        x = TORCH.as_tensor(x)
        for i in range(len(self._layers) - 2):
            x = self._activation()(getattr(self, f'linear{i}')(x))
            if self._batch_norm:
                x = getattr(self, f'norm{i}')(x)
        x = getattr(self, f'linear_out')(x)
        if self.softmax:
            x = self.softmax(x)
        return x
Ancestors
- torch.nn.modules.module.Module
Class variables
var call_super_init : bool
var dump_patches : bool
var training : bool
Methods
def forward(self, x) ‑> Callable[..., Any]
-
Defines the computation performed at every call. Should be overridden by all subclasses. See the note under ConvClassifier.forward above regarding calling the Module instance rather than forward() directly.
class DenseResNetBlock (layers, batch_norm, activation)
-
Base class for all neural network modules (docstring inherited verbatim from torch.nn.Module; see ConvClassifier above for the full text).
Initializes internal Module state, shared by both nn.Module and ScriptModule.
Expand source code
class DenseResNetBlock(nn.Module):

    def __init__(self, layers, batch_norm, activation):
        super(DenseResNetBlock, self).__init__()
        self._layers = layers
        self._activation = activation
        self._batch_norm = batch_norm
        for i, (s1, s2) in enumerate(zip(layers[:-1], layers[1:])):
            self.add_module(f'linear{i}', _bias0(nn.Linear)(s1, s2, bias=True))
            if batch_norm:
                self.add_module(f'norm{i}', nn.BatchNorm1d(s2))

    def forward(self, x):
        x0 = x
        for i in range(len(self._layers) - 1):
            x = self._activation()(getattr(self, f'linear{i}')(x))
            if self._batch_norm:
                x = getattr(self, f'norm{i}')(x)
        return x + x0
Ancestors
- torch.nn.modules.module.Module
Class variables
var call_super_init : bool
var dump_patches : bool
var training : bool
Methods
def forward(self, x) ‑> Callable[..., Any]
-
Defines the computation performed at every call. Should be overridden by all subclasses. See the note under ConvClassifier.forward above regarding calling the Module instance rather than forward() directly.
class DoubleConv (d: int, in_channels: int, out_channels: int, mid_channels: int, batch_norm: bool, activation: type, periodic: bool, kernel_size=3)
-
(convolution => [BN] => ReLU) * 2
Initializes internal Module state, shared by both nn.Module and ScriptModule.
Expand source code
class DoubleConv(nn.Module):
    """(convolution => [BN] => ReLU) * 2"""

    def __init__(self, d: int, in_channels: int, out_channels: int, mid_channels: int, batch_norm: bool, activation: type, periodic: bool, kernel_size=3):
        super().__init__()
        self.add_module('double_conv', nn.Sequential(
            CONV[d](in_channels, mid_channels, kernel_size=kernel_size, padding=1, padding_mode='circular' if periodic else 'zeros'),
            NORM[d](mid_channels) if batch_norm else nn.Identity(),
            activation(),
            CONV[d](mid_channels, out_channels, kernel_size=kernel_size, padding=1, padding_mode='circular' if periodic else 'zeros'),
            NORM[d](out_channels) if batch_norm else nn.Identity(),
            nn.ReLU(inplace=True)
        ))

    def forward(self, x):
        return self.double_conv(x)
Ancestors
- torch.nn.modules.module.Module
Class variables
var call_super_init : bool
var dump_patches : bool
var training : bool
Methods
def forward(self, x) ‑> Callable[..., Any]
-
Defines the computation performed at every call. Should be overridden by all subclasses. See the note under ConvClassifier.forward above regarding calling the Module instance rather than forward() directly.
class Down (d: int, in_channels: int, out_channels: int, batch_norm: bool, activation: Union[str, type], use_res_blocks: bool, periodic, kernel_size: int)
-
Downscaling with maxpool then double conv or resnet_block
Initializes internal Module state, shared by both nn.Module and ScriptModule.
Expand source code
class Down(nn.Module):
    """Downscaling with maxpool then double conv or resnet_block"""

    def __init__(self, d: int, in_channels: int, out_channels: int, batch_norm: bool, activation: Union[str, type], use_res_blocks: bool, periodic, kernel_size: int):
        super().__init__()
        self.add_module('maxpool', MAX_POOL[d](2))
        if use_res_blocks:
            self.add_module('conv', ResNetBlock(d, in_channels, out_channels, batch_norm, activation, periodic, kernel_size))
        else:
            self.add_module('conv', DoubleConv(d, in_channels, out_channels, out_channels, batch_norm, activation, periodic, kernel_size))

    def forward(self, x):
        x = self.maxpool(x)
        return self.conv(x)
Ancestors
- torch.nn.modules.module.Module
Class variables
var call_super_init : bool
var dump_patches : bool
var training : bool
Methods
def forward(self, x) ‑> Callable[..., Any]
-
Defines the computation performed at every call. Should be overridden by all subclasses. See the note under ConvClassifier.forward above regarding calling the Module instance rather than forward() directly.
class InvertibleNet (num_blocks: int, construct_net, construction_kwargs: dict)
-
Base class for all neural network modules (docstring inherited verbatim from torch.nn.Module; see ConvClassifier above for the full text).
Initializes internal Module state, shared by both nn.Module and ScriptModule.
Expand source code
class InvertibleNet(nn.Module):

    def __init__(self, num_blocks: int, construct_net, construction_kwargs: dict):
        super(InvertibleNet, self).__init__()
        self.num_blocks = num_blocks
        for i in range(num_blocks):
            self.add_module(f'coupling_block{i + 1}', CouplingLayer(construct_net, construction_kwargs, (i % 2 == 0)))

    def forward(self, x, backward=False):
        if backward:
            for i in range(self.num_blocks, 0, -1):
                x = getattr(self, f'coupling_block{i}')(x, backward)
        else:
            for i in range(1, self.num_blocks + 1):
                x = getattr(self, f'coupling_block{i}')(x, backward)
        return x
Ancestors
- torch.nn.modules.module.Module
Class variables
var call_super_init : bool
var dump_patches : bool
var training : bool
Methods
def forward(self, x, backward=False) ‑> Callable[..., Any]
-
Defines the computation performed at every call. Should be overridden by all subclasses. See the note under ConvClassifier.forward above regarding calling the Module instance rather than forward() directly.
class ResNet (in_spatial, in_channels, out_channels, layers, batch_norm, activation, periodic: bool)
-
Base class for all neural network modules (docstring inherited verbatim from torch.nn.Module; see ConvClassifier above for the full text).
Initializes internal Module state, shared by both nn.Module and ScriptModule.
Expand source code
class ResNet(nn.Module):

    def __init__(self, in_spatial, in_channels, out_channels, layers, batch_norm, activation, periodic: bool):
        super(ResNet, self).__init__()
        self.layers = layers
        if len(self.layers) < 1:
            layers.append(out_channels)
        self.add_module('Res_in', ResNetBlock(in_spatial, in_channels, layers[0], batch_norm, activation, periodic))
        for i in range(1, len(layers)):
            self.add_module(f'Res{i}', ResNetBlock(in_spatial, layers[i - 1], layers[i], batch_norm, activation, periodic))
        self.add_module('Res_out', CONV[in_spatial](layers[len(layers) - 1], out_channels, kernel_size=1))

    def forward(self, x):
        x = TORCH.as_tensor(x)
        x = getattr(self, 'Res_in')(x)
        for i in range(1, len(self.layers)):
            x = getattr(self, f'Res{i}')(x)
        x = getattr(self, 'Res_out')(x)
        return x
Ancestors
- torch.nn.modules.module.Module
Class variables
var call_super_init : bool
var dump_patches : bool
var training : bool
Methods
def forward(self, x) ‑> Callable[..., Any]
-
Defines the computation performed at every call. Should be overridden by all subclasses. See the note under ConvClassifier.forward above regarding calling the Module instance rather than forward() directly.
class ResNetBlock (in_spatial, in_channels, out_channels, batch_norm, activation, periodic: bool, kernel_size=3)
-
Base class for all neural network modules (docstring inherited verbatim from torch.nn.Module; see ConvClassifier above for the full text).
Initializes internal Module state, shared by both nn.Module and ScriptModule.
Expand source code
class ResNetBlock(nn.Module):

    def __init__(self, in_spatial, in_channels, out_channels, batch_norm, activation, periodic: bool, kernel_size=3):
        # Since in_channels and out_channels might be different, we need a sampling layer for up/down sampling input in order to add it as a skip connection
        super(ResNetBlock, self).__init__()
        if in_channels != out_channels:
            self.sample_input = CONV[in_spatial](in_channels, out_channels, kernel_size=1, padding=0)
            self.bn_sample = NORM[in_spatial](out_channels) if batch_norm else nn.Identity()
        else:
            self.sample_input = nn.Identity()
            self.bn_sample = nn.Identity()
        self.activation = ACTIVATIONS[activation] if isinstance(activation, str) else activation
        self.bn1 = NORM[in_spatial](out_channels) if batch_norm else nn.Identity()
        self.conv1 = CONV[in_spatial](in_channels, out_channels, kernel_size=kernel_size, padding=1, padding_mode='circular' if periodic else 'zeros')
        self.bn2 = NORM[in_spatial](out_channels) if batch_norm else nn.Identity()
        self.conv2 = CONV[in_spatial](out_channels, out_channels, kernel_size=kernel_size, padding=1, padding_mode='circular' if periodic else 'zeros')

    def forward(self, x):
        x = TORCH.as_tensor(x)
        out = self.activation()(self.bn1(self.conv1(x)))
        out = self.activation()(self.bn2(self.conv2(out)))
        out = (out + self.bn_sample(self.sample_input(x)))
        return out
Ancestors
- torch.nn.modules.module.Module
Class variables
var call_super_init : bool
var dump_patches : bool
var training : bool
Methods
def forward(self, x) ‑> Callable[..., Any]
-
Defines the computation performed at every call. Should be overridden by all subclasses. See the note under ConvClassifier.forward above regarding calling the Module instance rather than forward() directly.
class UNet (d: int, in_channels: int, out_channels: int, filters: tuple, batch_norm: bool, activation: type, periodic: bool, use_res_blocks: bool, down_kernel_size: int, up_kernel_size: int)
-
Base class for all neural network modules (docstring inherited verbatim from torch.nn.Module; see ConvClassifier above for the full text).
Initializes internal Module state, shared by both nn.Module and ScriptModule.
Expand source code
class UNet(nn.Module):

    def __init__(self, d: int, in_channels: int, out_channels: int, filters: tuple, batch_norm: bool, activation: type, periodic: bool, use_res_blocks: bool, down_kernel_size: int, up_kernel_size: int):
        super(UNet, self).__init__()
        self._levels = len(filters)
        self._spatial_rank = d
        if use_res_blocks:
            self.add_module('inc', ResNetBlock(d, in_channels, filters[0], batch_norm, activation, periodic, down_kernel_size))
        else:
            self.add_module('inc', DoubleConv(d, in_channels, filters[0], filters[0], batch_norm, activation, periodic, down_kernel_size))
        for i in range(1, self._levels):
            self.add_module(f'down{i}', Down(d, filters[i - 1], filters[i], batch_norm, activation, periodic, use_res_blocks, down_kernel_size))
            self.add_module(f'up{i}', Up(d, filters[-i] + filters[-i - 1], filters[-i - 1], batch_norm, activation, periodic, use_res_blocks, up_kernel_size))
        self.add_module('outc', CONV[d](filters[0], out_channels, kernel_size=1))

    def forward(self, x):
        register_module_call(self)
        x = TORCH.as_tensor(x)
        for size in x.shape[2:]:
            assert size % 2 ** (self._levels - 1) == 0, f"All spatial dims must be divisible by {2 ** (self._levels-1)} for U-Nets with {self._levels} levels but got {x.shape}. Please pad the input."
        x = self.inc(x)
        xs = [x]
        for i in range(1, self._levels):
            x = getattr(self, f'down{i}')(x)
            xs.insert(0, x)
        for i in range(1, self._levels):
            x = getattr(self, f'up{i}')(x, xs[i])
        x = self.outc(x)
        return x
Ancestors
- torch.nn.modules.module.Module
Class variables
var call_super_init : bool
var dump_patches : bool
var training : bool
Methods
def forward(self, x) ‑> Callable[..., Any]
-
Defines the computation performed at every call. Should be overridden by all subclasses. See the note under ConvClassifier.forward above regarding calling the Module instance rather than forward() directly.
class Up (d: int, in_channels: int, out_channels: int, batch_norm: bool, activation: type, periodic: bool, use_res_blocks: bool, kernel_size: int)
-
Upscaling then double conv
Initializes internal Module state, shared by both nn.Module and ScriptModule.
Expand source code
class Up(nn.Module):
    """Upscaling then double conv"""

    _MODES = [None, 'linear', 'bilinear', 'trilinear']

    def __init__(self, d: int, in_channels: int, out_channels: int, batch_norm: bool, activation: type, periodic: bool, use_res_blocks: bool, kernel_size: int):
        super().__init__()
        up = nn.Upsample(scale_factor=2, mode=Up._MODES[d])
        if use_res_blocks:
            conv = ResNetBlock(d, in_channels, out_channels, batch_norm, activation, periodic, kernel_size)
        else:
            conv = DoubleConv(d, in_channels, out_channels, in_channels // 2, batch_norm, activation, periodic, kernel_size)
        self.add_module('up', up)
        self.add_module('conv', conv)

    def forward(self, x1, x2):
        x1 = self.up(x1)
        # input is CHW
        # diff = [x2.size()[i] - x1.size()[i] for i in range(2, len(x1.shape))]
        # x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
        #                 diffY // 2, diffY - diffY // 2])
        # if you have padding issues, see
        # https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
        # https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
        x = torch.cat([x2, x1], dim=1)
        return self.conv(x)
Ancestors
- torch.nn.modules.module.Module
Class variables
var call_super_init : bool
var dump_patches : bool
var training : bool
Methods
def forward(self, x1, x2) ‑> Callable[..., Any]
-
Defines the computation performed at every call. Should be overridden by all subclasses. See the note under ConvClassifier.forward above regarding calling the Module instance rather than forward() directly.