Commit 6a9d31a3 authored by Lukas Wolf's avatar Lukas Wolf
Browse files

created padding modules

parent 2b566eea
import math

import torch
import torch.nn as nn
class Pad_Pool(nn.Module):
    """
    Padding layer placed in front of pool1d layers to achieve a
    padding='same' output shape.

    Pads `left` elements (default 0) on the left and `right` elements
    (default 1) on the right of the last dimension of x, filled with
    `value` (default 0).
    """
    def __init__(self, left=0, right=1, value=0):
        """
        Args:
            left: number of elements padded on the left side (default 0).
            right: number of elements padded on the right side (default 1).
            value: constant fill value used for the padding (default 0).
        """
        super().__init__()
        self.left = left
        # BUG FIX: original wrote `self.rigth = right` (typo), so forward()
        # raised AttributeError when reading the missing `self.right`.
        self.right = right
        self.value = value
        # Build the padding op once here instead of re-creating it on
        # every forward pass.
        self.pad = nn.ConstantPad1d(padding=(self.left, self.right),
                                    value=self.value)

    def forward(self, x):
        """Return x padded with `value` along its last dimension."""
        return self.pad(x)
class Pad_Conv(nn.Module):
    """
    Padding layer placed in front of conv1d layers to achieve a
    padding='same' output shape.

    For a kernel of size k, pads floor(k/2) - 1 elements on the left and
    floor(k/2) elements on the right of the last dimension of x, filled
    with `value`.

    NOTE(review): the total padding (2*floor(k/2) - 1) equals k - 1 only
    for even k; for odd k it is k - 2, one short of true 'same' padding —
    confirm the architectures only use even kernel sizes.
    """
    def __init__(self, kernel_size, value=0):
        """
        Args:
            kernel_size: size of the conv1d kernel this padding precedes.
            value: constant fill value used for the padding (default 0).
        """
        super().__init__()
        self.kernel_size = kernel_size
        self.value = value
        half = math.floor(kernel_size / 2)
        self.left = half - 1
        self.right = half

    def forward(self, x):
        """Return x padded with `value` along its last dimension."""
        padder = nn.ConstantPad1d(padding=(self.left, self.right),
                                  value=self.value)
        return padder(x)
\ No newline at end of file
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment