Commit 6a9d31a3 authored by Lukas Wolf

created padding modules

parent 2b566eea
import torch
import torch.nn as nn
import math
class conv_block(nn.Module):
    """
    Basic convolutional block (Conv1d -> BatchNorm1d -> ReLU -> MaxPool1d) used as a building block in our modules
    """
    def __init__(self, in_channels, out_channels, **kwargs):
        super(conv_block, self).__init__()
        self.relu = nn.ReLU()
        self.conv = nn.Conv1d(in_channels, out_channels, **kwargs)
        self.batchnorm = nn.BatchNorm1d(out_channels)
        self.maxpool = nn.MaxPool1d(**kwargs)

    def forward(self, x):
        return self.maxpool(self.relu(self.batchnorm(self.conv(x))))
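A minimal usage sketch for conv_block; the shapes and parameters below are illustrative assumptions, not part of the commit. Note that **kwargs is forwarded to both Conv1d and MaxPool1d, so kernel_size here configures the convolution and the pooling:

x = torch.randn(4, 16, 100)                                        # (batch, channels, length) -- assumed shapes
block = conv_block(in_channels=16, out_channels=32, kernel_size=3)
out = block(x)                                                     # Conv1d -> BatchNorm1d -> ReLU -> MaxPool1d
print(out.shape)                                                   # torch.Size([4, 32, 32]): conv shortens 100 -> 98, pool (kernel 3, stride 3) -> 32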
class Inception_block(nn.Module):
    """
    Implements one Inception module that can be stacked into a model
    """
    def __init__(
        self, in_channels, out_1x1, red_3x3, out_3x3, red_5x5, out_5x5, out_1x1pool
    ):
        super(Inception_block, self).__init__()
        self.branch1 = conv_block(in_channels, out_1x1, kernel_size=(1, 1))
        self.branch2 = nn.Sequential(
            conv_block(in_channels, red_3x3, kernel_size=(1, 1)),
            conv_block(red_3x3, out_3x3, kernel_size=(3, 3), padding=(1, 1)),
        )
        self.branch3 = nn.Sequential(
            conv_block(in_channels, red_5x5, kernel_size=(1, 1)),
            conv_block(red_5x5, out_5x5, kernel_size=(5, 5), padding=(2, 2)),
        )
        self.branch4 = nn.Sequential(
            nn.MaxPool2d(kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            conv_block(in_channels, out_1x1pool, kernel_size=(1, 1)),
        )
    def forward(self, x):
        # Concatenate the four branch outputs along the channel dimension
        return torch.cat(
            [self.branch1(x), self.branch2(x), self.branch3(x), self.branch4(x)], 1
        )


class Pad_Pool(nn.Module):
    """
    Implements a padding layer in front of pool1d layers used in our architectures to achieve padding=same output shape
    Pads 0 to the left and 1 to the right side of x by default
    """
    def __init__(self, left=0, right=1, value=0):
        super().__init__()
        self.left = left
        self.right = right
        self.value = value

    def forward(self, x):
        # Constant padding applied on the fly before the following pooling layer
        pad = nn.ConstantPad1d(padding=(self.left, self.right), value=self.value)
        return pad(x)
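A quick sanity check of Pad_Pool, sketched with assumed shapes: padding (0, 1) in front of a stride-1 MaxPool1d with kernel size 2 keeps the sequence length unchanged, which is the padding=same behaviour the docstring describes:

x = torch.randn(4, 16, 100)                            # (batch, channels, length) -- assumed shapes
same_pool = nn.Sequential(Pad_Pool(left=0, right=1),
                          nn.MaxPool1d(kernel_size=2, stride=1))
print(same_pool(x).shape)                              # torch.Size([4, 16, 100]) -- length preserved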
class Shortcut_layer(nn.Module):
    """
    Implements the functionality of a shortcut with a conv1d module in between
    """
    def __init__(self, in_channels, out_channels) -> None:
        super().__init__()
        self.conv = nn.Conv1d(in_channels=in_channels, out_channels=out_channels, kernel_size=1)
        self.batch_norm = nn.BatchNorm1d(num_features=out_channels)
        self.relu = nn.ReLU()

    def forward(self, shortcut, concat_x):
        """
        Applies the 1x1 convolution and batch norm to the shortcut input,
        adds the result to concat_x (the output of the regular layers),
        and passes the sum through a ReLU
        """
        shortcut = self.conv(shortcut)
        shortcut = self.batch_norm(shortcut)
        concat = torch.add(shortcut, concat_x)
        out = self.relu(concat)
        return out


class Pad_Conv(nn.Module):
    """
    Implements a padding layer in front of conv1d layers used in our architectures to achieve padding=same output shape
    Pads kernel_size//2 - 1 to the left and kernel_size//2 to the right side of x
    """
    def __init__(self, kernel_size, value=0):
        super().__init__()
        self.kernel_size = kernel_size
        self.value = value
        self.left = math.floor(kernel_size/2) - 1
        self.right = math.floor(kernel_size/2)

    def forward(self, x):
        # Constant padding applied on the fly before the following convolution layer
        pad = nn.ConstantPad1d(padding=(self.left, self.right), value=self.value)
        return pad(x)
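A corresponding sanity check for Pad_Conv, again with assumed shapes. With the left = kernel_size//2 - 1, right = kernel_size//2 split, the total padding equals kernel_size - 1 only when kernel_size is even, so the sequence length is exactly preserved for even kernels (as in the sketch below):

x = torch.randn(4, 16, 100)                            # (batch, channels, length) -- assumed shapes
kernel_size = 16                                       # even kernel size (assumed for illustration)
same_conv = nn.Sequential(
    Pad_Conv(kernel_size=kernel_size),
    nn.Conv1d(in_channels=16, out_channels=32, kernel_size=kernel_size),
)
print(same_conv(x).shape)                              # torch.Size([4, 32, 100]) -- length preserved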