Commit 51193468 authored by Lukas Wolf

removed bias in convolutions

parent 98b6e7a7
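
Note on the change: every convolution touched in this commit feeds a batch-normalization layer, and BatchNorm subtracts the per-channel mean before applying its own learnable shift, so any constant bias added by the preceding convolution cancels out exactly. Dropping the bias therefore saves parameters without changing what the network can represent. A minimal sketch of that equivalence (layer sizes are illustrative, not taken from this repo):

import torch
import torch.nn as nn

torch.manual_seed(0)
conv_bias = nn.Conv2d(1, 8, kernel_size=3, bias=True)     # convolution with a bias term
conv_nobias = nn.Conv2d(1, 8, kernel_size=3, bias=False)  # same convolution, bias removed
conv_nobias.weight.data.copy_(conv_bias.weight.data)      # share the kernel weights
bn = nn.BatchNorm2d(8)

x = torch.randn(4, 1, 32, 32)
bn.train()  # normalize with batch statistics
# The per-channel mean absorbs the constant bias, so both paths agree:
print(torch.allclose(bn(conv_bias(x)), bn(conv_nobias(x)), atol=1e-5))  # True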
@@ -33,14 +33,16 @@ class EEGNet(BaseNet):
         self.conv1 = nn.Conv2d(
             in_channels=1,
             out_channels=self.F1,
-            kernel_size=(1, self.kernel_size)
+            kernel_size=(1, self.kernel_size),
+            bias=False
         )
         self.batchnorm1 = nn.BatchNorm2d(self.F1, False)
         self.depthwise_conv1 = nn.Conv2d(
             in_channels=self.F1,
             out_channels=self.F1 * self.D,
             groups=self.F1,
-            kernel_size=(self.channels, 1)
+            kernel_size=(self.channels, 1),
+            bias=False
         )
         self.batchnorm1_2 = nn.BatchNorm2d(self.F1 * self.D)
         self.activation1 = nn.ELU()
@@ -54,12 +56,14 @@ class EEGNet(BaseNet):
             in_channels=self.F1 * self.D,
             out_channels=self.F2,
             groups=self.F1*self.D,
-            kernel_size=(1,64)
+            kernel_size=(1,64),
+            bias=False
         )
         self.pointwise_conv2 = nn.Conv2d( # no need for padding or pointwise
             in_channels=self.F2,
             out_channels=4,
-            kernel_size=1
+            kernel_size=1,
+            bias=False
         )
         self.batchnorm2 = nn.BatchNorm2d(4, False)
         self.activation2 = nn.ELU()
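Review note on the EEGNet hunks: all four convolutions above are followed by a BatchNorm2d, so the removed biases were redundant. One thing worth flagging while touching these lines: nn.BatchNorm2d(self.F1, False) passes False positionally, and the second positional parameter of BatchNorm2d is eps, not affine, so this sets eps=0; nn.BatchNorm2d(self.F1, affine=False) is presumably what was intended. For the depthwise step, groups=self.F1 is what makes the convolution depthwise, e.g. with illustrative sizes (placeholders, not the repo's config):

import torch.nn as nn

F1, D, channels = 8, 2, 64  # illustrative: temporal filters, depth multiplier, electrodes
depthwise = nn.Conv2d(in_channels=F1, out_channels=F1 * D,
                      groups=F1, kernel_size=(channels, 1), bias=False)
# groups=F1 gives each of the F1 input maps its own D spatial filters:
# weight shape (F1*D, 1, channels, 1) -> 8 * 2 * 64 = 1024 weights,
# versus 16 * 8 * 64 = 8192 for the equivalent ungrouped convolution.
print(sum(p.numel() for p in depthwise.parameters()))  # 1024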
@@ -67,20 +67,32 @@ class Inception_module(nn.Module):
         self.pad_conv_in = Pad_Conv(kernel_size=kernel_size)
         # This is the bottleneck convolution
         self.conv_in = nn.Conv1d(in_channels=nb_channels if depth==0 else nb_features,
-                                 out_channels=bottleneck_size, kernel_size=kernel_size, bias=False)
+                                 out_channels=bottleneck_size,
+                                 kernel_size=kernel_size,
+                                 bias=False)
         self.pad_pool_in = Pad_Pool(left=1, right=1)
         self.maxpool_in = nn.MaxPool1d(kernel_size=3, stride=1)
         # 3 parallel convolutions taking the bottleneck as input
-        self.conv1 = nn.Conv1d(in_channels=bottleneck_size, out_channels=nb_filters, kernel_size=kernel_size_s[0], bias=False)
+        self.conv1 = nn.Conv1d(in_channels=bottleneck_size,
+                               out_channels=nb_filters,
+                               kernel_size=kernel_size_s[0],
+                               bias=False)
         self.pad1 = Pad_Conv(kernel_size=kernel_size_s[0])
-        self.conv2 = nn.Conv1d(in_channels=bottleneck_size, out_channels=nb_filters, kernel_size=kernel_size_s[1], bias=False)
+        self.conv2 = nn.Conv1d(in_channels=bottleneck_size,
+                               out_channels=nb_filters,
+                               kernel_size=kernel_size_s[1],
+                               bias=False)
         self.pad2 = Pad_Conv(kernel_size=kernel_size_s[1])
-        self.conv3 = nn.Conv1d(in_channels=bottleneck_size, out_channels=nb_filters, kernel_size=kernel_size_s[2], bias=False)
+        self.conv3 = nn.Conv1d(in_channels=bottleneck_size,
+                               out_channels=nb_filters,
+                               kernel_size=kernel_size_s[2],
+                               bias=False)
         self.pad3 = Pad_Conv(kernel_size=kernel_size_s[2])
         # and the 4th parallel convolution following the maxpooling, no padding needed since 1x1 convolution
         self.conv4 = nn.Conv1d(in_channels=nb_channels if depth==0 else nb_features,
                                out_channels=nb_filters,
-                               kernel_size=1, bias=False)
+                               kernel_size=1,
+                               bias=False)
         self.batchnorm = nn.BatchNorm1d(num_features=nb_features)
         self.activation = nn.ReLU()
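For context on the Inception module: the diff shows only the constructor, but for BatchNorm1d(num_features=nb_features) to line up, the four branches are presumably run in parallel and concatenated along the channel dimension, with nb_features == 4 * nb_filters. A hypothetical sketch of that forward pass (reconstructed for illustration; nothing beyond the attributes defined above is from the repo):

import torch

def forward(self, x):  # hypothetical reconstruction, not from the repo
    bottleneck = self.conv_in(self.pad_conv_in(x))  # shared bottleneck branch
    pooled = self.maxpool_in(self.pad_pool_in(x))   # maxpool branch, shape-preserving
    out = torch.cat([
        self.conv1(self.pad1(bottleneck)),
        self.conv2(self.pad2(bottleneck)),
        self.conv3(self.pad3(bottleneck)),
        self.conv4(pooled),                         # 1x1 conv, no padding needed
    ], dim=1)                                       # concat -> 4 * nb_filters channels
    return self.activation(self.batchnorm(out))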
@@ -71,16 +71,19 @@ class TCSConv1d(nn.Module):
     """
     Implements a 1D separable convolution with constant tensor shape, similar to padding='same' in keras
     """
-    def __init__(self, mother, depth):
+    def __init__(self, mother, depth, bias=False):
         super(TCSConv1d, self).__init__()
         self.pad_depthwise = Pad_Conv(mother.kernel_size)
         # groups=in_channels makes it separable
         self.depthwise = nn.Conv1d(in_channels=mother.nb_channels if depth==0 else mother.nb_features,
                                    out_channels=mother.nb_channels if depth==0 else mother.nb_features,
                                    groups=mother.nb_channels if depth==0 else mother.nb_features,
-                                   kernel_size=mother.kernel_size, bias=False)
+                                   kernel_size=mother.kernel_size,
+                                   bias=bias)
         self.pointwise = nn.Conv1d(in_channels=mother.nb_channels if depth==0 else mother.nb_features,
-                                   out_channels=mother.nb_features, kernel_size=1)
+                                   out_channels=mother.nb_features,
+                                   kernel_size=1,
+                                   bias=bias)

     def forward(self, x):
         x = self.pad_depthwise(x)
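
Beyond the reflow, this hunk changes behavior in one subtle way: __init__ now takes a bias flag (default False), and the pointwise convolution, which previously got PyTorch's default bias=True because no bias argument was passed, now honors the flag too. So by default the commit also strips a bias the old code silently kept. As a reminder of why the separable factorization is used at all, a parameter count with illustrative numbers (not the repo's config):

import torch.nn as nn

C, k = 64, 40  # channels and kernel size, illustrative only
full = nn.Conv1d(C, C, kernel_size=k, bias=False)                 # 64*64*40 = 163,840 weights
depthwise = nn.Conv1d(C, C, kernel_size=k, groups=C, bias=False)  # 64*40    =   2,560 weights
pointwise = nn.Conv1d(C, C, kernel_size=1, bias=False)            # 64*64    =   4,096 weights
# separable pair: 6,656 weights, about 4% of the full convolution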
@@ -95,10 +98,16 @@ class SeparableConv2d(nn.Module):
     def __init__(self, in_channels, out_channels, kernel_size, padding=0, bias=False):
         super(SeparableConv2d, self).__init__()
-        self.depthwise = nn.Conv2d(in_channels, in_channels, kernel_size=kernel_size,
-                                   groups=in_channels, bias=bias, padding=padding)
-        self.pointwise = nn.Conv2d(in_channels, out_channels,
-                                   kernel_size=1, bias=bias)
+        self.depthwise = nn.Conv2d(in_channels,
+                                   in_channels,
+                                   kernel_size=kernel_size,
+                                   groups=in_channels,
+                                   bias=bias,
+                                   padding=padding)
+        self.pointwise = nn.Conv2d(in_channels,
+                                   out_channels,
+                                   kernel_size=1,
+                                   bias=bias)

     def forward(self, x):
         out = self.depthwise(x)
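The SeparableConv2d hunk is purely cosmetic: the arguments are unchanged, just reflowed one per line, and the class already exposed bias with a False default. A minimal usage sketch with illustrative shapes:

import torch

# Assumes SeparableConv2d as defined above; shapes are illustrative.
sep = SeparableConv2d(in_channels=16, out_channels=32, kernel_size=3, padding=1)
y = sep(torch.randn(2, 16, 28, 28))
print(y.shape)  # torch.Size([2, 32, 28, 28]) -- padding=1 preserves 28x28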
@@ -24,7 +24,9 @@ class PyramidalCNN(ConvNet):
         return nn.Sequential(
             Pad_Conv(kernel_size=self.kernel_size),
             nn.Conv1d(in_channels=129 if depth==0 else depth*self.nb_filters,
-                      out_channels=(depth+1)*self.nb_filters, kernel_size=self.kernel_size, bias=False),
+                      out_channels=(depth+1)*self.nb_filters,
+                      kernel_size=self.kernel_size,
+                      bias=False),
             nn.BatchNorm1d(num_features=(depth+1)*self.nb_filters),
             nn.ReLU(),
             Pad_Pool(left=0, right=1),
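In the PyramidalCNN block, the channel width grows linearly with depth, which is what makes the architecture pyramidal; the 129 input channels at depth 0 presumably correspond to the EEG electrode montage. With an illustrative nb_filters=16 (not the repo's config), the blocks would map:

nb_filters = 16  # illustrative value
for depth in range(4):
    in_ch = 129 if depth == 0 else depth * nb_filters
    out_ch = (depth + 1) * nb_filters
    print(f"block {depth}: {in_ch} -> {out_ch} channels")
# block 0: 129 -> 16, block 1: 16 -> 32, block 2: 32 -> 48, block 3: 48 -> 64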
@@ -24,7 +24,7 @@ class XCEPTION(ConvNet):
         The module of Xception. Consists of a separable convolution followed by batch normalization and a ReLU activation function.
         """
         return nn.Sequential(
-            TCSConv1d(mother=self, depth=depth),
+            TCSConv1d(mother=self, depth=depth, bias=False),
             nn.BatchNorm1d(num_features=self.nb_features),
             nn.ReLU()
         )
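Finally, the XCEPTION call site passes bias=False explicitly rather than relying on TCSConv1d's new default. Since nn.BatchNorm1d directly follows the separable convolution, the bias would be canceled anyway (the same argument as for EEGNet above), so the explicit argument is about keeping the intent visible where the block is assembled; a variant without normalization would be the one case where bias=True earns its parameters.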