
Commit 6545065f authored by Lukas Wolf

torch inception: change out_channels of the right-path conv

parent aaecb2ec
@@ -49,10 +49,13 @@ class Trainer:
         """
         try:
             if config['task'] == 'prosaccade-clf':
+                logging.info("Loading LR task data")
                 data = np.load('./data/prepared/LR_task_with_antisaccade_synchronised_' + config['preprocessing'] + '.npz')
             elif config['task'] == 'gaze-reg':
+                logging.info("Loading coordinate task data")
                 data = np.load('./data/prepared/Position_task_with_dots_synchronised_' + config['preprocessing'] + '.npz')
             elif config['task'] == 'angle-reg':
+                logging.info("Loading angle regression data")
                 if config['dataset'] == 'calibration_task':
                     data = np.load('./data/prepared/Direction_task_with_dots_synchronised_' + config['preprocessing'] + '.npz')
                 else:
......
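For orientation, a minimal standalone sketch (not part of this commit) of how the branch above behaves: config['task'] and config['preprocessing'] select one of the prepared .npz archives, and the new logging.info calls record which one is loaded. The concrete config values and the logging setup here are assumptions.

import logging
import numpy as np

logging.basicConfig(level=logging.INFO)

# Hypothetical config; only the keys visible in the diff are used here.
config = {'task': 'prosaccade-clf', 'preprocessing': 'min'}

if config['task'] == 'prosaccade-clf':
    logging.info("Loading LR task data")
    path = './data/prepared/LR_task_with_antisaccade_synchronised_' + config['preprocessing'] + '.npz'
    data = np.load(path)   # NpzFile: a dict-like, lazily loaded container of arrays
    print(data.files)      # names of the arrays stored in the archive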
@@ -41,7 +41,7 @@
             "regularization": 0
         }
     },
-    "eegnet": {
+    "pyramidal_cnn": {
        "prosaccade-clf": {
            "learning_rate": 1e-3,
            "regularization": 0
@@ -55,7 +55,7 @@
             "regularization": 0
         }
     },
-    "pyramidal_cnn": {
+    "eegnet": {
        "prosaccade-clf": {
            "learning_rate": 1e-3,
            "regularization": 0
......
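The two hunks above only swap the position of the "eegnet" and "pyramidal_cnn" blocks inside the nested hyperparameter dictionary (model name, then task, then settings). A small sketch of how such a structure is typically consumed; the lookup helper and the surrounding variable name are hypothetical, and only the values visible above are reproduced:

hyperparams = {
    "pyramidal_cnn": {
        "prosaccade-clf": {"learning_rate": 1e-3, "regularization": 0},
    },
    "eegnet": {
        "prosaccade-clf": {"learning_rate": 1e-3, "regularization": 0},
    },
}

def get_hyperparams(model_name, task):
    # Hypothetical helper: resolve the settings for a (model, task) pair.
    return hyperparams[model_name][task]

print(get_hyperparams("eegnet", "prosaccade-clf"))  # {'learning_rate': 0.001, 'regularization': 0}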
@@ -61,11 +61,13 @@ class ConvNet(ABC, BaseNet):
         # Stack the modules
         shortcut_cnt = 0
         for d in range(self.depth):
-            #print(f"x after block {d} {x.size()}")
+            print(f"x after block {d} {x.size()}")
             x = self.conv_blocks[d](x)
             if self.use_residual and d % 3 == 2:
                 res = self.shortcuts[shortcut_cnt](input_res)
                 shortcut_cnt += 1
+                print(f"x before add {x.size()}")
+                print(f"res before add {res.size()}")
                 x = torch.add(x, res)
                 x = nn.functional.relu(x)
                 input_res = x
@@ -73,11 +75,11 @@ class ConvNet(ABC, BaseNet):
         x = self.gap_layer_pad(x)
         x = self.gap_layer(x)
-        #print(f"x after gap {x.size()}")
+        print(f"x after gap {x.size()}")
         x = x.view(self.batch_size, -1)
-        #print(f"x before output {x.size()}")
+        print(f"x before output {x.size()}")
         output = self.output_layer(x)  # Defined in BaseNet
         return output
......
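The prints enabled above trace tensor shapes through the residual path: after every third conv block (d % 3 == 2), a shortcut projection of the block input is added to the current activation. A standalone sketch of that control flow with identity placeholders instead of the real conv blocks and shortcut convolutions; the depth and the (batch, channels, samples) shape are assumptions:

import torch
import torch.nn as nn

depth = 6
conv_blocks = nn.ModuleList(nn.Identity() for _ in range(depth))      # stand-ins for the real conv blocks
shortcuts = nn.ModuleList(nn.Identity() for _ in range(depth // 3))   # stand-ins for the shortcut convolutions

x = torch.randn(2, 16, 500)   # assumed (batch, channels, samples) layout
input_res = x
shortcut_cnt = 0
for d in range(depth):
    x = conv_blocks[d](x)
    if d % 3 == 2:                                   # residual connection every third block
        res = shortcuts[shortcut_cnt](input_res)
        shortcut_cnt += 1
        print(f"x before add {x.size()}")
        print(f"res before add {res.size()}")
        x = nn.functional.relu(torch.add(x, res))
        input_res = x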
@@ -32,7 +32,8 @@ class Inception(ConvNet):
         In parallel it uses a simple convolution with kernel size 1 with max pooling for stability during training.
         The outputs of each convolution are concatenated, followed by batch normalization and a ReLu activation.
         """
-        return Inception_module(mother=self, depth=depth)
+        return Inception_module(self.kernel_size, self.nb_features, self.nb_channels,
+                                self.nb_filters, self.bottleneck_size, depth)
         """
         Tensorflow code:
         if int(input_tensor.shape[-1]) > 1:
@@ -59,29 +60,28 @@ class Inception(ConvNet):
         """
 class Inception_module(nn.Module):
-    def __init__(self, mother, depth):
+    def __init__(self, kernel_size, nb_features, nb_channels, nb_filters, bottleneck_size, depth):
         super().__init__()
-        self.mother = mother
-        kernel_size_s = [mother.kernel_size // (2 ** i) for i in range(3)]
+        kernel_size_s = [kernel_size // (2 ** i) for i in range(3)]
         # Define all the layers and modules we need in the forward pass: first the initial convolution and the parallel maxpooling
-        self.pad_conv_in = Pad_Conv(kernel_size=mother.kernel_size)
+        self.pad_conv_in = Pad_Conv(kernel_size=kernel_size)
         # This is the bottleneck convolution
-        self.conv_in = nn.Conv1d(in_channels=mother.nb_channels if depth==0 else mother.nb_features,
-                                 out_channels=mother.bottleneck_size, kernel_size=mother.kernel_size, bias=False)
+        self.conv_in = nn.Conv1d(in_channels=nb_channels if depth==0 else nb_features,
+                                 out_channels=bottleneck_size, kernel_size=kernel_size, bias=False)
         self.pad_pool_in = Pad_Pool(left=1, right=1)
         self.maxpool_in = nn.MaxPool1d(kernel_size=3, stride=1)
         # 3 parallel convolutions taking the bottleneck as input
-        self.conv1 = nn.Conv1d(in_channels=mother.bottleneck_size, out_channels=mother.nb_filters, kernel_size=kernel_size_s[0], bias=False)
+        self.conv1 = nn.Conv1d(in_channels=bottleneck_size, out_channels=nb_filters, kernel_size=kernel_size_s[0], bias=False)
         self.pad1 = Pad_Conv(kernel_size=kernel_size_s[0])
-        self.conv2 = nn.Conv1d(in_channels=mother.bottleneck_size, out_channels=mother.nb_filters, kernel_size=kernel_size_s[1], bias=False)
+        self.conv2 = nn.Conv1d(in_channels=bottleneck_size, out_channels=nb_filters, kernel_size=kernel_size_s[1], bias=False)
         self.pad2 = Pad_Conv(kernel_size=kernel_size_s[1])
-        self.conv3 = nn.Conv1d(in_channels=mother.bottleneck_size, out_channels=mother.nb_filters, kernel_size=kernel_size_s[2], bias=False)
+        self.conv3 = nn.Conv1d(in_channels=bottleneck_size, out_channels=nb_filters, kernel_size=kernel_size_s[2], bias=False)
         self.pad3 = Pad_Conv(kernel_size=kernel_size_s[2])
         # and the 4th parallel convolution following the maxpooling, no padding needed since 1x1 convolution
-        self.conv4 = nn.Conv1d(in_channels=mother.nb_channels if depth==0 else mother.nb_features,
-                               out_channels=mother.nb_channels if depth==0 else mother.nb_filters,
+        self.conv4 = nn.Conv1d(in_channels=nb_channels if depth==0 else nb_features,
+                               out_channels=nb_filters,
                                kernel_size=1, bias=False)
-        self.batchnorm = nn.BatchNorm1d(num_features=mother.nb_features)
+        self.batchnorm = nn.BatchNorm1d(num_features=nb_features)
         self.activation = nn.ReLU()
     def forward(self, x):
@@ -102,7 +102,7 @@ class Inception_module(nn.Module):
         x_right = self.pad_pool_in(x)
         x_right = self.maxpool_in(x_right)
         x_right = self.conv4(x_right)
-        # Concatenate the 4 outputs
-        x = torch.cat(tensors=(x_left1, x_left2, x_left3, x_right), dim=2) # concatenate along the feature dimension
+        # Concatenate the 4 outputs
+        x = torch.cat(tensors=(x_left1, x_left2, x_left3, x_right), dim=1) # concatenate along the feature dimension
         x = self.batchnorm(x)
         return self.activation(x)
\ No newline at end of file
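The two edits in this file work together: the right (max-pool) path's 1x1 convolution now also emits nb_filters channels, and the four branch outputs are concatenated along dim=1, the channel dimension of Conv1d tensors (batch, channels, length). The concatenation therefore yields 4 * nb_filters channels, matching the BatchNorm1d(num_features=nb_features) that follows. A standalone sketch of that channel arithmetic; the concrete sizes are assumptions, not values from the repository:

import torch
import torch.nn as nn

nb_filters = 16
nb_features = 4 * nb_filters           # what the following BatchNorm1d expects
batch_size, timesteps = 2, 500

# One tensor per parallel branch: three bottleneck convolutions plus the max-pool/1x1 path,
# each now producing nb_filters channels.
branches = [torch.randn(batch_size, nb_filters, timesteps) for _ in range(4)]

x = torch.cat(branches, dim=1)         # dim=1: concatenate along the channel/feature dimension
print(x.shape)                         # torch.Size([2, 64, 500])

bn = nn.BatchNorm1d(num_features=nb_features)
out = bn(x)                            # only works because the channel counts line up
print(out.shape)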