Commit 4375c072 authored by Lukas Wolf

saccade only task

parent 14344e29
......@@ -11,7 +11,9 @@ class Classifier_CNN(ConvNet):
The Classifier_CNN is one of the simplest classifiers. It extends the ConvNet class, which is built from modules of a specific depth.
"""
def __init__(self, input_shape, kernel_size=64, epochs = 50, nb_filters=16, verbose=True, batch_size=64, use_residual=True, depth=12):
def __init__(self, input_shape, kernel_size=64, epochs = 50, nb_filters=16, verbose=True, batch_size=64, use_residual=True, depth=12, regularization=0.01):
self.regularization = regularization
super(Classifier_CNN, self).__init__(input_shape, kernel_size=kernel_size, epochs=epochs, nb_filters=nb_filters,
verbose=verbose, batch_size=batch_size, use_residual=use_residual,
......
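For reference, a minimal sketch of the pattern these constructors now parameterize, assuming standard tf.keras (the helper name is hypothetical): a single regularization value scales both an L1 penalty on the kernel weights and an L2 penalty on the activations.

import tensorflow as tf

def regularized_conv1d(input_tensor, nb_filters=16, kernel_size=64, regularization=0.01):
    # One knob scales both penalties, as in the layers in the hunks below:
    # L1 on the kernel weights, L2 on the layer activations.
    return tf.keras.layers.Conv1D(
        filters=nb_filters, kernel_size=kernel_size, padding='same', use_bias=False,
        kernel_regularizer=tf.keras.regularizers.l1(regularization),
        activity_regularizer=tf.keras.regularizers.l2(regularization))(input_tensor)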
......@@ -16,7 +16,7 @@ class Regression_DEEPEYE(Regression_ConvNet):
def __init__(self, input_shape, kernel_size=40, nb_filters=32, verbose=True, batch_size=64, use_residual=True,
depth=6, bottleneck_size=32, preprocessing=True, preprocessing_F1 = 8, preprocessing_D = 2,
preprocessing_kernLength = 250, use_simple_convolution=True, use_separable_convolution=True,
epochs=1, learning_rate=0.01):
epochs=1, learning_rate=0.01, regularization=0.01):
"""
The DeepEye architecture has the following basic structure. It offers the possibility to do preprocessing inspired by EEGNet.
It is made of modules of a specific depth. Each module is made of the InceptionTime submodule, a separable convolution, and a simple
......@@ -28,6 +28,8 @@ class Regression_DEEPEYE(Regression_ConvNet):
self.bottleneck_size = bottleneck_size
self.use_simple_convolution = use_simple_convolution
self.use_separable_convolution = use_separable_convolution
self.regularization = regularization
if preprocessing: input_shape = input_shape + (1,)
super(Regression_DEEPEYE, self).__init__(input_shape=input_shape, kernel_size=kernel_size, nb_filters=nb_filters,
......@@ -72,8 +74,8 @@ class Regression_DEEPEYE(Regression_ConvNet):
"""
if int(input_tensor.shape[-1]) > 1:
input_inception = tf.keras.layers.Conv1D(filters=self.bottleneck_size, kernel_size=1, padding='same', use_bias=False,
kernel_regularizer=tf.keras.regularizers.l1(0.01),
activity_regularizer=tf.keras.regularizers.l2(0.01))(input_tensor)
kernel_regularizer=tf.keras.regularizers.l1(self.regularization),
activity_regularizer=tf.keras.regularizers.l2(self.regularization))(input_tensor)
else:
input_inception = input_tensor
......@@ -83,26 +85,26 @@ class Regression_DEEPEYE(Regression_ConvNet):
for i in range(len(kernel_size_s)):
conv_list.append(
tf.keras.layers.Conv1D(filters=self.nb_filters, kernel_size=kernel_size_s[i], padding='same', use_bias=False,
kernel_regularizer=tf.keras.regularizers.l1(0.01),
activity_regularizer=tf.keras.regularizers.l2(0.01))(input_inception))
kernel_regularizer=tf.keras.regularizers.l1(self.regularization),
activity_regularizer=tf.keras.regularizers.l2(self.regularization))(input_inception))
max_pool_1 = tf.keras.layers.MaxPool1D(pool_size=10, strides=1, padding='same')(input_tensor)
conv_6 = tf.keras.layers.Conv1D(filters=self.nb_filters, kernel_size=1, padding='same', use_bias=False,
kernel_regularizer=tf.keras.regularizers.l1(0.01),
activity_regularizer=tf.keras.regularizers.l2(0.01))(max_pool_1)
kernel_regularizer=tf.keras.regularizers.l1(self.regularization),
activity_regularizer=tf.keras.regularizers.l2(self.regularization))(max_pool_1)
conv_list.append(conv_6)
if self.use_simple_convolution:
max_pool_2 = tf.keras.layers.MaxPool1D(pool_size=10, strides=1, padding='same')(input_tensor)
conv_7 = tf.keras.layers.Conv1D(filters=self.nb_filters // 8, kernel_size=16, padding='same', use_bias=False,
kernel_regularizer=tf.keras.regularizers.l1(0.01),
activity_regularizer=tf.keras.regularizers.l2(0.01))(max_pool_2)
kernel_regularizer=tf.keras.regularizers.l1(self.regularization),
activity_regularizer=tf.keras.regularizers.l2(self.regularization))(max_pool_2)
conv_list.append(conv_7)
if self.use_separable_convolution:
conv_8 = tf.keras.layers.SeparableConv1D(filters=self.nb_filters, kernel_size=32, padding='same', use_bias=False, depth_multiplier=1,
kernel_regularizer=tf.keras.regularizers.l1(0.01),
activity_regularizer=tf.keras.regularizers.l2(0.01))(input_tensor)
kernel_regularizer=tf.keras.regularizers.l1(self.regularization),
activity_regularizer=tf.keras.regularizers.l2(self.regularization))(input_tensor)
conv_list.append(conv_8)
x = tf.keras.layers.Concatenate(axis=2)(conv_list)
......
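Putting the hunks above together, a self-contained sketch of the inception submodule; the halving kernel-size schedule is an assumption borrowed from the InceptionTime convention, the rest mirrors the layers shown above.

import tensorflow as tf

def inception_module_sketch(input_tensor, nb_filters=32, bottleneck_size=32,
                            kernel_size=40, regularization=0.01):
    # Shared regularizer kwargs, matching the layers in the diff.
    def reg():
        return dict(kernel_regularizer=tf.keras.regularizers.l1(regularization),
                    activity_regularizer=tf.keras.regularizers.l2(regularization))
    # Bottleneck 1x1 convolution; skipped for single-channel input, as above.
    if int(input_tensor.shape[-1]) > 1:
        input_inception = tf.keras.layers.Conv1D(
            filters=bottleneck_size, kernel_size=1, padding='same',
            use_bias=False, **reg())(input_tensor)
    else:
        input_inception = input_tensor
    # Parallel convolutions with decreasing kernel sizes (assumed schedule).
    conv_list = [tf.keras.layers.Conv1D(
                     filters=nb_filters, kernel_size=kernel_size // (2 ** i),
                     padding='same', use_bias=False, **reg())(input_inception)
                 for i in range(3)]
    # Max-pool branch followed by a 1x1 convolution, as in conv_6 above.
    max_pool = tf.keras.layers.MaxPool1D(pool_size=10, strides=1, padding='same')(input_tensor)
    conv_list.append(tf.keras.layers.Conv1D(
        filters=nb_filters, kernel_size=1, padding='same',
        use_bias=False, **reg())(max_pool))
    return tf.keras.layers.Concatenate(axis=2)(conv_list)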
......@@ -14,7 +14,7 @@ class Regression_INCEPTION(Regression_ConvNet):
"""
def __init__(self, input_shape, kernel_size=64, epochs = 50, nb_filters=16, verbose=True, batch_size=64, use_residual=True, depth=12, bottleneck_size=16,
learning_rate=0.01, regularization=0.03):
learning_rate=0.01, regularization=0.01):
self.bottleneck_size = bottleneck_size
self.regularization = regularization
......
......@@ -11,7 +11,9 @@ class Regression_PyramidalCNN(Regression_ConvNet):
The Regression_PyramidalCNN is one of the simplest regressors. It extends the Regression_ConvNet class, which is built from modules of a
specific depth; at each depth the number of filters is increased.
"""
def __init__(self, input_shape, kernel_size=16, epochs = 50, nb_filters=16, verbose=True, batch_size=64, use_residual=False, depth=6, learning_rate=0.01):
def __init__(self, input_shape, kernel_size=16, epochs = 50, nb_filters=16, verbose=True, batch_size=64, use_residual=False, depth=6, learning_rate=0.01, regularization=0.01):
self.regularization = regularization
super(Regression_PyramidalCNN, self).__init__(input_shape, kernel_size=kernel_size, epochs=epochs, nb_filters=nb_filters,
verbose=verbose, batch_size=batch_size, use_residual=use_residual, depth=depth,
......@@ -22,8 +24,8 @@ class Regression_PyramidalCNN(Regression_ConvNet):
The CNN module is made of a simple convolution with batch normalization and ReLU activation. Finally, MaxPooling is also applied.
"""
x = tf.keras.layers.Conv1D(filters=self.nb_filters*(current_depth + 1), kernel_size=self.kernel_size, padding='same', use_bias=False,
kernel_regularizer=tf.keras.regularizers.l1(0.01),
activity_regularizer=tf.keras.regularizers.l2(0.01))(input_tensor)
kernel_regularizer=tf.keras.regularizers.l1(self.regularization),
activity_regularizer=tf.keras.regularizers.l2(self.regularization))(input_tensor)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation(activation='relu')(x)
x = tf.keras.layers.MaxPool1D(pool_size=2, strides=2)(x)
......
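A sketch of how these modules presumably stack across the network; the loop wiring is an assumption, while the per-module body matches the hunk above.

import tensorflow as tf

def pyramidal_stack_sketch(input_tensor, depth=6, nb_filters=16, kernel_size=16,
                           regularization=0.01):
    x = input_tensor
    for current_depth in range(depth):
        # Filter count grows linearly with depth: 16, 32, 48, ...
        x = tf.keras.layers.Conv1D(
            filters=nb_filters * (current_depth + 1), kernel_size=kernel_size,
            padding='same', use_bias=False,
            kernel_regularizer=tf.keras.regularizers.l1(regularization),
            activity_regularizer=tf.keras.regularizers.l2(regularization))(x)
        x = tf.keras.layers.BatchNormalization()(x)
        x = tf.keras.layers.Activation(activation='relu')(x)
        # Each module halves the temporal resolution.
        x = tf.keras.layers.MaxPool1D(pool_size=2, strides=2)(x)
    return x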
......@@ -12,7 +12,10 @@ class Regression_XCEPTION(Regression_ConvNet):
Each module, in our implementation, consists of a separable convolution followed by batch normalization and a ReLU activation layer.
"""
def __init__(self, input_shape, kernel_size=40, nb_filters=128, verbose=True, epochs=1,
batch_size=64, use_residual=True, depth=6, learning_rate=0.01):
batch_size=64, use_residual=True, depth=6, learning_rate=0.01, regularization=0.01):
self.regularization = regularization
super(Regression_XCEPTION, self).__init__( input_shape,
kernel_size=kernel_size,
nb_filters=nb_filters,
......@@ -29,8 +32,8 @@ class Regression_XCEPTION(Regression_ConvNet):
The Xception module consists of a separable convolution followed by batch normalization and a ReLU activation function.
"""
x = tf.keras.layers.SeparableConv1D(filters=self.nb_filters, kernel_size=self.kernel_size, padding='same', use_bias=False, depth_multiplier=1,
kernel_regularizer=tf.keras.regularizers.l1(0.01),
activity_regularizer=tf.keras.regularizers.l2(0.01))(input_tensor)
kernel_regularizer=tf.keras.regularizers.l1(self.regularization),
activity_regularizer=tf.keras.regularizers.l2(self.regularization))(input_tensor)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation(activation='relu')(x)
return x
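A back-of-the-envelope comparison of why a separable convolution is used here: with bias disabled, as above, the depthwise-plus-pointwise factorization needs far fewer weights than a full Conv1D. The 128-input-channel figure below is an illustrative assumption.

def conv1d_weights(kernel_size, in_channels, filters):
    # Full convolution: one kernel per (input channel, filter) pair.
    return kernel_size * in_channels * filters

def separable_conv1d_weights(kernel_size, in_channels, filters, depth_multiplier=1):
    # Depthwise pass over each channel, then a 1x1 pointwise mix.
    return (kernel_size * in_channels * depth_multiplier
            + in_channels * depth_multiplier * filters)

# With kernel_size=40 and nb_filters=128 as above, for 128 input channels:
# conv1d_weights(40, 128, 128)           -> 655,360
# separable_conv1d_weights(40, 128, 128) -> 21,504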
......@@ -60,8 +60,8 @@ config['run'] = 'ensemble'
config['ensemble'] = 1 #number of models in the ensemble method
# Choosing model
config['model'] = 'cnn'
#config['model'] = 'inception'
#config['model'] = 'cnn'
config['model'] = 'inception'
#config['model'] = 'eegnet'
#config['model'] = 'deepeye'
#config['model'] = 'xception'
......@@ -108,17 +108,26 @@ config['eegnet'] = {}
config['deepeye-rnn'] = {}
if config['gaze-reg']:
# Compute the maximum fixation duration, which is used for preprocessing, e.g. zero-padding
#TODO: automatically set the input shapes depending on the dataset to run, i.e. fix only, sacc only, etc.
#config['cnn']['input_shape'] = (int(config['max_duration']), 129) # e.g. for max_duration 300 we have shape (150,129)
config['cnn']['input_shape'] = (100, 129)
config['pyramidal_cnn']['input_shape'] = (int(config['max_duration']), 129)
config['inception']['input_shape'] = (int(config['max_duration']), 129)
config['deepeye']['input_shape'] = (int(config['max_duration']), 129)
config['deepeye-rnn']['input_shape'] = (int(config['max_duration']), 129)
config['xception']['input_shape'] = (int(config['max_duration']), 129)
#config['pyramidal_cnn']['input_shape'] = (int(config['max_duration']), 129)
config['pyramidal_cnn']['input_shape'] = (100, 129)
#config['inception']['input_shape'] = (int(config['max_duration']), 129)
config['inception']['input_shape'] = (100, 129)
#config['deepeye']['input_shape'] = (int(config['max_duration']), 129)
config['deepeye']['input_shape'] = (100, 129)
#config['xception']['input_shape'] = (int(config['max_duration']), 129)
config['xception']['input_shape'] = (100, 129)
#TODO: EEGnet not yet implemented for regression
#config['deepeye-rnn']['input_shape'] = (int(config['max_duration']), 129)
#config['eegnet']['channels'] = 129
#config['eegnet']['samples'] = config['max_duration'] = 150
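One way to resolve the TODO above, sketched as a hypothetical helper; the mode name 'sacc_only' is an assumption, since only config['data_mode'] itself appears later in this diff.

def input_shape_for(config):
    # 129 EEG channels throughout, per the shapes above.
    n_channels = 129
    if config['data_mode'] == 'sacc_only':  # hypothetical mode name
        n_samples = 100  # saccades are filtered/padded to length 100 below
    else:
        n_samples = int(config['max_duration'])
    return (n_samples, n_channels)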
......@@ -139,6 +148,7 @@ model_folder_name = timestamp if config['model'] == '' else timestamp + "_" + co
# Modify the model folder name depending on which task runs
if config['gaze-reg']:
model_folder_name += '_gaze-reg'
model_folder_name += '_' + config['data_mode']
else:
model_folder_name += '_left-right-pred'
......
......@@ -36,30 +36,30 @@ def run(trainX, trainY):
print('Beginning model number {}/{} ...'.format(i+1, config['ensemble']))
if config['model'] == 'cnn':
reg = Regression_CNN(input_shape=config['cnn']['input_shape'], kernel_size=64, epochs = 10,
reg = Regression_CNN(input_shape=config['cnn']['input_shape'], kernel_size=64, epochs = 50,
nb_filters=16, verbose=True, batch_size=64, use_residual=True, depth=12,
learning_rate=0.001)#, regularization=0.03)
learning_rate=0.001, regularization=0.02)
elif config['model'] == 'inception':
reg = Regression_INCEPTION(input_shape=config['inception']['input_shape'], use_residual=True,
kernel_size=64, nb_filters=32, depth=16, bottleneck_size=32, epochs=50,
learning_rate=0.001)#, regularization=0.03)
learning_rate=0.001, regularization=0.02)
elif config['model'] == 'xception':
reg = Regression_XCEPTION(input_shape=config['inception']['input_shape'], use_residual=True,
kernel_size=64, nb_filters=32, depth=24, epochs=50,
learning_rate=0.001)#, regularization=0.03)
learning_rate=0.001, regularization=0.02)
elif config['model'] == 'deepeye':
reg = Regression_DEEPEYE(input_shape=config['deepeye']['input_shape'], use_residual=True,
kernel_size=64, nb_filters=32, depth=10, epochs=50, preprocessing=False,
use_separable_convolution=True, use_simple_convolution=True,
bottleneck_size=32,
learning_rate=0.001)#, regularization=0.03)
learning_rate=0.001, regularization=0.02)
elif config['model'] == 'pyramidal_cnn':
reg = Regression_PyramidalCNN(input_shape=config['cnn']['input_shape'], epochs=50,
learning_rate=0.001)#, regularization=0.03)
learning_rate=0.001, regularization=0.02)
else:
logging.info('Cannot start the program. Please choose one model in the config.py file')
......
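The diff does not show how the ensemble members are combined; a plausible sketch, assuming each trained member exposes a Keras-style predict(), is simple prediction averaging.

import numpy as np

def ensemble_predict(models, X):
    # Hypothetical aggregation: average the members' predictions.
    predictions = [model.predict(X) for model in models]
    return np.mean(predictions, axis=0)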
......@@ -3,8 +3,8 @@
#SBATCH --mail-type=ALL # mail configuration: NONE, BEGIN, END, FAIL, REQUEUE, ALL
#SBATCH --output=log/%j.out # where to store the output (%j is the JOBID), subdirectory must exist
#SBATCH --error=log/%j.err # where to store error messages
#SBATCH --gres=gpu:2
#SBATCH --mem=60G
#SBATCH --gres=gpu:1
#SBATCH --mem=40G
echo "Running on host: $(hostname)"
echo "In directory: $(pwd)"
......
......@@ -152,7 +152,7 @@ def get_sacc_data(verbose=True):
x_len, y_len = x_datapoint.shape
# Pad the saccade-only data; currently pad all to length 100
if x_len < 20 or x_len > 100:
if x_len < 10 or x_len > 100:
continue
padding_size = 100 - x_len
......
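For clarity, a sketch of the filter-and-pad logic this hunk relaxes (lowering the minimum length from 20 to 10 samples); the zero-padding call is an assumption consistent with the comment above.

import numpy as np

def filter_and_pad_saccade(x_datapoint, min_len=10, max_len=100):
    x_len = x_datapoint.shape[0]
    if x_len < min_len or x_len > max_len:
        return None  # skipped, as in the loop above
    padding_size = max_len - x_len
    # Zero-pad along the time axis so every saccade has length 100.
    return np.pad(x_datapoint, ((0, padding_size), (0, 0)), mode='constant')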