Commit 9140093a authored by Lukas Wolf

new loss for fix_sacc_fix dataset

parent 1d88c038
......@@ -39,10 +39,9 @@ class Regression_INCEPTION(Regression_ConvNet):
else:
input_inception = input_tensor
# kernel_size_s = [3, 5, 8, 11, 17]
kernel_size_s = [self.kernel_size // (2 ** i) for i in range(3)]
kernel_size_s = [self.kernel_size // (2 ** i) for i in range(3)] # for kernelsize=40 we have [40,20,10]
conv_list = []
# create the filters in parallel
for i in range(len(kernel_size_s)):
conv_list.append(
tf.keras.layers.Conv1D(filters=self.nb_filters, kernel_size=kernel_size_s[i], padding='same', use_bias=False,
......@@ -50,6 +49,7 @@ class Regression_INCEPTION(Regression_ConvNet):
activity_regularizer=tf.keras.regularizers.l2(self.regularization))(input_inception))
max_pool_1 = tf.keras.layers.MaxPool1D(pool_size=3, strides=1, padding='same')(input_tensor)
conv_6 = tf.keras.layers.Conv1D(filters=self.nb_filters, kernel_size=1, padding='same', use_bias=False,
kernel_regularizer=tf.keras.regularizers.l1(self.regularization),
activity_regularizer=tf.keras.regularizers.l2(self.regularization))(max_pool_1)
......
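For context, a minimal sketch of the full InceptionTime-style module this hunk modifies, assuming the standard wiring from the InceptionTime reference implementation (the bottleneck and concatenation steps are truncated in the diff; names and defaults here are illustrative):

import tensorflow as tf

def inception_module_sketch(input_tensor, nb_filters=32, kernel_size=40, bottleneck_size=32):
    # Bottleneck: 1x1 convolution that reduces the channel count before the wide kernels
    input_inception = tf.keras.layers.Conv1D(filters=bottleneck_size, kernel_size=1,
                                             padding='same', use_bias=False)(input_tensor)
    # Three parallel convolutions; for kernel_size=40 this gives [40, 20, 10]
    kernel_size_s = [kernel_size // (2 ** i) for i in range(3)]
    conv_list = [tf.keras.layers.Conv1D(filters=nb_filters, kernel_size=k,
                                        padding='same', use_bias=False)(input_inception)
                 for k in kernel_size_s]
    # Max-pool branch followed by a 1x1 convolution, as in the hunk above
    max_pool_1 = tf.keras.layers.MaxPool1D(pool_size=3, strides=1, padding='same')(input_tensor)
    conv_list.append(tf.keras.layers.Conv1D(filters=nb_filters, kernel_size=1,
                                            padding='same', use_bias=False)(max_pool_1))
    # Concatenate all branches along the channel axis, then batch norm and ReLU
    x = tf.keras.layers.Concatenate(axis=2)(conv_list)
    x = tf.keras.layers.BatchNormalization()(x)
    return tf.keras.layers.Activation('relu')(x)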
......@@ -48,7 +48,7 @@ class Regression_ConvNet(ABC):
else:
self.model = self._build_model()
self.model.compile(loss='mean_squared_error', optimizer=keras.optimizers.Adam(learning_rate=learning_rate))
self.model.compile(loss=config['loss'], optimizer=keras.optimizers.Adam(learning_rate=learning_rate))
if self.verbose:
self.model.summary()
......@@ -105,11 +105,15 @@ class Regression_ConvNet(ABC):
if self.use_residual and d % 3 == 2:
x = self._shortcut_layer(input_res, x)
input_res = x
gap_layer = tf.keras.layers.GlobalAveragePooling1D()(x)
if config['split']:
return gap_layer
# Optional: add some more dense layers here
gap_layer = tf.keras.layers.Dense(300)(gap_layer)
gap_layer = tf.keras.layers.Dense(50)(gap_layer)
if config['data_mode'] == "fix_sacc_fix":
output_layer = tf.keras.layers.Dense(1, activation='linear')(gap_layer) # only predict the angle in this task
else:
......
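The body of _shortcut_layer is not shown in this diff (only the Add and ReLU appear in the Siamese hunks below); a minimal sketch of the standard InceptionTime-style residual shortcut these classes appear to use, where the 1x1 projection is an assumption:

def _shortcut_layer_sketch(input_tensor, out_tensor):
    # Project the residual input to the channel count of out_tensor so the
    # element-wise Add is valid, then batch-normalize, add, and activate.
    shortcut_y = tf.keras.layers.Conv1D(filters=int(out_tensor.shape[-1]), kernel_size=1,
                                        padding='same', use_bias=False)(input_tensor)
    shortcut_y = tf.keras.layers.BatchNormalization()(shortcut_y)
    x = tf.keras.layers.Add()([shortcut_y, out_tensor])
    return tf.keras.layers.Activation('relu')(x)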
......@@ -65,8 +65,30 @@ class Siamese_ConvNet(ABC):
x = keras.layers.Add()([shortcut_y, out_tensor])
x = keras.layers.Activation('relu')(x)
return x
def _cnn_module(self, input_tensor, current_depth):
"""
The CNN module consists of a plain convolution followed by batch normalization, a ReLU activation, and a MaxPooling layer.
"""
x = tf.keras.layers.Conv1D(filters=self.nb_filters, kernel_size=self.kernel_size, padding='same', use_bias=False)(input_tensor)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation(activation='relu')(x)
x = tf.keras.layers.MaxPool1D(pool_size=2, strides=1, padding='same')(x)
return x
def _xception_module(self, input_tensor, current_depth):
"""
The Xception module consists of a separable convolution followed by batch normalization and a ReLU activation.
"""
x = tf.keras.layers.SeparableConv1D(filters=self.nb_filters, kernel_size=self.kernel_size, padding='same', use_bias=False, depth_multiplier=1,
kernel_regularizer=tf.keras.regularizers.l1(self.regularization),
activity_regularizer=tf.keras.regularizers.l2(self.regularization))(input_tensor)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation(activation='relu')(x)
return x
def _module(self, input_tensor, current_depth):
def _inception_module(self, input_tensor, current_depth):
"""
The InceptionTime module (taken from the reference implementation of the InceptionTime paper).
It starts with a bottleneck convolution that reduces the number of channels from 128 to 32.
......@@ -107,12 +129,15 @@ class Siamese_ConvNet(ABC):
saccade_input_layer = tf.keras.layers.Input(shape=(config['max_saccade'], 129))
fixation_input_layer = tf.keras.layers.Input(shape=(config['max_fixation'], 129))
SACCADE_DEPTH = 5
FIXATION_DEPTH = self.depth
# Build the saccade network
x_sac = saccade_input_layer
input_res_sac = saccade_input_layer
for d in range(self.depth):
x_sac = self._module(x_sac, d)
for d in range(SACCADE_DEPTH):
x_sac = self._cnn_module(x_sac, d)
if self.use_residual and d % 3 == 2:
x_sac = self._shortcut_layer(input_res_sac, x_sac)
input_res_sac = x_sac
......@@ -123,8 +148,8 @@ class Siamese_ConvNet(ABC):
x_fix = fixation_input_layer
input_res_fix = fixation_input_layer
for d in range(self.depth):
x_fix = self._module(x_fix, d)
for d in range(FIXATION_DEPTH):
x_fix = self._inception_module(x_fix, d)
if self.use_residual and d % 3 == 2:
x_fix = self._shortcut_layer(input_res_fix, x_fix)
input_res_fix = x_fix
......
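How the saccade and fixation branches are merged is not visible in this diff; a plausible sketch, assuming each branch ends in global average pooling and the pooled vectors are concatenated into a shared regression head (all layer sizes here are illustrative):

# Hypothetical merge of the two branch outputs; the actual merge code is truncated above.
gap_sac = tf.keras.layers.GlobalAveragePooling1D()(x_sac)
gap_fix = tf.keras.layers.GlobalAveragePooling1D()(x_fix)
merged = tf.keras.layers.Concatenate()([gap_sac, gap_fix])
merged = tf.keras.layers.Dense(50, activation='relu')(merged)
output_layer = tf.keras.layers.Dense(1, activation='linear')(merged)  # e.g. the angle for fix_sacc_fix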
......@@ -24,10 +24,10 @@ config['root_dir'] = '.'
# You can modify the rest or add new fields as you need.
# Hyper-parameters and training configuration.
config['learning_rate'] = 1e-6
config['regularization'] = 0.1
config['epochs'] = 150
config['batch_size'] = 512
config['learning_rate'] = 1e-3 # fix only: 1e-2, sac only: 1e-3, sac_fix: 1e-3 , fix_sac_fix: 1e-5
config['regularization'] = 1e-2 # fix only: 0, sac only: 1e-2, sac_fix: 1e-1, fix_sac_fix: 0.5
config['epochs'] = 250
config['batch_size'] = 64
"""
Parameters that can be chosen:
......@@ -53,10 +53,17 @@ Cluster can be set to clustering(), clustering2() or clustering3(), where differ
config['gaze-reg'] = True # Set to False if you want to run the saccade classification task
config['data-fraction'] = 1.0 # Set to 1.0 if you want to use the whole dataset; experimental feature, only for the regression task
#config['data_mode'] = 'fix_only'
#config['data_mode'] = 'fix_only'
#config['data_mode'] = 'sacc_only'
#config['data_mode'] = 'sacc_fix'
config['data_mode'] = 'fix_sacc_fix'
config['data_mode'] = 'sacc_fix'
#config['data_mode'] = 'fix_sacc_fix'
# Set loss automatically depending on the dataset/task to run
if config['data_mode'] == 'fix_sacc_fix':
from utils.losses import angle_loss
config['loss'] = angle_loss
else:
config['loss'] = 'mean_squared_error'
# Choose to either run the kerastuner on the model or
#config['run'] = 'kerastuner'
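Note that Keras accepts either a built-in loss name or any callable with signature (y_true, y_pred), so the model.compile(loss=config['loss'], ...) call shown earlier works unchanged whether config['loss'] is 'mean_squared_error' or the angle_loss function. A quick sanity check with a toy model:

import tensorflow as tf
from utils.losses import angle_loss

# A string identifier and a callable are interchangeable as the loss argument.
model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(loss=angle_loss, optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3))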
......@@ -78,13 +85,12 @@ config['split'] = False
#config['cluster'] = clustering()
if config['gaze-reg']:
config['trainX_file'] = 'EEGdata-002.mat'
config['trainY_file'] = 'label.mat'
config['trainX_variable'] = 'EEGdata'
config['trainY_variable'] = 'label'
config['padding'] = 'zero' # options: zero, repeat #TODO: find more options for clever padding
config['padding'] = 'repeat' # options: zero, repeat #TODO: find more options for clever padding
config['min_fixation'] = 50 # choose a minimum length for the gaze fixation
config['max_fixation'] = 150 # choose a maximum length for the gaze fixation
......@@ -96,6 +102,7 @@ if config['gaze-reg']:
config['y_screen'] = 800 #TODO: Kick out measurements where people look somewhere off the screen
else:
# Left right classification task
config['trainX_file'] = 'noweEEG.mat' if config['downsampled'] else 'all_EEGprocuesan.mat'
config['trainY_file'] = 'all_trialinfoprosan.mat'
config['trainX_variable'] = 'noweEEG' if config['downsampled'] else 'all_EEGprocuesan'
......@@ -153,6 +160,7 @@ if config['gaze-reg']:
#config['eegnet']['samples'] = config['max_duration'] = 150
else:
# Left-right classification task
config['cnn']['input_shape'] = (125, 129) if config['downsampled'] else (500, 129)
config['pyramidal_cnn']['input_shape'] = (125, 129) if config['downsampled'] else (500, 129)
config['inception']['input_shape'] = (125, 129) if config['downsampled'] else (500, 129)
......@@ -185,33 +193,4 @@ if not os.path.exists(config['model_dir']):
os.makedirs(config['model_dir'])
config['info_log'] = config['model_dir'] + '/' + 'info.log'
config['batches_log'] = config['model_dir'] + '/' + 'batches.log'
# Previously we loaded the data directly from the server, or used this code to merge the files.
# Deprecated now: the IOHelper functions that use the following configurations are no longer called.
"""
config['cnn']['trainX_variable1'] = "EEGprocue"
config['cnn']['trainX_variable2'] = "data"
config['cnn']['trainX_filename'] = "EEGprocue"
config['cnn']['trainY_variable1'] = "trialinfopro"
config['cnn']['trainY_variable2'] = "cues"
config['cnn']['trainY_filename'] = "trialinfocuelocked"
config['inception']['trainX_variable1'] = "EEGprocue"
config['inception']['trainX_variable2'] = "data"
config['inception']['trainX_filename'] = "EEGprocue"
config['inception']['trainY_variable1'] = "trialinfopro"
config['inception']['trainY_variable2'] = "cues"
config['inception']['trainY_filename'] = "trialinfocuelocked"
config['deepeye']['trainX_variable1'] = "EEGprocue"
config['deepeye']['trainX_variable2'] = "data"
config['deepeye']['trainX_filename'] = "EEGprocue"
config['deepeye']['trainY_variable1'] = "trialinfopro"
config['deepeye']['trainY_variable2'] = "cues"
config['deepeye']['trainY_filename'] = "trialinfocuelocked"
"""
\ No newline at end of file
config['batches_log'] = config['model_dir'] + '/' + 'batches.log'
\ No newline at end of file
......@@ -38,32 +38,32 @@ def run(trainX, trainY):
if config['model'] == 'cnn':
reg = Regression_CNN(input_shape=config['cnn']['input_shape'], kernel_size=64, epochs = config['epochs'],
nb_filters=16, verbose=True, batch_size=64, use_residual=True, depth=12,
nb_filters=16, verbose=True, batch_size=config['batch_size'], use_residual=True, depth=12,
learning_rate=config['learning_rate'])
elif config['model'] == 'inception':
reg = Regression_INCEPTION(input_shape=config['inception']['input_shape'], use_residual=True,
kernel_size=64, nb_filters=64, depth=12, bottleneck_size=16, epochs=config['epochs'],
reg = Regression_INCEPTION(input_shape=config['inception']['input_shape'], use_residual=True, batch_size=config['batch_size'],
kernel_size=64, nb_filters=32, depth=6, bottleneck_size=32, epochs=config['epochs'],
learning_rate=config['learning_rate'], regularization=config['regularization'])
elif config['model'] == 'xception':
reg = Regression_XCEPTION(input_shape=config['inception']['input_shape'], use_residual=True,
kernel_size=40, nb_filters=64, depth=18, epochs=config['epochs'],
kernel_size=40, nb_filters=64, depth=18, epochs=config['epochs'], batch_size=config['batch_size'],
learning_rate=config['learning_rate'], regularization=config['regularization'])
elif config['model'] == 'deepeye':
reg = Regression_DEEPEYE(input_shape=config['deepeye']['input_shape'], use_residual=True,
reg = Regression_DEEPEYE(input_shape=config['deepeye']['input_shape'], use_residual=True, batch_size=config['batch_size'],
kernel_size=64, nb_filters=32, depth=10, epochs=config['epochs'], preprocessing=False,
use_separable_convolution=True, use_simple_convolution=True, bottleneck_size=16,
learning_rate=config['learning_rate'], regularization=config['regularization'])
elif config['model'] == 'pyramidal_cnn':
reg = Regression_PyramidalCNN(input_shape=config['cnn']['input_shape'], epochs=config['epochs'],
reg = Regression_PyramidalCNN(input_shape=config['cnn']['input_shape'], epochs=config['epochs'], batch_size=config['batch_size'],
learning_rate=config['learning_rate'], regularization=config['regularization'])
elif config['model'] == 'siamese':
reg = Siamese_ConvNet(input_shape=None, use_residual=True,
kernel_size=40, nb_filters=32, depth=14, epochs=config['epochs'], # nb_filters=64 was default from inception
reg = Siamese_ConvNet(input_shape=None, use_residual=True, batch_size=config['batch_size'],
kernel_size=40, nb_filters=64, depth=12, epochs=config['epochs'], # nb_filters=64 was default from inception
learning_rate=config['learning_rate'], regularization=config['regularization'])
else:
......
......@@ -32,12 +32,12 @@ def main():
logging.info("Running the saccade classification task")
# Log some parameters to better distinguish between tasks
logging.info("Loss: {}".format(config['loss']))
logging.info("Learning rate: {}".format(config['learning_rate']))
logging.info("Regularization: {}".format(config['regularization']))
logging.info("Batch size: {}".format(config['batch_size']))
logging.info("Maximal number of epochs: {}".format(config['epochs']))
if config['run'] == "kerastuner":
logging.info("Running the keras-tuner")
else:
......
......@@ -4,7 +4,7 @@
#SBATCH --output=log/%j.out # where to store the output (%j is the JOBID), subdirectory must exist
#SBATCH --error=log/%j.err # where to store error messages
#SBATCH --gres=gpu:1
#SBATCH --mem=40G
#SBATCH --mem=60G
echo "Running on host: $(hostname)"
echo "In directory: $(pwd)"
......
"""
Definition of custom loss functions that can be used in our models
"""
import tensorflow as tf
def angle_loss(a, b):
"""
Custom loss function for models that predict the angle on the fix-sacc-fix dataset.
Angles -pi and pi should yield zero loss, since they are the same angle on the unit circle.
Angles pi/2 and -pi/2 should yield a large loss, since they differ by pi on the unit circle.
We therefore compute the absolute error along the "shorter" direction on the unit circle.
"""
return tf.reduce_mean(tf.abs(tf.atan2(tf.sin(a - b), tf.cos(a - b))))
\ No newline at end of file
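A quick check of the wrap-around behaviour described in the docstring, evaluated directly from the formula above:

import numpy as np
import tensorflow as tf
from utils.losses import angle_loss

a = tf.constant([np.pi, np.pi / 2, 0.0])
b = tf.constant([-np.pi, -np.pi / 2, 2 * np.pi])
# pi vs -pi     -> 0   (same point on the unit circle)
# pi/2 vs -pi/2 -> pi  (maximal angular difference)
# 0 vs 2*pi     -> 0   (a full turn), so labels no longer need shifting into [0, 2*pi)
print(angle_loss(a, b).numpy())  # mean over the batch: (0 + pi + 0) / 3 ≈ 1.047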
......@@ -118,7 +118,7 @@ def get_fix_data(verbose=True):
logging.info("X training loaded.")
logging.info(X.shape)
np.savetxt("fix_only_y", y, delimiter=',')
#np.savetxt("fix_only_y", y, delimiter=',')
return X, y
......@@ -192,7 +192,7 @@ def get_sacc_data(verbose=True):
logging.info("X training loaded.")
logging.info(X.shape)
np.savetxt("sacc_only_y", y, delimiter=',')
#np.savetxt("sacc_only_y", y, delimiter=',')
return X, y
......@@ -303,7 +303,7 @@ def get_sacc_fix_data(verbose=True):
logging.info("X training loaded.")
logging.info(X.shape)
np.savetxt("sacc_fix_y", y, delimiter=',')
#np.savetxt("sacc_fix_y", y, delimiter=',')
return X, y
......@@ -433,8 +433,6 @@ def get_fix_sacc_fix_data(verbose=True):
#y_datapoint = np.array([rho, phi])
#y_datapoint = np.array([dx, dy])
if phi < 0: # Only positive angles, therefore add 2*pi if necessary
phi = phi + 2 * np.pi
y_datapoint = np.array([phi])
# Append to X and y
......@@ -455,7 +453,7 @@ def get_fix_sacc_fix_data(verbose=True):
logging.info("X training loaded.")
logging.info(X.shape)
np.savetxt("fix_sacc_fix_y", y, delimiter=',')
#np.savetxt("fix_sacc_fix_y", y, delimiter=',')
return X, y
......
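With angle_loss in place, the label can stay in atan2's native range (-pi, pi], which is why the shift into [0, 2*pi) was removed above. A sketch of the label construction, assuming phi is derived from the saccade displacement (dx, dy) referenced in the commented-out lines:

import numpy as np

dx, dy = 10.0, -3.0               # hypothetical saccade displacement components
phi = np.arctan2(dy, dx)          # in (-pi, pi]; no shift to [0, 2*pi) needed anymore
y_datapoint = np.array([phi])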