Commit dc8d4ef3 authored by Lukas Wolf's avatar Lukas Wolf

added functionality for siamese model

parent 870fa62b
......@@ -12,4 +12,5 @@ notebook.ipynb
.ipynb_checkpoints/*
models_scratch.py
/log/*
/images/*
\ No newline at end of file
/images/*
/archive_runs/*
\ No newline at end of file
......@@ -22,7 +22,7 @@ class prediction_history(tf.keras.callbacks.Callback):
class Regression_ConvNet(ABC):
def __init__(self, input_shape, kernel_size=32, nb_filters=32, verbose=True, batch_size=64, use_residual=False, depth=6,
learning_rate=0.1, epochs=2, preprocessing = False):
learning_rate=0.001, epochs=2, preprocessing = False):
self.use_residual = use_residual
self.depth = depth
......@@ -48,7 +48,7 @@ class Regression_ConvNet(ABC):
else:
self.model = self._build_model()
self.model.compile(loss='mean_squared_error', optimizer=keras.optimizers.Adam(learning_rate=learning_rate), metrics=['mean_squared_error'])
self.model.compile(loss='mean_squared_error', optimizer=keras.optimizers.Adam(learning_rate=learning_rate))
if self.verbose:
self.model.summary()
......@@ -79,9 +79,8 @@ class Regression_ConvNet(ABC):
pass
def _shortcut_layer(self, input_tensor, out_tensor):
shortcut_y = tf.keras.layers.Conv1D(filters=int(out_tensor.shape[-1]), kernel_size=1, padding='same', use_bias=False,
kernel_regularizer=tf.keras.regularizers.l1(0.01),
activity_regularizer=tf.keras.regularizers.l2(0.01))(input_tensor)
shortcut_y = tf.keras.layers.Conv1D(filters=int(out_tensor.shape[-1]), kernel_size=1,
padding='same', use_bias=False)(input_tensor)
shortcut_y = tf.keras.layers.BatchNormalization()(shortcut_y)
x = keras.layers.Add()([shortcut_y, out_tensor])
x = keras.layers.Activation('relu')(x)
......@@ -111,7 +110,10 @@ class Regression_ConvNet(ABC):
if config['split']:
return gap_layer
output_layer = tf.keras.layers.Dense(2, activation='linear')(gap_layer) # linear activation for the 2D regression task
if config['data_mode'] == "fix_sacc_fix":
output_layer = tf.keras.layers.Dense(1, activation='linear')(gap_layer) # only predict the angle in this task
else:
output_layer = tf.keras.layers.Dense(2, activation='linear')(gap_layer) # linear activation for the 2D regression task
model = tf.keras.models.Model(inputs=input_layer, outputs=output_layer)
return model
......@@ -121,11 +123,11 @@ class Regression_ConvNet(ABC):
def fit(self, x, y, verbose=2):
csv_logger = CSVLogger(config['batches_log'], append=True, separator=';')
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=20) #TODO: we monitored mse before, but i guess it makes more sense with validation loss
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=20)
ckpt_dir = config['model_dir'] + '/' + config['model'] + '_' + 'best_model.h5'
ckpt = tf.keras.callbacks.ModelCheckpoint(ckpt_dir, verbose=1, monitor='val_loss', save_best_only=True, mode='auto') #TODO: same here
ckpt = tf.keras.callbacks.ModelCheckpoint(ckpt_dir, verbose=1, monitor='val_loss', save_best_only=True, mode='auto')
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=42)
prediction_ensemble = prediction_history((X_val,y_val))
hist = self.model.fit(X_train, y_train, verbose=verbose, batch_size=self.batch_size, validation_data=(X_val,y_val),
epochs=self.epochs, callbacks=[csv_logger, ckpt, early_stop, prediction_ensemble]) # pass early_stop and prediction_ensemble too, otherwise they never run
return hist , prediction_ensemble
return hist , prediction_ensemble
\ No newline at end of file
from abc import ABC, abstractmethod
from sklearn.model_selection import train_test_split
import tensorflow as tf
import tensorflow.keras as keras
from config import config
from tensorflow.keras.callbacks import CSVLogger
import logging
from InceptionTime.Regression_inception import Regression_INCEPTION
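# Keras callback that records the model's predictions on the held-out validation set after every epoch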
class prediction_history(tf.keras.callbacks.Callback):
def __init__(self, validation_data):
self.validation_data = validation_data
self.predhis = []
self.targets = validation_data[1]
def on_epoch_end(self, epoch, logs=None): # avoid a mutable default argument
y_pred = self.model.predict(self.validation_data[0])
self.predhis.append(y_pred)
class Siamese_ConvNet(ABC):
def __init__(self, input_shape, kernel_size=32, nb_filters=32, verbose=True, batch_size=64, use_residual=False, depth=6,
learning_rate=0.001, epochs=2, preprocessing = False, regularization=0.001, bottleneck_size=16):
self.use_residual = use_residual
self.depth = depth
self.callbacks = None
self.bottleneck_size = bottleneck_size
self.batch_size = batch_size
self.verbose = verbose
self.kernel_size = kernel_size
self.nb_filters = nb_filters
self.preprocessing = preprocessing
self.input_shape = input_shape
self.learning_rate = learning_rate
self.regularization = regularization
self.epochs = epochs
# TODO: it would be nice to pass the conv modules as parameters and build the network from different modules without manually changing the _module function
# Problem: the _module method of the other convnet classes cannot be accessed without instantiating them; see the sketch below
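# One possible workaround (a sketch, not part of this commit): make the module builders
# free functions (or @staticmethods) and inject one as a constructor argument, e.g.
#   def inception_module(input_tensor, nb_filters, kernel_size): ...        # hypothetical helper
#   net = Siamese_ConvNet(..., module_fn=inception_module)                   # hypothetical parameter
# and then call self.module_fn(...) inside _build_model instead of the hard-coded self._module.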
logging.info('Parameters: ')
logging.info('--------------- use residual : ' + str(self.use_residual))
logging.info('--------------- depth : ' + str(self.depth))
logging.info('--------------- batch size : ' + str(self.batch_size))
logging.info('--------------- kernel size : ' + str(self.kernel_size))
logging.info('--------------- nb filters : ' + str(self.nb_filters))
logging.info('--------------- preprocessing: ' + str(self.preprocessing))
self.model = self._build_model()
self.model.compile(loss='mean_squared_error', optimizer=keras.optimizers.Adam(learning_rate=learning_rate))
if self.verbose:
self.model.summary()
def _shortcut_layer(self, input_tensor, out_tensor):
shortcut_y = tf.keras.layers.Conv1D(filters=int(out_tensor.shape[-1]), kernel_size=1, padding='same', use_bias=False,
kernel_regularizer=tf.keras.regularizers.l1(self.regularization),
activity_regularizer=tf.keras.regularizers.l2(self.regularization))(input_tensor)
shortcut_y = tf.keras.layers.BatchNormalization()(shortcut_y)
x = keras.layers.Add()([shortcut_y, out_tensor])
x = keras.layers.Activation('relu')(x)
return x
def _module(self, input_tensor, current_depth):
"""
The module of InceptionTime (Taken from the implementation of InceptionTime paper).
It is made of a bottleneck convolution that reduces the number of channels from 128 -> 32.
Then it uses 3 filters with different kernel sizes (Default values are 40, 20, and 10)
In parallel it uses a simple convolution with kernel size 1 with max pooling for stability during training.
The outputs of each convolution are concatenated, followed by batch normalization and a ReLu activation.
"""
if int(input_tensor.shape[-1]) > 1:
input_inception = tf.keras.layers.Conv1D(filters=self.bottleneck_size, kernel_size=1, padding='same', use_bias=False,
kernel_regularizer=tf.keras.regularizers.l1(self.regularization),
activity_regularizer=tf.keras.regularizers.l2(self.regularization))(input_tensor)
else:
input_inception = input_tensor
# kernel_size_s = [3, 5, 8, 11, 17]
kernel_size_s = [self.kernel_size // (2 ** i) for i in range(3)]
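# e.g. the default kernel_size=32 yields kernel sizes [32, 16, 8]; kernel_size=40 (as in the InceptionTime paper) yields [40, 20, 10]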
conv_list = []
for i in range(len(kernel_size_s)):
conv_list.append(
tf.keras.layers.Conv1D(filters=self.nb_filters, kernel_size=kernel_size_s[i], padding='same', use_bias=False,
kernel_regularizer=tf.keras.regularizers.l1(self.regularization),
activity_regularizer=tf.keras.regularizers.l2(self.regularization))(input_inception))
max_pool_1 = tf.keras.layers.MaxPool1D(pool_size=3, strides=1, padding='same')(input_tensor)
conv_6 = tf.keras.layers.Conv1D(filters=self.nb_filters, kernel_size=1, padding='same', use_bias=False,
kernel_regularizer=tf.keras.regularizers.l1(self.regularization),
activity_regularizer=tf.keras.regularizers.l2(self.regularization))(max_pool_1)
conv_list.append(conv_6)
x = tf.keras.layers.Concatenate(axis=2)(conv_list)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation(activation='relu')(x)
return x
def _build_model(self):
# Define the inputs
saccade_input_layer = tf.keras.layers.Input(shape=(config['max_saccade'], 129))
fixation_input_layer = tf.keras.layers.Input(shape=(config['max_fixation'], 129))
# Build the saccade network
x_sac = saccade_input_layer
input_res_sac = saccade_input_layer
for d in range(self.depth):
x_sac = self._module(x_sac, d)
if self.use_residual and d % 3 == 2:
x_sac = self._shortcut_layer(input_res_sac, x_sac)
input_res_sac = x_sac
saccade_gap_layer = tf.keras.layers.GlobalAveragePooling1D()(x_sac)
# Build the fixation network
x_fix = fixation_input_layer
input_res_fix = fixation_input_layer
for d in range(self.depth):
x_fix = self._module(x_fix, d)
if self.use_residual and d % 3 == 2:
x_fix = self._shortcut_layer(input_res_fix, x_fix)
input_res_fix = x_fix
fixation_gap_layer = tf.keras.layers.GlobalAveragePooling1D()(x_fix)
# Concatenate the networks and generate output
concat = tf.keras.layers.concatenate([saccade_gap_layer, fixation_gap_layer])
dense1 = tf.keras.layers.Dense(300)(concat)
dense2 = tf.keras.layers.Dense(100)(dense1)
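# NOTE: these Dense layers use Keras' default linear activation, so the head collapses to a single affine map; a nonlinearity may be intended here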
output = tf.keras.layers.Dense(2, name="output")(dense2)
model = keras.Model(inputs=[saccade_input_layer, fixation_input_layer], outputs=[output])
return model
def get_model(self):
return self.model
def fit(self, x, y, verbose=2):
csv_logger = CSVLogger(config['batches_log'], append=True, separator=';')
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=20)
ckpt_dir = config['model_dir'] + '/' + config['model'] + '_' + 'best_model.h5'
ckpt = tf.keras.callbacks.ModelCheckpoint(ckpt_dir, verbose=1, monitor='val_loss', save_best_only=True, mode='auto')
# Prepare the data as tuples for the siamese input
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=42)
X_train_sac, X_train_fix = X_train[:, :config['max_saccade'], :], X_train[:, config['max_saccade']:, :] # first max_saccade samples are the saccade signal, the rest the fixation
X_val_sac, X_val_fix = X_val[:, :config['max_saccade'], :], X_val[:, config['max_saccade']:, :]
logging.info("Saccade siamese input shape: {}".format(X_train_sac[0].shape))
logging.info("Fixation siamese input shape: {}".format(X_train_fix[0].shape))
prediction_ensemble = prediction_history(((X_val_sac, X_val_fix), y_val))
hist = self.model.fit((X_train_sac, X_train_fix), y_train, verbose=verbose, batch_size=self.batch_size,
validation_data=((X_val_sac, X_val_fix), y_val),
epochs=self.epochs, callbacks=[csv_logger, ckpt, early_stop, prediction_ensemble]) # register all callbacks so early stopping and per-epoch prediction recording actually run
return hist, prediction_ensemble
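A minimal usage sketch (not part of this commit; shapes assume config['max_saccade']=30 and config['max_fixation']=150 as set below): fit() expects x with the saccade samples first, followed by the fixation samples along the time axis.
import numpy as np
# 100 hypothetical trials: 30 saccade + 150 fixation time samples, 129 EEG channels
x = np.random.randn(100, 30 + 150, 129).astype('float32')
y = np.random.randn(100, 2).astype('float32')  # 2D gaze coordinates
net = Siamese_ConvNet(input_shape=None, use_residual=True, kernel_size=40, nb_filters=64,
depth=18, epochs=2, learning_rate=1e-4, regularization=1e-4)  # example values
hist, prediction_ensemble = net.fit(x, y)  # fit() splits x into (saccade, fixation) internally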
......@@ -24,12 +24,15 @@ config['root_dir'] = '.'
# You can modify the rest or add new fields as you need.
# Hyper-parameters and training configuration.
config['learning_rate'] = 1e-4
config['learning_rate'] = 1e-5
config['regularization'] = 1e-4
config['epochs'] = 100
"""
Parameters that can be chosen:
gaze-reg: set this to true if you want to work on the regression task (Lukas thesis)
gaze-reg: set this to true if you want to work on the
regression task (Lukas thesis)
The corresponding data is EEGdata-002.mat and label.mat
cnn: The simple CNN architecture
......@@ -60,12 +63,13 @@ config['run'] = 'ensemble'
config['ensemble'] = 1 #number of models in the ensemble method
# Choosing model
config['model'] = 'cnn'
#config['model'] = 'cnn'
#config['model'] = 'inception'
#config['model'] = 'eegnet'
#config['model'] = 'deepeye'
#config['model'] = 'xception'
#config['model'] = 'pyramidal_cnn'
config['model'] = 'siamese' # Note that you have to set data_mode to sacc_fix for this
# Options for classification task, currently not used for regression
config['downsampled'] = False
......@@ -73,16 +77,21 @@ config['split'] = False
#config['cluster'] = clustering()
if config['gaze-reg']:
config['trainX_file'] = 'EEGdata-002.mat'
config['trainY_file'] = 'label.mat'
config['trainX_variable'] = 'EEGdata'
config['trainY_variable'] = 'label'
config['padding'] = 'repeat' # options: zero, repeat #TODO: find more options for clever padding
config['min_duration'] = 50 # choose a minimum length for the gaze fixation
config['max_duration'] = 150 # choose a maximum length for the gaze fixation
config['x_screen'] = 600 #TOD: check what is x and y in the plot sent by Martyna
config['min_fixation'] = 50 # choose a minimum length for the gaze fixation
config['max_fixation'] = 150 # choose a maximum length for the gaze fixation
config['min_saccade'] = 10 # minimum number of samples for a saccade that we want to use
config['max_saccade'] = 30 # maximum number of samples for a saccade that we want to use
config['x_screen'] = 600
config['y_screen'] = 800 #TODO: Kick out measurements where people look somewhere off the screen
else:
......@@ -111,33 +120,32 @@ if config['gaze-reg']:
#TODO: automatically set the input shapes depending on the dataset to run, i.e. fix only, sacc only, etc.
if config['data_mode'] == 'fix_only':
config['cnn']['input_shape'] = (int(config['max_duration']), 129) # e.g. for max_duration 300 we have shape (150,129)
config['pyramidal_cnn']['input_shape'] = (int(config['max_duration']), 129)
config['inception']['input_shape'] = (int(config['max_duration']), 129)
config['deepeye']['input_shape'] = (int(config['max_duration']), 129)
config['xception']['input_shape'] = (int(config['max_duration']), 129)
config['cnn']['input_shape'] = (int(config['max_fixation']), 129) # e.g. max_fixation=150 gives shape (150, 129)
config['pyramidal_cnn']['input_shape'] = (int(config['max_fixation']), 129)
config['inception']['input_shape'] = (int(config['max_fixation']), 129)
config['deepeye']['input_shape'] = (int(config['max_fixation']), 129)
config['xception']['input_shape'] = (int(config['max_fixation']), 129)
elif config['data_mode'] == 'sacc_only':
config['cnn']['input_shape'] = (100, 129)
config['pyramidal_cnn']['input_shape'] = (100, 129)
config['inception']['input_shape'] = (100, 129)
config['deepeye']['input_shape'] = (100, 129)
config['xception']['input_shape'] = (100, 129)
config['cnn']['input_shape'] = (config['max_saccade'], 129)
config['pyramidal_cnn']['input_shape'] = (config['max_saccade'], 129)
config['inception']['input_shape'] = (config['max_saccade'], 129)
config['deepeye']['input_shape'] = (config['max_saccade'], 129)
config['xception']['input_shape'] = (config['max_saccade'], 129)
elif config['data_mode'] == 'sacc_fix':
config['cnn']['input_shape'] = (100 + config['max_duration'], 129)
config['pyramidal_cnn']['input_shape'] = (100 + config['max_duration'], 129)
config['inception']['input_shape'] = (100 + config['max_duration'], 129)
config['deepeye']['input_shape'] = (100 + config['max_duration'], 129)
config['xception']['input_shape'] = (100 + config['max_duration'], 129)
config['cnn']['input_shape'] = (config['max_saccade'] + config['max_fixation'], 129)
config['pyramidal_cnn']['input_shape'] = (config['max_saccade'] + config['max_fixation'], 129)
config['inception']['input_shape'] = (config['max_saccade'] + config['max_fixation'], 129)
config['deepeye']['input_shape'] = (config['max_saccade'] + config['max_fixation'], 129)
config['xception']['input_shape'] = (config['max_saccade'] + config['max_fixation'], 129)
else: # data mode is fix_sacc_fix
config['cnn']['input_shape'] = (100 + 2 * config['max_duration'], 129)
config['pyramidal_cnn']['input_shape'] = (100 + 2 * config['max_duration'], 129)
config['inception']['input_shape'] = (100 + 2 * config['max_duration'], 129)
config['deepeye']['input_shape'] = (100 + 2 * config['max_duration'], 129)
config['xception']['input_shape'] = (100 + 2 * config['max_duration'], 129)
config['cnn']['input_shape'] = (config['max_saccade'] + 2 * config['max_fixation'], 129)
config['pyramidal_cnn']['input_shape'] = (config['max_saccade'] + 2 * config['max_fixation'], 129)
config['inception']['input_shape'] = (config['max_saccade'] + 2 * config['max_fixation'], 129)
config['deepeye']['input_shape'] = (config['max_saccade'] + 2 * config['max_fixation'], 129)
config['xception']['input_shape'] = (config['max_saccade'] + 2 * config['max_fixation'], 129)
#TODO: EEGnet not yet implemented for regression
#config['deepeye-rnn']['input_shape'] = (int(config['max_duration']), 129)
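# Worked example with the values above (max_saccade=30, max_fixation=150):
#   fix_only: (150, 129), sacc_only: (30, 129), sacc_fix: (180, 129), fix_sacc_fix: (330, 129)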
......
This diff is collapsed.
This diff is collapsed.
......@@ -9,6 +9,7 @@ from InceptionTime.Regression_inception import Regression_INCEPTION
from Xception.Regression_xception import Regression_XCEPTION
from DeepEye.Regression_deepeye import Regression_DEEPEYE
from PyramidalCNN.Regression_PyramidalCNN import Regression_PyramidalCNN
from Siamese.Siamese import Siamese_ConvNet
#TODO: rewrite the other classes
#from DeepEyeRNN.deepeyeRNN import Classifier_DEEPEYE_RNN
......@@ -36,30 +37,34 @@ def run(trainX, trainY):
print('Beginning model number {}/{} ...'.format(i+1, config['ensemble']))
if config['model'] == 'cnn':
reg = Regression_CNN(input_shape=config['cnn']['input_shape'], kernel_size=64, epochs = 50,
reg = Regression_CNN(input_shape=config['cnn']['input_shape'], kernel_size=64, epochs = config['epochs'],
nb_filters=16, verbose=True, batch_size=64, use_residual=True, depth=12,
learning_rate=0.001)
learning_rate=config['learning_rate'])
elif config['model'] == 'inception':
reg = Regression_INCEPTION(input_shape=config['inception']['input_shape'], use_residual=True,
kernel_size=64, nb_filters=32, depth=16, bottleneck_size=32, epochs=50,
learning_rate=0.001, regularization=0.02)
kernel_size=64, nb_filters=64, depth=12, bottleneck_size=16, epochs=config['epochs'],
learning_rate=config['learning_rate'], regularization=config['regularization'])
elif config['model'] == 'xception':
reg = Regression_XCEPTION(input_shape=config['inception']['input_shape'], use_residual=True,
kernel_size=64, nb_filters=32, depth=24, epochs=50,
learning_rate=0.001, regularization=0.02)
kernel_size=40, nb_filters=64, depth=18, epochs=config['epochs'],
learning_rate=config['learning_rate'], regularization=config['regularization'])
elif config['model'] == 'deepeye':
reg = Regression_DEEPEYE(input_shape=config['deepeye']['input_shape'], use_residual=True,
kernel_size=64, nb_filters=32, depth=10, epochs=50, preprocessing=False,
use_separable_convolution=True, use_simple_convolution=True,
bottleneck_size=32,
learning_rate=0.001, regularization=0.02)
kernel_size=64, nb_filters=32, depth=10, epochs=config['epochs'], preprocessing=False,
use_separable_convolution=True, use_simple_convolution=True, bottleneck_size=16,
learning_rate=config['learning_rate'], regularization=config['regularization'])
elif config['model'] == 'pyramidal_cnn':
reg = Regression_PyramidalCNN(input_shape=config['cnn']['input_shape'], epochs=50,
learning_rate=0.001, regularization=0.02)
reg = Regression_PyramidalCNN(input_shape=config['cnn']['input_shape'], epochs=config['epochs'],
learning_rate=config['learning_rate'], regularization=config['regularization'])
elif config['model'] == 'siamese':
reg = Siamese_ConvNet(input_shape=None, use_residual=True,
kernel_size=40, nb_filters=64, depth=18, epochs=config['epochs'],
learning_rate=config['learning_rate'], regularization=config['regularization'])
else:
logging.info('Cannot start the program. Please choose a valid model in config.py')
......
......@@ -24,6 +24,7 @@ def build_model(hp):
bottleneck_size=hp.Choice('bottleneck_size', values=[32, 64]),
use_simple_convolution= hp.Choice('use_simple_convolution', values=[True, False]),
use_separable_convolution= hp.Choice('use_separable_convolution', values = [True, False]),
learning_rate=hp.Choice('learning_rate', values=[1e-2,1e-3,1e-4,1e-5,1e-6]),
preprocessing=False)
elif config['model'] == 'cnn':
......@@ -33,27 +34,33 @@ def build_model(hp):
kernel_size=hp.Choice('kernel_size', values=[40, 32, 64]),
nb_filters=hp.Choice('nb_filters', values=[16, 32, 64]),
depth=hp.Int('depth', min_value=6, max_value=20, step=3),
learning_rate=hp.Choice('learning_rate', values=[1e-2,1e-3,1e-4,1e-5,1e-6]),
preprocessing=False
)
elif config['model'] == 'inception':
reg = Regression_INCEPTION(input_shape=config['inception']['input_shape'],
epochs=15, verbose=True, batch_size=64,
epochs=15,
verbose=True,
batch_size=64,
use_residual=hp.Choice('use_residual', values=[True, False]),
kernel_size=hp.Choice('kernel_size', values=[40, 32, 64]),
nb_filters=hp.Choice('nb_filters', values=[16, 32, 64]),
depth=hp.Int('depth', min_value=6, max_value=20, step=3),
learning_rate=hp.Choice('learning_rate', values=[1e-2,1e-3,1e-4,1e-5,1e-6]),
bottleneck_size=hp.Choice('bottleneck_size', values=[16, 32, 64])
)
elif config['model'] == 'xception':
reg = Regression_XCEPTION(input_shape=config['inception']['input_shape'],
epochs=15, verbose=True, batch_size=64,
epochs=15, verbose=True,
batch_size=64,
use_residual=hp.Choice('use_residual', values=[True, False]),
kernel_size=hp.Choice('kernel_size', values=[40, 32, 64]),
nb_filters=hp.Choice('nb_filters', values=[16, 32, 64]),
depth=hp.Int('depth', min_value=6, max_value=20, step=3)
depth=hp.Int('depth', min_value=6, max_value=20, step=3),
learning_rate=hp.Choice('learning_rate', values=[1e-2,1e-3,1e-4,1e-5,1e-6])
)
elif config['model'] == 'pyramidal_cnn':
......@@ -62,7 +69,21 @@ def build_model(hp):
kernel_size=hp.Choice('kernel_size', values=[40, 32, 64]),
nb_filters=hp.Choice('nb_filters', values=[16, 32, 64]),
depth=hp.Int('depth', min_value=6, max_value=20, step=3),
learning_rate=hp.Choice('learning_rate', values=[1e-2,1e-3,1e-4,1e-5,1e-6])
)
elif config['model'] == 'siamese':
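# NOTE: this branch currently reuses Regression_INCEPTION to tune the shared backbone hyperparameters; Siamese_ConvNet itself is not wired into the tuner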
reg = Regression_INCEPTION(input_shape=config['inception']['input_shape'],
epochs=15,
verbose=True,
batch_size=64,
use_residual=hp.Choice('use_residual', values=[True, False]),
kernel_size=hp.Choice('kernel_size', values=[40, 32, 64]),
nb_filters=hp.Choice('nb_filters', values=[16, 32, 64]),
depth=hp.Int('depth', min_value=6, max_value=20, step=3),
learning_rate=hp.Choice('learning_rate', values=[1e-2,1e-3,1e-4,1e-5,1e-6]),
bottleneck_size=hp.Choice('bottleneck_size', values=[16, 32, 64])
)
else:
logging.info('Cannot start the program. Please choose a valid model in config.py')
......@@ -96,5 +117,5 @@ def tune(trainX, trainY):
#print(trainX.shape)
tuner.search_space_summary()
X_train, X_val, y_train, y_val = train_test_split(trainX, trainY, test_size=0.2, random_state=42)
tuner.search(X_train, y_train, epochs=15, validation_data=(X_val, y_val), verbose=2)
tuner.search(X_train, y_train, epochs=20, validation_data=(X_val, y_val), verbose=2)
tuner.results_summary()
......@@ -24,19 +24,32 @@ def main():
if config['gaze-reg']:
logging.info("Running the gaze regression task")
logging.info("Using {} padding".format(config['padding']))
logging.info("Using fixations between {} ms and {} ms, 1 sample equals 2ms".format((2 * config['min_duration']), (2 * config['max_duration'])))
if config["data_mode"] != "sacc_only":
logging.info("Using fixations between {} ms and {} ms, 1 sample equals 2ms".format((2 * config['min_fixation']), (2 * config['max_fixation'])))
if config['data_mode'] != "fix_only":
logging.info("Using saccades between {} ms and {} ms, 1 sample equals 2ms".format((2 * config['min_saccade']), (2 * config['max_saccade'])))
else:
logging.info("Running the saccade classification task")
# Log some parameters to better distinguish between tasks
logging.info("Learning rate: {}".format(config['learning_rate']))
logging.info("Regularization: {}".format(config['regularization']))
if config['run'] == "kerastuner":
logging.info("Running the keras-tuner")
else:
logging.info("Running the ensemble with {} ensemble models".format(config['ensemble']))
try:
trainX, trainY = IOHelper.get_mat_data(config['data_dir'], verbose=True)
except Exception as e:
raise Exception("Could not load mat data") from e
"""
if config['model'] == 'eegnet' or config['model'] == 'eegnet_cluster':
trainX = np.transpose(trainX, (0, 2, 1))
logging.info(trainX.shape)
"""
if config['run'] == 'kerastuner':
tune(trainX,trainY)
elif config['run'] == 'ensemble':
......
......@@ -37,6 +37,7 @@ def max_fixation_duration():
def load_regression_data(verbose=True):
"""
Load the data for the regression task, padded as specified in config.
This function only dispatches to the loader that matches data_mode in config.py.
Returns
-------
......@@ -52,8 +53,17 @@ def load_regression_data(verbose=True):
elif config['data_mode'] == 'fix_sacc_fix':
logging.info("Using fixation-saccade-fixation dataset")
return get_fix_sacc_fix_data(verbose=verbose)
#ELSE load the dataset with fixations only with the code below
elif config['data_mode'] == 'fix_only':
logging.info("Using fixation only dataset")
return get_fix_data(verbose=verbose)
else:
raise Exception("Choose a valid data_mode in config.py")
def get_fix_data(verbose=True):
"""
Returns X, y for the gaze regression task with EEG data X only from fixations
"""
# Load the labels
y = scipy.io.loadmat(config['data_dir'] + config['trainY_variable'])
labels = y['label'] # shape (85413, 1) for label.mat
......@@ -76,7 +86,7 @@ def load_regression_data(verbose=True):
x_len, y_len = x_datapoint.shape # x_len is the number of time samples (2*x_len ms of signal), y_len = 129 channels
# Check whether the point fits the desired range
if x_len < config['min_duration'] or x_len > config['max_duration']:
if x_len < config['min_fixation'] or x_len > config['max_fixation']:
continue
# Create the 2D regression label
......@@ -84,7 +94,7 @@ def load_regression_data(verbose=True):
y_datapoint = np.array([label[0][1][0][0], label[0][2][0][0]])
# Pad the data
padding_size = config['max_duration'] - x_len
padding_size = config['max_fixation'] - x_len
if config['padding'] == 'zero':
x_datapoint = np.pad(x_datapoint, pad_width=((0,padding_size),(0,0)))#.flatten()
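# e.g. with max_fixation=150, a (120, 129) fixation gets 30 zero rows appended along the time axis -> (150, 129)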
elif config['padding'] == 'repeat':
......@@ -92,10 +102,14 @@ def load_regression_data(verbose=True):
else:
raise Exception("Choose a valid padding scheme in config.py")
x_list.append([x_datapoint])
y_list.append([y_datapoint])
x_list.append(x_datapoint)
y_list.append(y_datapoint)
X = np.asarray(x_list)
# Reshape data and normalize it
norm = np.linalg.norm(X)
X = X / norm
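# Note: a single global (Frobenius) norm over the whole tensor X scales all trials by the same factor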
y = np.asarray(y_list)
if verbose:
......@@ -104,22 +118,9 @@ def load_regression_data(verbose=True):
logging.info("X training loaded.")
logging.info(X.shape)
# Reshape data and normalize it
X_reshaped = np.transpose(X, (0,2,3,1))
norm = np.linalg.norm(X_reshaped)
X_reshaped = X_reshaped / norm
y_reshaped = np.transpose(y, (0,2,1))
np.savetxt("fix_only_y", y, delimiter=',')