Commit ec1c2165 authored by Ard Kastrati's avatar Ard Kastrati
Browse files

Implemented downsampling in the architectures

parent fb7cb10e
......@@ -5,4 +5,5 @@
/data/*
all_EEGprocuesan.mat
all_trialinfoprosan.mat
noweEEG.mat
/runs/*
......@@ -14,11 +14,10 @@ sns.set_style('darkgrid')
def run(trainX, trainY):
classifier = Classifier_DEEPEYE(output_directory=config['root_dir'], input_shape=(129, 500))
classifier = Classifier_DEEPEYE(output_directory=config['root_dir'], input_shape=config['deepeye']['input_shape'])
hist = classifier.fit(deepeye_x=trainX, y=trainY)
plot_loss(hist, 'DeepEye', True)
plot_acc(hist, 'DeepEye', True)
# Newly added lines below
plot_loss(hist, config['model_dir'], config['model'], True)
plot_acc(hist, config['model_dir'], config['model'], True)
save_logs(hist, config['model_dir'], config['model'], pytorch=False)
save_model_param(classifier.model, config['model_dir'], config['model'], pytorch=False)
......@@ -65,18 +64,17 @@ class Classifier_DEEPEYE:
# self.model.save_weights(self.output_directory + 'model_init.hdf5')
@staticmethod
def _eeg_preprocessing(self, input_tensor, F1=8, D=2, kernLength=125):
def _eeg_preprocessing(input_tensor, F1=8, D=2, kernLength=250):
"""
Static method since this function does not receive any reference argument from this class.
"""
# EEGNet feature extraction
Chans = input_tensor.shape[1]
Samples = input_tensor.shape[2]
Chans = config['deepeye']['channels']
Samples = config['deepeye']['samples']
# Filter slides horizontally
horizontal_tensor = Conv2D(F1, (1, kernLength), padding='same',
input_shape=(Chans, Samples, 1),
use_bias=False)(input_tensor)
input_shape=(Chans, Samples, 1), use_bias=False)(input_tensor)
horizontal_tensor = BatchNormalization()(horizontal_tensor)
# Filter slides vertically
......@@ -88,45 +86,30 @@ class Classifier_DEEPEYE:
# Reshape the tensor (129, 500, 1) to (129, 500), and feed into the inception module
output_tensor = eeg_tensor[:, 0, :, :]
output_tensor = tf.transpose(output_tensor, perm=[0, 2, 1]) # For the input of Inception it should be
# transposed.
output_tensor = tf.transpose(output_tensor, perm=[0, 2, 1])
return output_tensor
def _inception_module(self, input_tensor, stride=1, activation='linear'):
'''
Inception Network
Input:
input_tensor : input of size (128 * 500 * 1) to be forwarded
stride : 1
F1 : number of filters of the first convolution
kernLength : 25, second dimension of the kernel in the first convolution, the first dimension is 1
D : 2, depth multiplier
F1 : 8,
activation function : linear
Output:
output_tensor : input through the inception network
'''
if self.use_bottleneck and int(input_tensor.shape[-1]) > 1:
input_inception = keras.layers.Conv1D(filters=self.bottleneck_size, kernel_size=1,
def _inception_module(self, input_tensor, nb_filters=32, use_bottleneck=True, kernel_size=40, bottleneck_size=32,
stride=1, activation='linear'):
if use_bottleneck and int(input_tensor.shape[-1]) > 1:
input_inception = keras.layers.Conv1D(filters=bottleneck_size, kernel_size=1,
padding='same', activation=activation, use_bias=False)(input_tensor)
else:
input_inception = input_tensor
# kernel_size_s = [3, 5, 8, 11, 17]
kernel_size_s = [self.kernel_size // (2 ** i) for i in range(3)]
kernel_size_s = [kernel_size // (2 ** i) for i in range(3)]
conv_list = []
for i in range(len(kernel_size_s)):
conv_list.append(keras.layers.Conv1D(filters=self.nb_filters, kernel_size=kernel_size_s[i],
conv_list.append(keras.layers.Conv1D(filters=nb_filters, kernel_size=kernel_size_s[i],
strides=stride, padding='same', activation=activation,
use_bias=False)(input_inception))
max_pool_1 = keras.layers.MaxPool1D(pool_size=3, strides=stride, padding='same')(
input_tensor) # I think it should be eeg_tensor here!
max_pool_1 = keras.layers.MaxPool1D(pool_size=3, strides=stride, padding='same')(input_tensor)
conv_6 = keras.layers.Conv1D(filters=self.nb_filters, kernel_size=1, padding='same', activation=activation,
conv_6 = keras.layers.Conv1D(filters=nb_filters, kernel_size=1, padding='same', activation=activation,
use_bias=False)(max_pool_1)
conv_list.append(conv_6)
......@@ -134,22 +117,20 @@ class Classifier_DEEPEYE:
x = keras.layers.Concatenate(axis=2)(conv_list)
x = keras.layers.BatchNormalization()(x)
x = keras.layers.Activation(activation='relu')(x)
return x
@staticmethod
def _shortcut_layer(self, input_tensor, out_tensor):
'''
implementation of a shortcut layer inspired by the Residual NN
'''
shortcut_y = keras.layers.Conv1D(filters=int(out_tensor.shape[-1]), kernel_size=1,
padding='same', use_bias=False)(input_tensor)
shortcut_y = keras.layers.normalization.BatchNormalization()(shortcut_y)
shortcut_y = keras.layers.BatchNormalization()(shortcut_y)
x = keras.layers.Add()([shortcut_y, out_tensor])
x = keras.layers.Activation('relu')(x)
return x
def build_model(self, input_shape, nb_filters=32, use_residual=True, use_bottleneck=True, depth=6, kernel_size=40, F1=8, D=2, kernLength=125):
def build_model(self, input_shape, nb_filters=32, use_residual=True, use_bottleneck=True, depth=6, kernel_size=40, F1=8,
D=2, kernLength=125):
input_layer = keras.layers.Input((input_shape[0], input_shape[1], 1))
eeg_tensor = self._eeg_preprocessing(input_layer, F1, D, kernLength)
x = eeg_tensor
......@@ -171,7 +152,7 @@ class Classifier_DEEPEYE:
def fit(self, deepeye_x, y):
self.model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
csv_logger = CSVLogger(config=['batches_log'], append=True, separator=';')
hist = self.model.fit(deepeye_x, y, verbose=1, validation_split=0.2, epochs=10, callbacks=[csv_logger])
csv_logger = CSVLogger(config['batches_log'], append=True, separator=';')
hist = self.model.fit(deepeye_x, y, verbose=1, validation_split=0.2, epochs=1, callbacks=[csv_logger])
return hist
......@@ -24,8 +24,8 @@ def run(trainX, trainY):
save_model_param(classifier.model, config['model_dir'], config['model'], pytorch=False)
class Classifier_EEGNet:
def __init__(self, output_directory, nb_classes=1, chans = 129, samples = 500, dropoutRate = 0.5, kernLength = 64, F1 = 8,
D = 2, F2 = 16, norm_rate = 0.25, dropoutType = 'Dropout', verbose = True, build=True):
def __init__(self, output_directory, nb_classes=1, chans = config['eegnet']['channels'], samples = config['eegnet']['samples'], dropoutRate = 0.5, kernLength = 250, F1 = 32,
D = 8, F2 = 256, norm_rate = 0.25, dropoutType = 'Dropout', verbose = True, build=True):
self.output_directory = output_directory
self.nb_classes = nb_classes
......@@ -67,14 +67,14 @@ class Classifier_EEGNet:
depthwise_constraint=max_norm(1.))(block1)
block1 = BatchNormalization()(block1)
block1 = Activation('elu')(block1)
block1 = AveragePooling2D((1, 4))(block1)
block1 = AveragePooling2D((1, 16))(block1)
block1 = dropoutType(self.dropoutRate)(block1)
block2 = SeparableConv2D(self.F2, (1, 16),
block2 = SeparableConv2D(self.F2, (1, 64),
use_bias=False, padding='same')(block1)
block2 = BatchNormalization()(block2)
block2 = Activation('elu')(block2)
block2 = AveragePooling2D((1, 8))(block2)
block2 = AveragePooling2D((1, 6))(block2)
block2 = dropoutType(self.dropoutRate)(block2)
flatten = Flatten(name='flatten')(block2)
......
......@@ -7,7 +7,7 @@ from keras.callbacks import CSVLogger
def run(trainX, trainY):
logging.info("Starting InceptionTime.")
classifier = Classifier_INCEPTION(output_directory=config['root_dir'], input_shape=(500, 129))
classifier = Classifier_INCEPTION(output_directory=config['root_dir'], input_shape=config['inception']['input_shape'])
hist = classifier.fit(trainX, trainY)
plot_loss(hist, config['model_dir'], config['model'], True)
plot_acc(hist, config['model_dir'], config['model'], True)
......
# configuration used by the training and evaluation methods
# let's keep it here to have a clean code on other methods that we try
import time
import logging
import os
config = dict()
......@@ -21,6 +19,7 @@ config['data_dir'] = './'
config['data_dir_server'] = '/cluster/project/infk/zigeng/preprocessed2/'
# Path of root
config['root_dir'] = '.'
##################################################################
# You can modify the rest or add new fields as you need.
......@@ -29,7 +28,6 @@ config['learning_rate'] = 1e-4
"""
Models:
cnn: First try: CNN to predict movement towards left or right (prosaccade) with 1 second data.
inception: The InceptionTime baseline
eegnet: The other baseline
......@@ -37,10 +35,51 @@ deepeye: Our method
"""
# Choosing model
config['model'] = 'inception'
config['model'] = 'deepeye'
config['downsampled'] = True
# CNN - 1
config['trainX_file'] = 'noweEEG.mat' if config['downsampled'] else 'all_EEGprocuesan.mat'
config['trainY_file'] = 'all_trialinfoprosan.mat'
config['trainX_variable'] = 'noweEEG' if config['downsampled'] else 'all_EEGprocuesan'
config['trainY_variable'] = 'all_trialinfoprosan'
# CNN
config['cnn'] = {}
# InceptionTime
config['inception'] = {}
# DeepEye
config['deepeye'] = {}
# EEGNet
config['eegnet'] = {}
config['inception']['input_shape'] = (125, 129) if config['downsampled'] else (500, 129)
config['eegnet']['channels'] = 129
config['eegnet']['samples'] = 125 if config['downsampled'] else 500
config['deepeye']['input_shape'] = (129, 125) if config['downsampled'] else (129, 500)
config['deepeye']['channels'] = 129
config['deepeye']['samples'] = 125 if config['downsampled'] else 500
# You can set descriptive experiment names or simply set empty string ''.
# config['model_name'] = 'skeleton_code'
# Create a unique output directory for this experiment.
timestamp = str(int(time.time()))
model_folder_name = timestamp if config['model'] == '' else timestamp + "_" + config['model']
if config['downsampled']:
model_folder_name += '_downsampled'
config['model_dir'] = os.path.abspath(os.path.join(config['log_dir'], model_folder_name))
if not os.path.exists(config['model_dir']):
os.makedirs(config['model_dir'])
config['info_log'] = config['model_dir'] + '/' + 'info.log'
config['batches_log'] = config['model_dir'] + '/' + 'batches.log'
# Before we were loading directly the data from the server, or used the code to merge them.
# Deprecated now. We don't really use the functions anymore in the IOHelper that make use of the following configurations.
config['cnn']['trainX_variable1'] = "EEGprocue"
config['cnn']['trainX_variable2'] = "data"
config['cnn']['trainX_filename'] = "EEGprocue"
......@@ -49,8 +88,6 @@ config['cnn']['trainY_variable1'] = "trialinfopro"
config['cnn']['trainY_variable2'] = "cues"
config['cnn']['trainY_filename'] = "trialinfocuelocked"
# InceptionTime
config['inception'] = {}
config['inception']['trainX_variable1'] = "EEGprocue"
config['inception']['trainX_variable2'] = "data"
config['inception']['trainX_filename'] = "EEGprocue"
......@@ -59,8 +96,6 @@ config['inception']['trainY_variable1'] = "trialinfopro"
config['inception']['trainY_variable2'] = "cues"
config['inception']['trainY_filename'] = "trialinfocuelocked"
# DeepEye
config['deepeye'] = {}
config['deepeye']['trainX_variable1'] = "EEGprocue"
config['deepeye']['trainX_variable2'] = "data"
config['deepeye']['trainX_filename'] = "EEGprocue"
......@@ -68,17 +103,3 @@ config['deepeye']['trainX_filename'] = "EEGprocue"
config['deepeye']['trainY_variable1'] = "trialinfopro"
config['deepeye']['trainY_variable2'] = "cues"
config['deepeye']['trainY_filename'] = "trialinfocuelocked"
\ No newline at end of file
# You can set descriptive experiment names or simply set empty string ''.
# config['model_name'] = 'skeleton_code'
# Create a unique output directory for this experiment.
timestamp = str(int(time.time()))
model_folder_name = timestamp if config['model'] == '' else timestamp + "_" + config['model']
config['model_dir'] = os.path.abspath(os.path.join(config['log_dir'], model_folder_name))
if not os.path.exists(config['model_dir']):
os.makedirs(config['model_dir'])
config['info_log'] = config['model_dir'] + '/' + 'info.log'
config['batches_log'] = config['model_dir'] + '/' + 'batches.log'
......@@ -13,12 +13,13 @@ def main():
logging.basicConfig(filename=config['info_log'], level=logging.INFO)
logging.info('Started the Logging')
start_time = time.time()
try:
# try:
trainX, trainY = IOHelper.get_mat_data(config['data_dir'], verbose=True)
# trainX, trainY = IOHelper.get_pickle_data(config['data_dir'], verbose=True)
# IOHelper.store(trainX, trainY)
except:
return
#except:
# print("ERROR while loading data.")
# return
if config['model'] == 'cnn':
logging.info("Started running CNN-1. If you want to run other methods please choose another model in the config.py file.")
......@@ -36,7 +37,9 @@ def main():
elif config['model'] == 'deepeye':
logging.info("Started running DeepEye. If you want to run other methods please choose another model in the config.py file.")
deepEye.run(trainX=trainX, trainY=trainY)
deepeye_x = np.transpose(trainX, (0, 2, 1))
logging.info(deepeye_x.shape)
deepEye.run(trainX=deepeye_x, trainY=trainY)
else:
logging.info('Cannot start the program. Please choose one model in the config.py file')
......
......@@ -7,14 +7,14 @@ import h5py
import logging
def get_mat_data(data_dir, verbose=True):
with h5py.File(data_dir + 'all_EEGprocuesan.mat', 'r') as f:
X = f['all_EEGprocuesan'][:]
with h5py.File(data_dir + config['trainX_file'], 'r') as f:
X = f[config['trainX_variable']][:]
if verbose:
logging.info("X training loaded.")
logging.info(X.shape)
with h5py.File(data_dir + 'all_trialinfoprosan.mat', 'r') as f:
y = f['all_trialinfoprosan'][:]
with h5py.File(data_dir + config['trainY_file'], 'r') as f:
y = f[config['trainY_variable']][:]
if verbose:
logging.info("y training loaded.")
logging.info(y.shape)
......@@ -22,6 +22,7 @@ def get_mat_data(data_dir, verbose=True):
if verbose: logging.info("Setting the shapes")
X = np.transpose(X, (2, 1, 0))
y = np.transpose(y, (1, 0))
if config['downsampled']: X = np.transpose(X, (0, 2, 1))
if verbose:
logging.info(X.shape)
logging.info(y.shape)
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment