Commit 22aa55fa authored by zpgeng

Update related modules

parent a7faec59
@@ -20,9 +20,8 @@ def run(trainX, trainY):
classifier = Classifier_DEEPEYE_LSTM(output_directory=config['root_dir'],
input_shape=config['deepeye-lstm']['input_shape'])
hist = classifier.fit(trainX, trainY)
plot_loss(config['model_dir'], hist, config['model'], True)
plot_acc(config['model_dir'], hist, config['model'], True)
# Newly added lines below
plot_loss(hist, config['model_dir'], config['model'], True)
plot_acc(hist, config['model_dir'], config['model'], True)
save_logs(hist, config['model_dir'], config['model'], pytorch=False)
#save_model_param(classifier.model, config['model_dir'], config['model'], pytorch=False)
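The swapped argument order above implies the plotting helpers are now called as plot_fn(hist, model_dir, model_name, save). A minimal sketch of a compatible plot_loss, assuming this signature and matplotlib usage (the real helper lives elsewhere in this repo):

import matplotlib.pyplot as plt

def plot_loss(hist, model_dir, model_name, save=False):
    # hist is the History object returned by Keras model.fit
    plt.figure()
    plt.plot(hist.history['loss'], label='training loss')
    plt.plot(hist.history['val_loss'], label='validation loss')
    plt.title(model_name + ' loss')
    plt.legend()
    if save:
        plt.savefig(model_dir + '/' + model_name + '_loss.png')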
@@ -46,8 +45,8 @@ class Classifier_DEEPEYE_LSTM:
"""
def __init__(self, output_directory, input_shape, dropoutRate=0.25, verbose=True, build=True,
batch_size=64, nb_filters=64, use_residual=True, use_bottleneck=True, depth=6,
kernel_size=41, nb_epochs=300):
batch_size=32, nb_filters=32, use_residual=True, use_bottleneck=True, depth=12,
kernel_size=41, nb_epochs=100):
self.output_directory = output_directory
self.nb_filters = nb_filters
@@ -60,12 +59,13 @@ class Classifier_DEEPEYE_LSTM:
self.bottleneck_size = 32
self.nb_epochs = nb_epochs
self.dropoutRate = dropoutRate
self.momentum = 0.9
self.lr = 0.001
self.feature_nb = 75
self.jump = 3
self.lr = 0.01
self.mmt = 0.95
self.feature_nb = 32
if build:
# build mode
# build model
if config['split']:
self.model = self.split_model(input_shape)
else:
@@ -73,13 +73,13 @@ class Classifier_DEEPEYE_LSTM:
if verbose:
self.model.summary()
print(20*'*')
print("Parameters are: Dropout rate:", self.dropoutRate, " Momentum:", self.momentum,
"Learning rate:", self.lr, "# of features:", self.feature_nb, "# of filters:", self.nb_filters)
print("Parameters are: Dropout rate:", self.dropoutRate, "# of features:", self.feature_nb,
"# of filters:", self.nb_filters, "# of depth:", self.depth)
print(20*'*')
self.verbose = verbose
def split_model(self,input_shape):
def split_model(self, input_shape):
input_layer = keras.layers.Input((input_shape[0], input_shape[1]))
output = []
@@ -88,7 +88,7 @@ class Classifier_DEEPEYE_LSTM:
for c in config['cluster'].keys():
output.append(self.build_model(input_shape = None, X = tf.transpose(tf.nn.embedding_lookup(
tf.transpose(input_layer,(1,0,2)),config['cluster'][c]),(1,0,2))))
tf.transpose(input_layer,(1,0,2)), config['cluster'][c]),(1,0,2))))
# concatenate the cluster outputs, then apply one dense layer of last_channel dimension followed by the output layer
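The transpose/embedding_lookup pair above is a channel-selection trick: with inputs of shape (batch, channels, samples), moving channels to axis 0 lets embedding_lookup gather a cluster of channels as rows. A standalone sketch with a hypothetical index list:

import tensorflow as tf

x = tf.random.normal((8, 129, 500))        # (batch, channels, samples)
cluster = [0, 5, 7]                        # hypothetical channel indices
picked = tf.nn.embedding_lookup(tf.transpose(x, (1, 0, 2)), cluster)
subset = tf.transpose(picked, (1, 0, 2))   # back to (batch, 3, samples)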
@@ -105,8 +105,6 @@ class Classifier_DEEPEYE_LSTM:
lstm = Sequential()
lstm.add(LSTM(output_feature_nb, return_sequences=True))
lstm.add(Dropout(self.dropoutRate))
lstm.add(LSTM(output_feature_nb, return_sequences=True))
lstm.add(Dropout(self.dropoutRate))
lstm.add(keras.layers.BatchNormalization())
output_tensor = lstm(input_tensor)
return output_tensor
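With the second LSTM/Dropout pair removed, the block reduces to one recurrent layer plus batch normalization. A quick shape check of the reduced block, assuming 32 output features and (batch, timesteps, channels) input:

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dropout

lstm = Sequential([
    LSTM(32, return_sequences=True),
    Dropout(0.25),
    keras.layers.BatchNormalization(),
])
print(lstm(tf.zeros((8, 500, 129))).shape)   # (8, 500, 32)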
@@ -187,7 +185,7 @@ class Classifier_DEEPEYE_LSTM:
gap_layer = tf.keras.layers.GlobalAveragePooling1D()(x)
# Add Dropout layer
gap_layer = tf.keras.layers.Dropout(self.dropoutRate)(gap_layer)
#gap_layer = tf.keras.layers.Dropout(self.dropoutRate)(gap_layer)
if config['split']:
return gap_layer
output_layer = tf.keras.layers.Dense(1, activation='sigmoid')(gap_layer)
@@ -196,19 +194,14 @@ class Classifier_DEEPEYE_LSTM:
return model
def fit(self, lstm_x, y):
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=50)
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
csv_logger = CSVLogger(config['batches_log'], append=True, separator=';')
ckpt_dir = config['model_dir'] + '/' + config['model'] + '_' + 'best_model.h5'
ckpt = tf.keras.callbacks.ModelCheckpoint(ckpt_dir, verbose=1, monitor='val_accuracy', save_best_only=True, mode='auto')
if self.batch_size is None:
mini_batch_size = int(min(lstm_x.shape[0] / 10, 16))
else:
mini_batch_size = self.batch_size
#self.model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.Adam(learning_rate=self.lr), metrics=['accuracy'])
ckpt = tf.keras.callbacks.ModelCheckpoint(ckpt_dir, verbose=1, monitor='val_accuracy',
save_best_only=True, mode='auto')
#self.model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
self.model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.RMSprop(learning_rate=self.lr,
momentum=self.momentum), metrics=['accuracy'])
hist = self.model.fit(lstm_x, y, batch_size=mini_batch_size, verbose=1, validation_split=0.2,
momentum=self.mmt), metrics=['accuracy'])
hist = self.model.fit(lstm_x, y, verbose=1, validation_split=0.2,
epochs=self.nb_epochs, callbacks=[early_stop, csv_logger, ckpt])
return hist
\ No newline at end of file
return hist
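The compile step now uses RMSprop with momentum in place of the commented-out Adam, and dropping batch_size from fit means Keras falls back to its default of 32. A self-contained sketch of the optimizer setup with the hyperparameters above (lr=0.01, momentum=0.95):

import tensorflow as tf
from tensorflow import keras

model = keras.Sequential([keras.layers.Dense(1, activation='sigmoid')])
model.compile(loss='binary_crossentropy',
              optimizer=keras.optimizers.RMSprop(learning_rate=0.01, momentum=0.95),
              metrics=['accuracy'])
# batch_size omitted -> Keras defaults to 32
model.fit(tf.zeros((64, 4)), tf.zeros((64, 1)), epochs=1, verbose=0)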
@@ -20,7 +20,8 @@ def run(trainX, trainY):
plot_loss(hist, config['model_dir'], config['model'], True)
plot_acc(hist, config['model_dir'], config['model'], True)
save_logs(hist, config['model_dir'], config['model'], pytorch=False)
save_model_param(classifier.model, config['model_dir'], config['model'], pytorch=False)
#save_model_param(classifier.model, config['model_dir'], config['model'], pytorch=False)
class Classifier_DEEPEYE:
"""
@@ -42,19 +43,23 @@ class Classifier_DEEPEYE:
"""
def __init__(self, output_directory, input_shape, verbose=False, build=True,
batch_size=64, nb_filters=32, use_residual=True, use_bottleneck=True, depth=6,
kernel_size=41, nb_epochs=100):
batch_size=64, nb_filters=32, use_residual=True, use_bottleneck=True, depth=9,
kernel_size=40, nb_epochs=100):
self.output_directory = output_directory
self.nb_filters = nb_filters
self.use_residual = use_residual
self.use_bottleneck = use_bottleneck
self.depth = depth
self.kernel_size = kernel_size - 1
self.kernel_size = kernel_size
self.callbacks = None
self.batch_size = batch_size
self.bottleneck_size = 32
self.nb_epochs = nb_epochs
self.F1 = 32
self.D = 8
self.kernLength = 250
self.res_jump = 3
if build:
if config['split']:
@@ -63,37 +68,22 @@ class Classifier_DEEPEYE:
self.model = self.build_model(input_shape)
if verbose:
self.model.summary()
print(40*'*')
print("Parameters are: nb_epochs:", self.nb_epochs, " F1:", self.F1, " D:", self.D,
" kernel length:", self.kernLength, " Residual jump:", self.res_jump)
print(40*'*')
# self.model.save_weights(self.output_directory + 'model_init.hdf5')
def split_model(self,input_shape):
input_layer = keras.layers.Input((input_shape[0], input_shape[1]))
output=[]
# run the inception modules over each channel cluster
for c in config['cluster'].keys():
output.append(self.build_model(input_shape = None, X = tf.expand_dims(tf.transpose(tf.nn.embedding_lookup(
tf.transpose(input_layer,(1,0,2)),config['cluster'][c]),(1,0,2)),axis=-1), c = c))
# concatenate the cluster outputs, then apply one dense layer of last_channel dimension followed by the output layer
x = tf.keras.layers.Concatenate(axis=1)(output)
dense=tf.keras.layers.Dense(32, activation='relu')(x)
output_layer=tf.keras.layers.Dense(1,activation='sigmoid')(dense)
model = tf.keras.models.Model(inputs=input_layer, outputs=output_layer)
return model
def _eeg_preprocessing(input_tensor, F1=8, D=2, kernLength=250, c = None):
@staticmethod
def _eeg_preprocessing(input_tensor, c, F1, D, kernLength):
"""
Static method: this function does not reference any instance attributes of the class.
"""
# EEGNet feature extraction
if config['split']:
Chans=len(config['cluster'][c])
Chans = len(config['cluster'][c])
else:
Chans = config['deepeye']['channels']
Samples = config['deepeye']['samples']
@@ -110,34 +100,33 @@ class Classifier_DEEPEYE:
vertical_tensor = BatchNormalization()(vertical_tensor)
eeg_tensor = Activation('elu')(vertical_tensor)
eeg_tensor = tf.keras.layers.Dropout(0.5)(eeg_tensor)
# Reshape the tensor from (129, 500, 1) to (129, 500) and feed it into the inception module
output_tensor = eeg_tensor[:, 0, :, :]
#output_tensor = tf.transpose(output_tensor, perm=[0, 2, 1])
return output_tensor
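Assuming the usual EEGNet shape flow, the depthwise step collapses the channel axis to size 1, so the slice above squeezes (batch, 1, samples, filters) down to the (batch, samples, filters) layout the 1-D inception modules expect:

import tensorflow as tf

t = tf.zeros((8, 1, 500, 64))     # (batch, 1, samples, filters)
print(t[:, 0, :, :].shape)        # (8, 500, 64)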
def _inception_module(self, input_tensor, nb_filters=32, use_bottleneck=True, kernel_size=40, bottleneck_size=32,
stride=1, activation='linear'):
def _inception_module(self, input_tensor, stride=1, activation='linear'):
if use_bottleneck and int(input_tensor.shape[-1]) > 1:
input_inception = keras.layers.Conv1D(filters=bottleneck_size, kernel_size=1,
if self.use_bottleneck and int(input_tensor.shape[-1]) > 1:
input_inception = keras.layers.Conv1D(filters=self.bottleneck_size, kernel_size=1,
padding='same', activation=activation, use_bias=False)(input_tensor)
else:
input_inception = input_tensor
# kernel_size_s = [3, 5, 8, 11, 17]
kernel_size_s = [kernel_size // (2 ** i) for i in range(3)]
kernel_size_s = [self.kernel_size // (2 ** i) for i in range(3)]
conv_list = []
for i in range(len(kernel_size_s)):
conv_list.append(keras.layers.Conv1D(filters=nb_filters, kernel_size=kernel_size_s[i],
conv_list.append(keras.layers.Conv1D(filters=self.nb_filters, kernel_size=kernel_size_s[i],
strides=stride, padding='same', activation=activation,
use_bias=False)(input_inception))
max_pool_1 = keras.layers.MaxPool1D(pool_size=3, strides=stride, padding='same')(input_tensor)
max_pool_1 = keras.layers.MaxPool1D(pool_size=10, strides=stride, padding='same')(input_tensor)
conv_6 = keras.layers.Conv1D(filters=nb_filters, kernel_size=1, padding='same', activation=activation,
conv_6 = keras.layers.Conv1D(filters=self.nb_filters, kernel_size=1, padding='same', activation=activation,
use_bias=False)(max_pool_1)
conv_list.append(conv_6)
@@ -157,28 +146,28 @@ class Classifier_DEEPEYE:
x = keras.layers.Activation('relu')(x)
return x
def build_model(self, input_shape, X = None, c = None, nb_filters=32, use_residual=True, use_bottleneck=True, depth=6, kernel_size=40, F1=8,
D=2, kernLength=125):
def build_model(self, input_shape, X = None, c = None):
if config['split']:
input_layer = X
eeg_tensor = self._eeg_preprocessing(input_layer, c, self.F1, self.D, self.kernLength)
else:
input_layer = keras.layers.Input((input_shape[0], input_shape[1], 1))
eeg_tensor = self._eeg_preprocessing(input_layer, F1, D, kernLength, c = c)
eeg_tensor = self._eeg_preprocessing(input_layer, None, self.F1, self.D, self.kernLength)
x = eeg_tensor
input_res = eeg_tensor
for d in range(depth):
for d in range(self.depth):
x = self._inception_module(x)
if use_residual and d % 3 == 2:
if self.use_residual and d % self.res_jump == (self.res_jump - 1):
x = self._shortcut_layer(input_res, x)
input_res = x
gap_layer = tf.keras.layers.GlobalAveragePooling1D()(x)
gap_layer = tf.keras.layers.Dropout(0.5)(gap_layer)
if config['split']:
return gap_layer
output_layer = tf.keras.layers.Dense(1, activation='sigmoid')(gap_layer)
@@ -186,12 +175,38 @@ class Classifier_DEEPEYE:
return model
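In the residual loop above, d % self.res_jump == (self.res_jump - 1) places a shortcut after every res_jump-th inception module; with the new depth=9 and res_jump=3 that is after modules 2, 5 and 8. A sketch of the placement:

depth, res_jump = 9, 3
print([d for d in range(depth) if d % res_jump == res_jump - 1])   # [2, 5, 8]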
def split_model(self, input_shape):
input_layer = keras.layers.Input((input_shape[0], input_shape[1]))
output = []
# run the inception modules over each channel cluster
for cluster in config['cluster'].keys():
output.append(self.build_model(input_shape = None,
X = tf.expand_dims(tf.transpose(tf.nn.embedding_lookup(tf.transpose(input_layer,(1,0,2)), config['cluster'][cluster]),(1,0,2)),axis=-1),
c = cluster))
# concatenate the cluster outputs, then apply one dense layer of last_channel dimension followed by the output layer
x = tf.keras.layers.Concatenate(axis=1)(output)
dense = tf.keras.layers.Dense(32, activation='relu')(x)
output_layer = tf.keras.layers.Dense(1, activation='sigmoid')(dense)
model = tf.keras.models.Model(inputs=input_layer, outputs=output_layer)
return model
def fit(self, deepeye_x, y):
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_accuracy', factor=0.5, patience=20,
min_lr=0.0001)
self.model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
#self.model.compile(loss='binary_crossentropy',
# optimizer=keras.optimizers.SGD(learning_rate=0.001, momentum=0.95, nesterov=True), metrics=['accuracy'])
csv_logger = CSVLogger(config['batches_log'], append=True, separator=';')
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=20)
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=10)
ckpt_dir = config['model_dir'] + '/' + config['model'] + '_' + 'best_model.h5'
ckpt = tf.keras.callbacks.ModelCheckpoint(ckpt_dir, verbose=1, monitor='val_accuracy', save_best_only=True, mode='auto')
hist = self.model.fit(deepeye_x, y, verbose=1, validation_split=0.2, epochs=self.nb_epochs,
callbacks=[csv_logger, ckpt, early_stop])
return hist
\ No newline at end of file
callbacks=[csv_logger, ckpt, early_stop, reduce_lr])
return hist
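The newly added ReduceLROnPlateau halves the learning rate whenever val_accuracy stalls for 20 epochs, clipped at min_lr. Starting from Adam's default rate of 1e-3, the reachable schedule is (sketch):

lr, schedule = 1e-3, []
while lr > 1e-4:
    lr = max(lr * 0.5, 1e-4)
    schedule.append(lr)
print(schedule)   # [0.0005, 0.00025, 0.000125, 0.0001]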
@@ -17,8 +17,8 @@ def run(trainX, trainY):
# save_model_param(classifier.model, config['model_dir'], config['model'], pytorch=False)
class Classifier_DEEPEYE3:
def __init__(self, input_shape, verbose=True, build=True, batch_size=64, nb_filters=32,
use_residual=True, use_bottleneck=True, depth=9, kernel_size=40, nb_epochs=100):
def __init__(self, input_shape, verbose=True, build=True, batch_size=64, nb_filters=16,
use_residual=True, use_bottleneck=True, depth=8, kernel_size=40, nb_epochs=50):
self.nb_filters = nb_filters
self.use_residual = use_residual
@@ -29,7 +29,7 @@ class Classifier_DEEPEYE3:
self.batch_size = batch_size
self.bottleneck_size = 32
self.nb_epochs = nb_epochs
self.residual_jump = 3
self.residual_jump = 4
self.verbose = verbose
if build:
@@ -40,6 +40,7 @@ class Classifier_DEEPEYE3:
if self.verbose:
self.model.summary()
def _inception_module(self, input_tensor, stride=1, activation='linear'):
if self.use_bottleneck and int(input_tensor.shape[-1]) > 1:
@@ -101,6 +102,7 @@ class Classifier_DEEPEYE3:
input_res = x
gap_layer = tf.keras.layers.GlobalAveragePooling1D()(x)
#gap_layer = tf.keras.layers.Dropout(0.5)(gap_layer)
if config['split']:
return gap_layer
output_layer = tf.keras.layers.Dense(1, activation='sigmoid')(gap_layer)
@@ -125,7 +127,9 @@ class Classifier_DEEPEYE3:
# concatenate the cluster outputs, then apply one dense layer of last_channel dimension followed by the output layer
x = tf.keras.layers.Concatenate()(output)
dense = tf.keras.layers.Dense(32, activation='relu')(x)
dense = tf.keras.layers.Dense(8, activation='relu')(x)
# Add dropout
dense = tf.keras.layers.Dropout(0.5)(dense)
output_layer = tf.keras.layers.Dense(1, activation='sigmoid')(dense)
model = tf.keras.models.Model(inputs=input_layer, outputs=output_layer)
model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
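The classification head above now funnels the concatenated cluster features through a narrower Dense(8) with dropout before the sigmoid output. A shape sketch, assuming a hypothetical concatenated feature vector of width 64:

import tensorflow as tf
from tensorflow import keras

head = keras.Sequential([
    keras.layers.Dense(8, activation='relu'),
    keras.layers.Dropout(0.5),
    keras.layers.Dense(1, activation='sigmoid'),
])
print(head(tf.zeros((4, 64))).shape)   # (4, 1)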
@@ -133,7 +137,7 @@ class Classifier_DEEPEYE3:
def fit(self, inception_x, y):
csv_logger = CSVLogger(config['batches_log'], append=True, separator=';')
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=5)
ckpt_dir = config['model_dir'] + '/' + config['model'] + '_' + 'best_model.h5'
ckpt = tf.keras.callbacks.ModelCheckpoint(ckpt_dir, verbose=1, monitor='val_accuracy', save_best_only=True, mode='auto')
hist = self.model.fit(inception_x, y, verbose=1, validation_split=0.2,
......
@@ -25,11 +25,10 @@ def run(trainX, trainY):
save_logs(hist, config['model_dir'], config['model'], pytorch=False)
#save_model_param(classifier.model, config['model_dir'], config['model'], pytorch=False)
class Classifier_EEGNet:
def __init__(self, output_directory, nb_classes=1, chans = config['eegnet']['channels'], samples = config['eegnet']['samples'], dropoutRate = 0.5, kernLength = 250, F1 = 32,
D = 8, F2 = 256, norm_rate = 0.25, dropoutType = 'Dropout', verbose = True, build = True, X = None):
def __init__(self, output_directory, nb_classes=1, chans = config['eegnet']['channels'],
samples = config['eegnet']['samples'], dropoutRate = 0.5, kernLength = 250, F1 = 16,
D = 4, F2 = 256, norm_rate = 0.5, dropoutType = 'Dropout', verbose = True, build = True, X = None):
self.output_directory = output_directory
self.nb_classes = nb_classes
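For reference, the EEGNet paper ties the separable-convolution filter count to F2 = F1 * D; the new defaults (F1=16, D=4) would give 64 under that convention, while this class keeps F2=256. A sketch of the relation:

F1, D = 16, 4
F2_paper = F1 * D   # 64 under the EEGNet-paper convention
F2_here = 256       # value retained by this class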
@@ -51,7 +50,6 @@ class Classifier_EEGNet:
self.model = self.build_model()
if verbose:
self.model.summary()
# self.model.save_weights(self.output_directory + 'model_init.hdf5')
def split_model(self):
......