Commit d48a7a8f authored by Ard Kastrati

Merge branch 'patch-1' into 'master'

Update deepEye.py

See merge request kard/dl-project!3
parents 6c66c92d 98639b3f
import os
import time

import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Activation, Conv2D, DepthwiseConv2D, BatchNormalization
from tensorflow.keras.constraints import max_norm
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import seaborn as sns

from utils.utils import save_logs
from utils.utils import calculate_metrics
from utils.utils import save_test_duration

sns.set_style('darkgrid')
def run(trainX, trainY):
    nb_classes = 2
    # per-trial shape (channels, samples, ...); drop the trial dimension,
    # since build_model only uses input_shape[0] and input_shape[1]
    input_shape = np.shape(trainX)[1:]
    # joining with '' guarantees a trailing separator, because the class
    # concatenates file names directly onto output_directory
    output_directory = os.path.join(os.getcwd(), '')
    x_train, x_val, y_train, y_val = train_test_split(trainX, trainY, test_size=0.2,
                                                      shuffle=True, random_state=42)
    deepeye_classifier = Classifier_DEEPEYE(output_directory, input_shape, nb_classes)
    # labels are one-hot encoded (categorical cross-entropy), so the integer
    # ground truth for the validation split is recovered with argmax
    y_true = np.argmax(y_val, axis=1)
    df_metrics = deepeye_classifier.fit(x_train, y_train, x_val, y_val, y_true, plot_test_acc=True)
    print(df_metrics)
    print(10 * '*', 'end training')
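# Usage sketch for run() (assumptions: trainX holds EEG trials of shape
# (n_trials, 129, 500, 1) -- 129 channels, 500 time samples -- and trainY is
# one-hot encoded with two columns for the left/right classes):
#
#   trainX = np.load('trainX.npy')   # hypothetical file names
#   trainY = np.load('trainY.npy')
#   run(trainX, trainY)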
class Classifier_DEEPEYE:
    """
    Inputs:
        nb_classes      : int, number of classes to classify
        input_shape     : shape of the input tensor, in our case: 129 * 500 * 1
        use_bottleneck  : use a bottleneck layer to select the most informative channels
        use_residual    : use a shortcut (residual) layer to help avoid vanishing gradients
        kernel_size     : 41
        batch_size      : 64
        epochs          : 1500
        output_directory: directory where the weights, plots and results are stored
        depth           : 6, number of repetitions of the inception module
    Outputs:
        y_pred          : class (left/right for nb_classes=2) of the given input tensor
    """
    def __init__(self, output_directory, input_shape, nb_classes, verbose=False, build=True,
                 batch_size=64, nb_filters=32, use_residual=True, use_bottleneck=True, depth=6,
                 kernel_size=41, nb_epochs=1500):
        self.output_directory = output_directory
        self.nb_filters = nb_filters
        self.use_residual = use_residual
        self.use_bottleneck = use_bottleneck
        self.depth = depth
        self.kernel_size = kernel_size - 1
        self.callbacks = None
        self.batch_size = batch_size
        self.bottleneck_size = 32
        self.nb_epochs = nb_epochs
        self.verbose = verbose
        if build:
            # build model
            self.model = self.build_model(input_shape, nb_classes)
            if verbose:
                self.model.summary()
            self.model.save_weights(self.output_directory + 'model_init.hdf5')
    @staticmethod
    def _eeg_preprocessing(input_tensor, F1=8, D=2, kernLength=125):
        """
        EEGNet-style feature extraction. Static method since this function
        does not use any attribute of the class.
        """
        Chans = input_tensor.shape[1]
        Samples = input_tensor.shape[2]
        # Temporal convolution: the filter slides horizontally (along time)
        horizontal_tensor = Conv2D(F1, (1, kernLength), padding='same',
                                   input_shape=(Chans, Samples, 1),
                                   use_bias=False)(input_tensor)
        horizontal_tensor = BatchNormalization()(horizontal_tensor)
        # Spatial convolution: the filter slides vertically (across channels)
        vertical_tensor = DepthwiseConv2D((Chans, 1), use_bias=False,
                                          depth_multiplier=D,
                                          depthwise_constraint=max_norm(1.))(horizontal_tensor)
        vertical_tensor = BatchNormalization()(vertical_tensor)
        eeg_tensor = Activation('elu')(vertical_tensor)
        # Drop the collapsed channel axis and transpose, so the tensor can be
        # fed to the Conv1D layers of the inception module
        output_tensor = eeg_tensor[:, 0, :, :]
        output_tensor = tf.transpose(output_tensor, perm=[0, 2, 1])
        return output_tensor
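    # Shape trace with the default parameters (assuming a 129-channel,
    # 500-sample input):
    #   input_tensor            (batch, 129, 500, 1)
    #   Conv2D(F1=8)            (batch, 129, 500, 8)
    #   DepthwiseConv2D(D=2)    (batch,   1, 500, 16)  # 'valid' padding collapses the channel axis
    #   slice [:, 0, :, :]      (batch, 500, 16)
    #   transpose [0, 2, 1]     (batch,  16, 500)      # fed to the Conv1D inception modules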
    def _inception_module(self, input_tensor, stride=1, activation='linear'):
        '''
        Inception module
        Input:
            input_tensor : output of the EEGNet preprocessing to be forwarded
            stride       : 1
            activation   : 'linear'
        Output:
            x : input_tensor passed through the inception module
        '''
        if self.use_bottleneck and int(input_tensor.shape[-1]) > 1:
            input_inception = keras.layers.Conv1D(filters=self.bottleneck_size, kernel_size=1,
                                                  padding='same', activation=activation,
                                                  use_bias=False)(input_tensor)
        else:
            input_inception = input_tensor
        # alternative: kernel_size_s = [3, 5, 8, 11, 17]
        kernel_size_s = [self.kernel_size // (2 ** i) for i in range(3)]
        conv_list = []
        for i in range(len(kernel_size_s)):
            conv_list.append(keras.layers.Conv1D(filters=self.nb_filters, kernel_size=kernel_size_s[i],
                                                 strides=stride, padding='same', activation=activation,
                                                 use_bias=False)(input_inception))
        max_pool_1 = keras.layers.MaxPool1D(pool_size=3, strides=stride, padding='same')(
            input_tensor)  # I think it should be eeg_tensor here!
        conv_6 = keras.layers.Conv1D(filters=self.nb_filters, kernel_size=1, padding='same',
                                     activation=activation, use_bias=False)(max_pool_1)
        conv_list.append(conv_6)
        x = keras.layers.Concatenate(axis=2)(conv_list)
        x = keras.layers.BatchNormalization()(x)
        x = keras.layers.Activation(activation='relu')(x)
        return x
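    # With the default kernel_size=41 (stored as 40 in __init__), the three
    # parallel Conv1D branches use kernel sizes [40 // 2**i for i in range(3)],
    # i.e. [40, 20, 10], plus the MaxPool1D -> 1x1 Conv1D branch.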
    @staticmethod
    def _shortcut_layer(input_tensor, out_tensor):
        '''
        Implementation of a shortcut layer inspired by residual networks (ResNet).
        '''
        shortcut_y = keras.layers.Conv1D(filters=int(out_tensor.shape[-1]), kernel_size=1,
                                         padding='same', use_bias=False)(input_tensor)
        shortcut_y = keras.layers.BatchNormalization()(shortcut_y)
        x = keras.layers.Add()([shortcut_y, out_tensor])
        x = keras.layers.Activation('relu')(x)
        return x
    def build_model(self, input_shape, nb_classes, F1=8, D=2, kernLength=125):
        '''
        Full model. The network is composed of:
            - data preprocessing using EEGNet
            - inception modules
            - shortcut (residual) layers
            - global average pooling
            - fully connected layer with softmax for class prediction
        '''
        input_layer = keras.layers.Input((input_shape[0], input_shape[1], 1))
        eeg_tensor = self._eeg_preprocessing(input_layer, F1, D, kernLength)
        x = eeg_tensor
        input_res = eeg_tensor
        for d in range(self.depth):
            x = self._inception_module(x)
            # add a shortcut connection after every third inception module
            if self.use_residual and d % 3 == 2:
                x = self._shortcut_layer(input_res, x)
                input_res = x
        gap_layer = keras.layers.GlobalAveragePooling1D()(x)
        output_layer = keras.layers.Dense(nb_classes, activation='softmax')(gap_layer)
        model = keras.models.Model(inputs=input_layer, outputs=output_layer)
        model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(),
                      metrics=['accuracy'])
        reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='loss', factor=0.5, patience=50,
                                                      min_lr=0.0001)
        file_path = self.output_directory + 'best_model.hdf5'
        model_checkpoint = keras.callbacks.ModelCheckpoint(filepath=file_path, monitor='loss',
                                                           save_best_only=True)
        self.callbacks = [reduce_lr, model_checkpoint]
        return model
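    # With the defaults (depth=6, use_residual=True), shortcut layers are added
    # after the 3rd and 6th inception modules (d = 2 and d = 5), so the network
    # contains two residual blocks of three inception modules each.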
    def fit(self, x_train, y_train, x_val, y_val, y_true, plot_test_acc=False):
        '''
        Inputs:
            x_train, y_train : training data and labels
            x_val, y_val     : validation data and labels
            y_true           : integer ground-truth labels of the validation set
            plot_test_acc    : bool, True to monitor the validation set during
                               training; otherwise it is only used for testing (default)
        Outputs:
            df_metrics : metrics computed between the true and predicted validation labels;
                         weights, accuracy plots and predictions are saved in output_directory
        '''
        # abort if no GPU is visible (tf.config replaces the old private
        # keras.backend.tensorflow_backend check)
        if len(tf.config.list_physical_devices('GPU')) == 0:
            print('error: no gpu')
            exit()
        # x_val and y_val are only used to monitor the test loss and NOT for training
        if self.batch_size is None:
            mini_batch_size = int(min(x_train.shape[0] / 10, 16))
        else:
            mini_batch_size = self.batch_size
        start_time = time.time()
        if plot_test_acc:
            hist = self.model.fit(x_train, y_train, batch_size=mini_batch_size, epochs=self.nb_epochs,
                                  verbose=self.verbose, validation_data=(x_val, y_val),
                                  callbacks=self.callbacks)
            self.plot(hist, name='DeepEye', val=True)
        else:
            hist = self.model.fit(x_train, y_train, batch_size=mini_batch_size, epochs=self.nb_epochs,
                                  verbose=self.verbose, callbacks=self.callbacks)
            self.plot(hist, name='DeepEye')
        duration = time.time() - start_time
        self.model.save(self.output_directory + 'last_model.hdf5')
        y_pred = self.predict(x_val, y_true, x_train, y_train, y_val, return_df_metrics=False)
        # save predictions
        np.save(self.output_directory + 'y_pred.npy', y_pred)
        # convert the predictions from one-hot probabilities to integer labels
        y_pred = np.argmax(y_pred, axis=1)
        df_metrics = save_logs(self.output_directory, hist, y_pred, y_true, duration,
                               plot_test_acc=plot_test_acc)
        keras.backend.clear_session()
        return df_metrics
    def predict(self, x_test, y_true, x_train, y_train, y_test, return_df_metrics=True):
        '''
        Inputs:
            x_test, y_test    : test data and labels to evaluate the model
            y_true            : integer ground-truth labels of the test set
            return_df_metrics : bool, True to compute the metrics between the
                                predictions and the true labels; False to return
                                only the predictions y_pred
        Outputs:
            df_metrics : metrics computed between the true and predicted labels
            y_pred     : predictions on the test set x_test
        '''
        start_time = time.time()
        model_path = self.output_directory + 'best_model.hdf5'
        model = keras.models.load_model(model_path)
        y_pred = model.predict(x_test, batch_size=self.batch_size)
        if return_df_metrics:
            y_pred = np.argmax(y_pred, axis=1)
            df_metrics = calculate_metrics(y_true, y_pred, 0.0)
            return df_metrics
        else:
            test_duration = time.time() - start_time
            save_test_duration(self.output_directory + 'test_duration.csv', test_duration)
            return y_pred
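    # Example (hypothetical names): after fit(), raw softmax probabilities for
    # held-out data can be obtained with
    #   y_prob = clf.predict(x_test, y_true, x_train, y_train, y_test,
    #                        return_df_metrics=False)
    # which loads the 'best_model.hdf5' checkpoint saved during training.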
    def plot(self, hist, name, val=False):
        '''
        Plot the accuracy and the loss against the epochs during training.
        '''
        epochs = np.arange(len(hist.history['accuracy']))
        plt.figure()
        plt.title(name + ' accuracy')
        plt.plot(epochs, hist.history['accuracy'], 'b-', label='training')
        if val:
            plt.plot(epochs, hist.history['val_accuracy'], 'g-', label='validation')
        plt.legend()
        plt.xlabel('epochs')
        plt.ylabel('Accuracy')
        plt.savefig(self.output_directory + 'accuracy_' + name + '.png')
        plt.figure()
        plt.title(name + ' loss')
        plt.plot(epochs, hist.history['loss'], 'b-', label='training')
        if val:
            plt.plot(epochs, hist.history['val_loss'], 'g-', label='validation')
        plt.legend()
        plt.xlabel('epochs')
        plt.ylabel('Cross Entropy')
        plt.savefig(self.output_directory + 'loss_' + name + '.png')
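# A minimal smoke test on synthetic data (a sketch: the array shapes are
# assumptions based on the 129-channel / 500-sample setup described above, and
# nb_epochs is lowered so the demo finishes quickly; note that fit() exits if
# no GPU is visible):
if __name__ == '__main__':
    rng = np.random.default_rng(42)
    x = rng.standard_normal((64, 129, 500, 1)).astype('float32')
    y = np.eye(2)[rng.integers(0, 2, size=64)]   # one-hot left/right labels
    x_train, x_val, y_train, y_val = train_test_split(x, y, test_size=0.25, random_state=42)
    clf = Classifier_DEEPEYE(os.path.join(os.getcwd(), ''), x.shape[1:3], 2,
                             verbose=True, nb_epochs=2)
    clf.fit(x_train, y_train, x_val, y_val, np.argmax(y_val, axis=1), plot_test_acc=True)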