Commit 6e7c7e00 authored by okiss

Ensemble

parent 86e21dcd
......@@ -8,21 +8,59 @@ from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.constraints import max_norm
from keras.callbacks import CSVLogger
from sklearn.model_selection import train_test_split
from config import config
from utils.utils import *
import seaborn as sns
sns.set_style('darkgrid')
class prediction_history(keras.callbacks.Callback):
    """Records the model's validation predictions after every training batch."""
    def __init__(self, val_data):
        self.val_data = val_data
        self.predhis = []
        self.targets = []

    def on_batch_end(self, batch, logs={}):
        # note: newer Keras versions call this hook on_train_batch_end
        x_val, y_val = self.val_data
        self.targets.append(y_val)
        prediction = self.model.predict(x_val)
        self.predhis.append(prediction)
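# A minimal usage sketch for the callback above, assuming an already compiled
# binary-classification Keras model (names below are illustrative, not from
# this repo):
#
#   pred_cb = prediction_history((X_val, y_val))
#   model.fit(X_train, y_train, callbacks=[pred_cb])
#   # pred_cb.predhis then holds one prediction array per training batch,
#   # pred_cb.targets the matching validation labels.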
def run(trainX, trainY):
    classifier = Classifier_DEEPEYE(output_directory=config['root_dir'], input_shape=config['deepeye']['input_shape'])
    hist = classifier.fit(deepeye_x=trainX, y=trainY)
    plot_loss(hist, config['model_dir'], config['model'], True)
    plot_acc(hist, config['model_dir'], config['model'], True)
    save_logs(hist, config['model_dir'], config['model'], pytorch=False)

    acc = tf.keras.metrics.BinaryAccuracy()
    bce = tf.keras.losses.BinaryCrossentropy()
    if config['ensemble'] > 1:
        config['model'] += '_ensemble'
    loss = []
    accuracy = []
    # Train each ensemble member and sum its per-batch validation predictions.
    for i in range(config['ensemble']):
        classifier = Classifier_DEEPEYE(output_directory=config['root_dir'], input_shape=config['deepeye']['input_shape'])
        hist, pred_ens = classifier.fit(deepeye_x=trainX, y=trainY)
        if i == 0:
            pred = pred_ens.predhis
        else:
            for j, pred_epoch in enumerate(pred_ens.predhis):
                pred[j] = np.array(pred[j]) + np.array(pred_epoch)
    # Average the summed predictions and score them against the saved targets.
    for j, pred_epoch in enumerate(pred):
        pred_epoch = (pred_epoch / config['ensemble']).tolist()
        loss.append(bce(pred_ens.targets[j], pred_epoch).numpy())
        pred_epoch = np.round(pred_epoch, 0)
        # acc.reset_states()
        # acc.update_state(pred[j], pred_ens.targets[j])
        # For p, t in {0, 1}, (p + t - 1)**2 is 1 iff p == t, so this mean is the accuracy.
        accuracy.append(np.mean((pred_epoch.reshape(-1) + pred_ens.targets[j].reshape(-1) - 1) ** 2))
    hist.history['val_loss'] = loss
    hist.history['val_accuracy'] = accuracy
    plot_loss(hist, config['model_dir'], config['model'], val=True)
    plot_acc(hist, config['model_dir'], config['model'], val=True)
    save_logs(hist, config['model_dir'], config['model'], pytorch=False)
    # save_model_param(classifier.model, config['model_dir'], config['model'], pytorch=False)
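# A minimal numeric sketch (values illustrative) of the averaging and the
# accuracy identity used above:
#
#   import numpy as np
#   member_preds = np.array([[0.9], [0.4], [0.7]])       # three ensemble members
#   mean_pred = np.round(np.mean(member_preds, axis=0))  # 0.667 -> [1.]
#   target = np.array([1.0])
#   # (p + t - 1)**2 is 1 exactly when the rounded prediction matches the target
#   acc = np.mean((mean_pred.reshape(-1) + target.reshape(-1) - 1) ** 2)  # 1.0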
class Classifier_DEEPEYE:
    """
    Inputs:
......@@ -43,8 +81,8 @@ class Classifier_DEEPEYE:
"""
def __init__(self, output_directory, input_shape, verbose=False, build=True,
batch_size=64, nb_filters=32, use_residual=True, use_bottleneck=True, depth=9,
kernel_size=40, nb_epochs=100):
batch_size=32, nb_filters=16, use_residual=True, use_bottleneck=True, depth=4,
kernel_size=20, nb_epochs=4):
self.output_directory = output_directory
self.nb_filters = nb_filters
......@@ -74,7 +112,7 @@ class Classifier_DEEPEYE:
        print(40 * '*')
        # self.model.save_weights(self.output_directory + 'model_init.hdf5')

    @staticmethod
    def _eeg_preprocessing(input_tensor, c, F1, D, kernLength):
        """
......@@ -153,8 +191,8 @@ class Classifier_DEEPEYE:
        else:
            input_layer = keras.layers.Input((input_shape[0], input_shape[1], 1))

        eeg_tensor = self._eeg_preprocessing(input_layer, None, self.F1, self.D, self.kernLength)
        x = eeg_tensor
        input_res = eeg_tensor
......@@ -183,9 +221,9 @@ class Classifier_DEEPEYE:
        # run inception over each cluster
        for cluster in config['cluster'].keys():
            output.append(self.build_model(input_shape=None,
                                           X=tf.expand_dims(tf.transpose(tf.nn.embedding_lookup(tf.transpose(input_layer, (1, 0, 2)), config['cluster'][cluster]), (1, 0, 2)), axis=-1),
                                           c=cluster))
            output.append(self.build_model(input_shape=None,
                                           X=tf.expand_dims(tf.transpose(tf.nn.embedding_lookup(tf.transpose(input_layer, (1, 0, 2)),
                                                                                                config['cluster'][cluster]), (1, 0, 2)), axis=-1), c=cluster))
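            # The transpose/embedding_lookup/transpose sequence gathers the rows
            # listed in config['cluster'][cluster] along the first data axis
            # (assuming a channel-first layout, this selects that cluster's
            # electrodes), and expand_dims appends the trailing singleton
            # dimension the conv layers expect.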
        # gather the cluster outputs, then apply one dense layer with last_channel dimension and the output layer
......@@ -207,6 +245,8 @@ class Classifier_DEEPEYE:
        early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=10)
        ckpt_dir = config['model_dir'] + '/' + config['model'] + '_' + 'best_model.h5'
        ckpt = tf.keras.callbacks.ModelCheckpoint(ckpt_dir, verbose=1, monitor='val_accuracy', save_best_only=True, mode='auto')
        hist = self.model.fit(deepeye_x, y, verbose=1, validation_split=0.2, epochs=self.nb_epochs,
                              callbacks=[csv_logger, ckpt, early_stop, reduce_lr])
        return hist
        X_train, X_val, y_train, y_val = train_test_split(deepeye_x, y, test_size=0.2, random_state=42)
        pred_ens = prediction_history((X_val, y_val))
        hist = self.model.fit(X_train, y_train, verbose=1, validation_data=(X_val, y_val), epochs=self.nb_epochs,
                              callbacks=[csv_logger, ckpt, early_stop, reduce_lr, pred_ens])
        return hist, pred_ens
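        # After this commit, fit() returns both the Keras History and the
        # prediction_history callback; run() unpacks them as:
        #   hist, pred_ens = classifier.fit(deepeye_x=trainX, y=trainY)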
......@@ -9,7 +9,7 @@ from keras.callbacks import CSVLogger
def run(trainX, trainY):
    logging.info("Starting InceptionTime.")
    classifier = Classifier_INCEPTION(output_directory=config['root_dir'], input_shape=config['inception']['input_shape'])
    hist = classifier.fit(trainX, trainY)
    plot_loss(hist, config['model_dir'], config['model'], True)
    plot_acc(hist, config['model_dir'], config['model'], True)
......@@ -66,7 +66,7 @@ class Classifier_INCEPTION:
        return model

    def _inception_module(self, input_tensor, nb_filters=32, use_bottleneck=True, kernel_size=40, bottleneck_size=32,
    def _inception_module(self, input_tensor, nb_filters=64, use_bottleneck=True, kernel_size=40, bottleneck_size=32,
                          stride=1, activation='linear'):

        if use_bottleneck and int(input_tensor.shape[-1]) > 1:
......@@ -76,7 +76,7 @@ class Classifier_INCEPTION:
            input_inception = input_tensor

        # kernel_size_s = [3, 5, 8, 11, 17]
        kernel_size_s = [kernel_size // (2 ** i) for i in range(3)]
        kernel_size_s = [kernel_size // (3 ** i) for i in range(3)]
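        # With the default kernel_size=40 this changes the branch kernels from
        # [40, 20, 10] (base 2) to [40, 13, 4] (base 3, floor division), i.e. a
        # wider spread of temporal receptive fields per inception module.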
        conv_list = []
        for i in range(len(kernel_size_s)):
......@@ -105,7 +105,7 @@ class Classifier_INCEPTION:
        x = keras.layers.Activation('relu')(x)
        return x
    def _build_model(self, input_shape, X=[], nb_filters=32, use_residual=True, use_bottleneck=True, depth=6, kernel_size=40):
    def _build_model(self, input_shape, X=[], nb_filters=32, use_residual=True, use_bottleneck=True, depth=9, kernel_size=40):

        if config['split']:
            input_layer = X
......@@ -137,6 +137,6 @@ class Classifier_INCEPTION:
        early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=20)
        ckpt_dir = config['model_dir'] + '/' + config['model'] + '_' + 'best_model.h5'
        ckpt = tf.keras.callbacks.ModelCheckpoint(ckpt_dir, verbose=1, monitor='val_accuracy', save_best_only=True, mode='auto')
        hist = self.model.fit(xception_x, y, verbose=1, validation_split=0.2, epochs=35,
        hist = self.model.fit(inception_x, y, verbose=1, validation_split=0.2, epochs=50,
                              callbacks=[csv_logger, ckpt, early_stop])
        return hist
......@@ -15,7 +15,8 @@ config['tmp_dir'] = './tmp/'
# Path to training, validation and test data folders.
# PS: Note that we have to upload the data to the server!!!
# config['data_dir'] = '/cluster/home/your_username/data/'
config['data_dir'] = './'
# config['data_dir'] = '/cluster/home/okiss/Project/Project/data/'
config['data_dir'] = '/Users/oki/Master/DL_clean/data/'
config['data_dir_server'] = '/cluster/project/infk/zigeng/preprocessed2/'
# Path of root
config['root_dir'] = '.'
......@@ -35,13 +36,14 @@ deepeye: Our method
"""
# Choosing model
config['model'] = 'cnn'
config['model'] = 'deepeye'
config['downsampled'] = False
config['split'] = True
config['split'] = False
config['cluster'] = clustering()
if config['split']:
    config['model'] = config['model'] + '_cluster'
config['ensemble'] = 3  # number of models in the ensemble method
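# For example, with config['model'] = 'deepeye' and config['ensemble'] = 3,
# DeepEye.run() renames the model to 'deepeye_ensemble' and averages the
# validation predictions of three independently trained members.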
config['trainX_file'] = 'noweEEG.mat' if config['downsampled'] else 'all_EEGprocuesan.mat'
config['trainY_file'] = 'all_trialinfoprosan.mat'
......
......@@ -2,19 +2,26 @@ from config import config
import time
from CNN import CNN
from utils import IOHelper
from DeepEye import deepEye, RNNdeep, deepEye2, deepEye3, Xception
from InceptionTime import inception
from EEGNet import eegNet
import numpy as np
import logging
import scipy
import h5py
def main():
    logging.basicConfig(filename=config['info_log'], level=logging.INFO)
    logging.info('Started the Logging')
    start_time = time.time()

    # try:
    trainX, trainY = IOHelper.get_mat_data(config['data_dir'], verbose=True)
    # trainX, trainY = IOHelper.get_mat_data(config['data_dir'], verbose=True)
    f = scipy.io.loadmat('trainX.mat')
    trainX = f['trainX'].reshape(-1, 500, 129)[:20, ...]
    trainY = scipy.io.loadmat('trainY.mat')['trainY'][:20]
    print(np.shape(trainX))
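    # Debug shortcut: this loads local .mat files and keeps only the first 20
    # trials of shape (500 time samples, 129 channels), presumably for a quick
    # local smoke test rather than a full training run.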
    if config['model'] == 'cnn' or config['model'] == 'cnn_cluster':
        logging.info("Started running CNN. If you want to run other methods please choose another model in the config.py file.")
......@@ -47,7 +54,7 @@ def main():
    elif config['model'] == 'xception' or config['model'] == 'xception_cluster':
        logging.info("Started running XceptionTime. If you want to run other methods please choose another model in the config.py file.")
        Xception.run(trainX=trainX, trainY=trainY)

    elif config['model'] == 'deepeye-lstm' or config['model'] == 'deepeye-lstm_cluster':
        logging.info("Started running deepeye-lstm. If you want to run other methods please choose another model in the config.py file.")
        RNNdeep.run(trainX=trainX, trainY=trainY)
......
Accuracy,Loss
0.5900683403015137,0.6709842085838318
loss,accuracy,val_loss,val_accuracy
0.6709842085838318,0.5900683403015137,0.6837942004203796,0.6404416561126709
best_model_train_loss,best_model_val_loss,best_model_train_acc,best_model_val_acc
0.706407904624939,0.6844089031219482,0.5264661908149719,0.5702930688858032
Accuracy,Loss
0.5264661908149719,0.706407904624939
loss,accuracy,val_loss,val_accuracy
0.706407904624939,0.5264661908149719,0.6844089031219482,0.5702930688858032
loss,accuracy,val_loss,val_accuracy
0.7035536170005798,0.5250993967056274,0.7370455265045166,0.5136612057685852
loss,accuracy,val_loss,val_accuracy
0.7065919041633606,0.5247266292572021,0.7046213150024414,0.508693516254425
File added
File added
......@@ -10,7 +10,7 @@ import os
sns.set_style('darkgrid')
import logging
def plot_acc(hist, output_directory, model,val=False):
def plot_acc(hist, output_directory, model, val=False):
    '''
    plot the accuracy against the epochs during training
    '''
......@@ -21,6 +21,7 @@ def plot_acc(hist, output_directory, model,val=False):
    plt.plot(epochs, hist.history['accuracy'], 'b-', label='training')
    if val:
        plt.plot(epochs, hist.history['val_accuracy'], 'g-', label='validation')
    plt.legend()
    plt.xlabel('epochs')
    plt.ylabel('Accuracy')
......@@ -29,14 +30,16 @@ def plot_acc(hist, output_directory, model,val=False):
    logging.info(10 * '*' + '\n')
def plot_loss(hist, output_directory, model,val=False):
def plot_loss(hist, output_directory, model, val=False):
    epochs = len(hist.history['accuracy'])
    epochs = np.arange(epochs)
    plt.figure()
    plt.title(model + ' loss')
    plt.plot(epochs,hist.history['loss'], 'b-', label='training')
    plt.plot(epochs, hist.history['loss'], 'b-', label='training')
    if val:
        plt.plot(epochs,hist.history['val_loss'], 'g-', label='validation')
        plt.plot(epochs, hist.history['val_loss'], 'g-', label='validation')
    plt.legend()
    plt.xlabel('epochs')
    plt.ylabel('Binary Cross Entropy')
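# A minimal usage sketch for the two plotting helpers (hist is the History
# object returned by model.fit; the path and model name are illustrative):
#
#   hist = model.fit(X_train, y_train, validation_split=0.2, epochs=10)
#   plot_loss(hist, './runs', 'deepeye', val=True)
#   plot_acc(hist, './runs', 'deepeye', val=True)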
......@@ -56,7 +59,7 @@ def plot_loss_torch(loss, output_directory, model):
# plt.show()
# Save the logs
# Save the logs
def save_logs(hist, output_directory, model, pytorch=False):
    # os.mkdir(output_directory)
    if pytorch:
......@@ -70,7 +73,7 @@ def save_logs(hist, output_directory, model, pytorch=False):
    try:
        hist_df = pd.DataFrame(hist.history)
        hist_df.to_csv(output_directory + '/' + model + '_' + 'history.csv', index=False)

        # df_metrics = {'Accuracy': hist_df['accuracy'], 'Loss': hist_df['loss']}
        # df_metrics = pd.DataFrame(df_metrics)
        # df_metrics.to_csv(output_directory + '/' + model + '_' + 'df_metrics.csv', index=False)
......@@ -80,12 +83,12 @@ def save_logs(hist, output_directory, model, pytorch=False):
        df_best_model = pd.DataFrame(data=np.zeros((1, 4), dtype=np.float), index=[0],
                                     columns=['best_model_train_loss', 'best_model_val_loss', 'best_model_train_acc', 'best_model_val_acc'])
        df_best_model['best_model_train_loss'] = row_best_model['loss']
        df_best_model['best_model_val_loss'] = row_best_model['val_loss']
        df_best_model['best_model_train_acc'] = row_best_model['accuracy']
        df_best_model['best_model_val_acc'] = row_best_model['val_accuracy']
        df_best_model.to_csv(output_directory + '/' + model + '_' + 'df_best_model.csv', index=False)
    except:
        return
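# Usage note: save_logs(hist, './runs', 'deepeye') writes 'deepeye_history.csv'
# and 'deepeye_df_best_model.csv' into the given directory (path and model
# name illustrative).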
......@@ -99,4 +102,4 @@ def save_logs(hist, output_directory, model, pytorch=False):
# else:
# classifier.save(output_directory + '/' + model + '_' + 'model.h5')
# except:
# return
\ No newline at end of file
# return