Commit 6529c1ed authored by okiss's avatar okiss
Browse files

ensemble clean

parent eb6d6f28
......@@ -5,16 +5,6 @@ import logging
from ConvNet import ConvNet
from tensorflow.keras.constraints import max_norm
def run(trainX, trainY):
    """Train the CNN classifier and persist its loss/accuracy plots and logs."""
    logging.info("Starting CNN.")
    model_dir, model_name = config['model_dir'], config['model']
    classifier = Classifier_CNN(input_shape=config['cnn']['input_shape'])
    hist = classifier.fit(trainX, trainY)
    # Emit both training curves with identical arguments.
    for plot in (plot_loss, plot_acc):
        plot(hist, model_dir, model_name, True)
    save_logs(hist, model_dir, model_name, pytorch=False)
class Classifier_CNN(ConvNet):
"""
......@@ -30,4 +20,4 @@ class Classifier_CNN(ConvNet):
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation(activation='relu')(x)
x = tf.keras.layers.MaxPool1D()(x)
return x
\ No newline at end of file
return x
......@@ -19,9 +19,8 @@ class prediction_history(tf.keras.callbacks.Callback):
self.predhis.append(prediction)
class ConvNet(ABC):
def __init__(self, input_shape, kernel_size=32, nb_filters=32, verbose=True, batch_size=64, use_residual=False, depth=6, preprocessing = False):
def __init__(self, input_shape, kernel_size=32, nb_filters=32, verbose=True, batch_size=64, use_residual=False, depth=6, epochs=2, preprocessing = False):
self.use_residual = use_residual
self.depth = depth
self.callbacks = None
......@@ -31,6 +30,7 @@ class ConvNet(ABC):
self.nb_filters = nb_filters
self.preprocessing = preprocessing
self.input_shape = input_shape
self.epochs = epochs
if config['split']:
self.model = self._split_model()
......@@ -109,5 +109,5 @@ class ConvNet(ABC):
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=42)
pred_ensemble = prediction_history((X_val,y_val))
hist = self.model.fit(X_train, y_train, verbose=1, batch_size=self.batch_size, validation_data=(X_val,y_val),
epochs=2, callbacks=[csv_logger, ckpt, early_stop,pred_ensemble])
epochs=self.epochs, callbacks=[csv_logger, ckpt, early_stop,pred_ensemble])
return hist, pred_ensemble
......@@ -6,41 +6,6 @@ from ConvNet import ConvNet
from tensorflow.keras.constraints import max_norm
def run(trainX, trainY):
    """
    Starts the DeepEye and stores the histogram, the plots of loss and accuracy.
    """
    # NOTE(review): the source was scraped with indentation stripped; the nesting
    # below is reconstructed from the logic (sum per-model predictions inside the
    # loop, average and score them once afterwards) — confirm against the repo.
    logging.info("Starting DeepEye.")
    acc = tf.keras.metrics.BinaryAccuracy()
    bce = tf.keras.losses.BinaryCrossentropy()
    if config['ensemble']>1:
        # Tag the model name so output artifacts of the ensemble are distinguishable.
        config['model']+='_ensemble'
    loss=[]
    accuracy=[]
    # Train `ensemble` independent DeepEye models and accumulate the sum of
    # their per-batch validation predictions in `pred`.
    for i in range(config['ensemble']):
        print('begin model number {}'.format(i))
        classifier = Classifier_DEEPEYE(input_shape=config['deepeye']['input_shape'])
        # fit returns the Keras History plus a prediction_history callback
        # holding per-batch validation predictions/targets.
        hist, pred_ensemble = classifier.fit(trainX,trainY)
        if i == 0:
            pred = pred_ensemble.predhis
        else:
            # Element-wise sum with the predictions recorded so far.
            for j, pred_epoch in enumerate(pred_ensemble.predhis):
                pred[j] = (np.array(pred[j])+np.array(pred_epoch))
    # Average the summed predictions and derive ensemble loss/accuracy.
    # NOTE(review): `pred_ensemble.targets` are those of the LAST model's
    # validation split — presumably identical across members (fixed
    # random_state); verify.
    for j, pred_epoch in enumerate(pred):
        pred_epoch = (pred_epoch/config['ensemble']).tolist()
        loss.append(bce(pred_ensemble.targets[j],pred_epoch).numpy())
        pred_epoch = np.round(pred_epoch,0)
        # (p + t - 1)^2 is 1 when rounded prediction equals the binary target,
        # 0 otherwise, so the mean is the ensemble accuracy.
        accuracy.append(np.mean((np.array(pred_epoch).reshape(-1)+np.array(pred_ensemble.targets[j]).reshape(-1)-1)**2))
    # Overwrite the last model's validation history with the ensemble metrics
    # before plotting/saving.
    hist.history['val_loss'] = loss
    hist.history['val_accuracy'] = accuracy
    plot_loss(hist, config['model_dir'], config['model'],val = True)
    plot_acc(hist, config['model_dir'], config['model'], val = True)
    save_logs(hist, config['model_dir'], config['model'], pytorch = False)
    #save_model_param(classifier.model, config['model_dir'], config['model'], pytorch=False)
class Classifier_DEEPEYE(ConvNet):
"""
The Classifier_DeepEye is the architecture that combines many ideas from InceptionTime, Xception ana EEGNet.
......
......@@ -5,16 +5,6 @@ import logging
from ConvNet import ConvNet
from tensorflow.keras.constraints import max_norm
def run(trainX, trainY):
    """Train the DeepEye-RNN classifier and persist its plots and logs."""
    logging.info("Starting DeepEye with RNN.")
    out_dir, tag = config['model_dir'], config['model']
    classifier = Classifier_DEEPEYE_RNN(input_shape=config['deepeye-rnn']['input_shape'])
    hist = classifier.fit(trainX, trainY)
    # Store the loss and accuracy curves, then the run logs.
    plot_loss(hist, out_dir, tag, True)
    plot_acc(hist, out_dir, tag, True)
    save_logs(hist, out_dir, tag, pytorch=False)
class Classifier_DEEPEYE_RNN(ConvNet):
"""
......@@ -69,4 +59,4 @@ class Classifier_DEEPEYE_RNN(ConvNet):
x = tf.keras.layers.Concatenate(axis=2)(conv_list)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation(activation='relu')(x)
return x
\ No newline at end of file
return x
......@@ -15,16 +15,19 @@ from keras.callbacks import CSVLogger
import numpy as np
import logging
def run(trainX, trainY):
    """
    Starts the EEGNet and stores the histogram, the plots of loss and accuracy.
    """
    classifier = Classifier_EEGNet()
    # Classifier_EEGNet.fit returns (history, prediction_history callback);
    # the result must be unpacked, otherwise plot_loss/plot_acc would receive
    # a tuple instead of the Keras History object.
    hist, pred_ensemble = classifier.fit(trainX, trainY)
    plot_loss(hist, config['model_dir'], config['model'], True)
    plot_acc(hist, config['model_dir'], config['model'], True)
    save_logs(hist, config['model_dir'], config['model'], pytorch=False)
from sklearn.model_selection import train_test_split
class prediction_history(tf.keras.callbacks.Callback):
    """Keras callback that records the model's validation predictions and the
    matching targets at the end of every batch, for later ensemble averaging.
    """

    def __init__(self, val_data):
        # Initialize the base Callback so Keras can attach model/params state
        # (the original omitted this).
        super().__init__()
        self.val_data = val_data  # (x_val, y_val) pair used for every snapshot
        self.predhis = []         # one prediction array appended per batch end
        self.targets = []         # ground-truth labels aligned with predhis

    def on_batch_end(self, epoch, logs=None):
        # `logs=None` replaces the mutable default `logs={}`; the parameter
        # name `epoch` is kept for signature compatibility even though Keras
        # passes the batch index here.
        x_val, y_val = self.val_data
        self.targets.append(y_val)
        prediction = self.model.predict(x_val)
        self.predhis.append(prediction)
class Classifier_EEGNet:
"""
......@@ -36,7 +39,7 @@ class Classifier_EEGNet:
def __init__(self, nb_classes=1, chans = config['eegnet']['channels'],
samples = config['eegnet']['samples'], dropoutRate = 0.5, kernLength = 250, F1 = 16,
D = 4, F2 = 256, norm_rate = 0.5, dropoutType = 'Dropout', verbose = True, build = True, X = None):
D = 4, F2 = 256, norm_rate = 0.5, dropoutType = 'Dropout', epochs = 50, verbose = True, build = True, X = None):
self.nb_classes = nb_classes
self.chans = chans
......@@ -48,6 +51,7 @@ class Classifier_EEGNet:
self.F2 = F2
self.norm_rate = norm_rate
self.dropoutType = dropoutType
self.epochs = epochs
self.verbose = verbose
if build:
......@@ -135,5 +139,9 @@ class Classifier_EEGNet:
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=20)
ckpt_dir = config['model_dir'] + '/' + config['model'] + '_' + 'best_model.h5'
ckpt = tf.keras.callbacks.ModelCheckpoint(ckpt_dir, verbose=1, monitor='val_accuracy', save_best_only=True, mode='auto')
hist = self.model.fit(eegnet_x, y, verbose=1, validation_split=0.2, epochs=50, callbacks=[csv_logger, ckpt, early_stop])
return hist
X_train, X_val, y_train, y_val = train_test_split(eegnet_x, y, test_size=0.2, random_state=42)
pred_ensemble = prediction_history((X_val,y_val))
hist = self.model.fit(X_train, y_train, verbose=1, validation_data=(X_val,y_val),
epochs=self.epochs, callbacks=[csv_logger, ckpt, early_stop,pred_ensemble])
return hist, pred_ensemble
......@@ -5,19 +5,6 @@ import logging
from ConvNet import ConvNet
def run(trainX, trainY):
    """Train the InceptionTime classifier and persist its plots and logs."""
    logging.info("Starting InceptionTime.")
    classifier = Classifier_INCEPTION(input_shape=config['inception']['input_shape'])
    history = classifier.fit(trainX, trainY)
    destination, label = config['model_dir'], config['model']
    # Save the curves and the textual logs for this run.
    plot_loss(history, destination, label, True)
    plot_acc(history, destination, label, True)
    save_logs(history, destination, label, pytorch=False)
class Classifier_INCEPTION(ConvNet):
"""
The InceptionTime architecture used as baseline. This is the architecture explained in the paper
......
......@@ -4,17 +4,6 @@ from utils.utils import *
import logging
from ConvNet import ConvNet
def run(trainX, trainY):
    """
    Starts the Xception and stores the histogram, the plots of loss and accuracy.
    """
    # Fixed copy-paste defect: this runner previously logged
    # "Starting InceptionTime." even though it trains Xception.
    logging.info("Starting Xception.")
    # NOTE(review): the input shape is read from the 'inception' config section;
    # presumably both architectures share the same shape — confirm this is
    # intentional rather than another copy-paste leftover.
    classifier = Classifier_XCEPTION(input_shape=config['inception']['input_shape'])
    hist = classifier.fit(trainX, trainY)
    plot_loss(hist, config['model_dir'], config['model'], True)
    plot_acc(hist, config['model_dir'], config['model'], True)
    save_logs(hist, config['model_dir'], config['model'], pytorch=False)
class Classifier_XCEPTION(ConvNet):
"""
......
......@@ -49,7 +49,7 @@ config['cluster'] = clustering()
if config['split']:
config['model'] = config['model'] + '_cluster'
config['ensemble'] = 3 #number of models in the ensemble method
config['ensemble'] = 9 #number of models in the ensemble method
config['trainX_file'] = 'noweEEG.mat' if config['downsampled'] else 'all_EEGprocuesan.mat'
config['trainY_file'] = 'all_trialinfoprosan.mat'
......@@ -81,7 +81,8 @@ config['eegnet']['samples'] = 125 if config['downsampled'] else 500
# Create a unique output directory for this experiment.
timestamp = str(int(time.time()))
model_folder_name = timestamp if config['model'] == '' else timestamp + "_" + config['model']
if config['split']:
model_folder_name += '_cluster'
if config['downsampled']:
model_folder_name += '_downsampled'
if config['ensemble']>1:
......
import tensorflow as tf
from config import config
from utils.utils import *
import logging
from CNN.CNN import Classifier_CNN
from DeepEye.deepeye import Classifier_DEEPEYE
from DeepEyeRNN.deepeyeRNN import Classifier_DEEPEYE_RNN
from Xception.xception import Classifier_XCEPTION
from InceptionTime.inception import Classifier_INCEPTION
from EEGNet.eegNet import Classifier_EEGNet
import numpy as np
def run(trainX, trainY):
    """
    Starts the multiples Classifier in the Ensemble and stores the histogram,
    the plots of loss and accuracy.
    Validation metrics are those of the ensemble model; the training curves
    belong to the last trained member only.
    """
    # NOTE(review): indentation was stripped in the scraped source; nesting is
    # reconstructed (sum predictions inside the loop, average once afterwards)
    # — confirm against the repository.
    logging.info("Started running "+config['model']+". If you want to run other methods please choose another model in the config.py file.")
    # acc = tf.keras.metrics.BinaryAccuracy()
    bce = tf.keras.losses.BinaryCrossentropy()
    # One constructor per supported model key; all members of the ensemble use
    # the same architecture.
    builders = {
        'deepeye': lambda: Classifier_DEEPEYE(input_shape=config['deepeye']['input_shape']),
        'cnn': lambda: Classifier_CNN(input_shape=config['cnn']['input_shape']),
        'eegnet': lambda: Classifier_EEGNet(),
        'inception': lambda: Classifier_INCEPTION(input_shape=config['inception']['input_shape']),
        # NOTE(review): xception reuses the 'inception' input shape, as in the
        # original code — presumably they share it; verify.
        'xception': lambda: Classifier_XCEPTION(input_shape=config['inception']['input_shape']),
        'deepeye-rnn': lambda: Classifier_DEEPEYE_RNN(input_shape=config['deepeye-rnn']['input_shape']),
    }
    if config['model'] not in builders:
        # Bug fix: the original only logged here and then fell through to
        # classifier.fit(...), crashing with a NameError on `classifier`.
        logging.info('Cannot start the program. Please choose one model in the config.py file')
        return
    loss = []
    accuracy = []
    # Train each ensemble member and accumulate the element-wise SUM of their
    # per-batch validation predictions in `pred`.
    for i in range(config['ensemble']):
        print('beginning model number {}/{} ...'.format(i, config['ensemble']))
        classifier = builders[config['model']]()
        # fit returns the Keras History plus a prediction_history callback.
        hist, pred_ensemble = classifier.fit(trainX, trainY)
        if i == 0:
            pred = pred_ensemble.predhis
        else:
            for j, pred_epoch in enumerate(pred_ensemble.predhis):
                pred[j] = (np.array(pred[j]) + np.array(pred_epoch))
    # Average the summed predictions and derive the ensemble's loss/accuracy.
    # Targets come from the last member's callback; the fixed random_state in
    # the members' train/val split keeps them aligned.
    for j, pred_epoch in enumerate(pred):
        pred_epoch = (pred_epoch / config['ensemble']).tolist()
        loss.append(bce(pred_ensemble.targets[j], pred_epoch).numpy())
        pred_epoch = np.round(pred_epoch, 0)
        # (p + t - 1)^2 equals 1 when the rounded prediction matches the
        # binary target and 0 otherwise, so the mean is the accuracy.
        accuracy.append(np.mean((np.array(pred_epoch).reshape(-1) + np.array(pred_ensemble.targets[j]).reshape(-1) - 1) ** 2))
    if config['ensemble'] > 1:
        config['model'] += '_ensemble'
    if config['split']:
        config['model'] = config['model'] + '_cluster'
    # Replace the last member's validation history with the ensemble metrics
    # before plotting and saving.
    hist.history['val_loss'] = loss
    hist.history['val_accuracy'] = accuracy
    plot_loss(hist, config['model_dir'], config['model'], val=True)
    plot_acc(hist, config['model_dir'], config['model'], val=True)
    save_logs(hist, config['model_dir'], config['model'], pytorch=False)
from config import config
import time
from CNN import CNN
from utils import IOHelper
from DeepEye import deepeye
from DeepEyeRNN import deepeyeRNN
from Xception import xception
from InceptionTime import inception
from EEGNet import eegNet
from ensemble import run
import numpy as np
import logging
import scipy
from scipy import io
import h5py
import logging
import time
def main():
logging.basicConfig(filename=config['info_log'], level=logging.INFO)
logging.info('Started the Logging')
start_time = time.time()
# try:
trainX, trainY = IOHelper.get_mat_data(config['data_dir'], verbose=True)
# trainX, trainY = IOHelper.get_mat_data(config['data_dir'], verbose=True)
f = io.loadmat('trainX.mat')
trainX = f['trainX'].reshape(-1,500,129)[:20,...]
trainY=io.loadmat('trainY.mat')['trainY'][:20]
if config['model'] == 'eegnet' or config['model'] == 'eegnet_cluster':
trainX = np.transpose(trainX, (0, 2, 1))
logging.info(trainX.shape)
run(trainX,trainY)
'''
if config['model'] == 'cnn' or config['model'] == 'cnn_cluster':
logging.info("Started running CNN. If you want to run other methods please choose another model in the config.py file.")
CNN.run(trainX, trainY)
......@@ -49,7 +51,7 @@ def main():
else:
logging.info('Cannot start the program. Please choose one model in the config.py file')
'''
logging.info("--- Runtime: %s seconds ---" % (time.time() - start_time))
logging.info('Finished Logging')
......
File added
File added
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment