Commit aac28309 authored by Ard Kastrati

Added keras tuner

parent bbbffb59
......@@ -101,6 +101,9 @@ class ConvNet(ABC):
model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.Adam(), metrics=['accuracy'])
return model
def get_model(self):
return self.model
def fit(self, x, y):
csv_logger = CSVLogger(config['batches_log'], append=True, separator=';')
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=20)
......@@ -109,5 +112,5 @@ class ConvNet(ABC):
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=42)
pred_ensemble = prediction_history((X_val,y_val))
hist = self.model.fit(X_train, y_train, verbose=1, batch_size=self.batch_size, validation_data=(X_val,y_val),
epochs=self.epochs, callbacks=[csv_logger, ckpt, early_stop, pred_ensemble])
return hist, pred_ensemble
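Side note: prediction_history is instantiated above but its definition is not part of this diff. A minimal sketch of what such a callback presumably looks like, given how it is constructed with a (X_val, y_val) tuple and passed to fit (the class name comes from the code above; the body is an assumption):

```python
import tensorflow as tf

class prediction_history(tf.keras.callbacks.Callback):
    """Assumed implementation: collect predictions on a held-out set after each epoch."""
    def __init__(self, validation_data):
        super().__init__()
        self.validation_data = validation_data  # (X_val, y_val) tuple, as passed above
        self.predhis = []                       # one prediction array per epoch

    def on_epoch_end(self, epoch, logs=None):
        X_val, _ = self.validation_data
        self.predhis.append(self.model.predict(X_val))
```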
......@@ -15,7 +15,7 @@ class Classifier_DEEPEYE(ConvNet):
def __init__(self, input_shape, kernel_size=40, nb_filters=32, verbose=True, batch_size=64, use_residual=True,
depth=6, bottleneck_size=32, preprocessing=True, preprocessing_F1 = 8, preprocessing_D = 2,
preprocessing_kernLength = 250, use_simple_convolution=True, use_separable_convolution=True):
preprocessing_kernLength = 250, use_simple_convolution=True, use_separable_convolution=True, epochs=1):
"""
The DeepEye architecture has the following basic structure. It offers the possibility of a preprocessing step inspired by EEGNet.
It is made of modules of a specific depth. Each module is made of the InceptionTime submodule, a separable convolution and a simple
......@@ -30,7 +30,7 @@ class Classifier_DEEPEYE(ConvNet):
if preprocessing: input_shape = input_shape + (1,)
super(Classifier_DEEPEYE, self).__init__(input_shape=input_shape, kernel_size=kernel_size, nb_filters=nb_filters,
verbose=verbose, batch_size=batch_size, use_residual=use_residual,
depth=depth, preprocessing=preprocessing)
depth=depth, preprocessing=preprocessing, epochs=epochs)
def _preprocessing(self, input_tensor):
"""
......
......@@ -14,9 +14,9 @@ class Classifier_INCEPTION(ConvNet):
Daniel F. Schmidt, Jonathan Weber, Geoffrey I. Webb, Lhassane Idoumghar, Pierre-Alain Muller, François Petitjean
"""
def __init__(self, input_shape, kernel_size=40, nb_filters=32, verbose=True, batch_size=64, use_residual=True, depth=10, bottleneck_size=32):
def __init__(self, input_shape, kernel_size=40, epochs = 1, nb_filters=32, verbose=True, batch_size=64, use_residual=True, depth=10, bottleneck_size=32):
self.bottleneck_size = bottleneck_size
super(Classifier_INCEPTION, self).__init__(input_shape, kernel_size=kernel_size, nb_filters=nb_filters, verbose=verbose, batch_size=batch_size, use_residual=use_residual, depth=depth)
super(Classifier_INCEPTION, self).__init__(input_shape, kernel_size=kernel_size, epochs=epochs, nb_filters=nb_filters, verbose=verbose, batch_size=batch_size, use_residual=use_residual, depth=depth)
def _module(self, input_tensor, current_depth):
"""
......
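The bottleneck_size parameter threaded through this constructor refers to the 1x1 convolution that InceptionTime (cited above) applies before its parallel wide convolutions to reduce channel dimensionality. A rough sketch of that step, for orientation only (names and shapes are illustrative, not the repository's _module code):

```python
from tensorflow.keras import layers

def inception_bottleneck(input_tensor, bottleneck_size=32):
    # 1x1 convolution: shrink the channel dimension before the wide kernels.
    return layers.Conv1D(filters=bottleneck_size, kernel_size=1,
                         padding='same', use_bias=False)(input_tensor)
```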
......@@ -11,9 +11,9 @@ class Classifier_XCEPTION(ConvNet):
as separable convolutions and can achieve better accuracy than the Inception architecture. It is made of modules of a specific depth.
Each module, in our implementation, consists of a separable convolution followed by batch normalization and a ReLU activation layer.
"""
def __init__(self, input_shape, kernel_size=40, nb_filters=128, verbose=True, batch_size=64, use_residual=True, depth=6):
def __init__(self, input_shape, kernel_size=40, nb_filters=128, verbose=True, epochs=1, batch_size=64, use_residual=True, depth=6):
super(Classifier_XCEPTION, self).__init__(input_shape, kernel_size=kernel_size, nb_filters=nb_filters,
verbose=verbose, batch_size=batch_size, use_residual=use_residual, depth=depth,
verbose=verbose, epochs=epochs, batch_size=batch_size, use_residual=use_residual, depth=depth,
preprocessing=False)
def _module(self, input_tensor, current_depth):
......
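For orientation, the module described in the docstring above (a separable convolution followed by batch normalization and a ReLU activation) would look roughly like the sketch below; this follows the description only, not necessarily the actual _module body:

```python
from tensorflow.keras import layers

def xception_style_module(input_tensor, nb_filters=128, kernel_size=40):
    # Separable convolution -> batch norm -> ReLU, as the docstring describes.
    x = layers.SeparableConv1D(filters=nb_filters, kernel_size=kernel_size,
                               padding='same', use_bias=False)(input_tensor)
    x = layers.BatchNormalization()(x)
    return layers.Activation('relu')(x)
```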
from tensorflow import keras
from tensorflow.keras import layers
from kerastuner.tuners import RandomSearch
from sklearn.model_selection import train_test_split
from config import config
import logging
from CNN.CNN import Classifier_CNN
from DeepEye.deepeye import Classifier_DEEPEYE
from DeepEyeRNN.deepeyeRNN import Classifier_DEEPEYE_RNN
from Xception.xception import Classifier_XCEPTION
from InceptionTime.inception import Classifier_INCEPTION
from EEGNet.eegNet import Classifier_EEGNet
def build_model(hp):
model = keras.Sequential()
model.add(layers.Dense(units=hp.Int('units',
min_value=32,
max_value=512,
step=32),
activation='relu'))
model.add(layers.Dense(10, activation='softmax'))
model.compile(
optimizer=keras.optimizers.Adam(
hp.Choice('learning_rate',
values=[1e-2, 1e-3, 1e-4])),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
return model
tuner = RandomSearch(
build_model,
objective='val_accuracy',
max_trials=5,
executions_per_trial=3,
directory='my_dir',
project_name='helloworld')
tuner.search_space_summary()
\ No newline at end of file
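Once tuner.search(...) has been run (as tune() does below), the results can be pulled out of the tuner; a short sketch using standard kerastuner calls:

```python
# Retrieve the outcome of a finished search (sketch).
best_hp = tuner.get_best_hyperparameters(num_trials=1)[0]
print(best_hp.values)  # dict of the sampled hyperparameters, e.g. {'units': 128, ...}
best_model = tuner.get_best_models(num_models=1)[0]
```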
def build_model(hp):
    classifier = None
logging.info('Starting tuning ' + config['model'])
if config['model'] == 'deepeye':
classifier = Classifier_DEEPEYE(input_shape=config['deepeye']['input_shape'],
epochs=2, verbose=True, batch_size=64, use_residual=True,
kernel_size=hp.Choice('kernel_size', values=[40, 64]),
nb_filters=hp.Choice('nb_filters', values=[32, 64]),
depth=hp.Int('depth',min_value=6,max_value=20, step=4),
bottleneck_size=hp.Choice('bottleneck_size', values=[32, 64]),
use_simple_convolution= hp.Choice('use_simple_convolution', values=[True, False]),
use_separable_convolution=hp.Choice('use_separable_convolution', values=[True, False]),
preprocessing=False)
# preprocessing_F1 = hp.Choice('preprocessing_F1', values=[8, 16, 32, 64]),
# preprocessing_D = hp.Choice('preprocessing_D', values=[2, 4, 6, 8]),
# preprocessing_kernLength = hp.Choice('preprocessing_kernlength', values=[64, 125, 250]),
elif config['model'] == 'cnn':
classifier = Classifier_CNN(input_shape=config['cnn']['input_shape'],
epochs=2, verbose=True, batch_size=64,
use_residual=hp.Choice('use_residual', values=[True, False]),
kernel_size=hp.Choice('kernel_size', values=[40, 32, 64]),
nb_filters=hp.Choice('nb_filters', values=[16, 32, 64]),
depth=hp.Int('depth', min_value=6, max_value=20, step=3),
preprocessing=False
)
elif config['model'] == 'eegnet':
classifier = Classifier_EEGNet(dropoutRate = 0.5, kernLength = 250, F1 = 16,
D = 4, F2 = 256, norm_rate = 0.5, dropoutType = 'Dropout',
epochs = 50)
elif config['model'] == 'inception':
classifier = Classifier_INCEPTION(input_shape=config['inception']['input_shape'],
epochs=2, verbose=True, batch_size=64,
use_residual=hp.Choice('use_residual', values=[True, False]),
kernel_size=hp.Choice('kernel_size', values=[40, 32, 64]),
nb_filters=hp.Choice('nb_filters', values=[16, 32, 64]),
depth=hp.Int('depth', min_value=6, max_value=20, step=3),
bottleneck_size=hp.Choice('bottleneck_size', values=[16, 32, 64])
)
elif config['model'] == 'xception':
classifier = Classifier_XCEPTION(input_shape=config['inception']['input_shape'],
epochs=2, verbose=True, batch_size=64,
use_residual=hp.Choice('use_residual', values=[True, False]),
kernel_size=hp.Choice('kernel_size', values=[40, 32, 64]),
nb_filters=hp.Choice('nb_filters', values=[16, 32, 64]),
depth=hp.Int('depth', min_value=6, max_value=20, step=3)
)
elif config['model'] == 'deepeye-rnn':
classifier = Classifier_DEEPEYE_RNN(input_shape=config['deepeye-rnn']['input_shape'])
else:
logging.info('Cannot start the program. Please choose one of the models in the config.py file')
return classifier.get_model()
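build_model receives an hp object from the tuner, so each trial samples one concrete configuration for the model selected in config.py. For illustration, the config entries read above would have roughly this shape (all values here are hypothetical, not taken from the repository):

```python
# Hypothetical config.py excerpt -- keys match the lookups above, values are invented.
config = {
    'model': 'inception',                      # selects the branch in build_model
    'inception': {'input_shape': (500, 129)},  # e.g. (timepoints, channels)
    'deepeye': {'input_shape': (500, 129)},
    'cnn': {'input_shape': (500, 129)},
    'deepeye-rnn': {'input_shape': (500, 129)},
}
```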
def tune(trainX, trainY):
tuner = RandomSearch(
build_model,
objective='val_accuracy',
max_trials=5,
executions_per_trial=3,
directory='my_dir',
project_name='testKerasTuner')
print(trainX.shape)
tuner.search_space_summary()
X_train, X_val, y_train, y_val = train_test_split(trainX, trainY, test_size=0.2, random_state=42)
tuner.search(X_train, y_train, epochs=1, validation_data=(X_val, y_val))
tuner.results_summary()
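A natural follow-up to tune() is to rebuild the winning configuration and train it for more than one epoch; a sketch assuming the tuner object is kept in scope (tune() currently creates it locally and does not return it):

```python
def retrain_best(tuner, trainX, trainY, epochs=50):
    # Rebuild a fresh model from the best hyperparameters and train it fully.
    best_hp = tuner.get_best_hyperparameters(num_trials=1)[0]
    model = tuner.hypermodel.build(best_hp)
    return model.fit(trainX, trainY, epochs=epochs, validation_split=0.2)
```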
......@@ -7,6 +7,10 @@ import h5py
import logging
import time
from kerasTuner import tune
from utils import IOHelper
def main():
logging.basicConfig(filename=config['info_log'], level=logging.INFO)
logging.info('Started the Logging')
......@@ -17,7 +21,9 @@ def main():
if config['model'] == 'eegnet' or config['model'] == 'eegnet_cluster':
trainX = np.transpose(trainX, (0, 2, 1))
logging.info(trainX.shape)
run(trainX,trainY)
# run(trainX,trainY)
tune(trainX,trainY)
logging.info("--- Runtime: %s seconds ---" % (time.time() - start_time))
logging.info('Finished Logging')
......