Commit 45fb2bc7 authored by Martyna Plomecka's avatar Martyna Plomecka
Browse files

Prepared kerastuner

parent bfa14c94
......@@ -7,3 +7,5 @@ all_EEGprocuesan.mat
all_trialinfoprosan.mat
noweEEG.mat
/runs/*
/kerasTunerResults/*
lsf*
......@@ -49,7 +49,7 @@ config['cluster'] = clustering()
if config['split']:
config['model'] = config['model'] + '_cluster'
config['ensemble'] = 5 #number of models in the ensemble method
config['ensemble'] = 1 #number of models in the ensemble method
config['trainX_file'] = 'noweEEG.mat' if config['downsampled'] else 'all_EEGprocuesan.mat'
config['trainY_file'] = 'all_trialinfoprosan.mat'
......
......@@ -16,14 +16,16 @@ def build_model(hp):
logging.info('Starting tuning ' + config['model'])
if config['model'] == 'deepeye':
classifier = Classifier_DEEPEYE(input_shape=config['deepeye']['input_shape'],
epochs=2, verbose=True, batch_size=64, use_residual=True,
kernel_size=hp.Choice('kernel_size', values=[40, 64]),
epochs=15, verbose=True, batch_size=64, use_residual=True,
kernel_size=hp.Choice('kernel_size', values=[32, 40, 64]),
nb_filters=hp.Choice('nb_filters', values=[32, 64]),
depth=hp.Int('depth',min_value=6,max_value=20, step=4),
depth=hp.Int('depth',min_value=6,max_value=14, step=4),
bottleneck_size=hp.Choice('bottleneck_size', values=[32, 64]),
use_simple_convolution= hp.Choice('use_simple_convolution', values=[True, False]),
use_separable_convolution=hp.Choice('use_separable_convolution', values=[True, False]),
use_separable_convolution= hp.Choice('use_separable_convolution', values = [True, False]),
preprocessing=False)
# use_separable_convolution=hp.Choice('use_separable_convolution', values=[True, False]),
# preprocessing_F1 = hp.Choice('preprocessing_F1', values=[8, 16, 32, 64]),
# preprocessing_D = hp.Choice('preprocessing_F1', values=[2, 4, 6, 8]),
# preprocessing_kernLength = hp.Choice('preprocessing_kernlength', values=[64, 125, 250]),
......@@ -44,7 +46,7 @@ def build_model(hp):
elif config['model'] == 'inception':
classifier = Classifier_INCEPTION(input_shape=config['inception']['input_shape'],
epochs=2, verbose=True, batch_size=64,
epochs=15, verbose=True, batch_size=64,
use_residual=hp.Choice('use_residual', values=[True, False]),
kernel_size=hp.Choice('kernel_size', values=[40, 32, 64]),
nb_filters=hp.Choice('nb_filters', values=[16, 32, 64]),
......@@ -54,7 +56,7 @@ def build_model(hp):
elif config['model'] == 'xception':
classifier = Classifier_XCEPTION(input_shape=config['inception']['input_shape'],
epochs=2, verbose=True, batch_size=64,
epochs=15, verbose=True, batch_size=64,
use_residual=hp.Choice('use_residual', values=[True, False]),
kernel_size=hp.Choice('kernel_size', values=[40, 32, 64]),
nb_filters=hp.Choice('nb_filters', values=[16, 32, 64]),
......@@ -73,14 +75,14 @@ def tune(trainX, trainY):
tuner = RandomSearch(
build_model,
objective='val_accuracy',
max_trials=200,
executions_per_trial=2,
directory='my_dir',
project_name='testKerasTuner')
max_trials=32,
executions_per_trial=1,
directory='kerasTunerResults',
project_name='KerasTuner')
print(trainX.shape)
tuner.search_space_summary()
X_train, X_val, y_train, y_val = train_test_split(trainX, trainY, test_size=0.2, random_state=42)
tuner.search(X_train, y_train, epochs=1, validation_data=(X_val, y_val))
tuner.search(X_train, y_train, epochs=15, validation_data=(X_val, y_val), verbose=2)
tuner.results_summary()
......@@ -24,9 +24,9 @@ def main():
trainX = np.transpose(trainX, (0, 2, 1))
logging.info(trainX.shape)
# tune(trainX,trainY)
tune(trainX,trainY)
run(trainX,trainY)
# run(trainX,trainY)
# select_best_model()
# comparison_plot(n_best = 4)
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment