Commit bfa14c94 authored by Ard Kastrati's avatar Ard Kastrati
Browse files

Logging also the configuration parameters

parent 067b5b8e
......@@ -5,6 +5,7 @@ import tensorflow as tf
import tensorflow.keras as keras
from config import config
from keras.callbacks import CSVLogger
import logging
class prediction_history(tf.keras.callbacks.Callback):
def __init__(self, val_data):
......@@ -31,6 +32,13 @@ class ConvNet(ABC):
self.preprocessing = preprocessing
self.input_shape = input_shape
self.epochs = epochs
logging.info('Parameters: ')
logging.info('--------------- use residual : ' + str(self.use_residual))
logging.info('--------------- depth : ' + str(self.depth))
logging.info('--------------- batch size : ' + str(self.batch_size))
logging.info('--------------- kernel size : ' + str(self.kernel_size))
logging.info('--------------- nb filters : ' + str(self.nb_filters))
logging.info('--------------- preprocessing: ' + str(self.preprocessing))
if config['split']:
self.model = self._split_model()
......
......@@ -32,6 +32,13 @@ class Classifier_DEEPEYE(ConvNet):
verbose=verbose, batch_size=batch_size, use_residual=use_residual,
depth=depth, preprocessing=preprocessing, epochs=epochs)
if preprocessing: logging.info('--------------- preprocessing_F1 : ' + str(self.preprocessing_F1))
if preprocessing: logging.info('--------------- preprocessing_D : ' + str(self.preprocessing_D))
if preprocessing: logging.info('--------------- preprocessing_kernLength : ' + str(self.preprocessing_kernLength))
logging.info('--------------- bottleneck_size : ' + str(self.bottleneck_size))
logging.info('--------------- use_simple_convolution : ' + str(self.use_simple_convolution))
logging.info('--------------- use_separable_convolution : ' + str(self.use_separable_convolution))
def _preprocessing(self, input_tensor):
"""
This is the implementation of preprocessing for deepeye. It is inpired by EEGNet which offers a way to filter the signal
......
......@@ -53,6 +53,15 @@ class Classifier_EEGNet:
self.dropoutType = dropoutType
self.epochs = epochs
self.verbose = verbose
logging.info('Parameters...')
logging.info('--------------- chans : ' + str(self.chans))
logging.info('--------------- samples : ' + str(self.samples))
logging.info('--------------- dropoutRate : ' + str(self.dropoutRate))
logging.info('--------------- kernLength : ' + str(self.kernLength))
logging.info('--------------- F1 : ' + str(self.F1))
logging.info('--------------- D : ' + str(self.D))
logging.info('--------------- F2 : ' + str(self.F2))
logging.info('--------------- norm_rate : ' + str(self.norm_rate))
if build:
if config['split']:
......
......@@ -17,6 +17,7 @@ class Classifier_INCEPTION(ConvNet):
def __init__(self, input_shape, kernel_size=40, epochs=1, nb_filters=32, verbose=True, batch_size=64, use_residual=True, depth=10, bottleneck_size=32):
    """Set up the Inception-style classifier.

    Records the bottleneck size on the instance, then delegates all of the
    shared model configuration to the ConvNet base class. After the base
    initialisation the chosen bottleneck size is logged alongside the
    parameters the parent already reported.
    """
    self.bottleneck_size = bottleneck_size
    # Hand every shared hyper-parameter through to the ConvNet base class.
    super(Classifier_INCEPTION, self).__init__(
        input_shape,
        kernel_size=kernel_size,
        epochs=epochs,
        nb_filters=nb_filters,
        verbose=verbose,
        batch_size=batch_size,
        use_residual=use_residual,
        depth=depth,
    )
    # Log the one parameter the parent class does not know about.
    logging.info('--------------- bottleneck_size : ' + str(self.bottleneck_size))
def _module(self, input_tensor, current_depth):
"""
......
......@@ -49,7 +49,7 @@ config['cluster'] = clustering()
if config['split']:
config['model'] = config['model'] + '_cluster'
config['ensemble'] = 9 #number of models in the ensemble method
config['ensemble'] = 5 #number of models in the ensemble method
config['trainX_file'] = 'noweEEG.mat' if config['downsampled'] else 'all_EEGprocuesan.mat'
config['trainY_file'] = 'all_trialinfoprosan.mat'
......
......@@ -73,8 +73,8 @@ def tune(trainX, trainY):
tuner = RandomSearch(
build_model,
objective='val_accuracy',
max_trials=5,
executions_per_trial=3,
max_trials=200,
executions_per_trial=2,
directory='my_dir',
project_name='testKerasTuner')
......
......@@ -24,9 +24,9 @@ def main():
trainX = np.transpose(trainX, (0, 2, 1))
logging.info(trainX.shape)
tune(trainX,trainY)
# tune(trainX,trainY)
# run(trainX,trainY)
run(trainX,trainY)
# select_best_model()
# comparison_plot(n_best = 4)
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment