Commit 38d229d1 authored by Lukas Wolf

removed trash

parent 0e074dce
@@ -23,4 +23,7 @@ retrain_model.ipynb
 conv_analysis.ipynb
 ./scripts/*
 ./src/*
-./archive_code/*
\ No newline at end of file
+./archive_code/*
+./images/*
+./scripts/*
+./src/*
from abc import ABC, abstractmethod
from sklearn.model_selection import train_test_split
import tensorflow as tf
import tensorflow.keras as keras
from config import config
from tensorflow.keras.callbacks import CSVLogger
import logging
from utils.analysis import sanity_check
class prediction_history(tf.keras.callbacks.Callback):
    def __init__(self, validation_data):
        """
        predhis is a list of arrays, one per epoch, holding the model's predictions on the
        validation set. Summing these arrays across ensemble members and dividing by the
        ensemble size yields the averaged ensemble prediction.
        """
        self.validation_data = validation_data
        self.predhis = []
        self.targets = validation_data[1]
    def on_epoch_end(self, epoch, logs=None):
"""
Each epoch add an array of predictions to predhis
"""
y_pred = self.model.predict(self.validation_data[0])
self.predhis.append(y_pred)
class Regression_ConvNet(ABC):
    def __init__(self, input_shape, kernel_size=32, nb_filters=32, verbose=True, batch_size=64, use_residual=False,
                 depth=6, learning_rate=0.001, epochs=2, preprocessing=False):
self.use_residual = use_residual
self.depth = depth
self.callbacks = None
        self.batch_size = config['batch_size']  # note: the batch_size argument is overridden by the config value
self.verbose = verbose
self.kernel_size = kernel_size
self.nb_filters = nb_filters
self.preprocessing = preprocessing
self.input_shape = input_shape
        self.learning_rate = learning_rate
self.epochs = epochs
logging.info('Parameters: ')
logging.info('--------------- use residual : ' + str(self.use_residual))
logging.info('--------------- depth : ' + str(self.depth))
logging.info('--------------- batch size : ' + str(self.batch_size))
logging.info('--------------- kernel size : ' + str(self.kernel_size))
logging.info('--------------- nb filters : ' + str(self.nb_filters))
logging.info('--------------- preprocessing: ' + str(self.preprocessing))
if config['split']:
self.model = self._split_model()
else:
self.model = self._build_model()
self.model.compile(loss=config['loss'], optimizer=keras.optimizers.Adam(learning_rate=learning_rate))
if self.verbose:
self.model.summary()
def _split_model(self):
input_layer = keras.layers.Input(self.input_shape)
output = []
for c in config['cluster'].keys():
            input_shape = (self.input_shape[0], len(config['cluster'][c]))  # note: currently unused
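            # tf.nn.embedding_lookup gathers along the first axis, so the input is transposed to
            # put channels first, the channels of cluster c are gathered, and the result is
            # transposed back to the original (batch, time, channels) layout.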
            selected = tf.transpose(tf.nn.embedding_lookup(tf.transpose(input_layer), config['cluster'][c]))
            output.append(self._build_model(X=selected))
# append the results and perform 1 dense layer with last_channel dimension and the output layer
x = tf.keras.layers.Concatenate()(output)
dense = tf.keras.layers.Dense(32, activation='relu')(x)
output_layer = tf.keras.layers.Dense(1, activation='sigmoid')(dense)
model = tf.keras.models.Model(inputs=input_layer, outputs=output_layer)
return model
    @abstractmethod
    def _preprocessing(self, input_tensor):
        pass

    @abstractmethod
    def _module(self, input_tensor, current_depth):
        pass
def _shortcut_layer(self, input_tensor, out_tensor):
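        # Project input_tensor to the channel count of out_tensor with a 1x1 convolution so
        # the two tensors can be added element-wise (a standard residual shortcut).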
shortcut_y = tf.keras.layers.Conv1D(filters=int(out_tensor.shape[-1]), kernel_size=1,
padding='same', use_bias=False)(input_tensor)
shortcut_y = tf.keras.layers.BatchNormalization()(shortcut_y)
x = keras.layers.Add()([shortcut_y, out_tensor])
x = keras.layers.Activation('relu')(x)
return x
    def _build_model(self, X=None):
if config['split']:
input_layer = X
else:
input_layer = tf.keras.layers.Input(self.input_shape)
if self.preprocessing:
preprocessed = self._preprocessing(input_layer)
x = preprocessed
input_res = preprocessed
else:
x = input_layer
input_res = input_layer # for the residual connection
for d in range(self.depth):
x = self._module(x, d)
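            # with use_residual, insert a shortcut connection after every third module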
if self.use_residual and d % 3 == 2:
x = self._shortcut_layer(input_res, x)
input_res = x
gap_layer = tf.keras.layers.GlobalAveragePooling1D()(x)
if config['split']:
return gap_layer
# Optional: add some more dense layers here
#gap_layer = tf.keras.layers.Dense(300)(gap_layer)
#gap_layer = tf.keras.layers.Dense(50)(gap_layer)
# Add dropout for regularization
#gap_layer = keras.layers.Dropout(0.5)(gap_layer)
if config['data_mode'] == "fix_sacc_fix":
output_layer = tf.keras.layers.Dense(1, activation='linear')(gap_layer) # only predict the angle in this task
else:
output_layer = tf.keras.layers.Dense(2, activation='linear')(gap_layer) # linear activation for the 2D regression task
model = tf.keras.models.Model(inputs=input_layer, outputs=output_layer)
return model
def get_model(self):
return self.model
def fit(self, x, y, verbose=2):
# Split data
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=42)
# Define callbacks
csv_logger = CSVLogger(config['batches_log'], append=True, separator=';')
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=20)
prediction_ensemble = prediction_history((X_val,y_val))
ckpt_dir = config['model_dir'] + '/' + config['model'] + '_' + 'best_model.h5'
ckpt = tf.keras.callbacks.ModelCheckpoint(ckpt_dir, verbose=1, monitor='val_loss', save_best_only=True, mode='auto')
# Fit model
hist = self.model.fit(X_train, y_train, verbose=verbose, batch_size=self.batch_size, validation_data=(X_val,y_val),
epochs=self.epochs, callbacks=[csv_logger, ckpt, prediction_ensemble])
        # Log the mean prediction errors in the x and y directions
        if config['sanity_check'] and config['data_mode'] != 'fix_sacc_fix':
            x_mean_err, y_mean_err = sanity_check(self.model, X_val, y_val)
            logging.info("x mean coordinate error: {:.2f}, y mean coordinate error: {:.2f}".format(x_mean_err, y_mean_err))
        return hist, prediction_ensemble
\ No newline at end of file
import tensorflow as tf
import numpy as np
from config import config
from utils.utils import *
from utils.plot import plot_model
from utils.plot import plot_filters
import logging
from CNN.CNN import Classifier_CNN
from PyramidalCNN.PyramidalCNN import Classifier_PyramidalCNN
from DeepEye.deepeye import Classifier_DEEPEYE
from DeepEyeRNN.deepeyeRNN import Classifier_DEEPEYE_RNN
from Xception.Xception import Classifier_XCEPTION
from InceptionTime.Inception import Classifier_INCEPTION
from EEGNet.eegNet import Classifier_EEGNet
def run(trainX, trainY):
"""
Starts the multiples Classifier in the Ensemble and stores the histogram, the plots of loss and accuracy.
validation is of the ensemble model and training just the last one
"""
logging.info("Started running "+config['model']+". If you want to run other methods please choose another model in the config.py file.")
# acc = tf.keras.metrics.BinaryAccuracy()
bce = tf.keras.losses.BinaryCrossentropy()
    loss = []
    accuracy = []
for i in range(config['ensemble']):
        print('Beginning model number {}/{} ...'.format(i + 1, config['ensemble']))
if config['model'] == 'deepeye':
classifier = Classifier_DEEPEYE(input_shape=config['deepeye']['input_shape'])
elif config['model'] == 'cnn':
classifier = Classifier_CNN(input_shape=config['cnn']['input_shape'], kernel_size=64, epochs = 50,
nb_filters=16, verbose=True, batch_size=64, use_residual=True, depth=12)
elif config['model'] == 'pyramidal_cnn':
classifier = Classifier_PyramidalCNN(input_shape=config['cnn']['input_shape'], epochs=50)
elif config['model'] == 'eegnet':
classifier = Classifier_EEGNet(dropoutRate = 0.5, kernLength = 64, F1 = 32,
D = 8, F2 = 512, norm_rate = 0.5, dropoutType = 'Dropout',
epochs = 50)
elif config['model'] == 'inception':
classifier = Classifier_INCEPTION(input_shape=config['inception']['input_shape'], use_residual=True,
kernel_size=64, nb_filters=16, depth=12, bottleneck_size=16, epochs=50)
elif config['model'] == 'xception':
classifier = Classifier_XCEPTION(input_shape=config['inception']['input_shape'], use_residual=True,
kernel_size=40, nb_filters=64, depth=18, epochs=50)
elif config['model'] == 'deepeye-rnn':
classifier = Classifier_DEEPEYE_RNN(input_shape=config['deepeye-rnn']['input_shape'])
        else:
            logging.info('Cannot start the program. Please choose a valid model in the config.py file.')
            return
hist, pred_ensemble = classifier.fit(trainX,trainY)
if i == 0:
pred = pred_ensemble.predhis
else:
for j, pred_epoch in enumerate(pred_ensemble.predhis):
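                # add up the predictions of the different models for each epoch; the sum is
                # divided by the ensemble size below to obtain the mean prediction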
pred[j] = (np.array(pred[j])+np.array(pred_epoch))
# Plot the model as a graph with keras
if config['plot_model'] and i == 0: # only for the first model
plot_model(classifier.model)
# Plot the patterns that the filters look for
if config['plot_filters'] and i == 0:
plot_filters(classifier.model, config['model_dir'])
for j, pred_epoch in enumerate(pred):
pred_epoch = (pred_epoch/config['ensemble']).tolist()
loss.append(bce(pred_ensemble.targets,pred_epoch).numpy())
pred_epoch = np.round(pred_epoch,0)
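        # For binary labels, (p + t - 1)^2 is 1 when the rounded prediction matches the target
        # and 0 otherwise, so the mean over the validation set is the ensemble accuracy.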
accuracy.append(np.mean((np.array(pred_epoch).reshape(-1)+np.array(pred_ensemble.targets).reshape(-1)-1)**2))
    if config['ensemble'] > 1:
        config['model'] += '_ensemble'
if config['split']:
config['model'] = config['model'] + '_cluster'
hist.history['val_loss'] = loss
hist.history['val_accuracy'] = accuracy
plot_loss(hist, config['model_dir'], config['model'], val = True)
plot_acc(hist, config['model_dir'], config['model'], val = True)
save_logs(hist, config['model_dir'], config['model'], pytorch = False)
import tensorflow as tf
from config import config
from utils.utils import *
from utils.losses import angle_loss
from utils.plot import plot_model
from utils.plot import plot_filters
import logging
from CNN.Regression_CNN import Regression_CNN
from InceptionTime.Regression_inception import Regression_INCEPTION
from Xception.Regression_xception import Regression_XCEPTION
from DeepEye.Regression_deepeye import Regression_DEEPEYE
from PyramidalCNN.Regression_PyramidalCNN import Regression_PyramidalCNN
from Siamese.Siamese import Siamese_ConvNet
from Pretrained.Pretrained import Pretrained_Model
#TODO: rewrite the other classes
#from DeepEyeRNN.deepeyeRNN import Classifier_DEEPEYE_RNN
#from EEGNet.eegNet import Classifier_EEGNet
import numpy as np
def run(trainX, trainY):
"""
Starts the multiples Regressors in the Ensemble and stores the histogram, the plots of loss and accuracy.
validation is of the ensemble model and training just the last one
"""
logging.info("Started running " + config['model'] + ". If you want to run other methods please choose another model in the config.py file.")
# Metrics
mse = tf.keras.losses.MeanSquaredError()
hist = None
reg = None
loss = []
for i in range(config['ensemble']):
print('Beginning model number {}/{} ...'.format(i+1, config['ensemble']))
if config['pretrained']:
reg = Pretrained_Model(input_shape=config['inception']['input_shape'], batch_size=config['batch_size'],
epochs=config['epochs'], learning_rate=config['learning_rate'])
elif config['model'] == 'cnn':
reg = Regression_CNN(input_shape=config['cnn']['input_shape'], kernel_size=64, epochs = config['epochs'],
nb_filters=16, verbose=True, batch_size=config['batch_size'], use_residual=True, depth=10,
learning_rate=config['learning_rate'])
elif config['model'] == 'inception':
reg = Regression_INCEPTION(input_shape=config['inception']['input_shape'], use_residual=True, batch_size=config['batch_size'],
kernel_size=64, nb_filters=64, depth=12, bottleneck_size=32, epochs=config['epochs'],
learning_rate=config['learning_rate'], regularization=config['regularization'])
elif config['model'] == 'xception':
reg = Regression_XCEPTION(input_shape=config['inception']['input_shape'], use_residual=True,
kernel_size=40, nb_filters=64, depth=18, epochs=config['epochs'], batch_size=config['batch_size'],
learning_rate=config['learning_rate'], regularization=config['regularization'])
elif config['model'] == 'deepeye':
reg = Regression_DEEPEYE(input_shape=config['deepeye']['input_shape'], use_residual=True, batch_size=config['batch_size'],
kernel_size=64, nb_filters=32, depth=10, epochs=config['epochs'], preprocessing=False,
use_separable_convolution=True, use_simple_convolution=True, bottleneck_size=16,
learning_rate=config['learning_rate'], regularization=config['regularization'])
elif config['model'] == 'pyramidal_cnn':
reg = Regression_PyramidalCNN(input_shape=config['cnn']['input_shape'], epochs=config['epochs'], depth=8,
batch_size=config['batch_size'], learning_rate=config['learning_rate'],
regularization=config['regularization'])
elif config['model'] == 'siamese':
reg = Siamese_ConvNet(input_shape=None, use_residual=True, batch_size=config['batch_size'],
kernel_size=40, nb_filters=64, depth=12, epochs=config['epochs'], # nb_filters=64 was default from inception
learning_rate=config['learning_rate'], regularization=config['regularization'])
        else:
            logging.info('Cannot start the program. Please choose a valid model in the config.py file.')
            return
"""
elif config['model'] == 'eegnet':
classifier = Classifier_EEGNet(dropoutRate = 0.5, kernLength = 64, F1 = 32,
D = 8, F2 = 512, norm_rate = 0.5, dropoutType = 'Dropout',
epochs = 50)
elif config['model'] == 'deepeye-rnn':
classifier = Classifier_DEEPEYE_RNN(input_shape=config['deepeye-rnn']['input_shape'])
"""
hist, pred_ensemble = reg.fit(trainX, trainY)
if i == 0:
# store the prediction on the validation set of the first model in pred
pred = pred_ensemble.predhis
# Save the targets of the validation set, only once for first model
#targets_val_fname = config['model_dir'] + "/" + "targets_val.txt"
#logging.info("targets shape: {}".format(pred_ensemble.targets.shape))
#np.savetxt(targets_val_fname, pred_ensemble.targets, delimiter=',')
else:
for j, pred_epoch in enumerate(pred_ensemble.predhis):
                # add up the results of the different models for each epoch into pred, which is a list of arrays (one per epoch)
pred[j] = (np.array(pred[j]) + np.array(pred_epoch))
# Plot the model as a graph with keras
if config['plot_model'] and i == 0: # only for the first model
plot_model(reg.model)
# Plot the patterns that the filters look for
if config['plot_filters'] and i == 0:
plot_filters(reg.model, config['model_dir'])
for j, pred_epoch in enumerate(pred):
pred_epoch = (pred_epoch / config['ensemble']).tolist() # divide by number of ensembles to get mean prediction
# Compute the loss
if config['data_mode'] == 'fix_sacc_fix':
loss.append(angle_loss(pred_ensemble.targets, pred_epoch).numpy())
else:
loss.append(mse(pred_ensemble.targets, pred_epoch).numpy())
pred_epoch = np.round(pred_epoch, 0) # round to integral number
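        # Note: the rounded predictions are not used further here; the losses above are
        # computed on the raw ensemble mean.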
# save the ensemble loss to the model directory
loss_fname = config['model_dir'] + "/" + "ensemble_loss.txt"
np.savetxt(loss_fname, loss, delimiter=',')
# save the validation set predictions to the model directory
#pred_val_fname = config['model_dir'] + "/" + "pred_val.txt"
#logging.info("prediction val shape: {}".format(pred[-1]))
#np.savetxt(pred_val_fname, pred[-1], delimiter=',') # this is the prediction in the last epoch
    if config['ensemble'] == 1:
        # Only one model, so just plot its loss
plot_loss(hist, config['model_dir'], config['model'], val = True)
elif config['ensemble'] > 1:
# Ensemble, plot the ensemble loss
config['model'] += '_ensemble'
hist.history['val_loss'] = np.array(loss)
# plot ensemble loss
plot_loss(hist, config['model_dir'], config['model'], val = True)
#if config['split']:
#config['model'] = config['model'] + '_cluster'
#TODO: rewrite the function below to properly store stats and results
#save_logs(hist, config['model_dir'], config['model'], pytorch = False)
logging.info("Done with training and plotting.")
\ No newline at end of file
from kerastuner.tuners import RandomSearch
from kerastuner.tuners import BayesianOptimization
from sklearn.model_selection import train_test_split
from config import config
import logging
from CNN.Regression_CNN import Regression_CNN
from InceptionTime.Regression_inception import Regression_INCEPTION
from Xception.Regression_xception import Regression_XCEPTION
from DeepEye.Regression_deepeye import Regression_DEEPEYE
from PyramidalCNN.Regression_PyramidalCNN import Regression_PyramidalCNN
def build_model(hp):
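    # Builds one model per trial; hp.Choice samples from the listed values and hp.Int from
    # the given range for each hyperparameter.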
reg = None
logging.info('Starting tuning ' + config['model'])
if config['model'] == 'deepeye':
reg = Regression_DEEPEYE(input_shape=config['deepeye']['input_shape'],
epochs=15, verbose=True, batch_size=64, use_residual=True,
kernel_size=hp.Choice('kernel_size', values=[32, 40, 64]),
nb_filters=hp.Choice('nb_filters', values=[32, 64]),
depth=hp.Int('depth',min_value=6,max_value=14, step=4),
bottleneck_size=hp.Choice('bottleneck_size', values=[32, 64]),
use_simple_convolution= hp.Choice('use_simple_convolution', values=[True, False]),
use_separable_convolution= hp.Choice('use_separable_convolution', values = [True, False]),
learning_rate=hp.Choice('learning_rate', values=[1e-2,1e-3,1e-4,1e-5,1e-6]),
preprocessing=False)
elif config['model'] == 'cnn':
reg = Regression_CNN(input_shape=config['cnn']['input_shape'],
epochs=15, verbose=True, batch_size=64,
use_residual=hp.Choice('use_residual', values=[True, False]),
kernel_size=hp.Choice('kernel_size', values=[40, 32, 64]),
nb_filters=hp.Choice('nb_filters', values=[16, 32, 64]),
depth=hp.Int('depth', min_value=6, max_value=20, step=3),
learning_rate=hp.Choice('learning_rate', values=[1e-2,1e-3,1e-4,1e-5,1e-6]),
preprocessing=False
)
elif config['model'] == 'inception':
reg = Regression_INCEPTION(input_shape=config['inception']['input_shape'],
epochs=15,
verbose=True,
batch_size=64,
use_residual=hp.Choice('use_residual', values=[True, False]),
kernel_size=hp.Choice('kernel_size', values=[40, 32, 64]),
nb_filters=hp.Choice('nb_filters', values=[16, 32, 64]),
depth=hp.Int('depth', min_value=6, max_value=20, step=3),
learning_rate=hp.Choice('learning_rate', values=[1e-2,1e-3,1e-4,1e-5,1e-6]),
bottleneck_size=hp.Choice('bottleneck_size', values=[16, 32, 64])
)
elif config['model'] == 'xception':
reg = Regression_XCEPTION(input_shape=config['inception']['input_shape'],
epochs=15, verbose=True,
batch_size=64,
use_residual=hp.Choice('use_residual', values=[True, False]),
kernel_size=hp.Choice('kernel_size', values=[40, 32, 64]),
nb_filters=hp.Choice('nb_filters', values=[16, 32, 64]),
depth=hp.Int('depth', min_value=6, max_value=20, step=3),
learning_rate=hp.Choice('learning_rate', values=[1e-2,1e-3,1e-4,1e-5,1e-6])
)
elif config['model'] == 'pyramidal_cnn':
reg = Regression_PyramidalCNN(input_shape=config['pyramidal_cnn']['input_shape'],
epochs=15, verbose=True, batch_size=64,
kernel_size=hp.Choice('kernel_size', values=[40, 32, 64]),
nb_filters=hp.Choice('nb_filters', values=[16, 32, 64]),
depth=hp.Int('depth', min_value=6, max_value=20, step=3),
learning_rate=hp.Choice('learning_rate', values=[1e-2,1e-3,1e-4,1e-5,1e-6])
)
    elif config['model'] == 'siamese':
        # note: this branch currently tunes the inception regressor rather than Siamese_ConvNet
        reg = Regression_INCEPTION(input_shape=config['inception']['input_shape'],
epochs=15,
verbose=True,
batch_size=64,
use_residual=hp.Choice('use_residual', values=[True, False]),
kernel_size=hp.Choice('kernel_size', values=[40, 32, 64]),
nb_filters=hp.Choice('nb_filters', values=[16, 32, 64]),
depth=hp.Int('depth', min_value=6, max_value=20, step=3),
learning_rate=hp.Choice('learning_rate', values=[1e-2,1e-3,1e-4,1e-5,1e-6]),
bottleneck_size=hp.Choice('bottleneck_size', values=[16, 32, 64]),
regularization=hp.Choice('regularization', values=[0.0, 1e-1, 1e-2, 1e-3])
)
else:
logging.info('Cannot start the program. Please choose one model in the config.py file')
"""
elif config['model'] == 'eegnet':
classifier = Classifier_EEGNet(dropoutRate = 0.5,
kernLength = hp.Choice('kernelLength', values=[64, 125, 250]),
F1 = hp.Choice('F1', values=[16, 32, 64]),
D = hp.Choice('D', values=[2, 4, 8]),
F2 = hp.Choice('F2', values=[32, 64, 128, 256, 512]),
norm_rate = 0.5, dropoutType = 'Dropout', epochs = 50)
elif config['model'] == 'deepeye-rnn':
classifier = Classifier_DEEPEYE_RNN(input_shape=config['deepeye-rnn']['input_shape'])
"""
return reg.get_model()
def tune(trainX, trainY):
#TODO: also tune the tuner :)
tuner = RandomSearch(
build_model,
        objective='val_loss',  # the models are compiled without extra metrics, so tune on the validation loss
max_trials=32,
executions_per_trial=1,
directory='kerasTunerResults',
project_name='KerasTuner')
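    # BayesianOptimization (imported above) takes the same core arguments and can be swapped
    # in for RandomSearch to explore the search space more efficiently.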
#print(trainX.shape)
tuner.search_space_summary()
X_train, X_val, y_train, y_val = train_test_split(trainX, trainY, test_size=0.2, random_state=42)
tuner.search(X_train, y_train, epochs=20, validation_data=(X_val, y_val), verbose=2)
tuner.results_summary()
%% Cell type:code id: tags:
```
import torch
from torch_models.CNN.CNN import CNN
```
%% Cell type:code id: tags:
```
model = CNN(input_shape=256)
print(model)
```
%% Output
CNN
Using cpu device
CNN
%% Cell type:code id: tags:
```
```