Commit 1d88c038 authored by Lukas Wolf

siamese for sac-fix, angle adjustment for fix-sac-fix

parent dc8d4ef3
@@ -27,7 +27,7 @@ class Regression_ConvNet(ABC):
         self.use_residual = use_residual
         self.depth = depth
         self.callbacks = None
-        self.batch_size = batch_size
+        self.batch_size = config['batch_size']
         self.verbose = verbose
         self.kernel_size = kernel_size
         self.nb_filters = nb_filters
@@ -24,9 +24,10 @@ config['root_dir'] = '.'
 # You can modify the rest or add new fields as you need.
 # Hyper-parameters and training configuration.
-config['learning_rate'] = 1e-5
-config['regularization'] = 1e-4
-config['epochs'] = 100
+config['learning_rate'] = 1e-6
+config['regularization'] = 0.1
+config['epochs'] = 150
+config['batch_size'] = 512
 """
 Parameters that can be chosen:
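Taken together, the first two hunks move the batch size out of the model constructor and into the shared config dict, so every model reads the same training settings. A minimal sketch of that pattern, with an assumed module layout rather than the repository's actual files:

```python
# Sketch of the shared-config pattern (assumed layout, not the repo's files).
config = {}
config['learning_rate'] = 1e-6
config['batch_size'] = 512
config['epochs'] = 150

class Regression_ConvNet:
    def __init__(self, epochs, batch_size=64, verbose=True):
        # After this commit the batch size is taken from the config dict,
        # so the constructor argument is effectively ignored.
        self.batch_size = config['batch_size']
        self.epochs = epochs
        self.verbose = verbose
```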
@@ -54,8 +55,8 @@ config['data-fraction'] = 1.0 # Set to 1.0 if you want to use the whole dataset,
 #config['data_mode'] = 'fix_only'
 #config['data_mode'] = 'sacc_only'
-config['data_mode'] = 'sacc_fix'
-#config['data_mode'] = 'fix_sacc_fix'
+#config['data_mode'] = 'sacc_fix'
+config['data_mode'] = 'fix_sacc_fix'
 # Choose to either run the kerastuner on the model or
 #config['run'] = 'kerastuner'
@@ -64,12 +65,12 @@ config['ensemble'] = 1 #number of models in the ensemble method
 # Choosing model
 #config['model'] = 'cnn'
-#config['model'] = 'inception'
+config['model'] = 'inception'
 #config['model'] = 'eegnet'
 #config['model'] = 'deepeye'
 #config['model'] = 'xception'
 #config['model'] = 'pyramidal_cnn'
-config['model'] = 'siamese' # Note that you have to set data_mode to sacc_fix for this
+#config['model'] = 'siamese' # Note that you have to set data_mode to sacc_fix for this
 # Options for classification task, currently not used for regression
 config['downsampled'] = False
@@ -83,7 +84,7 @@ if config['gaze-reg']:
     config['trainX_variable'] = 'EEGdata'
     config['trainY_variable'] = 'label'
-    config['padding'] = 'repeat' # options: zero, repeat #TODO: find more options for clever padding
+    config['padding'] = 'zero' # options: zero, repeat #TODO: find more options for clever padding
     config['min_fixation'] = 50 # choose a minimum length for the gaze fixation
     config['max_fixation'] = 150 # choose a maximum length for the gaze fixation
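Both padding options bring variable-length EEG segments up to the fixed max_fixation length. A minimal sketch of what 'zero' and 'repeat' plausibly mean here (the helper name and exact semantics are assumptions, not code from the repository):

```python
import numpy as np

def pad_segment(x, target_len, mode='zero'):
    """Hypothetical padding helper: x is a (time, channels) EEG segment."""
    if len(x) >= target_len:
        return x[:target_len]          # truncate overly long segments
    missing = target_len - len(x)
    if mode == 'zero':
        filler = np.zeros((missing, x.shape[1]))
    elif mode == 'repeat':
        # Repeat the segment from its start until enough samples exist.
        filler = np.tile(x, (missing // len(x) + 1, 1))[:missing]
    else:
        raise ValueError(mode)
    return np.vstack([x, filler])
```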
@@ -117,7 +118,6 @@ config['eegnet'] = {}
 config['deepeye-rnn'] = {}
 if config['gaze-reg']:
-    #TODO: automatically set the input shapes depending on the dataset to run, i.e. fix only, sacc only, etc.
     if config['data_mode'] == 'fix_only':
         config['cnn']['input_shape'] = (int(config['max_fixation']), 129) # e.g. for max_duration 300 we have shape (150,129)
@@ -63,7 +63,7 @@ def run(trainX, trainY):
     elif config['model'] == 'siamese':
         reg = Siamese_ConvNet(input_shape=None, use_residual=True,
-                kernel_size=40, nb_filters=64, depth=18, epochs=config['epochs'],
+                kernel_size=40, nb_filters=32, depth=14, epochs=config['epochs'], # nb_filters=64 was default from inception
                 learning_rate=config['learning_rate'], regularization=config['regularization'])
     else:
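For context, a weight-sharing ("siamese") regressor of the kind Siamese_ConvNet suggests could look like the sketch below: one convolutional branch applied to both the saccade and the fixation segment, with the branch outputs merged into a regression head. This is an illustrative Keras construction under assumed input shapes, not the repository's implementation:

```python
import tensorflow as tf
from tensorflow.keras import layers, models, regularizers

def make_branch(input_shape, nb_filters=32, kernel_size=40, reg=0.1):
    """One shared convolutional branch (architecture is an assumption)."""
    inp = layers.Input(shape=input_shape)
    x = layers.Conv1D(nb_filters, kernel_size, padding='same',
                      kernel_regularizer=regularizers.l2(reg))(inp)
    x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    x = layers.GlobalAveragePooling1D()(x)
    return models.Model(inp, x)

seg_shape = (150, 129)                  # (time, EEG channels), assumed
branch = make_branch(seg_shape)         # same weights applied to both inputs
sacc = layers.Input(shape=seg_shape)
fix = layers.Input(shape=seg_shape)
merged = layers.concatenate([branch(sacc), branch(fix)])
out = layers.Dense(1)(merged)           # regression head
model = models.Model([sacc, fix], out)
model.compile(optimizer=tf.keras.optimizers.Adam(1e-6), loss='mse')
```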
@@ -78,7 +78,7 @@ def run(trainX, trainY):
     classifier = Classifier_DEEPEYE_RNN(input_shape=config['deepeye-rnn']['input_shape'])
     """
-    hist, pred_ensemble = reg.fit(trainX,trainY)
+    hist, pred_ensemble = reg.fit(trainX, trainY)
     if i == 0:
         pred = pred_ensemble.predhis
@@ -82,7 +82,8 @@ def build_model(hp):
         nb_filters=hp.Choice('nb_filters', values=[16, 32, 64]),
         depth=hp.Int('depth', min_value=6, max_value=20, step=3),
         learning_rate=hp.Choice('learning_rate', values=[1e-2,1e-3,1e-4,1e-5,1e-6]),
-        bottleneck_size=hp.Choice('bottleneck_size', values=[16, 32, 64])
+        bottleneck_size=hp.Choice('bottleneck_size', values=[16, 32, 64]),
+        regularization=hp.Choice('regularization', values=[0.0, 1e-1, 1e-2, 1e-3])
     )
 else:
@@ -106,7 +107,7 @@ def build_model(hp):
 def tune(trainX, trainY):
     #TODO: also tune the tuner :)
-    tuner = BayesianOptimization(
+    tuner = RandomSearch(
         build_model,
         objective='mean_squared_error',
         max_trials=32,
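The tuner swap only changes the search strategy; RandomSearch and BayesianOptimization share the same constructor surface in keras-tuner, so build_model and the trial budget carry over unchanged. A minimal, self-contained usage sketch (the model body and data are placeholders, not the repository's build_model):

```python
import kerastuner as kt
from tensorflow import keras

def build_model(hp):
    # Each hp.Choice registers a hyperparameter; the tuner picks a value per trial.
    model = keras.Sequential([
        keras.layers.Dense(hp.Choice('units', values=[16, 32, 64]),
                           activation='relu', input_shape=(8,)),
        keras.layers.Dense(1),
    ])
    model.compile(
        optimizer=keras.optimizers.Adam(hp.Choice('learning_rate', values=[1e-3, 1e-4])),
        loss='mean_squared_error',
        metrics=['mean_squared_error'])
    return model

tuner = kt.tuners.RandomSearch(   # drop-in replacement for BayesianOptimization
    build_model,
    objective='mean_squared_error',
    max_trials=32)
# tuner.search(trainX, trainY, validation_split=0.2, epochs=10)
```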
@@ -34,6 +34,10 @@ def main():
     # Log some parameters to better distinguish between tasks
     logging.info("Learning rate: {}".format(config['learning_rate']))
     logging.info("Regularization: {}".format(config['regularization']))
+    logging.info("Batch size: {}".format(config['batch_size']))
+    logging.info("Maximal number of epochs: {}".format(config['epochs']))
     if config['run'] == "kerastuner":
         logging.info("Running the keras-tuner")
     else:
@@ -3,13 +3,12 @@ import matplotlib.pyplot as plt
 import numpy as np
 import pandas as pd
-path = '../runs/1615920751_xception_gaze-reg/batches.log'
+#path = '../runs/1615920751_xception_gaze-reg/batches.log'
 def plot_batches_log_loss(model_name):
     """
     Create loss and validation loss plots from the batches.log file
     """
     dir = './runs/' # must be correct relative to caller
     path = dir + model_name + 'batches.log'
     df = pd.read_csv(path, sep=';')
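A minimal, runnable version of what this plotting helper appears to do, under the assumption that batches.log is a semicolon-separated CSV with 'loss' and 'val_loss' columns (the column names are assumptions):

```python
import os
import matplotlib.pyplot as plt
import pandas as pd

def plot_batches_log_loss(model_name, runs_dir='./runs/'):
    # Assumed log format: semicolon-separated, with 'loss' and 'val_loss' columns.
    path = os.path.join(runs_dir, model_name, 'batches.log')
    df = pd.read_csv(path, sep=';')
    plt.plot(df['loss'], label='loss')
    if 'val_loss' in df.columns:
        plt.plot(df['val_loss'], label='val_loss')
    plt.xlabel('batch')
    plt.ylabel('loss')
    plt.legend()
    plt.savefig(os.path.join(runs_dir, model_name, 'loss_plot.png'))
```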
@@ -432,6 +432,9 @@ def get_fix_sacc_fix_data(verbose=True):
     rho, phi = cart2pol(dx, dy)
     #y_datapoint = np.array([rho, phi])
     #y_datapoint = np.array([dx, dy])
+    if phi < 0: # Only positive gradients, therefore add 2*Pi if necessary
+        phi = phi + 2 * np.pi
+    y_datapoint = np.array([phi])
     # Append to X and y
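The angle adjustment named in the commit message maps the saccade angle from arctan2's (-π, π] range onto [0, 2π), so the regression target is always positive. A short sketch of cart2pol with that wrap applied (cart2pol's body is the common polar-coordinate conversion, shown here for illustration rather than copied from the repository):

```python
import numpy as np

def cart2pol(dx, dy):
    """Cartesian displacement -> polar (radius, angle)."""
    rho = np.sqrt(dx ** 2 + dy ** 2)
    phi = np.arctan2(dy, dx)      # angle in (-pi, pi]
    return rho, phi

rho, phi = cart2pol(-1.0, -1.0)   # phi = -3*pi/4
if phi < 0:
    phi += 2 * np.pi              # wrapped into [0, 2*pi): phi = 5*pi/4
```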