Commit 435a1428 authored by Lukas Wolf's avatar Lukas Wolf
Browse files

created sanity check plots

parent 341469b4
......@@ -11,11 +11,17 @@ from utils.analysis import sanity_check
class prediction_history(tf.keras.callbacks.Callback):
    """Keras callback that records the model's validation-set predictions after every epoch.

    `predhis` holds one prediction array per epoch. In the ensemble loop these
    per-epoch arrays from each model can be summed element-wise and divided by
    the number of ensemble members to obtain the mean ensemble prediction.
    """

    def __init__(self, validation_data):
        """
        Args:
            validation_data: tuple ``(inputs, targets)``; ``inputs`` is what the
                model predicts on each epoch, ``targets`` is kept so the caller
                can compute losses against the stored predictions later.
        """
        self.validation_data = validation_data
        self.predhis = []  # one prediction array appended per completed epoch
        self.targets = validation_data[1]

    def on_epoch_end(self, epoch, logs=None):
        """Append the model's predictions on the validation inputs to `predhis`.

        `logs` is unused; its default was changed from the mutable ``{}`` to
        ``None``, matching the standard Keras callback signature and avoiding
        the shared-mutable-default pitfall.
        """
        y_pred = self.model.predict(self.validation_data[0])
        self.predhis.append(y_pred)
......
......@@ -42,22 +42,25 @@ If split set to true, the data will be clustered and fed each to a separate arch
finally used for classification.
Cluster can be set to clustering(), clustering2() or clustering3(), where different clusters based on literature are used.
"""
# Choose experiment TODO: create config['experiment'] which can be chosen like the model
config['gaze-reg'] = False # Set to False if you want to run the saccade classification task
config['prosaccade'] = True
config['calibration-task'] = False
# Choose how much data to use on gaze-reg
config['data-fraction'] = 1.0 # Set to 1.0 if you want to use the whole dataset, experimental feature only for regression task \
# Hyper-parameters and training configuration.
config['learning_rate'] = 1e-3 # fix only: 1e-2, sac only: 1e-3, sac_fix: 1e-3 , fix_sac_fix: 1e-4
config['regularization'] = 1e-1 # fix only: 1e-3, sac only: 1e-2, sac_fix: 1e-1, fix_sac_fix: 5
config['epochs'] = 2
config['learning_rate'] = 1e-4 # fix only: 1e-2, sac only: 1e-3, sac_fix: 1e-3 , fix_sac_fix: 1e-4
config['regularization'] = 5 # fix only: 1e-3, sac only: 1e-2, sac_fix: 1, fix_sac_fix: 5
config['epochs'] = 5
config['batch_size'] = 64
# Choose experiment
config['gaze-reg'] = False # Set to False if you want to run the saccade classification task
config['data-fraction'] = 0.1 # Set to 1.0 if you want to use the whole dataset, experimental feature only for regression task \
# Choose which dataset to run the gaze regression on
#config['data_mode'] = 'fix_only'
#config['data_mode'] = 'sacc_only'
config['data_mode'] = 'sacc_fix'
#config['data_mode'] = 'fix_sacc_fix'
#config['data_mode'] = 'sacc_fix'
config['data_mode'] = 'fix_sacc_fix'
# Choose model
config['model'] = 'cnn'
......@@ -71,11 +74,11 @@ config['model'] = 'cnn'
# Choose the kerastuner or an ensemble of models
#config['run'] = 'kerastuner'
config['run'] = 'ensemble'
config['ensemble'] = 1 #number of models in the ensemble method
config['ensemble'] = 5 #number of models in the ensemble method
# Other functions that can be chosen optionally
config['sanity_check'] = False
config['plot_filters'] = False
config['plot_filters'] = False #TODO: fix the plot_filters function
# Set loss automatically depending on the dataset/task to run
if config['data_mode'] == 'fix_sacc_fix':
......
This diff is collapsed.
This diff is collapsed.
......@@ -40,7 +40,7 @@ def run(trainX, trainY):
if config['model'] == 'cnn':
reg = Regression_CNN(input_shape=config['cnn']['input_shape'], kernel_size=64, epochs = config['epochs'],
nb_filters=16, verbose=True, batch_size=config['batch_size'], use_residual=True, depth=12,
nb_filters=16, verbose=True, batch_size=config['batch_size'], use_residual=True, depth=10,
learning_rate=config['learning_rate'])
elif config['model'] == 'inception':
......@@ -84,27 +84,45 @@ def run(trainX, trainY):
hist, pred_ensemble = reg.fit(trainX, trainY)
if i == 0:
# store the prediction on the validation set of the first model in pred
pred = pred_ensemble.predhis
# Plot the filters of the conv modules only once for the first model
if config['plot_filters']:
plot_filters(reg.model, config['model_dir'])
# Save the targets of the validation set, only once for first model
targets_val_fname = config['model_dir'] + "/" + "targets_val.txt"
logging.info("targets shape: {}".format(pred_ensemble.targets.shape))
np.savetxt(targets_val_fname, pred_ensemble.targets, delimiter=',')
else:
for j, pred_epoch in enumerate(pred_ensemble.predhis):
pred[j] = (np.array(pred[j]) + np.array(pred_epoch))
# add up the resuls of the different models for each epoch into pred, which is a list of lists (one for each epoch prediction)
pred[j] = (np.array(pred[j]) + np.array(pred_epoch))
for j, pred_epoch in enumerate(pred):
pred_epoch = (pred_epoch / config['ensemble']).tolist()
pred_epoch = (pred_epoch / config['ensemble']).tolist() # divide by number of ensembles to get mean prediction
# Compute the loss
if config['data_mode'] == 'fix_sacc_fix':
loss.append(angle_loss(pred_ensemble.targets, pred_epoch).numpy())
else:
loss.append(mse(pred_ensemble.targets, pred_epoch).numpy())
pred_epoch = np.round(pred_epoch, 0)
#accuracy.append(np.mean((np.array(pred_epoch).reshape(-1)+np.array(pred_ensemble.targets).reshape(-1)-1)**2))
if not config['gaze-reg']:
# Compute loss for the classification task
accuracy.append(np.mean((np.array(pred_epoch).reshape(-1) + np.array(pred_ensemble.targets).reshape(-1) - 1)**2))
pred_epoch = np.round(pred_epoch, 0) # round to integral number
# save the ensemble loss to the model directory
loss_fname = config['model_dir'] + "/" + "ensemble_loss.txt"
np.savetxt(loss_fname, loss, delimiter=',')
# save the validation predictions to the model directory
pred_val_fname = config['model_dir'] + "/" + "pred_val.txt"
logging.info("prediction val shape: {}".format(pred[-1]))
np.savetxt(pred_val_fname, pred[-1], delimiter=',') # this is the prediction in the last epoch
if(config['ensemble'] == 1):
# Only one model, just plot the loss
plot_loss(hist, config['model_dir'], config['model'], val = True)
......
This diff is collapsed.
This diff is collapsed.
......@@ -4,7 +4,7 @@
#SBATCH --output=log/%j.out # where to store the output (%j is the JOBID), subdirectory must exist
#SBATCH --error=log/%j.err # where to store error messages
#SBATCH --gres=gpu:1
#SBATCH --mem=20G
#SBATCH --mem=60G
echo "Running on host: $(hostname)"
echo "In directory: $(pwd)"
......
......@@ -286,14 +286,14 @@ def get_sacc_fix_data(verbose=True):
y_datapoint = np.array([fix_avg_x, fix_avg_y])
# Append to X and y
x_list.append([x_datapoint])
y_list.append([y_datapoint])
x_list.append(x_datapoint)
y_list.append(y_datapoint)
X = np.asarray(x_list)
X = X[:,:,:129] # Cut off the last 4 columns (time, x, y, pupil size)
# Normalize the data
norm = np.linalg.norm(X)
X = X / norm
#norm = np.linalg.norm(X)
#X = X / norm
y = np.asarray(y_list)
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment