Commit a4e1a499 authored by Lukas Wolf's avatar Lukas Wolf
Browse files

clean

parent a32fc593
......@@ -33,9 +33,7 @@ class Regression_INCEPTION(Regression_ConvNet):
The outputs of each convolution are concatenated, followed by batch normalization and a ReLu activation.
"""
if int(input_tensor.shape[-1]) > 1:
input_inception = tf.keras.layers.Conv1D(filters=self.bottleneck_size, kernel_size=1, padding='same', use_bias=False,
kernel_regularizer=tf.keras.regularizers.l1(self.regularization),
activity_regularizer=tf.keras.regularizers.l2(self.regularization))(input_tensor)
input_inception = tf.keras.layers.Conv1D(filters=self.bottleneck_size, kernel_size=1, padding='same', use_bias=False)(input_tensor)
else:
input_inception = input_tensor
......@@ -44,15 +42,11 @@ class Regression_INCEPTION(Regression_ConvNet):
# create the filters in parallel
for i in range(len(kernel_size_s)):
conv_list.append(
tf.keras.layers.Conv1D(filters=self.nb_filters, kernel_size=kernel_size_s[i], padding='same', use_bias=False,
kernel_regularizer=tf.keras.regularizers.l1(self.regularization),
activity_regularizer=tf.keras.regularizers.l2(self.regularization))(input_inception))
tf.keras.layers.Conv1D(filters=self.nb_filters, kernel_size=kernel_size_s[i], padding='same', use_bias=False)(input_inception))
max_pool_1 = tf.keras.layers.MaxPool1D(pool_size=3, strides=1, padding='same')(input_tensor)
conv_6 = tf.keras.layers.Conv1D(filters=self.nb_filters, kernel_size=1, padding='same', use_bias=False,
kernel_regularizer=tf.keras.regularizers.l1(self.regularization),
activity_regularizer=tf.keras.regularizers.l2(self.regularization))(max_pool_1)
conv_6 = tf.keras.layers.Conv1D(filters=self.nb_filters, kernel_size=1, padding='same', use_bias=False)(max_pool_1)
conv_list.append(conv_6)
x = tf.keras.layers.Concatenate(axis=2)(conv_list)
......
......@@ -16,14 +16,24 @@ class Pretrained_Model(Regression_ConvNet):
# Overwrite this method to load an existing model instead of building a new one
def _build_model(self, X=[]):
# Define the model paths to the used pretrained models
if config['model'] == 'inception':
name = 'inception'
model_dir= "./archive_runs/proc_speed_task/angle_reg/1618902918_inception_angle-reg_630_fix_sacc_fix_processing_speed_task/"
elif config['model'] == 'cnn':
name = 'cnn'
model_dir = "./archive_runs/proc_speed_task/angle_reg/1618901397_cnn_angle-reg_630_fix_sacc_fix_processing_speed_task/"
else:
raise Exception("No valid path to a pretrained model")
if config['data_mode'] == 'fix_sacc_fix':
if config['model'] == 'inception':
name = 'inception'
model_dir= "./archive_runs/proc_speed_task/angle_reg/1618902918_inception_angle-reg_630_fix_sacc_fix_processing_speed_task/"
elif config['model'] == 'cnn':
name = 'cnn'
model_dir = "./archive_runs/proc_speed_task/angle_reg/1618901397_cnn_angle-reg_630_fix_sacc_fix_processing_speed_task/"
else:
raise Exception("No valid path to a pretrained model")
elif config['data_mode'] == 'sacc_fix':
if config['model'] == 'inception':
name = 'inception'
model_dir= "./archive_runs/proc_speed_task/sacc_fix/1619085877_inception_gaze-reg_330_sacc_fix_processing_speed_task_ensemble/"
elif config['model'] == 'cnn':
name = 'cnn'
model_dir = "./archive_runs/proc_speed_task/sacc_fix/1619161485_cnn_gaze-reg_330_sacc_fix_processing_speed_task_ensemble/"
else:
raise Exception("No valid path to a pretrained model")
# Load the model
model = keras.models.load_model(model_dir + name + "_best_model.h5", compile=False)
......
......@@ -120,6 +120,9 @@ class Regression_ConvNet(ABC):
#gap_layer = tf.keras.layers.Dense(300)(gap_layer)
#gap_layer = tf.keras.layers.Dense(50)(gap_layer)
# Add dropout for regularization
#gap_layer = keras.layers.Dropout(0.5)(gap_layer)
if config['data_mode'] == "fix_sacc_fix":
output_layer = tf.keras.layers.Dense(1, activation='linear')(gap_layer) # only predict the angle in this task
else:
......
......@@ -30,8 +30,8 @@ TODO: write a proper description how to set the fields in the config
# Choose which task to run
#config['task'] = 'prosaccade-clf'
#config['task'] = 'gaze-reg'
config['task'] = 'angle-reg'
config['task'] = 'gaze-reg'
#config['task'] = 'angle-reg'
# Choose from which experiment the dataset to load. Can only be chosen for angle-pred and gaze-reg
# TODO: also make calibration task data available for gaze-reg
......@@ -66,7 +66,7 @@ Cluster can be set to clustering(), clustering2() or clustering3(), where differ
"""
# We can use a model pretrained on processing speed task
config['pretrained'] = True
config['pretrained'] = False
# Choose model
config['model'] = 'cnn'
......@@ -78,9 +78,9 @@ config['model'] = 'cnn'
#config['model'] = 'siamese' # Note that you have to set data_mode to sacc_fix for this model
# Hyper-parameters and training configuration.
config['learning_rate'] = 1e-5 # fix only: 1e-2, sac only: 1e-3, sac_fix: 1e-3 , fix_sac_fix: 1e-4, for inception on angle 1e-5
config['regularization'] = 0 # fix only: 1e-3, sac only: 1e-2, sac_fix: 1, fix_sac_fix: 5, for inception on angle 0
config['epochs'] = 150
config['learning_rate'] = 1e-3 # fix only: 1e-2, sac only: 1e-3, sac_fix: 1e-3 , fix_sac_fix: 1e-4, for inception on angle 1e-5
config['regularization'] = 1 # fix only: 1e-3, sac only: 1e-2, sac_fix: 1, fix_sac_fix: 5, for inception on angle 0
config['epochs'] = 100
config['batch_size'] = 64
# Choose the kerastuner or an ensemble of models
......@@ -90,7 +90,7 @@ config['ensemble'] = 1 #number of models in the ensemble method
# Other functions that can be chosen optionally
config['sanity_check'] = False
config['plot_model'] = False
config['plot_model'] = True
config['plot_filters'] = False #TODO: make this work, valueerror from tf because of (1,w,h) instead of (none,w,h) expected
# Set loss automatically depending on the dataset/task to run
......
......@@ -248,7 +248,7 @@ def get_sacc_fix_data(task='processing_speed_task', verbose=True):
path = os.path.join(subdir, file)
events = load_sEEG_events(path) # access event i via events[i]
data = load_sEEG_data(path)
print("we have a file")
# Extract X and y from sEEG.data and sEEG.events
for i in range(len(events)):
event = events[i]
......@@ -286,15 +286,11 @@ def get_sacc_fix_data(task='processing_speed_task', verbose=True):
fixation_end_time = int(fixation[4])
fixation_datapoint = np.array(data[fixation_start_time:fixation_end_time])
x_len_fix, y_len_fix = fixation_datapoint.shape
print(x_len_fix)
print(config['max_fixation'])
print(config['fixation_padlength'])
if x_len_fix < config['min_fixation'] or x_len_fix > config['max_fixation']:
continue
if x_len_fix > config['fixation_padlength']:
fixation_datapoint = fixation_datapoint[:config['fixation_padlength'], :]
x_len_fix = config['fixation_padlength']
print("Found one")
# Pad the first fixation
fixation_padding_size = config['fixation_padlength'] - x_len_fix
if config['padding'] == 'zero':
......
......@@ -51,7 +51,10 @@ def plot_loss(hist, output_directory, model, val=False, savefig=True):
epochs = np.arange(epochs)
plt.figure()
plt.title(model + ' loss')
if config['pretrained']:
plt.title("Pretrained " + model + ' loss')
else:
plt.title(model + ' loss')
# plot the training curve
plt.plot(epochs, np.array(hist.history['loss']), 'b-', label='training')
# plot the validation curve
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment